Page MenuHomeFreeBSD

No OneTemporary

This file is larger than 256 KB, so syntax highlighting was skipped.
diff --git a/sys/arm/allwinner/if_emac.c b/sys/arm/allwinner/if_emac.c
index f581d361d3d9..1db43cbca26c 100644
--- a/sys/arm/allwinner/if_emac.c
+++ b/sys/arm/allwinner/if_emac.c
@@ -1,1195 +1,1190 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2013 Ganbold Tsagaankhuu <ganbold@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* A10/A20 EMAC driver */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/gpio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/intr.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_mib.h>
#include <net/ethernet.h>
#include <net/if_vlan_var.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif
#include <net/bpf.h>
#include <net/bpfdesc.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <arm/allwinner/if_emacreg.h>
#include <arm/allwinner/aw_sid.h>
#include <dev/clk/clk.h>
#include "miibus_if.h"
#include "gpio_if.h"
#include "a10_sramc.h"
/* Per-device software state for the Allwinner A10/A20 EMAC. */
struct emac_softc {
if_t emac_ifp; /* network interface */
device_t emac_dev; /* this device */
device_t emac_miibus; /* attached miibus child (PHY access) */
bus_space_handle_t emac_handle; /* register window handle */
bus_space_tag_t emac_tag; /* register window tag */
struct resource *emac_res; /* memory resource */
struct resource *emac_irq; /* interrupt resource */
void *emac_intrhand; /* interrupt handler cookie */
clk_t emac_clk; /* EMAC gate clock */
int emac_if_flags; /* last if_flags seen by ioctl (for diffing) */
struct mtx emac_mtx; /* driver lock */
struct callout emac_tick_ch; /* 1 Hz tick callout */
int emac_watchdog_timer; /* TX watchdog countdown; 0 = disarmed */
int emac_rx_process_limit; /* max RX frames handled per interrupt */
int emac_link; /* nonzero when PHY reports link up */
uint32_t emac_fifo_mask; /* busy TX FIFO slots (EMAC_TX_FIFO0/1) */
};
/* Bus attachment glue. */
static int emac_probe(device_t);
static int emac_attach(device_t);
static int emac_detach(device_t);
static int emac_shutdown(device_t);
static int emac_suspend(device_t);
static int emac_resume(device_t);
/* Hardware setup, init/stop, datapath entry points. */
static int emac_sys_setup(struct emac_softc *);
static void emac_reset(struct emac_softc *);
static void emac_init_locked(struct emac_softc *);
static void emac_start_locked(if_t);
static void emac_init(void *);
static void emac_stop_locked(struct emac_softc *);
static void emac_intr(void *);
static int emac_ioctl(if_t, u_long, caddr_t);
/* RX/TX completion handling. */
static void emac_rxeof(struct emac_softc *, int);
static void emac_txeof(struct emac_softc *, uint32_t);
/* MII bus interface and media handling. */
static int emac_miibus_readreg(device_t, int, int);
static int emac_miibus_writereg(device_t, int, int, int);
static void emac_miibus_statchg(device_t);
static int emac_ifmedia_upd(if_t);
static void emac_ifmedia_sts(if_t, struct ifmediareq *);
/* Sysctl support. */
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_emac_proc_limit(SYSCTL_HANDLER_ARGS);
/* 32-bit accessors for the memory-mapped EMAC register window. */
#define EMAC_READ_REG(sc, reg) \
bus_space_read_4(sc->emac_tag, sc->emac_handle, reg)
#define EMAC_WRITE_REG(sc, reg, val) \
bus_space_write_4(sc->emac_tag, sc->emac_handle, reg, val)
/*
 * One-time platform setup: gate on the EMAC clock described by the
 * device tree and map the internal SRAM to the EMAC.
 * Returns 0 on success or an error from the clock framework.
 */
static int
emac_sys_setup(struct emac_softc *sc)
{
	int err;

	/* Look up the clock from the DT node and enable it. */
	err = clk_get_by_ofw_index(sc->emac_dev, 0, 0, &sc->emac_clk);
	if (err != 0) {
		device_printf(sc->emac_dev, "cannot get clock\n");
		return (err);
	}
	err = clk_enable(sc->emac_clk);
	if (err != 0) {
		device_printf(sc->emac_dev, "cannot enable clock\n");
		return (err);
	}
	/* Hand the on-chip SRAM over to the EMAC. */
	a10_map_to_emac();
	return (0);
}
/*
 * Determine the station MAC address and store it in hwaddr[6].
 * Preference order: address already programmed into the hardware,
 * then one derived from the SID root key, then a random 'bsd'-prefixed
 * locally-administered address.
 */
static void
emac_get_hwaddr(struct emac_softc *sc, uint8_t *hwaddr)
{
uint32_t val0, val1, rnd;
u_char rootkey[16];
size_t rootkey_size;
/*
* Try to get MAC address from running hardware.
* If there is something non-zero there just use it.
*
* Otherwise set the address to a convenient locally assigned address,
* using the SID rootkey.
* This is what u-boot does, so we end up with the same MAC as if
* u-boot had set it.
* If we can't get the root key, generate a random one,
* 'bsd' + random 24 low-order bits. 'b' is 0x62, which has the locally
* assigned bit set, and the broadcast/multicast bit clear.
*/
val0 = EMAC_READ_REG(sc, EMAC_MAC_A0);
val1 = EMAC_READ_REG(sc, EMAC_MAC_A1);
/*
* NOTE(review): the "all-ones" rejection compares against 0xffffff
* (24 bits), not 0xffffffff -- confirm whether this width is
* intentional for the A0/A1 register contents.
*/
if ((val0 | val1) != 0 && (val0 | val1) != 0xffffff) {
/* A1 holds the three high octets, A0 the three low octets. */
hwaddr[0] = (val1 >> 16) & 0xff;
hwaddr[1] = (val1 >> 8) & 0xff;
hwaddr[2] = (val1 >> 0) & 0xff;
hwaddr[3] = (val0 >> 16) & 0xff;
hwaddr[4] = (val0 >> 8) & 0xff;
hwaddr[5] = (val0 >> 0) & 0xff;
} else {
rootkey_size = sizeof(rootkey);
if (aw_sid_get_fuse(AW_SID_FUSE_ROOTKEY, rootkey,
&rootkey_size) == 0) {
/* 0x02: locally administered, unicast. */
hwaddr[0] = 0x2;
hwaddr[1] = rootkey[3];
hwaddr[2] = rootkey[12];
hwaddr[3] = rootkey[13];
hwaddr[4] = rootkey[14];
hwaddr[5] = rootkey[15];
}
else {
rnd = arc4random() & 0x00ffffff;
hwaddr[0] = 'b';
hwaddr[1] = 's';
hwaddr[2] = 'd';
hwaddr[3] = (rnd >> 16) & 0xff;
hwaddr[4] = (rnd >> 8) & 0xff;
hwaddr[5] = (rnd >> 0) & 0xff;
}
}
if (bootverbose)
printf("MAC address: %s\n", ether_sprintf(hwaddr));
}
/*
 * if_foreach_llmaddr() callback: fold one multicast address into the
 * 64-bit hash filter table passed via arg.  Always counts the address.
 */
static u_int
emac_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *table;
	uint32_t bit;

	table = arg;
	/* Top 6 bits of the big-endian CRC select one of 64 filter bits. */
	bit = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
	table[bit >> 5] |= 1 << (bit & 0x1f);
	return (1);
}
/*
 * Program the RX filter (unicast/multicast/broadcast/promiscuous)
 * according to the interface flags.  Caller holds the driver lock.
 */
static void
emac_set_rx_mode(struct emac_softc *sc)
{
if_t ifp;
uint32_t hashes[2];
uint32_t rcr = 0;
EMAC_ASSERT_LOCKED(sc);
ifp = sc->emac_ifp;
rcr = EMAC_READ_REG(sc, EMAC_RX_CTL);
/* Unicast packet and DA filtering */
rcr |= EMAC_RX_UCAD;
rcr |= EMAC_RX_DAF;
/* Build the 64-bit multicast hash table. */
hashes[0] = 0;
hashes[1] = 0;
if (if_getflags(ifp) & IFF_ALLMULTI) {
hashes[0] = 0xffffffff;
hashes[1] = 0xffffffff;
} else
if_foreach_llmaddr(ifp, emac_hash_maddr, hashes);
rcr |= EMAC_RX_MCO;
rcr |= EMAC_RX_MHF;
EMAC_WRITE_REG(sc, EMAC_RX_HASH0, hashes[0]);
EMAC_WRITE_REG(sc, EMAC_RX_HASH1, hashes[1]);
if (if_getflags(ifp) & IFF_BROADCAST) {
rcr |= EMAC_RX_BCO;
rcr |= EMAC_RX_MCO;
}
if (if_getflags(ifp) & IFF_PROMISC)
rcr |= EMAC_RX_PA;
else
/*
 * NOTE(review): EMAC_RX_UCAD is already set unconditionally
 * above, so this else branch is redundant; promiscuous mode
 * relies solely on EMAC_RX_PA being set.
 */
rcr |= EMAC_RX_UCAD;
EMAC_WRITE_REG(sc, EMAC_RX_CTL, rcr);
}
/*
 * Reset the controller: write 0 then 1 to the CTL register, allowing
 * 200us for the hardware to settle after each write.
 */
static void
emac_reset(struct emac_softc *sc)
{
EMAC_WRITE_REG(sc, EMAC_CTL, 0);
DELAY(200);
EMAC_WRITE_REG(sc, EMAC_CTL, 1);
DELAY(200);
}
/*
 * Discard the RX FIFO contents by reading (and dropping) data words
 * until the frame byte counter reports empty.
 */
static void
emac_drain_rxfifo(struct emac_softc *sc)
{

	for (;;) {
		if (EMAC_READ_REG(sc, EMAC_RX_FBC) == 0)
			break;
		(void)EMAC_READ_REG(sc, EMAC_RX_IO_DATA);
	}
}
/*
 * TX completion: release the FIFO slot(s) reported in the interrupt
 * status, count the transmitted packets, clear OACTIVE and disarm the
 * watchdog.  Caller holds the driver lock.
 */
static void
emac_txeof(struct emac_softc *sc, uint32_t status)
{
	if_t ifp;
	uint32_t done;

	EMAC_ASSERT_LOCKED(sc);
	ifp = sc->emac_ifp;
	/* Only the two TX FIFO completion bits are of interest. */
	done = status & (EMAC_TX_FIFO0 | EMAC_TX_FIFO1);
	sc->emac_fifo_mask &= ~done;
	/* Two packets completed if both FIFOs finished, else one. */
	if_inc_counter(ifp, IFCOUNTER_OPACKETS,
	    done == (EMAC_TX_FIFO0 | EMAC_TX_FIFO1) ? 2 : 1);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	/* Unarm watchdog timer if no TX */
	sc->emac_watchdog_timer = 0;
}
/*
 * Receive path: pull up to 'count' frames out of the RX FIFO through
 * the EMAC_RX_IO_DATA window and hand them to the network stack.
 * Called from the interrupt handler with the driver lock held; the
 * lock is dropped around if_input().
 */
static void
emac_rxeof(struct emac_softc *sc, int count)
{
if_t ifp;
struct mbuf *m, *m0;
uint32_t reg_val, rxcount;
/* len is signed 16-bit; frame sizes are bounded by EMAC_MAC_MAXF. */
int16_t len;
uint16_t status;
int i;
ifp = sc->emac_ifp;
for (; count > 0 &&
(if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0; count--) {
/*
* Race warning: The first packet might arrive with
* the interrupts disabled, but the second will fix
*/
rxcount = EMAC_READ_REG(sc, EMAC_RX_FBC);
if (!rxcount) {
/* Had one stuck? */
rxcount = EMAC_READ_REG(sc, EMAC_RX_FBC);
if (!rxcount)
return;
}
/* Check packet header */
reg_val = EMAC_READ_REG(sc, EMAC_RX_IO_DATA);
if (reg_val != EMAC_PACKET_HEADER) {
/* Packet header is wrong */
if (bootverbose)
if_printf(ifp, "wrong packet header\n");
/* Disable RX */
reg_val = EMAC_READ_REG(sc, EMAC_CTL);
reg_val &= ~EMAC_CTL_RX_EN;
EMAC_WRITE_REG(sc, EMAC_CTL, reg_val);
/* Flush RX FIFO */
reg_val = EMAC_READ_REG(sc, EMAC_RX_CTL);
reg_val |= EMAC_RX_FLUSH_FIFO;
EMAC_WRITE_REG(sc, EMAC_RX_CTL, reg_val);
/* Poll up to 10ms for the flush bit to self-clear. */
for (i = 100; i > 0; i--) {
DELAY(100);
if ((EMAC_READ_REG(sc, EMAC_RX_CTL) &
EMAC_RX_FLUSH_FIFO) == 0)
break;
}
if (i == 0) {
device_printf(sc->emac_dev,
"flush FIFO timeout\n");
/* Reinitialize controller */
emac_init_locked(sc);
return;
}
/* Enable RX */
reg_val = EMAC_READ_REG(sc, EMAC_CTL);
reg_val |= EMAC_CTL_RX_EN;
EMAC_WRITE_REG(sc, EMAC_CTL, reg_val);
return;
}
/* Get packet size and status */
reg_val = EMAC_READ_REG(sc, EMAC_RX_IO_DATA);
len = reg_val & 0xffff;
status = (reg_val >> 16) & 0xffff;
/* Runt or hardware-flagged bad frame: drop and drain. */
if (len < 64 || (status & EMAC_PKT_OK) == 0) {
if (bootverbose)
if_printf(ifp,
"bad packet: len = %i status = %i\n",
len, status);
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
emac_drain_rxfifo(sc);
continue;
}
#if 0
if (status & (EMAC_CRCERR | EMAC_LENERR)) {
good_packet = 0;
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
if (status & EMAC_CRCERR)
if_printf(ifp, "crc error\n");
if (status & EMAC_LENERR)
if_printf(ifp, "length error\n");
}
#endif
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL) {
/* No mbuf: the frame must still be drained from the FIFO. */
emac_drain_rxfifo(sc);
return;
}
m->m_len = m->m_pkthdr.len = MCLBYTES;
/* Copy entire frame to mbuf first. */
bus_space_read_multi_4(sc->emac_tag, sc->emac_handle,
EMAC_RX_IO_DATA, mtod(m, uint32_t *), roundup2(len, 4) / 4);
m->m_pkthdr.rcvif = ifp;
m->m_len = m->m_pkthdr.len = len - ETHER_CRC_LEN;
/*
* Emac controller needs strict alignment, so to avoid
* copying over an entire frame to align, we allocate
* a new mbuf and copy ethernet header + IP header to
* the new mbuf. The new mbuf is prepended into the
* existing mbuf chain.
*/
if (m->m_len <= (MHLEN - ETHER_HDR_LEN)) {
/* Small frame: shift in place to align the payload. */
bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
m->m_data += ETHER_HDR_LEN;
} else if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN) &&
m->m_len > (MHLEN - ETHER_HDR_LEN)) {
MGETHDR(m0, M_NOWAIT, MT_DATA);
if (m0 != NULL) {
len = ETHER_HDR_LEN + m->m_pkthdr.l2hlen;
bcopy(m->m_data, m0->m_data, len);
m->m_data += len;
m->m_len -= len;
m0->m_len = len;
M_MOVE_PKTHDR(m0, m);
m0->m_next = m;
m = m0;
} else {
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
m_freem(m);
m = NULL;
continue;
}
} else if (m->m_len > EMAC_MAC_MAXF) {
/* Oversized frame: count and drop. */
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
m_freem(m);
m = NULL;
continue;
}
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
/* Drop the lock across the stack's input path. */
EMAC_UNLOCK(sc);
if_input(ifp, m);
EMAC_LOCK(sc);
}
}
/*
 * TX watchdog, run once per second from emac_tick() with the lock
 * held.  A timer value of 0 means disarmed; otherwise the counter is
 * decremented and the reset only fires when it reaches zero.
 */
static void
emac_watchdog(struct emac_softc *sc)
{
if_t ifp;
EMAC_ASSERT_LOCKED(sc);
if (sc->emac_watchdog_timer == 0 || --sc->emac_watchdog_timer)
return;
ifp = sc->emac_ifp;
if (sc->emac_link == 0) {
if (bootverbose)
if_printf(sc->emac_ifp, "watchdog timeout "
"(missed link)\n");
} else
if_printf(sc->emac_ifp, "watchdog timeout -- resetting\n");
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
/* Clear RUNNING so emac_init_locked() performs a full re-init. */
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
emac_init_locked(sc);
if (!if_sendq_empty(ifp))
emac_start_locked(ifp);
}
/*
 * Periodic (1 Hz) callout: drive the PHY state machine and the TX
 * watchdog, then reschedule ourselves.
 */
static void
emac_tick(void *arg)
{
	struct emac_softc *sc = arg;
	struct mii_data *mii = device_get_softc(sc->emac_miibus);

	mii_tick(mii);
	emac_watchdog(sc);
	callout_reset(&sc->emac_tick_ch, hz, emac_tick, sc);
}
/* if_init entry point: take the driver lock and do the real work. */
static void
emac_init(void *xcs)
{
	struct emac_softc *sc = xcs;

	EMAC_LOCK(sc);
	emac_init_locked(sc);
	EMAC_UNLOCK(sc);
}
/*
 * Bring the controller up: reset the MAC, program MII clock, TX/RX
 * modes, inter-packet gaps, station address and RX filter, enable
 * interrupts, and start the periodic tick.  Caller holds the driver
 * lock.  A no-op if the interface is already RUNNING.
 */
static void
emac_init_locked(struct emac_softc *sc)
{
if_t ifp;
struct mii_data *mii;
uint32_t reg_val;
uint8_t *eaddr;
EMAC_ASSERT_LOCKED(sc);
ifp = sc->emac_ifp;
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
return;
/* Flush RX FIFO */
reg_val = EMAC_READ_REG(sc, EMAC_RX_CTL);
reg_val |= EMAC_RX_FLUSH_FIFO;
EMAC_WRITE_REG(sc, EMAC_RX_CTL, reg_val);
DELAY(1);
/*
 * Soft reset MAC.
 * NOTE(review): this clears the SOFT_RST bit rather than setting it;
 * presumably the bit is active-low (clearing releases the MAC from
 * reset) -- confirm against the A10/A20 manual.
 */
reg_val = EMAC_READ_REG(sc, EMAC_MAC_CTL0);
reg_val &= (~EMAC_MAC_CTL0_SOFT_RST);
EMAC_WRITE_REG(sc, EMAC_MAC_CTL0, reg_val);
/* Set MII clock divider (field at bits 5:2). */
reg_val = EMAC_READ_REG(sc, EMAC_MAC_MCFG);
reg_val &= (~(0xf << 2));
reg_val |= (0xd << 2);
EMAC_WRITE_REG(sc, EMAC_MAC_MCFG, reg_val);
/* Clear RX counter */
EMAC_WRITE_REG(sc, EMAC_RX_FBC, 0);
/* Disable all interrupt and clear interrupt status */
EMAC_WRITE_REG(sc, EMAC_INT_CTL, 0);
reg_val = EMAC_READ_REG(sc, EMAC_INT_STA);
EMAC_WRITE_REG(sc, EMAC_INT_STA, reg_val);
DELAY(1);
/* Set up TX */
reg_val = EMAC_READ_REG(sc, EMAC_TX_MODE);
reg_val |= EMAC_TX_AB_M;
reg_val &= EMAC_TX_TM;
EMAC_WRITE_REG(sc, EMAC_TX_MODE, reg_val);
/* Set up RX */
reg_val = EMAC_READ_REG(sc, EMAC_RX_CTL);
reg_val |= EMAC_RX_SETUP;
reg_val &= EMAC_RX_TM;
EMAC_WRITE_REG(sc, EMAC_RX_CTL, reg_val);
/* Set up MAC CTL0. */
reg_val = EMAC_READ_REG(sc, EMAC_MAC_CTL0);
reg_val |= EMAC_MAC_CTL0_SETUP;
EMAC_WRITE_REG(sc, EMAC_MAC_CTL0, reg_val);
/* Set up MAC CTL1. */
reg_val = EMAC_READ_REG(sc, EMAC_MAC_CTL1);
reg_val |= EMAC_MAC_CTL1_SETUP;
EMAC_WRITE_REG(sc, EMAC_MAC_CTL1, reg_val);
/* Set up IPGT (default to full-duplex gap; statchg adjusts later). */
EMAC_WRITE_REG(sc, EMAC_MAC_IPGT, EMAC_MAC_IPGT_FD);
/* Set up IPGR */
EMAC_WRITE_REG(sc, EMAC_MAC_IPGR, EMAC_MAC_NBTB_IPG2 |
(EMAC_MAC_NBTB_IPG1 << 8));
/* Set up Collison window */
EMAC_WRITE_REG(sc, EMAC_MAC_CLRT, EMAC_MAC_RM | (EMAC_MAC_CW << 8));
/* Set up Max Frame Length */
EMAC_WRITE_REG(sc, EMAC_MAC_MAXF, EMAC_MAC_MFL);
/* Setup ethernet address: A1 = high 3 octets, A0 = low 3 octets. */
eaddr = if_getlladdr(ifp);
EMAC_WRITE_REG(sc, EMAC_MAC_A1, eaddr[0] << 16 |
eaddr[1] << 8 | eaddr[2]);
EMAC_WRITE_REG(sc, EMAC_MAC_A0, eaddr[3] << 16 |
eaddr[4] << 8 | eaddr[5]);
/* Setup rx filter */
emac_set_rx_mode(sc);
/* Enable RX/TX0/RX Hlevel interrupt */
reg_val = EMAC_READ_REG(sc, EMAC_INT_CTL);
reg_val |= EMAC_INT_EN;
EMAC_WRITE_REG(sc, EMAC_INT_CTL, reg_val);
if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
/* Link state is re-learned from the PHY below. */
sc->emac_link = 0;
/* Switch to the current media. */
mii = device_get_softc(sc->emac_miibus);
mii_mediachg(mii);
callout_reset(&sc->emac_tick_ch, hz, emac_tick, sc);
}
/* if_start entry point: serialize against the interrupt handler. */
static void
emac_start(if_t ifp)
{
	struct emac_softc *sc = if_getsoftc(ifp);

	EMAC_LOCK(sc);
	emac_start_locked(ifp);
	EMAC_UNLOCK(sc);
}
/*
 * Dequeue one frame and push it into a free hardware TX FIFO.
 * Caller holds the driver lock.
 *
 * Fix: the original claimed a FIFO slot (set the emac_fifo_mask bit)
 * before calling m_defrag(); on defrag failure it freed the mbuf and
 * returned without releasing the slot, permanently leaking it (two
 * failures would wedge TX with OACTIVE set).  Defragment first, then
 * claim the slot.
 */
static void
emac_start_locked(if_t ifp)
{
	struct emac_softc *sc;
	struct mbuf *m, *m0;
	uint32_t fifo, reg;

	sc = if_getsoftc(ifp);
	if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE)
		return;
	/* Both hardware TX FIFOs in use? */
	if (sc->emac_fifo_mask == (EMAC_TX_FIFO0 | EMAC_TX_FIFO1))
		return;
	if (sc->emac_link == 0)
		return;
	m = if_dequeue(ifp);
	if (m == NULL)
		return;
	/*
	 * Emac controller wants 4 byte aligned TX buffers.
	 * We have to copy pretty much all the time.
	 * Done before claiming a FIFO slot so a failure here does not
	 * leak the slot.
	 */
	if (m->m_next != NULL || (mtod(m, uintptr_t) & 3) != 0) {
		m0 = m_defrag(m, M_NOWAIT);
		if (m0 == NULL) {
			m_freem(m);
			return;
		}
		m = m0;
	}
	/* Select channel: use FIFO1 when FIFO0 is busy. */
	if (sc->emac_fifo_mask & EMAC_TX_FIFO0)
		fifo = 1;
	else
		fifo = 0;
	sc->emac_fifo_mask |= (1 << fifo);
	if (sc->emac_fifo_mask == (EMAC_TX_FIFO0 | EMAC_TX_FIFO1))
		if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
	EMAC_WRITE_REG(sc, EMAC_TX_INS, fifo);
	/* Write frame data, 32 bits at a time. */
	bus_space_write_multi_4(sc->emac_tag, sc->emac_handle,
	    EMAC_TX_IO_DATA, mtod(m, uint32_t *),
	    roundup2(m->m_len, 4) / 4);
	/* Program the frame length. */
	reg = (fifo == 0) ? EMAC_TX_PL0 : EMAC_TX_PL1;
	EMAC_WRITE_REG(sc, reg, m->m_len);
	/* Start transmission from the FIFO to the PHY. */
	reg = (fifo == 0) ? EMAC_TX_CTL0 : EMAC_TX_CTL1;
	EMAC_WRITE_REG(sc, reg, EMAC_READ_REG(sc, reg) | 1);
	/* Arm the TX watchdog (seconds). */
	sc->emac_watchdog_timer = 5;
	/* Data have been sent to hardware, it is okay to free the mbuf now. */
	BPF_MTAP(ifp, m);
	m_freem(m);
}
/*
 * Quiesce the controller: mark the interface down, mask and ack all
 * interrupts, disable RX/TX, and stop the periodic tick.  Caller
 * holds the driver lock.
 */
static void
emac_stop_locked(struct emac_softc *sc)
{
if_t ifp;
uint32_t reg_val;
EMAC_ASSERT_LOCKED(sc);
ifp = sc->emac_ifp;
if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
sc->emac_link = 0;
/* Disable all interrupt and clear interrupt status */
EMAC_WRITE_REG(sc, EMAC_INT_CTL, 0);
reg_val = EMAC_READ_REG(sc, EMAC_INT_STA);
EMAC_WRITE_REG(sc, EMAC_INT_STA, reg_val);
/* Disable RX/TX */
reg_val = EMAC_READ_REG(sc, EMAC_CTL);
reg_val &= ~(EMAC_CTL_RST | EMAC_CTL_TX_EN | EMAC_CTL_RX_EN);
EMAC_WRITE_REG(sc, EMAC_CTL, reg_val);
callout_stop(&sc->emac_tick_ch);
}
/*
 * Interrupt handler: mask interrupts, ack status, dispatch RX and TX
 * completions, then re-enable the interrupt mask.
 */
static void
emac_intr(void *arg)
{
struct emac_softc *sc;
if_t ifp;
uint32_t reg_val;
sc = (struct emac_softc *)arg;
EMAC_LOCK(sc);
/* Disable all interrupts */
EMAC_WRITE_REG(sc, EMAC_INT_CTL, 0);
/* Get EMAC interrupt status */
reg_val = EMAC_READ_REG(sc, EMAC_INT_STA);
/* Clear ISR status (write-1-to-clear semantics). */
EMAC_WRITE_REG(sc, EMAC_INT_STA, reg_val);
/* Received incoming packet */
if (reg_val & EMAC_INT_STA_RX)
emac_rxeof(sc, sc->emac_rx_process_limit);
/* Transmit Interrupt check */
if (reg_val & EMAC_INT_STA_TX) {
emac_txeof(sc, reg_val);
ifp = sc->emac_ifp;
/* A FIFO slot freed up; try to queue more output. */
if (!if_sendq_empty(ifp))
emac_start_locked(ifp);
}
/* Re-enable interrupt mask */
reg_val = EMAC_READ_REG(sc, EMAC_INT_CTL);
reg_val |= EMAC_INT_EN;
EMAC_WRITE_REG(sc, EMAC_INT_CTL, reg_val);
EMAC_UNLOCK(sc);
}
/*
 * Interface ioctl handler.  Handles interface flags, multicast list
 * changes and media requests locally; everything else is passed to
 * ether_ioctl().
 */
static int
emac_ioctl(if_t ifp, u_long command, caddr_t data)
{
struct emac_softc *sc;
struct mii_data *mii;
struct ifreq *ifr;
int error = 0;
sc = if_getsoftc(ifp);
ifr = (struct ifreq *)data;
switch (command) {
case SIOCSIFFLAGS:
EMAC_LOCK(sc);
if (if_getflags(ifp) & IFF_UP) {
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
/*
 * Already running: only reprogram the RX filter
 * if PROMISC/ALLMULTI changed since last time.
 */
if ((if_getflags(ifp) ^ sc->emac_if_flags) &
(IFF_PROMISC | IFF_ALLMULTI))
emac_set_rx_mode(sc);
} else
emac_init_locked(sc);
} else {
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
emac_stop_locked(sc);
}
/* Remember flags for the next delta comparison. */
sc->emac_if_flags = if_getflags(ifp);
EMAC_UNLOCK(sc);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
EMAC_LOCK(sc);
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
emac_set_rx_mode(sc);
}
EMAC_UNLOCK(sc);
break;
case SIOCGIFMEDIA:
case SIOCSIFMEDIA:
/* Delegate media ioctls to the MII layer. */
mii = device_get_softc(sc->emac_miibus);
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
break;
default:
error = ether_ioctl(ifp, command, data);
break;
}
return (error);
}
/*
 * Probe: match enabled device-tree nodes compatible with the
 * A10/A20 EMAC.
 */
static int
emac_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev) ||
	    !ofw_bus_is_compatible(dev, "allwinner,sun4i-a10-emac"))
		return (ENXIO);
	device_set_desc(dev, "A10/A20 EMAC ethernet controller");
	return (BUS_PROBE_DEFAULT);
}
/*
 * Tear down the device.  Also used as the error path of emac_attach(),
 * so every resource is released only if it was actually acquired.
 */
static int
emac_detach(device_t dev)
{
struct emac_softc *sc;
sc = device_get_softc(dev);
if_setdrvflagbits(sc->emac_ifp, 0, IFF_DRV_RUNNING);
if (device_is_attached(dev)) {
ether_ifdetach(sc->emac_ifp);
EMAC_LOCK(sc);
emac_stop_locked(sc);
EMAC_UNLOCK(sc);
/* Wait for the tick callout to finish before freeing state. */
callout_drain(&sc->emac_tick_ch);
}
if (sc->emac_intrhand != NULL)
bus_teardown_intr(sc->emac_dev, sc->emac_irq,
sc->emac_intrhand);
if (sc->emac_miibus != NULL) {
device_delete_child(sc->emac_dev, sc->emac_miibus);
bus_generic_detach(sc->emac_dev);
}
if (sc->emac_clk != NULL)
clk_disable(sc->emac_clk);
if (sc->emac_res != NULL)
bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->emac_res);
if (sc->emac_irq != NULL)
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->emac_irq);
if (sc->emac_ifp != NULL)
if_free(sc->emac_ifp);
if (mtx_initialized(&sc->emac_mtx))
mtx_destroy(&sc->emac_mtx);
return (0);
}
/* System shutdown: same quiesce path as suspend. */
static int
emac_shutdown(device_t dev)
{
return (emac_suspend(dev));
}
/* Suspend: stop the MAC if it is currently running. */
static int
emac_suspend(device_t dev)
{
	struct emac_softc *sc = device_get_softc(dev);

	EMAC_LOCK(sc);
	if ((if_getdrvflags(sc->emac_ifp) & IFF_DRV_RUNNING) != 0)
		emac_stop_locked(sc);
	EMAC_UNLOCK(sc);
	return (0);
}
/* Resume: restart the MAC if the interface was administratively up. */
static int
emac_resume(device_t dev)
{
	struct emac_softc *sc = device_get_softc(dev);
	if_t ifp = sc->emac_ifp;

	EMAC_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		/* Clear RUNNING so init performs a full re-init. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		emac_init_locked(sc);
	}
	EMAC_UNLOCK(sc);
	return (0);
}
/*
 * Attach: allocate bus resources, set up the sysctl tunable, reset the
 * hardware, attach the MII/PHY layer and the ifnet, and hook the
 * interrupt.  On any failure, emac_detach() releases whatever was
 * acquired.
 *
 * Fix: the block contained leftover diff markers around a NULL check
 * on if_alloc(); per ifnet(9), if_alloc() cannot fail, so the check is
 * gone and the function is valid C again.
 */
static int
emac_attach(device_t dev)
{
	struct emac_softc *sc;
	if_t ifp;
	int error, rid;
	uint8_t eaddr[ETHER_ADDR_LEN];

	sc = device_get_softc(dev);
	sc->emac_dev = dev;
	error = 0;
	mtx_init(&sc->emac_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->emac_tick_ch, &sc->emac_mtx, 0);

	/* Map the EMAC register window. */
	rid = 0;
	sc->emac_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->emac_res == NULL) {
		device_printf(dev, "unable to map memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->emac_tag = rman_get_bustag(sc->emac_res);
	sc->emac_handle = rman_get_bushandle(sc->emac_res);

	/* Allocate the interrupt. */
	rid = 0;
	sc->emac_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->emac_irq == NULL) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		error = ENXIO;
		goto fail;
	}
	/* Create device sysctl node. */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "process_limit",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    &sc->emac_rx_process_limit, 0, sysctl_hw_emac_proc_limit, "I",
	    "max number of Rx events to process");
	sc->emac_rx_process_limit = EMAC_PROC_DEFAULT;
	/* Allow a hints/loader override, clamped to the valid range. */
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "process_limit", &sc->emac_rx_process_limit);
	if (error == 0) {
		if (sc->emac_rx_process_limit < EMAC_PROC_MIN ||
		    sc->emac_rx_process_limit > EMAC_PROC_MAX) {
			device_printf(dev, "process_limit value out of range; "
			    "using default: %d\n", EMAC_PROC_DEFAULT);
			sc->emac_rx_process_limit = EMAC_PROC_DEFAULT;
		}
	}
	/* Setup EMAC */
	error = emac_sys_setup(sc);
	if (error != 0)
		goto fail;
	emac_reset(sc);

	/* if_alloc(9) cannot fail, so no NULL check is needed. */
	ifp = sc->emac_ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, sc);

	/* Setup MII */
	error = mii_attach(dev, &sc->emac_miibus, ifp, emac_ifmedia_upd,
	    emac_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "PHY probe failed\n");
		goto fail;
	}
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(ifp, emac_start);
	if_setioctlfn(ifp, emac_ioctl);
	if_setinitfn(ifp, emac_init);
	if_setsendqlen(ifp, IFQ_MAXLEN);

	/* Get MAC address */
	emac_get_hwaddr(sc, eaddr);
	ether_ifattach(ifp, eaddr);

	/* VLAN capability setup. */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));
	/* Tell the upper layer we support VLAN over-sized frames. */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	error = bus_setup_intr(dev, sc->emac_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, emac_intr, sc, &sc->emac_intrhand);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}
fail:
	if (error != 0)
		emac_detach(dev);
	return (error);
}
/*
 * Poll the MII management "busy" bit (EMAC_MAC_MIND bit 0) for up to
 * ~10ms (100 polls of 100us).  Returns true when the bit clears,
 * false on timeout.
 */
static bool
emac_miibus_iowait(struct emac_softc *sc)
{
	int tries;

	for (tries = 0; tries < 100; tries++) {
		DELAY(100);
		if ((EMAC_READ_REG(sc, EMAC_MAC_MIND) & 0x1) == 0)
			return (true);
	}
	return (false);
}
/*
 * The MII bus interface
 */
/*
 * Read a PHY register over the MII management interface.  Returns 0
 * on timeout (indistinguishable from a register that reads as 0).
 */
static int
emac_miibus_readreg(device_t dev, int phy, int reg)
{
struct emac_softc *sc;
int rval;
sc = device_get_softc(dev);
/* Issue phy address and reg */
EMAC_WRITE_REG(sc, EMAC_MAC_MADR, (phy << 8) | reg);
/* Pull up the phy io line */
EMAC_WRITE_REG(sc, EMAC_MAC_MCMD, 0x1);
/* Wait for the management cycle to complete. */
if (!emac_miibus_iowait(sc)) {
device_printf(dev, "timeout waiting for mii read\n");
return (0);
}
/* Push down the phy io line */
EMAC_WRITE_REG(sc, EMAC_MAC_MCMD, 0x0);
/* Read data */
rval = EMAC_READ_REG(sc, EMAC_MAC_MRDD);
return (rval);
}
/*
 * Write a PHY register over the MII management interface.  Always
 * returns 0, even on timeout (the error is only logged).
 */
static int
emac_miibus_writereg(device_t dev, int phy, int reg, int data)
{
struct emac_softc *sc;
sc = device_get_softc(dev);
/* Issue phy address and reg */
EMAC_WRITE_REG(sc, EMAC_MAC_MADR, (phy << 8) | reg);
/* Write data */
EMAC_WRITE_REG(sc, EMAC_MAC_MWTD, data);
/* Pull up the phy io line */
EMAC_WRITE_REG(sc, EMAC_MAC_MCMD, 0x1);
/* Wait for the management cycle to complete. */
if (!emac_miibus_iowait(sc)) {
device_printf(dev, "timeout waiting for mii write\n");
return (0);
}
/* Push down the phy io line */
EMAC_WRITE_REG(sc, EMAC_MAC_MCMD, 0x0);
return (0);
}
/*
 * MII status-change callback: track link state and program the MAC's
 * duplex-dependent inter-packet gap, enabling or disabling RX/TX
 * according to whether a usable link is present.
 */
static void
emac_miibus_statchg(device_t dev)
{
struct emac_softc *sc;
struct mii_data *mii;
if_t ifp;
uint32_t reg_val;
sc = device_get_softc(dev);
mii = device_get_softc(sc->emac_miibus);
ifp = sc->emac_ifp;
if (mii == NULL || ifp == NULL ||
(if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
return;
sc->emac_link = 0;
/* Link is up only when valid and active with a supported speed. */
if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
(IFM_ACTIVE | IFM_AVALID)) {
switch (IFM_SUBTYPE(mii->mii_media_active)) {
case IFM_10_T:
case IFM_100_TX:
sc->emac_link = 1;
break;
default:
break;
}
}
/* Program MACs with resolved speed/duplex. */
if (sc->emac_link != 0) {
reg_val = EMAC_READ_REG(sc, EMAC_MAC_IPGT);
if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
reg_val &= ~EMAC_MAC_IPGT_HD;
reg_val |= EMAC_MAC_IPGT_FD;
} else {
reg_val &= ~EMAC_MAC_IPGT_FD;
reg_val |= EMAC_MAC_IPGT_HD;
}
EMAC_WRITE_REG(sc, EMAC_MAC_IPGT, reg_val);
/* Enable RX/TX */
reg_val = EMAC_READ_REG(sc, EMAC_CTL);
reg_val |= EMAC_CTL_RST | EMAC_CTL_TX_EN | EMAC_CTL_RX_EN;
EMAC_WRITE_REG(sc, EMAC_CTL, reg_val);
} else {
/* Disable RX/TX */
reg_val = EMAC_READ_REG(sc, EMAC_CTL);
reg_val &= ~(EMAC_CTL_RST | EMAC_CTL_TX_EN | EMAC_CTL_RX_EN);
EMAC_WRITE_REG(sc, EMAC_CTL, reg_val);
}
}
/*
 * ifmedia change request: reset every PHY on the bus and renegotiate
 * the selected media under the driver lock.
 */
static int
emac_ifmedia_upd(if_t ifp)
{
struct emac_softc *sc;
struct mii_data *mii;
struct mii_softc *miisc;
int error;
sc = if_getsoftc(ifp);
mii = device_get_softc(sc->emac_miibus);
EMAC_LOCK(sc);
LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
PHY_RESET(miisc);
error = mii_mediachg(mii);
EMAC_UNLOCK(sc);
return (error);
}
/*
 * ifmedia status request: refresh the PHY state and report the
 * currently active media and link status.
 */
static void
emac_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct emac_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii = device_get_softc(sc->emac_miibus);

	EMAC_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	EMAC_UNLOCK(sc);
}
/* Newbus method table for the EMAC driver. */
static device_method_t emac_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, emac_probe),
DEVMETHOD(device_attach, emac_attach),
DEVMETHOD(device_detach, emac_detach),
DEVMETHOD(device_shutdown, emac_shutdown),
DEVMETHOD(device_suspend, emac_suspend),
DEVMETHOD(device_resume, emac_resume),
/* bus interface, for miibus */
DEVMETHOD(bus_print_child, bus_generic_print_child),
DEVMETHOD(bus_driver_added, bus_generic_driver_added),
/* MII interface */
DEVMETHOD(miibus_readreg, emac_miibus_readreg),
DEVMETHOD(miibus_writereg, emac_miibus_writereg),
DEVMETHOD(miibus_statchg, emac_miibus_statchg),
DEVMETHOD_END
};
static driver_t emac_driver = {
"emac",
emac_methods,
sizeof(struct emac_softc)
};
/* Attach under simplebus; miibus attaches as a child of emac. */
DRIVER_MODULE(emac, simplebus, emac_driver, 0, 0);
DRIVER_MODULE(miibus, emac, miibus_driver, 0, 0);
MODULE_DEPEND(emac, miibus, 1, 1, 1);
MODULE_DEPEND(emac, ether, 1, 1, 1);
/*
 * Generic range-checked integer sysctl handler: accept the new value
 * only when it lies within [low, high]; otherwise return EINVAL and
 * leave the backing variable untouched.
 */
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int err, val;

	if (arg1 == NULL)
		return (EINVAL);
	val = *(int *)arg1;
	err = sysctl_handle_int(oidp, &val, 0, req);
	/* Read-only request or handler error: nothing to commit. */
	if (err != 0 || req->newptr == NULL)
		return (err);
	if (val < low || val > high)
		return (EINVAL);
	*(int *)arg1 = val;
	return (0);
}
/* Clamp the process_limit sysctl to the supported range. */
static int
sysctl_hw_emac_proc_limit(SYSCTL_HANDLER_ARGS)
{
return (sysctl_int_range(oidp, arg1, arg2, req,
EMAC_PROC_MIN, EMAC_PROC_MAX));
}
diff --git a/sys/arm/ti/cpsw/if_cpsw.c b/sys/arm/ti/cpsw/if_cpsw.c
index c2651c33e1b1..8012f8441e76 100644
--- a/sys/arm/ti/cpsw/if_cpsw.c
+++ b/sys/arm/ti/cpsw/if_cpsw.c
@@ -1,3021 +1,3016 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
* Copyright (c) 2016 Rubicon Communications, LLC (Netgate)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* TI Common Platform Ethernet Switch (CPSW) Driver
* Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs.
*
* This controller is documented in the AM335x Technical Reference
* Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM
* and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM.
*
* It is basically a single Ethernet port (port 0) wired internally to
* a 3-port store-and-forward switch connected to two independent
* "sliver" controllers (port 1 and port 2). You can operate the
* controller in a variety of different ways by suitably configuring
* the slivers and the Address Lookup Engine (ALE) that routes packets
* between the ports.
*
* This code was developed and tested on a BeagleBone with
* an AM335x SoC.
*/
#include <sys/cdefs.h>
#include "opt_cpsw.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>
#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <dev/syscon/syscon.h>
#include "syscon_if.h"
#include <arm/ti/am335x/am335x_scm.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/fdt/fdt_common.h>
#ifdef CPSW_ETHERSWITCH
#include <dev/etherswitch/etherswitch.h>
#include "etherswitch_if.h"
#endif
#include "if_cpswreg.h"
#include "if_cpswvar.h"
#include "miibus_if.h"
/* Device probe/attach/detach. */
static int cpsw_probe(device_t);
static int cpsw_attach(device_t);
static int cpsw_detach(device_t);
static int cpswp_probe(device_t);
static int cpswp_attach(device_t);
static int cpswp_detach(device_t);
static phandle_t cpsw_get_node(device_t, device_t);
/* Device Init/shutdown. */
static int cpsw_shutdown(device_t);
static void cpswp_init(void *);
static void cpswp_init_locked(void *);
static void cpswp_stop_locked(struct cpswp_softc *);
/* Device Suspend/Resume. */
static int cpsw_suspend(device_t);
static int cpsw_resume(device_t);
/* Ioctl. */
static int cpswp_ioctl(if_t, u_long command, caddr_t data);
static int cpswp_miibus_readreg(device_t, int phy, int reg);
static int cpswp_miibus_writereg(device_t, int phy, int reg, int value);
static void cpswp_miibus_statchg(device_t);
/* Send/Receive packets. */
static void cpsw_intr_rx(void *arg);
static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *);
static void cpsw_rx_enqueue(struct cpsw_softc *);
static void cpswp_start(if_t);
static void cpsw_intr_tx(void *);
static void cpswp_tx_enqueue(struct cpswp_softc *);
static int cpsw_tx_dequeue(struct cpsw_softc *);
/* Misc interrupts and watchdog. */
static void cpsw_intr_rx_thresh(void *);
static void cpsw_intr_misc(void *);
static void cpswp_tick(void *);
static void cpswp_ifmedia_sts(if_t, struct ifmediareq *);
static int cpswp_ifmedia_upd(if_t);
static void cpsw_tx_watchdog(void *);
/* ALE support */
static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t, uint32_t *);
static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t, uint32_t *);
static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t, int, uint8_t *);
static void cpsw_ale_dump_table(struct cpsw_softc *);
static int cpsw_ale_update_vlan_table(struct cpsw_softc *, int, int, int, int,
int);
static int cpswp_ale_update_addresses(struct cpswp_softc *, int);
/* Statistics and sysctls. */
static void cpsw_add_sysctls(struct cpsw_softc *);
static void cpsw_stats_collect(struct cpsw_softc *);
static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS);
#ifdef CPSW_ETHERSWITCH
static etherswitch_info_t *cpsw_getinfo(device_t);
static int cpsw_getport(device_t, etherswitch_port_t *);
static int cpsw_setport(device_t, etherswitch_port_t *);
static int cpsw_getconf(device_t, etherswitch_conf_t *);
static int cpsw_getvgroup(device_t, etherswitch_vlangroup_t *);
static int cpsw_setvgroup(device_t, etherswitch_vlangroup_t *);
static int cpsw_readreg(device_t, int);
static int cpsw_writereg(device_t, int, int);
static int cpsw_readphy(device_t, int, int);
static int cpsw_writephy(device_t, int, int, int);
#endif
/*
* Arbitrary limit on number of segments in an mbuf to be transmitted.
* Packets with more segments than this will be defragmented before
* they are queued.
*/
#define CPSW_TXFRAGS 16
/* Shared resources. */
static device_method_t cpsw_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, cpsw_probe),
DEVMETHOD(device_attach, cpsw_attach),
DEVMETHOD(device_detach, cpsw_detach),
DEVMETHOD(device_shutdown, cpsw_shutdown),
DEVMETHOD(device_suspend, cpsw_suspend),
DEVMETHOD(device_resume, cpsw_resume),
/* Bus interface */
DEVMETHOD(bus_add_child, device_add_child_ordered),
/* OFW methods */
DEVMETHOD(ofw_bus_get_node, cpsw_get_node),
#ifdef CPSW_ETHERSWITCH
/* etherswitch interface */
DEVMETHOD(etherswitch_getinfo, cpsw_getinfo),
DEVMETHOD(etherswitch_readreg, cpsw_readreg),
DEVMETHOD(etherswitch_writereg, cpsw_writereg),
DEVMETHOD(etherswitch_readphyreg, cpsw_readphy),
DEVMETHOD(etherswitch_writephyreg, cpsw_writephy),
DEVMETHOD(etherswitch_getport, cpsw_getport),
DEVMETHOD(etherswitch_setport, cpsw_setport),
DEVMETHOD(etherswitch_getvgroup, cpsw_getvgroup),
DEVMETHOD(etherswitch_setvgroup, cpsw_setvgroup),
DEVMETHOD(etherswitch_getconf, cpsw_getconf),
#endif
DEVMETHOD_END
};
static driver_t cpsw_driver = {
"cpswss",
cpsw_methods,
sizeof(struct cpsw_softc),
};
DRIVER_MODULE(cpswss, simplebus, cpsw_driver, 0, 0);
/* Port/Slave resources. */
static device_method_t cpswp_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, cpswp_probe),
DEVMETHOD(device_attach, cpswp_attach),
DEVMETHOD(device_detach, cpswp_detach),
/* MII interface */
DEVMETHOD(miibus_readreg, cpswp_miibus_readreg),
DEVMETHOD(miibus_writereg, cpswp_miibus_writereg),
DEVMETHOD(miibus_statchg, cpswp_miibus_statchg),
DEVMETHOD_END
};
static driver_t cpswp_driver = {
"cpsw",
cpswp_methods,
sizeof(struct cpswp_softc),
};
#ifdef CPSW_ETHERSWITCH
DRIVER_MODULE(etherswitch, cpswss, etherswitch_driver, 0, 0);
MODULE_DEPEND(cpswss, etherswitch, 1, 1, 1);
#endif
DRIVER_MODULE(cpsw, cpswss, cpswp_driver, 0, 0);
DRIVER_MODULE(miibus, cpsw, miibus_driver, 0, 0);
MODULE_DEPEND(cpsw, ether, 1, 1, 1);
MODULE_DEPEND(cpsw, miibus, 1, 1, 1);
#ifdef CPSW_ETHERSWITCH
static struct cpsw_vlangroups cpsw_vgroups[CPSW_VLANS];
#endif
static uint32_t slave_mdio_addr[] = { 0x4a100200, 0x4a100300 };
/*
 * The controller exposes four interrupt lines; all are allocated
 * shareable and active.  Index order matches cpsw_intr_cb[] below.
 */
static struct resource_spec irq_res_spec[] = {
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};
/*
 * Interrupt handlers, indexed to match irq_res_spec[]:
 * RX threshold, RX, TX and misc, in that order.
 */
static struct {
	void (*cb)(void *);
} cpsw_intr_cb[] = {
	{ cpsw_intr_rx_thresh },
	{ cpsw_intr_rx },
	{ cpsw_intr_tx },
	{ cpsw_intr_misc },
};
/*
 * Hardware statistics counters: register offset within the statistics
 * block and the sysctl name each is exported under.  The number of
 * entries here must match the size of the stats array in
 * struct cpswp_softc.
 */
static struct cpsw_stat {
	int reg;	/* Offset of the counter register. */
	char *oid;	/* sysctl leaf name. */
} cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = {
	{0x00, "GoodRxFrames"},
	{0x04, "BroadcastRxFrames"},
	{0x08, "MulticastRxFrames"},
	{0x0C, "PauseRxFrames"},
	{0x10, "RxCrcErrors"},
	{0x14, "RxAlignErrors"},
	{0x18, "OversizeRxFrames"},
	{0x1c, "RxJabbers"},
	{0x20, "ShortRxFrames"},
	{0x24, "RxFragments"},
	{0x30, "RxOctets"},
	{0x34, "GoodTxFrames"},
	{0x38, "BroadcastTxFrames"},
	{0x3c, "MulticastTxFrames"},
	{0x40, "PauseTxFrames"},
	{0x44, "DeferredTxFrames"},
	{0x48, "CollisionsTxFrames"},
	{0x4c, "SingleCollisionTxFrames"},
	{0x50, "MultipleCollisionTxFrames"},
	{0x54, "ExcessiveCollisions"},
	{0x58, "LateCollisions"},
	{0x5c, "TxUnderrun"},
	{0x60, "CarrierSenseErrors"},
	{0x64, "TxOctets"},
	{0x68, "RxTx64OctetFrames"},
	{0x6c, "RxTx65to127OctetFrames"},
	{0x70, "RxTx128to255OctetFrames"},
	{0x74, "RxTx256to511OctetFrames"},
	{0x78, "RxTx512to1024OctetFrames"},
	{0x7c, "RxTx1024upOctetFrames"},
	{0x80, "NetOctets"},
	{0x84, "RxStartOfFrameOverruns"},
	{0x88, "RxMiddleOfFrameOverruns"},
	{0x8c, "RxDmaOverruns"}
};
/*
* Basic debug support.
*/
/*
 * Emit a "HH:MM:SS funcname " prefix for a debug line, derived from
 * the wall clock (time_second) modulo one day.
 */
static void
cpsw_debugf_head(const char *funcname)
{
	int secs, hours, mins;

	secs = (int)(time_second % (24 * 60 * 60));
	hours = secs / (60 * 60);
	mins = (secs / 60) % 60;
	printf("%02d:%02d:%02d %s ", hours, mins, secs % 60, funcname);
}
/*
 * printf-style debug message body; a newline is appended
 * automatically.
 */
static void
cpsw_debugf(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
	printf("\n");
}
#define CPSW_DEBUGF(_sc, a) do { \
if ((_sc)->debug) { \
cpsw_debugf_head(__func__); \
cpsw_debugf a; \
} \
} while (0)
/*
* Locking macros
*/
#define CPSW_TX_LOCK(sc) do { \
mtx_assert(&(sc)->rx.lock, MA_NOTOWNED); \
mtx_lock(&(sc)->tx.lock); \
} while (0)
#define CPSW_TX_UNLOCK(sc) mtx_unlock(&(sc)->tx.lock)
#define CPSW_TX_LOCK_ASSERT(sc) mtx_assert(&(sc)->tx.lock, MA_OWNED)
#define CPSW_RX_LOCK(sc) do { \
mtx_assert(&(sc)->tx.lock, MA_NOTOWNED); \
mtx_lock(&(sc)->rx.lock); \
} while (0)
#define CPSW_RX_UNLOCK(sc) mtx_unlock(&(sc)->rx.lock)
#define CPSW_RX_LOCK_ASSERT(sc) mtx_assert(&(sc)->rx.lock, MA_OWNED)
#define CPSW_PORT_LOCK(_sc) do { \
mtx_assert(&(_sc)->lock, MA_NOTOWNED); \
mtx_lock(&(_sc)->lock); \
} while (0)
#define CPSW_PORT_UNLOCK(_sc) mtx_unlock(&(_sc)->lock)
#define CPSW_PORT_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->lock, MA_OWNED)
/*
* Read/Write macros
*/
#define cpsw_read_4(_sc, _reg) bus_read_4((_sc)->mem_res, (_reg))
#define cpsw_write_4(_sc, _reg, _val) \
bus_write_4((_sc)->mem_res, (_reg), (_val))
#define cpsw_cpdma_bd_offset(i) (CPSW_CPPI_RAM_OFFSET + ((i)*16))
#define cpsw_cpdma_bd_paddr(sc, slot) \
BUS_SPACE_PHYSADDR(sc->mem_res, slot->bd_offset)
#define cpsw_cpdma_read_bd(sc, slot, val) \
bus_read_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4)
#define cpsw_cpdma_write_bd(sc, slot, val) \
bus_write_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4)
#define cpsw_cpdma_write_bd_next(sc, slot, next_slot) \
cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot))
#define cpsw_cpdma_write_bd_flags(sc, slot, val) \
bus_write_2(sc->mem_res, slot->bd_offset + 14, val)
#define cpsw_cpdma_read_bd_flags(sc, slot) \
bus_read_2(sc->mem_res, slot->bd_offset + 14)
#define cpsw_write_hdp_slot(sc, queue, slot) \
cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot))
#define CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0))
#define cpsw_read_cp(sc, queue) \
cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET)
#define cpsw_write_cp(sc, queue, val) \
cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val))
#define cpsw_write_cp_slot(sc, queue, slot) \
cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot))
#if 0
/* XXX temporary function versions for debugging. */
static void
cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
{
uint32_t reg = queue->hdp_offset;
uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg)));
cpsw_write_4(sc, reg, v);
}
static void
cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
{
uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue)));
cpsw_write_cp(sc, queue, v);
}
#endif
/*
* Expanded dump routines for verbose debugging.
*/
/*
 * Dump one CPDMA buffer descriptor (and, if an mbuf is attached, the
 * first bytes of the frame) to the console for debugging.
 */
static void
cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
{
	/* Flag names, ordered from bit 15 down to bit 0 of bd.flags. */
	static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ",
	    "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun",
	    "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1",
	    "Port0"};
	struct cpsw_cpdma_bd bd;
	const char *sep;
	int i;

	cpsw_cpdma_read_bd(sc, slot, &bd);
	printf("BD Addr : 0x%08x Next : 0x%08x\n",
	    cpsw_cpdma_bd_paddr(sc, slot), bd.next);
	printf("  BufPtr: 0x%08x BufLen: 0x%08x\n", bd.bufptr, bd.buflen);
	printf("  BufOff: 0x%08x PktLen: 0x%08x\n", bd.bufoff, bd.pktlen);
	printf("  Flags: ");
	sep = "";
	/* Decode flag bits most-significant first, comma separated. */
	for (i = 0; i < 16; ++i) {
		if (bd.flags & (1 << (15 - i))) {
			printf("%s%s", sep, flags[i]);
			sep = ",";
		}
	}
	printf("\n");
	if (slot->mbuf) {
		/* 14 bytes of Ethernet header, then start of payload. */
		printf("  Ether:  %14D\n",
		    (char *)(slot->mbuf->m_data), " ");
		printf("  Packet: %16D\n",
		    (char *)(slot->mbuf->m_data) + 14, " ");
	}
}
#define CPSW_DUMP_SLOT(cs, slot) do { \
IF_DEBUG(sc) { \
cpsw_dump_slot(sc, slot); \
} \
} while (0)
/*
 * Dump the leading descriptors of a slot queue; entries beyond the
 * first CPSW_TXFRAGS + 1 are summarized as a count instead of being
 * printed in full.
 */
static void
cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q)
{
	struct cpsw_slot *cur;
	int idx, skipped;

	idx = 0;
	skipped = 0;
	STAILQ_FOREACH(cur, q, next) {
		if (idx > CPSW_TXFRAGS)
			++skipped;
		else
			cpsw_dump_slot(sc, cur);
		++idx;
	}
	if (skipped)
		printf(" ... and %d more.\n", skipped);
	printf("\n");
}
#define CPSW_DUMP_QUEUE(sc, q) do { \
IF_DEBUG(sc) { \
cpsw_dump_queue(sc, q); \
} \
} while (0)
/*
 * Bind each slot descriptor to its buffer-descriptor offset in CPPI
 * RAM and place all of them on the global avail list.
 */
static void
cpsw_init_slots(struct cpsw_softc *sc)
{
	struct cpsw_slot *cur;
	int idx;

	STAILQ_INIT(&sc->avail);
	for (idx = 0; idx < nitems(sc->_slots); idx++) {
		cur = &sc->_slots[idx];
		cur->bd_offset = cpsw_cpdma_bd_offset(idx);
		STAILQ_INSERT_TAIL(&sc->avail, cur, next);
	}
}
/*
 * Move up to 'requested' slots (all remaining slots if negative) from
 * the global avail list to the given queue's avail list, creating a
 * DMA map for each.  Running out of global slots is not an error;
 * returns ENOMEM only if a DMA map cannot be created.
 */
static int
cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested)
{
	struct cpsw_slot *cur;
	int n;

	if (requested < 0)
		requested = nitems(sc->_slots);

	for (n = 0; n < requested; ++n) {
		cur = STAILQ_FIRST(&sc->avail);
		if (cur == NULL)
			return (0);
		if (bus_dmamap_create(sc->mbuf_dtag, 0, &cur->dmamap)) {
			device_printf(sc->dev, "failed to create dmamap\n");
			return (ENOMEM);
		}
		STAILQ_REMOVE_HEAD(&sc->avail, next);
		STAILQ_INSERT_TAIL(&queue->avail, cur, next);
		++queue->avail_queue_len;
		++queue->queue_slots;
	}
	return (0);
}
static void
cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
{
int error __diagused;
if (slot->dmamap) {
if (slot->mbuf)
bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap);
KASSERT(error == 0, ("Mapping still active"));
slot->dmamap = NULL;
}
if (slot->mbuf) {
m_freem(slot->mbuf);
slot->mbuf = NULL;
}
}
/*
 * Bring the whole CPSW complex to a known idle state: stop the
 * watchdog, soft-reset the wrapper, subsystem, sliver ports and DMA
 * controller, zero every DMA queue head/completion pointer and mask
 * all DMA interrupts.  Each soft reset is a write-1-then-poll on the
 * same register; the hardware clears the bit when done.
 */
static void
cpsw_reset(struct cpsw_softc *sc)
{
	int i;

	callout_stop(&sc->watchdog.callout);

	/* Reset RMII/RGMII wrapper. */
	cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
		;

	/* Disable TX and RX interrupts for all cores. */
	for (i = 0; i < 3; ++i) {
		cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00);
	}

	/* Reset CPSW subsystem. */
	cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
		;

	/* Reset Sliver port 1 and 2 */
	for (i = 0; i < 2; i++) {
		cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
		while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
			;
	}

	/* Reset DMA controller. */
	cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
		;

	/* Disable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0);

	/* Clear all queues. */
	for (i = 0; i < 8; i++) {
		cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
	}

	/* Clear all interrupt Masks */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
}
/*
 * Program the switch into its operating configuration — ALE, host
 * port mapping, statistics, DMA, interrupts and MDIO — then prime the
 * active TX/RX descriptor queues and start the TX watchdog.  Expected
 * to run on a freshly cpsw_reset() controller.
 */
static void
cpsw_init(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot;
	uint32_t reg;

	/* Disable the interrupt pacing. */
	reg = cpsw_read_4(sc, CPSW_WR_INT_CONTROL);
	reg &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK);
	cpsw_write_4(sc, CPSW_WR_INT_CONTROL, reg);

	/* Clear ALE */
	cpsw_write_4(sc, CPSW_ALE_CONTROL, CPSW_ALE_CTL_CLEAR_TBL);

	/* Enable ALE; dual EMAC operation requires VLAN-aware mode. */
	reg = CPSW_ALE_CTL_ENABLE;
	if (sc->dualemac)
		reg |= CPSW_ALE_CTL_VLAN_AWARE;
	cpsw_write_4(sc, CPSW_ALE_CONTROL, reg);

	/* Set Host Port Mapping. */
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);

	/* Initialize ALE: set host port to forwarding(3). */
	cpsw_write_4(sc, CPSW_ALE_PORTCTL(0),
	    ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD);

	cpsw_write_4(sc, CPSW_SS_PTYPE, 0);

	/* Enable statistics for ports 0, 1 and 2 */
	cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);

	/* Turn off flow control. */
	cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);

	/* Make IP hdr aligned with 4 */
	cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2);

	/* Initialize RX Buffer Descriptors */
	cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), 0);
	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);

	/* Enable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);

	/* Enable Interrupts for core 0 */
	cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F);

	/* Enable host Error Interrupt */
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3);

	/* Enable interrupts for RX and TX on Channel 0 */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET,
	    CPSW_CPDMA_RX_INT(0) | CPSW_CPDMA_RX_INT_THRESH(0));
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1);

	/* Initialze MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
	cpsw_write_4(sc, MDIOCONTROL, MDIOCTL_ENABLE | MDIOCTL_FAULTENB | 0xff);

	/* Select MII in GMII_SEL, Internal Delay mode */
	//ti_scm_reg_write_4(0x650, 0);

	/* Hand the first active descriptor of each ring to the hardware. */
	slot = STAILQ_FIRST(&sc->tx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->tx, slot);
	slot = STAILQ_FIRST(&sc->rx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->rx, slot);
	cpsw_rx_enqueue(sc);
	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), sc->rx.active_queue_len);
	cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), CPSW_TXFRAGS);

	/* Activate network interface. */
	sc->rx.running = 1;
	sc->tx.running = 1;
	sc->watchdog.timer = 0;
	callout_init(&sc->watchdog.callout, 0);
	callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc);
}
/*
*
* Device Probe, Attach, Detach.
*
*/
/*
 * Probe: claim enabled FDT nodes compatible with "ti,cpsw".
 */
static int
cpsw_probe(device_t dev)
{

	if (ofw_bus_status_okay(dev) &&
	    ofw_bus_is_compatible(dev, "ti,cpsw")) {
		device_set_desc(dev, "3-port Switch Ethernet Subsystem");
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}
/*
 * Hook up all four interrupt handlers.  Returns 0 on success, -1 if
 * any bus_setup_intr() call fails (already-established handlers are
 * torn down later by cpsw_intr_detach()).
 */
static int
cpsw_intr_attach(struct cpsw_softc *sc)
{
	int idx, rc;

	for (idx = 0; idx < CPSW_INTR_COUNT; idx++) {
		rc = bus_setup_intr(sc->dev, sc->irq_res[idx],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    cpsw_intr_cb[idx].cb, sc, &sc->ih_cookie[idx]);
		if (rc != 0)
			return (-1);
	}
	return (0);
}
static void
cpsw_intr_detach(struct cpsw_softc *sc)
{
int i;
for (i = 0; i < CPSW_INTR_COUNT; i++) {
if (sc->ih_cookie[i]) {
bus_teardown_intr(sc->dev, sc->irq_res[i],
sc->ih_cookie[i]);
}
}
}
/*
 * Walk the controller's FDT children looking for the "slave@..." node
 * that matches this port's MDIO address, and pull the PHY address and
 * (optional) dual-EMAC reserved VLAN from it.  Fills in
 * sc->port[port].phy/vlan; returns ENXIO if no PHY address is found.
 */
static int
cpsw_get_fdt_data(struct cpsw_softc *sc, int port)
{
	char *name;
	int len, phy, vlan;
	pcell_t phy_id[3], vlan_id;
	phandle_t child;
	unsigned long mdio_child_addr;

	/* Find any slave with phy-handle/phy_id */
	phy = -1;
	vlan = -1;
	for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) {
		if (OF_getprop_alloc(child, "name", (void **)&name) < 0)
			continue;
		if (sscanf(name, "slave@%lx", &mdio_child_addr) != 1) {
			OF_prop_free(name);
			continue;
		}
		OF_prop_free(name);

		/*
		 * Accept either the full MDIO unit address or just its
		 * low 12 bits — presumably to cope with DTBs that use a
		 * short unit address; TODO confirm against real DTBs.
		 */
		if (mdio_child_addr != slave_mdio_addr[port] &&
		    mdio_child_addr != (slave_mdio_addr[port] & 0xFFF))
			continue;

		if (fdt_get_phyaddr(child, NULL, &phy, NULL) != 0) {
			/* Users with old DTB will have phy_id instead */
			phy = -1;
			len = OF_getproplen(child, "phy_id");
			if (len / sizeof(pcell_t) == 2) {
				/* Get phy address from fdt */
				if (OF_getencprop(child, "phy_id", phy_id, len) > 0)
					phy = phy_id[1];
			}
		}

		len = OF_getproplen(child, "dual_emac_res_vlan");
		if (len / sizeof(pcell_t) == 1) {
			/* Get the reserved VLAN id from fdt */
			if (OF_getencprop(child, "dual_emac_res_vlan",
			    &vlan_id, len) > 0) {
				vlan = vlan_id;
			}
		}

		break;
	}
	if (phy == -1)
		return (ENXIO);
	sc->port[port].phy = phy;
	sc->port[port].vlan = vlan;

	return (0);
}
/*
 * Attach the switch subsystem: parse FDT configuration, allocate
 * locks, IRQ/memory resources, the mbuf DMA tag and descriptor slots,
 * set up interrupts, reset and initialize the hardware, then add one
 * "cpsw" child per active port.  On any failure, cpsw_detach() is
 * invoked to unwind whatever was already set up.
 */
static int
cpsw_attach(device_t dev)
{
	int error, i;
	struct cpsw_softc *sc;
	uint32_t reg;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->node = ofw_bus_get_node(dev);
	getbinuptime(&sc->attach_uptime);

	/* Which slave is active when not in dual EMAC mode (default 0). */
	if (OF_getencprop(sc->node, "active_slave", &sc->active_slave,
	    sizeof(sc->active_slave)) <= 0) {
		sc->active_slave = 0;
	}
	if (sc->active_slave > 1)
		sc->active_slave = 1;

	if (OF_hasprop(sc->node, "dual_emac"))
		sc->dualemac = 1;

	/* Resolve PHY/VLAN settings for every port we will expose. */
	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		if (cpsw_get_fdt_data(sc, i) != 0) {
			device_printf(dev,
			    "failed to get PHY address from FDT\n");
			return (ENXIO);
		}
	}

	/* Initialize mutexes */
	mtx_init(&sc->tx.lock, device_get_nameunit(dev),
	    "cpsw TX lock", MTX_DEF);
	mtx_init(&sc->rx.lock, device_get_nameunit(dev),
	    "cpsw RX lock", MTX_DEF);

	/* Allocate IRQ resources */
	error = bus_alloc_resources(dev, irq_res_spec, sc->irq_res);
	if (error) {
		device_printf(dev, "could not allocate IRQ resources\n");
		cpsw_detach(dev);
		return (ENXIO);
	}

	sc->mem_rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->mem_rid, RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(sc->dev, "failed to allocate memory resource\n");
		cpsw_detach(dev);
		return (ENXIO);
	}

	reg = cpsw_read_4(sc, CPSW_SS_IDVER);
	device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7),
	    reg & 0xFF, (reg >> 11) & 0x1F);

	cpsw_add_sysctls(sc);

	/* Allocate a busdma tag and DMA safe memory for mbufs. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES, CPSW_TXFRAGS,	/* maxsize, nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->mbuf_dtag);		/* dmatag */
	if (error) {
		device_printf(dev, "bus_dma_tag_create failed\n");
		cpsw_detach(dev);
		return (error);
	}

	/* Allocate a NULL buffer for padding short frames. */
	sc->nullpad = malloc(ETHER_MIN_LEN, M_DEVBUF, M_WAITOK | M_ZERO);

	cpsw_init_slots(sc);

	/* Allocate slots to TX and RX queues. */
	STAILQ_INIT(&sc->rx.avail);
	STAILQ_INIT(&sc->rx.active);
	STAILQ_INIT(&sc->tx.avail);
	STAILQ_INIT(&sc->tx.active);
	// For now: 128 slots to TX, rest to RX.
	// XXX TODO: start with 32/64 and grow dynamically based on demand.
	if (cpsw_add_slots(sc, &sc->tx, 128) ||
	    cpsw_add_slots(sc, &sc->rx, -1)) {
		device_printf(dev, "failed to allocate dmamaps\n");
		cpsw_detach(dev);
		return (ENOMEM);
	}
	device_printf(dev, "Initial queue size TX=%d RX=%d\n",
	    sc->tx.queue_slots, sc->rx.queue_slots);

	sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0);
	sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0);

	if (cpsw_intr_attach(sc) == -1) {
		device_printf(dev, "failed to setup interrupts\n");
		cpsw_detach(dev);
		return (ENXIO);
	}

#ifdef CPSW_ETHERSWITCH
	for (i = 0; i < CPSW_VLANS; i++)
		cpsw_vgroups[i].vid = -1;
#endif

	/* Reset and program the controller. */
	cpsw_reset(sc);
	cpsw_init(sc);

	/* Add one "cpsw" port child per active slave. */
	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		sc->port[i].dev = device_add_child(dev, "cpsw", i);
		if (sc->port[i].dev == NULL) {
			cpsw_detach(dev);
			return (ENXIO);
		}
	}
	bus_generic_probe(dev);
	bus_generic_attach(dev);

	return (0);
}
/*
 * Detach the controller.  Also used as the error-unwind path from
 * cpsw_attach(), so every step tolerates partially-initialized state.
 */
static int
cpsw_detach(device_t dev)
{
	struct cpsw_softc *sc;
	int error, i;

	bus_generic_detach(dev);
	sc = device_get_softc(dev);

	/* Remove the per-port child devices. */
	for (i = 0; i < CPSW_PORTS; i++) {
		if (sc->port[i].dev)
			device_delete_child(dev, sc->port[i].dev);
	}

	if (device_is_attached(dev)) {
		callout_stop(&sc->watchdog.callout);
		callout_drain(&sc->watchdog.callout);
	}

	/* Stop and release all interrupts */
	cpsw_intr_detach(sc);

	/* Free dmamaps and mbufs */
	for (i = 0; i < nitems(sc->_slots); ++i)
		cpsw_free_slot(sc, &sc->_slots[i]);

	/* Free null padding buffer. */
	if (sc->nullpad)
		free(sc->nullpad, M_DEVBUF);

	/* Free DMA tag */
	if (sc->mbuf_dtag) {
		error = bus_dma_tag_destroy(sc->mbuf_dtag);
		KASSERT(error == 0, ("Unable to destroy DMA tag"));
	}

	/* Free IO memory handler */
	if (sc->mem_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res);
	bus_release_resources(dev, irq_res_spec, sc->irq_res);

	/* Destroy mutexes */
	mtx_destroy(&sc->rx.lock);
	mtx_destroy(&sc->tx.lock);

	/*
	 * NOTE(review): bus_generic_detach() already ran at the top of
	 * this function; this second call looks redundant — confirm
	 * whether the early call can be dropped.
	 */
	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	return (device_delete_children(dev));
}
/*
 * ofw_bus_get_node method: port children do not have FDT nodes of
 * their own, so they share the switch controller's node.
 */
static phandle_t
cpsw_get_node(device_t bus, device_t dev)
{

	/* Share controller node with port device. */
	return (ofw_bus_get_node(bus));
}
/*
 * Port probe: the switch has exactly two slave ports, units 0 and 1.
 */
static int
cpswp_probe(device_t dev)
{

	if (device_get_unit(dev) <= 1) {
		device_set_desc(dev, "Ethernet Switch Port");
		return (BUS_PROBE_DEFAULT);
	}
	device_printf(dev, "Only two ports are supported.\n");
	return (ENXIO);
}
/*
 * Attach a single switch port: resolve PHY/VLAN settings from the
 * parent switch softc, create the ifnet, read the MAC address from
 * the control module (reached via the /opp-table syscon — see FIXME
 * below) and attach the MII bus.
 */
static int
cpswp_attach(device_t dev)
{
	int error;
	if_t ifp;
	struct cpswp_softc *sc;
	uint32_t reg;
	uint8_t mac_addr[ETHER_ADDR_LEN];
	phandle_t opp_table;
	struct syscon *syscon;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->pdev = device_get_parent(dev);
	sc->swsc = device_get_softc(sc->pdev);
	sc->unit = device_get_unit(dev);
	sc->phy = sc->swsc->port[sc->unit].phy;
	sc->vlan = sc->swsc->port[sc->unit].vlan;
	/* In dual EMAC mode each port needs a distinct default VLAN. */
	if (sc->swsc->dualemac && sc->vlan == -1)
		sc->vlan = sc->unit + 1;

	/* Each port has its own MDIO PHY-select and access registers. */
	if (sc->unit == 0) {
		sc->physel = MDIOUSERPHYSEL0;
		sc->phyaccess = MDIOUSERACCESS0;
	} else {
		sc->physel = MDIOUSERPHYSEL1;
		sc->phyaccess = MDIOUSERACCESS1;
	}

	mtx_init(&sc->lock, device_get_nameunit(dev), "cpsw port lock",
	    MTX_DEF);

	/*
	 * Allocate the network interface.  if_alloc() cannot fail, so
	 * the historical NULL check has been dropped.
	 */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if_initname(ifp, device_get_name(sc->dev), sc->unit);
	if_setsoftc(ifp, sc);
	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	if_setinitfn(ifp, cpswp_init);
	if_setstartfn(ifp, cpswp_start);
	if_setioctlfn(ifp, cpswp_ioctl);

	if_setsendqlen(ifp, sc->swsc->tx.queue_slots);
	if_setsendqready(ifp);

	/* FIXME: For now; Go and kidnap syscon from opp-table */
	/* ti,cpsw actually have an optional syscon reference but only for am33xx?? */
	opp_table = OF_finddevice("/opp-table");
	if (opp_table == -1) {
		device_printf(dev, "Cant find /opp-table\n");
		cpswp_detach(dev);
		return (ENXIO);
	}
	if (!OF_hasprop(opp_table, "syscon")) {
		device_printf(dev, "/opp-table doesnt have required syscon property\n");
		cpswp_detach(dev);
		return (ENXIO);
	}
	if (syscon_get_by_ofw_property(dev, opp_table, "syscon", &syscon) != 0) {
		device_printf(dev, "Failed to get syscon\n");
		cpswp_detach(dev);
		return (ENXIO);
	}

	/* Get high part of MAC address from control module (mac_id[0|1]_hi) */
	reg = SYSCON_READ_4(syscon, SCM_MAC_ID0_HI + sc->unit * 8);
	mac_addr[0] = reg & 0xFF;
	mac_addr[1] = (reg >>  8) & 0xFF;
	mac_addr[2] = (reg >> 16) & 0xFF;
	mac_addr[3] = (reg >> 24) & 0xFF;

	/* Get low part of MAC address from control module (mac_id[0|1]_lo) */
	reg = SYSCON_READ_4(syscon, SCM_MAC_ID0_LO + sc->unit * 8);
	mac_addr[4] = reg & 0xFF;
	mac_addr[5] = (reg >> 8) & 0xFF;

	error = mii_attach(dev, &sc->miibus, ifp, cpswp_ifmedia_upd,
	    cpswp_ifmedia_sts, BMSR_DEFCAPMASK, sc->phy, MII_OFFSET_ANY, 0);
	if (error) {
		device_printf(dev, "attaching PHYs failed\n");
		cpswp_detach(dev);
		return (error);
	}
	sc->mii = device_get_softc(sc->miibus);

	/* Select PHY and enable interrupts */
	cpsw_write_4(sc->swsc, sc->physel,
	    MDIO_PHYSEL_LINKINTENB | (sc->phy & 0x1F));

	ether_ifattach(sc->ifp, mac_addr);
	callout_init(&sc->mii_callout, 0);

	return (0);
}
/*
 * Detach a port: if it fully attached, unhook it from the network
 * stack and stop it (under the port lock) before draining the MII
 * tick callout; then release the ifnet and the lock.  Also used as
 * the error-unwind path from cpswp_attach().
 */
static int
cpswp_detach(device_t dev)
{
	struct cpswp_softc *sc;

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc->swsc, (""));
	if (device_is_attached(dev)) {
		ether_ifdetach(sc->ifp);
		CPSW_PORT_LOCK(sc);
		cpswp_stop_locked(sc);
		CPSW_PORT_UNLOCK(sc);
		callout_drain(&sc->mii_callout);
	}

	bus_generic_detach(dev);

	if_free(sc->ifp);
	mtx_destroy(&sc->lock);

	return (0);
}
/*
*
* Init/Shutdown.
*
*/
/*
 * Return non-zero when the shared switch resources may be torn down:
 * either we are not in dual EMAC mode, or neither port ifnet is UP.
 */
static int
cpsw_ports_down(struct cpsw_softc *sc)
{
	struct cpswp_softc *p0, *p1;

	if (!sc->dualemac)
		return (1);
	p0 = device_get_softc(sc->port[0].dev);
	p1 = device_get_softc(sc->port[1].dev);
	if ((if_getflags(p0->ifp) & IFF_UP) != 0)
		return (0);
	if ((if_getflags(p1->ifp) & IFF_UP) != 0)
		return (0);
	return (1);
}
/*
 * ifnet init callback: run the locked initialization under the port
 * lock.
 */
static void
cpswp_init(void *arg)
{
	struct cpswp_softc *sc;

	sc = arg;
	CPSW_DEBUGF(sc->swsc, (""));
	CPSW_PORT_LOCK(sc);
	cpswp_init_locked(sc);
	CPSW_PORT_UNLOCK(sc);
}
/*
 * Bring the port up (port lock held): reset/reinit the shared switch
 * if neither queue is running, program the port's priority maps and
 * MAC, set the ALE to forwarding, install its addresses and (in dual
 * EMAC mode) its VLAN, then start the MII tick and mark the ifnet
 * running.  No-op if the interface is already running.
 */
static void
cpswp_init_locked(void *arg)
{
#ifdef CPSW_ETHERSWITCH
	int i;
#endif
	struct cpswp_softc *sc = arg;
	if_t ifp;
	uint32_t reg;

	CPSW_DEBUGF(sc->swsc, (""));
	CPSW_PORT_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;

	getbinuptime(&sc->init_uptime);

	if (!sc->swsc->rx.running && !sc->swsc->tx.running) {
		/* Reset the controller. */
		cpsw_reset(sc->swsc);
		cpsw_init(sc->swsc);
	}

	/* Set Slave Mapping. */
	cpsw_write_4(sc->swsc, CPSW_SL_RX_PRI_MAP(sc->unit), 0x76543210);
	cpsw_write_4(sc->swsc, CPSW_PORT_P_TX_PRI_MAP(sc->unit + 1),
	    0x33221100);
	cpsw_write_4(sc->swsc, CPSW_SL_RX_MAXLEN(sc->unit), 0x5f2);
	/* Enable MAC RX/TX modules. */
	/* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */
	/* Huh?  Docs call bit 0 "Loopback" some places, "FullDuplex" others. */
	reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit));
	reg |= CPSW_SL_MACTL_GMII_ENABLE;
	cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg);

	/* Initialize ALE: set port to forwarding, initialize addrs */
	cpsw_write_4(sc->swsc, CPSW_ALE_PORTCTL(sc->unit + 1),
	    ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD);
	cpswp_ale_update_addresses(sc, 1);

	if (sc->swsc->dualemac) {
		/* Set Port VID. */
		cpsw_write_4(sc->swsc, CPSW_PORT_P_VLAN(sc->unit + 1),
		    sc->vlan & 0xfff);
		cpsw_ale_update_vlan_table(sc->swsc, sc->vlan,
		    (1 << (sc->unit + 1)) | (1 << 0), /* Member list */
		    (1 << (sc->unit + 1)) | (1 << 0), /* Untagged egress */
		    (1 << (sc->unit + 1)) | (1 << 0), 0); /* mcast reg flood */
#ifdef CPSW_ETHERSWITCH
		/* Record the VLAN in the first free etherswitch group. */
		for (i = 0; i < CPSW_VLANS; i++) {
			if (cpsw_vgroups[i].vid != -1)
				continue;
			cpsw_vgroups[i].vid = sc->vlan;
			break;
		}
#endif
	}

	mii_mediachg(sc->mii);
	callout_reset(&sc->mii_callout, hz, cpswp_tick, sc);
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
}
/*
 * System shutdown: stop every active port under its own lock.
 */
static int
cpsw_shutdown(device_t dev)
{
	struct cpsw_softc *sc;
	struct cpswp_softc *psc;
	int port;

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc, (""));
	for (port = 0; port < CPSW_PORTS; port++) {
		if (!sc->dualemac && port != sc->active_slave)
			continue;
		psc = device_get_softc(sc->port[port].dev);
		CPSW_PORT_LOCK(psc);
		cpswp_stop_locked(psc);
		CPSW_PORT_UNLOCK(psc);
	}
	return (0);
}
/*
 * Ask the hardware to tear down the RX channel, then poll (without
 * the RX lock held, so the interrupt handler can complete the
 * teardown) for up to ~2ms for sc->rx.running to clear.
 */
static void
cpsw_rx_teardown(struct cpsw_softc *sc)
{
	int i = 0;

	CPSW_RX_LOCK(sc);
	CPSW_DEBUGF(sc, ("starting RX teardown"));
	sc->rx.teardown = 1;
	cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
	CPSW_RX_UNLOCK(sc);
	while (sc->rx.running) {
		if (++i > 10) {
			device_printf(sc->dev,
			    "Unable to cleanly shutdown receiver\n");
			return;
		}
		DELAY(200);
	}
	/* Always true here (the loop only exits when running clears). */
	if (!sc->rx.running)
		CPSW_DEBUGF(sc, ("finished RX teardown (%d retries)", i));
}
/*
 * Tear down the TX channel: request a hardware teardown if there are
 * in-flight descriptors (otherwise just mark teardown directly), then
 * repeatedly reap completed descriptors for up to ~2ms until
 * sc->tx.running clears.  Runs with the TX lock held throughout.
 */
static void
cpsw_tx_teardown(struct cpsw_softc *sc)
{
	int i = 0;

	CPSW_TX_LOCK(sc);
	CPSW_DEBUGF(sc, ("starting TX teardown"));
	/* Start the TX queue teardown if queue is not empty. */
	if (STAILQ_FIRST(&sc->tx.active) != NULL)
		cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
	else
		sc->tx.teardown = 1;
	cpsw_tx_dequeue(sc);
	while (sc->tx.running && ++i < 10) {
		DELAY(200);
		cpsw_tx_dequeue(sc);
	}
	if (sc->tx.running) {
		device_printf(sc->dev,
		    "Unable to cleanly shutdown transmitter\n");
	}
	CPSW_DEBUGF(sc,
	    ("finished TX teardown (%d retries, %d idle buffers)", i,
	     sc->tx.active_queue_len));
	CPSW_TX_UNLOCK(sc);
}
/*
 * Stop the port (port lock held): mark the ifnet down, stop the MII
 * tick, and — only once no port remains UP — tear down the shared
 * RX/TX queues, disable this port's MAC, collect final statistics
 * and reset/reinit the controller.  No-op when not running.
 */
static void
cpswp_stop_locked(struct cpswp_softc *sc)
{
	if_t ifp;
	uint32_t reg;

	ifp = sc->ifp;
	CPSW_DEBUGF(sc->swsc, (""));
	CPSW_PORT_LOCK_ASSERT(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	/* Disable interface */
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);

	/* Stop ticker */
	callout_stop(&sc->mii_callout);

	/* Tear down the RX/TX queues. */
	if (cpsw_ports_down(sc->swsc)) {
		cpsw_rx_teardown(sc->swsc);
		cpsw_tx_teardown(sc->swsc);
	}

	/* Stop MAC RX/TX modules. */
	reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit));
	reg &= ~CPSW_SL_MACTL_GMII_ENABLE;
	cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg);

	if (cpsw_ports_down(sc->swsc)) {
		/* Capture stats before we reset controller. */
		cpsw_stats_collect(sc->swsc);

		cpsw_reset(sc->swsc);
		cpsw_init(sc->swsc);
	}
}
/*
* Suspend/Resume.
*/
/*
 * Suspend: stop every active port under its own lock (same procedure
 * as shutdown).
 */
static int
cpsw_suspend(device_t dev)
{
	struct cpsw_softc *sc;
	struct cpswp_softc *psc;
	int port;

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc, (""));
	for (port = 0; port < CPSW_PORTS; port++) {
		if (!sc->dualemac && port != sc->active_slave)
			continue;
		psc = device_get_softc(sc->port[port].dev);
		CPSW_PORT_LOCK(psc);
		cpswp_stop_locked(psc);
		CPSW_PORT_UNLOCK(psc);
	}
	return (0);
}
/*
 * Resume is not implemented; just note the fact when debugging.
 */
static int
cpsw_resume(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);

	CPSW_DEBUGF(sc, ("UNIMPLEMENTED"));
	return (0);
}
/*
*
* IOCTL
*
*/
/*
 * Toggle promiscuous mode.  It is implemented with ALE_BYPASS, which
 * disables the ALE forwarding logic so that every received packet is
 * delivered only to the host port; host transmit packets are
 * processed as in normal mode.
 */
static void
cpsw_set_promisc(struct cpswp_softc *sc, int set)
{
	uint32_t ctl;

	ctl = cpsw_read_4(sc->swsc, CPSW_ALE_CONTROL);
	if (set)
		ctl |= CPSW_ALE_CTL_BYPASS;
	else
		ctl &= ~CPSW_ALE_CTL_BYPASS;
	cpsw_write_4(sc->swsc, CPSW_ALE_CONTROL, ctl);
}
/*
 * All-multicast mode is not implemented; warn when enabling is
 * requested, silently accept disabling.
 */
static void
cpsw_set_allmulti(struct cpswp_softc *sc, int set)
{

	if (!set)
		return;
	printf("All-multicast mode unimplemented\n");
}
/*
 * Per-port ioctl handler.  Handles capability, flag, multicast and
 * media requests; everything else is passed to ether_ioctl().
 */
static int
cpswp_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct cpswp_softc *sc;
	struct ifreq *ifr;
	int error;
	uint32_t changed;

	error = 0;
	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;

	switch (command) {
	case SIOCSIFCAP:
		/* Only hardware checksum offload can be toggled. */
		changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
		if (changed & IFCAP_HWCSUM) {
			if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM)
				if_setcapenablebit(ifp, IFCAP_HWCSUM, 0);
			else
				if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
		}
		error = 0;
		break;
	case SIOCSIFFLAGS:
		CPSW_PORT_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/* Already running: only apply flag changes. */
				changed = if_getflags(ifp) ^ sc->if_flags;
				CPSW_DEBUGF(sc->swsc,
				    ("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)",
				    changed));
				if (changed & IFF_PROMISC)
					cpsw_set_promisc(sc,
					    if_getflags(ifp) & IFF_PROMISC);
				if (changed & IFF_ALLMULTI)
					cpsw_set_allmulti(sc,
					    if_getflags(ifp) & IFF_ALLMULTI);
			} else {
				CPSW_DEBUGF(sc->swsc,
				    ("SIOCSIFFLAGS: starting up"));
				cpswp_init_locked(sc);
			}
		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: shutting down"));
			cpswp_stop_locked(sc);
		}

		sc->if_flags = if_getflags(ifp);
		CPSW_PORT_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
		cpswp_ale_update_addresses(sc, 0);
		break;
	case SIOCDELMULTI:
		/* Ugh.  DELMULTI doesn't provide the specific address
		   being removed, so the best we can do is remove
		   everything and rebuild it all. */
		cpswp_ale_update_addresses(sc, 1);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}
/*
*
* MIIBUS
*
*/
/*
 * Poll the MDIO access register until the GO bit clears, i.e. the
 * previous transaction has finished.  Returns 1 when idle, 0 on
 * timeout (CPSW_MIIBUS_RETRIES polls, CPSW_MIIBUS_DELAY apart).
 */
static int
cpswp_miibus_ready(struct cpsw_softc *sc, uint32_t reg)
{
    uint32_t tries;

    for (tries = CPSW_MIIBUS_RETRIES; --tries != 0; ) {
        if ((cpsw_read_4(sc, reg) & MDIO_PHYACCESS_GO) == 0)
            return (1);
        DELAY(CPSW_MIIBUS_DELAY);
    }
    return (0);
}
/*
 * miibus read method: read PHY register `reg' on PHY address `phy'
 * through the MDIO access register.  Returns the 16-bit register value,
 * or 0 on timeout / missing PHY ACK (miibus convention).
 */
static int
cpswp_miibus_readreg(device_t dev, int phy, int reg)
{
    struct cpswp_softc *sc;
    uint32_t cmd, r;

    sc = device_get_softc(dev);
    /* The previous MDIO transaction must have completed (GO clear). */
    if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
        device_printf(dev, "MDIO not ready to read\n");
        return (0);
    }

    /* Set GO, reg, phy */
    cmd = MDIO_PHYACCESS_GO | (reg & 0x1F) << 21 | (phy & 0x1F) << 16;
    cpsw_write_4(sc->swsc, sc->phyaccess, cmd);

    /* Wait for the hardware to clear GO, signalling completion. */
    if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
        device_printf(dev, "MDIO timed out during read\n");
        return (0);
    }

    r = cpsw_read_4(sc->swsc, sc->phyaccess);
    /* A clear ACK bit means the PHY did not respond; report 0. */
    if ((r & MDIO_PHYACCESS_ACK) == 0) {
        device_printf(dev, "Failed to read from PHY.\n");
        r = 0;
    }
    /* Register data is in the low 16 bits of the access register. */
    return (r & 0xFFFF);
}
/*
 * miibus write method: write `value' into PHY register `reg' on PHY
 * address `phy'.  Always returns 0 (miibus convention); failures are
 * only reported to the console.
 */
static int
cpswp_miibus_writereg(device_t dev, int phy, int reg, int value)
{
    struct cpswp_softc *sc;
    uint32_t cmd;

    sc = device_get_softc(dev);
    /* The previous MDIO transaction must have completed (GO clear). */
    if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
        device_printf(dev, "MDIO not ready to write\n");
        return (0);
    }

    /* Set GO, WRITE, reg, phy, and value */
    cmd = MDIO_PHYACCESS_GO | MDIO_PHYACCESS_WRITE |
        (reg & 0x1F) << 21 | (phy & 0x1F) << 16 | (value & 0xFFFF);
    cpsw_write_4(sc->swsc, sc->phyaccess, cmd);

    /* Wait for GO to clear; a timeout is reported but not retried. */
    if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
        device_printf(dev, "MDIO timed out during write\n");
        return (0);
    }

    return (0);
}
/*
 * miibus status-change method: reprogram this slave port's MACCONTROL
 * register so the MAC's speed/duplex settings match the media the PHY
 * negotiated.
 */
static void
cpswp_miibus_statchg(device_t dev)
{
    struct cpswp_softc *sc;
    uint32_t mac_control, reg;

    sc = device_get_softc(dev);
    CPSW_DEBUGF(sc->swsc, (""));

    /* Read-modify-write: clear all speed/duplex bits first. */
    reg = CPSW_SL_MACCONTROL(sc->unit);
    mac_control = cpsw_read_4(sc->swsc, reg);
    mac_control &= ~(CPSW_SL_MACTL_GIG | CPSW_SL_MACTL_IFCTL_A |
        CPSW_SL_MACTL_IFCTL_B | CPSW_SL_MACTL_FULLDUPLEX);

    switch(IFM_SUBTYPE(sc->mii->mii_media_active)) {
    case IFM_1000_SX:
    case IFM_1000_LX:
    case IFM_1000_CX:
    case IFM_1000_T:
        mac_control |= CPSW_SL_MACTL_GIG;
        break;

    case IFM_100_TX:
        mac_control |= CPSW_SL_MACTL_IFCTL_A;
        break;
    /* Other subtypes (e.g. 10 Mbit) leave all three bits clear. */
    }

    if (sc->mii->mii_media_active & IFM_FDX)
        mac_control |= CPSW_SL_MACTL_FULLDUPLEX;

    cpsw_write_4(sc->swsc, reg, mac_control);
}
/*
*
* Transmit/Receive Packets.
*
*/
/*
 * RX completion interrupt.  Under the RX lock: acknowledge a pending
 * teardown, harvest completed descriptors and refill the ring.  The
 * received packets are then handed to the stack with the lock dropped.
 */
static void
cpsw_intr_rx(void *arg)
{
    struct cpsw_softc *sc;
    if_t ifp;
    struct mbuf *received, *next;

    sc = (struct cpsw_softc *)arg;
    CPSW_RX_LOCK(sc);
    if (sc->rx.teardown) {
        sc->rx.running = 0;
        sc->rx.teardown = 0;
        /* Acknowledge the teardown completion in the CP register. */
        cpsw_write_cp(sc, &sc->rx, 0xfffffffc);
    }
    received = cpsw_rx_dequeue(sc);
    cpsw_rx_enqueue(sc);
    /* Write the RX end-of-interrupt vector to re-arm the interrupt. */
    cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
    CPSW_RX_UNLOCK(sc);

    /* Pass the packet chain up the stack without holding the RX lock. */
    while (received != NULL) {
        next = received->m_nextpkt;
        received->m_nextpkt = NULL;
        ifp = received->m_pkthdr.rcvif;
        if_input(ifp, received);
        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
        received = next;
    }
}
/*
 * Pull completed buffers off the hardware RX ring and reassemble them
 * into packets.  Returns a list of packets linked through m_nextpkt
 * (fragments of one packet linked through m_next), or NULL if nothing
 * completed.  Called with the RX lock held.
 *
 * Fixes: corrected the "patcket"/"discanding" typos in the KASSERT and
 * bootverbose messages.
 */
static struct mbuf *
cpsw_rx_dequeue(struct cpsw_softc *sc)
{
    int nsegs, port, removed;
    struct cpsw_cpdma_bd bd;
    struct cpsw_slot *last, *slot;
    struct cpswp_softc *psc;
    struct mbuf *m, *m0, *mb_head, *mb_tail;
    uint16_t m0_flags;

    nsegs = 0;
    m0 = NULL;
    last = NULL;
    mb_head = NULL;
    mb_tail = NULL;
    removed = 0;

    /* Pull completed packets off hardware RX queue. */
    while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) {
        cpsw_cpdma_read_bd(sc, slot, &bd);

        /*
         * Stop on packets still in use by hardware, but do not stop
         * on packets with the teardown complete flag, they will be
         * discarded later.
         */
        if ((bd.flags & (CPDMA_BD_OWNER | CPDMA_BD_TDOWNCMPLT)) ==
            CPDMA_BD_OWNER)
            break;

        last = slot;
        ++removed;
        STAILQ_REMOVE_HEAD(&sc->rx.active, next);
        STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next);

        bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);

        m = slot->mbuf;
        slot->mbuf = NULL;

        if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
            CPSW_DEBUGF(sc, ("RX teardown is complete"));
            m_freem(m);
            sc->rx.running = 0;
            sc->rx.teardown = 0;
            break;
        }

        /* The port field is 1-based in the descriptor. */
        port = (bd.flags & CPDMA_BD_PORT_MASK) - 1;
        KASSERT(port >= 0 && port <= 1,
            ("packet received with invalid port: %d", port));
        psc = device_get_softc(sc->port[port].dev);

        /* Set up mbuf */
        m->m_data += bd.bufoff;
        m->m_len = bd.buflen;
        if (bd.flags & CPDMA_BD_SOP) {
            /* Start of packet: remember it until EOP arrives. */
            m->m_pkthdr.len = bd.pktlen;
            m->m_pkthdr.rcvif = psc->ifp;
            m->m_flags |= M_PKTHDR;
            m0_flags = bd.flags;
            m0 = m;
        }
        nsegs++;
        m->m_next = NULL;
        m->m_nextpkt = NULL;
        if (bd.flags & CPDMA_BD_EOP && m0 != NULL) {
            /* End of packet: trim the trailing CRC if passed up. */
            if (m0_flags & CPDMA_BD_PASS_CRC)
                m_adj(m0, -ETHER_CRC_LEN);
            m0_flags = 0;
            m0 = NULL;
            if (nsegs > sc->rx.longest_chain)
                sc->rx.longest_chain = nsegs;
            nsegs = 0;
        }

        if ((if_getcapenable(psc->ifp) & IFCAP_RXCSUM) != 0) {
            /* check for valid CRC by looking into pkt_err[5:4] */
            if ((bd.flags &
                (CPDMA_BD_SOP | CPDMA_BD_PKT_ERR_MASK)) ==
                CPDMA_BD_SOP) {
                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
                m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
                m->m_pkthdr.csum_data = 0xffff;
            }
        }

        /*
         * The queue hit end-of-queue on this descriptor but more
         * buffers are already chained: restart it at the next slot.
         */
        if (STAILQ_FIRST(&sc->rx.active) != NULL &&
            (bd.flags & (CPDMA_BD_EOP | CPDMA_BD_EOQ)) ==
            (CPDMA_BD_EOP | CPDMA_BD_EOQ)) {
            cpsw_write_hdp_slot(sc, &sc->rx,
                STAILQ_FIRST(&sc->rx.active));
            sc->rx.queue_restart++;
        }

        /* Add mbuf to packet list to be returned. */
        if (mb_tail != NULL && (bd.flags & CPDMA_BD_SOP)) {
            mb_tail->m_nextpkt = m;
        } else if (mb_tail != NULL) {
            mb_tail->m_next = m;
        } else if (mb_tail == NULL && (bd.flags & CPDMA_BD_SOP) == 0) {
            /* A continuation fragment with no SOP seen: drop it. */
            if (bootverbose)
                printf(
                    "%s: %s: discarding fragment packet w/o header\n",
                    __func__, if_name(psc->ifp));
            m_freem(m);
            continue;
        } else {
            mb_head = m;
        }
        mb_tail = m;
    }

    if (removed != 0) {
        /* Tell the hardware how far we got and update accounting. */
        cpsw_write_cp_slot(sc, &sc->rx, last);
        sc->rx.queue_removes += removed;
        sc->rx.avail_queue_len += removed;
        sc->rx.active_queue_len -= removed;
        if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len)
            sc->rx.max_avail_queue_len = sc->rx.avail_queue_len;
        CPSW_DEBUGF(sc, ("Removed %d received packet(s) from RX queue", removed));
    }

    return (mb_head);
}
/*
 * Refill the hardware RX ring from the avail list, allocating fresh
 * mbuf clusters for empty slots, and chain the new descriptors onto
 * the hardware queue (restarting it if the active list was empty).
 * Called with the RX lock held.
 */
static void
cpsw_rx_enqueue(struct cpsw_softc *sc)
{
    bus_dma_segment_t seg[1];
    struct cpsw_cpdma_bd bd;
    struct cpsw_slot *first_new_slot, *last_old_slot, *next, *slot;
    int error, nsegs, added = 0;

    /* Register new mbufs with hardware. */
    first_new_slot = NULL;
    last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next);
    while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) {
        if (first_new_slot == NULL)
            first_new_slot = slot;
        if (slot->mbuf == NULL) {
            slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
            if (slot->mbuf == NULL) {
                device_printf(sc->dev,
                    "Unable to fill RX queue\n");
                break;
            }
            /* Offer the whole cluster to the hardware. */
            slot->mbuf->m_len =
                slot->mbuf->m_pkthdr.len =
                slot->mbuf->m_ext.ext_size;
        }

        error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
            slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT);

        KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs));
        KASSERT(error == 0, ("DMA error (error=%d)", error));
        if (error != 0 || nsegs != 1) {
            device_printf(sc->dev,
                "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n",
                __func__, nsegs, error);
            bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
            m_freem(slot->mbuf);
            slot->mbuf = NULL;
            break;
        }

        bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD);

        /* Create and submit new rx descriptor. */
        if ((next = STAILQ_NEXT(slot, next)) != NULL)
            bd.next = cpsw_cpdma_bd_paddr(sc, next);
        else
            bd.next = 0;
        bd.bufptr = seg->ds_addr;
        bd.bufoff = 0;
        bd.buflen = MCLBYTES - 1;
        bd.pktlen = bd.buflen;
        /* OWNER hands the descriptor to the hardware. */
        bd.flags = CPDMA_BD_OWNER;
        cpsw_cpdma_write_bd(sc, slot, &bd);
        ++added;
        STAILQ_REMOVE_HEAD(&sc->rx.avail, next);
        STAILQ_INSERT_TAIL(&sc->rx.active, slot, next);
    }

    if (added == 0 || first_new_slot == NULL)
        return;

    CPSW_DEBUGF(sc, ("Adding %d buffers to RX queue", added));

    /* Link new entries to hardware RX queue. */
    if (last_old_slot == NULL) {
        /* Start a fresh queue. */
        cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
    } else {
        /* Add buffers to end of current queue. */
        cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
    }
    sc->rx.queue_adds += added;
    sc->rx.avail_queue_len -= added;
    sc->rx.active_queue_len += added;
    /* Report the newly added free buffers to the controller. */
    cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), added);
    if (sc->rx.active_queue_len > sc->rx.max_active_queue_len)
        sc->rx.max_active_queue_len = sc->rx.active_queue_len;
}
/*
 * if_start method: push pending packets from the send queue into the
 * hardware TX ring and reap any completions while we hold the lock.
 */
static void
cpswp_start(if_t ifp)
{
    struct cpswp_softc *sc = if_getsoftc(ifp);

    /* Nothing to do unless the interface and TX engine are running. */
    if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
        sc->swsc->tx.running == 0)
        return;

    CPSW_TX_LOCK(sc->swsc);
    cpswp_tx_enqueue(sc);
    cpsw_tx_dequeue(sc->swsc);
    CPSW_TX_UNLOCK(sc->swsc);
}
/*
 * TX completion interrupt: reap finished descriptors and acknowledge
 * the interrupt.
 */
static void
cpsw_intr_tx(void *arg)
{
    struct cpsw_softc *sc;

    sc = (struct cpsw_softc *)arg;
    CPSW_TX_LOCK(sc);
    /* 0xfffffffc in the CP register marks teardown completion; ack it. */
    if (cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)) == 0xfffffffc)
        cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
    cpsw_tx_dequeue(sc);
    /* Write the TX end-of-interrupt vector to re-arm the interrupt. */
    cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 2);
    CPSW_TX_UNLOCK(sc);
}
/*
 * Drain the interface send queue into the hardware TX ring.  Each
 * packet is padded to the minimum Ethernet frame size, DMA-mapped
 * (defragmenting once if it has too many segments), turned into a
 * chain of SOP..EOP descriptors and appended to the hardware queue.
 * Caller holds the TX lock.
 */
static void
cpswp_tx_enqueue(struct cpswp_softc *sc)
{
    bus_dma_segment_t segs[CPSW_TXFRAGS];
    struct cpsw_cpdma_bd bd;
    struct cpsw_slot *first_new_slot, *last, *last_old_slot, *next, *slot;
    struct mbuf *m0;
    int error, nsegs, seg, added = 0, padlen;

    /* Pull pending packets from IF queue and prep them for DMA. */
    last = NULL;
    first_new_slot = NULL;
    last_old_slot = STAILQ_LAST(&sc->swsc->tx.active, cpsw_slot, next);
    while ((slot = STAILQ_FIRST(&sc->swsc->tx.avail)) != NULL) {
        m0 = if_dequeue(sc->ifp);
        if (m0 == NULL)
            break;

        slot->mbuf = m0;
        /* Pad short frames up to the minimum size (CRC excluded). */
        padlen = ETHER_MIN_LEN - ETHER_CRC_LEN - m0->m_pkthdr.len;
        if (padlen < 0)
            padlen = 0;
        else if (padlen > 0)
            m_append(slot->mbuf, padlen, sc->swsc->nullpad);

        /* Create mapping in DMA memory */
        error = bus_dmamap_load_mbuf_sg(sc->swsc->mbuf_dtag,
            slot->dmamap, slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);

        /* If the packet is too fragmented, try to simplify. */
        if (error == EFBIG ||
            (error == 0 && nsegs > sc->swsc->tx.avail_queue_len)) {
            bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap);
            m0 = m_defrag(slot->mbuf, M_NOWAIT);
            if (m0 == NULL) {
                device_printf(sc->dev,
                    "Can't defragment packet; dropping\n");
                m_freem(slot->mbuf);
            } else {
                /* Retry the defragmented copy on the next pass. */
                CPSW_DEBUGF(sc->swsc,
                    ("Requeueing defragmented packet"));
                if_sendq_prepend(sc->ifp, m0);
            }
            slot->mbuf = NULL;
            continue;
        }
        if (error != 0) {
            device_printf(sc->dev,
                "%s: Can't setup DMA (error=%d), dropping packet\n",
                __func__, error);
            bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap);
            m_freem(slot->mbuf);
            slot->mbuf = NULL;
            break;
        }

        bus_dmamap_sync(sc->swsc->mbuf_dtag, slot->dmamap,
            BUS_DMASYNC_PREWRITE);

        CPSW_DEBUGF(sc->swsc,
            ("Queueing TX packet: %d segments + %d pad bytes",
            nsegs, padlen));

        if (first_new_slot == NULL)
            first_new_slot = slot;

        /* Link from the previous descriptor. */
        if (last != NULL)
            cpsw_cpdma_write_bd_next(sc->swsc, last, slot);

        slot->ifp = sc->ifp;

        /* If there is only one segment, the for() loop
         * gets skipped and the single buffer gets set up
         * as both SOP and EOP. */
        if (nsegs > 1) {
            next = STAILQ_NEXT(slot, next);
            bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next);
        } else
            bd.next = 0;
        /* Start by setting up the first buffer. */
        bd.bufptr = segs[0].ds_addr;
        bd.bufoff = 0;
        bd.buflen = segs[0].ds_len;
        bd.pktlen = m_length(slot->mbuf, NULL);
        bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER;
        if (sc->swsc->dualemac) {
            /* Dual-EMAC mode: direct the frame out this slave port. */
            bd.flags |= CPDMA_BD_TO_PORT;
            bd.flags |= ((sc->unit + 1) & CPDMA_BD_PORT_MASK);
        }
        for (seg = 1; seg < nsegs; ++seg) {
            /* Save the previous buffer (which isn't EOP) */
            cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
            STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
            STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next);
            slot = STAILQ_FIRST(&sc->swsc->tx.avail);

            /* Setup next buffer (which isn't SOP) */
            if (nsegs > seg + 1) {
                next = STAILQ_NEXT(slot, next);
                bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next);
            } else
                bd.next = 0;
            bd.bufptr = segs[seg].ds_addr;
            bd.bufoff = 0;
            bd.buflen = segs[seg].ds_len;
            bd.pktlen = 0;
            bd.flags = CPDMA_BD_OWNER;
        }

        /* Save the final buffer. */
        bd.flags |= CPDMA_BD_EOP;
        cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
        STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
        STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next);

        last = slot;
        added += nsegs;
        if (nsegs > sc->swsc->tx.longest_chain)
            sc->swsc->tx.longest_chain = nsegs;

        BPF_MTAP(sc->ifp, m0);
    }

    if (first_new_slot == NULL)
        return;

    /* Attach the list of new buffers to the hardware TX queue. */
    if (last_old_slot != NULL &&
        (cpsw_cpdma_read_bd_flags(sc->swsc, last_old_slot) &
        CPDMA_BD_EOQ) == 0) {
        /* Add buffers to end of current queue. */
        cpsw_cpdma_write_bd_next(sc->swsc, last_old_slot,
            first_new_slot);
    } else {
        /* Start a fresh queue. */
        cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx, first_new_slot);
    }
    sc->swsc->tx.queue_adds += added;
    sc->swsc->tx.avail_queue_len -= added;
    sc->swsc->tx.active_queue_len += added;
    if (sc->swsc->tx.active_queue_len > sc->swsc->tx.max_active_queue_len) {
        sc->swsc->tx.max_active_queue_len = sc->swsc->tx.active_queue_len;
    }
    CPSW_DEBUGF(sc->swsc, ("Queued %d TX packet(s)", added));
}
/*
 * Reap completed (or torn-down) TX descriptors back onto the avail
 * list and update the completion pointer.  Returns the number of
 * descriptor slots reclaimed.  Caller holds the TX lock.
 */
static int
cpsw_tx_dequeue(struct cpsw_softc *sc)
{
    struct cpsw_slot *slot, *last_removed_slot = NULL;
    struct cpsw_cpdma_bd bd;
    uint32_t flags, removed = 0;

    /* Pull completed buffers off the hardware TX queue. */
    slot = STAILQ_FIRST(&sc->tx.active);
    while (slot != NULL) {
        flags = cpsw_cpdma_read_bd_flags(sc, slot);

        /* TearDown complete is only marked on the SOP for the packet. */
        if ((flags & (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) ==
            (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) {
            sc->tx.teardown = 1;
        }
        if ((flags & (CPDMA_BD_SOP | CPDMA_BD_OWNER)) ==
            (CPDMA_BD_SOP | CPDMA_BD_OWNER) && sc->tx.teardown == 0)
            break; /* Hardware is still using this packet. */

        bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
        m_freem(slot->mbuf);
        slot->mbuf = NULL;

        if (slot->ifp) {
            /* Torn-down packets count as drops, not completions. */
            if (sc->tx.teardown == 0)
                if_inc_counter(slot->ifp, IFCOUNTER_OPACKETS, 1);
            else
                if_inc_counter(slot->ifp, IFCOUNTER_OQDROPS, 1);
        }

        /* Dequeue any additional buffers used by this packet. */
        while (slot != NULL && slot->mbuf == NULL) {
            STAILQ_REMOVE_HEAD(&sc->tx.active, next);
            STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next);
            ++removed;
            last_removed_slot = slot;
            slot = STAILQ_FIRST(&sc->tx.active);
        }

        cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot);

        /* Restart the TX queue if necessary. */
        cpsw_cpdma_read_bd(sc, last_removed_slot, &bd);
        if (slot != NULL && bd.next != 0 && (bd.flags &
            (CPDMA_BD_EOP | CPDMA_BD_OWNER | CPDMA_BD_EOQ)) ==
            (CPDMA_BD_EOP | CPDMA_BD_EOQ)) {
            cpsw_write_hdp_slot(sc, &sc->tx, slot);
            sc->tx.queue_restart++;
            break;
        }
    }

    if (removed != 0) {
        sc->tx.queue_removes += removed;
        sc->tx.active_queue_len -= removed;
        sc->tx.avail_queue_len += removed;
        if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len)
            sc->tx.max_avail_queue_len = sc->tx.avail_queue_len;
        CPSW_DEBUGF(sc, ("TX removed %d completed packet(s)", removed));
    }

    /* Teardown finishes once the active list has fully drained. */
    if (sc->tx.teardown && STAILQ_EMPTY(&sc->tx.active)) {
        CPSW_DEBUGF(sc, ("TX teardown is complete"));
        sc->tx.teardown = 0;
        sc->tx.running = 0;
    }

    return (removed);
}
/*
*
* Miscellaneous interrupts.
*
*/
/*
 * RX threshold interrupt (separate line from the normal RX interrupt;
 * presumably raised when free RX buffers run low -- see TRM).  Handled
 * the same as a normal RX completion, but acknowledged with EOI
 * vector 0 and without the teardown check.
 */
static void
cpsw_intr_rx_thresh(void *arg)
{
    struct cpsw_softc *sc;
    if_t ifp;
    struct mbuf *received, *next;

    sc = (struct cpsw_softc *)arg;
    CPSW_RX_LOCK(sc);
    received = cpsw_rx_dequeue(sc);
    cpsw_rx_enqueue(sc);
    /* Write the RX-threshold end-of-interrupt vector. */
    cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0);
    CPSW_RX_UNLOCK(sc);

    /* Pass packets up the stack without holding the RX lock. */
    while (received != NULL) {
        next = received->m_nextpkt;
        received->m_nextpkt = NULL;
        ifp = received->m_pkthdr.rcvif;
        if_input(ifp, received);
        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
        received = next;
    }
}
/*
 * Fatal CPDMA "host error" interrupt: the hardware detected a
 * programming error in our descriptors.  Decode the error/channel
 * fields out of DMASTATUS, dump diagnostic state, then panic.  The
 * code after panic() is unreachable but intentionally kept, should
 * the panic ever be downgraded to interrupt suppression.
 */
static void
cpsw_intr_misc_host_error(struct cpsw_softc *sc)
{
    uint32_t intstat;
    uint32_t dmastat;
    int txerr, rxerr, txchan, rxchan;

    printf("\n\n");
    device_printf(sc->dev,
        "HOST ERROR: PROGRAMMING ERROR DETECTED BY HARDWARE\n");
    printf("\n\n");
    intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
    device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat);
    dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
    device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat);

    /* Decode error code and channel number fields from DMASTATUS. */
    txerr = (dmastat >> 20) & 15;
    txchan = (dmastat >> 16) & 7;
    rxerr = (dmastat >> 12) & 15;
    rxchan = (dmastat >> 8) & 7;

    switch (txerr) {
    case 0: break;
    case 1: printf("SOP error on TX channel %d\n", txchan);
        break;
    case 2: printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan);
        break;
    case 3: printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan);
        break;
    case 4: printf("Zero Buffer Pointer on TX channel %d\n", txchan);
        break;
    case 5: printf("Zero Buffer Length on TX channel %d\n", txchan);
        break;
    case 6: printf("Packet length error on TX channel %d\n", txchan);
        break;
    default: printf("Unknown error on TX channel %d\n", txchan);
        break;
    }

    if (txerr != 0) {
        printf("CPSW_CPDMA_TX%d_HDP=0x%x\n",
            txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan)));
        printf("CPSW_CPDMA_TX%d_CP=0x%x\n",
            txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan)));
        cpsw_dump_queue(sc, &sc->tx.active);
    }

    switch (rxerr) {
    case 0: break;
    case 2: printf("Ownership bit not set on RX channel %d\n", rxchan);
        break;
    case 4: printf("Zero Buffer Pointer on RX channel %d\n", rxchan);
        break;
    case 5: printf("Zero Buffer Length on RX channel %d\n", rxchan);
        break;
    case 6: printf("Buffer offset too big on RX channel %d\n", rxchan);
        break;
    default: printf("Unknown RX error on RX channel %d\n", rxchan);
        break;
    }

    if (rxerr != 0) {
        printf("CPSW_CPDMA_RX%d_HDP=0x%x\n",
            rxchan, cpsw_read_4(sc,CPSW_CPDMA_RX_HDP(rxchan)));
        printf("CPSW_CPDMA_RX%d_CP=0x%x\n",
            rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan)));
        cpsw_dump_queue(sc, &sc->rx.active);
    }

    printf("\nALE Table\n");
    cpsw_ale_dump_table(sc);

    // XXX do something useful here??
    panic("CPSW HOST ERROR INTERRUPT");

    // Suppress this interrupt in the future.
    cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat);
    printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n");
    // The watchdog will probably reset the controller
    // in a little while.  It will probably fail again.
}
/*
 * Miscellaneous interrupt dispatcher: statistics rollover, host
 * errors, time-sync events and MDIO events all arrive on this line.
 */
static void
cpsw_intr_misc(void *arg)
{
    struct cpsw_softc *sc = arg;
    uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));

    if (stat & CPSW_WR_C_MISC_EVNT_PEND)
        CPSW_DEBUGF(sc, ("Time sync event interrupt unimplemented"));
    if (stat & CPSW_WR_C_MISC_STAT_PEND)
        cpsw_stats_collect(sc);
    if (stat & CPSW_WR_C_MISC_HOST_PEND)
        cpsw_intr_misc_host_error(sc);
    if (stat & CPSW_WR_C_MISC_MDIOLINK) {
        /* Acknowledge the MDIO link event by writing the status back. */
        cpsw_write_4(sc, MDIOLINKINTMASKED,
            cpsw_read_4(sc, MDIOLINKINTMASKED));
    }
    if (stat & CPSW_WR_C_MISC_MDIOUSER) {
        CPSW_DEBUGF(sc,
            ("MDIO operation completed interrupt unimplemented"));
    }
    /* Write the misc end-of-interrupt vector to re-arm the interrupt. */
    cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3);
}
/*
*
* Periodic Checks and Watchdog.
*
*/
/*
 * Per-port one-second callout: drive the MII state machine and apply
 * any media change the PHY reports.
 */
static void
cpswp_tick(void *msc)
{
    struct cpswp_softc *sc = msc;

    mii_tick(sc->mii);

    /* Check for media type change */
    if (sc->media_status != sc->mii->mii_media.ifm_media) {
        printf("%s: media type changed (ifm_media=%x)\n", __func__,
            sc->mii->mii_media.ifm_media);
        cpswp_ifmedia_upd(sc->ifp);
    }

    /* Schedule another timeout one second from now */
    callout_reset(&sc->mii_callout, hz, cpswp_tick, sc);
}
/*
 * ifmedia status method: poll the PHY and report active media and
 * link status to the caller.
 */
static void
cpswp_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
    struct cpswp_softc *sc = if_getsoftc(ifp);

    CPSW_DEBUGF(sc->swsc, (""));
    CPSW_PORT_LOCK(sc);
    mii_pollstat(sc->mii);
    ifmr->ifm_active = sc->mii->mii_media_active;
    ifmr->ifm_status = sc->mii->mii_media_status;
    CPSW_PORT_UNLOCK(sc);
}
/*
 * ifmedia change method: push the selected media to the PHY and
 * remember what was applied.  Always returns 0.
 */
static int
cpswp_ifmedia_upd(if_t ifp)
{
    struct cpswp_softc *sc = if_getsoftc(ifp);

    CPSW_DEBUGF(sc->swsc, (""));
    CPSW_PORT_LOCK(sc);
    mii_mediachg(sc->mii);
    sc->media_status = sc->mii->mii_media.ifm_media;
    CPSW_PORT_UNLOCK(sc);
    return (0);
}
/*
 * The TX watchdog expired: dump diagnostic state and stop every active
 * port.  NOTE(review): despite the "full_reset" name this only stops
 * the ports; presumably the stack re-initializes them later -- confirm
 * against the init path.
 */
static void
cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc)
{
    struct cpswp_softc *psc;
    int i;

    cpsw_debugf_head("CPSW watchdog");
    device_printf(sc->dev, "watchdog timeout\n");
    printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 0,
        cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)));
    printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 0,
        cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)));
    cpsw_dump_queue(sc, &sc->tx.active);

    for (i = 0; i < CPSW_PORTS; i++) {
        /* In single-EMAC mode only the active slave port is used. */
        if (!sc->dualemac && i != sc->active_slave)
            continue;
        psc = device_get_softc(sc->port[i].dev);
        CPSW_PORT_LOCK(psc);
        cpswp_stop_locked(psc);
        CPSW_PORT_UNLOCK(psc);
    }
}
/*
 * One-second TX watchdog callout.  The timer is cleared whenever the
 * queue is idle, progress was made since the last tick, or a dequeue
 * attempt reclaims something; otherwise it accumulates and triggers a
 * full reset after 5 stalled ticks.
 */
static void
cpsw_tx_watchdog(void *msc)
{
    struct cpsw_softc *sc = msc;

    CPSW_TX_LOCK(sc);
    /*
     * Short-circuit order matters: cpsw_tx_dequeue() is only attempted
     * when the queue is non-empty, running, and made no progress since
     * the previous tick (same evaluation order as the original chain).
     */
    if (sc->tx.active_queue_len == 0 || !sc->tx.running ||
        sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick ||
        cpsw_tx_dequeue(sc) > 0) {
        sc->watchdog.timer = 0;
    } else {
        /* There was something to do but it didn't get done. */
        if (++sc->watchdog.timer > 5) {
            sc->watchdog.timer = 0;
            ++sc->watchdog.resets;
            cpsw_tx_watchdog_full_reset(sc);
        }
    }
    sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes;
    CPSW_TX_UNLOCK(sc);

    /* Schedule another timeout one second from now */
    callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc);
}
/*
*
* ALE support routines.
*
*/
/*
 * Read ALE table row `idx' into ale_entry[0..2]: select the row via
 * TBLCTL (no write bit), then read the three table word registers.
 */
static void
cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
{
    cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023); /* 1024-row table */
    ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0);
    ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1);
    ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2);
}
/*
 * Write ale_entry[0..2] into ALE table row `idx': load the three table
 * word registers, then commit by writing the index with bit 31 set
 * (the write strobe -- see TRM) to TBLCTL.
 */
static void
cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
{
    cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]);
    cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]);
    cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]);
    /*
     * 1U, not 1: left-shifting (signed) 1 into the sign bit is
     * undefined behavior in C.  1U << 31 yields the intended
     * 0x80000000 with no UB; the register value is unchanged.
     */
    cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1U << 31 | (idx & 1023));
}
/*
 * Purge every multicast address entry from the ALE table.  The low
 * table slots are skipped; they hold the per-port unicast/broadcast
 * entries that must survive a multicast flush.
 */
static void
cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc)
{
    uint32_t entry[3];
    int idx;

    /* First four entries are link address and broadcast. */
    for (idx = 10; idx < CPSW_MAX_ALE_ENTRIES; idx++) {
        cpsw_ale_read_entry(sc, idx, entry);
        if (ALE_MCAST(entry) != 1)
            continue;
        if (ALE_TYPE(entry) != ALE_TYPE_ADDR &&
            ALE_TYPE(entry) != ALE_TYPE_VLAN_ADDR)
            continue;
        /* MCast link addr: zeroing all three words frees the slot. */
        entry[0] = entry[1] = entry[2] = 0;
        cpsw_ale_write_entry(sc, idx, entry);
    }
}
/*
 * Install (or refresh) a multicast forwarding entry in the ALE table
 * for `mac', forwarding to the ports in `portmap' (optionally VLAN
 * qualified when vlan != -1).  Reuses a matching entry if one exists,
 * otherwise takes the first free slot.  Returns 0 on success or
 * ENOMEM if the table is full.
 *
 * Fixes: parenthesized the success return per the file's style(9)
 * convention, and cast mac[2] to uint32_t before the << 24 shift
 * (shifting a promoted int value into the sign bit when
 * mac[2] >= 0x80 is undefined behavior).
 */
static int
cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, int vlan,
    uint8_t *mac)
{
    int free_index = -1, matching_index = -1, i;
    uint32_t ale_entry[3], ale_type;

    /* Find a matching entry or a free entry. */
    for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) {
        cpsw_ale_read_entry(sc, i, ale_entry);

        /* Entry Type[61:60] is 0 for free entry */
        if (free_index < 0 && ALE_TYPE(ale_entry) == 0)
            free_index = i;

        if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) &&
            (((ale_entry[1] >> 0) & 0xFF) == mac[1]) &&
            (((ale_entry[0] >>24) & 0xFF) == mac[2]) &&
            (((ale_entry[0] >>16) & 0xFF) == mac[3]) &&
            (((ale_entry[0] >> 8) & 0xFF) == mac[4]) &&
            (((ale_entry[0] >> 0) & 0xFF) == mac[5])) {
            matching_index = i;
            break;
        }
    }

    if (matching_index < 0) {
        if (free_index < 0)
            return (ENOMEM);
        i = free_index;
    }

    if (vlan != -1)
        ale_type = ALE_TYPE_VLAN_ADDR << 28 | vlan << 16;
    else
        ale_type = ALE_TYPE_ADDR << 28;

    /* Set MAC address (cast avoids signed-shift UB for mac[2] >= 0x80) */
    ale_entry[0] = (uint32_t)mac[2] << 24 | mac[3] << 16 | mac[4] << 8 |
        mac[5];
    ale_entry[1] = mac[0] << 8 | mac[1];

    /* Entry type[61:60] and Mcast fwd state[63:62] is fw(3). */
    ale_entry[1] |= ALE_MCAST_FWD | ale_type;

    /* Set portmask [68:66] */
    ale_entry[2] = (portmap & 7) << 2;

    cpsw_ale_write_entry(sc, i, ale_entry);

    return (0);
}
/*
 * Debug helper: decode and print every valid VLAN and address entry in
 * the ALE table to the console.
 */
static void
cpsw_ale_dump_table(struct cpsw_softc *sc) {
    int i;
    uint32_t ale_entry[3];
    for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
        cpsw_ale_read_entry(sc, i, ale_entry);
        switch (ALE_TYPE(ale_entry)) {
        case ALE_TYPE_VLAN:
            printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2],
                ale_entry[1], ale_entry[0]);
            printf("type: %u ", ALE_TYPE(ale_entry));
            printf("vlan: %u ", ALE_VLAN(ale_entry));
            printf("untag: %u ", ALE_VLAN_UNTAG(ale_entry));
            printf("reg flood: %u ", ALE_VLAN_REGFLOOD(ale_entry));
            printf("unreg flood: %u ", ALE_VLAN_UNREGFLOOD(ale_entry));
            printf("members: %u ", ALE_VLAN_MEMBERS(ale_entry));
            printf("\n");
            break;
        case ALE_TYPE_ADDR:
        case ALE_TYPE_VLAN_ADDR:
            printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2],
                ale_entry[1], ale_entry[0]);
            printf("type: %u ", ALE_TYPE(ale_entry));
            /* MAC bytes live in word1[15:0] and word0[31:0]. */
            printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ",
                (ale_entry[1] >> 8) & 0xFF,
                (ale_entry[1] >> 0) & 0xFF,
                (ale_entry[0] >>24) & 0xFF,
                (ale_entry[0] >>16) & 0xFF,
                (ale_entry[0] >> 8) & 0xFF,
                (ale_entry[0] >> 0) & 0xFF);
            printf(ALE_MCAST(ale_entry) ? "mcast " : "ucast ");
            if (ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR)
                printf("vlan: %u ", ALE_VLAN(ale_entry));
            printf("port: %u ", ALE_PORTS(ale_entry));
            printf("\n");
            break;
        }
    }
    printf("\n");
}
/*
 * if_foreach_llmaddr() callback: program one multicast link-layer
 * address into the ALE.  Always returns 1 so the caller counts it.
 */
static u_int
cpswp_set_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
    struct cpswp_softc *sc = arg;
    uint32_t ports;

    /* Dual-EMAC: host port + this slave only; otherwise all 3 ports. */
    ports = sc->swsc->dualemac ? (1 << (sc->unit + 1) | 1 << 0) : 7;
    cpsw_ale_mc_entry_set(sc->swsc, ports, sc->vlan, LLADDR(sdl));
    return (1);
}
/*
 * Reprogram this port's address state: the unicast entry routing our
 * MAC to the host port, the outgoing source address registers, the
 * broadcast entry, and the joined multicast addresses.  With `purge'
 * set, all multicast entries are flushed first (needed for
 * SIOCDELMULTI, which does not name the removed address).
 * Always returns 0.
 */
static int
cpswp_ale_update_addresses(struct cpswp_softc *sc, int purge)
{
    uint8_t *mac;
    uint32_t ale_entry[3], ale_type, portmask;

    if (sc->swsc->dualemac) {
        /* VLAN-qualified entries; host port + this slave only. */
        ale_type = ALE_TYPE_VLAN_ADDR << 28 | sc->vlan << 16;
        portmask = 1 << (sc->unit + 1) | 1 << 0;
    } else {
        ale_type = ALE_TYPE_ADDR << 28;
        portmask = 7;
    }

    /*
     * Route incoming packets for our MAC address to Port 0 (host).
     * For simplicity, keep this entry at table index 0 for port 1 and
     * at index 2 for port 2 in the ALE.
     */
    mac = LLADDR((struct sockaddr_dl *)if_getifaddr(sc->ifp)->ifa_addr);
    ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
    ale_entry[1] = ale_type | mac[0] << 8 | mac[1]; /* addr entry + mac */
    ale_entry[2] = 0; /* port = 0 */
    cpsw_ale_write_entry(sc->swsc, 0 + 2 * sc->unit, ale_entry);

    /* Set outgoing MAC Address for slave port. */
    cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_HI(sc->unit + 1),
        mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]);
    cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_LO(sc->unit + 1),
        mac[5] << 8 | mac[4]);

    /* Keep the broadcast address at table entry 1 (or 3). */
    ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */
    /* ALE_MCAST_FWD, Addr type, upper 16 bits of Mac */
    ale_entry[1] = ALE_MCAST_FWD | ale_type | 0xffff;
    ale_entry[2] = portmask << 2;
    cpsw_ale_write_entry(sc->swsc, 1 + 2 * sc->unit, ale_entry);

    /* SIOCDELMULTI doesn't specify the particular address
       being removed, so we have to remove all and rebuild. */
    if (purge)
        cpsw_ale_remove_all_mc_entries(sc->swsc);

    /* Set other multicast addrs desired. */
    if_foreach_llmaddr(sc->ifp, cpswp_set_maddr, sc);

    return (0);
}
/*
 * Install or update the ALE VLAN entry for `vlan': member ports,
 * untag set, and registered/unregistered multicast flood masks.
 * Reuses a matching entry; otherwise takes the first free slot.
 * Returns 0 on success, -1 if the table is full.
 */
static int
cpsw_ale_update_vlan_table(struct cpsw_softc *sc, int vlan, int ports,
    int untag, int mcregflood, int mcunregflood)
{
    uint32_t entry[3];
    int i, match, spare;

    match = spare = -1;
    /* Scan for this VLAN's entry, remembering the first free slot. */
    for (i = 5; i < CPSW_MAX_ALE_ENTRIES; i++) {
        cpsw_ale_read_entry(sc, i, entry);
        /* Entry Type[61:60] is 0 for free entry */
        if (spare < 0 && ALE_TYPE(entry) == 0)
            spare = i;
        if (ALE_VLAN(entry) == vlan) {
            match = i;
            break;
        }
    }

    if (match < 0) {
        if (spare < 0)
            return (-1);
        i = spare;
    }

    entry[0] = (untag & 7) << 24 | (mcregflood & 7) << 16 |
        (mcunregflood & 7) << 8 | (ports & 7);
    entry[1] = ALE_TYPE_VLAN << 28 | vlan << 16;
    entry[2] = 0;
    cpsw_ale_write_entry(sc, i, entry);

    return (0);
}
/*
*
* Statistics and Sysctls.
*
*/
#if 0
/*
 * Debug-only helper (currently compiled out): log each shadow
 * statistic together with the current hardware counter value and
 * their sum.
 */
static void
cpsw_stats_dump(struct cpsw_softc *sc)
{
    int i;
    uint32_t r;

    for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
        r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
            cpsw_stat_sysctls[i].reg);
        CPSW_DEBUGF(sc, ("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid,
            (intmax_t)sc->shadow_stats[i], r,
            (intmax_t)sc->shadow_stats[i] + r));
    }
}
#endif
/*
 * Fold the hardware statistics counters into the 64-bit shadow copies.
 * Each value read is written back to the counter register afterwards
 * (NOTE(review): presumably write-to-decrement acknowledge semantics
 * -- confirm against the TRM).
 */
static void
cpsw_stats_collect(struct cpsw_softc *sc)
{
    uint32_t val;
    int idx;

    CPSW_DEBUGF(sc, ("Controller shadow statistics updated."));

    for (idx = 0; idx < CPSW_SYSCTL_COUNT; ++idx) {
        val = cpsw_read_4(sc,
            CPSW_STATS_OFFSET + cpsw_stat_sysctls[idx].reg);
        sc->shadow_stats[idx] += val;
        cpsw_write_4(sc,
            CPSW_STATS_OFFSET + cpsw_stat_sysctls[idx].reg, val);
    }
}
/*
 * Sysctl handler for one statistics counter: report the shadow copy
 * plus whatever has accumulated in the hardware register since the
 * last collection.
 */
static int
cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct cpsw_softc *sc = (struct cpsw_softc *)arg1;
    struct cpsw_stat *stat = &cpsw_stat_sysctls[oidp->oid_number];
    uint64_t total;

    total = sc->shadow_stats[oidp->oid_number] +
        cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg);
    return (sysctl_handle_64(oidp, &total, 0, req));
}
static int
cpsw_stat_attached(SYSCTL_HANDLER_ARGS)
{
struct cpsw_softc *sc;
struct bintime t;
unsigned result;
sc = (struct cpsw_softc *)arg1;
getbinuptime(&t);
bintime_sub(&t, &sc->attach_uptime);
result = t.sec;
return (sysctl_handle_int(oidp, &result, 0, req));
}
/*
 * Sysctl handler for intr_coalesce_us.  A value of 0 disables
 * interrupt pacing; any other value is clamped to the supported
 * microsecond range and converted to an interrupts-per-millisecond
 * budget programmed into the RX/TX IMAX registers.
 */
static int
cpsw_intr_coalesce(SYSCTL_HANDLER_ARGS)
{
    int error;
    struct cpsw_softc *sc;
    uint32_t ctrl, intr_per_ms;

    sc = (struct cpsw_softc *)arg1;
    error = sysctl_handle_int(oidp, &sc->coal_us, 0, req);
    /* Read-only access (or handler error): nothing to reprogram. */
    if (error != 0 || req->newptr == NULL)
        return (error);

    ctrl = cpsw_read_4(sc, CPSW_WR_INT_CONTROL);
    ctrl &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK);
    if (sc->coal_us == 0) {
        /* Disable the interrupt pace hardware. */
        cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl);
        cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), 0);
        cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), 0);
        return (0);
    }

    /* Clamp the request to the supported range before converting. */
    if (sc->coal_us > CPSW_WR_C_IMAX_US_MAX)
        sc->coal_us = CPSW_WR_C_IMAX_US_MAX;
    if (sc->coal_us < CPSW_WR_C_IMAX_US_MIN)
        sc->coal_us = CPSW_WR_C_IMAX_US_MIN;
    intr_per_ms = 1000 / sc->coal_us;
    /* Just to make sure... */
    if (intr_per_ms > CPSW_WR_C_IMAX_MAX)
        intr_per_ms = CPSW_WR_C_IMAX_MAX;
    if (intr_per_ms < CPSW_WR_C_IMAX_MIN)
        intr_per_ms = CPSW_WR_C_IMAX_MIN;

    /* Set the prescale to produce 4us pulses from the 125 Mhz clock. */
    ctrl |= (125 * 4) & CPSW_WR_INT_PRESCALE_MASK;

    /* Enable the interrupt pace hardware. */
    cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), intr_per_ms);
    cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), intr_per_ms);
    ctrl |= CPSW_WR_INT_C0_RX_PULSE | CPSW_WR_INT_C0_TX_PULSE;
    cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl);

    return (0);
}
static int
cpsw_stat_uptime(SYSCTL_HANDLER_ARGS)
{
struct cpsw_softc *swsc;
struct cpswp_softc *sc;
struct bintime t;
unsigned result;
swsc = arg1;
sc = device_get_softc(swsc->port[arg2].dev);
if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
getbinuptime(&t);
bintime_sub(&t, &sc->init_uptime);
result = t.sec;
} else
result = 0;
return (sysctl_handle_int(oidp, &result, 0, req));
}
/*
 * Attach the per-queue statistics sysctl leaves (buffer counts,
 * enqueue/dequeue totals, restarts, longest chain) under `node' for
 * the given RX or TX queue.
 */
static void
cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
    struct cpsw_queue *queue)
{
    struct sysctl_oid_list *parent;

    parent = SYSCTL_CHILDREN(node);
    SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers",
        CTLFLAG_RD, &queue->queue_slots, 0,
        "Total buffers currently assigned to this queue");
    SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers",
        CTLFLAG_RD, &queue->active_queue_len, 0,
        "Buffers currently registered with hardware controller");
    SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers",
        CTLFLAG_RD, &queue->max_active_queue_len, 0,
        "Max value of activeBuffers since last driver reset");
    SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers",
        CTLFLAG_RD, &queue->avail_queue_len, 0,
        "Buffers allocated to this queue but not currently "
        "registered with hardware controller");
    SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers",
        CTLFLAG_RD, &queue->max_avail_queue_len, 0,
        "Max value of availBuffers since last driver reset");
    SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued",
        CTLFLAG_RD, &queue->queue_adds, 0,
        "Total buffers added to queue");
    SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued",
        CTLFLAG_RD, &queue->queue_removes, 0,
        "Total buffers removed from queue");
    SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "queueRestart",
        CTLFLAG_RD, &queue->queue_restart, 0,
        "Total times the queue has been restarted");
    SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain",
        CTLFLAG_RD, &queue->longest_chain, 0,
        "Max buffers used for a single packet");
}
/*
 * Attach the TX watchdog statistics sysctl leaf under `node'.
 */
static void
cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
    struct cpsw_softc *sc)
{
    struct sysctl_oid_list *parent;

    parent = SYSCTL_CHILDREN(node);
    SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets",
        CTLFLAG_RD, &sc->watchdog.resets, 0,
        "Total number of watchdog resets");
}
/*
 * Create the driver's sysctl tree: global knobs (debug, attach uptime,
 * interrupt coalescing), per-port statistics, the hardware statistics
 * counters, TX/RX queue statistics and watchdog statistics.
 */
static void
cpsw_add_sysctls(struct cpsw_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *stats_node, *queue_node, *node;
	struct sysctl_oid_list *parent, *stats_parent, *queue_parent;
	struct sysctl_oid_list *ports_parent, *port_parent;
	char port[16];
	int i;

	ctx = device_get_sysctl_ctx(sc->dev);
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->debug, 0, "Enable switch debug messages");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs",
	    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    sc, 0, cpsw_stat_attached, "IU",
	    "Time since driver attach");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "intr_coalesce_us",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, cpsw_intr_coalesce, "IU",
	    "minimum time between interrupts");

	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "ports",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Ports Statistics");
	ports_parent = SYSCTL_CHILDREN(node);
	for (i = 0; i < CPSW_PORTS; i++) {
		/* In single-EMAC mode only the active slave is exposed. */
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		/*
		 * Use snprintf() rather than '0' + i digit arithmetic so
		 * the node name stays correct even for port numbers >= 10.
		 */
		snprintf(port, sizeof(port), "%d", i);
		node = SYSCTL_ADD_NODE(ctx, ports_parent, OID_AUTO,
		    port, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
		    "CPSW Port Statistics");
		port_parent = SYSCTL_CHILDREN(node);
		SYSCTL_ADD_PROC(ctx, port_parent, OID_AUTO, "uptime",
		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, i,
		    cpsw_stat_uptime, "IU", "Seconds since driver init");
	}

	stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Statistics");
	stats_parent = SYSCTL_CHILDREN(stats_node);
	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		SYSCTL_ADD_PROC(ctx, stats_parent, i,
		    cpsw_stat_sysctls[i].oid,
		    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
		    sc, 0, cpsw_stats_sysctl, "IU",
		    cpsw_stat_sysctls[i].oid);
	}

	queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Queue Statistics");
	queue_parent = SYSCTL_CHILDREN(queue_node);

	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Queue Statistics");
	cpsw_add_queue_sysctls(ctx, node, &sc->tx);

	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Queue Statistics");
	cpsw_add_queue_sysctls(ctx, node, &sc->rx);

	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Watchdog Statistics");
	cpsw_add_watchdog_sysctls(ctx, node, sc);
}
#ifdef CPSW_ETHERSWITCH
/*
 * Static switch description returned by cpsw_getinfo(): the external
 * ports plus the CPU port, dot1q VLAN support only.
 */
static etherswitch_info_t etherswitch_info = {
	.es_nports = CPSW_PORTS + 1,
	.es_nvlangroups = CPSW_VLANS,
	.es_name = "TI Common Platform Ethernet Switch (CPSW)",
	.es_vlan_caps = ETHERSWITCH_VLAN_DOT1Q,
};
/*
 * etherswitch_getinfo() handler: hand back the static capability
 * description for this switch.
 */
static etherswitch_info_t *
cpsw_getinfo(device_t dev)
{
	etherswitch_info_t *info;

	info = &etherswitch_info;
	return (info);
}
/*
 * etherswitch_getport() handler: fill in *p for port p->es_port.
 * The CPU port is reported with a fixed 1000baseT full-duplex media;
 * external ports are queried through their attached MII.  The PVID and
 * the ALE port-control flags are read back from the hardware registers.
 */
static int
cpsw_getport(device_t dev, etherswitch_port_t *p)
{
	int err;
	struct cpsw_softc *sc;
	struct cpswp_softc *psc;
	struct ifmediareq *ifmr;
	uint32_t reg;

	if (p->es_port < 0 || p->es_port > CPSW_PORTS)
		return (ENXIO);
	err = 0;
	sc = device_get_softc(dev);
	if (p->es_port == CPSW_CPU_PORT) {
		/* CPU port has no PHY; synthesize a fixed media report. */
		p->es_flags |= ETHERSWITCH_PORT_CPU;
		ifmr = &p->es_ifmr;
		ifmr->ifm_current = ifmr->ifm_active =
		    IFM_ETHER | IFM_1000_T | IFM_FDX;
		ifmr->ifm_mask = 0;
		ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
		ifmr->ifm_count = 0;
	} else {
		/* External port: let the MII layer report real media. */
		psc = device_get_softc(sc->port[p->es_port - 1].dev);
		err = ifmedia_ioctl(psc->ifp, &p->es_ifr,
		    &psc->mii->mii_media, SIOCGIFMEDIA);
	}
	/* Port VLAN ID. */
	reg = cpsw_read_4(sc, CPSW_PORT_P_VLAN(p->es_port));
	p->es_pvid = reg & ETHERSWITCH_VID_MASK;
	/* Per-port ALE control flags. */
	reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port));
	if (reg & ALE_PORTCTL_DROP_UNTAGGED)
		p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED;
	if (reg & ALE_PORTCTL_INGRESS)
		p->es_flags |= ETHERSWITCH_PORT_INGRESS;
	return (err);
}
/*
 * etherswitch_setport() handler: program PVID, ALE port-control flags
 * and (for external ports) media settings for port p->es_port.
 */
static int
cpsw_setport(device_t dev, etherswitch_port_t *p)
{
	struct cpsw_softc *sc;
	struct cpswp_softc *psc;
	struct ifmedia *ifm;
	uint32_t reg;

	if (p->es_port < 0 || p->es_port > CPSW_PORTS)
		return (ENXIO);
	sc = device_get_softc(dev);
	/* A PVID of 0 means "leave the current PVID alone". */
	if (p->es_pvid != 0) {
		cpsw_write_4(sc, CPSW_PORT_P_VLAN(p->es_port),
		    p->es_pvid & ETHERSWITCH_VID_MASK);
	}
	/* Read-modify-write the ALE port-control flags. */
	reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port));
	if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED)
		reg |= ALE_PORTCTL_DROP_UNTAGGED;
	else
		reg &= ~ALE_PORTCTL_DROP_UNTAGGED;
	if (p->es_flags & ETHERSWITCH_PORT_INGRESS)
		reg |= ALE_PORTCTL_INGRESS;
	else
		reg &= ~ALE_PORTCTL_INGRESS;
	cpsw_write_4(sc, CPSW_ALE_PORTCTL(p->es_port), reg);
	/* CPU port does not allow media settings. */
	if (p->es_port == CPSW_CPU_PORT)
		return (0);
	psc = device_get_softc(sc->port[p->es_port - 1].dev);
	ifm = &psc->mii->mii_media;
	return (ifmedia_ioctl(psc->ifp, &p->es_ifr, ifm, SIOCSIFMEDIA));
}
/*
 * etherswitch_getconf() handler: the switch runs in dot1q VLAN mode
 * only, so that is all we report.
 */
static int
cpsw_getconf(device_t dev, etherswitch_conf_t *conf)
{

	conf->vlan_mode = ETHERSWITCH_VLAN_DOT1Q;
	conf->cmd = ETHERSWITCH_CONF_VLAN_MODE;
	return (0);
}
/*
 * etherswitch_getvgroup() handler: report the configuration of VLAN
 * group vg->es_vlangroup by scanning the ALE table for its VLAN entry.
 * A group with no VLAN assigned (cached vid == -1) is reported with
 * es_vid == 0.  Returns EINVAL for an out-of-range group.
 */
static int
cpsw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
	int i, vid;
	uint32_t ale_entry[3];
	struct cpsw_softc *sc;

	sc = device_get_softc(dev);
	if (vg->es_vlangroup >= CPSW_VLANS)
		return (EINVAL);
	vg->es_vid = 0;
	vid = cpsw_vgroups[vg->es_vlangroup].vid;
	if (vid == -1)
		return (0);
	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);
		if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN)
			continue;
		if (vid != ALE_VLAN(ale_entry))
			continue;
		vg->es_fid = 0;
		vg->es_vid = ALE_VLAN(ale_entry) | ETHERSWITCH_VID_VALID;
		vg->es_member_ports = ALE_VLAN_MEMBERS(ale_entry);
		vg->es_untagged_ports = ALE_VLAN_UNTAG(ale_entry);
		/*
		 * A VID appears at most once in the VLAN table; no need
		 * to keep scanning the remaining entries.
		 */
		break;
	}
	return (0);
}
/*
 * Locate the ALE VLAN table entry for "vlan" and clear it.  Stops at
 * the first match; does nothing if the VLAN is not in the table.
 */
static void
cpsw_remove_vlan(struct cpsw_softc *sc, int vlan)
{
	uint32_t entry[3];
	int idx;

	for (idx = 0; idx < CPSW_MAX_ALE_ENTRIES; idx++) {
		cpsw_ale_read_entry(sc, idx, entry);
		if (ALE_TYPE(entry) != ALE_TYPE_VLAN ||
		    ALE_VLAN(entry) != vlan)
			continue;
		entry[0] = 0;
		entry[1] = 0;
		entry[2] = 0;
		cpsw_ale_write_entry(sc, idx, entry);
		break;
	}
}
/*
 * etherswitch_setvgroup() handler: assign or remove the VLAN for a
 * vlangroup.  Setting es_vid to 0 deletes the group's VLAN from the
 * ALE table; otherwise the VLAN table entry is created/updated with
 * the requested member and untagged port sets.
 */
static int
cpsw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
	int i;
	struct cpsw_softc *sc;

	sc = device_get_softc(dev);
	for (i = 0; i < CPSW_VLANS; i++) {
		/* Is this Vlan ID in use by another vlangroup ? */
		if (vg->es_vlangroup != i && cpsw_vgroups[i].vid == vg->es_vid)
			return (EINVAL);
	}
	if (vg->es_vid == 0) {
		/* Nothing assigned to this group - nothing to remove. */
		if (cpsw_vgroups[vg->es_vlangroup].vid == -1)
			return (0);
		cpsw_remove_vlan(sc, cpsw_vgroups[vg->es_vlangroup].vid);
		cpsw_vgroups[vg->es_vlangroup].vid = -1;
		vg->es_untagged_ports = 0;
		vg->es_member_ports = 0;
		vg->es_vid = 0;
		return (0);
	}
	vg->es_vid &= ETHERSWITCH_VID_MASK;
	vg->es_member_ports &= CPSW_PORTS_MASK;
	vg->es_untagged_ports &= CPSW_PORTS_MASK;
	/* A group keeps its VID once set; changing it is rejected. */
	if (cpsw_vgroups[vg->es_vlangroup].vid != -1 &&
	    cpsw_vgroups[vg->es_vlangroup].vid != vg->es_vid)
		return (EINVAL);
	cpsw_vgroups[vg->es_vlangroup].vid = vg->es_vid;
	cpsw_ale_update_vlan_table(sc, vg->es_vid, vg->es_member_ports,
	    vg->es_untagged_ports, vg->es_member_ports, 0);
	return (0);
}
/*
 * etherswitch_readreg() handler: raw register access is not exposed.
 */
static int
cpsw_readreg(device_t dev, int addr)
{

	/* Not supported. */
	return (0);
}
/*
 * etherswitch_writereg() handler: raw register access is not exposed.
 */
static int
cpsw_writereg(device_t dev, int addr, int value)
{

	/* Not supported. */
	return (0);
}
/*
 * etherswitch_readphy() handler: PHY access goes through miibus, not
 * through the etherswitch interface.
 */
static int
cpsw_readphy(device_t dev, int phy, int reg)
{

	/* Not supported. */
	return (0);
}
/*
 * etherswitch_writephy() handler: PHY access goes through miibus, not
 * through the etherswitch interface.
 */
static int
cpsw_writephy(device_t dev, int phy, int reg, int data)
{

	/* Not supported. */
	return (0);
}
#endif
diff --git a/sys/dev/ae/if_ae.c b/sys/dev/ae/if_ae.c
index 62c1a8a30cc8..e424e1bd0e76 100644
--- a/sys/dev/ae/if_ae.c
+++ b/sys/dev/ae/if_ae.c
@@ -1,2254 +1,2248 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2008 Stanislav Sedov <stas@FreeBSD.org>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Driver for Attansic Technology Corp. L2 FastEthernet adapter.
*
* This driver is heavily based on age(4) Attansic L1 driver by Pyun YongHyeon.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include "miibus_if.h"
#include "if_aereg.h"
#include "if_aevar.h"
/*
* Devices supported by this driver.
*/
/* PCI vendor/device IDs this driver attaches to, with probe strings. */
static struct ae_dev {
	uint16_t	vendorid;	/* PCI vendor ID. */
	uint16_t	deviceid;	/* PCI device ID. */
	const char	*name;		/* Device description for probe. */
} ae_devs[] = {
	{ VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L2,
		"Attansic Technology Corp, L2 FastEthernet" },
};
static struct resource_spec ae_res_spec_mem[] = {
{ SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
{ -1, 0, 0 }
};
static struct resource_spec ae_res_spec_irq[] = {
{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
{ -1, 0, 0 }
};
static struct resource_spec ae_res_spec_msi[] = {
{ SYS_RES_IRQ, 1, RF_ACTIVE },
{ -1, 0, 0 }
};
static int ae_probe(device_t dev);
static int ae_attach(device_t dev);
static void ae_pcie_init(ae_softc_t *sc);
static void ae_phy_reset(ae_softc_t *sc);
static void ae_phy_init(ae_softc_t *sc);
static int ae_reset(ae_softc_t *sc);
static void ae_init(void *arg);
static int ae_init_locked(ae_softc_t *sc);
static int ae_detach(device_t dev);
static int ae_miibus_readreg(device_t dev, int phy, int reg);
static int ae_miibus_writereg(device_t dev, int phy, int reg, int val);
static void ae_miibus_statchg(device_t dev);
static void ae_mediastatus(if_t ifp, struct ifmediareq *ifmr);
static int ae_mediachange(if_t ifp);
static void ae_retrieve_address(ae_softc_t *sc);
static void ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
int error);
static int ae_alloc_rings(ae_softc_t *sc);
static void ae_dma_free(ae_softc_t *sc);
static int ae_shutdown(device_t dev);
static int ae_suspend(device_t dev);
static void ae_powersave_disable(ae_softc_t *sc);
static void ae_powersave_enable(ae_softc_t *sc);
static int ae_resume(device_t dev);
static unsigned int ae_tx_avail_size(ae_softc_t *sc);
static int ae_encap(ae_softc_t *sc, struct mbuf **m_head);
static void ae_start(if_t ifp);
static void ae_start_locked(if_t ifp);
static void ae_link_task(void *arg, int pending);
static void ae_stop_rxmac(ae_softc_t *sc);
static void ae_stop_txmac(ae_softc_t *sc);
static void ae_mac_config(ae_softc_t *sc);
static int ae_intr(void *arg);
static void ae_int_task(void *arg, int pending);
static void ae_tx_intr(ae_softc_t *sc);
static void ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd);
static void ae_rx_intr(ae_softc_t *sc);
static void ae_watchdog(ae_softc_t *sc);
static void ae_tick(void *arg);
static void ae_rxfilter(ae_softc_t *sc);
static void ae_rxvlan(ae_softc_t *sc);
static int ae_ioctl(if_t ifp, u_long cmd, caddr_t data);
static void ae_stop(ae_softc_t *sc);
static int ae_check_eeprom_present(ae_softc_t *sc, int *vpdc);
static int ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word);
static int ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr);
static int ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr);
static void ae_update_stats_rx(uint16_t flags, ae_stats_t *stats);
static void ae_update_stats_tx(uint16_t flags, ae_stats_t *stats);
static void ae_init_tunables(ae_softc_t *sc);
static device_method_t ae_methods[] = {
/* Device interface. */
DEVMETHOD(device_probe, ae_probe),
DEVMETHOD(device_attach, ae_attach),
DEVMETHOD(device_detach, ae_detach),
DEVMETHOD(device_shutdown, ae_shutdown),
DEVMETHOD(device_suspend, ae_suspend),
DEVMETHOD(device_resume, ae_resume),
/* MII interface. */
DEVMETHOD(miibus_readreg, ae_miibus_readreg),
DEVMETHOD(miibus_writereg, ae_miibus_writereg),
DEVMETHOD(miibus_statchg, ae_miibus_statchg),
{ NULL, NULL }
};
static driver_t ae_driver = {
"ae",
ae_methods,
sizeof(ae_softc_t)
};
DRIVER_MODULE(ae, pci, ae_driver, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, ae, ae_devs,
nitems(ae_devs));
DRIVER_MODULE(miibus, ae, miibus_driver, 0, 0);
MODULE_DEPEND(ae, pci, 1, 1, 1);
MODULE_DEPEND(ae, ether, 1, 1, 1);
MODULE_DEPEND(ae, miibus, 1, 1, 1);
/*
* Tunables.
*/
static int msi_disable = 0;
TUNABLE_INT("hw.ae.msi_disable", &msi_disable);
#define AE_READ_4(sc, reg) \
bus_read_4((sc)->mem[0], (reg))
#define AE_READ_2(sc, reg) \
bus_read_2((sc)->mem[0], (reg))
#define AE_READ_1(sc, reg) \
bus_read_1((sc)->mem[0], (reg))
#define AE_WRITE_4(sc, reg, val) \
bus_write_4((sc)->mem[0], (reg), (val))
#define AE_WRITE_2(sc, reg, val) \
bus_write_2((sc)->mem[0], (reg), (val))
#define AE_WRITE_1(sc, reg, val) \
bus_write_1((sc)->mem[0], (reg), (val))
#define AE_PHY_READ(sc, reg) \
ae_miibus_readreg(sc->dev, 0, reg)
#define AE_PHY_WRITE(sc, reg, val) \
ae_miibus_writereg(sc->dev, 0, reg, val)
#define AE_CHECK_EADDR_VALID(eaddr) \
((eaddr[0] == 0 && eaddr[1] == 0) || \
(eaddr[0] == 0xffffffff && eaddr[1] == 0xffff))
#define AE_RXD_VLAN(vtag) \
(((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9))
#define AE_TXD_VLAN(vtag) \
(((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08))
/*
 * Device probe: match the PCI vendor/device pair against the table of
 * supported adapters and set the probe description on success.
 */
static int
ae_probe(device_t dev)
{
	struct ae_dev *entry;
	uint16_t did, vid;
	int idx;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (idx = 0; idx < AE_DEVS_COUNT; idx++) {
		entry = &ae_devs[idx];
		if (entry->vendorid == vid && entry->deviceid == did) {
			device_set_desc(dev, entry->name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}
/*
 * Device attach: allocate bus resources (preferring a single MSI
 * vector), reset the chip, load the MAC address, set up DMA rings,
 * the ifnet, MII, the interrupt taskqueue and the interrupt handler.
 * Any failure falls through to ae_detach() to unwind partial state.
 */
static int
ae_attach(device_t dev)
{
	ae_softc_t *sc;
	if_t ifp;
	uint8_t chiprev;
	uint32_t pcirev;
	int nmsi, pmc;
	int error;

	sc = device_get_softc(dev); /* Automatically allocated and zeroed
				       on attach. */
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	sc->dev = dev;

	/*
	 * Initialize mutexes and tasks.
	 */
	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->tick_ch, &sc->mtx, 0);
	TASK_INIT(&sc->int_task, 0, ae_int_task, sc);
	TASK_INIT(&sc->link_task, 0, ae_link_task, sc);

	pci_enable_busmaster(dev);		/* Enable bus mastering. */

	sc->spec_mem = ae_res_spec_mem;

	/*
	 * Allocate memory-mapped registers.
	 */
	error = bus_alloc_resources(dev, sc->spec_mem, sc->mem);
	if (error != 0) {
		device_printf(dev, "could not allocate memory resources.\n");
		sc->spec_mem = NULL;
		goto fail;
	}

	/*
	 * Retrieve PCI and chip revisions.
	 */
	pcirev = pci_get_revid(dev);
	chiprev = (AE_READ_4(sc, AE_MASTER_REG) >> AE_MASTER_REVNUM_SHIFT) &
	    AE_MASTER_REVNUM_MASK;
	if (bootverbose) {
		device_printf(dev, "pci device revision: %#04x\n", pcirev);
		device_printf(dev, "chip id: %#02x\n", chiprev);
	}
	nmsi = pci_msi_count(dev);
	if (bootverbose)
		device_printf(dev, "MSI count: %d.\n", nmsi);

	/*
	 * Allocate interrupt resources.  Try a single MSI vector first
	 * (unless disabled by the hw.ae.msi_disable tunable), then fall
	 * back to a legacy shared INTx interrupt.
	 */
	if (msi_disable == 0 && nmsi == 1) {
		error = pci_alloc_msi(dev, &nmsi);
		if (error == 0) {
			device_printf(dev, "Using MSI messages.\n");
			sc->spec_irq = ae_res_spec_msi;
			error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
			if (error != 0) {
				device_printf(dev, "MSI allocation failed.\n");
				sc->spec_irq = NULL;
				pci_release_msi(dev);
			} else {
				sc->flags |= AE_FLAG_MSI;
			}
		}
	}
	if (sc->spec_irq == NULL) {
		sc->spec_irq = ae_res_spec_irq;
		error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
		if (error != 0) {
			device_printf(dev, "could not allocate IRQ resources.\n");
			sc->spec_irq = NULL;
			goto fail;
		}
	}

	ae_init_tunables(sc);

	ae_phy_reset(sc);		/* Reset PHY. */
	error = ae_reset(sc);		/* Reset the controller itself. */
	if (error != 0)
		goto fail;

	ae_pcie_init(sc);

	ae_retrieve_address(sc);	/* Load MAC address. */

	error = ae_alloc_rings(sc);	/* Allocate ring buffers. */
	if (error != 0)
		goto fail;

	ifp = sc->ifp = if_alloc(IFT_ETHER);
-	if (ifp == NULL) {
-		device_printf(dev, "could not allocate ifnet structure.\n");
-		error = ENXIO;
-		goto fail;
-	}
-
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, ae_ioctl);
	if_setstartfn(ifp, ae_start);
	if_setinitfn(ifp, ae_init);
	if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING);
	if_sethwassist(ifp, 0);
	if_setsendqlen(ifp, ifqmaxlen);
	if_setsendqready(ifp);
	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
		/* Power-management capability present: WOL supported. */
		if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
		sc->flags |= AE_FLAG_PMG;
	}
	if_setcapenable(ifp, if_getcapabilities(ifp));

	/*
	 * Configure and attach MII bus.
	 */
	error = mii_attach(dev, &sc->miibus, ifp, ae_mediachange,
	    ae_mediastatus, BMSR_DEFCAPMASK, AE_PHYADDR_DEFAULT,
	    MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->eaddr);
	/* Tell the upper layer(s) we support long frames. */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	/*
	 * Create and run all helper tasks.
	 */
	sc->tq = taskqueue_create_fast("ae_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->tq);
	if (sc->tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->dev));

	/*
	 * Configure interrupt handlers.
	 */
	error = bus_setup_intr(dev, sc->irq[0], INTR_TYPE_NET | INTR_MPSAFE,
	    ae_intr, NULL, sc, &sc->intrhand);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->tq);
		sc->tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		ae_detach(dev);

	return (error);
}
#define AE_SYSCTL(stx, parent, name, desc, ptr) \
SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, name, CTLFLAG_RD, ptr, 0, desc)
/*
 * Create the per-device "stats" sysctl tree exposing the Rx and Tx MAC
 * counters accumulated in sc->stats.
 */
static void
ae_init_tunables(ae_softc_t *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *root, *stats, *stats_rx, *stats_tx;
	struct ae_stats *ae_stats;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	ae_stats = &sc->stats;

	ctx = device_get_sysctl_ctx(sc->dev);
	root = device_get_sysctl_tree(sc->dev);
	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(root), OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ae statistics");

	/*
	 * Receiver statistics.
	 */
	stats_rx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "bcast",
	    "broadcast frames", &ae_stats->rx_bcast);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "mcast",
	    "multicast frames", &ae_stats->rx_mcast);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "pause",
	    "PAUSE frames", &ae_stats->rx_pause);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "control",
	    "control frames", &ae_stats->rx_ctrl);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "crc_errors",
	    "frames with CRC errors", &ae_stats->rx_crcerr);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "code_errors",
	    "frames with invalid opcode", &ae_stats->rx_codeerr);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "runt",
	    "runt frames", &ae_stats->rx_runt);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "frag",
	    "fragmented frames", &ae_stats->rx_frag);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "align_errors",
	    "frames with alignment errors", &ae_stats->rx_align);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "truncated",
	    "frames truncated due to Rx FIFO underrun", &ae_stats->rx_trunc);

	/*
	 * Transmitter statistics.
	 */
	stats_tx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "bcast",
	    "broadcast frames", &ae_stats->tx_bcast);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "mcast",
	    "multicast frames", &ae_stats->tx_mcast);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "pause",
	    "PAUSE frames", &ae_stats->tx_pause);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "control",
	    "control frames", &ae_stats->tx_ctrl);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "defers",
	    "deferrals occurred", &ae_stats->tx_defer);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "exc_defers",
	    "excessive deferrals occurred", &ae_stats->tx_excdefer);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "singlecols",
	    "single collisions occurred", &ae_stats->tx_singlecol);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "multicols",
	    "multiple collisions occurred", &ae_stats->tx_multicol);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "latecols",
	    "late collisions occurred", &ae_stats->tx_latecol);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "aborts",
	    "transmit aborts due to collisions", &ae_stats->tx_abortcol);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "underruns",
	    "Tx FIFO underruns", &ae_stats->tx_underrun);
}
/*
 * Program the PCIe link-layer registers with their default values.
 */
static void
ae_pcie_init(ae_softc_t *sc)
{

	AE_WRITE_4(sc, AE_PCIE_LTSSM_TESTMODE_REG, AE_PCIE_LTSSM_TESTMODE_DEFAULT);
	AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, AE_PCIE_DLL_TX_CTRL_DEFAULT);
}
/*
 * Reset the PHY by hitting the enable register, then wait for it to
 * settle.
 */
static void
ae_phy_reset(ae_softc_t *sc)
{

	AE_WRITE_4(sc, AE_PHY_ENABLE_REG, AE_PHY_ENABLE);
	DELAY(1000);	/* XXX: pause(9) ? */
}
/*
 * Perform a controller soft reset: trigger it, poll for the reset bit
 * to clear, then poll the idle register until all engines are idle.
 * Returns 0 on success or ENXIO on either timeout.
 */
static int
ae_reset(ae_softc_t *sc)
{
	int i;

	/*
	 * Issue a soft reset.
	 */
	AE_WRITE_4(sc, AE_MASTER_REG, AE_MASTER_SOFT_RESET);
	bus_barrier(sc->mem[0], AE_MASTER_REG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	/*
	 * Wait for reset to complete.
	 */
	for (i = 0; i < AE_RESET_TIMEOUT; i++) {
		if ((AE_READ_4(sc, AE_MASTER_REG) & AE_MASTER_SOFT_RESET) == 0)
			break;
		DELAY(10);
	}
	if (i == AE_RESET_TIMEOUT) {
		device_printf(sc->dev, "reset timeout.\n");
		return (ENXIO);
	}

	/*
	 * Wait for everything to enter idle state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		if (AE_READ_4(sc, AE_IDLE_REG) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT) {
		device_printf(sc->dev, "could not enter idle state.\n");
		return (ENXIO);
	}
	return (0);
}
/*
 * if_init handler: take the softc lock and run the locked
 * initialization path.
 */
static void
ae_init(void *arg)
{
	ae_softc_t *sc;

	sc = (ae_softc_t *)arg;
	AE_LOCK(sc);
	ae_init_locked(sc);
	AE_UNLOCK(sc);
}
/*
 * PHY initialization hook.  The link status change interrupt setup is
 * currently compiled out (see "notyet").
 */
static void
ae_phy_init(ae_softc_t *sc)
{

	/*
	 * Enable link status change interrupt.
	 * XXX magic numbers.
	 */
#ifdef notyet
	AE_PHY_WRITE(sc, 18, 0xc00);
#endif
}
/*
 * Bring the interface up (softc lock held): stop and reset the chip,
 * program the MAC address, DMA ring addresses/sizes, timing and flow
 * control parameters, enable DMA and interrupts, configure the MAC and
 * start Tx/Rx.  Returns 0 on success, ENXIO if the chip reports link
 * failure after DMA enable.  A no-op if IFF_DRV_RUNNING is already set.
 */
static int
ae_init_locked(ae_softc_t *sc)
{
	if_t ifp;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint32_t val;
	bus_addr_t addr;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return (0);
	mii = device_get_softc(sc->miibus);

	ae_stop(sc);
	ae_reset(sc);
	ae_pcie_init(sc);		/* Initialize PCIE stuff. */
	ae_phy_init(sc);
	ae_powersave_disable(sc);

	/*
	 * Clear and disable interrupts.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

	/*
	 * Set the MAC address.
	 */
	bcopy(if_getlladdr(ifp), eaddr, ETHER_ADDR_LEN);
	val = eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5];
	AE_WRITE_4(sc, AE_EADDR0_REG, val);
	val = eaddr[0] << 8 | eaddr[1];
	AE_WRITE_4(sc, AE_EADDR1_REG, val);

	/* Clear all three DMA rings before handing them to the chip. */
	bzero(sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING);
	bzero(sc->txd_base, AE_TXD_BUFSIZE_DEFAULT);
	bzero(sc->txs_base, AE_TXS_COUNT_DEFAULT * 4);

	/*
	 * Set ring buffers base addresses.
	 */
	addr = sc->dma_rxd_busaddr;
	AE_WRITE_4(sc, AE_DESC_ADDR_HI_REG, BUS_ADDR_HI(addr));
	AE_WRITE_4(sc, AE_RXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
	addr = sc->dma_txd_busaddr;
	AE_WRITE_4(sc, AE_TXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
	addr = sc->dma_txs_busaddr;
	AE_WRITE_4(sc, AE_TXS_ADDR_LO_REG, BUS_ADDR_LO(addr));

	/*
	 * Configure ring buffers sizes.
	 */
	AE_WRITE_2(sc, AE_RXD_COUNT_REG, AE_RXD_COUNT_DEFAULT);
	AE_WRITE_2(sc, AE_TXD_BUFSIZE_REG, AE_TXD_BUFSIZE_DEFAULT / 4);
	AE_WRITE_2(sc, AE_TXS_COUNT_REG, AE_TXS_COUNT_DEFAULT);

	/*
	 * Configure interframe gap parameters.
	 */
	val = ((AE_IFG_TXIPG_DEFAULT << AE_IFG_TXIPG_SHIFT) &
	    AE_IFG_TXIPG_MASK) |
	    ((AE_IFG_RXIPG_DEFAULT << AE_IFG_RXIPG_SHIFT) &
	    AE_IFG_RXIPG_MASK) |
	    ((AE_IFG_IPGR1_DEFAULT << AE_IFG_IPGR1_SHIFT) &
	    AE_IFG_IPGR1_MASK) |
	    ((AE_IFG_IPGR2_DEFAULT << AE_IFG_IPGR2_SHIFT) &
	    AE_IFG_IPGR2_MASK);
	AE_WRITE_4(sc, AE_IFG_REG, val);

	/*
	 * Configure half-duplex operation.
	 */
	val = ((AE_HDPX_LCOL_DEFAULT << AE_HDPX_LCOL_SHIFT) &
	    AE_HDPX_LCOL_MASK) |
	    ((AE_HDPX_RETRY_DEFAULT << AE_HDPX_RETRY_SHIFT) &
	    AE_HDPX_RETRY_MASK) |
	    ((AE_HDPX_ABEBT_DEFAULT << AE_HDPX_ABEBT_SHIFT) &
	    AE_HDPX_ABEBT_MASK) |
	    ((AE_HDPX_JAMIPG_DEFAULT << AE_HDPX_JAMIPG_SHIFT) &
	    AE_HDPX_JAMIPG_MASK) | AE_HDPX_EXC_EN;
	AE_WRITE_4(sc, AE_HDPX_REG, val);

	/*
	 * Configure interrupt moderate timer.
	 */
	AE_WRITE_2(sc, AE_IMT_REG, AE_IMT_DEFAULT);
	val = AE_READ_4(sc, AE_MASTER_REG);
	val |= AE_MASTER_IMT_EN;
	AE_WRITE_4(sc, AE_MASTER_REG, val);

	/*
	 * Configure interrupt clearing timer.
	 */
	AE_WRITE_2(sc, AE_ICT_REG, AE_ICT_DEFAULT);

	/*
	 * Configure MTU.
	 */
	val = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
	    ETHER_CRC_LEN;
	AE_WRITE_2(sc, AE_MTU_REG, val);

	/*
	 * Configure cut-through threshold.
	 */
	AE_WRITE_4(sc, AE_CUT_THRESH_REG, AE_CUT_THRESH_DEFAULT);

	/*
	 * Configure flow control.
	 */
	AE_WRITE_2(sc, AE_FLOW_THRESH_HI_REG, (AE_RXD_COUNT_DEFAULT / 8) * 7);
	AE_WRITE_2(sc, AE_FLOW_THRESH_LO_REG, (AE_RXD_COUNT_MIN / 8) >
	    (AE_RXD_COUNT_DEFAULT / 12) ? (AE_RXD_COUNT_MIN / 8) :
	    (AE_RXD_COUNT_DEFAULT / 12));

	/*
	 * Init mailboxes.
	 */
	sc->txd_cur = sc->rxd_cur = 0;
	sc->txs_ack = sc->txd_ack = 0;
	sc->rxd_cur = 0;
	AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur);
	AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);

	sc->tx_inproc = 0;	/* Number of packets the chip processes now. */
	sc->flags |= AE_FLAG_TXAVAIL;	/* Free Tx's available. */

	/*
	 * Enable DMA.
	 */
	AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
	AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

	/*
	 * Check if everything is OK.
	 */
	val = AE_READ_4(sc, AE_ISR_REG);
	if ((val & AE_ISR_PHY_LINKDOWN) != 0) {
		device_printf(sc->dev, "Initialization failed.\n");
		return (ENXIO);
	}

	/*
	 * Clear interrupt status.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, 0x3fffffff);
	AE_WRITE_4(sc, AE_ISR_REG, 0x0);

	/*
	 * Enable interrupts.
	 */
	val = AE_READ_4(sc, AE_MASTER_REG);
	AE_WRITE_4(sc, AE_MASTER_REG, val | AE_MASTER_MANUAL_INT);
	AE_WRITE_4(sc, AE_IMR_REG, AE_IMR_DEFAULT);

	/*
	 * Disable WOL.
	 */
	AE_WRITE_4(sc, AE_WOL_REG, 0);

	/*
	 * Configure MAC.
	 */
	val = AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
	    AE_MAC_FULL_DUPLEX | AE_MAC_CLK_PHY |
	    AE_MAC_TX_FLOW_EN | AE_MAC_RX_FLOW_EN |
	    ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & AE_HALFBUF_MASK) |
	    ((AE_MAC_PREAMBLE_DEFAULT << AE_MAC_PREAMBLE_SHIFT) &
	    AE_MAC_PREAMBLE_MASK);
	AE_WRITE_4(sc, AE_MAC_REG, val);

	/*
	 * Configure Rx MAC.
	 */
	ae_rxfilter(sc);
	ae_rxvlan(sc);

	/*
	 * Enable Tx/Rx.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	AE_WRITE_4(sc, AE_MAC_REG, val | AE_MAC_TX_EN | AE_MAC_RX_EN);

	sc->flags &= ~AE_FLAG_LINK;
	mii_mediachg(mii);	/* Switch to the current media. */

	callout_reset(&sc->tick_ch, hz, ae_tick, sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

#ifdef AE_DEBUG
	device_printf(sc->dev, "Initialization complete.\n");
#endif

	return (0);
}
/*
 * Device detach: stop the chip, drain callouts/tasks, detach the
 * ethernet and MII layers, then release DMA memory, the interrupt
 * handler, bus resources, MSI and the mutex.  Also used as the unwind
 * path when ae_attach() fails part way through.
 */
static int
ae_detach(device_t dev)
{
	struct ae_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));
	ifp = sc->ifp;
	if (device_is_attached(dev)) {
		AE_LOCK(sc);
		sc->flags |= AE_FLAG_DETACH;
		ae_stop(sc);
		AE_UNLOCK(sc);
		callout_drain(&sc->tick_ch);
		taskqueue_drain(sc->tq, &sc->int_task);
		taskqueue_drain(taskqueue_swi, &sc->link_task);
		ether_ifdetach(ifp);
	}
	if (sc->tq != NULL) {
		/*
		 * NOTE(review): int_task may be drained a second time
		 * here when the device was attached; harmless, but
		 * confirm this redundancy is intentional.
		 */
		taskqueue_drain(sc->tq, &sc->int_task);
		taskqueue_free(sc->tq);
		sc->tq = NULL;
	}
	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}
	bus_generic_detach(sc->dev);
	ae_dma_free(sc);
	if (sc->intrhand != NULL) {
		bus_teardown_intr(dev, sc->irq[0], sc->intrhand);
		sc->intrhand = NULL;
	}
	if (ifp != NULL) {
		if_free(ifp);
		sc->ifp = NULL;
	}
	if (sc->spec_irq != NULL)
		bus_release_resources(dev, sc->spec_irq, sc->irq);
	if (sc->spec_mem != NULL)
		bus_release_resources(dev, sc->spec_mem, sc->mem);
	if ((sc->flags & AE_FLAG_MSI) != 0)
		pci_release_msi(dev);
	mtx_destroy(&sc->mtx);

	return (0);
}
/*
 * miibus_readreg handler: issue an MDIO read and busy-wait for it to
 * complete.  Returns the register contents, or 0 on timeout.
 */
static int
ae_miibus_readreg(device_t dev, int phy, int reg)
{
	ae_softc_t *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	/*
	 * Locking is done in upper layers.
	 */
	val = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
	    AE_MDIO_START | AE_MDIO_READ | AE_MDIO_SUP_PREAMBLE |
	    ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK);
	AE_WRITE_4(sc, AE_MDIO_REG, val);

	/*
	 * Wait for operation to complete.
	 */
	for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
		DELAY(2);
		val = AE_READ_4(sc, AE_MDIO_REG);
		if ((val & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
			break;
	}
	if (i == AE_MDIO_TIMEOUT) {
		device_printf(sc->dev, "phy read timeout: %d.\n", reg);
		return (0);
	}

	/*
	 * NOTE(review): a left shift here looks odd for extracting read
	 * data; presumably AE_MDIO_DATA_SHIFT is 0 so it is a no-op —
	 * confirm against if_aereg.h.
	 */
	return ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
}
/*
 * miibus_writereg handler: issue an MDIO write and busy-wait for it to
 * complete.  Timeouts are logged but not reported to the caller.
 */
static int
ae_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	ae_softc_t *sc;
	uint32_t aereg;
	int i;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	/*
	 * Locking is done in upper layers.
	 */
	aereg = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
	    AE_MDIO_START | AE_MDIO_SUP_PREAMBLE |
	    ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK) |
	    ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
	AE_WRITE_4(sc, AE_MDIO_REG, aereg);

	/*
	 * Wait for operation to complete.
	 */
	for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
		DELAY(2);
		aereg = AE_READ_4(sc, AE_MDIO_REG);
		if ((aereg & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
			break;
	}
	if (i == AE_MDIO_TIMEOUT) {
		device_printf(sc->dev, "phy write timeout: %d.\n", reg);
	}
	return (0);
}
/*
 * miibus_statchg handler: defer link-state processing to the softirq
 * taskqueue, where ae_link_task() runs without MII locks held.
 */
static void
ae_miibus_statchg(device_t dev)
{
	ae_softc_t *sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
}
/*
 * ifmedia status callback: poll the PHY and report current media
 * status/active under the softc lock.
 */
static void
ae_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
	ae_softc_t *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	AE_LOCK(sc);
	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	AE_UNLOCK(sc);
}
/*
 * ifmedia change callback: reset each attached PHY and reprogram the
 * selected media.  Returns the mii_mediachg() error code.
 */
static int
ae_mediachange(if_t ifp)
{
	ae_softc_t *sc;
	struct mii_data *mii;
	struct mii_softc *mii_sc;
	int error;

	/* XXX: check IFF_UP ?? */
	sc = if_getsoftc(ifp);
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	AE_LOCK(sc);
	mii = device_get_softc(sc->miibus);
	LIST_FOREACH(mii_sc, &mii->mii_phys, mii_list)
		PHY_RESET(mii_sc);
	error = mii_mediachg(mii);
	AE_UNLOCK(sc);

	return (error);
}
/*
 * Check whether a VPD EEPROM is present by probing for the PCI VPD
 * capability; on success *vpdc receives the capability offset.
 * Disables the SPI VPD-enable bit first (mirrors Linux behavior).
 */
static int
ae_check_eeprom_present(ae_softc_t *sc, int *vpdc)
{
	int error;
	uint32_t val;

	KASSERT(vpdc != NULL, ("[ae, %d]: vpdc is NULL!\n", __LINE__));

	/*
	 * Not sure why, but Linux does this.
	 */
	val = AE_READ_4(sc, AE_SPICTL_REG);
	if ((val & AE_SPICTL_VPD_EN) != 0) {
		val &= ~AE_SPICTL_VPD_EN;
		AE_WRITE_4(sc, AE_SPICTL_REG, val);
	}
	error = pci_find_cap(sc->dev, PCIY_VPD, vpdc);
	return (error);
}
/*
 * Read one 32-bit word from the VPD area (registers start at offset
 * 0x100), polling for the DONE bit.  Returns 0 and stores the value in
 * *word, or ETIMEDOUT.
 */
static int
ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word)
{
	uint32_t val;
	int i;

	AE_WRITE_4(sc, AE_VPD_DATA_REG, 0);	/* Clear register value. */

	/*
	 * VPD registers start at offset 0x100. Read them.
	 */
	val = 0x100 + reg * 4;
	AE_WRITE_4(sc, AE_VPD_CAP_REG, (val << AE_VPD_CAP_ADDR_SHIFT) &
	    AE_VPD_CAP_ADDR_MASK);
	for (i = 0; i < AE_VPD_TIMEOUT; i++) {
		DELAY(2000);
		val = AE_READ_4(sc, AE_VPD_CAP_REG);
		if ((val & AE_VPD_CAP_DONE) != 0)
			break;
	}
	if (i == AE_VPD_TIMEOUT) {
		device_printf(sc->dev, "timeout reading VPD register %d.\n",
		    reg);
		return (ETIMEDOUT);
	}
	*word = AE_READ_4(sc, AE_VPD_DATA_REG);
	return (0);
}
/*
 * Try to recover the Ethernet address from the VPD area of an attached
 * EEPROM.  eaddr[0] receives the low 4 bytes, eaddr[1] the high 2.
 * Returns 0 on success, ENOENT if both address words were not found,
 * EINVAL if the recovered address is invalid, or a VPD read error.
 */
static int
ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr)
{
	uint32_t word, reg, val;
	int error;
	int found;
	int vpdc;
	int i;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	KASSERT(eaddr != NULL, ("[ae, %d]: eaddr is NULL", __LINE__));

	/*
	 * Check for EEPROM.
	 */
	error = ae_check_eeprom_present(sc, &vpdc);
	if (error != 0)
		return (error);

	/*
	 * Read the VPD configuration space.
	 * Each register is prefixed with signature,
	 * so we can check if it is valid.
	 */
	for (i = 0, found = 0; i < AE_VPD_NREGS; i++) {
		error = ae_vpd_read_word(sc, i, &word);
		if (error != 0)
			break;

		/*
		 * Check signature.
		 */
		if ((word & AE_VPD_SIG_MASK) != AE_VPD_SIG)
			break;
		reg = word >> AE_VPD_REG_SHIFT;
		i++;	/* Move to the next word. */

		/* Only the two address registers are of interest. */
		if (reg != AE_EADDR0_REG && reg != AE_EADDR1_REG)
			continue;

		error = ae_vpd_read_word(sc, i, &val);
		if (error != 0)
			break;
		if (reg == AE_EADDR0_REG)
			eaddr[0] = val;
		else
			eaddr[1] = val;
		found++;
	}

	/* Both halves of the address must have been seen. */
	if (found < 2)
		return (ENOENT);

	eaddr[1] &= 0xffff;	/* Only last 2 bytes are used. */
	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
		if (bootverbose)
			device_printf(sc->dev,
			    "VPD ethernet address registers are invalid.\n");
		return (EINVAL);
	}
	return (0);
}
/*
 * Fetch the Ethernet address straight from the MAC address registers,
 * which the BIOS is supposed to have programmed.  Returns 0 on success
 * or EINVAL if the registers hold an invalid address.
 */
static int
ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr)
{

	eaddr[0] = AE_READ_4(sc, AE_EADDR0_REG);
	eaddr[1] = AE_READ_4(sc, AE_EADDR1_REG) & 0xffff; /* Low 2 bytes. */
	if (AE_CHECK_EADDR_VALID(eaddr) == 0)
		return (0);
	if (bootverbose)
		device_printf(sc->dev,
		    "Ethernet address registers are invalid.\n");
	return (EINVAL);
}
/*
 * Determine the interface's Ethernet address: first try the EEPROM VPD
 * area, then the MAC address registers, and finally fall back to a
 * randomly generated locally administered address.
 */
static void
ae_retrieve_address(ae_softc_t *sc)
{
	uint32_t eaddr[2] = {0, 0};
	int error;

	/*
	 * Check for EEPROM.
	 */
	error = ae_get_vpd_eaddr(sc, eaddr);
	if (error != 0)
		error = ae_get_reg_eaddr(sc, eaddr);
	if (error != 0) {
		if (bootverbose)
			device_printf(sc->dev,
			    "Generating random ethernet address.\n");
		eaddr[0] = arc4random();

		/*
		 * Set OUI to ASUSTek COMPUTER INC.
		 */
		sc->eaddr[0] = 0x02;	/* U/L bit set. */
		sc->eaddr[1] = 0x1f;
		sc->eaddr[2] = 0xc6;
		sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
	} else {
		/* eaddr[1] holds the high 2 bytes, eaddr[0] the low 4. */
		sc->eaddr[0] = (eaddr[1] >> 8) & 0xff;
		sc->eaddr[1] = (eaddr[1] >> 0) & 0xff;
		sc->eaddr[2] = (eaddr[0] >> 24) & 0xff;
		sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
	}
}
/*
 * bus_dmamap_load() callback: store the single segment's bus address
 * through the bus_addr_t pointer passed in arg.
 */
static void
ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("[ae, %d]: %d segments instead of 1!", __LINE__,
	    nsegs));
	*(bus_addr_t *)arg = segs[0].ds_addr;
}
/*
 * Allocate and DMA-map the TxD, TxS and RxD rings.  On failure the
 * caller is expected to invoke ae_dma_free() to release whatever was
 * set up before the error.  Returns 0 on success or a bus_dma error.
 */
static int
ae_alloc_rings(ae_softc_t *sc)
{
	bus_addr_t busaddr;
	int error;

	busaddr = 0;	/* Defensive: only set by ae_dmamap_cb on success. */

	/*
	 * Create parent DMA tag.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
	    1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->dma_parent_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create parent DMA tag.\n");
		return (error);
	}

	/*
	 * Create DMA tag for TxD.
	 */
	error = bus_dma_tag_create(sc->dma_parent_tag,
	    8, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, AE_TXD_BUFSIZE_DEFAULT, 1,
	    AE_TXD_BUFSIZE_DEFAULT, 0, NULL, NULL,
	    &sc->dma_txd_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create TxD DMA tag.\n");
		return (error);
	}

	/*
	 * Create DMA tag for TxS.
	 */
	error = bus_dma_tag_create(sc->dma_parent_tag,
	    8, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, AE_TXS_COUNT_DEFAULT * 4, 1,
	    AE_TXS_COUNT_DEFAULT * 4, 0, NULL, NULL,
	    &sc->dma_txs_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create TxS DMA tag.\n");
		return (error);
	}

	/*
	 * Create DMA tag for RxD.
	 */
	error = bus_dma_tag_create(sc->dma_parent_tag,
	    128, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING, 1,
	    AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING, 0, NULL, NULL,
	    &sc->dma_rxd_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create RxD DMA tag.\n");
		return (error);
	}

	/*
	 * Allocate TxD DMA memory.
	 */
	error = bus_dmamem_alloc(sc->dma_txd_tag, (void **)&sc->txd_base,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->dma_txd_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate DMA memory for TxD ring.\n");
		return (error);
	}
	error = bus_dmamap_load(sc->dma_txd_tag, sc->dma_txd_map, sc->txd_base,
	    AE_TXD_BUFSIZE_DEFAULT, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error != 0 || busaddr == 0) {
		device_printf(sc->dev,
		    "could not load DMA map for TxD ring.\n");
		return (error);
	}
	sc->dma_txd_busaddr = busaddr;

	/*
	 * Allocate TxS DMA memory.
	 */
	error = bus_dmamem_alloc(sc->dma_txs_tag, (void **)&sc->txs_base,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->dma_txs_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate DMA memory for TxS ring.\n");
		return (error);
	}
	error = bus_dmamap_load(sc->dma_txs_tag, sc->dma_txs_map, sc->txs_base,
	    AE_TXS_COUNT_DEFAULT * 4, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error != 0 || busaddr == 0) {
		device_printf(sc->dev,
		    "could not load DMA map for TxS ring.\n");
		return (error);
	}
	sc->dma_txs_busaddr = busaddr;

	/*
	 * Allocate RxD DMA memory.
	 */
	error = bus_dmamem_alloc(sc->dma_rxd_tag, (void **)&sc->rxd_base_dma,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->dma_rxd_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate DMA memory for RxD ring.\n");
		return (error);
	}
	error = bus_dmamap_load(sc->dma_rxd_tag, sc->dma_rxd_map,
	    sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING,
	    ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error != 0 || busaddr == 0) {
		device_printf(sc->dev,
		    "could not load DMA map for RxD ring.\n");
		return (error);
	}
	/* The ring proper starts AE_RXD_PADDING bytes into the allocation. */
	sc->dma_rxd_busaddr = busaddr + AE_RXD_PADDING;
	sc->rxd_base = (ae_rxd_t *)(sc->rxd_base_dma + AE_RXD_PADDING);

	return (0);
}
/*
 * Release everything ae_alloc_rings() set up: unload the DMA maps,
 * free the ring memory and destroy the tags.  Safe on a partially
 * initialized softc - every step is guarded, and pointers are reset
 * so a second call is a no-op.
 */
static void
ae_dma_free(ae_softc_t *sc)
{

	if (sc->dma_txd_tag != NULL) {
		if (sc->dma_txd_busaddr != 0)
			bus_dmamap_unload(sc->dma_txd_tag, sc->dma_txd_map);
		if (sc->txd_base != NULL)
			bus_dmamem_free(sc->dma_txd_tag, sc->txd_base,
			    sc->dma_txd_map);
		bus_dma_tag_destroy(sc->dma_txd_tag);
		sc->dma_txd_tag = NULL;
		sc->txd_base = NULL;
		sc->dma_txd_busaddr = 0;
	}
	if (sc->dma_txs_tag != NULL) {
		if (sc->dma_txs_busaddr != 0)
			bus_dmamap_unload(sc->dma_txs_tag, sc->dma_txs_map);
		if (sc->txs_base != NULL)
			bus_dmamem_free(sc->dma_txs_tag, sc->txs_base,
			    sc->dma_txs_map);
		bus_dma_tag_destroy(sc->dma_txs_tag);
		sc->dma_txs_tag = NULL;
		sc->txs_base = NULL;
		sc->dma_txs_busaddr = 0;
	}
	if (sc->dma_rxd_tag != NULL) {
		if (sc->dma_rxd_busaddr != 0)
			bus_dmamap_unload(sc->dma_rxd_tag, sc->dma_rxd_map);
		if (sc->rxd_base_dma != NULL)
			bus_dmamem_free(sc->dma_rxd_tag, sc->rxd_base_dma,
			    sc->dma_rxd_map);
		bus_dma_tag_destroy(sc->dma_rxd_tag);
		sc->dma_rxd_tag = NULL;
		sc->rxd_base_dma = NULL;
		sc->dma_rxd_busaddr = 0;
	}
	/* Child tags must be destroyed before the parent. */
	if (sc->dma_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->dma_parent_tag);
		sc->dma_parent_tag = NULL;
	}
}
/*
 * System shutdown hook: quiesce the chip via the suspend path, then
 * drop the PHY into power-save mode.
 */
static int
ae_shutdown(device_t dev)
{
	int rv;
	ae_softc_t *sc;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));

	rv = ae_suspend(dev);
	AE_LOCK(sc);
	ae_powersave_enable(sc);
	AE_UNLOCK(sc);
	return (rv);
}
/*
 * Clear the PHY's power-save bit via the vendor debug-port registers
 * so the link stays usable while the interface is active.
 */
static void
ae_powersave_disable(ae_softc_t *sc)
{
	uint32_t val;

	AE_LOCK_ASSERT(sc);

	/* Select debug register 0, then read-modify-write its data. */
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
	val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
	if (val & AE_PHY_DBG_POWERSAVE) {
		val &= ~AE_PHY_DBG_POWERSAVE;
		AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, val);
		DELAY(1000);	/* Give the PHY time to settle. */
	}
}
/*
 * Put the PHY into vendor-specific power-save mode through the
 * debug-port registers.  The sequence is taken from vendor code.
 */
static void
ae_powersave_enable(ae_softc_t *sc)
{
	uint32_t val;

	AE_LOCK_ASSERT(sc);

	/*
	 * XXX magic numbers.
	 */
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
	val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
	/*
	 * NOTE(review): writing val | 0x1000 to the ADDR register rather
	 * than DATA looks suspicious, but matches the historical vendor
	 * sequence - confirm against the datasheet before changing.
	 */
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, val | 0x1000);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 2);
	AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0x3000);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 3);
	AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0);
}
/*
 * Prepare the chip for a low-power state: program Wake-On-LAN (magic
 * packet when the link is up, link-change otherwise), apply PCIe
 * workarounds and arm PME in PCI power-management config space.
 * Called from the suspend/shutdown path with the softc lock held.
 */
static void
ae_pm_init(ae_softc_t *sc)
{
	if_t ifp;
	uint32_t val;
	uint16_t pmstat;
	struct mii_data *mii;
	int pmc;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	if ((sc->flags & AE_FLAG_PMG) == 0) {
		/* Disable WOL entirely. */
		AE_WRITE_4(sc, AE_WOL_REG, 0);
		return;
	}

	/*
	 * Configure WOL if enabled.
	 */
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) {
		mii = device_get_softc(sc->miibus);
		mii_pollstat(mii);
		if ((mii->mii_media_status & IFM_AVALID) != 0 &&
		    (mii->mii_media_status & IFM_ACTIVE) != 0) {
			/* Link up: wake on magic packet. */
			AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_MAGIC | \
			    AE_WOL_MAGIC_PME);

			/*
			 * Configure MAC.
			 */
			val = AE_MAC_RX_EN | AE_MAC_CLK_PHY | \
			    AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD | \
			    ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & \
			    AE_HALFBUF_MASK) | \
			    ((AE_MAC_PREAMBLE_DEFAULT << \
			    AE_MAC_PREAMBLE_SHIFT) & AE_MAC_PREAMBLE_MASK) | \
			    AE_MAC_BCAST_EN | AE_MAC_MCAST_EN;
			if ((IFM_OPTIONS(mii->mii_media_active) & \
			    IFM_FDX) != 0)
				val |= AE_MAC_FULL_DUPLEX;
			AE_WRITE_4(sc, AE_MAC_REG, val);
		} else {	/* No link. */
			/* Wake on link change instead; keep MAC off. */
			AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_LNKCHG | \
			    AE_WOL_LNKCHG_PME);
			AE_WRITE_4(sc, AE_MAC_REG, 0);
		}
	} else {
		ae_powersave_enable(sc);
	}

	/*
	 * PCIE hacks. Magic numbers.
	 */
	val = AE_READ_4(sc, AE_PCIE_PHYMISC_REG);
	val |= AE_PCIE_PHYMISC_FORCE_RCV_DET;
	AE_WRITE_4(sc, AE_PCIE_PHYMISC_REG, val);
	val = AE_READ_4(sc, AE_PCIE_DLL_TX_CTRL_REG);
	val |= AE_PCIE_DLL_TX_CTRL_SEL_NOR_CLK;
	AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, val);

	/*
	 * Configure PME.
	 */
	if (pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0) {
		pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
		pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
		if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
			pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
}
/*
 * Device suspend hook: halt the chip and arm power management / WOL.
 */
static int
ae_suspend(device_t dev)
{
	ae_softc_t *aesc = device_get_softc(dev);

	AE_LOCK(aesc);
	ae_stop(aesc);		/* Quiesce MACs and DMA engines. */
	ae_pm_init(aesc);	/* Program WOL / PME for the sleep state. */
	AE_UNLOCK(aesc);
	return (0);
}
/*
 * Device resume hook: clear wakeup status and reinitialize the chip
 * if the interface was administratively up.
 */
static int
ae_resume(device_t dev)
{
	ae_softc_t *aesc;

	aesc = device_get_softc(dev);
	KASSERT(aesc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	AE_LOCK(aesc);
	/* Reading the WOL register clears any pending wake status. */
	AE_READ_4(aesc, AE_WOL_REG);
	if ((if_getflags(aesc->ifp) & IFF_UP) != 0)
		ae_init_locked(aesc);
	AE_UNLOCK(aesc);
	return (0);
}
/*
 * Return the number of free bytes in the circular TxD buffer, derived
 * from the producer (txd_cur) and consumer (txd_ack) offsets.
 */
static unsigned int
ae_tx_avail_size(ae_softc_t *sc)
{

	if (sc->txd_cur < sc->txd_ack)
		return (sc->txd_ack - sc->txd_cur);
	return (AE_TXD_BUFSIZE_DEFAULT - (sc->txd_cur - sc->txd_ack));
}
/*
 * Copy one outgoing frame into the circular TxD buffer and reserve a
 * TxS status slot for it.  Returns 0 on success or ENOBUFS when the
 * ring lacks room for header + payload + alignment padding.
 */
static int
ae_encap(ae_softc_t *sc, struct mbuf **m_head)
{
	struct mbuf *m0;
	ae_txd_t *hdr;
	unsigned int to_end;
	uint16_t len;

	AE_LOCK_ASSERT(sc);

	m0 = *m_head;
	len = m0->m_pkthdr.len;

	/* Header + data + up to 3 bytes of padding must fit. */
	if ((sc->flags & AE_FLAG_TXAVAIL) == 0 ||
	    len + sizeof(ae_txd_t) + 3 > ae_tx_avail_size(sc)) {
#ifdef AE_DEBUG
		if_printf(sc->ifp, "No free Tx available.\n");
#endif
		return ENOBUFS;
	}

	hdr = (ae_txd_t *)(sc->txd_base + sc->txd_cur);
	bzero(hdr, sizeof(*hdr));
	/* Skip header size. */
	sc->txd_cur = (sc->txd_cur + sizeof(ae_txd_t)) % AE_TXD_BUFSIZE_DEFAULT;
	/* Space available to the end of the ring */
	to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur;
	/* Copy the mbuf chain in, wrapping around the buffer end. */
	if (to_end >= len) {
		m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur));
	} else {
		m_copydata(m0, 0, to_end, (caddr_t)(sc->txd_base +
		    sc->txd_cur));
		m_copydata(m0, to_end, len - to_end, (caddr_t)sc->txd_base);
	}

	/*
	 * Set TxD flags and parameters.
	 */
	if ((m0->m_flags & M_VLANTAG) != 0) {
		hdr->vlan = htole16(AE_TXD_VLAN(m0->m_pkthdr.ether_vtag));
		hdr->len = htole16(len | AE_TXD_INSERT_VTAG);
	} else {
		hdr->len = htole16(len);
	}

	/*
	 * Set current TxD position and round up to a 4-byte boundary.
	 */
	sc->txd_cur = ((sc->txd_cur + len + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
	if (sc->txd_cur == sc->txd_ack)
		sc->flags &= ~AE_FLAG_TXAVAIL;
#ifdef AE_DEBUG
	if_printf(sc->ifp, "New txd_cur = %d.\n", sc->txd_cur);
#endif

	/*
	 * Update TxS position and check if there are empty TxS available.
	 */
	sc->txs_base[sc->txs_cur].flags &= ~htole16(AE_TXS_UPDATE);
	sc->txs_cur = (sc->txs_cur + 1) % AE_TXS_COUNT_DEFAULT;
	if (sc->txs_cur == sc->txs_ack)
		sc->flags &= ~AE_FLAG_TXAVAIL;

	/*
	 * Synchronize DMA memory.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
/*
 * if_start entry point: take the softc lock and run the real
 * transmit-queue drain.
 */
static void
ae_start(if_t ifp)
{
	ae_softc_t *aesc = if_getsoftc(ifp);

	AE_LOCK(aesc);
	ae_start_locked(ifp);
	AE_UNLOCK(aesc);
}
/*
 * Drain the interface send queue into the TxD ring.  Requires the
 * softc lock; stops on ring exhaustion (setting OACTIVE) and finally
 * tells the hardware about the new producer index.
 */
static void
ae_start_locked(if_t ifp)
{
	ae_softc_t *sc;
	unsigned int count;
	struct mbuf *m0;
	int error;

	sc = if_getsoftc(ifp);
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	AE_LOCK_ASSERT(sc);

#ifdef AE_DEBUG
	if_printf(ifp, "Start called.\n");
#endif

	/* Nothing to do when not running, already busy, or link is down. */
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->flags & AE_FLAG_LINK) == 0)
		return;

	count = 0;
	while (!if_sendq_empty(ifp)) {
		m0 = if_dequeue(ifp);
		if (m0 == NULL)
			break;	/* Nothing to do. */

		error = ae_encap(sc, &m0);
		if (error != 0) {
			if (m0 != NULL) {
				/* Ring full: requeue and back off. */
				if_sendq_prepend(ifp, m0);
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
#ifdef AE_DEBUG
				if_printf(ifp, "Setting OACTIVE.\n");
#endif
			}
			break;
		}

		count++;
		sc->tx_inproc++;

		/* Bounce a copy of the frame to BPF. */
		ETHER_BPF_MTAP(ifp, m0);

		/* ae_encap() copied the data; the mbuf can go. */
		m_freem(m0);
	}

	if (count > 0) {	/* Something was dequeued. */
		/* Kick the hardware; index is in 4-byte units. */
		AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur / 4);
		sc->wd_timer = AE_TX_TIMEOUT;	/* Load watchdog. */
#ifdef AE_DEBUG
		if_printf(ifp, "%d packets dequeued.\n", count);
		if_printf(ifp, "Tx pos now is %d.\n", sc->txd_cur);
#endif
	}
}
/*
 * Taskqueue handler run after a PHY status change: recompute link
 * state, restart the MACs/DMA engines when the link is usable, and
 * leave them stopped otherwise.
 */
static void
ae_link_task(void *arg, int pending)
{
	ae_softc_t *sc;
	struct mii_data *mii;
	if_t ifp;
	uint32_t val;

	sc = (ae_softc_t *)arg;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	AE_LOCK(sc);

	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);
	if (mii == NULL || ifp == NULL ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		AE_UNLOCK(sc);	/* XXX: could happen? */
		return;
	}

	sc->flags &= ~AE_FLAG_LINK;
	/* Only 10/100 media count as a valid link for this chip. */
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch(IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->flags |= AE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Stop Rx/Tx MACs.
	 */
	ae_stop_rxmac(sc);
	ae_stop_txmac(sc);

	if ((sc->flags & AE_FLAG_LINK) != 0) {
		ae_mac_config(sc);	/* Reprogram duplex first. */

		/*
		 * Restart DMA engines.
		 */
		AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
		AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

		/*
		 * Enable Rx and Tx MACs.
		 */
		val = AE_READ_4(sc, AE_MAC_REG);
		val |= AE_MAC_TX_EN | AE_MAC_RX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}
	AE_UNLOCK(sc);
}
/*
 * Halt the receive side: disable the Rx MAC and the DMA-write engine,
 * then poll until the chip reports both idle.
 */
static void
ae_stop_rxmac(ae_softc_t *sc)
{
	uint32_t val;
	int i;

	AE_LOCK_ASSERT(sc);

	/*
	 * Stop Rx MAC engine.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	if ((val & AE_MAC_RX_EN) != 0) {
		val &= ~AE_MAC_RX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}

	/*
	 * Stop Rx DMA engine.
	 */
	if (AE_READ_1(sc, AE_DMAWRITE_REG) == AE_DMAWRITE_EN)
		AE_WRITE_1(sc, AE_DMAWRITE_REG, 0);

	/*
	 * Wait for IDLE state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		val = AE_READ_4(sc, AE_IDLE_REG);
		if ((val & (AE_IDLE_RXMAC | AE_IDLE_DMAWRITE)) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT)
		device_printf(sc->dev, "timed out while stopping Rx MAC.\n");
}
/*
 * Halt the transmit side: disable the Tx MAC and the DMA-read engine,
 * then poll until the chip reports both idle.
 */
static void
ae_stop_txmac(ae_softc_t *sc)
{
	uint32_t val;
	int i;

	AE_LOCK_ASSERT(sc);

	/*
	 * Stop Tx MAC engine.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	if ((val & AE_MAC_TX_EN) != 0) {
		val &= ~AE_MAC_TX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}

	/*
	 * Stop Tx DMA engine.
	 */
	if (AE_READ_1(sc, AE_DMAREAD_REG) == AE_DMAREAD_EN)
		AE_WRITE_1(sc, AE_DMAREAD_REG, 0);

	/*
	 * Wait for IDLE state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		val = AE_READ_4(sc, AE_IDLE_REG);
		if ((val & (AE_IDLE_TXMAC | AE_IDLE_DMAREAD)) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT)
		device_printf(sc->dev, "timed out while stopping Tx MAC.\n");
}
/*
 * Reprogram the MAC's duplex setting from the currently negotiated
 * media.
 */
static void
ae_mac_config(ae_softc_t *sc)
{
	uint32_t mac;
	struct mii_data *mdata;

	AE_LOCK_ASSERT(sc);

	mdata = device_get_softc(sc->miibus);
	mac = AE_READ_4(sc, AE_MAC_REG) & ~AE_MAC_FULL_DUPLEX;
	/* XXX disable AE_MAC_TX_FLOW_EN? */
	if ((IFM_OPTIONS(mdata->mii_media_active) & IFM_FDX) != 0)
		mac |= AE_MAC_FULL_DUPLEX;
	AE_WRITE_4(sc, AE_MAC_REG, mac);
}
/*
 * Interrupt filter: claim the interrupt if the chip raised one of the
 * events we unmask, disable further interrupts and punt the real
 * processing to the taskqueue.
 */
static int
ae_intr(void *arg)
{
	uint32_t isr;
	ae_softc_t *aesc = arg;

	KASSERT(aesc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	isr = AE_READ_4(aesc, AE_ISR_REG);
	if (isr == 0 || (isr & AE_IMR_DEFAULT) == 0)
		return (FILTER_STRAY);	/* Not our interrupt. */

	/* Disable interrupts. */
	AE_WRITE_4(aesc, AE_ISR_REG, AE_ISR_DISABLE);

	/* Schedule interrupt processing. */
	taskqueue_enqueue(aesc->tq, &aesc->int_task);

	return (FILTER_HANDLED);
}
/*
 * Deferred interrupt handler: acknowledge the cause bits, reset the
 * chip on fatal DMA/PHY errors, otherwise service Tx/Rx completions
 * and re-enable interrupts.
 */
static void
ae_int_task(void *arg, int pending)
{
	ae_softc_t *sc;
	if_t ifp;
	uint32_t val;

	sc = (ae_softc_t *)arg;

	AE_LOCK(sc);

	ifp = sc->ifp;

	val = AE_READ_4(sc, AE_ISR_REG);	/* Read interrupt status. */
	if (val == 0) {
		AE_UNLOCK(sc);
		return;
	}

	/*
	 * Clear interrupts and disable them.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, val | AE_ISR_DISABLE);

#ifdef AE_DEBUG
	if_printf(ifp, "Interrupt received: 0x%08x\n", val);
#endif

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		/* Fatal conditions: reinitialize the whole chip. */
		if ((val & (AE_ISR_DMAR_TIMEOUT | AE_ISR_DMAW_TIMEOUT |
		    AE_ISR_PHY_LINKDOWN)) != 0) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			ae_init_locked(sc);
			AE_UNLOCK(sc);
			return;
		}

		if ((val & AE_ISR_TX_EVENT) != 0)
			ae_tx_intr(sc);

		if ((val & AE_ISR_RX_EVENT) != 0)
			ae_rx_intr(sc);
		/*
		 * Re-enable interrupts.
		 */
		AE_WRITE_4(sc, AE_ISR_REG, 0);

		/* Ring space may have opened up; resume transmission. */
		if ((sc->flags & AE_FLAG_TXAVAIL) != 0) {
			if (!if_sendq_empty(ifp))
				ae_start_locked(ifp);
		}
	}

	AE_UNLOCK(sc);
}
/*
 * Reclaim completed transmissions: walk the TxS status ring, account
 * each finished frame, advance the TxS/TxD consumer indices and clear
 * OACTIVE / the watchdog when appropriate.
 */
static void
ae_tx_intr(ae_softc_t *sc)
{
	if_t ifp;
	ae_txd_t *txd;
	ae_txs_t *txs;
	uint16_t flags;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;

#ifdef AE_DEBUG
	if_printf(ifp, "Tx interrupt occurred.\n");
#endif

	/*
	 * Synchronize DMA buffers so we see hardware-written status.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		txs = sc->txs_base + sc->txs_ack;
		flags = le16toh(txs->flags);
		if ((flags & AE_TXS_UPDATE) == 0)
			break;	/* Hardware has not completed this one. */
		txs->flags = htole16(flags & ~AE_TXS_UPDATE);
		/* Update stats. */
		ae_update_stats_tx(flags, &sc->stats);

		/*
		 * Update TxS position.
		 */
		sc->txs_ack = (sc->txs_ack + 1) % AE_TXS_COUNT_DEFAULT;
		sc->flags |= AE_FLAG_TXAVAIL;

		txd = (ae_txd_t *)(sc->txd_base + sc->txd_ack);
		/* Both sides are little-endian; equality compare is safe. */
		if (txs->len != txd->len)
			device_printf(sc->dev, "Size mismatch: TxS:%d TxD:%d\n",
			    le16toh(txs->len), le16toh(txd->len));

		/*
		 * Move txd ack and align on 4-byte boundary.
		 * NOTE(review): sizeof(ae_txs_t) here presumably equals the
		 * TxD header size skipped in ae_encap() - confirm.
		 */
		sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) +
		    sizeof(ae_txs_t) + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;

		if ((flags & AE_TXS_SUCCESS) != 0)
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		else
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

		sc->tx_inproc--;
	}

	if ((sc->flags & AE_FLAG_TXAVAIL) != 0)
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	if (sc->tx_inproc < 0) {
		if_printf(ifp, "Received stray Tx interrupt(s).\n");
		sc->tx_inproc = 0;
	}
	if (sc->tx_inproc == 0)
		sc->wd_timer = 0;	/* Unarm watchdog. */

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * Deliver one received frame to the network stack.  Called from
 * ae_rx_intr() with the softc lock held; the lock is dropped around
 * if_input() to avoid recursion into the driver.
 */
static void
ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd)
{
	if_t ifp;
	struct mbuf *m;
	unsigned int size;
	uint16_t flags;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	flags = le16toh(rxd->flags);

#ifdef AE_DEBUG
	if_printf(ifp, "Rx interrupt occurred.\n");
#endif
	/* Hardware length includes the FCS; strip it. */
	size = le16toh(rxd->len) - ETHER_CRC_LEN;
	if (size < (ETHER_MIN_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)) {
		if_printf(ifp, "Runt frame received.\n");
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		return;
	}

	/* Copy the payload into a fresh, ETHER_ALIGN'ed mbuf chain. */
	m = m_devget(&rxd->data[0], size, ETHER_ALIGN, ifp, NULL);
	if (m == NULL) {
		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
		return;
	}

	/* Propagate a hardware-stripped VLAN tag, if any. */
	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0 &&
	    (flags & AE_RXD_HAS_VLAN) != 0) {
		m->m_pkthdr.ether_vtag = AE_RXD_VLAN(le16toh(rxd->vlan));
		m->m_flags |= M_VLANTAG;
	}

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	/*
	 * Pass it through.
	 */
	AE_UNLOCK(sc);
	if_input(ifp, m);
	AE_LOCK(sc);
}
/*
 * Service receive completions: walk the RxD ring from rxd_cur,
 * handing successful frames to ae_rxeof() and counting errors, then
 * tell the hardware the new consumer index.
 */
static void
ae_rx_intr(ae_softc_t *sc)
{
	ae_rxd_t *rxd;
	if_t ifp;
	uint16_t flags;
	int count;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;

	/*
	 * Syncronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (count = 0;; count++) {
		rxd = (ae_rxd_t *)(sc->rxd_base + sc->rxd_cur);
		flags = le16toh(rxd->flags);
		if ((flags & AE_RXD_UPDATE) == 0)
			break;	/* Hardware has not filled this slot yet. */
		rxd->flags = htole16(flags & ~AE_RXD_UPDATE);
		/* Update stats. */
		ae_update_stats_rx(flags, &sc->stats);

		/*
		 * Update position index.
		 */
		sc->rxd_cur = (sc->rxd_cur + 1) % AE_RXD_COUNT_DEFAULT;

		if ((flags & AE_RXD_SUCCESS) != 0)
			ae_rxeof(sc, rxd);
		else
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
	}

	if (count > 0) {
		bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Update Rx index.
		 */
		AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
	}
}
/*
 * Per-second transmit watchdog, driven from ae_tick().  When the
 * armed timer expires without the Tx side completing, reset the chip
 * and restart transmission.
 */
static void
ae_watchdog(ae_softc_t *sc)
{
	if_t ifp;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
	AE_LOCK_ASSERT(sc);
	ifp = sc->ifp;

	/* Timer not armed, or armed and not yet expired. */
	if (sc->wd_timer == 0 || --sc->wd_timer != 0)
		return;	/* Nothing to do. */

	if ((sc->flags & AE_FLAG_LINK) == 0)
		if_printf(ifp, "watchdog timeout (missed link).\n");
	else
		if_printf(ifp, "watchdog timeout - resetting.\n");

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	ae_init_locked(sc);
	if (!if_sendq_empty(ifp))
		ae_start_locked(ifp);
}
/*
 * One-second periodic callout: drive the PHY state machine, check the
 * transmit watchdog, and rearm ourselves.
 */
static void
ae_tick(void *arg)
{
	struct mii_data *mdata;
	ae_softc_t *aesc = arg;

	KASSERT(aesc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
	AE_LOCK_ASSERT(aesc);

	mdata = device_get_softc(aesc->miibus);
	mii_tick(mdata);
	ae_watchdog(aesc);	/* Watchdog check. */
	callout_reset(&aesc->tick_ch, hz, ae_tick, aesc);
}
/*
 * Enable or disable hardware VLAN tag stripping to match the
 * interface's IFCAP_VLAN_HWTAGGING capability setting.
 */
static void
ae_rxvlan(ae_softc_t *sc)
{
	uint32_t mac;

	AE_LOCK_ASSERT(sc);

	mac = AE_READ_4(sc, AE_MAC_REG) & ~AE_MAC_RMVLAN_EN;
	if ((if_getcapenable(sc->ifp) & IFCAP_VLAN_HWTAGGING) != 0)
		mac |= AE_MAC_RMVLAN_EN;
	AE_WRITE_4(sc, AE_MAC_REG, mac);
}
/*
 * if_foreach_llmaddr() callback: fold one multicast address into the
 * 64-bit hash filter.  The CRC's top bit selects the register, bits
 * 30-26 select the bit within it.
 */
static u_int
ae_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *mchash = arg;
	uint32_t crc;

	crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
	mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
	return (1);
}
/*
 * Program the receive filter: broadcast/promiscuous/all-multicast
 * mode bits plus the 64-bit multicast hash tables.
 */
static void
ae_rxfilter(ae_softc_t *sc)
{
	if_t ifp;
	uint32_t mchash[2];
	uint32_t rxcfg;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;

	rxcfg = AE_READ_4(sc, AE_MAC_REG);
	rxcfg &= ~(AE_MAC_MCAST_EN | AE_MAC_BCAST_EN | AE_MAC_PROMISC_EN);

	if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
		rxcfg |= AE_MAC_BCAST_EN;
	if ((if_getflags(ifp) & IFF_PROMISC) != 0)
		rxcfg |= AE_MAC_PROMISC_EN;
	if ((if_getflags(ifp) & IFF_ALLMULTI) != 0)
		rxcfg |= AE_MAC_MCAST_EN;

	/*
	 * Wipe old settings.
	 */
	AE_WRITE_4(sc, AE_REG_MHT0, 0);
	AE_WRITE_4(sc, AE_REG_MHT1, 0);

	/* Promiscuous / all-multi: accept everything, hash wide open. */
	if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		AE_WRITE_4(sc, AE_REG_MHT0, 0xffffffff);
		AE_WRITE_4(sc, AE_REG_MHT1, 0xffffffff);
		AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
		return;
	}

	/*
	 * Load multicast tables.
	 */
	bzero(mchash, sizeof(mchash));
	if_foreach_llmaddr(ifp, ae_hash_maddr, &mchash);
	AE_WRITE_4(sc, AE_REG_MHT0, mchash[0]);
	AE_WRITE_4(sc, AE_REG_MHT1, mchash[1]);
	AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
}
/*
 * Interface ioctl handler: MTU, interface flags, multicast lists,
 * media and capability changes.  Anything else goes to ether_ioctl().
 */
static int
ae_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct ae_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if (if_getmtu(ifp) != ifr->ifr_mtu) {
			AE_LOCK(sc);
			if_setmtu(ifp, ifr->ifr_mtu);
			/* Reinitialize so the new MTU takes effect. */
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				ae_init_locked(sc);
			}
			AE_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		AE_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				/* Only filter-related changes need action. */
				if (((if_getflags(ifp) ^ sc->if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					ae_rxfilter(sc);
			} else {
				if ((sc->flags & AE_FLAG_DETACH) == 0)
					ae_init_locked(sc);
			}
		} else {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
				ae_stop(sc);
		}
		/* Remember flags for the next comparison. */
		sc->if_flags = if_getflags(ifp);
		AE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		AE_LOCK(sc);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			ae_rxfilter(sc);
		AE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		AE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
			ae_rxvlan(sc);
		}
		VLAN_CAPABILITIES(ifp);
		AE_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
/*
 * Bring the hardware to a quiescent state: stop the callout, mask and
 * clear interrupts, halt the Rx/Tx MACs and DMA engines, and wait for
 * the chip to report idle.  Requires the softc lock.
 */
static void
ae_stop(ae_softc_t *sc)
{
	if_t ifp;
	int i;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->flags &= ~AE_FLAG_LINK;
	sc->wd_timer = 0;	/* Cancel watchdog. */
	callout_stop(&sc->tick_ch);

	/*
	 * Clear and disable interrupts.
	 */
	AE_WRITE_4(sc, AE_IMR_REG, 0);
	AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

	/*
	 * Stop Rx/Tx MACs.
	 */
	ae_stop_txmac(sc);
	ae_stop_rxmac(sc);

	/*
	 * Stop DMA engines.
	 */
	AE_WRITE_1(sc, AE_DMAREAD_REG, ~AE_DMAREAD_EN);
	AE_WRITE_1(sc, AE_DMAWRITE_REG, ~AE_DMAWRITE_EN);

	/*
	 * Wait for everything to enter idle state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		if (AE_READ_4(sc, AE_IDLE_REG) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT)
		device_printf(sc->dev, "could not enter idle state in stop.\n");
}
/*
 * Accumulate per-frame transmit statistics from a TxS descriptor's
 * status flags.  Each set bit bumps exactly one counter.
 */
static void
ae_update_stats_tx(uint16_t flags, ae_stats_t *stats)
{

	stats->tx_bcast     += (flags & AE_TXS_BCAST) != 0;
	stats->tx_mcast     += (flags & AE_TXS_MCAST) != 0;
	stats->tx_pause     += (flags & AE_TXS_PAUSE) != 0;
	stats->tx_ctrl      += (flags & AE_TXS_CTRL) != 0;
	stats->tx_defer     += (flags & AE_TXS_DEFER) != 0;
	stats->tx_excdefer  += (flags & AE_TXS_EXCDEFER) != 0;
	stats->tx_singlecol += (flags & AE_TXS_SINGLECOL) != 0;
	stats->tx_multicol  += (flags & AE_TXS_MULTICOL) != 0;
	stats->tx_latecol   += (flags & AE_TXS_LATECOL) != 0;
	stats->tx_abortcol  += (flags & AE_TXS_ABORTCOL) != 0;
	stats->tx_underrun  += (flags & AE_TXS_UNDERRUN) != 0;
}
/*
 * Accumulate per-frame receive statistics from an RxD descriptor's
 * status flags.  Each set bit bumps exactly one counter.
 */
static void
ae_update_stats_rx(uint16_t flags, ae_stats_t *stats)
{

	stats->rx_bcast   += (flags & AE_RXD_BCAST) != 0;
	stats->rx_mcast   += (flags & AE_RXD_MCAST) != 0;
	stats->rx_pause   += (flags & AE_RXD_PAUSE) != 0;
	stats->rx_ctrl    += (flags & AE_RXD_CTRL) != 0;
	stats->rx_crcerr  += (flags & AE_RXD_CRCERR) != 0;
	stats->rx_codeerr += (flags & AE_RXD_CODEERR) != 0;
	stats->rx_runt    += (flags & AE_RXD_RUNT) != 0;
	stats->rx_frag    += (flags & AE_RXD_FRAG) != 0;
	stats->rx_trunc   += (flags & AE_RXD_TRUNC) != 0;
	stats->rx_align   += (flags & AE_RXD_ALIGN) != 0;
}
diff --git a/sys/dev/age/if_age.c b/sys/dev/age/if_age.c
index afed720b6e56..6630f2cf782d 100644
--- a/sys/dev/age/if_age.c
+++ b/sys/dev/age/if_age.c
@@ -1,3335 +1,3329 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <dev/age/if_agereg.h>
#include <dev/age/if_agevar.h>
/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
#define AGE_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
MODULE_DEPEND(age, pci, 1, 1, 1);
MODULE_DEPEND(age, ether, 1, 1, 1);
MODULE_DEPEND(age, miibus, 1, 1, 1);
/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
TUNABLE_INT("hw.age.msi_disable", &msi_disable);
TUNABLE_INT("hw.age.msix_disable", &msix_disable);
/*
* Devices supported by this driver.
*/
/*
 * PCI vendor/device IDs supported by this driver; matched by
 * age_probe() and exported via MODULE_PNP_INFO below.
 */
static struct age_dev {
	uint16_t	age_vendorid;	/* PCI vendor ID. */
	uint16_t	age_deviceid;	/* PCI device ID. */
	const char	*age_name;	/* Human-readable description. */
} age_devs[] = {
	{ VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L1,
	    "Attansic Technology Corp, L1 Gigabit Ethernet" },
};
static int age_miibus_readreg(device_t, int, int);
/*
 * Function prototypes for the age(4) (Attansic/Atheros L1 gigabit
 * ethernet) driver.
 */
/* MII bus interface and media handling. */
static int age_miibus_writereg(device_t, int, int, int);
static void age_miibus_statchg(device_t);
static void age_mediastatus(if_t, struct ifmediareq *);
static int age_mediachange(if_t);
/* Newbus methods and attach-time helpers. */
static int age_probe(device_t);
static void age_get_macaddr(struct age_softc *);
static void age_phy_reset(struct age_softc *);
static int age_attach(device_t);
static int age_detach(device_t);
static void age_sysctl_node(struct age_softc *);
/* DMA memory management. */
static void age_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int age_check_boundary(struct age_softc *);
static int age_dma_alloc(struct age_softc *);
static void age_dma_free(struct age_softc *);
/* Power management / shutdown. */
static int age_shutdown(device_t);
static void age_setwol(struct age_softc *);
static int age_suspend(device_t);
static int age_resume(device_t);
/* Data path, interrupts and housekeeping. */
static int age_encap(struct age_softc *, struct mbuf **);
static void age_start(if_t);
static void age_start_locked(if_t);
static void age_watchdog(struct age_softc *);
static int age_ioctl(if_t, u_long, caddr_t);
static void age_mac_config(struct age_softc *);
static void age_link_task(void *, int);
static void age_stats_update(struct age_softc *);
static int age_intr(void *);
static void age_int_task(void *, int);
static void age_txintr(struct age_softc *, int);
static void age_rxeof(struct age_softc *sc, struct rx_rdesc *);
static int age_rxintr(struct age_softc *, int, int);
static void age_tick(void *);
static void age_reset(struct age_softc *);
/* Initialization / teardown of rings and message blocks. */
static void age_init(void *);
static void age_init_locked(struct age_softc *);
static void age_stop(struct age_softc *);
static void age_stop_txmac(struct age_softc *);
static void age_stop_rxmac(struct age_softc *);
static void age_init_tx_ring(struct age_softc *);
static int age_init_rx_ring(struct age_softc *);
static void age_init_rr_ring(struct age_softc *);
static void age_init_cmb_block(struct age_softc *);
static void age_init_smb_block(struct age_softc *);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *age_fixup_rx(if_t, struct mbuf *);
#endif
static int age_newbuf(struct age_softc *, struct age_rxdesc *);
static void age_rxvlan(struct age_softc *);
static void age_rxfilter(struct age_softc *);
/* Sysctl handlers. */
static int sysctl_age_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_age_proc_limit(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS);
/*
 * Newbus device and miibus method dispatch table for age(4).
 */
static device_method_t age_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe, age_probe),
	DEVMETHOD(device_attach, age_attach),
	DEVMETHOD(device_detach, age_detach),
	DEVMETHOD(device_shutdown, age_shutdown),
	DEVMETHOD(device_suspend, age_suspend),
	DEVMETHOD(device_resume, age_resume),
	/* MII interface. */
	DEVMETHOD(miibus_readreg, age_miibus_readreg),
	DEVMETHOD(miibus_writereg, age_miibus_writereg),
	DEVMETHOD(miibus_statchg, age_miibus_statchg),
	{ NULL, NULL }
};
/* Driver declaration and module registration glue. */
static driver_t age_driver = {
	"age",
	age_methods,
	sizeof(struct age_softc)
};

DRIVER_MODULE(age, pci, age_driver, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, age, age_devs,
    nitems(age_devs));
/* age(4) exposes an MII bus for the attached PHY. */
DRIVER_MODULE(miibus, age, miibus_driver, 0, 0);
/* Memory-mapped register window at BAR(0). */
static struct resource_spec age_res_spec_mem[] = {
	{ SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
	{ -1, 0, 0 }
};

/* Legacy INTx interrupt: rid 0, shareable. */
static struct resource_spec age_irq_spec_legacy[] = {
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0, 0 }
};

/* Single MSI message: rid 1. */
static struct resource_spec age_irq_spec_msi[] = {
	{ SYS_RES_IRQ, 1, RF_ACTIVE },
	{ -1, 0, 0 }
};

/* Single MSI-X message: rid 1. */
static struct resource_spec age_irq_spec_msix[] = {
	{ SYS_RES_IRQ, 1, RF_ACTIVE },
	{ -1, 0, 0 }
};
/*
* Read a PHY register on the MII of the L1.
*/
static int
age_miibus_readreg(device_t dev, int phy, int reg)
{
	struct age_softc *sc = device_get_softc(dev);
	uint32_t mdio;
	int n;

	/* Kick off the read, then busy-wait for the MDIO engine. */
	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	n = AGE_PHY_TIMEOUT;
	do {
		DELAY(1);
		mdio = CSR_READ_4(sc, AGE_MDIO);
		if ((mdio & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	} while (--n > 0);

	if (n == 0) {
		device_printf(sc->age_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	/* Data is delivered in the low bits of the MDIO register. */
	return ((mdio & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}
/*
* Write a PHY register on the MII of the L1.
*/
static int
age_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct age_softc *sc = device_get_softc(dev);
	uint32_t mdio;
	int n;

	/* Issue the write, then busy-wait until the MDIO engine is idle. */
	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	n = AGE_PHY_TIMEOUT;
	do {
		DELAY(1);
		mdio = CSR_READ_4(sc, AGE_MDIO);
		if ((mdio & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	} while (--n > 0);

	if (n == 0)
		device_printf(sc->age_dev, "phy write timeout : %d\n", reg);

	return (0);
}
/*
* Callback from MII layer when media changes.
*/
static void
age_miibus_statchg(device_t dev)
{
	struct age_softc *sc;

	sc = device_get_softc(dev);
	/*
	 * Defer the MAC reconfiguration to age_link_task; miibus invokes
	 * this callback, so the actual work is done asynchronously on
	 * the software-interrupt taskqueue.
	 */
	taskqueue_enqueue(taskqueue_swi, &sc->age_link_task);
}
/*
* Get the current interface media status.
*/
static void
age_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
	struct age_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii_sc;

	/* Refresh PHY state under the driver lock and report it. */
	AGE_LOCK(sc);
	mii_sc = device_get_softc(sc->age_miibus);
	mii_pollstat(mii_sc);
	ifmr->ifm_status = mii_sc->mii_media_status;
	ifmr->ifm_active = mii_sc->mii_media_active;
	AGE_UNLOCK(sc);
}
/*
* Set hardware to newly-selected media.
*/
static int
age_mediachange(if_t ifp)
{
	struct age_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii_sc;
	struct mii_softc *phy;
	int rv;

	AGE_LOCK(sc);
	mii_sc = device_get_softc(sc->age_miibus);
	/* Reset every PHY on the bus before applying the new media. */
	LIST_FOREACH(phy, &mii_sc->mii_phys, mii_list)
		PHY_RESET(phy);
	rv = mii_mediachg(mii_sc);
	AGE_UNLOCK(sc);

	return (rv);
}
static int
age_probe(device_t dev)
{
	const struct age_dev *entry;
	uint16_t vid, did;
	int n;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	/* Scan the supported-device table for a vendor/device match. */
	for (n = 0; n < nitems(age_devs); n++) {
		entry = &age_devs[n];
		if (entry->age_vendorid == vid &&
		    entry->age_deviceid == did) {
			device_set_desc(dev, entry->age_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
/*
 * Read the station address out of the PAR0/PAR1 registers, first asking
 * the TWSI engine to reload it from the VPD EEPROM when the PCI VPD
 * capability is present.
 */
static void
age_get_macaddr(struct age_softc *sc)
{
	uint32_t ea[2], reg;
	int i, vpdc;

	reg = CSR_READ_4(sc, AGE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		/* Get VPD stored in TWSI EEPROM. */
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
	}

	if (pci_find_cap(sc->age_dev, PCIY_VPD, &vpdc) == 0) {
		/*
		 * PCI VPD capability found, let TWSI reload EEPROM.
		 * This will set ethernet address of controller.
		 */
		CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) |
		    TWSI_CTRL_SW_LD_START);
		/* Poll up to ~100ms for the reload to complete. */
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, AGE_TWSI_CTRL);
			if ((reg & TWSI_CTRL_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			device_printf(sc->age_dev,
			    "reloading EEPROM timeout!\n");
	} else {
		if (bootverbose)
			device_printf(sc->age_dev,
			    "PCI VPD capability not found!\n");
	}

	/* Assemble the 6-byte address from the two PAR registers. */
	ea[0] = CSR_READ_4(sc, AGE_PAR0);
	ea[1] = CSR_READ_4(sc, AGE_PAR1);
	sc->age_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->age_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->age_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF;
}
/*
 * Reset the PHY and work around its power-saving mode: run the
 * vendor cable-diagnostic test on all four pairs and, if every pair
 * reports "open" (nothing connected), poke vendor-specific debug
 * registers to wake the PHY.  Per the inline comment the sequence is
 * taken from Linux; the debug-register semantics are undocumented.
 */
static void
age_phy_reset(struct age_softc *sc)
{
	uint16_t reg, pn;
	int i, linkup;

	/* Reset PHY. */
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
	DELAY(2000);
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
	DELAY(2000);

	/* Vendor-specific PHY registers used only inside this function. */
#define	ATPHY_DBG_ADDR		0x1D
#define	ATPHY_DBG_DATA		0x1E
#define	ATPHY_CDTC		0x16
#define	PHY_CDTC_ENB		0x0001
#define	PHY_CDTC_POFF		8
#define	ATPHY_CDTS		0x1C
#define	PHY_CDTS_STAT_OK	0x0000
#define	PHY_CDTS_STAT_SHORT	0x0100
#define	PHY_CDTS_STAT_OPEN	0x0200
#define	PHY_CDTS_STAT_INVAL	0x0300
#define	PHY_CDTS_STAT_MASK	0x0300

	/* Check power saving mode. Magic from Linux. */
	age_miibus_writereg(sc->age_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET);
	for (linkup = 0, pn = 0; pn < 4; pn++) {
		/* Start cable diagnostics on pair 'pn'; wait for done. */
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr, ATPHY_CDTC,
		    (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB);
		for (i = 200; i > 0; i--) {
			DELAY(1000);
			reg = age_miibus_readreg(sc->age_dev, sc->age_phyaddr,
			    ATPHY_CDTC);
			if ((reg & PHY_CDTC_ENB) == 0)
				break;
		}
		DELAY(1000);
		reg = age_miibus_readreg(sc->age_dev, sc->age_phyaddr,
		    ATPHY_CDTS);
		/* Any status other than "open" suggests a cable present. */
		if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) {
			linkup++;
			break;
		}
	}

	/* Restart autonegotiation. */
	age_miibus_writereg(sc->age_dev, sc->age_phyaddr, MII_BMCR,
	    BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
	if (linkup == 0) {
		/*
		 * No pair looked connected; write the vendor debug
		 * registers (0x124E then 0x024E after a 1.5s wait).
		 * NOTE(review): exact bit meanings unverified; the
		 * sequence mirrors the Linux driver.
		 */
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x124E);
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 1);
		reg = age_miibus_readreg(sc->age_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA);
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, reg | 0x03);
		/* XXX */
		DELAY(1500 * 1000);
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x024E);
	}

#undef	ATPHY_DBG_ADDR
#undef	ATPHY_DBG_DATA
#undef	ATPHY_CDTC
#undef	PHY_CDTC_ENB
#undef	PHY_CDTC_POFF
#undef	ATPHY_CDTS
#undef	PHY_CDTS_STAT_OK
#undef	PHY_CDTS_STAT_SHORT
#undef	PHY_CDTS_STAT_OPEN
#undef	PHY_CDTS_STAT_INVAL
#undef	PHY_CDTS_STAT_MASK
}
/*
 * Attach: allocate bus resources, reset the PHY and MAC, choose the
 * interrupt delivery scheme (MSI-X preferred over MSI over INTx),
 * allocate DMA'able ring/block memory, create the ifnet, attach the
 * MII bus and install the interrupt handler(s).
 *
 * Fix: after pci_alloc_msix(dev, &msixc) the allocated-message check
 * must look at the updated MSI-X count (msixc), not the MSI count
 * (msic) as before; the old test only worked by accident because
 * AGE_MSI_MESSAGES == AGE_MSIX_MESSAGES.
 *
 * On any failure the partially-built state is unwound via age_detach().
 */
static int
age_attach(device_t dev)
{
	struct age_softc *sc;
	if_t ifp;
	uint16_t burst;
	int error, i, msic, msixc, pmc;

	error = 0;
	sc = device_get_softc(dev);
	sc->age_dev = dev;

	mtx_init(&sc->age_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->age_tick_ch, &sc->age_mtx, 0);
	TASK_INIT(&sc->age_int_task, 0, age_int_task, sc);
	TASK_INIT(&sc->age_link_task, 0, age_link_task, sc);

	/* Map the device. */
	pci_enable_busmaster(dev);
	sc->age_res_spec = age_res_spec_mem;
	sc->age_irq_spec = age_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->age_res_spec, sc->age_res);
	if (error != 0) {
		device_printf(dev, "cannot allocate memory resources.\n");
		goto fail;
	}

	/* Set PHY address. */
	sc->age_phyaddr = AGE_PHY_ADDR;

	/* Reset PHY. */
	age_phy_reset(sc);

	/* Reset the ethernet controller. */
	age_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->age_rev = pci_get_revid(dev);
	sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->age_rev);
		device_printf(dev, "Chip id/revision : 0x%04x\n",
		    sc->age_chip_rev);
	}

	/*
	 * XXX
	 * Uninitialized hardware returns an invalid chip id/revision
	 * as well as 0xFFFFFFFF for Tx/Rx fifo length. It seems that
	 * unplugged cable results in putting hardware into automatic
	 * power down mode which in turn returns invalid chip revision.
	 */
	if (sc->age_chip_rev == 0xFFFF) {
		device_printf(dev,"invalid chip revision : 0x%04x -- "
		    "not initialized?\n", sc->age_chip_rev);
		error = ENXIO;
		goto fail;
	}

	device_printf(dev, "%d Tx FIFO, %d Rx FIFO\n",
	    CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
	    CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));

	/* Allocate IRQ resources. */
	msixc = pci_msix_count(dev);
	msic = pci_msi_count(dev);
	if (bootverbose) {
		device_printf(dev, "MSIX count : %d\n", msixc);
		device_printf(dev, "MSI count : %d\n", msic);
	}

	/* Prefer MSIX over MSI. */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 && msixc == AGE_MSIX_MESSAGES &&
		    pci_alloc_msix(dev, &msixc) == 0) {
			/*
			 * pci_alloc_msix() updated msixc to the number of
			 * messages actually allocated; verify we got them
			 * all.  (Previously msic was tested here, which
			 * was a typo.)
			 */
			if (msixc == AGE_MSIX_MESSAGES) {
				device_printf(dev, "Using %d MSIX messages.\n",
				    msixc);
				sc->age_flags |= AGE_FLAG_MSIX;
				sc->age_irq_spec = age_irq_spec_msix;
			} else
				pci_release_msi(dev);
		}
		if (msi_disable == 0 && (sc->age_flags & AGE_FLAG_MSIX) == 0 &&
		    msic == AGE_MSI_MESSAGES &&
		    pci_alloc_msi(dev, &msic) == 0) {
			if (msic == AGE_MSI_MESSAGES) {
				device_printf(dev, "Using %d MSI messages.\n",
				    msic);
				sc->age_flags |= AGE_FLAG_MSI;
				sc->age_irq_spec = age_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
	}

	error = bus_alloc_resources(dev, sc->age_irq_spec, sc->age_irq);
	if (error != 0) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		goto fail;
	}

	/* Get DMA parameters from PCIe device control register. */
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		sc->age_flags |= AGE_FLAG_PCIE;
		burst = pci_read_config(dev, i + 0x08, 2);
		/* Max read request size. */
		sc->age_dma_rd_burst = ((burst >> 12) & 0x07) <<
		    DMA_CFG_RD_BURST_SHIFT;
		/* Max payload size. */
		sc->age_dma_wr_burst = ((burst >> 5) & 0x07) <<
		    DMA_CFG_WR_BURST_SHIFT;
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((burst >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((burst >> 5) & 0x07));
		}
	} else {
		sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
		sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;
	}

	/* Create device sysctl node. */
	age_sysctl_node(sc);

	if ((error = age_dma_alloc(sc)) != 0)
		goto fail;

	/* Load station address. */
	age_get_macaddr(sc);

	ifp = sc->age_ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, age_ioctl);
	if_setstartfn(ifp, age_start);
	if_setinitfn(ifp, age_init);
	if_setsendqlen(ifp, AGE_TX_RING_CNT - 1);
	if_setsendqready(ifp);
	if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_TSO4);
	if_sethwassist(ifp, AGE_CSUM_FEATURES | CSUM_TSO);
	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
		sc->age_flags |= AGE_FLAG_PMCAP;
		if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST, 0);
	}
	if_setcapenable(ifp, if_getcapabilities(ifp));

	/* Set up MII bus. */
	error = mii_attach(dev, &sc->age_miibus, ifp, age_mediachange,
	    age_mediastatus, BMSR_DEFCAPMASK, sc->age_phyaddr, MII_OFFSET_ANY,
	    0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->age_eaddr);

	/* VLAN capability setup. */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	/* Tell the upper layer(s) we support long frames. */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	/* Create local taskq. */
	sc->age_tq = taskqueue_create_fast("age_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->age_tq);
	if (sc->age_tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->age_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->age_dev));

	/* One handler per allocated interrupt message. */
	if ((sc->age_flags & AGE_FLAG_MSIX) != 0)
		msic = AGE_MSIX_MESSAGES;
	else if ((sc->age_flags & AGE_FLAG_MSI) != 0)
		msic = AGE_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		error = bus_setup_intr(dev, sc->age_irq[i],
		    INTR_TYPE_NET | INTR_MPSAFE, age_intr, NULL, sc,
		    &sc->age_intrhand[i]);
		if (error != 0)
			break;
	}
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->age_tq);
		sc->age_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		age_detach(dev);

	return (error);
}
/*
 * Detach: stop the controller, drain deferred work, detach the ifnet
 * and MII bus, then release DMA memory, interrupts and bus resources.
 * Also serves as the unwind path for a failed age_attach(), so every
 * step tolerates state that was never set up.
 */
static int
age_detach(device_t dev)
{
	struct age_softc *sc;
	if_t ifp;
	int i, msic;

	sc = device_get_softc(dev);

	ifp = sc->age_ifp;
	if (device_is_attached(dev)) {
		AGE_LOCK(sc);
		/* NOTE(review): presumably checked by deferred tasks so
		 * they stop rescheduling themselves — confirm in
		 * age_int_task/age_tick. */
		sc->age_flags |= AGE_FLAG_DETACH;
		age_stop(sc);
		AGE_UNLOCK(sc);
		callout_drain(&sc->age_tick_ch);
		taskqueue_drain(sc->age_tq, &sc->age_int_task);
		taskqueue_drain(taskqueue_swi, &sc->age_link_task);
		ether_ifdetach(ifp);
	}

	if (sc->age_tq != NULL) {
		taskqueue_drain(sc->age_tq, &sc->age_int_task);
		taskqueue_free(sc->age_tq);
		sc->age_tq = NULL;
	}

	if (sc->age_miibus != NULL) {
		device_delete_child(dev, sc->age_miibus);
		sc->age_miibus = NULL;
	}
	bus_generic_detach(dev);
	age_dma_free(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->age_ifp = NULL;
	}

	/* Tear down however many interrupt handlers were installed. */
	if ((sc->age_flags & AGE_FLAG_MSIX) != 0)
		msic = AGE_MSIX_MESSAGES;
	else if ((sc->age_flags & AGE_FLAG_MSI) != 0)
		msic = AGE_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		if (sc->age_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->age_irq[i],
			    sc->age_intrhand[i]);
			sc->age_intrhand[i] = NULL;
		}
	}

	bus_release_resources(dev, sc->age_irq_spec, sc->age_irq);
	if ((sc->age_flags & (AGE_FLAG_MSI | AGE_FLAG_MSIX)) != 0)
		pci_release_msi(dev);
	bus_release_resources(dev, sc->age_res_spec, sc->age_res);
	mtx_destroy(&sc->age_mtx);

	return (0);
}
/*
 * Register the per-device sysctl nodes ("stats", "int_mod",
 * "process_limit") and pull in the matching loader tunables,
 * clamping out-of-range values back to their defaults.
 */
static void
age_sysctl_node(struct age_softc *sc)
{
	int error;

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
	    "stats", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, sysctl_age_stats, "I", "Statistics");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
	    "int_mod", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    &sc->age_int_mod, 0, sysctl_hw_age_int_mod, "I",
	    "age interrupt moderation");

	/* Pull in device tunables. */
	sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->age_dev),
	    device_get_unit(sc->age_dev), "int_mod", &sc->age_int_mod);
	if (error == 0) {
		/* Reject tunable values outside the hardware range. */
		if (sc->age_int_mod < AGE_IM_TIMER_MIN ||
		    sc->age_int_mod > AGE_IM_TIMER_MAX) {
			device_printf(sc->age_dev,
			    "int_mod value out of range; using default: %d\n",
			    AGE_IM_TIMER_DEFAULT);
			sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
		}
	}

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
	    "process_limit", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    &sc->age_process_limit, 0, sysctl_hw_age_proc_limit, "I",
	    "max number of Rx events to process");

	/* Pull in device tunables. */
	sc->age_process_limit = AGE_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->age_dev),
	    device_get_unit(sc->age_dev), "process_limit",
	    &sc->age_process_limit);
	if (error == 0) {
		if (sc->age_process_limit < AGE_PROC_MIN ||
		    sc->age_process_limit > AGE_PROC_MAX) {
			device_printf(sc->age_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", AGE_PROC_DEFAULT);
			sc->age_process_limit = AGE_PROC_DEFAULT;
		}
	}
}
/* Carries the bus address out of the bus_dmamap_load() callback. */
struct age_dmamap_arg {
	bus_addr_t age_busaddr;
};
/*
 * bus_dmamap_load() callback: record the single segment's bus address
 * into the caller-supplied age_dmamap_arg.  Leaves the result untouched
 * on load error.
 */
static void
age_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct age_dmamap_arg *parg;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	parg = arg;
	parg->age_busaddr = segs[0].ds_addr;
}
/*
* Attansic L1 controller have single register to specify high
* address part of DMA blocks. So all descriptor structures and
* DMA memory blocks should have the same high address of given
* 4GB address space(i.e. crossing 4GB boundary is not allowed).
*/
static int
age_check_boundary(struct age_softc *sc)
{
	bus_addr_t start[5], end[5];
	int i;

	/* Gather [start, end) of every ring and message block. */
	start[0] = sc->age_rdata.age_tx_ring_paddr;
	end[0] = start[0] + AGE_TX_RING_SZ;
	start[1] = sc->age_rdata.age_rx_ring_paddr;
	end[1] = start[1] + AGE_RX_RING_SZ;
	start[2] = sc->age_rdata.age_rr_ring_paddr;
	end[2] = start[2] + AGE_RR_RING_SZ;
	start[3] = sc->age_rdata.age_cmb_block_paddr;
	end[3] = start[3] + AGE_CMB_BLOCK_SZ;
	start[4] = sc->age_rdata.age_smb_block_paddr;
	end[4] = start[4] + AGE_SMB_BLOCK_SZ;

	for (i = 0; i < 5; i++) {
		/* A region must not cross a 4GB boundary... */
		if (AGE_ADDR_HI(end[i]) != AGE_ADDR_HI(start[i]))
			return (EFBIG);
		/* ...and all regions must share the same upper 32 bits. */
		if (AGE_ADDR_HI(end[i]) != AGE_ADDR_HI(end[0]))
			return (EFBIG);
	}

	return (0);
}
/*
 * Allocate and map all descriptor rings and message blocks, then the
 * Tx/Rx buffer tags and maps.
 *
 * The L1 keeps a single register for the upper 32 bits of every
 * ring/block address, so all regions must share the same high 32 bits
 * and must not cross a 4GB boundary.  The first pass allows full
 * 64-bit addressing; if age_check_boundary() rejects the layout,
 * everything is freed and allocation restarts at "again:" with the
 * parent tag limited to 32-bit addresses.
 *
 * Returns 0 or a bus_dma error.  On failure the partially-allocated
 * state is left for age_dma_free() (via age_detach()) to release.
 */
static int
age_dma_alloc(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	bus_addr_t lowaddr;
	struct age_dmamap_arg ctx;
	int error, i;

	lowaddr = BUS_SPACE_MAXADDR;
again:
	/* Create parent ring/DMA block tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->age_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_parent_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create Rx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx return ring. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_RR_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_RR_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_RR_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_rr_ring_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create Rx return ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for coalescing message block. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_CMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_CMB_BLOCK_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_CMB_BLOCK_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_cmb_block_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create CMB DMA tag.\n");
		goto fail;
	}

	/* Create tag for statistics message block. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_SMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_SMB_BLOCK_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_SMB_BLOCK_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_smb_block_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create SMB DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map. */
	error = bus_dmamem_alloc(sc->age_cdata.age_tx_ring_tag,
	    (void **)&sc->age_rdata.age_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->age_cdata.age_tx_ring_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_tx_ring_tag,
	    sc->age_cdata.age_tx_ring_map, sc->age_rdata.age_tx_ring,
	    AGE_TX_RING_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->age_rdata.age_tx_ring_paddr = ctx.age_busaddr;
	/* Rx ring */
	error = bus_dmamem_alloc(sc->age_cdata.age_rx_ring_tag,
	    (void **)&sc->age_rdata.age_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->age_cdata.age_rx_ring_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_rx_ring_tag,
	    sc->age_cdata.age_rx_ring_map, sc->age_rdata.age_rx_ring,
	    AGE_RX_RING_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->age_rdata.age_rx_ring_paddr = ctx.age_busaddr;
	/* Rx return ring */
	error = bus_dmamem_alloc(sc->age_cdata.age_rr_ring_tag,
	    (void **)&sc->age_rdata.age_rr_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->age_cdata.age_rr_ring_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_rr_ring_tag,
	    sc->age_cdata.age_rr_ring_map, sc->age_rdata.age_rr_ring,
	    AGE_RR_RING_SZ, age_dmamap_cb,
	    &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	sc->age_rdata.age_rr_ring_paddr = ctx.age_busaddr;
	/* CMB block */
	error = bus_dmamem_alloc(sc->age_cdata.age_cmb_block_tag,
	    (void **)&sc->age_rdata.age_cmb_block,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->age_cdata.age_cmb_block_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for CMB block.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_cmb_block_tag,
	    sc->age_cdata.age_cmb_block_map, sc->age_rdata.age_cmb_block,
	    AGE_CMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for CMB block.\n");
		goto fail;
	}
	sc->age_rdata.age_cmb_block_paddr = ctx.age_busaddr;
	/* SMB block */
	error = bus_dmamem_alloc(sc->age_cdata.age_smb_block_tag,
	    (void **)&sc->age_rdata.age_smb_block,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->age_cdata.age_smb_block_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for SMB block.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_smb_block_tag,
	    sc->age_cdata.age_smb_block_map, sc->age_rdata.age_smb_block,
	    AGE_SMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for SMB block.\n");
		goto fail;
	}
	sc->age_rdata.age_smb_block_paddr = ctx.age_busaddr;

	/*
	 * All ring buffer and DMA blocks should have the same
	 * high address part of 64bit DMA address space.
	 */
	if (lowaddr != BUS_SPACE_MAXADDR_32BIT &&
	    (error = age_check_boundary(sc)) != 0) {
		device_printf(sc->age_dev, "4GB boundary crossed, "
		    "switching to 32bit DMA addressing mode.\n");
		age_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}

	/*
	 * Create Tx/Rx buffer parent tag.
	 * L1 supports full 64bit DMA addressing in Tx/Rx buffers
	 * so it needs separate parent DMA tag.
	 * XXX
	 * It seems enabling 64bit DMA causes data corruption. Limit
	 * DMA address space to 32bit.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->age_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_buffer_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_TSO_MAXSIZE,		/* maxsize */
	    AGE_MAXTXSEGS,		/* nsegments */
	    AGE_TSO_MAXSEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_tx_tag);
	if (error != 0) {
		device_printf(sc->age_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_buffer_tag, /* parent */
	    AGE_RX_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_rx_tag);
	if (error != 0) {
		device_printf(sc->age_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->age_cdata.age_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->age_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0,
	    &sc->age_cdata.age_rx_sparemap)) != 0) {
		device_printf(sc->age_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->age_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}
/*
 * Release every DMA map, DMA'able memory region and tag created by
 * age_dma_alloc().  Safe to call on a partially-initialized softc:
 * each step is guarded by a NULL/0 check and clears the pointer
 * afterwards, so it may run both from age_detach() and from the
 * 64bit-to-32bit retry path inside age_dma_alloc().
 */
static void
age_dma_free(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int i;

	/* Tx buffers */
	if (sc->age_cdata.age_tx_tag != NULL) {
		for (i = 0; i < AGE_TX_RING_CNT; i++) {
			txd = &sc->age_cdata.age_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->age_cdata.age_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->age_cdata.age_tx_tag);
		sc->age_cdata.age_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->age_cdata.age_rx_tag != NULL) {
		for (i = 0; i < AGE_RX_RING_CNT; i++) {
			rxd = &sc->age_cdata.age_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->age_cdata.age_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->age_cdata.age_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->age_cdata.age_rx_tag,
			    sc->age_cdata.age_rx_sparemap);
			sc->age_cdata.age_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->age_cdata.age_rx_tag);
		sc->age_cdata.age_rx_tag = NULL;
	}
	/* Tx ring. */
	if (sc->age_cdata.age_tx_ring_tag != NULL) {
		if (sc->age_rdata.age_tx_ring_paddr != 0)
			bus_dmamap_unload(sc->age_cdata.age_tx_ring_tag,
			    sc->age_cdata.age_tx_ring_map);
		if (sc->age_rdata.age_tx_ring != NULL)
			bus_dmamem_free(sc->age_cdata.age_tx_ring_tag,
			    sc->age_rdata.age_tx_ring,
			    sc->age_cdata.age_tx_ring_map);
		sc->age_rdata.age_tx_ring_paddr = 0;
		sc->age_rdata.age_tx_ring = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_tx_ring_tag);
		sc->age_cdata.age_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->age_cdata.age_rx_ring_tag != NULL) {
		if (sc->age_rdata.age_rx_ring_paddr != 0)
			bus_dmamap_unload(sc->age_cdata.age_rx_ring_tag,
			    sc->age_cdata.age_rx_ring_map);
		if (sc->age_rdata.age_rx_ring != NULL)
			bus_dmamem_free(sc->age_cdata.age_rx_ring_tag,
			    sc->age_rdata.age_rx_ring,
			    sc->age_cdata.age_rx_ring_map);
		sc->age_rdata.age_rx_ring_paddr = 0;
		sc->age_rdata.age_rx_ring = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_rx_ring_tag);
		sc->age_cdata.age_rx_ring_tag = NULL;
	}
	/* Rx return ring. */
	if (sc->age_cdata.age_rr_ring_tag != NULL) {
		if (sc->age_rdata.age_rr_ring_paddr != 0)
			bus_dmamap_unload(sc->age_cdata.age_rr_ring_tag,
			    sc->age_cdata.age_rr_ring_map);
		if (sc->age_rdata.age_rr_ring != NULL)
			bus_dmamem_free(sc->age_cdata.age_rr_ring_tag,
			    sc->age_rdata.age_rr_ring,
			    sc->age_cdata.age_rr_ring_map);
		sc->age_rdata.age_rr_ring_paddr = 0;
		sc->age_rdata.age_rr_ring = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_rr_ring_tag);
		sc->age_cdata.age_rr_ring_tag = NULL;
	}
	/* CMB block */
	if (sc->age_cdata.age_cmb_block_tag != NULL) {
		if (sc->age_rdata.age_cmb_block_paddr != 0)
			bus_dmamap_unload(sc->age_cdata.age_cmb_block_tag,
			    sc->age_cdata.age_cmb_block_map);
		if (sc->age_rdata.age_cmb_block != NULL)
			bus_dmamem_free(sc->age_cdata.age_cmb_block_tag,
			    sc->age_rdata.age_cmb_block,
			    sc->age_cdata.age_cmb_block_map);
		sc->age_rdata.age_cmb_block_paddr = 0;
		sc->age_rdata.age_cmb_block = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_cmb_block_tag);
		sc->age_cdata.age_cmb_block_tag = NULL;
	}
	/* SMB block */
	if (sc->age_cdata.age_smb_block_tag != NULL) {
		if (sc->age_rdata.age_smb_block_paddr != 0)
			bus_dmamap_unload(sc->age_cdata.age_smb_block_tag,
			    sc->age_cdata.age_smb_block_map);
		if (sc->age_rdata.age_smb_block != NULL)
			bus_dmamem_free(sc->age_cdata.age_smb_block_tag,
			    sc->age_rdata.age_smb_block,
			    sc->age_cdata.age_smb_block_map);
		sc->age_rdata.age_smb_block_paddr = 0;
		sc->age_rdata.age_smb_block = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_smb_block_tag);
		sc->age_cdata.age_smb_block_tag = NULL;
	}
	/* Parent tags are destroyed last, after all children. */
	if (sc->age_cdata.age_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->age_cdata.age_buffer_tag);
		sc->age_cdata.age_buffer_tag = NULL;
	}
	if (sc->age_cdata.age_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->age_cdata.age_parent_tag);
		sc->age_cdata.age_parent_tag = NULL;
	}
}
/*
* Make sure the interface is stopped at reboot time.
*/
static int
age_shutdown(device_t dev)
{

	/* The suspend path stops the controller (and arms WOL). */
	return (age_suspend(dev));
}
static void
age_setwol(struct age_softc *sc)
{
if_t ifp;
struct mii_data *mii;
uint32_t reg, pmcs;
uint16_t pmstat;
int aneg, i, pmc;
AGE_LOCK_ASSERT(sc);
if (pci_find_cap(sc->age_dev, PCIY_PMG, &pmc) != 0) {
CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
/*
* No PME capability, PHY power down.
* XXX
* Due to an unknown reason powering down PHY resulted
* in unexpected results such as inaccessbility of
* hardware of freshly rebooted system. Disable
* powering down PHY until I got more information for
* Attansic/Atheros PHY hardwares.
*/
#ifdef notyet
age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
MII_BMCR, BMCR_PDOWN);
#endif
return;
}
ifp = sc->age_ifp;
if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) {
/*
* Note, this driver resets the link speed to 10/100Mbps with
* auto-negotiation but we don't know whether that operation
* would succeed or not as it have no control after powering
* off. If the renegotiation fail WOL may not work. Running
* at 1Gbps will draw more power than 375mA at 3.3V which is
* specified in PCI specification and that would result in
* complete shutdowning power to ethernet controller.
*
* TODO
* Save current negotiated media speed/duplex/flow-control
* to softc and restore the same link again after resuming.
* PHY handling such as power down/resetting to 100Mbps
* may be better handled in suspend method in phy driver.
*/
mii = device_get_softc(sc->age_miibus);
mii_pollstat(mii);
aneg = 0;
if ((mii->mii_media_status & IFM_AVALID) != 0) {
switch IFM_SUBTYPE(mii->mii_media_active) {
case IFM_10_T:
case IFM_100_TX:
goto got_link;
case IFM_1000_T:
aneg++;
default:
break;
}
}
age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
MII_100T2CR, 0);
age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD |
ANAR_10 | ANAR_CSMA);
age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
DELAY(1000);
if (aneg != 0) {
/* Poll link state until age(4) get a 10/100 link. */
for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
mii_pollstat(mii);
if ((mii->mii_media_status & IFM_AVALID) != 0) {
switch (IFM_SUBTYPE(
mii->mii_media_active)) {
case IFM_10_T:
case IFM_100_TX:
age_mac_config(sc);
goto got_link;
default:
break;
}
}
AGE_UNLOCK(sc);
pause("agelnk", hz);
AGE_LOCK(sc);
}
if (i == MII_ANEGTICKS_GIGE)
device_printf(sc->age_dev,
"establishing link failed, "
"WOL may not work!");
}
/*
* No link, force MAC to have 100Mbps, full-duplex link.
* This is the last resort and may/may not work.
*/
mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
age_mac_config(sc);
}
got_link:
pmcs = 0;
if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
CSR_WRITE_4(sc, AGE_WOL_CFG, pmcs);
reg = CSR_READ_4(sc, AGE_MAC_CFG);
reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC);
reg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST);
if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) != 0)
reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) {
reg |= MAC_CFG_RX_ENB;
CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}
/* Request PME. */
pmstat = pci_read_config(sc->age_dev, pmc + PCIR_POWER_STATUS, 2);
pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
pci_write_config(sc->age_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
#ifdef notyet
/* See above for powering down PHY issues. */
if ((if_getcapenable(ifp) & IFCAP_WOL) == 0) {
/* No WOL, PHY power down. */
age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
MII_BMCR, BMCR_PDOWN);
}
#endif
}
/*
 * Suspend method: under the driver lock, stop the MAC and then arm
 * wake-on-LAN/PME state before the bus powers the device down.
 * Always succeeds.
 */
static int
age_suspend(device_t dev)
{
    struct age_softc *sc;

    sc = device_get_softc(dev);
    AGE_LOCK(sc);
    /* Order matters: the MAC must be quiesced before WOL is armed. */
    age_stop(sc);
    age_setwol(sc);
    AGE_UNLOCK(sc);

    return (0);
}
/*
 * Resume method: reset the PHY and, if the interface was up before the
 * suspend, reinitialize the controller. Always succeeds.
 */
static int
age_resume(device_t dev)
{
    struct age_softc *sc;
    if_t ifp;

    sc = device_get_softc(dev);
    AGE_LOCK(sc);
    /* Bring the PHY out of whatever state suspend/WOL left it in. */
    age_phy_reset(sc);
    ifp = sc->age_ifp;
    if ((if_getflags(ifp) & IFF_UP) != 0)
        age_init_locked(sc);
    AGE_UNLOCK(sc);

    return (0);
}
/*
 * Load one outbound packet into the Tx descriptor ring.
 *
 * On success the mbuf chain is DMA-mapped, descriptors (including the
 * special TSO header descriptor layout) are filled, and the producer
 * index is advanced; the caller commits via the mailbox register.
 * On failure the chain may be freed and *m_head set to NULL (caller
 * must check), or left intact with ENOBUFS when the ring is full so
 * the caller can requeue it. Called with the driver lock held.
 */
static int
age_encap(struct age_softc *sc, struct mbuf **m_head)
{
    struct age_txdesc *txd, *txd_last;
    struct tx_desc *desc;
    struct mbuf *m;
    struct ip *ip;
    struct tcphdr *tcp;
    bus_dma_segment_t txsegs[AGE_MAXTXSEGS];
    bus_dmamap_t map;
    uint32_t cflags, hdrlen, ip_off, poff, vtag;
    int error, i, nsegs, prod, si;

    AGE_LOCK_ASSERT(sc);

    M_ASSERTPKTHDR((*m_head));

    m = *m_head;
    ip = NULL;
    tcp = NULL;
    cflags = vtag = 0;
    ip_off = poff = 0;
    if ((m->m_pkthdr.csum_flags & (AGE_CSUM_FEATURES | CSUM_TSO)) != 0) {
        /*
         * L1 requires offset of TCP/UDP payload in its Tx
         * descriptor to perform hardware Tx checksum offload.
         * Additionally, TSO requires IP/TCP header size and
         * modification of IP/TCP header in order to make TSO
         * engine work. This kind of operation takes many CPU
         * cycles on FreeBSD so fast host CPU is needed to get
         * smooth TSO performance.
         */
        struct ether_header *eh;

        if (M_WRITABLE(m) == 0) {
            /* Get a writable copy. */
            m = m_dup(*m_head, M_NOWAIT);
            /* Release original mbufs. */
            m_freem(*m_head);
            if (m == NULL) {
                *m_head = NULL;
                return (ENOBUFS);
            }
            *m_head = m;
        }

        /* Make the Ethernet header contiguous for inspection. */
        ip_off = sizeof(struct ether_header);
        m = m_pullup(m, ip_off);
        if (m == NULL) {
            *m_head = NULL;
            return (ENOBUFS);
        }
        eh = mtod(m, struct ether_header *);
        /*
         * Check if hardware VLAN insertion is off.
         * Additional check for LLC/SNAP frame?
         */
        if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
            /* Software-tagged frame: IP header starts later. */
            ip_off = sizeof(struct ether_vlan_header);
            m = m_pullup(m, ip_off);
            if (m == NULL) {
                *m_head = NULL;
                return (ENOBUFS);
            }
        }
        m = m_pullup(m, ip_off + sizeof(struct ip));
        if (m == NULL) {
            *m_head = NULL;
            return (ENOBUFS);
        }
        ip = (struct ip *)(mtod(m, char *) + ip_off);
        /* poff: offset of the TCP/UDP payload (end of IP header). */
        poff = ip_off + (ip->ip_hl << 2);
        if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
            m = m_pullup(m, poff + sizeof(struct tcphdr));
            if (m == NULL) {
                *m_head = NULL;
                return (ENOBUFS);
            }
            tcp = (struct tcphdr *)(mtod(m, char *) + poff);
            /* Pull the full TCP header including options. */
            m = m_pullup(m, poff + (tcp->th_off << 2));
            if (m == NULL) {
                *m_head = NULL;
                return (ENOBUFS);
            }
            /*
             * L1 requires IP/TCP header size and offset as
             * well as TCP pseudo checksum which complicates
             * TSO configuration. I guess this comes from the
             * adherence to Microsoft NDIS Large Send
             * specification which requires insertion of
             * pseudo checksum by upper stack. The pseudo
             * checksum that NDIS refers to doesn't include
             * TCP payload length so age(4) should recompute
             * the pseudo checksum here. Hopefully this wouldn't
             * be much burden on modern CPUs.
             * Reset IP checksum and recompute TCP pseudo
             * checksum as NDIS specification said.
             */
            /* Re-fetch pointers: m_pullup may have moved data. */
            ip = (struct ip *)(mtod(m, char *) + ip_off);
            tcp = (struct tcphdr *)(mtod(m, char *) + poff);
            ip->ip_sum = 0;
            tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
                ip->ip_dst.s_addr, htons(IPPROTO_TCP));
        }
        *m_head = m;
    }

    si = prod = sc->age_cdata.age_tx_prod;
    txd = &sc->age_cdata.age_txdesc[prod];
    txd_last = txd;
    map = txd->tx_dmamap;

    error = bus_dmamap_load_mbuf_sg(sc->age_cdata.age_tx_tag, map,
        *m_head, txsegs, &nsegs, 0);
    if (error == EFBIG) {
        /* Too many segments: compact the chain and retry once. */
        m = m_collapse(*m_head, M_NOWAIT, AGE_MAXTXSEGS);
        if (m == NULL) {
            m_freem(*m_head);
            *m_head = NULL;
            return (ENOMEM);
        }
        *m_head = m;
        error = bus_dmamap_load_mbuf_sg(sc->age_cdata.age_tx_tag, map,
            *m_head, txsegs, &nsegs, 0);
        if (error != 0) {
            m_freem(*m_head);
            *m_head = NULL;
            return (error);
        }
    } else if (error != 0)
        return (error);
    if (nsegs == 0) {
        m_freem(*m_head);
        *m_head = NULL;
        return (EIO);
    }

    /* Check descriptor overrun. */
    if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) {
        /* Leave the mbuf intact so the caller can requeue it. */
        bus_dmamap_unload(sc->age_cdata.age_tx_tag, map);
        return (ENOBUFS);
    }

    m = *m_head;
    /* Configure VLAN hardware tag insertion. */
    if ((m->m_flags & M_VLANTAG) != 0) {
        vtag = AGE_TX_VLAN_TAG(m->m_pkthdr.ether_vtag);
        vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
        cflags |= AGE_TD_INSERT_VLAN_TAG;
    }

    desc = NULL;
    i = 0;
    if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
        /* Request TSO and set MSS. */
        cflags |= AGE_TD_TSO_IPV4;
        cflags |= AGE_TD_IPCSUM | AGE_TD_TCPCSUM;
        cflags |= ((uint32_t)m->m_pkthdr.tso_segsz <<
            AGE_TD_TSO_MSS_SHIFT);
        /* Set IP/TCP header size. */
        cflags |= ip->ip_hl << AGE_TD_IPHDR_LEN_SHIFT;
        cflags |= tcp->th_off << AGE_TD_TSO_TCPHDR_LEN_SHIFT;
        /*
         * L1 requires the first buffer should only hold IP/TCP
         * header data. TCP payload should be handled in other
         * descriptors.
         */
        hdrlen = poff + (tcp->th_off << 2);
        desc = &sc->age_rdata.age_tx_ring[prod];
        desc->addr = htole64(txsegs[0].ds_addr);
        desc->len = htole32(AGE_TX_BYTES(hdrlen) | vtag);
        desc->flags = htole32(cflags);
        sc->age_cdata.age_tx_cnt++;
        AGE_DESC_INC(prod, AGE_TX_RING_CNT);
        if (m->m_len - hdrlen > 0) {
            /* Handle remaining payload of the 1st fragment. */
            desc = &sc->age_rdata.age_tx_ring[prod];
            desc->addr = htole64(txsegs[0].ds_addr + hdrlen);
            desc->len = htole32(AGE_TX_BYTES(m->m_len - hdrlen) |
                vtag);
            desc->flags = htole32(cflags);
            sc->age_cdata.age_tx_cnt++;
            AGE_DESC_INC(prod, AGE_TX_RING_CNT);
        }
        /* Handle remaining fragments. */
        i = 1;
    } else if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
        /* Configure Tx IP/TCP/UDP checksum offload. */
        cflags |= AGE_TD_CSUM;
        if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
            cflags |= AGE_TD_TCPCSUM;
        if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
            cflags |= AGE_TD_UDPCSUM;
        /* Set checksum start offset. */
        cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
        /* Set checksum insertion position of TCP/UDP. */
        cflags |= ((poff + m->m_pkthdr.csum_data) <<
            AGE_TD_CSUM_XSUMOFFSET_SHIFT);
    }
    /* One descriptor per remaining DMA segment. */
    for (; i < nsegs; i++) {
        desc = &sc->age_rdata.age_tx_ring[prod];
        desc->addr = htole64(txsegs[i].ds_addr);
        desc->len = htole32(AGE_TX_BYTES(txsegs[i].ds_len) | vtag);
        desc->flags = htole32(cflags);
        sc->age_cdata.age_tx_cnt++;
        AGE_DESC_INC(prod, AGE_TX_RING_CNT);
    }
    /* Update producer index. */
    sc->age_cdata.age_tx_prod = prod;

    /* Set EOP on the last descriptor. */
    prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT;
    desc = &sc->age_rdata.age_tx_ring[prod];
    desc->flags |= htole32(AGE_TD_EOP);

    /* Lastly set TSO header and modify IP/TCP header for TSO operation. */
    if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
        desc = &sc->age_rdata.age_tx_ring[si];
        desc->flags |= htole32(AGE_TD_TSO_HDR);
    }

    /*
     * Swap dmamap of the first and the last so the map that was
     * actually loaded ends up on the descriptor carrying the mbuf.
     */
    txd = &sc->age_cdata.age_txdesc[prod];
    map = txd_last->tx_dmamap;
    txd_last->tx_dmamap = txd->tx_dmamap;
    txd->tx_dmamap = map;
    txd->tx_m = m;

    /* Sync descriptors. */
    bus_dmamap_sync(sc->age_cdata.age_tx_tag, map, BUS_DMASYNC_PREWRITE);
    bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
        sc->age_cdata.age_tx_ring_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    return (0);
}
/*
 * Ifnet transmit entry point: take the driver lock and defer to the
 * locked start routine.
 */
static void
age_start(if_t ifp)
{
    struct age_softc *sc = if_getsoftc(ifp);

    AGE_LOCK(sc);
    age_start_locked(ifp);
    AGE_UNLOCK(sc);
}
/*
 * Drain the interface send queue into the Tx descriptor ring.
 * Runs with the driver lock held; does nothing unless the interface
 * is running, not already flow-blocked, and the link is up.
 */
static void
age_start_locked(if_t ifp)
{
    struct age_softc *sc = if_getsoftc(ifp);
    struct mbuf *m;
    int queued = 0;

    AGE_LOCK_ASSERT(sc);

    if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING || (sc->age_flags & AGE_FLAG_LINK) == 0)
        return;

    while (!if_sendq_empty(ifp)) {
        m = if_dequeue(ifp);
        if (m == NULL)
            break;
        /*
         * Pack the data into the transmit ring. If we
         * don't have room, set the OACTIVE flag and wait
         * for the NIC to drain the ring.
         */
        if (age_encap(sc, &m) != 0) {
            if (m == NULL)
                break;
            if_sendq_prepend(ifp, m);
            if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
            break;
        }
        queued++;
        /* Hand a copy of the frame to any BPF listener. */
        ETHER_BPF_MTAP(ifp, m);
    }

    if (queued > 0) {
        /* Kick the hardware and arm the Tx watchdog. */
        AGE_COMMIT_MBOX(sc);
        sc->age_watchdog_timer = AGE_TX_TIMEOUT;
    }
}
/*
 * Per-second Tx watchdog, called from age_tick() with the lock held.
 * Distinguishes three cases: lost link (full reinit), all descriptors
 * actually completed (just restart transmission), and a genuine stall
 * (reinit and restart).
 */
static void
age_watchdog(struct age_softc *sc)
{
    if_t ifp;

    AGE_LOCK_ASSERT(sc);

    /*
     * Timer disarmed (== 0), or still counting down after the
     * pre-decrement (non-zero result): nothing to do yet.
     */
    if (sc->age_watchdog_timer == 0 || --sc->age_watchdog_timer)
        return;

    ifp = sc->age_ifp;
    if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
        if_printf(sc->age_ifp, "watchdog timeout (missed link)\n");
        if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
        /* Clear RUNNING so age_init_locked() does a full reinit. */
        if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
        age_init_locked(sc);
        return;
    }
    if (sc->age_cdata.age_tx_cnt == 0) {
        /* Ring is empty: we merely missed a Tx completion interrupt. */
        if_printf(sc->age_ifp,
            "watchdog timeout (missed Tx interrupts) -- recovering\n");
        if (!if_sendq_empty(ifp))
            age_start_locked(ifp);
        return;
    }
    if_printf(sc->age_ifp, "watchdog timeout\n");
    if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
    if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
    age_init_locked(sc);
    if (!if_sendq_empty(ifp))
        age_start_locked(ifp);
}
/*
 * Ifnet ioctl handler: MTU changes, interface flag changes, multicast
 * filter updates, media selection, and capability toggles. Anything
 * unrecognized is passed to ether_ioctl(). Returns 0 or an errno.
 */
static int
age_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
    struct age_softc *sc;
    struct ifreq *ifr;
    struct mii_data *mii;
    uint32_t reg;
    int error, mask;

    sc = if_getsoftc(ifp);
    ifr = (struct ifreq *)data;
    error = 0;
    switch (cmd) {
    case SIOCSIFMTU:
        if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > AGE_JUMBO_MTU)
            error = EINVAL;
        else if (if_getmtu(ifp) != ifr->ifr_mtu) {
            AGE_LOCK(sc);
            if_setmtu(ifp, ifr->ifr_mtu);
            /* A new MTU requires a full reinitialization. */
            if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
                if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
                age_init_locked(sc);
            }
            AGE_UNLOCK(sc);
        }
        break;
    case SIOCSIFFLAGS:
        AGE_LOCK(sc);
        if ((if_getflags(ifp) & IFF_UP) != 0) {
            if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
                /*
                 * Only reprogram the Rx filter when the
                 * promiscuous/allmulti bits actually changed.
                 */
                if (((if_getflags(ifp) ^ sc->age_if_flags)
                    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
                    age_rxfilter(sc);
            } else {
                if ((sc->age_flags & AGE_FLAG_DETACH) == 0)
                    age_init_locked(sc);
            }
        } else {
            if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
                age_stop(sc);
        }
        /* Remember flags so the next call can detect changes. */
        sc->age_if_flags = if_getflags(ifp);
        AGE_UNLOCK(sc);
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        AGE_LOCK(sc);
        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
            age_rxfilter(sc);
        AGE_UNLOCK(sc);
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        /* Media handling is delegated to the MII layer. */
        mii = device_get_softc(sc->age_miibus);
        error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
        break;
    case SIOCSIFCAP:
        AGE_LOCK(sc);
        /* mask holds the capability bits the caller wants toggled. */
        mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
        if ((mask & IFCAP_TXCSUM) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
            if_togglecapenable(ifp, IFCAP_TXCSUM);
            if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
                if_sethwassistbits(ifp, AGE_CSUM_FEATURES, 0);
            else
                if_sethwassistbits(ifp, 0, AGE_CSUM_FEATURES);
        }
        if ((mask & IFCAP_RXCSUM) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) {
            if_togglecapenable(ifp, IFCAP_RXCSUM);
            /* Mirror the toggle into the MAC's Rx checksum bit. */
            reg = CSR_READ_4(sc, AGE_MAC_CFG);
            reg &= ~MAC_CFG_RXCSUM_ENB;
            if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
                reg |= MAC_CFG_RXCSUM_ENB;
            CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
        }
        if ((mask & IFCAP_TSO4) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
            if_togglecapenable(ifp, IFCAP_TSO4);
            if ((if_getcapenable(ifp) & IFCAP_TSO4) != 0)
                if_sethwassistbits(ifp, CSUM_TSO, 0);
            else
                if_sethwassistbits(ifp, 0, CSUM_TSO);
        }
        if ((mask & IFCAP_WOL_MCAST) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_WOL_MCAST) != 0)
            if_togglecapenable(ifp, IFCAP_WOL_MCAST);
        if ((mask & IFCAP_WOL_MAGIC) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0)
            if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
        if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM) != 0)
            if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
        if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
            if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
        if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
            if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
            /* HWTSO over VLANs requires hardware tagging. */
            if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
                if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO);
            age_rxvlan(sc);
        }
        AGE_UNLOCK(sc);
        VLAN_CAPABILITIES(ifp);
        break;
    default:
        error = ether_ioctl(ifp, cmd, data);
        break;
    }

    return (error);
}
/*
 * Program the MAC with the speed/duplex (and, when enabled via the
 * "notyet" block, the flow-control pause settings) resolved by the
 * MII/PHY layer. Called with the driver lock held.
 */
static void
age_mac_config(struct age_softc *sc)
{
    struct mii_data *mii;
    uint32_t reg;

    AGE_LOCK_ASSERT(sc);

    mii = device_get_softc(sc->age_miibus);
    reg = CSR_READ_4(sc, AGE_MAC_CFG);
    reg &= ~MAC_CFG_FULL_DUPLEX;
    reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC);
    reg &= ~MAC_CFG_SPEED_MASK;
    /* Reprogram MAC with resolved speed/duplex. */
    switch (IFM_SUBTYPE(mii->mii_media_active)) {
    case IFM_10_T:
    case IFM_100_TX:
        reg |= MAC_CFG_SPEED_10_100;
        break;
    case IFM_1000_T:
        reg |= MAC_CFG_SPEED_1000;
        break;
    default:
        /* Unresolved media: leave the speed field cleared. */
        break;
    }
    if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
        reg |= MAC_CFG_FULL_DUPLEX;
#ifdef notyet
        if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
            reg |= MAC_CFG_TX_FC;
        if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
            reg |= MAC_CFG_RX_FC;
#endif
    }
    CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}
/*
 * Taskqueue handler for MII link-state changes: recompute the LINK
 * flag, quiesce both MACs, and (if a link is present) reprogram the
 * MAC and restart the DMA engines and Tx/Rx MACs.
 */
static void
age_link_task(void *arg, int pending)
{
    struct age_softc *sc;
    struct mii_data *mii;
    if_t ifp;
    uint32_t reg;

    sc = (struct age_softc *)arg;

    AGE_LOCK(sc);
    mii = device_get_softc(sc->age_miibus);
    ifp = sc->age_ifp;
    if (mii == NULL || ifp == NULL ||
        (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
        AGE_UNLOCK(sc);
        return;
    }

    /* Recompute the link flag from the current MII status. */
    sc->age_flags &= ~AGE_FLAG_LINK;
    if ((mii->mii_media_status & IFM_AVALID) != 0) {
        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        case IFM_10_T:
        case IFM_100_TX:
        case IFM_1000_T:
            sc->age_flags |= AGE_FLAG_LINK;
            break;
        default:
            break;
        }
    }

    /* Stop Rx/Tx MACs. */
    age_stop_rxmac(sc);
    age_stop_txmac(sc);

    /* Program MACs with resolved speed/duplex/flow-control. */
    if ((sc->age_flags & AGE_FLAG_LINK) != 0) {
        age_mac_config(sc);
        reg = CSR_READ_4(sc, AGE_MAC_CFG);
        /* Restart DMA engine and Tx/Rx MAC. */
        CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) |
            DMA_CFG_RD_ENB | DMA_CFG_WR_ENB);
        reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
        CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
    }

    AGE_UNLOCK(sc);
}
/*
 * Harvest the hardware statistics message block (SMB): accumulate
 * every counter into the softc statistics, feed the ifnet counters,
 * then clear the "updated" flag to tell hardware the block was read.
 * The SMB counters are clear-on-update snapshots, so each value is
 * added (not assigned). Called with the driver lock held.
 */
static void
age_stats_update(struct age_softc *sc)
{
    struct age_stats *stat;
    struct smb *smb;
    if_t ifp;

    AGE_LOCK_ASSERT(sc);

    stat = &sc->age_stat;

    bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
        sc->age_cdata.age_smb_block_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

    smb = sc->age_rdata.age_smb_block;
    /* Hardware hasn't posted new statistics since the last harvest. */
    if (smb->updated == 0)
        return;

    ifp = sc->age_ifp;
    /* Rx stats. */
    stat->rx_frames += smb->rx_frames;
    stat->rx_bcast_frames += smb->rx_bcast_frames;
    stat->rx_mcast_frames += smb->rx_mcast_frames;
    stat->rx_pause_frames += smb->rx_pause_frames;
    stat->rx_control_frames += smb->rx_control_frames;
    stat->rx_crcerrs += smb->rx_crcerrs;
    stat->rx_lenerrs += smb->rx_lenerrs;
    stat->rx_bytes += smb->rx_bytes;
    stat->rx_runts += smb->rx_runts;
    stat->rx_fragments += smb->rx_fragments;
    stat->rx_pkts_64 += smb->rx_pkts_64;
    stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
    stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
    stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
    stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
    stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
    stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
    stat->rx_pkts_truncated += smb->rx_pkts_truncated;
    stat->rx_fifo_oflows += smb->rx_fifo_oflows;
    stat->rx_desc_oflows += smb->rx_desc_oflows;
    stat->rx_alignerrs += smb->rx_alignerrs;
    stat->rx_bcast_bytes += smb->rx_bcast_bytes;
    stat->rx_mcast_bytes += smb->rx_mcast_bytes;
    stat->rx_pkts_filtered += smb->rx_pkts_filtered;

    /* Tx stats. */
    stat->tx_frames += smb->tx_frames;
    stat->tx_bcast_frames += smb->tx_bcast_frames;
    stat->tx_mcast_frames += smb->tx_mcast_frames;
    stat->tx_pause_frames += smb->tx_pause_frames;
    stat->tx_excess_defer += smb->tx_excess_defer;
    stat->tx_control_frames += smb->tx_control_frames;
    stat->tx_deferred += smb->tx_deferred;
    stat->tx_bytes += smb->tx_bytes;
    stat->tx_pkts_64 += smb->tx_pkts_64;
    stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
    stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
    stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
    stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
    stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
    stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
    stat->tx_single_colls += smb->tx_single_colls;
    stat->tx_multi_colls += smb->tx_multi_colls;
    stat->tx_late_colls += smb->tx_late_colls;
    stat->tx_excess_colls += smb->tx_excess_colls;
    stat->tx_underrun += smb->tx_underrun;
    stat->tx_desc_underrun += smb->tx_desc_underrun;
    stat->tx_lenerrs += smb->tx_lenerrs;
    stat->tx_pkts_truncated += smb->tx_pkts_truncated;
    stat->tx_bcast_bytes += smb->tx_bcast_bytes;
    stat->tx_mcast_bytes += smb->tx_mcast_bytes;

    /* Update counters in ifnet. */
    if_inc_counter(ifp, IFCOUNTER_OPACKETS, smb->tx_frames);

    /* Excess collisions count as a full retry burst each. */
    if_inc_counter(ifp, IFCOUNTER_COLLISIONS, smb->tx_single_colls +
        smb->tx_multi_colls + smb->tx_late_colls +
        smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT);

    if_inc_counter(ifp, IFCOUNTER_OERRORS, smb->tx_excess_colls +
        smb->tx_late_colls + smb->tx_underrun +
        smb->tx_pkts_truncated);

    if_inc_counter(ifp, IFCOUNTER_IPACKETS, smb->rx_frames);
    if_inc_counter(ifp, IFCOUNTER_IERRORS, smb->rx_crcerrs +
        smb->rx_lenerrs + smb->rx_runts + smb->rx_pkts_truncated +
        smb->rx_fifo_oflows + smb->rx_desc_oflows +
        smb->rx_alignerrs);

    /* Update done, clear. */
    smb->updated = 0;

    bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
        sc->age_cdata.age_smb_block_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * Fast interrupt filter: claim the interrupt if any bit we care about
 * is asserted, mask further interrupts, and defer the real work to
 * the taskqueue handler. (A status of zero trivially fails the
 * AGE_INTRS mask test, so a single check suffices.)
 */
static int
age_intr(void *arg)
{
    struct age_softc *sc = arg;
    uint32_t status;

    status = CSR_READ_4(sc, AGE_INTR_STATUS);
    if ((status & AGE_INTRS) == 0)
        return (FILTER_STRAY);

    /* Disable interrupts until age_int_task() re-enables them. */
    CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT);
    taskqueue_enqueue(sc->age_tq, &sc->age_int_task);

    return (FILTER_HANDLED);
}
/*
 * Deferred interrupt handler. Reads the coalescing message block
 * (CMB) for the interrupt status and Tx/Rx indices, services Tx/Rx
 * completions, statistics, and DMA errors, then either requeues
 * itself (more work pending) or re-enables interrupts.
 */
static void
age_int_task(void *arg, int pending)
{
    struct age_softc *sc;
    if_t ifp;
    struct cmb *cmb;
    uint32_t status;

    sc = (struct age_softc *)arg;

    AGE_LOCK(sc);

    bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
        sc->age_cdata.age_cmb_block_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    cmb = sc->age_rdata.age_cmb_block;
    status = le32toh(cmb->intr_status);
    /* Pretend an Rx interrupt if the last pass hit its work limit. */
    if (sc->age_morework != 0)
        status |= INTR_CMB_RX;
    if ((status & AGE_INTRS) == 0)
        goto done;

    /* Extract the hardware's Tx consumer and Rx-return producer. */
    sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >>
        TPD_CONS_SHIFT;
    sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >>
        RRD_PROD_SHIFT;
    /* Let hardware know CMB was served. */
    cmb->intr_status = 0;
    bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
        sc->age_cdata.age_cmb_block_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    ifp = sc->age_ifp;
    if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
        if ((status & INTR_CMB_RX) != 0)
            sc->age_morework = age_rxintr(sc, sc->age_rr_prod,
                sc->age_process_limit);
        if ((status & INTR_CMB_TX) != 0)
            age_txintr(sc, sc->age_tpd_cons);
        if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) != 0) {
            if ((status & INTR_DMA_RD_TO_RST) != 0)
                device_printf(sc->age_dev,
                    "DMA read error! -- resetting\n");
            if ((status & INTR_DMA_WR_TO_RST) != 0)
                device_printf(sc->age_dev,
                    "DMA write error! -- resetting\n");
            /* DMA errors are fatal; reinitialize the controller. */
            if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
            age_init_locked(sc);
        }
        if (!if_sendq_empty(ifp))
            age_start_locked(ifp);
        if ((status & INTR_SMB) != 0)
            age_stats_update(sc);
    }

    /* Check whether CMB was updated while serving Tx/Rx/SMB handler. */
    bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
        sc->age_cdata.age_cmb_block_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    status = le32toh(cmb->intr_status);
    if (sc->age_morework != 0 || (status & AGE_INTRS) != 0) {
        /* More work: requeue ourselves, interrupts stay masked. */
        taskqueue_enqueue(sc->age_tq, &sc->age_int_task);
        AGE_UNLOCK(sc);
        return;
    }

done:
    /* Re-enable interrupts. */
    CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
    AGE_UNLOCK(sc);
}
/*
 * Reclaim completed Tx descriptors up to the hardware consumer index
 * tpd_cons: free the transmitted mbufs, clear OACTIVE so transmission
 * can resume, and disarm the watchdog once the ring drains.
 * Called with the driver lock held.
 */
static void
age_txintr(struct age_softc *sc, int tpd_cons)
{
    if_t ifp;
    struct age_txdesc *txd;
    int cons, prog;

    AGE_LOCK_ASSERT(sc);

    ifp = sc->age_ifp;

    bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
        sc->age_cdata.age_tx_ring_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

    /*
     * Go through our Tx list and free mbufs for those
     * frames which have been transmitted.
     */
    cons = sc->age_cdata.age_tx_cons;
    for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) {
        if (sc->age_cdata.age_tx_cnt <= 0)
            break;
        prog++;
        /* Ring has free space again; allow new transmissions. */
        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
        sc->age_cdata.age_tx_cnt--;
        txd = &sc->age_cdata.age_txdesc[cons];
        /*
         * Clear Tx descriptors, it's not required but would
         * help debugging in case of Tx issues.
         */
        txd->tx_desc->addr = 0;
        txd->tx_desc->len = 0;
        txd->tx_desc->flags = 0;

        /* Only the EOP descriptor of a frame carries the mbuf. */
        if (txd->tx_m == NULL)
            continue;
        /* Reclaim transmitted mbufs. */
        bus_dmamap_sync(sc->age_cdata.age_tx_tag, txd->tx_dmamap,
            BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sc->age_cdata.age_tx_tag, txd->tx_dmamap);
        m_freem(txd->tx_m);
        txd->tx_m = NULL;
    }

    if (prog > 0) {
        sc->age_cdata.age_tx_cons = cons;

        /*
         * Unarm watchdog timer only when there are no pending
         * Tx descriptors in queue.
         */
        if (sc->age_cdata.age_tx_cnt == 0)
            sc->age_watchdog_timer = 0;
        bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
            sc->age_cdata.age_tx_ring_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    }
}
#ifndef __NO_STRICT_ALIGNMENT
/*
 * Realign a received frame on strict-alignment architectures.
 *
 * The hardware DMAs frames to a position that leaves the IP header
 * misaligned; this shifts the data 6 bytes (three uint16_t words)
 * toward the buffer head so the payload after the 14-byte Ethernet
 * header lands on a 32-bit boundary. Assumes 6 bytes of headroom
 * exist before m_data -- TODO confirm against age_newbuf().
 * Returns the (possibly new) head mbuf, or NULL on allocation
 * failure (the chain is then freed and IQDROPS is bumped).
 */
static struct mbuf *
age_fixup_rx(if_t ifp, struct mbuf *m)
{
    struct mbuf *n;
    int i;
    uint16_t *src, *dst;

    src = mtod(m, uint16_t *);
    dst = src - 3;

    if (m->m_next == NULL) {
        /* Single mbuf: slide the whole frame back 6 bytes in place. */
        for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
            *dst++ = *src++;
        m->m_data -= 6;
        return (m);
    }
    /*
     * Append a new mbuf to received mbuf chain and copy ethernet
     * header from the mbuf chain. This can save lots of CPU
     * cycles for jumbo frame.
     */
    MGETHDR(n, M_NOWAIT, MT_DATA);
    if (n == NULL) {
        if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
        m_freem(m);
        return (NULL);
    }
    bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
    m->m_data += ETHER_HDR_LEN;
    m->m_len -= ETHER_HDR_LEN;
    n->m_len = ETHER_HDR_LEN;
    /* Transfer the packet header (len, rcvif, flags) to the new head. */
    M_MOVE_PKTHDR(n, m);
    n->m_next = m;
    return (n);
}
#endif
/*
 * Receive one frame described by the Rx return descriptor rxrd.
 *
 * A frame may span several AGE_RX_BUF_SIZE buffers; each consumed
 * buffer is replaced via age_newbuf() and chained onto
 * age_rxhead/age_rxtail. On the final segment the chain is finished
 * (length fixed up, CRC stripped, checksum/VLAN metadata set) and
 * passed to the network stack with the driver lock dropped.
 * Called with the driver lock held.
 */
static void
age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd)
{
    struct age_rxdesc *rxd;
    if_t ifp;
    struct mbuf *mp, *m;
    uint32_t status, index, vtag;
    int count, nsegs;
    int rx_cons;

    AGE_LOCK_ASSERT(sc);

    ifp = sc->age_ifp;
    status = le32toh(rxrd->flags);
    index = le32toh(rxrd->index);
    rx_cons = AGE_RX_CONS(index);
    nsegs = AGE_RX_NSEGS(index);

    sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len));
    if ((status & (AGE_RRD_ERROR | AGE_RRD_LENGTH_NOK)) != 0) {
        /*
         * We want to pass the following frames to upper
         * layer regardless of error status of Rx return
         * ring.
         *
         *  o IP/TCP/UDP checksum is bad.
         *  o frame length and protocol specific length
         *    does not match.
         */
        status |= AGE_RRD_IPCSUM_NOK | AGE_RRD_TCP_UDPCSUM_NOK;
        if ((status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE |
            AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0)
            return;
    }

    for (count = 0; count < nsegs; count++,
        AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) {
        rxd = &sc->age_cdata.age_rxdesc[rx_cons];
        mp = rxd->rx_m;
        /* Add a new receive buffer to the ring. */
        if (age_newbuf(sc, rxd) != 0) {
            if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
            /* Reuse Rx buffers. */
            if (sc->age_cdata.age_rxhead != NULL)
                m_freem(sc->age_cdata.age_rxhead);
            break;
        }

        /*
         * Assume we've received a full sized frame.
         * Actual size is fixed when we encounter the end of
         * multi-segmented frame.
         */
        mp->m_len = AGE_RX_BUF_SIZE;

        /* Chain received mbufs. */
        if (sc->age_cdata.age_rxhead == NULL) {
            sc->age_cdata.age_rxhead = mp;
            sc->age_cdata.age_rxtail = mp;
        } else {
            mp->m_flags &= ~M_PKTHDR;
            /* Remember the previous tail for CRC trimming below. */
            sc->age_cdata.age_rxprev_tail =
                sc->age_cdata.age_rxtail;
            sc->age_cdata.age_rxtail->m_next = mp;
            sc->age_cdata.age_rxtail = mp;
        }

        if (count == nsegs - 1) {
            /* Last desc. for this frame. */
            m = sc->age_cdata.age_rxhead;
            m->m_flags |= M_PKTHDR;
            /*
             * It seems that L1 controller has no way
             * to tell hardware to strip CRC bytes.
             */
            m->m_pkthdr.len = sc->age_cdata.age_rxlen -
                ETHER_CRC_LEN;
            if (nsegs > 1) {
                /* Set last mbuf size. */
                mp->m_len = sc->age_cdata.age_rxlen -
                    ((nsegs - 1) * AGE_RX_BUF_SIZE);
                /* Remove the CRC bytes in chained mbufs. */
                if (mp->m_len <= ETHER_CRC_LEN) {
                    /*
                     * The CRC straddles the last two
                     * mbufs: drop the last one entirely
                     * and trim the remainder off the
                     * previous tail.
                     */
                    sc->age_cdata.age_rxtail =
                        sc->age_cdata.age_rxprev_tail;
                    sc->age_cdata.age_rxtail->m_len -=
                        (ETHER_CRC_LEN - mp->m_len);
                    sc->age_cdata.age_rxtail->m_next = NULL;
                    m_freem(mp);
                } else {
                    mp->m_len -= ETHER_CRC_LEN;
                }
            } else
                m->m_len = m->m_pkthdr.len;
            m->m_pkthdr.rcvif = ifp;
            /*
             * Set checksum information.
             * It seems that L1 controller can compute partial
             * checksum. The partial checksum value can be used
             * to accelerate checksum computation for fragmented
             * TCP/UDP packets. Upper network stack already
             * takes advantage of the partial checksum value in
             * IP reassembly stage. But I'm not sure the
             * correctness of the partial hardware checksum
             * assistance due to lack of data sheet. If it is
             * proven to work on L1 I'll enable it.
             */
            if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
                (status & AGE_RRD_IPV4) != 0) {
                if ((status & AGE_RRD_IPCSUM_NOK) == 0)
                    m->m_pkthdr.csum_flags |=
                        CSUM_IP_CHECKED | CSUM_IP_VALID;
                if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) &&
                    (status & AGE_RRD_TCP_UDPCSUM_NOK) == 0) {
                    m->m_pkthdr.csum_flags |=
                        CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                    m->m_pkthdr.csum_data = 0xffff;
                }
                /*
                 * Don't mark bad checksum for TCP/UDP frames
                 * as fragmented frames may always have set
                 * bad checksummed bit of descriptor status.
                 */
            }

            /* Check for VLAN tagged frames. */
            if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0 &&
                (status & AGE_RRD_VLAN) != 0) {
                vtag = AGE_RX_VLAN(le32toh(rxrd->vtags));
                m->m_pkthdr.ether_vtag = AGE_RX_VLAN_TAG(vtag);
                m->m_flags |= M_VLANTAG;
            }
#ifndef __NO_STRICT_ALIGNMENT
            m = age_fixup_rx(ifp, m);
            if (m != NULL)
#endif
            {
            /* Pass it on. */
            AGE_UNLOCK(sc);
            if_input(ifp, m);
            AGE_LOCK(sc);
            }
        }
    }

    /* Reset mbuf chains. */
    AGE_RXCHAIN_RESET(sc);
}
/*
 * Service the Rx return ring up to rr_prod, processing at most
 * `count` frames. Returns 0 when the ring was fully drained, or
 * EAGAIN when the work limit was hit and age_int_task() should poll
 * again. Called with the driver lock held.
 */
static int
age_rxintr(struct age_softc *sc, int rr_prod, int count)
{
    struct rx_rdesc *rxrd;
    int rr_cons, nsegs, pktlen, prog;

    AGE_LOCK_ASSERT(sc);

    rr_cons = sc->age_cdata.age_rr_cons;
    if (rr_cons == rr_prod)
        return (0);

    bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
        sc->age_cdata.age_rr_ring_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag,
        sc->age_cdata.age_rx_ring_map, BUS_DMASYNC_POSTWRITE);

    for (prog = 0; rr_cons != rr_prod; prog++) {
        if (count-- <= 0)
            break;
        rxrd = &sc->age_rdata.age_rr_ring[rr_cons];
        nsegs = AGE_RX_NSEGS(le32toh(rxrd->index));
        /* A zero segment count means the descriptor isn't ready. */
        if (nsegs == 0)
            break;
        /*
         * Check number of segments against received bytes.
         * Non-matching value would indicate that hardware
         * is still trying to update Rx return descriptors.
         * I'm not sure whether this check is really needed.
         */
        pktlen = AGE_RX_BYTES(le32toh(rxrd->len));
        if (nsegs != howmany(pktlen, AGE_RX_BUF_SIZE))
            break;

        /* Received a frame. */
        age_rxeof(sc, rxrd);
        /* Clear return ring. */
        rxrd->index = 0;
        AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT);
        /* Advance the Rx buffer consumer by the segments used. */
        sc->age_cdata.age_rx_cons += nsegs;
        sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
    }

    if (prog > 0) {
        /* Update the consumer index. */
        sc->age_cdata.age_rr_cons = rr_cons;
        bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag,
            sc->age_cdata.age_rx_ring_map, BUS_DMASYNC_PREWRITE);
        /* Sync descriptors. */
        bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
            sc->age_cdata.age_rr_ring_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        /* Notify hardware availability of new Rx buffers. */
        AGE_COMMIT_MBOX(sc);
    }

    return (count > 0 ? 0 : EAGAIN);
}
/*
 * Once-per-second driver timer: tick the MII state machine, run the
 * Tx watchdog, and reschedule ourselves. Runs via callout with the
 * driver lock held.
 */
static void
age_tick(void *arg)
{
    struct age_softc *sc = arg;
    struct mii_data *mii;

    AGE_LOCK_ASSERT(sc);

    mii = device_get_softc(sc->age_miibus);
    mii_tick(mii);
    age_watchdog(sc);
    callout_reset(&sc->age_tick_ch, hz, age_tick, sc);
}
/*
 * Issue a master reset and wait for the controller's internal state
 * machines to report idle, then reinitialize the PCIe glue registers
 * (magic values taken from the Linux driver).
 */
static void
age_reset(struct age_softc *sc)
{
    uint32_t reg;
    int i;

    CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET);
    CSR_READ_4(sc, AGE_MASTER_CFG);
    DELAY(1000);

    /* Poll the idle-status register until every unit is quiescent. */
    i = AGE_RESET_TIMEOUT;
    while (i > 0) {
        reg = CSR_READ_4(sc, AGE_IDLE_STATUS);
        if (reg == 0)
            break;
        DELAY(10);
        i--;
    }
    if (i == 0)
        device_printf(sc->age_dev, "reset timeout(0x%08x)!\n", reg);

    /* Initialize PCIe module. From Linux. */
    CSR_WRITE_4(sc, 0x12FC, 0x6500);
    CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
}
/*
 * Unlocked wrapper around age_init_locked(); used as the ifnet init
 * callback.
 */
static void
age_init(void *xsc)
{
    struct age_softc *sc = xsc;

    AGE_LOCK(sc);
    age_init_locked(sc);
    AGE_UNLOCK(sc);
}
/*
 * Bring the controller up: cancel pending I/O, reset the chip,
 * rebuild all descriptor rings/blocks, program station address,
 * DMA/queue/MAC parameters, and finally enable the Tx/Rx MACs.
 * The register programming order below mirrors the hardware init
 * sequence and must not be rearranged.  Called with the softc
 * lock held.
 */
static void
age_init_locked(struct age_softc *sc)
{
	if_t ifp;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, fsize;
	uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
	int error;

	AGE_LOCK_ASSERT(sc);

	ifp = sc->age_ifp;
	mii = device_get_softc(sc->age_miibus);

	/* Already up - nothing to do. */
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;
	/*
	 * Cancel any pending I/O.
	 */
	age_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	age_reset(sc);
	/* Initialize descriptors. */
	error = age_init_rx_ring(sc);
	if (error != 0) {
		device_printf(sc->age_dev, "no memory for Rx buffers.\n");
		age_stop(sc);
		return;
	}
	age_init_rr_ring(sc);
	age_init_tx_ring(sc);
	age_init_cmb_block(sc);
	age_init_smb_block(sc);
	/* Reprogram the station address (split across two registers). */
	bcopy(if_getlladdr(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, AGE_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);
	/*
	 * Set descriptor base addresses.  Note there is only a single
	 * high-address register: the upper 32 bits of the Tx ring address
	 * are used for all rings, so all descriptor memory apparently has
	 * to share the same 4GB segment (inferred from the lone HI write;
	 * see also age_check_boundary elsewhere in this driver).
	 */
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
	paddr = sc->age_rdata.age_rx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_rr_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_cmb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_smb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));
	/* Set Rx/Rx return descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
	    ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
	    DESC_RRD_CNT_MASK) |
	    ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));
	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
	    (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);
	/* Tell hardware that we're ready to load descriptors. */
	CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);
	/*
	 * Initialize mailbox register.
	 * Updated producer/consumer index information is exchanged
	 * through this mailbox register. However Tx producer and
	 * Rx return consumer/Rx producer are all shared such that
	 * it's hard to separate code path between Tx and Rx without
	 * locking. If L1 hardware have a separate mail box register
	 * for Tx and Rx consumer/producer management we could have
	 * independent Tx/Rx handler which in turn Rx handler could have
	 * been run without any locking.
	 */
	AGE_COMMIT_MBOX(sc);
	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));
	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, AGE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));
	/*
	 * Configure interrupt moderation timer; a moderation value of
	 * zero disables the interrupt timer entirely.
	 */
	CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
	reg = CSR_READ_4(sc, AGE_MASTER_CFG);
	reg &= ~MASTER_MTIMER_ENB;
	if (AGE_USECS(sc->age_int_mod) == 0)
		reg &= ~MASTER_ITIMER_ENB;
	else
		reg |= MASTER_ITIMER_ENB;
	CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
	if (bootverbose)
		device_printf(sc->age_dev, "interrupt moderation is %d us.\n",
		    sc->age_int_mod);
	CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));
	/* Set maximum frame size; don't let it be less than ETHERMTU. */
	if (if_getmtu(ifp) < ETHERMTU)
		sc->age_max_frame_size = ETHERMTU;
	else
		sc->age_max_frame_size = if_getmtu(ifp);
	sc->age_max_frame_size += ETHER_HDR_LEN +
	    sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
	CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);
	/* Configure jumbo frame (threshold expressed in 8-byte units). */
	fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
	CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
	    (((fsize / sizeof(uint64_t)) <<
	    RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
	    ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
	    RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
	    ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
	    RXQ_JUMBO_CFG_RRD_TIMER_MASK));
	/* Configure flow-control parameters. From Linux. */
	if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
		/*
		 * Magic workaround for old-L1.
		 * Don't know which hw revision requires this magic.
		 */
		CSR_WRITE_4(sc, 0x12FC, 0x6500);
		/*
		 * Another magic workaround for flow-control mode
		 * change. From Linux.
		 */
		CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
	}
	/*
	 * TODO
	 * Should understand pause parameter relationships between FIFO
	 * size and number of Rx descriptors and Rx return descriptors.
	 *
	 * Magic parameters came from Linux.
	 */
	switch (sc->age_chip_rev) {
	case 0x8001:
	case 0x9001:
	case 0x9002:
	case 0x9003:
		rxf_hi = AGE_RX_RING_CNT / 16;
		rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
		rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
		rrd_lo = AGE_RR_RING_CNT / 16;
		break;
	default:
		/* Derive thresholds from the SRAM FIFO/RRD sizes. */
		reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
		rxf_lo = reg / 16;
		if (rxf_lo < 192)
			rxf_lo = 192;
		rxf_hi = (reg * 7) / 8;
		if (rxf_hi < rxf_lo)
			rxf_hi = rxf_lo + 16;
		reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
		rrd_lo = reg / 8;
		rrd_hi = (reg * 7) / 8;
		if (rrd_lo < 2)
			rrd_lo = 2;
		if (rrd_hi < rrd_lo)
			rrd_hi = rrd_lo + 3;
		break;
	}
	CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
	    ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
	    ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_HI_MASK));
	CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
	    ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_LO_MASK) |
	    ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_HI_MASK));
	/* Configure RxQ. */
	CSR_WRITE_4(sc, AGE_RXQ_CFG,
	    ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
	    RXQ_CFG_RD_BURST_MASK) |
	    ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
	    RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
	    ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
	    RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
	    RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);
	/* Configure TxQ. */
	CSR_WRITE_4(sc, AGE_TXQ_CFG,
	    ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
	    TXQ_CFG_TPD_BURST_MASK) |
	    ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
	    TXQ_CFG_TX_FIFO_BURST_MASK) |
	    ((TXQ_CFG_TPD_FETCH_DEFAULT <<
	    TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
	    TXQ_CFG_ENB);
	CSR_WRITE_4(sc, AGE_TX_JUMBO_TPD_TH_IPG,
	    (((fsize / sizeof(uint64_t) << TX_JUMBO_TPD_TH_SHIFT)) &
	    TX_JUMBO_TPD_TH_MASK) |
	    ((TX_JUMBO_TPD_IPG_DEFAULT << TX_JUMBO_TPD_IPG_SHIFT) &
	    TX_JUMBO_TPD_IPG_MASK));
	/* Configure DMA parameters. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
	    sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
	    sc->age_dma_wr_burst | DMA_CFG_WR_ENB);
	/* Configure CMB DMA write threshold. */
	CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
	    ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
	    CMB_WR_THRESH_RRD_MASK) |
	    ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
	    CMB_WR_THRESH_TPD_MASK));
	/* Set CMB/SMB timer and enable them. */
	CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
	    ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
	    ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));
	/* Request an SMB (statistics) update every second. */
	CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);
	/*
	 * Disable all WOL bits as WOL can interfere normal Rx
	 * operation.
	 */
	CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 * Start with full-duplex/1000Mbps media. Actual reconfiguration
	 * of MAC is followed after link establishment.
	 */
	CSR_WRITE_4(sc, AGE_MAC_CFG,
	    MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
	    MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK));
	/* Set up the receive filter. */
	age_rxfilter(sc);
	age_rxvlan(sc);
	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
		reg |= MAC_CFG_RXCSUM_ENB;
	/* Ack all pending interrupts and clear it. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
	CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);
	/* Finally enable Tx/Rx MAC. */
	CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
	sc->age_flags &= ~AGE_FLAG_LINK;
	/* Switch to the current media. */
	mii_mediachg(mii);
	callout_reset(&sc->age_tick_ch, hz, age_tick, sc);
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
}
/*
 * Bring the controller down: mark the interface stopped, mask
 * interrupts, halt the MACs and DMA engines, wait for the hardware
 * to go idle, then release every mbuf still owned by the Rx and Tx
 * rings.  The teardown order matters: DMA must be quiesced before
 * buffers are unloaded.  Called with the softc lock held.
 */
static void
age_stop(struct age_softc *sc)
{
	if_t ifp;
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	uint32_t reg;
	int i;

	AGE_LOCK_ASSERT(sc);
	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->age_ifp;
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->age_flags &= ~AGE_FLAG_LINK;
	callout_stop(&sc->age_tick_ch);
	sc->age_watchdog_timer = 0;
	/*
	 * Disable interrupts (mask everything, ack anything pending).
	 */
	CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);
	/* Stop CMB/SMB updates. */
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);
	/* Stop Rx/Tx MAC. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);
	/* Stop DMA. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));
	/* Stop TxQ/RxQ. */
	CSR_WRITE_4(sc, AGE_TXQ_CFG,
	    CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
	CSR_WRITE_4(sc, AGE_RXQ_CFG,
	    CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
	/* Wait for all hardware units to report idle. */
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		device_printf(sc->age_dev,
		    "stopping Rx/Tx MACs timed out(0x%08x)!\n", reg);
	/* Reclaim Rx buffers that have been processed. */
	if (sc->age_cdata.age_rxhead != NULL)
		m_freem(sc->age_cdata.age_rxhead);
	AGE_RXCHAIN_RESET(sc);
	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->age_cdata.age_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->age_cdata.age_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->age_cdata.age_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->age_cdata.age_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}
/*
 * Disable the Tx MAC and the Tx (read-side) DMA engine, then wait
 * for both units to report idle.
 */
static void
age_stop_txmac(struct age_softc *sc)
{
	uint32_t val;
	int n;

	AGE_LOCK_ASSERT(sc);

	val = CSR_READ_4(sc, AGE_MAC_CFG);
	if ((val & MAC_CFG_TX_ENB) != 0)
		CSR_WRITE_4(sc, AGE_MAC_CFG, val & ~MAC_CFG_TX_ENB);
	/* Stop Tx DMA engine. */
	val = CSR_READ_4(sc, AGE_DMA_CFG);
	if ((val & DMA_CFG_RD_ENB) != 0)
		CSR_WRITE_4(sc, AGE_DMA_CFG, val & ~DMA_CFG_RD_ENB);
	/* Poll until both the Tx MAC and the read DMA unit go idle. */
	for (n = AGE_RESET_TIMEOUT; n > 0; n--) {
		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
		    (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
			return;
		DELAY(10);
	}
	device_printf(sc->age_dev, "stopping TxMAC timeout!\n");
}
/*
 * Disable the Rx MAC and the Rx (write-side) DMA engine, then wait
 * for both units to report idle.
 */
static void
age_stop_rxmac(struct age_softc *sc)
{
	uint32_t val;
	int n;

	AGE_LOCK_ASSERT(sc);

	val = CSR_READ_4(sc, AGE_MAC_CFG);
	if ((val & MAC_CFG_RX_ENB) != 0)
		CSR_WRITE_4(sc, AGE_MAC_CFG, val & ~MAC_CFG_RX_ENB);
	/* Stop Rx DMA engine. */
	val = CSR_READ_4(sc, AGE_DMA_CFG);
	if ((val & DMA_CFG_WR_ENB) != 0)
		CSR_WRITE_4(sc, AGE_DMA_CFG, val & ~DMA_CFG_WR_ENB);
	/* Poll until both the Rx MAC and the write DMA unit go idle. */
	for (n = AGE_RESET_TIMEOUT; n > 0; n--) {
		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
		    (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
			return;
		DELAY(10);
	}
	device_printf(sc->age_dev, "stopping RxMAC timeout!\n");
}
/*
 * Reset the Tx ring: clear producer/consumer indices, zero the
 * descriptor area, link every software slot to its hardware
 * descriptor and sync the ring for device access.
 */
static void
age_init_tx_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;
	struct age_txdesc *txd;
	int n;

	AGE_LOCK_ASSERT(sc);

	sc->age_cdata.age_tx_prod = 0;
	sc->age_cdata.age_tx_cons = 0;
	sc->age_cdata.age_tx_cnt = 0;

	rd = &sc->age_rdata;
	bzero(rd->age_tx_ring, AGE_TX_RING_SZ);
	for (n = 0; n < AGE_TX_RING_CNT; n++) {
		txd = &sc->age_cdata.age_txdesc[n];
		txd->tx_m = NULL;
		txd->tx_desc = &rd->age_tx_ring[n];
	}
	bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
	    sc->age_cdata.age_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * Reset the Rx ring and attach a fresh mbuf cluster to every slot.
 * Returns ENOBUFS when cluster allocation fails, 0 on success.
 */
static int
age_init_rx_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;
	struct age_rxdesc *rxd;
	int n;

	AGE_LOCK_ASSERT(sc);

	sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
	sc->age_morework = 0;
	rd = &sc->age_rdata;
	bzero(rd->age_rx_ring, AGE_RX_RING_SZ);
	for (n = 0; n < AGE_RX_RING_CNT; n++) {
		rxd = &sc->age_cdata.age_rxdesc[n];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->age_rx_ring[n];
		/* Populate this slot with a fresh cluster. */
		if (age_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}
	bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag,
	    sc->age_cdata.age_rx_ring_map, BUS_DMASYNC_PREWRITE);
	return (0);
}
/*
 * Reset the Rx return ring: rewind the consumer index, drop any
 * partially assembled Rx chain, zero the ring and sync it.
 */
static void
age_init_rr_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;

	AGE_LOCK_ASSERT(sc);

	sc->age_cdata.age_rr_cons = 0;
	AGE_RXCHAIN_RESET(sc);

	rd = &sc->age_rdata;
	bzero(rd->age_rr_ring, AGE_RR_RING_SZ);
	bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
	    sc->age_cdata.age_rr_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/* Zero the coalescing message block (CMB) and sync it for the device. */
static void
age_init_cmb_block(struct age_softc *sc)
{
	struct age_ring_data *rd;

	AGE_LOCK_ASSERT(sc);

	rd = &sc->age_rdata;
	bzero(rd->age_cmb_block, AGE_CMB_BLOCK_SZ);
	bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
	    sc->age_cdata.age_cmb_block_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/* Zero the statistics message block (SMB) and sync it for the device. */
static void
age_init_smb_block(struct age_softc *sc)
{
	struct age_ring_data *rd;

	AGE_LOCK_ASSERT(sc);

	rd = &sc->age_rdata;
	bzero(rd->age_smb_block, AGE_SMB_BLOCK_SZ);
	bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
	    sc->age_cdata.age_smb_block_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * Attach a fresh mbuf cluster to Rx slot 'rxd'.
 *
 * The new cluster is first loaded into the spare DMA map; only after
 * that succeeds is the old buffer unloaded and the maps swapped, so a
 * failed allocation leaves the slot's previous buffer intact.
 * Returns 0 on success, ENOBUFS when the cluster cannot be allocated
 * or mapped.
 */
static int
age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd)
{
	struct rx_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	AGE_LOCK_ASSERT(sc);

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
#ifndef __NO_STRICT_ALIGNMENT
	/* Shift the payload so the IP header ends up aligned. */
	m_adj(m, AGE_RX_BUF_ALIGN);
#endif

	/* Map the new cluster via the spare map before touching the slot. */
	if (bus_dmamap_load_mbuf_sg(sc->age_cdata.age_rx_tag,
	    sc->age_cdata.age_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	/* Release the old buffer's mapping, if any. */
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->age_cdata.age_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->age_cdata.age_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the slot's map with the spare that now holds the mapping. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
	sc->age_cdata.age_rx_sparemap = map;
	bus_dmamap_sync(sc->age_cdata.age_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	/* Publish the new buffer address/length to the hardware descriptor. */
	desc = rxd->rx_desc;
	desc->addr = htole64(segs[0].ds_addr);
	desc->len = htole32((segs[0].ds_len & AGE_RD_LEN_MASK) <<
	    AGE_RD_LEN_SHIFT);
	return (0);
}
/*
 * Enable or disable hardware VLAN tag stripping according to the
 * interface's IFCAP_VLAN_HWTAGGING capability.
 */
static void
age_rxvlan(struct age_softc *sc)
{
	uint32_t cfg;

	AGE_LOCK_ASSERT(sc);

	cfg = CSR_READ_4(sc, AGE_MAC_CFG) & ~MAC_CFG_VLAN_TAG_STRIP;
	if ((if_getcapenable(sc->age_ifp) & IFCAP_VLAN_HWTAGGING) != 0)
		cfg |= MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, AGE_MAC_CFG, cfg);
}
/*
 * if_foreach_llmaddr() callback: fold one multicast address into the
 * 64-bit hash filter.  Bit 31 of the big-endian CRC selects which of
 * the two 32-bit registers to use, bits 30:26 select the bit in it.
 */
static u_int
age_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *mta = arg;
	uint32_t crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);

	mta[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
	return (1);
}
/*
 * Program the MAC receive filter: broadcast acceptance, the
 * promiscuous/allmulti modes, or a 64-bit multicast hash built from
 * the interface's link-level multicast address list.
 */
static void
age_rxfilter(struct age_softc *sc)
{
	if_t ifp;
	uint32_t hashes[2];
	uint32_t cfg;

	AGE_LOCK_ASSERT(sc);

	ifp = sc->age_ifp;
	cfg = CSR_READ_4(sc, AGE_MAC_CFG);
	cfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
	if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
		cfg |= MAC_CFG_BCAST;
	if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((if_getflags(ifp) & IFF_PROMISC) != 0)
			cfg |= MAC_CFG_PROMISC;
		if ((if_getflags(ifp) & IFF_ALLMULTI) != 0)
			cfg |= MAC_CFG_ALLMULTI;
		/* Accept every multicast group. */
		hashes[0] = hashes[1] = 0xFFFFFFFF;
	} else {
		/* Build the hash filter from the current address list. */
		bzero(hashes, sizeof(hashes));
		if_foreach_llmaddr(ifp, age_hash_maddr, hashes);
	}
	CSR_WRITE_4(sc, AGE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, AGE_MAR1, hashes[1]);
	CSR_WRITE_4(sc, AGE_MAC_CFG, cfg);
}
/*
 * Sysctl handler that dumps the accumulated MAC statistics to the
 * console.  Writing 1 to the node triggers the dump; any other value
 * is ignored.
 *
 * Fix: the last two 64-bit counters were cast to (uint64_t) while
 * being printed with %ju, which requires uintmax_t (undefined
 * behavior where the two types differ); use (uintmax_t) like every
 * other 64-bit counter here.
 */
static int
sysctl_age_stats(SYSCTL_HANDLER_ARGS)
{
	struct age_softc *sc;
	struct age_stats *stats;
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	/* Only a write of exactly 1 triggers the dump. */
	if (result != 1)
		return (error);

	sc = (struct age_softc *)arg1;
	stats = &sc->age_stat;
	printf("%s statistics:\n", device_get_nameunit(sc->age_dev));
	printf("Transmit good frames : %ju\n",
	    (uintmax_t)stats->tx_frames);
	printf("Transmit good broadcast frames : %ju\n",
	    (uintmax_t)stats->tx_bcast_frames);
	printf("Transmit good multicast frames : %ju\n",
	    (uintmax_t)stats->tx_mcast_frames);
	printf("Transmit pause control frames : %u\n",
	    stats->tx_pause_frames);
	printf("Transmit control frames : %u\n",
	    stats->tx_control_frames);
	printf("Transmit frames with excessive deferrals : %u\n",
	    stats->tx_excess_defer);
	printf("Transmit deferrals : %u\n",
	    stats->tx_deferred);
	printf("Transmit good octets : %ju\n",
	    (uintmax_t)stats->tx_bytes);
	printf("Transmit good broadcast octets : %ju\n",
	    (uintmax_t)stats->tx_bcast_bytes);
	printf("Transmit good multicast octets : %ju\n",
	    (uintmax_t)stats->tx_mcast_bytes);
	printf("Transmit frames 64 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_64);
	printf("Transmit frames 65 to 127 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_65_127);
	printf("Transmit frames 128 to 255 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_128_255);
	printf("Transmit frames 256 to 511 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_256_511);
	printf("Transmit frames 512 to 1024 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_512_1023);
	printf("Transmit frames 1024 to 1518 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_1024_1518);
	printf("Transmit frames 1519 to MTU bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_1519_max);
	printf("Transmit single collisions : %u\n",
	    stats->tx_single_colls);
	printf("Transmit multiple collisions : %u\n",
	    stats->tx_multi_colls);
	printf("Transmit late collisions : %u\n",
	    stats->tx_late_colls);
	printf("Transmit abort due to excessive collisions : %u\n",
	    stats->tx_excess_colls);
	printf("Transmit underruns due to FIFO underruns : %u\n",
	    stats->tx_underrun);
	printf("Transmit descriptor write-back errors : %u\n",
	    stats->tx_desc_underrun);
	printf("Transmit frames with length mismatched frame size : %u\n",
	    stats->tx_lenerrs);
	/*
	 * NOTE(review): tx_lenerrs is printed again under a different
	 * label here — this looks like a copy/paste slip; there is
	 * presumably a dedicated "truncated" Tx counter in struct
	 * age_stats that should be used.  TODO confirm against
	 * if_agevar.h before changing the field.
	 */
	printf("Transmit frames with truncated due to MTU size : %u\n",
	    stats->tx_lenerrs);
	printf("Receive good frames : %ju\n",
	    (uintmax_t)stats->rx_frames);
	printf("Receive good broadcast frames : %ju\n",
	    (uintmax_t)stats->rx_bcast_frames);
	printf("Receive good multicast frames : %ju\n",
	    (uintmax_t)stats->rx_mcast_frames);
	printf("Receive pause control frames : %u\n",
	    stats->rx_pause_frames);
	printf("Receive control frames : %u\n",
	    stats->rx_control_frames);
	printf("Receive CRC errors : %u\n",
	    stats->rx_crcerrs);
	printf("Receive frames with length errors : %u\n",
	    stats->rx_lenerrs);
	printf("Receive good octets : %ju\n",
	    (uintmax_t)stats->rx_bytes);
	printf("Receive good broadcast octets : %ju\n",
	    (uintmax_t)stats->rx_bcast_bytes);
	printf("Receive good multicast octets : %ju\n",
	    (uintmax_t)stats->rx_mcast_bytes);
	printf("Receive frames too short : %u\n",
	    stats->rx_runts);
	printf("Receive fragmented frames : %ju\n",
	    (uintmax_t)stats->rx_fragments);
	printf("Receive frames 64 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_64);
	printf("Receive frames 65 to 127 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_65_127);
	printf("Receive frames 128 to 255 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_128_255);
	printf("Receive frames 256 to 511 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_256_511);
	printf("Receive frames 512 to 1024 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_512_1023);
	printf("Receive frames 1024 to 1518 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_1024_1518);
	printf("Receive frames 1519 to MTU bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_1519_max);
	printf("Receive frames too long : %ju\n",
	    (uintmax_t)stats->rx_pkts_truncated);
	printf("Receive frames with FIFO overflow : %u\n",
	    stats->rx_fifo_oflows);
	printf("Receive frames with return descriptor overflow : %u\n",
	    stats->rx_desc_oflows);
	printf("Receive frames with alignment errors : %u\n",
	    stats->rx_alignerrs);
	printf("Receive frames dropped due to address filtering : %ju\n",
	    (uintmax_t)stats->rx_pkts_filtered);
	return (error);
}
/*
 * Generic helper for bounded integer sysctls: export *arg1 and accept
 * a new value only when it lies within [low, high].  Returns 0 on
 * success, EINVAL for a NULL backing pointer or out-of-range value,
 * or the error from sysctl_handle_int().
 */
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int err, val;

	if (arg1 == NULL)
		return (EINVAL);
	val = *(int *)arg1;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);
	if (val >= low && val <= high) {
		*(int *)arg1 = val;
		return (0);
	}
	return (EINVAL);
}
/* Bounds-checked sysctl for the per-interrupt Rx processing limit. */
static int
sysctl_hw_age_proc_limit(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    AGE_PROC_MIN, AGE_PROC_MAX));
}
/* Bounds-checked sysctl for the interrupt moderation timer (us). */
static int
sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req, AGE_IM_TIMER_MIN,
	    AGE_IM_TIMER_MAX));
}
diff --git a/sys/dev/alc/if_alc.c b/sys/dev/alc/if_alc.c
index 86ae705667de..859d1214b46a 100644
--- a/sys/dev/alc/if_alc.c
+++ b/sys/dev/alc/if_alc.c
@@ -1,4736 +1,4730 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* Driver for Atheros AR813x/AR815x PCIe Ethernet. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/bpf.h>
#include <net/debugnet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <dev/alc/if_alcreg.h>
#include <dev/alc/if_alcvar.h>
/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
#undef ALC_USE_CUSTOM_CSUM
#ifdef ALC_USE_CUSTOM_CSUM
#define ALC_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
#else
#define ALC_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
#endif
MODULE_DEPEND(alc, pci, 1, 1, 1);
MODULE_DEPEND(alc, ether, 1, 1, 1);
MODULE_DEPEND(alc, miibus, 1, 1, 1);

/* Tunables. */
/* Set hw.alc.msi_disable non-zero to force legacy interrupts over MSI. */
static int msi_disable = 0;
TUNABLE_INT("hw.alc.msi_disable", &msi_disable);
/*
 * The default value of msix_disable is 2, which means to decide whether to
 * enable MSI-X in alc_attach() depending on the card type.  The operator can
 * set this to 0 or 1 to override the default.
 */
static int msix_disable = 2;
TUNABLE_INT("hw.alc.msix_disable", &msix_disable);
/*
* Devices supported by this driver.
*/
/*
 * PCI id match table; fields are vendor id, device id, maximum
 * supported frame buffer length in bytes, and description string.
 * Terminated by an all-zero sentinel entry.
 */
static struct alc_ident alc_ident_table[] = {
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8131, 9 * 1024,
		"Atheros AR8131 PCIe Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8132, 9 * 1024,
		"Atheros AR8132 PCIe Fast Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151, 6 * 1024,
		"Atheros AR8151 v1.0 PCIe Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151_V2, 6 * 1024,
		"Atheros AR8151 v2.0 PCIe Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B, 6 * 1024,
		"Atheros AR8152 v1.1 PCIe Fast Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B2, 6 * 1024,
		"Atheros AR8152 v2.0 PCIe Fast Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8161, 9 * 1024,
		"Atheros AR8161 PCIe Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8162, 9 * 1024,
		"Atheros AR8162 PCIe Fast Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8171, 9 * 1024,
		"Atheros AR8171 PCIe Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8172, 9 * 1024,
		"Atheros AR8172 PCIe Fast Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_E2200, 9 * 1024,
		"Killer E2200 Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_E2400, 9 * 1024,
		"Killer E2400 Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_E2500, 9 * 1024,
		"Killer E2500 Gigabit Ethernet" },
	{ 0, 0, 0, NULL}
};
static void alc_aspm(struct alc_softc *, int, int);
static void alc_aspm_813x(struct alc_softc *, int);
static void alc_aspm_816x(struct alc_softc *, int);
static int alc_attach(device_t);
static int alc_check_boundary(struct alc_softc *);
static void alc_config_msi(struct alc_softc *);
static int alc_detach(device_t);
static void alc_disable_l0s_l1(struct alc_softc *);
static int alc_dma_alloc(struct alc_softc *);
static void alc_dma_free(struct alc_softc *);
static void alc_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void alc_dsp_fixup(struct alc_softc *, int);
static int alc_encap(struct alc_softc *, struct mbuf **);
static struct alc_ident *
alc_find_ident(device_t);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
alc_fixup_rx(if_t, struct mbuf *);
#endif
static void alc_get_macaddr(struct alc_softc *);
static void alc_get_macaddr_813x(struct alc_softc *);
static void alc_get_macaddr_816x(struct alc_softc *);
static void alc_get_macaddr_par(struct alc_softc *);
static void alc_init(void *);
static void alc_init_cmb(struct alc_softc *);
static void alc_init_locked(struct alc_softc *);
static void alc_init_rr_ring(struct alc_softc *);
static int alc_init_rx_ring(struct alc_softc *);
static void alc_init_smb(struct alc_softc *);
static void alc_init_tx_ring(struct alc_softc *);
static void alc_int_task(void *, int);
static int alc_intr(void *);
static int alc_ioctl(if_t, u_long, caddr_t);
static void alc_mac_config(struct alc_softc *);
static uint32_t alc_mii_readreg_813x(struct alc_softc *, int, int);
static uint32_t alc_mii_readreg_816x(struct alc_softc *, int, int);
static uint32_t alc_mii_writereg_813x(struct alc_softc *, int, int, int);
static uint32_t alc_mii_writereg_816x(struct alc_softc *, int, int, int);
static int alc_miibus_readreg(device_t, int, int);
static void alc_miibus_statchg(device_t);
static int alc_miibus_writereg(device_t, int, int, int);
static uint32_t alc_miidbg_readreg(struct alc_softc *, int);
static uint32_t alc_miidbg_writereg(struct alc_softc *, int, int);
static uint32_t alc_miiext_readreg(struct alc_softc *, int, int);
static uint32_t alc_miiext_writereg(struct alc_softc *, int, int, int);
static int alc_mediachange(if_t);
static int alc_mediachange_locked(struct alc_softc *);
static void alc_mediastatus(if_t, struct ifmediareq *);
static int alc_newbuf(struct alc_softc *, struct alc_rxdesc *);
static void alc_osc_reset(struct alc_softc *);
static void alc_phy_down(struct alc_softc *);
static void alc_phy_reset(struct alc_softc *);
static void alc_phy_reset_813x(struct alc_softc *);
static void alc_phy_reset_816x(struct alc_softc *);
static int alc_probe(device_t);
static void alc_reset(struct alc_softc *);
static int alc_resume(device_t);
static void alc_rxeof(struct alc_softc *, struct rx_rdesc *);
static int alc_rxintr(struct alc_softc *, int);
static void alc_rxfilter(struct alc_softc *);
static void alc_rxvlan(struct alc_softc *);
static void alc_setlinkspeed(struct alc_softc *);
static void alc_setwol(struct alc_softc *);
static void alc_setwol_813x(struct alc_softc *);
static void alc_setwol_816x(struct alc_softc *);
static int alc_shutdown(device_t);
static void alc_start(if_t);
static void alc_start_locked(if_t);
static void alc_start_queue(struct alc_softc *);
static void alc_start_tx(struct alc_softc *);
static void alc_stats_clear(struct alc_softc *);
static void alc_stats_update(struct alc_softc *);
static void alc_stop(struct alc_softc *);
static void alc_stop_mac(struct alc_softc *);
static void alc_stop_queue(struct alc_softc *);
static int alc_suspend(device_t);
static void alc_sysctl_node(struct alc_softc *);
static void alc_tick(void *);
static void alc_txeof(struct alc_softc *);
static void alc_watchdog(struct alc_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS);
DEBUGNET_DEFINE(alc);
/* newbus method table: device lifecycle plus MII bus accessors. */
static device_method_t alc_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe, alc_probe),
	DEVMETHOD(device_attach, alc_attach),
	DEVMETHOD(device_detach, alc_detach),
	DEVMETHOD(device_shutdown, alc_shutdown),
	DEVMETHOD(device_suspend, alc_suspend),
	DEVMETHOD(device_resume, alc_resume),
	/* MII interface. */
	DEVMETHOD(miibus_readreg, alc_miibus_readreg),
	DEVMETHOD(miibus_writereg, alc_miibus_writereg),
	DEVMETHOD(miibus_statchg, alc_miibus_statchg),
	DEVMETHOD_END
};
static driver_t alc_driver = {
	"alc",
	alc_methods,
	sizeof(struct alc_softc)
};
DRIVER_MODULE(alc, pci, alc_driver, 0, 0);
/* PNP info lets devmatch(8) autoload the module from the ident table. */
MODULE_PNP_INFO("U16:vendor;U16:device", pci, alc, alc_ident_table,
    nitems(alc_ident_table) - 1);
DRIVER_MODULE(miibus, alc, miibus_driver, 0, 0);
/* Register BAR 0 (memory mapped). */
static struct resource_spec alc_res_spec_mem[] = {
	{ SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
	{ -1, 0, 0 }
};
/* Interrupt resource specs for legacy INTx, MSI and MSI-X modes. */
static struct resource_spec alc_irq_spec_legacy[] = {
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0, 0 }
};
static struct resource_spec alc_irq_spec_msi[] = {
	{ SYS_RES_IRQ, 1, RF_ACTIVE },
	{ -1, 0, 0 }
};
static struct resource_spec alc_irq_spec_msix[] = {
	{ SYS_RES_IRQ, 1, RF_ACTIVE },
	{ -1, 0, 0 }
};
/* DMA burst sizes in bytes, indexed by the device's burst-length field. */
static uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0, 0 };
/*
 * miibus read entry point: dispatch to the AR816x-family or AR813x
 * register access routine depending on the controller type.
 */
static int
alc_miibus_readreg(device_t dev, int phy, int reg)
{
	struct alc_softc *sc = device_get_softc(dev);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		return (alc_mii_readreg_816x(sc, phy, reg));
	return (alc_mii_readreg_813x(sc, phy, reg));
}
/*
 * Read a PHY register on AR813x-class hardware via the MDIO
 * controller.  Returns the register value, or 0 on timeout.
 */
static uint32_t
alc_mii_readreg_813x(struct alc_softc *sc, int phy, int reg)
{
	uint32_t v;
	int n;

	/*
	 * For AR8132 fast ethernet controller, do not report 1000baseT
	 * capability to mii(4). Even though AR8132 uses the same
	 * model/revision number of F1 gigabit PHY, the PHY has no
	 * ability to establish 1000baseT link.
	 */
	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 && reg == MII_EXTSR)
		return (0);

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	n = ALC_PHY_TIMEOUT;
	do {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
	} while (--n > 0);

	device_printf(sc->alc_dev, "phy read timeout : %d\n", reg);
	return (0);
}
/*
 * Read a PHY register on AR816x-family controllers.  Same mailbox
 * protocol as the 813x variant but selects a faster MDIO clock when
 * a link is up, and only waits for the BUSY bit.  Returns 0 on
 * timeout; 'phy' is unused (internal PHY only).
 */
static uint32_t
alc_mii_readreg_816x(struct alc_softc *sc, int phy, int reg)
{
uint32_t clk, v;
int i;
/* Faster MDIO clock once a link has been established. */
if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
clk = MDIO_CLK_25_128;
else
clk = MDIO_CLK_25_4;
CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
MDIO_SUP_PREAMBLE | clk | MDIO_REG_ADDR(reg));
for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
DELAY(5);
v = CSR_READ_4(sc, ALC_MDIO);
if ((v & MDIO_OP_BUSY) == 0)
break;
}
if (i == 0) {
device_printf(sc->alc_dev, "phy read timeout : %d\n", reg);
return (0);
}
return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}
/*
 * miibus(4) register-write entry point.  Dispatches to the
 * generation-specific MDIO access routine; both variants always
 * return 0.
 */
static int
alc_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct alc_softc *sc = device_get_softc(dev);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		return (alc_mii_writereg_816x(sc, phy, reg, val));
	return (alc_mii_writereg_813x(sc, phy, reg, val));
}
/*
 * Write a PHY register on AR813x/AR815x controllers via the MDIO
 * mailbox.  Busy-waits for the EXECUTE/BUSY bits to clear, logging
 * on timeout.  Always returns 0; 'phy' is unused (internal PHY).
 */
static uint32_t
alc_mii_writereg_813x(struct alc_softc *sc, int phy, int reg, int val)
{
uint32_t v;
int i;
CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
(val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
DELAY(5);
v = CSR_READ_4(sc, ALC_MDIO);
if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
break;
}
/* Write failures are logged but not reported to the caller. */
if (i == 0)
device_printf(sc->alc_dev, "phy write timeout : %d\n", reg);
return (0);
}
/*
 * Write a PHY register on AR816x-family controllers.  Selects the
 * faster MDIO clock when a link is up and polls only the BUSY bit.
 * Always returns 0; 'phy' is unused (internal PHY).
 */
static uint32_t
alc_mii_writereg_816x(struct alc_softc *sc, int phy, int reg, int val)
{
uint32_t clk, v;
int i;
if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
clk = MDIO_CLK_25_128;
else
clk = MDIO_CLK_25_4;
CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) | MDIO_REG_ADDR(reg) |
MDIO_SUP_PREAMBLE | clk);
for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
DELAY(5);
v = CSR_READ_4(sc, ALC_MDIO);
if ((v & MDIO_OP_BUSY) == 0)
break;
}
/* Write failures are logged but not reported to the caller. */
if (i == 0)
device_printf(sc->alc_dev, "phy write timeout : %d\n", reg);
return (0);
}
/*
 * miibus(4) link-state-change callback.  Caches the resolved link
 * state in sc->alc_flags, then stops the MACs and, if the link is
 * up, restarts the queues and reprograms the MAC for the new
 * speed/duplex before re-enabling Tx/Rx.  Finally re-tunes ASPM and
 * PHY DSP settings for the active media.
 */
static void
alc_miibus_statchg(device_t dev)
{
struct alc_softc *sc;
struct mii_data *mii;
if_t ifp;
uint32_t reg;
sc = device_get_softc(dev);
mii = device_get_softc(sc->alc_miibus);
ifp = sc->alc_ifp;
/* Nothing to do before the interface is up and running. */
if (mii == NULL || ifp == NULL ||
(if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
return;
sc->alc_flags &= ~ALC_FLAG_LINK;
if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
(IFM_ACTIVE | IFM_AVALID)) {
switch (IFM_SUBTYPE(mii->mii_media_active)) {
case IFM_10_T:
case IFM_100_TX:
sc->alc_flags |= ALC_FLAG_LINK;
break;
case IFM_1000_T:
/* Fast-ethernet-only parts cannot really do 1000baseT. */
if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
sc->alc_flags |= ALC_FLAG_LINK;
break;
default:
break;
}
}
/* Stop Rx/Tx MACs. */
alc_stop_mac(sc);
/* Program MACs with resolved speed/duplex/flow-control. */
if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
alc_start_queue(sc);
alc_mac_config(sc);
/* Re-enable Tx/Rx MACs. */
reg = CSR_READ_4(sc, ALC_MAC_CFG);
reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
}
alc_aspm(sc, 0, IFM_SUBTYPE(mii->mii_media_active));
alc_dsp_fixup(sc, IFM_SUBTYPE(mii->mii_media_active));
}
static uint32_t
alc_miidbg_readreg(struct alc_softc *sc, int reg)
{
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
reg);
return (alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA));
}
static uint32_t
alc_miidbg_writereg(struct alc_softc *sc, int reg, int val)
{
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
reg);
return (alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA, val));
}
/*
 * Read an extended (MMD-style) PHY register on AR816x-family parts.
 * The device/register address pair is latched into ALC_EXT_MDIO and
 * the transfer is run through the MDIO mailbox in extended mode.
 * Returns 0 on timeout.
 */
static uint32_t
alc_miiext_readreg(struct alc_softc *sc, int devaddr, int reg)
{
uint32_t clk, v;
int i;
CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
EXT_MDIO_DEVADDR(devaddr));
/* Faster MDIO clock once a link has been established. */
if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
clk = MDIO_CLK_25_128;
else
clk = MDIO_CLK_25_4;
CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT);
for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
DELAY(5);
v = CSR_READ_4(sc, ALC_MDIO);
if ((v & MDIO_OP_BUSY) == 0)
break;
}
if (i == 0) {
device_printf(sc->alc_dev, "phy ext read timeout : %d, %d\n",
devaddr, reg);
return (0);
}
return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}
/*
 * Write an extended (MMD-style) PHY register on AR816x-family parts.
 * Mirrors alc_miiext_readreg(); always returns 0, logging on timeout.
 */
static uint32_t
alc_miiext_writereg(struct alc_softc *sc, int devaddr, int reg, int val)
{
uint32_t clk, v;
int i;
CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
EXT_MDIO_DEVADDR(devaddr));
if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
clk = MDIO_CLK_25_128;
else
clk = MDIO_CLK_25_4;
CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) |
MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT);
for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
DELAY(5);
v = CSR_READ_4(sc, ALC_MDIO);
if ((v & MDIO_OP_BUSY) == 0)
break;
}
if (i == 0)
device_printf(sc->alc_dev, "phy ext write timeout : %d, %d\n",
devaddr, reg);
return (0);
}
static void
alc_dsp_fixup(struct alc_softc *sc, int media)
{
uint16_t agc, len, val;
if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
return;
if (AR816X_REV(sc->alc_rev) >= AR816X_REV_C0)
return;
/*
* Vendor PHY magic.
* 1000BT/AZ, wrong cable length
*/
if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
len = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL6);
len = (len >> EXT_CLDCTL6_CAB_LEN_SHIFT) &
EXT_CLDCTL6_CAB_LEN_MASK;
agc = alc_miidbg_readreg(sc, MII_DBG_AGC);
agc = (agc >> DBG_AGC_2_VGA_SHIFT) & DBG_AGC_2_VGA_MASK;
if ((media == IFM_1000_T && len > EXT_CLDCTL6_CAB_LEN_SHORT1G &&
agc > DBG_AGC_LONG1G_LIMT) ||
(media == IFM_100_TX && len > DBG_AGC_LONG100M_LIMT &&
agc > DBG_AGC_LONG1G_LIMT)) {
alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT,
DBG_AZ_ANADECT_LONG);
val = alc_miiext_readreg(sc, MII_EXT_ANEG,
MII_EXT_ANEG_AFE);
val |= ANEG_AFEE_10BT_100M_TH;
alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE,
val);
} else {
alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT,
DBG_AZ_ANADECT_DEFAULT);
val = alc_miiext_readreg(sc, MII_EXT_ANEG,
MII_EXT_ANEG_AFE);
val &= ~ANEG_AFEE_10BT_100M_TH;
alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE,
val);
}
if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 &&
AR816X_REV(sc->alc_rev) == AR816X_REV_B0) {
if (media == IFM_1000_T) {
/*
* Giga link threshold, raise the tolerance of
* noise 50%.
*/
val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB);
val &= ~DBG_MSE20DB_TH_MASK;
val |= (DBG_MSE20DB_TH_HI <<
DBG_MSE20DB_TH_SHIFT);
alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
} else if (media == IFM_100_TX)
alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
DBG_MSE16DB_UP);
}
} else {
val = alc_miiext_readreg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE);
val &= ~ANEG_AFEE_10BT_100M_TH;
alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, val);
if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 &&
AR816X_REV(sc->alc_rev) == AR816X_REV_B0) {
alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
DBG_MSE16DB_DOWN);
val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB);
val &= ~DBG_MSE20DB_TH_MASK;
val |= (DBG_MSE20DB_TH_DEFAULT << DBG_MSE20DB_TH_SHIFT);
alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
}
}
}
/*
 * ifmedia status callback: poll the PHY and report the current
 * media status/active words.  Does nothing while the interface is
 * administratively down.
 */
static void
alc_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
	struct alc_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii;

	ALC_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		mii = device_get_softc(sc->alc_miibus);
		mii_pollstat(mii);
		ifmr->ifm_status = mii->mii_media_status;
		ifmr->ifm_active = mii->mii_media_active;
	}
	ALC_UNLOCK(sc);
}
/*
 * ifmedia change callback: take the softc lock and hand off to the
 * locked worker.
 */
static int
alc_mediachange(if_t ifp)
{
	struct alc_softc *sc = if_getsoftc(ifp);
	int rv;

	ALC_LOCK(sc);
	rv = alc_mediachange_locked(sc);
	ALC_UNLOCK(sc);

	return (rv);
}
/*
 * Locked media-change worker: reset every attached PHY and kick off
 * renegotiation via mii(4).  Caller must hold the softc lock.
 */
static int
alc_mediachange_locked(struct alc_softc *sc)
{
	struct mii_data *mii;
	struct mii_softc *miisc;

	ALC_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->alc_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	return (mii_mediachg(mii));
}
/*
 * Look the device up in the supported-hardware table by PCI
 * vendor/device ID.  Returns NULL when the device is not ours.
 */
static struct alc_ident *
alc_find_ident(device_t dev)
{
	struct alc_ident *ident;
	uint16_t devid, vendor;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (ident = alc_ident_table; ident->name != NULL; ident++)
		if (ident->vendorid == vendor && ident->deviceid == devid)
			return (ident);
	return (NULL);
}
/*
 * Device probe: claim the device if it appears in the ident table,
 * setting the human-readable description on a match.
 */
static int
alc_probe(device_t dev)
{
	struct alc_ident *ident = alc_find_ident(dev);

	if (ident == NULL)
		return (ENXIO);
	device_set_desc(dev, ident->name);
	return (BUS_PROBE_DEFAULT);
}
/*
 * Fetch the station address using the mechanism appropriate for the
 * controller generation.
 */
static void
alc_get_macaddr(struct alc_softc *sc)
{
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0)
		alc_get_macaddr_813x(sc);
	else
		alc_get_macaddr_816x(sc);
}
/*
 * Load the station address on AR813x/AR815x parts.  If a TWSI EEPROM
 * is present, trigger a software reload of the EEPROM configuration
 * (which programs the address registers), bracketed by device-specific
 * clock-enable / PHY-debug-register magic, then read the address back
 * from the PAR registers.
 */
static void
alc_get_macaddr_813x(struct alc_softc *sc)
{
uint32_t opt;
uint16_t val;
int eeprom, i;
eeprom = 0;
opt = CSR_READ_4(sc, ALC_OPT_CFG);
if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 &&
(CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
/*
 * EEPROM found, let TWSI reload EEPROM configuration.
 * This will set ethernet address of controller.
 */
eeprom++;
switch (sc->alc_ident->deviceid) {
case DEVICEID_ATHEROS_AR8131:
case DEVICEID_ATHEROS_AR8132:
/* Temporarily enable the OPT_CFG clock for the reload. */
if ((opt & OPT_CFG_CLK_ENB) == 0) {
opt |= OPT_CFG_CLK_ENB;
CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
CSR_READ_4(sc, ALC_OPT_CFG);
DELAY(1000);
}
break;
case DEVICEID_ATHEROS_AR8151:
case DEVICEID_ATHEROS_AR8151_V2:
case DEVICEID_ATHEROS_AR8152_B:
case DEVICEID_ATHEROS_AR8152_B2:
/* Vendor PHY debug-register magic before the reload. */
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_ADDR, 0x00);
val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA);
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA, val & 0xFF7F);
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_ADDR, 0x3B);
val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA);
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA, val | 0x0008);
DELAY(20);
break;
}
CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
CSR_READ_4(sc, ALC_WOL_CFG);
/* Start the software reload and wait (up to 100ms) for it. */
CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
TWSI_CFG_SW_LD_START);
for (i = 100; i > 0; i--) {
DELAY(1000);
if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
TWSI_CFG_SW_LD_START) == 0)
break;
}
if (i == 0)
device_printf(sc->alc_dev,
"reloading EEPROM timeout!\n");
} else {
if (bootverbose)
device_printf(sc->alc_dev, "EEPROM not found!\n");
}
/* Undo the device-specific magic applied before the reload. */
if (eeprom != 0) {
switch (sc->alc_ident->deviceid) {
case DEVICEID_ATHEROS_AR8131:
case DEVICEID_ATHEROS_AR8132:
if ((opt & OPT_CFG_CLK_ENB) != 0) {
opt &= ~OPT_CFG_CLK_ENB;
CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
CSR_READ_4(sc, ALC_OPT_CFG);
DELAY(1000);
}
break;
case DEVICEID_ATHEROS_AR8151:
case DEVICEID_ATHEROS_AR8151_V2:
case DEVICEID_ATHEROS_AR8152_B:
case DEVICEID_ATHEROS_AR8152_B2:
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_ADDR, 0x00);
val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA);
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA, val | 0x0080);
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_ADDR, 0x3B);
val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA);
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA, val & 0xFFF7);
DELAY(20);
break;
}
}
alc_get_macaddr_par(sc);
}
/*
 * Load the station address on AR816x-family parts: first try a TWSI
 * reload, then fall back to reloading from EEPROM/FLASH if present;
 * in every case the address is finally read from the PAR registers.
 */
static void
alc_get_macaddr_816x(struct alc_softc *sc)
{
uint32_t reg;
int i, reloaded;
reloaded = 0;
/* Try to reload station address via TWSI. */
for (i = 100; i > 0; i--) {
reg = CSR_READ_4(sc, ALC_SLD);
if ((reg & (SLD_PROGRESS | SLD_START)) == 0)
break;
DELAY(1000);
}
if (i != 0) {
/* TWSI is idle; trigger the reload and wait for completion. */
CSR_WRITE_4(sc, ALC_SLD, reg | SLD_START);
for (i = 100; i > 0; i--) {
DELAY(1000);
reg = CSR_READ_4(sc, ALC_SLD);
if ((reg & SLD_START) == 0)
break;
}
if (i != 0)
reloaded++;
else if (bootverbose)
device_printf(sc->alc_dev,
"reloading station address via TWSI timed out!\n");
}
/* Try to reload station address from EEPROM or FLASH. */
if (reloaded == 0) {
reg = CSR_READ_4(sc, ALC_EEPROM_LD);
if ((reg & (EEPROM_LD_EEPROM_EXIST |
EEPROM_LD_FLASH_EXIST)) != 0) {
/* Wait for any in-flight load to finish first. */
for (i = 100; i > 0; i--) {
reg = CSR_READ_4(sc, ALC_EEPROM_LD);
if ((reg & (EEPROM_LD_PROGRESS |
EEPROM_LD_START)) == 0)
break;
DELAY(1000);
}
if (i != 0) {
CSR_WRITE_4(sc, ALC_EEPROM_LD, reg |
EEPROM_LD_START);
for (i = 100; i > 0; i--) {
DELAY(1000);
reg = CSR_READ_4(sc, ALC_EEPROM_LD);
if ((reg & EEPROM_LD_START) == 0)
break;
}
} else if (bootverbose)
device_printf(sc->alc_dev,
"reloading EEPROM/FLASH timed out!\n");
}
}
alc_get_macaddr_par(sc);
}
/*
 * Extract the 6-byte station address from the PAR0/PAR1 registers
 * into sc->alc_eaddr.  PAR1 holds the two most significant bytes,
 * PAR0 the remaining four, both in big-endian byte order.
 */
static void
alc_get_macaddr_par(struct alc_softc *sc)
{
	uint32_t par0, par1;

	par0 = CSR_READ_4(sc, ALC_PAR0);
	par1 = CSR_READ_4(sc, ALC_PAR1);
	sc->alc_eaddr[0] = (par1 >> 8) & 0xFF;
	sc->alc_eaddr[1] = par1 & 0xFF;
	sc->alc_eaddr[2] = (par0 >> 24) & 0xFF;
	sc->alc_eaddr[3] = (par0 >> 16) & 0xFF;
	sc->alc_eaddr[4] = (par0 >> 8) & 0xFF;
	sc->alc_eaddr[5] = par0 & 0xFF;
}
/*
 * Disable ASPM L0s/L1 in PM_CFG using the vendor-recommended bit
 * recipe.  Applies only to pre-AR816x parts; a no-op otherwise.
 */
static void
alc_disable_l0s_l1(struct alc_softc *sc)
{
uint32_t pmcfg;
if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
/* Another magic from vendor. */
pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
PM_CFG_MAC_ASPM_CHK | PM_CFG_SERDES_PD_EX_L1);
pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB |
PM_CFG_SERDES_PLL_L1_ENB | PM_CFG_SERDES_L1_ENB;
CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}
}
/*
 * Reset the internal PHY using the procedure appropriate for the
 * controller generation.
 */
static void
alc_phy_reset(struct alc_softc *sc)
{
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0)
		alc_phy_reset_813x(sc);
	else
		alc_phy_reset_816x(sc);
}
/*
 * Reset and reinitialize the PHY on AR813x/AR815x parts: pulse the
 * analog/external reset bits, apply per-device DSP fixups, load the
 * vendor "DSP code" values into the analog config debug registers,
 * and finally disable PHY hibernation.  All register values are
 * opaque vendor magic.
 */
static void
alc_phy_reset_813x(struct alc_softc *sc)
{
uint16_t data;
/* Reset magic from Linux. */
CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_SEL_ANA_RESET);
CSR_READ_2(sc, ALC_GPHY_CFG);
DELAY(10 * 1000);
CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
GPHY_CFG_SEL_ANA_RESET);
CSR_READ_2(sc, ALC_GPHY_CFG);
DELAY(10 * 1000);
/* DSP fixup, Vendor magic. */
if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) {
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_ADDR, 0x000A);
data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA);
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA, data & 0xDFFF);
}
if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_ADDR, 0x003B);
data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA);
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA, data & 0xFFF7);
DELAY(20 * 1000);
}
if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151) {
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_ADDR, 0x0029);
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA, 0x929D);
}
if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 ||
sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132 ||
sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_ADDR, 0x0029);
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA, 0xB6DD);
}
/* Load DSP codes, vendor magic. */
data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_ADDR, MII_ANA_CFG18);
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA, data);
data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
ANA_SERDES_EN_LCKDT;
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_ADDR, MII_ANA_CFG5);
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA, data);
/*
 * NOTE(review): the second term masks with
 * ANA_SHORT_CABLE_TH_100_SHIFT — it looks like
 * ANA_SHORT_CABLE_TH_100_MASK was intended; confirm against the
 * vendor reference before changing this long-standing magic.
 */
data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
ANA_LONG_CABLE_TH_100_MASK) |
((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
ANA_SHORT_CABLE_TH_100_SHIFT) |
ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_ADDR, MII_ANA_CFG54);
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA, data);
data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_ADDR, MII_ANA_CFG4);
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA, data);
/*
 * NOTE(review): "7 & ANA_MANUL_SWICH_ON_SHIFT" uses '&' where a
 * shift ("7 << ...") looks intended; confirm before changing.
 */
data = ((7 & ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
ANA_OEN_125M;
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_ADDR, MII_ANA_CFG0);
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA, data);
DELAY(1000);
/* Disable hibernation. */
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
0x0029);
data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA);
data &= ~0x8000;
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
data);
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
0x000B);
data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
ALC_MII_DBG_DATA);
data &= ~0x8000;
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
data);
}
/*
 * Reset and reinitialize the PHY on AR816x-family parts: pulse the
 * external reset, disable PHY hibernation and EEE, program the
 * vendor power-saving defaults, and apply extra analog workarounds
 * on controllers flagged with ALC_FLAG_LINK_WAR.
 */
static void
alc_phy_reset_816x(struct alc_softc *sc)
{
uint32_t val;
val = CSR_READ_4(sc, ALC_GPHY_CFG);
val &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
GPHY_CFG_GATE_25M_ENB | GPHY_CFG_PHY_IDDQ | GPHY_CFG_PHY_PLL_ON |
GPHY_CFG_PWDOWN_HW | GPHY_CFG_100AB_ENB);
val |= GPHY_CFG_SEL_ANA_RESET;
#ifdef notyet
val |= GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN | GPHY_CFG_SEL_ANA_RESET;
#else
/* Disable PHY hibernation. */
val &= ~(GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN);
#endif
CSR_WRITE_4(sc, ALC_GPHY_CFG, val);
DELAY(10);
/* Release the PHY from reset. */
CSR_WRITE_4(sc, ALC_GPHY_CFG, val | GPHY_CFG_EXT_RESET);
DELAY(800);
/* Vendor PHY magic. */
#ifdef notyet
alc_miidbg_writereg(sc, MII_DBG_LEGCYPS, DBG_LEGCYPS_DEFAULT);
alc_miidbg_writereg(sc, MII_DBG_SYSMODCTL, DBG_SYSMODCTL_DEFAULT);
alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_VDRVBIAS,
EXT_VDRVBIAS_DEFAULT);
#else
/* Disable PHY hibernation. */
alc_miidbg_writereg(sc, MII_DBG_LEGCYPS,
DBG_LEGCYPS_DEFAULT & ~DBG_LEGCYPS_ENB);
alc_miidbg_writereg(sc, MII_DBG_HIBNEG,
DBG_HIBNEG_DEFAULT & ~(DBG_HIBNEG_PSHIB_EN | DBG_HIBNEG_HIB_PULSE));
alc_miidbg_writereg(sc, MII_DBG_GREENCFG, DBG_GREENCFG_DEFAULT);
#endif
/* XXX Disable EEE. */
val = CSR_READ_4(sc, ALC_LPI_CTL);
val &= ~LPI_CTL_ENB;
CSR_WRITE_4(sc, ALC_LPI_CTL, val);
alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_LOCAL_EEEADV, 0);
/* PHY power saving. */
alc_miidbg_writereg(sc, MII_DBG_TST10BTCFG, DBG_TST10BTCFG_DEFAULT);
alc_miidbg_writereg(sc, MII_DBG_SRDSYSMOD, DBG_SRDSYSMOD_DEFAULT);
alc_miidbg_writereg(sc, MII_DBG_TST100BTCFG, DBG_TST100BTCFG_DEFAULT);
alc_miidbg_writereg(sc, MII_DBG_ANACTL, DBG_ANACTL_DEFAULT);
val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2);
val &= ~DBG_GREENCFG2_GATE_DFSE_EN;
alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val);
/* RTL8139C, 120m issue. */
alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_NLP78,
ANEG_NLP78_120M_DEFAULT);
alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_S3DIG10,
ANEG_S3DIG10_DEFAULT);
if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0) {
/* Turn off half amplitude. */
val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3);
val |= EXT_CLDCTL3_BP_CABLE1TH_DET_GT;
alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3, val);
/* Turn off Green feature. */
val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2);
val |= DBG_GREENCFG2_BP_GREEN;
alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val);
/* Turn off half bias. */
val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5);
val |= EXT_CLDCTL5_BP_VD_HLFBIAS;
alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5, val);
}
}
/*
 * Power down the PHY, using the method that is safe for the specific
 * controller: full IDDQ/hardware power-down for AR816x-family parts,
 * BMCR isolate+power-down for AR8151/AR8152 (see comment below), and
 * a forced GPHY_CFG power-down otherwise.
 */
static void
alc_phy_down(struct alc_softc *sc)
{
uint32_t gphy;
switch (sc->alc_ident->deviceid) {
case DEVICEID_ATHEROS_AR8161:
case DEVICEID_ATHEROS_E2200:
case DEVICEID_ATHEROS_E2400:
case DEVICEID_ATHEROS_E2500:
case DEVICEID_ATHEROS_AR8162:
case DEVICEID_ATHEROS_AR8171:
case DEVICEID_ATHEROS_AR8172:
gphy = CSR_READ_4(sc, ALC_GPHY_CFG);
gphy &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
GPHY_CFG_100AB_ENB | GPHY_CFG_PHY_PLL_ON);
gphy |= GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
GPHY_CFG_SEL_ANA_RESET;
gphy |= GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW;
CSR_WRITE_4(sc, ALC_GPHY_CFG, gphy);
break;
case DEVICEID_ATHEROS_AR8151:
case DEVICEID_ATHEROS_AR8151_V2:
case DEVICEID_ATHEROS_AR8152_B:
case DEVICEID_ATHEROS_AR8152_B2:
/*
 * GPHY power down caused more problems on AR8151 v2.0.
 * When driver is reloaded after GPHY power down,
 * accesses to PHY/MAC registers hung the system. Only
 * cold boot recovered from it. I'm not sure whether
 * AR8151 v1.0 also requires this one though. I don't
 * have AR8151 v1.0 controller in hand.
 * The only option left is to isolate the PHY and
 * initiates power down the PHY which in turn saves
 * more power when driver is unloaded.
 */
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
MII_BMCR, BMCR_ISO | BMCR_PDOWN);
break;
default:
/* Force PHY down. */
CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ |
GPHY_CFG_PWDOWN_HW);
DELAY(1000);
break;
}
}
/*
 * Reprogram ASPM (PCIe L0s/L1) state.  The AR816x path uses 'init'
 * (non-zero at attach time), the AR813x path uses the active 'media'.
 */
static void
alc_aspm(struct alc_softc *sc, int init, int media)
{
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0)
		alc_aspm_813x(sc, media);
	else
		alc_aspm_816x(sc, init);
}
/*
 * Program PM_CFG ASPM (L0s/L1) settings for AR813x/AR815x parts based
 * on link state, active media and per-device quirks (APS-capable
 * parts get extra timer/extended-sync handling).  A no-op until a
 * link has been established.
 */
static void
alc_aspm_813x(struct alc_softc *sc, int media)
{
uint32_t pmcfg;
uint16_t linkcfg;
if ((sc->alc_flags & ALC_FLAG_LINK) == 0)
return;
pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
/* Only APS-capable PCIe parts need the link-control register. */
if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) ==
(ALC_FLAG_APS | ALC_FLAG_PCIE))
linkcfg = CSR_READ_2(sc, sc->alc_expcap +
PCIER_LINK_CTL);
else
linkcfg = 0;
pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK);
pmcfg |= PM_CFG_MAC_ASPM_CHK;
pmcfg |= (PM_CFG_LCKDET_TIMER_DEFAULT << PM_CFG_LCKDET_TIMER_SHIFT);
pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
/* Disable extended sync except AR8152 B v1.0 */
linkcfg &= ~PCIEM_LINK_CTL_EXTENDED_SYNC;
if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B &&
sc->alc_rev == ATHEROS_AR8152_B_V10)
linkcfg |= PCIEM_LINK_CTL_EXTENDED_SYNC;
CSR_WRITE_2(sc, sc->alc_expcap + PCIER_LINK_CTL,
linkcfg);
pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB |
PM_CFG_HOTRST);
pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT <<
PM_CFG_L1_ENTRY_TIMER_SHIFT);
pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT <<
PM_CFG_PM_REQ_TIMER_SHIFT);
pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV;
}
if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
/* Enable only the ASPM states the chipset advertised. */
if ((sc->alc_flags & ALC_FLAG_L0S) != 0)
pmcfg |= PM_CFG_ASPM_L0S_ENB;
if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
pmcfg |= PM_CFG_ASPM_L1_ENB;
if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
if (sc->alc_ident->deviceid ==
DEVICEID_ATHEROS_AR8152_B)
pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
pmcfg &= ~(PM_CFG_SERDES_L1_ENB |
PM_CFG_SERDES_PLL_L1_ENB |
PM_CFG_SERDES_BUDS_RX_L1_ENB);
pmcfg |= PM_CFG_CLK_SWH_L1;
/* Device-specific L1 entry timers for 100/1000 links. */
if (media == IFM_100_TX || media == IFM_1000_T) {
pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
switch (sc->alc_ident->deviceid) {
case DEVICEID_ATHEROS_AR8152_B:
pmcfg |= (7 <<
PM_CFG_L1_ENTRY_TIMER_SHIFT);
break;
case DEVICEID_ATHEROS_AR8152_B2:
case DEVICEID_ATHEROS_AR8151_V2:
pmcfg |= (4 <<
PM_CFG_L1_ENTRY_TIMER_SHIFT);
break;
default:
pmcfg |= (15 <<
PM_CFG_L1_ENTRY_TIMER_SHIFT);
break;
}
}
} else {
pmcfg |= PM_CFG_SERDES_L1_ENB |
PM_CFG_SERDES_PLL_L1_ENB |
PM_CFG_SERDES_BUDS_RX_L1_ENB;
pmcfg &= ~(PM_CFG_CLK_SWH_L1 |
PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
}
} else {
pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB |
PM_CFG_SERDES_PLL_L1_ENB);
pmcfg |= PM_CFG_CLK_SWH_L1;
if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
pmcfg |= PM_CFG_ASPM_L1_ENB;
}
CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}
/*
 * Program PM_CFG ASPM settings for AR816x-family parts.  With a link
 * up both L0s and L1 are enabled; otherwise L0s/L1 are enabled only
 * at attach time ('init' != 0, called from alc_init_pcie()), and L1
 * alone while the interface is running without link.
 */
static void
alc_aspm_816x(struct alc_softc *sc, int init)
{
uint32_t pmcfg;
pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_816X_MASK;
pmcfg |= PM_CFG_L1_ENTRY_TIMER_816X_DEFAULT;
pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
pmcfg |= PM_CFG_PM_REQ_TIMER_816X_DEFAULT;
pmcfg &= ~PM_CFG_LCKDET_TIMER_MASK;
pmcfg |= PM_CFG_LCKDET_TIMER_DEFAULT;
pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_CLK_SWH_L1 | PM_CFG_PCIE_RECV;
pmcfg &= ~(PM_CFG_RX_L1_AFTER_L0S | PM_CFG_TX_L1_AFTER_L0S |
PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB |
PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SA_DLY_ENB |
PM_CFG_MAC_ASPM_CHK | PM_CFG_HOTRST);
/* Early (<= A1, odd-revision) silicon needs the serdes L1 bits. */
if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
(sc->alc_rev & 0x01) != 0)
pmcfg |= PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB;
if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
/* Link up, enable both L0s, L1s. */
pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
PM_CFG_MAC_ASPM_CHK;
} else {
if (init != 0)
pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
PM_CFG_MAC_ASPM_CHK;
else if ((if_getdrvflags(sc->alc_ifp) & IFF_DRV_RUNNING) != 0)
pmcfg |= PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK;
}
CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}
/*
 * One-time PCIe tuning performed at attach: clear data-link/flow-
 * control error severity, apply chip-specific serdes/clock
 * workarounds, record the advertised ASPM capabilities and read
 * completion boundary in sc->alc_flags / sc->alc_rcb, then disable
 * L0s/L1 before handing off to alc_aspm().
 */
static void
alc_init_pcie(struct alc_softc *sc)
{
const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
uint32_t cap, ctl, val;
int state;
/* Clear data link and flow-control protocol error. */
val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);
if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
CSR_WRITE_4(sc, ALC_PCIE_PHYMISC,
CSR_READ_4(sc, ALC_PCIE_PHYMISC) |
PCIE_PHYMISC_FORCE_RCV_DET);
/* AR8152 B v1.0 needs fixed serdes CDR/threshold values. */
if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B &&
sc->alc_rev == ATHEROS_AR8152_B_V10) {
val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2);
val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK |
PCIE_PHYMISC2_SERDES_TH_MASK);
val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val);
}
/* Disable ASPM L0S and L1. */
cap = CSR_READ_2(sc, sc->alc_expcap + PCIER_LINK_CAP);
if ((cap & PCIEM_LINK_CAP_ASPM) != 0) {
ctl = CSR_READ_2(sc, sc->alc_expcap + PCIER_LINK_CTL);
if ((ctl & PCIEM_LINK_CTL_RCB) != 0)
sc->alc_rcb = DMA_CFG_RCB_128;
if (bootverbose)
device_printf(sc->alc_dev, "RCB %u bytes\n",
sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
/* Remember which ASPM states the link advertises. */
state = ctl & PCIEM_LINK_CTL_ASPMC;
if (state & PCIEM_LINK_CTL_ASPMC_L0S)
sc->alc_flags |= ALC_FLAG_L0S;
if (state & PCIEM_LINK_CTL_ASPMC_L1)
sc->alc_flags |= ALC_FLAG_L1S;
if (bootverbose)
device_printf(sc->alc_dev, "ASPM %s %s\n",
aspm_state[state],
state == 0 ? "disabled" : "enabled");
alc_disable_l0s_l1(sc);
} else {
if (bootverbose)
device_printf(sc->alc_dev,
"no ASPM support\n");
}
} else {
/* AR816x: disable the D3 PLL-off behavior and fix clocking. */
val = CSR_READ_4(sc, ALC_PDLL_TRNS1);
val &= ~PDLL_TRNS1_D3PLLOFF_ENB;
CSR_WRITE_4(sc, ALC_PDLL_TRNS1, val);
val = CSR_READ_4(sc, ALC_MASTER_CFG);
if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
(sc->alc_rev & 0x01) != 0) {
if ((val & MASTER_WAKEN_25M) == 0 ||
(val & MASTER_CLK_SEL_DIS) == 0) {
val |= MASTER_WAKEN_25M | MASTER_CLK_SEL_DIS;
CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
}
} else {
if ((val & MASTER_WAKEN_25M) == 0 ||
(val & MASTER_CLK_SEL_DIS) != 0) {
val |= MASTER_WAKEN_25M;
val &= ~MASTER_CLK_SEL_DIS;
CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
}
}
}
alc_aspm(sc, 1, IFM_UNKNOWN);
}
/*
 * Configure MSI/MSI-X interrupt moderation on AR816x-family parts
 * by programming the retransmission timer from the Rx moderation
 * setting.  No-op on older controllers.
 */
static void
alc_config_msi(struct alc_softc *sc)
{
uint32_t ctl, mod;
if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
/*
 * It seems interrupt moderation is controlled by
 * ALC_MSI_RETRANS_TIMER register if MSI/MSIX is active.
 * Driver uses RX interrupt moderation parameter to
 * program ALC_MSI_RETRANS_TIMER register.
 */
ctl = CSR_READ_4(sc, ALC_MSI_RETRANS_TIMER);
ctl &= ~MSI_RETRANS_TIMER_MASK;
ctl &= ~MSI_RETRANS_MASK_SEL_LINE;
mod = ALC_USECS(sc->alc_int_rx_mod);
/* The timer field must be non-zero. */
if (mod == 0)
mod = 1;
ctl |= mod;
if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, ctl |
MSI_RETRANS_MASK_SEL_STD);
else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, ctl |
MSI_RETRANS_MASK_SEL_LINE);
else
CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, 0);
}
}
/*
 * Device attach: identify the controller variant, map registers,
 * allocate DMA resources and interrupts, create the ifnet and attach
 * the PHY.  On any failure this jumps to "fail", which runs
 * alc_detach() to unwind whatever was set up so far.
 */
static int
alc_attach(device_t dev)
{
	struct alc_softc *sc;
	if_t ifp;
	int base, error, i, msic, msixc;
	uint16_t burst;

	error = 0;
	sc = device_get_softc(dev);
	sc->alc_dev = dev;
	sc->alc_rev = pci_get_revid(dev);

	mtx_init(&sc->alc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->alc_tick_ch, &sc->alc_mtx, 0);
	NET_TASK_INIT(&sc->alc_int_task, 0, alc_int_task, sc);
	sc->alc_ident = alc_find_ident(dev);

	/* Map the device. */
	pci_enable_busmaster(dev);
	sc->alc_res_spec = alc_res_spec_mem;
	sc->alc_irq_spec = alc_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->alc_res_spec, sc->alc_res);
	if (error != 0) {
		device_printf(dev, "cannot allocate memory resources.\n");
		goto fail;
	}

	/* Set PHY address. */
	sc->alc_phyaddr = ALC_PHY_ADDR;

	/*
	 * One odd thing is AR8132 uses the same PHY hardware(F1
	 * gigabit PHY) of AR8131. So atphy(4) of AR8132 reports
	 * the PHY supports 1000Mbps but that's not true. The PHY
	 * used in AR8132 can't establish gigabit link even if it
	 * shows the same PHY model/revision number of AR8131.
	 */
	switch (sc->alc_ident->deviceid) {
	case DEVICEID_ATHEROS_E2200:
	case DEVICEID_ATHEROS_E2400:
	case DEVICEID_ATHEROS_E2500:
		sc->alc_flags |= ALC_FLAG_E2X00;

		/*
		 * Disable MSI-X by default on Killer devices, since this is
		 * reported by several users to not work well.
		 */
		if (msix_disable == 2)
			msix_disable = 1;

		/* FALLTHROUGH */
	case DEVICEID_ATHEROS_AR8161:
		if (pci_get_subvendor(dev) == VENDORID_ATHEROS &&
		    pci_get_subdevice(dev) == 0x0091 && sc->alc_rev == 0)
			sc->alc_flags |= ALC_FLAG_LINK_WAR;
		/* FALLTHROUGH */
	case DEVICEID_ATHEROS_AR8171:
		sc->alc_flags |= ALC_FLAG_AR816X_FAMILY;
		break;
	case DEVICEID_ATHEROS_AR8162:
	case DEVICEID_ATHEROS_AR8172:
		sc->alc_flags |= ALC_FLAG_FASTETHER | ALC_FLAG_AR816X_FAMILY;
		break;
	case DEVICEID_ATHEROS_AR8152_B:
	case DEVICEID_ATHEROS_AR8152_B2:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	case DEVICEID_ATHEROS_AR8132:
		sc->alc_flags |= ALC_FLAG_FASTETHER;
		break;
	case DEVICEID_ATHEROS_AR8151:
	case DEVICEID_ATHEROS_AR8151_V2:
		sc->alc_flags |= ALC_FLAG_APS;
		if (CSR_READ_4(sc, ALC_MT_MAGIC) == MT_MAGIC)
			sc->alc_flags |= ALC_FLAG_MT;
		/* FALLTHROUGH */
	default:
		break;
	}

	/*
	 * The default value of msix_disable is 2, which means auto-detect. If
	 * we didn't auto-detect it, default to enabling it.
	 */
	if (msix_disable == 2)
		msix_disable = 0;

	sc->alc_flags |= ALC_FLAG_JUMBO;

	/*
	 * It seems that AR813x/AR815x has silicon bug for SMB. In
	 * addition, Atheros said that enabling SMB wouldn't improve
	 * performance. However I think it's bad to access lots of
	 * registers to extract MAC statistics.
	 */
	sc->alc_flags |= ALC_FLAG_SMB_BUG;

	/*
	 * Don't use Tx CMB. It is known to have silicon bug.
	 */
	sc->alc_flags |= ALC_FLAG_CMB_BUG;
	sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->alc_rev);
		device_printf(dev, "Chip id/revision : 0x%04x\n",
		    sc->alc_chip_rev);
		if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
			device_printf(dev, "AR816x revision : 0x%x\n",
			    AR816X_REV(sc->alc_rev));
	}
	device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n",
	    CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
	    CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);

	/* Initialize DMA parameters. */
	sc->alc_dma_rd_burst = 0;
	sc->alc_dma_wr_burst = 0;
	sc->alc_rcb = DMA_CFG_RCB_64;
	if (pci_find_cap(dev, PCIY_EXPRESS, &base) == 0) {
		sc->alc_flags |= ALC_FLAG_PCIE;
		sc->alc_expcap = base;
		burst = CSR_READ_2(sc, base + PCIER_DEVICE_CTL);
		sc->alc_dma_rd_burst =
		    (burst & PCIEM_CTL_MAX_READ_REQUEST) >> 12;
		sc->alc_dma_wr_burst = (burst & PCIEM_CTL_MAX_PAYLOAD) >> 5;
		if (bootverbose) {
			device_printf(dev, "Read request size : %u bytes.\n",
			    alc_dma_burst[sc->alc_dma_rd_burst]);
			device_printf(dev, "TLP payload size : %u bytes.\n",
			    alc_dma_burst[sc->alc_dma_wr_burst]);
		}
		if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024)
			sc->alc_dma_rd_burst = 3;
		if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024)
			sc->alc_dma_wr_burst = 3;
		/*
		 * Force maximum payload size to 128 bytes for
		 * E2200/E2400/E2500/AR8162/AR8171/AR8172.
		 * Otherwise it triggers DMA write error.
		 */
		if ((sc->alc_flags &
		    (ALC_FLAG_E2X00 | ALC_FLAG_AR816X_FAMILY)) != 0)
			sc->alc_dma_wr_burst = 0;
		alc_init_pcie(sc);
	}

	/* Reset PHY. */
	alc_phy_reset(sc);

	/* Reset the ethernet controller. */
	alc_stop_mac(sc);
	alc_reset(sc);

	/* Allocate IRQ resources. */
	msixc = pci_msix_count(dev);
	msic = pci_msi_count(dev);
	if (bootverbose) {
		device_printf(dev, "MSIX count : %d\n", msixc);
		device_printf(dev, "MSI count : %d\n", msic);
	}
	if (msixc > 1)
		msixc = 1;
	if (msic > 1)
		msic = 1;
	/*
	 * Prefer MSIX over MSI.
	 * AR816x controller has a silicon bug that MSI interrupt
	 * does not assert if PCIM_CMD_INTxDIS bit of command
	 * register is set. pci(4) was taught to handle that case.
	 */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 && msixc > 0 &&
		    pci_alloc_msix(dev, &msixc) == 0) {
			if (msic == 1) {
				device_printf(dev,
				    "Using %d MSIX message(s).\n", msixc);
				sc->alc_flags |= ALC_FLAG_MSIX;
				sc->alc_irq_spec = alc_irq_spec_msix;
			} else
				pci_release_msi(dev);
		}
		if (msi_disable == 0 && (sc->alc_flags & ALC_FLAG_MSIX) == 0 &&
		    msic > 0 && pci_alloc_msi(dev, &msic) == 0) {
			if (msic == 1) {
				device_printf(dev,
				    "Using %d MSI message(s).\n", msic);
				sc->alc_flags |= ALC_FLAG_MSI;
				sc->alc_irq_spec = alc_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
	}

	error = bus_alloc_resources(dev, sc->alc_irq_spec, sc->alc_irq);
	if (error != 0) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		goto fail;
	}

	/* Create device sysctl node. */
	alc_sysctl_node(sc);

	if ((error = alc_dma_alloc(sc)) != 0)
		goto fail;

	/* Load station address. */
	alc_get_macaddr(sc);

	/* if_alloc(9) uses M_WAITOK and cannot fail. */
	ifp = sc->alc_ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, alc_ioctl);
	if_setstartfn(ifp, alc_start);
	if_setinitfn(ifp, alc_init);
	if_setsendqlen(ifp, ALC_TX_RING_CNT - 1);
	if_setsendqready(ifp);
	if_setcapabilities(ifp, IFCAP_TXCSUM | IFCAP_TSO4);
	if_sethwassist(ifp, ALC_CSUM_FEATURES | CSUM_TSO);
	if (pci_find_cap(dev, PCIY_PMG, &base) == 0) {
		if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST, 0);
		sc->alc_flags |= ALC_FLAG_PM;
		sc->alc_pmcap = base;
	}
	if_setcapenable(ifp, if_getcapabilities(ifp));

	/* Set up MII bus. */
	error = mii_attach(dev, &sc->alc_miibus, ifp, alc_mediachange,
	    alc_mediastatus, BMSR_DEFCAPMASK, sc->alc_phyaddr, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->alc_eaddr);

	/* VLAN capability setup. */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));
	/*
	 * XXX
	 * It seems enabling Tx checksum offloading makes more trouble.
	 * Sometimes the controller does not receive any frames when
	 * Tx checksum offloading is enabled. I'm not sure whether this
	 * is a bug in Tx checksum offloading logic or I got broken
	 * sample boards. To safety, don't enable Tx checksum offloading
	 * by default but give chance to users to toggle it if they know
	 * their controllers work without problems.
	 * Fortunately, Tx checksum offloading for AR816x family
	 * seems to work.
	 */
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		if_setcapenablebit(ifp, 0, IFCAP_TXCSUM);
		if_sethwassistbits(ifp, 0, ALC_CSUM_FEATURES);
	}

	/* Tell the upper layer(s) we support long frames. */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	/* Create local taskq. */
	sc->alc_tq = taskqueue_create_fast("alc_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->alc_tq);
	if (sc->alc_tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->alc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->alc_dev));

	alc_config_msi(sc);
	if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
		msic = ALC_MSIX_MESSAGES;
	else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
		msic = ALC_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		error = bus_setup_intr(dev, sc->alc_irq[i],
		    INTR_TYPE_NET | INTR_MPSAFE, alc_intr, NULL, sc,
		    &sc->alc_intrhand[i]);
		if (error != 0)
			break;
	}
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->alc_tq);
		sc->alc_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

	/* Attach driver debugnet methods. */
	DEBUGNET_SET(ifp, alc);

fail:
	if (error != 0)
		alc_detach(dev);

	return (error);
}
/*
 * Device detach handler; tears down everything alc_attach() set up.
 * alc_attach() also calls this on its error path, so every resource
 * is checked for existence before it is released.
 */
static int
alc_detach(device_t dev)
{
	struct alc_softc *sc;
	if_t ifp;
	int i, msic;

	sc = device_get_softc(dev);

	ifp = sc->alc_ifp;
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		/* Stop the hardware under the driver lock before draining. */
		ALC_LOCK(sc);
		alc_stop(sc);
		ALC_UNLOCK(sc);
		callout_drain(&sc->alc_tick_ch);
		taskqueue_drain(sc->alc_tq, &sc->alc_int_task);
	}

	if (sc->alc_tq != NULL) {
		/*
		 * The interrupt task may already have been drained above
		 * when the device was attached; draining a second time
		 * is harmless.
		 */
		taskqueue_drain(sc->alc_tq, &sc->alc_int_task);
		taskqueue_free(sc->alc_tq);
		sc->alc_tq = NULL;
	}

	if (sc->alc_miibus != NULL) {
		device_delete_child(dev, sc->alc_miibus);
		sc->alc_miibus = NULL;
	}
	bus_generic_detach(dev);
	alc_dma_free(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->alc_ifp = NULL;
	}

	if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
		msic = ALC_MSIX_MESSAGES;
	else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
		msic = ALC_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		if (sc->alc_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->alc_irq[i],
			    sc->alc_intrhand[i]);
			sc->alc_intrhand[i] = NULL;
		}
	}
	/* Power down the PHY only while register access is still mapped. */
	if (sc->alc_res[0] != NULL)
		alc_phy_down(sc);
	bus_release_resources(dev, sc->alc_irq_spec, sc->alc_irq);
	if ((sc->alc_flags & (ALC_FLAG_MSI | ALC_FLAG_MSIX)) != 0)
		pci_release_msi(dev);
	bus_release_resources(dev, sc->alc_res_spec, sc->alc_res);
	mtx_destroy(&sc->alc_mtx);

	return (0);
}
#define	ALC_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define	ALC_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)

/*
 * Create the per-device sysctl tree: interrupt moderation and Rx
 * process-limit knobs (including hints(5) tunable intake with range
 * validation) plus the hardware MAC statistics under "stats.rx" and
 * "stats.tx".
 */
static void
alc_sysctl_node(struct alc_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct alc_hw_stats *stats;
	int error;

	stats = &sc->alc_stats;
	ctx = device_get_sysctl_ctx(sc->alc_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->alc_dev));

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->alc_int_rx_mod,
	    0, sysctl_hw_alc_int_mod, "I", "alc Rx interrupt moderation");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->alc_int_tx_mod,
	    0, sysctl_hw_alc_int_mod, "I", "alc Tx interrupt moderation");
	/* Pull in device tunables; out-of-range values fall back to default. */
	sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->alc_dev),
	    device_get_unit(sc->alc_dev), "int_rx_mod", &sc->alc_int_rx_mod);
	if (error == 0) {
		if (sc->alc_int_rx_mod < ALC_IM_TIMER_MIN ||
		    sc->alc_int_rx_mod > ALC_IM_TIMER_MAX) {
			device_printf(sc->alc_dev, "int_rx_mod value out of "
			    "range; using default: %d\n",
			    ALC_IM_RX_TIMER_DEFAULT);
			sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
		}
	}
	sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->alc_dev),
	    device_get_unit(sc->alc_dev), "int_tx_mod", &sc->alc_int_tx_mod);
	if (error == 0) {
		if (sc->alc_int_tx_mod < ALC_IM_TIMER_MIN ||
		    sc->alc_int_tx_mod > ALC_IM_TIMER_MAX) {
			device_printf(sc->alc_dev, "int_tx_mod value out of "
			    "range; using default: %d\n",
			    ALC_IM_TX_TIMER_DEFAULT);
			sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
		}
	}
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    &sc->alc_process_limit, 0, sysctl_hw_alc_proc_limit, "I",
	    "max number of Rx events to process");
	/* Pull in device tunables. */
	sc->alc_process_limit = ALC_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->alc_dev),
	    device_get_unit(sc->alc_dev), "process_limit",
	    &sc->alc_process_limit);
	if (error == 0) {
		if (sc->alc_process_limit < ALC_PROC_MIN ||
		    sc->alc_process_limit > ALC_PROC_MAX) {
			device_printf(sc->alc_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", ALC_PROC_DEFAULT);
			sc->alc_process_limit = ALC_PROC_DEFAULT;
		}
	}

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ALC statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_frames, "Good frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->rx_bcast_frames, "Good broadcast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->rx_mcast_frames, "Good multicast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
	    &stats->rx_control_frames, "Control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->rx_lenerrs, "Frames with length mismatched");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->rx_bytes, "Good octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
	    &stats->rx_bcast_bytes, "Good broadcast octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
	    &stats->rx_mcast_bytes, "Good multicast octets");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "runts",
	    &stats->rx_runts, "Too short frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "fragments",
	    &stats->rx_fragments, "Fragmented frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->rx_pkts_64, "64 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->rx_pkts_65_127, "65 to 127 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->rx_pkts_128_255, "128 to 255 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->rx_pkts_256_511, "256 to 511 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->rx_pkts_1519_max, "1519 to max frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
	    &stats->rx_pkts_truncated, "Truncated frames due to MTU size");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
	    &stats->rx_fifo_oflows, "FIFO overflows");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs",
	    &stats->rx_rrs_errs, "Return status write-back errors");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
	    &stats->rx_alignerrs, "Alignment errors");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "filtered",
	    &stats->rx_pkts_filtered,
	    "Frames dropped due to address filtering");

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->tx_bcast_frames, "Good broadcast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->tx_mcast_frames, "Good multicast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause_frames, "Pause control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
	    &stats->tx_control_frames, "Control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_defers",
	    &stats->tx_excess_defer, "Frames with excessive deferrals");
	/*
	 * NOTE(review): "defers" exports the same counter as
	 * "excess_defers" (tx_excess_defer); it likely should point at a
	 * plain-deferral counter -- confirm against struct alc_hw_stats.
	 */
	ALC_SYSCTL_STAT_ADD32(ctx, child, "defers",
	    &stats->tx_excess_defer, "Frames with deferrals");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->tx_bytes, "Good octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
	    &stats->tx_bcast_bytes, "Good broadcast octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
	    &stats->tx_mcast_bytes, "Good multicast octets");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->tx_pkts_64, "64 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->tx_pkts_65_127, "65 to 127 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->tx_pkts_128_255, "128 to 255 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->tx_pkts_256_511, "256 to 511 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->tx_pkts_1519_max, "1519 to max frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "single_colls",
	    &stats->tx_single_colls, "Single collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
	    &stats->tx_excess_colls, "Excessive collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "underruns",
	    &stats->tx_underrun, "FIFO underruns");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns",
	    &stats->tx_desc_underrun, "Descriptor write-back errors");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->tx_lenerrs, "Frames with length mismatched");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
	    &stats->tx_pkts_truncated, "Truncated frames due to MTU size");
}

#undef ALC_SYSCTL_STAT_ADD32
#undef ALC_SYSCTL_STAT_ADD64
/* Callback argument for alc_dmamap_cb(): receives the loaded bus address. */
struct alc_dmamap_arg {
	bus_addr_t	alc_busaddr;
};
/*
 * bus_dmamap_load(9) callback: record the single segment's bus address
 * into the caller-supplied alc_dmamap_arg.  Does nothing on load error.
 */
static void
alc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct alc_dmamap_arg *dmaarg;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	dmaarg = (struct alc_dmamap_arg *)arg;
	dmaarg->alc_busaddr = segs[0].ds_addr;
}
/*
 * Validate DMA address constraints of the control blocks.
 *
 * Normal and high Tx descriptors share a single Tx high address, and
 * the four Rx descriptor/return rings and the CMB share the same Rx
 * high address, so: no block may cross a 4GB boundary, and the Rx
 * ring, Rx return ring and CMB must all carry the same high address
 * word.  Returns 0 when the layout is usable, EFBIG otherwise.
 */
static int
alc_check_boundary(struct alc_softc *sc)
{
	bus_addr_t cmb_end, rr_ring_end, rx_ring_end, tx_ring_end;

	rx_ring_end = sc->alc_rdata.alc_rx_ring_paddr + ALC_RX_RING_SZ;
	rr_ring_end = sc->alc_rdata.alc_rr_ring_paddr + ALC_RR_RING_SZ;
	cmb_end = sc->alc_rdata.alc_cmb_paddr + ALC_CMB_SZ;
	tx_ring_end = sc->alc_rdata.alc_tx_ring_paddr + ALC_TX_RING_SZ;

	/* 4GB boundary crossing is not allowed for any block. */
	if (ALC_ADDR_HI(rx_ring_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_rx_ring_paddr))
		return (EFBIG);
	if (ALC_ADDR_HI(rr_ring_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_rr_ring_paddr))
		return (EFBIG);
	if (ALC_ADDR_HI(cmb_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_cmb_paddr))
		return (EFBIG);
	if (ALC_ADDR_HI(tx_ring_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_tx_ring_paddr))
		return (EFBIG);

	/* Rx return ring, Rx ring and CMB must share one high address. */
	if (ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(rr_ring_end) ||
	    ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(cmb_end))
		return (EFBIG);

	return (0);
}
static int
alc_dma_alloc(struct alc_softc *sc)
{
struct alc_txdesc *txd;
struct alc_rxdesc *rxd;
bus_addr_t lowaddr;
struct alc_dmamap_arg ctx;
int error, i;
lowaddr = BUS_SPACE_MAXADDR;
if (sc->alc_flags & ALC_FLAG_MT)
lowaddr = BUS_SPACE_MAXSIZE_32BIT;
again:
/* Create parent DMA tag. */
error = bus_dma_tag_create(
bus_get_dma_tag(sc->alc_dev), /* parent */
1, 0, /* alignment, boundary */
lowaddr, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
0, /* nsegments */
BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->alc_cdata.alc_parent_tag);
if (error != 0) {
device_printf(sc->alc_dev,
"could not create parent DMA tag.\n");
goto fail;
}
/* Create DMA tag for Tx descriptor ring. */
error = bus_dma_tag_create(
sc->alc_cdata.alc_parent_tag, /* parent */
ALC_TX_RING_ALIGN, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
ALC_TX_RING_SZ, /* maxsize */
1, /* nsegments */
ALC_TX_RING_SZ, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->alc_cdata.alc_tx_ring_tag);
if (error != 0) {
device_printf(sc->alc_dev,
"could not create Tx ring DMA tag.\n");
goto fail;
}
/* Create DMA tag for Rx free descriptor ring. */
error = bus_dma_tag_create(
sc->alc_cdata.alc_parent_tag, /* parent */
ALC_RX_RING_ALIGN, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
ALC_RX_RING_SZ, /* maxsize */
1, /* nsegments */
ALC_RX_RING_SZ, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->alc_cdata.alc_rx_ring_tag);
if (error != 0) {
device_printf(sc->alc_dev,
"could not create Rx ring DMA tag.\n");
goto fail;
}
/* Create DMA tag for Rx return descriptor ring. */
error = bus_dma_tag_create(
sc->alc_cdata.alc_parent_tag, /* parent */
ALC_RR_RING_ALIGN, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
ALC_RR_RING_SZ, /* maxsize */
1, /* nsegments */
ALC_RR_RING_SZ, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->alc_cdata.alc_rr_ring_tag);
if (error != 0) {
device_printf(sc->alc_dev,
"could not create Rx return ring DMA tag.\n");
goto fail;
}
/* Create DMA tag for coalescing message block. */
error = bus_dma_tag_create(
sc->alc_cdata.alc_parent_tag, /* parent */
ALC_CMB_ALIGN, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
ALC_CMB_SZ, /* maxsize */
1, /* nsegments */
ALC_CMB_SZ, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->alc_cdata.alc_cmb_tag);
if (error != 0) {
device_printf(sc->alc_dev,
"could not create CMB DMA tag.\n");
goto fail;
}
/* Create DMA tag for status message block. */
error = bus_dma_tag_create(
sc->alc_cdata.alc_parent_tag, /* parent */
ALC_SMB_ALIGN, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
ALC_SMB_SZ, /* maxsize */
1, /* nsegments */
ALC_SMB_SZ, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->alc_cdata.alc_smb_tag);
if (error != 0) {
device_printf(sc->alc_dev,
"could not create SMB DMA tag.\n");
goto fail;
}
/* Allocate DMA'able memory and load the DMA map for Tx ring. */
error = bus_dmamem_alloc(sc->alc_cdata.alc_tx_ring_tag,
(void **)&sc->alc_rdata.alc_tx_ring,
BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
&sc->alc_cdata.alc_tx_ring_map);
if (error != 0) {
device_printf(sc->alc_dev,
"could not allocate DMA'able memory for Tx ring.\n");
goto fail;
}
ctx.alc_busaddr = 0;
error = bus_dmamap_load(sc->alc_cdata.alc_tx_ring_tag,
sc->alc_cdata.alc_tx_ring_map, sc->alc_rdata.alc_tx_ring,
ALC_TX_RING_SZ, alc_dmamap_cb, &ctx, 0);
if (error != 0 || ctx.alc_busaddr == 0) {
device_printf(sc->alc_dev,
"could not load DMA'able memory for Tx ring.\n");
goto fail;
}
sc->alc_rdata.alc_tx_ring_paddr = ctx.alc_busaddr;
/* Allocate DMA'able memory and load the DMA map for Rx ring. */
error = bus_dmamem_alloc(sc->alc_cdata.alc_rx_ring_tag,
(void **)&sc->alc_rdata.alc_rx_ring,
BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
&sc->alc_cdata.alc_rx_ring_map);
if (error != 0) {
device_printf(sc->alc_dev,
"could not allocate DMA'able memory for Rx ring.\n");
goto fail;
}
ctx.alc_busaddr = 0;
error = bus_dmamap_load(sc->alc_cdata.alc_rx_ring_tag,
sc->alc_cdata.alc_rx_ring_map, sc->alc_rdata.alc_rx_ring,
ALC_RX_RING_SZ, alc_dmamap_cb, &ctx, 0);
if (error != 0 || ctx.alc_busaddr == 0) {
device_printf(sc->alc_dev,
"could not load DMA'able memory for Rx ring.\n");
goto fail;
}
sc->alc_rdata.alc_rx_ring_paddr = ctx.alc_busaddr;
/* Allocate DMA'able memory and load the DMA map for Rx return ring. */
error = bus_dmamem_alloc(sc->alc_cdata.alc_rr_ring_tag,
(void **)&sc->alc_rdata.alc_rr_ring,
BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
&sc->alc_cdata.alc_rr_ring_map);
if (error != 0) {
device_printf(sc->alc_dev,
"could not allocate DMA'able memory for Rx return ring.\n");
goto fail;
}
ctx.alc_busaddr = 0;
error = bus_dmamap_load(sc->alc_cdata.alc_rr_ring_tag,
sc->alc_cdata.alc_rr_ring_map, sc->alc_rdata.alc_rr_ring,
ALC_RR_RING_SZ, alc_dmamap_cb, &ctx, 0);
if (error != 0 || ctx.alc_busaddr == 0) {
device_printf(sc->alc_dev,
"could not load DMA'able memory for Tx ring.\n");
goto fail;
}
sc->alc_rdata.alc_rr_ring_paddr = ctx.alc_busaddr;
/* Allocate DMA'able memory and load the DMA map for CMB. */
error = bus_dmamem_alloc(sc->alc_cdata.alc_cmb_tag,
(void **)&sc->alc_rdata.alc_cmb,
BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
&sc->alc_cdata.alc_cmb_map);
if (error != 0) {
device_printf(sc->alc_dev,
"could not allocate DMA'able memory for CMB.\n");
goto fail;
}
ctx.alc_busaddr = 0;
error = bus_dmamap_load(sc->alc_cdata.alc_cmb_tag,
sc->alc_cdata.alc_cmb_map, sc->alc_rdata.alc_cmb,
ALC_CMB_SZ, alc_dmamap_cb, &ctx, 0);
if (error != 0 || ctx.alc_busaddr == 0) {
device_printf(sc->alc_dev,
"could not load DMA'able memory for CMB.\n");
goto fail;
}
sc->alc_rdata.alc_cmb_paddr = ctx.alc_busaddr;
/* Allocate DMA'able memory and load the DMA map for SMB. */
error = bus_dmamem_alloc(sc->alc_cdata.alc_smb_tag,
(void **)&sc->alc_rdata.alc_smb,
BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
&sc->alc_cdata.alc_smb_map);
if (error != 0) {
device_printf(sc->alc_dev,
"could not allocate DMA'able memory for SMB.\n");
goto fail;
}
ctx.alc_busaddr = 0;
error = bus_dmamap_load(sc->alc_cdata.alc_smb_tag,
sc->alc_cdata.alc_smb_map, sc->alc_rdata.alc_smb,
ALC_SMB_SZ, alc_dmamap_cb, &ctx, 0);
if (error != 0 || ctx.alc_busaddr == 0) {
device_printf(sc->alc_dev,
"could not load DMA'able memory for CMB.\n");
goto fail;
}
sc->alc_rdata.alc_smb_paddr = ctx.alc_busaddr;
/* Make sure we've not crossed 4GB boundary. */
if (lowaddr != BUS_SPACE_MAXADDR_32BIT &&
(error = alc_check_boundary(sc)) != 0) {
device_printf(sc->alc_dev, "4GB boundary crossed, "
"switching to 32bit DMA addressing mode.\n");
alc_dma_free(sc);
/*
* Limit max allowable DMA address space to 32bit
* and try again.
*/
lowaddr = BUS_SPACE_MAXADDR_32BIT;
goto again;
}
/*
* Create Tx buffer parent tag.
* AR81[3567]x allows 64bit DMA addressing of Tx/Rx buffers
* so it needs separate parent DMA tag as parent DMA address
* space could be restricted to be within 32bit address space
* by 4GB boundary crossing.
*/
error = bus_dma_tag_create(
bus_get_dma_tag(sc->alc_dev), /* parent */
1, 0, /* alignment, boundary */
lowaddr, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
0, /* nsegments */
BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->alc_cdata.alc_buffer_tag);
if (error != 0) {
device_printf(sc->alc_dev,
"could not create parent buffer DMA tag.\n");
goto fail;
}
/* Create DMA tag for Tx buffers. */
error = bus_dma_tag_create(
sc->alc_cdata.alc_buffer_tag, /* parent */
1, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
ALC_TSO_MAXSIZE, /* maxsize */
ALC_MAXTXSEGS, /* nsegments */
ALC_TSO_MAXSEGSIZE, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->alc_cdata.alc_tx_tag);
if (error != 0) {
device_printf(sc->alc_dev, "could not create Tx DMA tag.\n");
goto fail;
}
/* Create DMA tag for Rx buffers. */
error = bus_dma_tag_create(
sc->alc_cdata.alc_buffer_tag, /* parent */
ALC_RX_BUF_ALIGN, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
MCLBYTES, /* maxsize */
1, /* nsegments */
MCLBYTES, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->alc_cdata.alc_rx_tag);
if (error != 0) {
device_printf(sc->alc_dev, "could not create Rx DMA tag.\n");
goto fail;
}
/* Create DMA maps for Tx buffers. */
for (i = 0; i < ALC_TX_RING_CNT; i++) {
txd = &sc->alc_cdata.alc_txdesc[i];
txd->tx_m = NULL;
txd->tx_dmamap = NULL;
error = bus_dmamap_create(sc->alc_cdata.alc_tx_tag, 0,
&txd->tx_dmamap);
if (error != 0) {
device_printf(sc->alc_dev,
"could not create Tx dmamap.\n");
goto fail;
}
}
/* Create DMA maps for Rx buffers. */
if ((error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 0,
&sc->alc_cdata.alc_rx_sparemap)) != 0) {
device_printf(sc->alc_dev,
"could not create spare Rx dmamap.\n");
goto fail;
}
for (i = 0; i < ALC_RX_RING_CNT; i++) {
rxd = &sc->alc_cdata.alc_rxdesc[i];
rxd->rx_m = NULL;
rxd->rx_dmamap = NULL;
error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 0,
&rxd->rx_dmamap);
if (error != 0) {
device_printf(sc->alc_dev,
"could not create Rx dmamap.\n");
goto fail;
}
}
fail:
return (error);
}
/*
 * Release everything alc_dma_alloc() created, in reverse dependency
 * order: per-buffer maps and tags first, then each ring/block
 * (unload map, free memory, destroy tag), and finally the two parent
 * tags.  Safe to call on a partially-allocated state: each pointer is
 * NULL-checked and a non-zero paddr is used as the "map is loaded"
 * indicator.
 */
static void
alc_dma_free(struct alc_softc *sc)
{
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	int i;

	/* Tx buffers. */
	if (sc->alc_cdata.alc_tx_tag != NULL) {
		for (i = 0; i < ALC_TX_RING_CNT; i++) {
			txd = &sc->alc_cdata.alc_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->alc_cdata.alc_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->alc_cdata.alc_tx_tag);
		sc->alc_cdata.alc_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->alc_cdata.alc_rx_tag != NULL) {
		for (i = 0; i < ALC_RX_RING_CNT; i++) {
			rxd = &sc->alc_cdata.alc_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->alc_cdata.alc_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
			    sc->alc_cdata.alc_rx_sparemap);
			sc->alc_cdata.alc_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->alc_cdata.alc_rx_tag);
		sc->alc_cdata.alc_rx_tag = NULL;
	}
	/* Tx descriptor ring. */
	if (sc->alc_cdata.alc_tx_ring_tag != NULL) {
		/* paddr != 0 means the map was successfully loaded. */
		if (sc->alc_rdata.alc_tx_ring_paddr != 0)
			bus_dmamap_unload(sc->alc_cdata.alc_tx_ring_tag,
			    sc->alc_cdata.alc_tx_ring_map);
		if (sc->alc_rdata.alc_tx_ring != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_tx_ring_tag,
			    sc->alc_rdata.alc_tx_ring,
			    sc->alc_cdata.alc_tx_ring_map);
		sc->alc_rdata.alc_tx_ring_paddr = 0;
		sc->alc_rdata.alc_tx_ring = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_tx_ring_tag);
		sc->alc_cdata.alc_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->alc_cdata.alc_rx_ring_tag != NULL) {
		if (sc->alc_rdata.alc_rx_ring_paddr != 0)
			bus_dmamap_unload(sc->alc_cdata.alc_rx_ring_tag,
			    sc->alc_cdata.alc_rx_ring_map);
		if (sc->alc_rdata.alc_rx_ring != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_rx_ring_tag,
			    sc->alc_rdata.alc_rx_ring,
			    sc->alc_cdata.alc_rx_ring_map);
		sc->alc_rdata.alc_rx_ring_paddr = 0;
		sc->alc_rdata.alc_rx_ring = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_rx_ring_tag);
		sc->alc_cdata.alc_rx_ring_tag = NULL;
	}
	/* Rx return ring. */
	if (sc->alc_cdata.alc_rr_ring_tag != NULL) {
		if (sc->alc_rdata.alc_rr_ring_paddr != 0)
			bus_dmamap_unload(sc->alc_cdata.alc_rr_ring_tag,
			    sc->alc_cdata.alc_rr_ring_map);
		if (sc->alc_rdata.alc_rr_ring != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_rr_ring_tag,
			    sc->alc_rdata.alc_rr_ring,
			    sc->alc_cdata.alc_rr_ring_map);
		sc->alc_rdata.alc_rr_ring_paddr = 0;
		sc->alc_rdata.alc_rr_ring = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_rr_ring_tag);
		sc->alc_cdata.alc_rr_ring_tag = NULL;
	}
	/* CMB block */
	if (sc->alc_cdata.alc_cmb_tag != NULL) {
		if (sc->alc_rdata.alc_cmb_paddr != 0)
			bus_dmamap_unload(sc->alc_cdata.alc_cmb_tag,
			    sc->alc_cdata.alc_cmb_map);
		if (sc->alc_rdata.alc_cmb != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_cmb_tag,
			    sc->alc_rdata.alc_cmb,
			    sc->alc_cdata.alc_cmb_map);
		sc->alc_rdata.alc_cmb_paddr = 0;
		sc->alc_rdata.alc_cmb = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_cmb_tag);
		sc->alc_cdata.alc_cmb_tag = NULL;
	}
	/* SMB block */
	if (sc->alc_cdata.alc_smb_tag != NULL) {
		if (sc->alc_rdata.alc_smb_paddr != 0)
			bus_dmamap_unload(sc->alc_cdata.alc_smb_tag,
			    sc->alc_cdata.alc_smb_map);
		if (sc->alc_rdata.alc_smb != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_smb_tag,
			    sc->alc_rdata.alc_smb,
			    sc->alc_cdata.alc_smb_map);
		sc->alc_rdata.alc_smb_paddr = 0;
		sc->alc_rdata.alc_smb = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_smb_tag);
		sc->alc_cdata.alc_smb_tag = NULL;
	}
	/* Parent tags go last: their children are gone by now. */
	if (sc->alc_cdata.alc_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->alc_cdata.alc_buffer_tag);
		sc->alc_cdata.alc_buffer_tag = NULL;
	}
	if (sc->alc_cdata.alc_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->alc_cdata.alc_parent_tag);
		sc->alc_cdata.alc_parent_tag = NULL;
	}
}
static int
alc_shutdown(device_t dev)
{

	/* Shutdown is handled exactly like suspend: stop MAC, arm WOL. */
	return (alc_suspend(dev));
}
/*
 * Note, this driver resets the link speed to 10/100Mbps by
 * restarting auto-negotiation in suspend/shutdown phase but we
 * don't know whether that auto-negotiation would succeed or not
 * as driver has no control after powering off/suspend operation.
 * If the renegotiation fails, WOL may not work. Running at 1Gbps
 * will draw more power than 375mA at 3.3V which is specified in
 * the PCI specification and that would result in power to the
 * ethernet controller being shut down completely.
 *
 * TODO
 * Save current negotiated media speed/duplex/flow-control to
 * softc and restore the same link again after resuming. PHY
 * handling such as power down/resetting to 100Mbps may be better
 * handled in suspend method in phy driver.
 */
static void
alc_setlinkspeed(struct alc_softc *sc)
{
struct mii_data *mii;
int aneg, i;
mii = device_get_softc(sc->alc_miibus);
mii_pollstat(mii);
aneg = 0;
if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
(IFM_ACTIVE | IFM_AVALID)) {
switch IFM_SUBTYPE(mii->mii_media_active) {
case IFM_10_T:
case IFM_100_TX:
return;
case IFM_1000_T:
aneg++;
break;
default:
break;
}
}
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, MII_100T2CR, 0);
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
DELAY(1000);
if (aneg != 0) {
/*
* Poll link state until alc(4) get a 10/100Mbps link.
*/
for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
mii_pollstat(mii);
if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
== (IFM_ACTIVE | IFM_AVALID)) {
switch (IFM_SUBTYPE(
mii->mii_media_active)) {
case IFM_10_T:
case IFM_100_TX:
alc_mac_config(sc);
return;
default:
break;
}
}
ALC_UNLOCK(sc);
pause("alclnk", hz);
ALC_LOCK(sc);
}
if (i == MII_ANEGTICKS_GIGE)
device_printf(sc->alc_dev,
"establishing a link failed, WOL may not work!");
}
/*
* No link, force MAC to have 100Mbps, full-duplex link.
* This is the last resort and may/may not work.
*/
mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
alc_mac_config(sc);
}
/*
 * Dispatch WOL programming to the family-specific handler:
 * AR816x parts use alc_setwol_816x(), everything else the
 * AR813x/815x variant.
 */
static void
alc_setwol(struct alc_softc *sc)
{

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0)
		alc_setwol_813x(sc);
	else
		alc_setwol_816x(sc);
}
/*
 * Program wake-on-LAN state for AR813x/AR815x controllers on the
 * suspend/shutdown path.  Called with the driver lock held.
 */
static void
alc_setwol_813x(struct alc_softc *sc)
{
	if_t ifp;
	uint32_t reg, pmcs;
	uint16_t pmstat;

	ALC_LOCK_ASSERT(sc);

	alc_disable_l0s_l1(sc);
	ifp = sc->alc_ifp;
	if ((sc->alc_flags & ALC_FLAG_PM) == 0) {
		/* No PM capability: disable WOL and power the PHY down. */
		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
		reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC);
		reg |= PCIE_PHYMISC_FORCE_RCV_DET;
		CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg);
		/* Force PHY power down. */
		alc_phy_down(sc);
		CSR_WRITE_4(sc, ALC_MASTER_CFG,
		    CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS);
		return;
	}
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) {
		/* Drop gigabit parts to 10/100Mbps to reduce power draw. */
		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
			alc_setlinkspeed(sc);
		CSR_WRITE_4(sc, ALC_MASTER_CFG,
		    CSR_READ_4(sc, ALC_MASTER_CFG) & ~MASTER_CLK_SEL_DIS);
	}
	pmcs = 0;
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
		pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
	CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs);
	/* Strip debug/promiscuous modes; keep Rx running only for WOL. */
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI |
	    MAC_CFG_BCAST);
	if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) != 0)
		reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
		reg |= MAC_CFG_RX_ENB;
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);

	reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC);
	reg |= PCIE_PHYMISC_FORCE_RCV_DET;
	CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg);
	if ((if_getcapenable(ifp) & IFCAP_WOL) == 0) {
		/* WOL disabled, PHY power down. */
		alc_phy_down(sc);
		CSR_WRITE_4(sc, ALC_MASTER_CFG,
		    CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS);
	}
	/* Request PME. */
	pmstat = pci_read_config(sc->alc_dev,
	    sc->alc_pmcap + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->alc_dev,
	    sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2);
}
/*
 * Program wake-on-LAN state for AR816x-family controllers on the
 * suspend/shutdown path.  Called with the driver lock held.
 */
static void
alc_setwol_816x(struct alc_softc *sc)
{
	if_t ifp;
	uint32_t gphy, mac, master, pmcs, reg;
	uint16_t pmstat;

	ALC_LOCK_ASSERT(sc);

	ifp = sc->alc_ifp;
	master = CSR_READ_4(sc, ALC_MASTER_CFG);
	master &= ~MASTER_CLK_SEL_DIS;
	gphy = CSR_READ_4(sc, ALC_GPHY_CFG);
	gphy &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE | GPHY_CFG_100AB_ENB |
	    GPHY_CFG_PHY_PLL_ON);
	gphy |= GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET;
	if ((sc->alc_flags & ALC_FLAG_PM) == 0) {
		/* No PM capability: disable WOL, put PHY into IDDQ. */
		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
		gphy |= GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW;
		mac = CSR_READ_4(sc, ALC_MAC_CFG);
	} else {
		if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) {
			gphy |= GPHY_CFG_EXT_RESET;
			/* Drop gigabit parts to 10/100Mbps for WOL. */
			if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
				alc_setlinkspeed(sc);
		}
		pmcs = 0;
		if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
			pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
		CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs);
		/* Keep Rx alive only as far as WOL needs it. */
		mac = CSR_READ_4(sc, ALC_MAC_CFG);
		mac &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI |
		    MAC_CFG_BCAST);
		if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) != 0)
			mac |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
		if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
			mac |= MAC_CFG_RX_ENB;
		alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_S3DIG10,
		    ANEG_S3DIG10_SL);
	}

	/* Enable OSC. */
	reg = CSR_READ_4(sc, ALC_MISC);
	reg &= ~MISC_INTNLOSC_OPEN;
	CSR_WRITE_4(sc, ALC_MISC, reg);
	reg |= MISC_INTNLOSC_OPEN;
	CSR_WRITE_4(sc, ALC_MISC, reg);
	CSR_WRITE_4(sc, ALC_MASTER_CFG, master);
	CSR_WRITE_4(sc, ALC_MAC_CFG, mac);
	CSR_WRITE_4(sc, ALC_GPHY_CFG, gphy);
	reg = CSR_READ_4(sc, ALC_PDLL_TRNS1);
	reg |= PDLL_TRNS1_D3PLLOFF_ENB;
	CSR_WRITE_4(sc, ALC_PDLL_TRNS1, reg);
	if ((sc->alc_flags & ALC_FLAG_PM) != 0) {
		/* Request PME. */
		pmstat = pci_read_config(sc->alc_dev,
		    sc->alc_pmcap + PCIR_POWER_STATUS, 2);
		pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
		if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
			pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->alc_dev,
		    sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2);
	}
}
/*
 * Device suspend method: stop the MAC and arm wake-on-LAN.
 * Always succeeds.
 */
static int
alc_suspend(device_t dev)
{
	struct alc_softc *sc;

	sc = device_get_softc(dev);

	ALC_LOCK(sc);
	alc_stop(sc);
	alc_setwol(sc);
	ALC_UNLOCK(sc);

	return (0);
}
/*
 * Device resume method: clear PME state, reset the PHY and
 * re-initialize the interface if it was up.  Always succeeds.
 */
static int
alc_resume(device_t dev)
{
	struct alc_softc *sc;
	if_t ifp;
	uint16_t pmstat;

	sc = device_get_softc(dev);

	ALC_LOCK(sc);
	if ((sc->alc_flags & ALC_FLAG_PM) != 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->alc_dev,
		    sc->alc_pmcap + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->alc_dev,
			    sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2);
		}
	}
	/* Reset PHY. */
	alc_phy_reset(sc);
	ifp = sc->alc_ifp;
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		/* Force re-initialization of the running interface. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		alc_init_locked(sc);
	}
	ALC_UNLOCK(sc);

	return (0);
}
/*
 * Load an outgoing mbuf chain into the Tx descriptor ring.
 *
 * Handles checksum-offload/TSO header fixups (which may replace the
 * mbuf chain), DMA mapping with m_collapse() fallback on EFBIG, VLAN
 * tag insertion, and descriptor ring bookkeeping.  On success *m_head
 * may point to a different (fixed-up) chain.  Returns 0 on success;
 * on ENOBUFS from a full ring the chain is left intact for the caller
 * to re-queue, on other errors the chain may have been freed and
 * *m_head set to NULL.  Called with the driver lock held.
 */
static int
alc_encap(struct alc_softc *sc, struct mbuf **m_head)
{
	struct alc_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	struct mbuf *m;
	struct ip *ip;
	struct tcphdr *tcp;
	bus_dma_segment_t txsegs[ALC_MAXTXSEGS];
	bus_dmamap_t map;
	uint32_t cflags, hdrlen, ip_off, poff, vtag;
	int error, idx, nsegs, prod;

	ALC_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	m = *m_head;
	ip = NULL;
	tcp = NULL;
	ip_off = poff = 0;
	if ((m->m_pkthdr.csum_flags & (ALC_CSUM_FEATURES | CSUM_TSO)) != 0) {
		/*
		 * AR81[3567]x requires offset of TCP/UDP header in its
		 * Tx descriptor to perform Tx checksum offloading. TSO
		 * also requires TCP header offset and modification of
		 * IP/TCP header. This kind of operation takes many CPU
		 * cycles on FreeBSD so fast host CPU is required to get
		 * smooth TSO performance.
		 */
		struct ether_header *eh;

		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_NOWAIT);
			/* Release original mbufs. */
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}

		/* Pull up the Ethernet header to locate the IP header. */
		ip_off = sizeof(struct ether_header);
		m = m_pullup(m, ip_off);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/*
		 * Check if hardware VLAN insertion is off.
		 * Additional check for LLC/SNAP frame?
		 */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			ip_off = sizeof(struct ether_vlan_header);
			m = m_pullup(m, ip_off);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, ip_off + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		poff = ip_off + (ip->ip_hl << 2);
		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
			m = m_pullup(m, poff + sizeof(struct tcphdr));
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			tcp = (struct tcphdr *)(mtod(m, char *) + poff);
			m = m_pullup(m, poff + (tcp->th_off << 2));
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			/*
			 * Due to strict adherence of Microsoft NDIS
			 * Large Send specification, hardware expects
			 * a pseudo TCP checksum inserted by upper
			 * stack. Unfortunately the pseudo TCP
			 * checksum that NDIS refers to does not include
			 * TCP payload length so driver should recompute
			 * the pseudo checksum here. Hopefully this
			 * wouldn't be much burden on modern CPUs.
			 *
			 * Reset IP checksum and recompute TCP pseudo
			 * checksum as NDIS specification said.
			 */
			/* Re-fetch header pointers: m_pullup may move data. */
			ip = (struct ip *)(mtod(m, char *) + ip_off);
			tcp = (struct tcphdr *)(mtod(m, char *) + poff);
			ip->ip_sum = 0;
			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		}
		*m_head = m;
	}

	prod = sc->alc_cdata.alc_tx_prod;
	txd = &sc->alc_cdata.alc_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_tx_tag, map,
	    *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		/* Too many fragments; collapse the chain and retry once. */
		m = m_collapse(*m_head, M_NOWAIT, ALC_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_tx_tag, map,
		    *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check descriptor overrun. */
	if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) {
		/* Ring is full; unload and let the caller re-queue. */
		bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, map);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, map, BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = TD_ETHERNET;
	vtag = 0;
	desc = NULL;
	idx = 0;
	/* Configure VLAN hardware tag insertion. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		vtag = htons(m->m_pkthdr.ether_vtag);
		vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
		cflags |= TD_INS_VLAN_TAG;
	}
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		/* Request TSO and set MSS. */
		cflags |= TD_TSO | TD_TSO_DESCV1;
		cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << TD_MSS_SHIFT) &
		    TD_MSS_MASK;
		/* Set TCP header offset. */
		cflags |= (poff << TD_TCPHDR_OFFSET_SHIFT) &
		    TD_TCPHDR_OFFSET_MASK;
		/*
		 * AR81[3567]x requires the first buffer should
		 * only hold IP/TCP header data. Payload should
		 * be handled in other descriptors.
		 */
		hdrlen = poff + (tcp->th_off << 2);
		desc = &sc->alc_rdata.alc_tx_ring[prod];
		desc->len = htole32(TX_BYTES(hdrlen | vtag));
		desc->flags = htole32(cflags);
		desc->addr = htole64(txsegs[0].ds_addr);
		sc->alc_cdata.alc_tx_cnt++;
		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
		if (m->m_len - hdrlen > 0) {
			/* Handle remaining payload of the first fragment. */
			desc = &sc->alc_rdata.alc_tx_ring[prod];
			desc->len = htole32(TX_BYTES((m->m_len - hdrlen) |
			    vtag));
			desc->flags = htole32(cflags);
			desc->addr = htole64(txsegs[0].ds_addr + hdrlen);
			sc->alc_cdata.alc_tx_cnt++;
			ALC_DESC_INC(prod, ALC_TX_RING_CNT);
		}
		/* Handle remaining fragments. */
		idx = 1;
	} else if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
		/* Configure Tx checksum offload. */
#ifdef ALC_USE_CUSTOM_CSUM
		cflags |= TD_CUSTOM_CSUM;
		/* Set checksum start offset. */
		cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
		    TD_PLOAD_OFFSET_MASK;
		/* Set checksum insertion position of TCP/UDP. */
		cflags |= (((poff + m->m_pkthdr.csum_data) >> 1) <<
		    TD_CUSTOM_CSUM_OFFSET_SHIFT) & TD_CUSTOM_CSUM_OFFSET_MASK;
#else
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			cflags |= TD_IPCSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			cflags |= TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			cflags |= TD_UDPCSUM;
		/* Set TCP/UDP header offset. */
		cflags |= (poff << TD_L4HDR_OFFSET_SHIFT) &
		    TD_L4HDR_OFFSET_MASK;
#endif
	}
	/* Fill the rest of the DMA segments into the ring. */
	for (; idx < nsegs; idx++) {
		desc = &sc->alc_rdata.alc_tx_ring[prod];
		desc->len = htole32(TX_BYTES(txsegs[idx].ds_len) | vtag);
		desc->flags = htole32(cflags);
		desc->addr = htole64(txsegs[idx].ds_addr);
		sc->alc_cdata.alc_tx_cnt++;
		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->alc_cdata.alc_tx_prod = prod;
	/* Finally set EOP on the last descriptor. */
	prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT;
	desc = &sc->alc_rdata.alc_tx_ring[prod];
	desc->flags |= htole32(TD_EOP);
	/*
	 * Swap dmamap of the first and the last so the mbuf is freed
	 * from the descriptor that completes last.
	 */
	txd = &sc->alc_cdata.alc_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	return (0);
}
/*
 * ifnet if_start entry point: take the driver lock and hand off
 * to alc_start_locked().
 */
static void
alc_start(if_t ifp)
{
	struct alc_softc *sc = if_getsoftc(ifp);

	ALC_LOCK(sc);
	alc_start_locked(ifp);
	ALC_UNLOCK(sc);
}
/*
 * Drain the interface send queue into the Tx descriptor ring and
 * kick the hardware.  Called with the driver lock held.
 */
static void
alc_start_locked(if_t ifp)
{
	struct alc_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = if_getsoftc(ifp);

	ALC_LOCK_ASSERT(sc);

	/* Reclaim transmitted frames. */
	if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT)
		alc_txeof(sc);

	/* Bail if not running, already stalled, or no link. */
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->alc_flags & ALC_FLAG_LINK) == 0)
		return;

	for (enq = 0; !if_sendq_empty(ifp); ) {
		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (alc_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0)
		alc_start_tx(sc);
}
/*
 * Sync the Tx ring and notify the hardware of the new producer
 * index, then arm the Tx watchdog.
 */
static void
alc_start_tx(struct alc_softc *sc)
{

	/* Sync descriptors. */
	bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
	    sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
	/* Kick. Assume we're using normal Tx priority queue. */
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		CSR_WRITE_2(sc, ALC_MBOX_TD_PRI0_PROD_IDX,
		    (uint16_t)sc->alc_cdata.alc_tx_prod);
	else
		CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
		    (sc->alc_cdata.alc_tx_prod <<
		    MBOX_TD_PROD_LO_IDX_SHIFT) &
		    MBOX_TD_PROD_LO_IDX_MASK);
	/* Set a timeout in case the chip goes out to lunch. */
	sc->alc_watchdog_timer = ALC_TX_TIMEOUT;
}
/*
 * Per-tick Tx watchdog.  When the timer armed by alc_start_tx()
 * expires without Tx completions, reset and re-initialize the
 * interface.  Called with the driver lock held.
 */
static void
alc_watchdog(struct alc_softc *sc)
{
	if_t ifp;

	ALC_LOCK_ASSERT(sc);

	/* Timer not armed, or still counting down: nothing to do. */
	if (sc->alc_watchdog_timer == 0 || --sc->alc_watchdog_timer)
		return;

	ifp = sc->alc_ifp;
	if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
		if_printf(sc->alc_ifp, "watchdog timeout (lost link)\n");
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		alc_init_locked(sc);
		return;
	}
	if_printf(sc->alc_ifp, "watchdog timeout -- resetting\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	alc_init_locked(sc);
	/* Restart transmission if anything is still queued. */
	if (!if_sendq_empty(ifp))
		alc_start_locked(ifp);
}
/*
 * ifnet ioctl handler: MTU changes, interface flags, multicast
 * filter updates, media selection, and capability toggles.
 * Unhandled requests fall through to ether_ioctl().
 */
static int
alc_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct alc_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		/* Reject MTUs the chip (or non-jumbo parts) cannot take. */
		if (ifr->ifr_mtu < ETHERMIN ||
		    ifr->ifr_mtu > (sc->alc_ident->max_framelen -
		    sizeof(struct ether_vlan_header) - ETHER_CRC_LEN) ||
		    ((sc->alc_flags & ALC_FLAG_JUMBO) == 0 &&
		    ifr->ifr_mtu > ETHERMTU))
			error = EINVAL;
		else if (if_getmtu(ifp) != ifr->ifr_mtu) {
			ALC_LOCK(sc);
			if_setmtu(ifp, ifr->ifr_mtu);
			/* AR81[3567]x has 13 bits MSS field. */
			if (if_getmtu(ifp) > ALC_TSO_MTU &&
			    (if_getcapenable(ifp) & IFCAP_TSO4) != 0) {
				if_setcapenablebit(ifp, 0, IFCAP_TSO4);
				if_sethwassistbits(ifp, 0, CSUM_TSO);
				VLAN_CAPABILITIES(ifp);
			}
			ALC_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		ALC_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			/*
			 * Only reprogram the Rx filter when just the
			 * PROMISC/ALLMULTI bits changed; otherwise do a
			 * full re-init.
			 */
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
			    ((if_getflags(ifp) ^ sc->alc_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				alc_rxfilter(sc);
			else
				alc_init_locked(sc);
		} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			alc_stop(sc);
		sc->alc_if_flags = if_getflags(ifp);
		ALC_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		ALC_LOCK(sc);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			alc_rxfilter(sc);
		ALC_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->alc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		ALC_LOCK(sc);
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
			if_togglecapenable(ifp, IFCAP_TXCSUM);
			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
				if_sethwassistbits(ifp, ALC_CSUM_FEATURES, 0);
			else
				if_sethwassistbits(ifp, 0, ALC_CSUM_FEATURES);
		}
		if ((mask & IFCAP_TSO4) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
			if_togglecapenable(ifp, IFCAP_TSO4);
			if ((if_getcapenable(ifp) & IFCAP_TSO4) != 0) {
				/* AR81[3567]x has 13 bits MSS field. */
				if (if_getmtu(ifp) > ALC_TSO_MTU) {
					if_setcapenablebit(ifp, 0, IFCAP_TSO4);
					if_sethwassistbits(ifp, 0, CSUM_TSO);
				} else
					if_sethwassistbits(ifp, CSUM_TSO, 0);
			} else
				if_sethwassistbits(ifp, 0, CSUM_TSO);
		}
		if ((mask & IFCAP_WOL_MCAST) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL_MCAST) != 0)
			if_togglecapenable(ifp, IFCAP_WOL_MCAST);
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0)
			if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
			alc_rxvlan(sc);
		}
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM) != 0)
			if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
		/* VLAN hw csum/TSO require hw tagging to be enabled. */
		if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
			if_setcapenablebit(ifp, 0,
			    IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
		ALC_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
/*
 * Reprogram MAC speed/duplex/flow-control bits from the media
 * status resolved by the PHY.  Called with the driver lock held.
 */
static void
alc_mac_config(struct alc_softc *sc)
{
	struct mii_data *mii;
	uint32_t reg;

	ALC_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->alc_miibus);
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
	    MAC_CFG_SPEED_MASK);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2)
		reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	default:
		/* Unresolved media: leave speed bits cleared. */
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
		/* Enable pause in the directions negotiated by the PHY. */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
	}
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
}
/*
 * Clear hardware MIB statistics.  On healthy parts the SMB block is
 * reset by clearing its `updated` flag; on parts with the SMB bug the
 * MIB registers are clear-on-read, so they are read and discarded.
 */
static void
alc_stats_clear(struct alc_softc *sc)
{
	struct smb sb, *smb;
	uint32_t *reg;
	int i;

	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
		    sc->alc_cdata.alc_smb_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		smb = sc->alc_rdata.alc_smb;
		/* Update done, clear. */
		smb->updated = 0;
		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
		    sc->alc_cdata.alc_smb_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	} else {
		/*
		 * Walk the Rx MIB register range; reads are discarded,
		 * the struct smb member pointers only bound the loop.
		 */
		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
		    reg++) {
			CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
		/* Read Tx statistics. */
		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
		    reg++) {
			CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
	}
}
/*
 * Accumulate hardware MIB counters into the softc statistics and
 * update ifnet counters.  Reads from the DMA'd SMB block, or directly
 * from MIB registers on parts with the SMB bug.  Called with the
 * driver lock held.
 */
static void
alc_stats_update(struct alc_softc *sc)
{
	struct alc_hw_stats *stat;
	struct smb sb, *smb;
	if_t ifp;
	uint32_t *reg;
	int i;

	ALC_LOCK_ASSERT(sc);

	ifp = sc->alc_ifp;
	stat = &sc->alc_stats;
	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
		    sc->alc_cdata.alc_smb_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		smb = sc->alc_rdata.alc_smb;
		/* Hardware has not posted a new snapshot yet. */
		if (smb->updated == 0)
			return;
	} else {
		smb = &sb;
		/* Read Rx statistics. */
		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
		    reg++) {
			*reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
		/* Read Tx statistics. */
		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
		    reg++) {
			*reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
	}

	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_rrs_errs += smb->rx_rrs_errs;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	/* Update counters in ifnet. */
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, smb->tx_frames);

	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, smb->tx_single_colls +
	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT);

	if_inc_counter(ifp, IFCOUNTER_OERRORS, smb->tx_late_colls +
	    smb->tx_excess_colls + smb->tx_underrun + smb->tx_pkts_truncated);

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, smb->rx_frames);

	if_inc_counter(ifp, IFCOUNTER_IERRORS,
	    smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
	    smb->rx_alignerrs);

	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		/* Update done, clear. */
		smb->updated = 0;
		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
		    sc->alc_cdata.alc_smb_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
/*
 * Interrupt filter: mask further interrupts and defer the real work
 * to the taskqueue.  In multi-vector (MT) mode the status check is
 * skipped entirely and the task is enqueued unconditionally.
 */
static int
alc_intr(void *arg)
{
	struct alc_softc *sc;
	uint32_t status;

	sc = (struct alc_softc *)arg;
	if (sc->alc_flags & ALC_FLAG_MT) {
		taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task);
		return (FILTER_HANDLED);
	}
	status = CSR_READ_4(sc, ALC_INTR_STATUS);
	if ((status & ALC_INTRS) == 0)
		return (FILTER_STRAY);
	/* Disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, INTR_DIS_INT);
	taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task);
	return (FILTER_HANDLED);
}
/*
 * Deferred interrupt handler run from the taskqueue: process Rx,
 * handle DMA/TxQ error resets, restart Tx, and re-enable interrupts.
 * Re-enqueues itself while more Rx work is pending.
 */
static void
alc_int_task(void *arg, int pending)
{
	struct alc_softc *sc;
	if_t ifp;
	uint32_t status;
	int more;

	sc = (struct alc_softc *)arg;
	ifp = sc->alc_ifp;

	status = CSR_READ_4(sc, ALC_INTR_STATUS);
	ALC_LOCK(sc);
	/* Fold in Rx work left over from a previous pass. */
	if (sc->alc_morework != 0) {
		sc->alc_morework = 0;
		status |= INTR_RX_PKT;
	}
	if ((status & ALC_INTRS) == 0)
		goto done;

	/* Acknowledge interrupts but still disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);
	more = 0;
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		if ((status & INTR_RX_PKT) != 0) {
			more = alc_rxintr(sc, sc->alc_process_limit);
			if (more == EAGAIN)
				/* Process limit hit; resume next pass. */
				sc->alc_morework = 1;
			else if (more == EIO) {
				/* Fatal Rx error: re-init and bail. */
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				alc_init_locked(sc);
				ALC_UNLOCK(sc);
				return;
			}
		}
		if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
		    INTR_TXQ_TO_RST)) != 0) {
			if ((status & INTR_DMA_RD_TO_RST) != 0)
				device_printf(sc->alc_dev,
				    "DMA read error! -- resetting\n");
			if ((status & INTR_DMA_WR_TO_RST) != 0)
				device_printf(sc->alc_dev,
				    "DMA write error! -- resetting\n");
			if ((status & INTR_TXQ_TO_RST) != 0)
				device_printf(sc->alc_dev,
				    "TxQ reset! -- resetting\n");
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			alc_init_locked(sc);
			ALC_UNLOCK(sc);
			return;
		}
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
		    !if_sendq_empty(ifp))
			alc_start_locked(ifp);
	}

	/* More work pending: re-enqueue instead of re-enabling interrupts. */
	if (more == EAGAIN ||
	    (CSR_READ_4(sc, ALC_INTR_STATUS) & ALC_INTRS) != 0) {
		ALC_UNLOCK(sc);
		taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task);
		return;
	}

done:
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		/* Re-enable interrupts if we're running. */
		if (sc->alc_flags & ALC_FLAG_MT)
			CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);
		else
			CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
	}
	ALC_UNLOCK(sc);
}
/*
 * Reclaim completed Tx descriptors: read the hardware consumer index
 * (from the CMB when usable, otherwise from mailbox registers), free
 * transmitted mbufs and unload their DMA maps.  Called with the
 * driver lock held.
 */
static void
alc_txeof(struct alc_softc *sc)
{
	if_t ifp;
	struct alc_txdesc *txd;
	uint32_t cons, prod;

	ALC_LOCK_ASSERT(sc);

	ifp = sc->alc_ifp;

	if (sc->alc_cdata.alc_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
	    sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_POSTWRITE);
	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
		/* Consumer index is posted to the coalescing block. */
		bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag,
		    sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_POSTREAD);
		prod = sc->alc_rdata.alc_cmb->cons;
	} else {
		if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
			prod = CSR_READ_2(sc, ALC_MBOX_TD_PRI0_CONS_IDX);
		else {
			prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
			/* Assume we're using normal Tx priority queue. */
			prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
			    MBOX_TD_CONS_LO_IDX_SHIFT;
		}
	}
	cons = sc->alc_cdata.alc_tx_cons;
	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (; cons != prod; ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
		if (sc->alc_cdata.alc_tx_cnt <= 0)
			break;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		sc->alc_cdata.alc_tx_cnt--;
		txd = &sc->alc_cdata.alc_txdesc[cons];
		if (txd->tx_m != NULL) {
			/* Reclaim transmitted mbufs. */
			bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
		bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag,
		    sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_PREREAD);
	sc->alc_cdata.alc_tx_cons = cons;
	/*
	 * Unarm watchdog timer only when there is no pending
	 * frames in Tx queue.
	 */
	if (sc->alc_cdata.alc_tx_cnt == 0)
		sc->alc_watchdog_timer = 0;
}
/*
 * Attach a fresh mbuf cluster to an Rx descriptor, recycling the
 * spare DMA map.  Returns ENOBUFS on allocation/mapping failure,
 * leaving the old buffer in place so it can be reused.
 */
static int
alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd)
{
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX;
#ifndef __NO_STRICT_ALIGNMENT
	/* Reserve room so alc_fixup_rx() can realign the payload. */
	m_adj(m, sizeof(uint64_t));
#endif

	if (bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_rx_tag,
	    sc->alc_cdata.alc_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap);
	}
	/* Swap in the spare map and keep the old one as the new spare. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
	sc->alc_cdata.alc_rx_sparemap = map;
	bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rxd->rx_desc->addr = htole64(segs[0].ds_addr);
	return (0);
}
/*
 * Process up to `count` entries of the Rx return ring, dispatching
 * each frame via alc_rxeof().  Returns 0 when the ring drained within
 * the limit, EAGAIN when the limit was hit with work remaining, and
 * EIO on an impossible descriptor (caller resets the chip).
 */
static int
alc_rxintr(struct alc_softc *sc, int count)
{
	if_t ifp;
	struct rx_rdesc *rrd;
	uint32_t nsegs, status;
	int rr_cons, prog;

	bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
	    sc->alc_cdata.alc_rr_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
	    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_POSTWRITE);
	rr_cons = sc->alc_cdata.alc_rr_cons;
	ifp = sc->alc_ifp;
	for (prog = 0; (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0;) {
		if (count-- <= 0)
			break;
		rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
		status = le32toh(rrd->status);
		if ((status & RRD_VALID) == 0)
			break;
		nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo));
		if (nsegs == 0) {
			/* This should not happen! */
			device_printf(sc->alc_dev,
			    "unexpected segment count -- resetting\n");
			return (EIO);
		}
		alc_rxeof(sc, rrd);
		/* Clear Rx return status. */
		rrd->status = 0;
		ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
		/*
		 * NOTE(review): alc_rx_cons wraps with ALC_RR_RING_CNT;
		 * presumably the Rx ring and Rx return ring are the same
		 * size — confirm against the ring definitions.
		 */
		sc->alc_cdata.alc_rx_cons += nsegs;
		sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
		prog += nsegs;
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->alc_cdata.alc_rr_cons = rr_cons;
		/* Sync Rx return descriptors. */
		bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
		    sc->alc_cdata.alc_rr_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Sync updated Rx descriptors such that controller see
		 * modified buffer addresses.
		 */
		bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
		    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
		/*
		 * Let controller know availability of new Rx buffers.
		 * Since alc(4) use RXQ_CFG_RD_BURST_DEFAULT descriptors
		 * it may be possible to update ALC_MBOX_RD0_PROD_IDX
		 * only when Rx buffer pre-fetching is required. In
		 * addition we already set ALC_RX_RD_FREE_THRESH to
		 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However
		 * it still seems that pre-fetching needs more
		 * experimentation.
		 */
		if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
			CSR_WRITE_2(sc, ALC_MBOX_RD0_PROD_IDX,
			    (uint16_t)sc->alc_cdata.alc_rx_cons);
		else
			CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
			    sc->alc_cdata.alc_rx_cons);
	}

	return (count > 0 ? 0 : EAGAIN);
}
#ifndef __NO_STRICT_ALIGNMENT
/*
 * Realign a received frame on strict-alignment architectures by
 * shifting the data 6 bytes toward the buffer head (the headroom was
 * reserved in alc_newbuf()).  For multi-mbuf chains, a fresh mbuf is
 * prepended holding just the Ethernet header instead of copying the
 * whole frame.  Returns the (possibly new) chain head, or NULL after
 * freeing the chain on allocation failure.
 */
static struct mbuf *
alc_fixup_rx(if_t ifp, struct mbuf *m)
{
	struct mbuf *n;
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 3;

	if (m->m_next == NULL) {
		/* Copy in 16-bit words; +1 rounds up for an odd tail. */
		for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
			*dst++ = *src++;
		m->m_data -= 6;
		return (m);
	}
	/*
	 * Append a new mbuf to received mbuf chain and copy ethernet
	 * header from the mbuf chain. This can save lots of CPU
	 * cycles for jumbo frame.
	 */
	MGETHDR(n, M_NOWAIT, MT_DATA);
	if (n == NULL) {
		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
		m_freem(m);
		return (NULL);
	}
	bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
	m->m_data += ETHER_HDR_LEN;
	m->m_len -= ETHER_HDR_LEN;
	n->m_len = ETHER_HDR_LEN;
	M_MOVE_PKTHDR(n, m);
	n->m_next = m;
	return (n);
}
#endif
/*
 * Receive a frame.
 *
 * Hand one received frame, described by Rx return descriptor @rrd, to
 * the network stack.  A frame may span several Rx buffers; consumed
 * buffers are immediately replaced via alc_newbuf() and the segments
 * are chained into a single packet-header mbuf.  The trailing CRC is
 * trimmed in software since the controller cannot strip it.  Called
 * with the driver lock held; the lock is dropped around if_input().
 */
static void
alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
{
	struct alc_rxdesc *rxd;
	if_t ifp;
	struct mbuf *mp, *m;
	uint32_t rdinfo, status, vtag;
	int count, nsegs, rx_cons;

	ifp = sc->alc_ifp;
	status = le32toh(rrd->status);
	rdinfo = le32toh(rrd->rdinfo);
	rx_cons = RRD_RD_IDX(rdinfo);
	nsegs = RRD_RD_CNT(rdinfo);

	sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
	if ((status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) != 0) {
		/*
		 * We want to pass the following frames to upper
		 * layer regardless of error status of Rx return
		 * ring.
		 *
		 *  o IP/TCP/UDP checksum is bad.
		 *  o frame length and protocol specific length
		 *    does not match.
		 *
		 * Force network stack compute checksum for
		 * errored frames.
		 */
		status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK;
		if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN |
		    RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0)
			return;
	}

	for (count = 0; count < nsegs; count++,
	    ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
		rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
		mp = rxd->rx_m;
		/* Add a new receive buffer to the ring. */
		if (alc_newbuf(sc, rxd) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			/* Reuse Rx buffers. */
			if (sc->alc_cdata.alc_rxhead != NULL)
				m_freem(sc->alc_cdata.alc_rxhead);
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = sc->alc_buf_size;

		/* Chain received mbufs. */
		if (sc->alc_cdata.alc_rxhead == NULL) {
			sc->alc_cdata.alc_rxhead = mp;
			sc->alc_cdata.alc_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->alc_cdata.alc_rxprev_tail =
			    sc->alc_cdata.alc_rxtail;
			sc->alc_cdata.alc_rxtail->m_next = mp;
			sc->alc_cdata.alc_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->alc_cdata.alc_rxhead;
			m->m_flags |= M_PKTHDR;
			/*
			 * It seems that L1C/L2C controller has no way
			 * to tell hardware to strip CRC bytes.
			 */
			m->m_pkthdr.len =
			    sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
			if (nsegs > 1) {
				/* Set last mbuf size. */
				mp->m_len = sc->alc_cdata.alc_rxlen -
				    (nsegs - 1) * sc->alc_buf_size;
				/* Remove the CRC bytes in chained mbufs. */
				if (mp->m_len <= ETHER_CRC_LEN) {
					/*
					 * The CRC lies entirely within the
					 * last buffer; drop that buffer and
					 * trim the remaining CRC bytes off
					 * the previous one.
					 */
					sc->alc_cdata.alc_rxtail =
					    sc->alc_cdata.alc_rxprev_tail;
					sc->alc_cdata.alc_rxtail->m_len -=
					    (ETHER_CRC_LEN - mp->m_len);
					sc->alc_cdata.alc_rxtail->m_next = NULL;
					m_freem(mp);
				} else {
					mp->m_len -= ETHER_CRC_LEN;
				}
			} else
				m->m_len = m->m_pkthdr.len;
			m->m_pkthdr.rcvif = ifp;
			/*
			 * Due to hardware bugs, Rx checksum offloading
			 * was intentionally disabled.
			 */
			if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0 &&
			    (status & RRD_VLAN_TAG) != 0) {
				vtag = RRD_VLAN(le32toh(rrd->vtag));
				m->m_pkthdr.ether_vtag = ntohs(vtag);
				m->m_flags |= M_VLANTAG;
			}
#ifndef __NO_STRICT_ALIGNMENT
			m = alc_fixup_rx(ifp, m);
			if (m != NULL)
#endif
			{
				/* Pass it on. */
				ALC_UNLOCK(sc);
				if_input(ifp, m);
				ALC_LOCK(sc);
			}
		}
	}
	/* Reset mbuf chains. */
	ALC_RXCHAIN_RESET(sc);
}
/*
 * Once-per-second housekeeping callout: drive the PHY/MII state
 * machine, refresh MAC statistics, reclaim completed Tx buffers,
 * check the transmit watchdog and reschedule itself.  Runs with the
 * driver lock held (asserted below).
 */
static void
alc_tick(void *arg)
{
	struct alc_softc *sc;
	struct mii_data *mii;

	sc = (struct alc_softc *)arg;

	ALC_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->alc_miibus);
	mii_tick(mii);
	alc_stats_update(sc);
	/*
	 * alc(4) does not rely on Tx completion interrupts to reclaim
	 * transferred buffers. Instead Tx completion interrupts are
	 * used to hint for scheduling Tx task. So it's necessary to
	 * release transmitted buffers by kicking Tx completion
	 * handler. This limits the maximum reclamation delay to a hz.
	 */
	alc_txeof(sc);
	alc_watchdog(sc);
	callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);
}
/*
 * Reset the internal "25M" oscillator controls (MISC3_25M_* bits) on
 * AR816x-class controllers.  The register sequence is vendor magic:
 * revision B0 and later restore the over-current protection default
 * and restart calibration (MISC2_CALB_START); earlier revisions
 * instead toggle the internal-oscillator open bit and, on revision A
 * devices, clear the isolate bit.
 */
static void
alc_osc_reset(struct alc_softc *sc)
{
	uint32_t reg;

	reg = CSR_READ_4(sc, ALC_MISC3);
	reg &= ~MISC3_25M_BY_SW;
	reg |= MISC3_25M_NOTO_INTNL;
	CSR_WRITE_4(sc, ALC_MISC3, reg);

	reg = CSR_READ_4(sc, ALC_MISC);
	if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0) {
		/*
		 * Restore over-current protection default value.
		 * This value could be reset by MAC reset.
		 */
		reg &= ~MISC_PSW_OCP_MASK;
		reg |= (MISC_PSW_OCP_DEFAULT << MISC_PSW_OCP_SHIFT);
		reg &= ~MISC_INTNLOSC_OPEN;
		CSR_WRITE_4(sc, ALC_MISC, reg);
		CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN);
		reg = CSR_READ_4(sc, ALC_MISC2);
		reg &= ~MISC2_CALB_START;
		CSR_WRITE_4(sc, ALC_MISC2, reg);
		CSR_WRITE_4(sc, ALC_MISC2, reg | MISC2_CALB_START);
	} else {
		reg &= ~MISC_INTNLOSC_OPEN;
		/* Disable isolate for revision A devices. */
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1)
			reg &= ~MISC_ISO_ENB;
		CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN);
		CSR_WRITE_4(sc, ALC_MISC, reg);
	}

	/* Give the oscillator time to settle. */
	DELAY(20);
}
/*
 * Perform a full MAC reset and wait for the controller to report idle.
 *
 * AR816x parts need extra workarounds: a mailbox write whose read-back
 * of zero confirms reset completion, temporary disabling of ASPM
 * L0s/L1 around the reset on early revisions (restored afterwards),
 * and an internal-oscillator reset once the MAC is back.  Some chips
 * additionally get their SerDes MAC/PHY clocks slowed down at the end.
 */
static void
alc_reset(struct alc_softc *sc)
{
	uint32_t pmcfg, reg;
	int i;

	pmcfg = 0;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		/* Reset workaround. */
		CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, 1);
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
		    (sc->alc_rev & 0x01) != 0) {
			/* Disable L0s/L1s before reset. */
			pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
			if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB))
			    != 0) {
				pmcfg &= ~(PM_CFG_ASPM_L0S_ENB |
				    PM_CFG_ASPM_L1_ENB);
				CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
			}
		}
	}
	reg = CSR_READ_4(sc, ALC_MASTER_CFG);
	reg |= MASTER_OOB_DIS_OFF | MASTER_RESET;
	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		/* Mailbox reads back 0 once the reset took effect. */
		for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
			DELAY(10);
			if (CSR_READ_4(sc, ALC_MBOX_RD0_PROD_IDX) == 0)
				break;
		}
		if (i == 0)
			device_printf(sc->alc_dev, "MAC reset timeout!\n");
	}
	/* Wait for the MASTER_RESET bit to self-clear. */
	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->alc_dev, "master reset timeout!\n");
	/* Wait until Rx/Tx MACs and queues report idle. */
	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC |
		    IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		device_printf(sc->alc_dev, "reset timeout(0x%08x)!\n", reg);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
		    (sc->alc_rev & 0x01) != 0) {
			reg = CSR_READ_4(sc, ALC_MASTER_CFG);
			reg |= MASTER_CLK_SEL_DIS;
			CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
			/* Restore L0s/L1s config. */
			if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB))
			    != 0)
				CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
		}
		alc_osc_reset(sc);
		reg = CSR_READ_4(sc, ALC_MISC3);
		reg &= ~MISC3_25M_BY_SW;
		reg |= MISC3_25M_NOTO_INTNL;
		CSR_WRITE_4(sc, ALC_MISC3, reg);
		reg = CSR_READ_4(sc, ALC_MISC);
		reg &= ~MISC_INTNLOSC_OPEN;
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1)
			reg &= ~MISC_ISO_ENB;
		CSR_WRITE_4(sc, ALC_MISC, reg);
		DELAY(20);
	}
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2)
		CSR_WRITE_4(sc, ALC_SERDES_LOCK,
		    CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN |
		    SERDES_PHY_CLK_SLOWDOWN);
}
/*
 * Locked wrapper around alc_init_locked().  Takes the opaque softc
 * pointer form expected of an interface init callback.
 */
static void
alc_init(void *xsc)
{
	struct alc_softc *sc;

	sc = (struct alc_softc *)xsc;
	ALC_LOCK(sc);
	alc_init_locked(sc);
	ALC_UNLOCK(sc);
}
/*
 * Bring the controller fully up: stop any pending I/O, reset the MAC,
 * rebuild all DMA rings (Rx, Rx return, Tx, CMB, SMB), program their
 * addresses and sizes into the chip, then configure clock gating,
 * interrupt moderation, queues, flow control, the DMA engine and the
 * Tx/Rx MACs before starting the tick callout.  Must be called with
 * the driver lock held; returns immediately if already running.
 */
static void
alc_init_locked(struct alc_softc *sc)
{
	if_t ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, rxf_hi, rxf_lo;

	ALC_LOCK_ASSERT(sc);

	ifp = sc->alc_ifp;
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;
	/*
	 * Cancel any pending I/O.
	 */
	alc_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	alc_reset(sc);

	/* Initialize Rx descriptors. */
	if (alc_init_rx_ring(sc) != 0) {
		device_printf(sc->alc_dev, "no memory for Rx buffers.\n");
		alc_stop(sc);
		return;
	}
	alc_init_rr_ring(sc);
	alc_init_tx_ring(sc);
	alc_init_cmb(sc);
	alc_init_smb(sc);

	/* Enable all clocks. */
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, CLK_GATING_DMAW_ENB |
		    CLK_GATING_DMAR_ENB | CLK_GATING_TXQ_ENB |
		    CLK_GATING_RXQ_ENB | CLK_GATING_TXMAC_ENB |
		    CLK_GATING_RXMAC_ENB);
		if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0)
			CSR_WRITE_4(sc, ALC_IDLE_DECISN_TIMER,
			    IDLE_DECISN_TIMER_DEFAULT_1MS);
	} else
		CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0);

	/* Reprogram the station address. */
	bcopy(if_getlladdr(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, ALC_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
	/*
	 * Clear WOL status and disable all WOL feature as WOL
	 * would interfere Rx operation under normal environments.
	 */
	CSR_READ_4(sc, ALC_WOL_CFG);
	CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
	/* Set Tx descriptor base addresses. */
	paddr = sc->alc_rdata.alc_tx_ring_paddr;
	CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
	/* We don't use high priority ring. */
	CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, ALC_TD_RING_CNT,
	    (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
	/* Set Rx descriptor base addresses. */
	paddr = sc->alc_rdata.alc_rx_ring_paddr;
	CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* We use one Rx ring. */
		CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
		CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
		CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
	}
	/* Set Rx descriptor counter. */
	CSR_WRITE_4(sc, ALC_RD_RING_CNT,
	    (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);

	/*
	 * Let hardware split jumbo frames into alc_max_buf_sized chunks.
	 * if it do not fit the buffer size. Rx return descriptor holds
	 * a counter that indicates how many fragments were made by the
	 * hardware. The buffer size should be multiple of 8 bytes.
	 * Since hardware has limit on the size of buffer size, always
	 * use the maximum value.
	 * For strict-alignment architectures make sure to reduce buffer
	 * size by 8 bytes to make room for alignment fixup.
	 */
#ifndef __NO_STRICT_ALIGNMENT
	sc->alc_buf_size = RX_BUF_SIZE_MAX - sizeof(uint64_t);
#else
	sc->alc_buf_size = RX_BUF_SIZE_MAX;
#endif
	CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);

	paddr = sc->alc_rdata.alc_rr_ring_paddr;
	/* Set Rx return descriptor base addresses. */
	CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* We use one Rx return ring. */
		CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
		CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
		CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
	}
	/* Set Rx return descriptor counter. */
	CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
	    (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
	paddr = sc->alc_rdata.alc_cmb_paddr;
	CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
	paddr = sc->alc_rdata.alc_smb_paddr;
	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));

	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) {
		/* Reconfigure SRAM - Vendor magic. */
		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0);
		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100);
		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000);
		CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0);
		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0);
		CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0);
		CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000);
		CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000);
	}

	/* Tell hardware that we're ready to load DMA blocks. */
	CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);

	/* Configure interrupt moderation timer. */
	reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0)
		reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
	CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
	/*
	 * We don't want to automatic interrupt clear as task queue
	 * for the interrupt should know interrupt status.
	 */
	reg = CSR_READ_4(sc, ALC_MASTER_CFG);
	reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
	reg |= MASTER_SA_TIMER_ENB;
	if (ALC_USECS(sc->alc_int_rx_mod) != 0)
		reg |= MASTER_IM_RX_TIMER_ENB;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0 &&
	    ALC_USECS(sc->alc_int_tx_mod) != 0)
		reg |= MASTER_IM_TX_TIMER_ENB;
	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
	/*
	 * Disable interrupt re-trigger timer. We don't want automatic
	 * re-triggering of un-ACKed interrupts.
	 */
	CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
	/* Configure CMB. */
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, ALC_TX_RING_CNT / 3);
		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER,
		    ALC_USECS(sc->alc_int_tx_mod));
	} else {
		if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
			CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
			CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
		} else
			CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
	}
	/*
	 * Hardware can be configured to issue SMB interrupt based
	 * on programmed interval. Since there is a callout that is
	 * invoked for every hz in driver we use that instead of
	 * relying on periodic SMB interrupt.
	 */
	CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
	/* Clear MAC statistics. */
	alc_stats_clear(sc);

	/*
	 * Always use maximum frame size that controller can support.
	 * Otherwise received frames that has larger frame length
	 * than alc(4) MTU would be silently dropped in hardware. This
	 * would make path-MTU discovery hard as sender wouldn't get
	 * any responses from receiver. alc(4) supports
	 * multi-fragmented frames on Rx path so it has no issue on
	 * assembling fragmented frames. Using maximum frame size also
	 * removes the need to reinitialize hardware when interface
	 * MTU configuration was changed.
	 *
	 * Be conservative in what you do, be liberal in what you
	 * accept from others - RFC 793.
	 */
	CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_ident->max_framelen);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* Disable header split(?) */
		CSR_WRITE_4(sc, ALC_HDS_CFG, 0);

		/* Configure IPG/IFG parameters. */
		CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
		    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) &
		    IPG_IFG_IPGT_MASK) |
		    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) &
		    IPG_IFG_MIFG_MASK) |
		    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) &
		    IPG_IFG_IPG1_MASK) |
		    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) &
		    IPG_IFG_IPG2_MASK));
		/* Set parameters for half-duplex media. */
		CSR_WRITE_4(sc, ALC_HDPX_CFG,
		    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
		    HDPX_CFG_LCOL_MASK) |
		    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
		    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
		    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
		    HDPX_CFG_ABEBT_MASK) |
		    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
		    HDPX_CFG_JAMIPG_MASK));
	}

	/*
	 * Set TSO/checksum offload threshold. For frames that is
	 * larger than this threshold, hardware wouldn't do
	 * TSO/checksum offloading.
	 */
	reg = (sc->alc_ident->max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
	    TSO_OFFLOAD_THRESH_MASK;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		reg |= TSO_OFFLOAD_ERRLGPKT_DROP_ENB;
	CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH, reg);
	/* Configure TxQ. */
	reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
	    TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2)
		reg >>= 1;
	reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
	    TXQ_CFG_TD_BURST_MASK;
	reg |= TXQ_CFG_IP_OPTION_ENB | TXQ_CFG_8023_ENB;
	CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		reg = (TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q1_BURST_SHIFT |
		    TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q2_BURST_SHIFT |
		    TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q3_BURST_SHIFT |
		    HQTD_CFG_BURST_ENB);
		CSR_WRITE_4(sc, ALC_HQTD_CFG, reg);
		reg = WRR_PRI_RESTRICT_NONE;
		reg |= (WRR_PRI_DEFAULT << WRR_PRI0_SHIFT |
		    WRR_PRI_DEFAULT << WRR_PRI1_SHIFT |
		    WRR_PRI_DEFAULT << WRR_PRI2_SHIFT |
		    WRR_PRI_DEFAULT << WRR_PRI3_SHIFT);
		CSR_WRITE_4(sc, ALC_WRR, reg);
	} else {
		/* Configure Rx free descriptor pre-fetching. */
		CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
		    ((RX_RD_FREE_THRESH_HI_DEFAULT <<
		    RX_RD_FREE_THRESH_HI_SHIFT) & RX_RD_FREE_THRESH_HI_MASK) |
		    ((RX_RD_FREE_THRESH_LO_DEFAULT <<
		    RX_RD_FREE_THRESH_LO_SHIFT) & RX_RD_FREE_THRESH_LO_MASK));
	}

	/*
	 * Configure flow control parameters.
	 * XON  : 80% of Rx FIFO
	 * XOFF : 30% of Rx FIFO
	 */
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
		reg &= SRAM_RX_FIFO_LEN_MASK;
		reg *= 8;
		if (reg > 8 * 1024)
			reg -= RX_FIFO_PAUSE_816X_RSVD;
		else
			reg -= RX_BUF_SIZE_MAX;
		reg /= 8;
		CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
		    ((reg << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
		    (((RX_FIFO_PAUSE_816X_RSVD / 8) <<
		    RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_HI_MASK));
	} else if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132) {
		reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
		rxf_hi = (reg * 8) / 10;
		rxf_lo = (reg * 3) / 10;
		CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
		    ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
		     RX_FIFO_PAUSE_THRESH_LO_MASK) |
		    ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
		     RX_FIFO_PAUSE_THRESH_HI_MASK));
	}

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* Disable RSS until I understand L1C/L2C's RSS logic. */
		CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
		CSR_WRITE_4(sc, ALC_RSS_CPU, 0);
	}

	/* Configure RxQ. */
	reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
	    RXQ_CFG_RD_BURST_MASK;
	reg |= RXQ_CFG_RSS_MODE_DIS;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		reg |= (RXQ_CFG_816X_IDT_TBL_SIZE_DEFAULT <<
		    RXQ_CFG_816X_IDT_TBL_SIZE_SHIFT) &
		    RXQ_CFG_816X_IDT_TBL_SIZE_MASK;
		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
			reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
	} else {
		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0 &&
		    sc->alc_ident->deviceid != DEVICEID_ATHEROS_AR8151_V2)
			reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
	}
	CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);

	/* Configure DMA parameters. */
	reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
	reg |= sc->alc_rcb;
	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
		reg |= DMA_CFG_CMB_ENB;
	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
		reg |= DMA_CFG_SMB_ENB;
	else
		reg |= DMA_CFG_SMB_DIS;
	reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
	    DMA_CFG_RD_BURST_SHIFT;
	reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
	    DMA_CFG_WR_BURST_SHIFT;
	reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
	    DMA_CFG_RD_DELAY_CNT_MASK;
	reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
	    DMA_CFG_WR_DELAY_CNT_MASK;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		switch (AR816X_REV(sc->alc_rev)) {
		case AR816X_REV_A0:
		case AR816X_REV_A1:
			reg |= DMA_CFG_RD_CHNL_SEL_2;
			break;
		case AR816X_REV_B0:
			/* FALLTHROUGH */
		default:
			reg |= DMA_CFG_RD_CHNL_SEL_4;
			break;
		}
	}
	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 *  Actual reconfiguration of MAC for resolved speed/duplex
	 *  is followed after detection of link establishment.
	 *  AR813x/AR815x always does checksum computation regardless
	 *  of MAC_CFG_RXCSUM_ENB bit. Also the controller is known to
	 *  have bug in protocol field in Rx return structure so
	 *  these controllers can't handle fragmented frames. Disable
	 *  Rx checksum offloading until there is a newer controller
	 *  that has sane implementation.
	 */
	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2)
		reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
		reg |= MAC_CFG_SPEED_10_100;
	else
		reg |= MAC_CFG_SPEED_1000;
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);

	/* Set up the receive filter. */
	alc_rxfilter(sc);
	alc_rxvlan(sc);

	/* Acknowledge all pending interrupts and clear it. */
	CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	sc->alc_flags &= ~ALC_FLAG_LINK;
	/* Switch to the current media. */
	alc_mediachange_locked(sc);

	callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);
}
/*
 * Stop the interface: mark it down, cancel the tick callout, disable
 * interrupts and the DMA/CMB/SMB engines, halt the Rx/Tx MACs and
 * release every mbuf still held by the Rx and Tx descriptor rings.
 * Called with the driver lock held.
 */
static void
alc_stop(struct alc_softc *sc)
{
	if_t ifp;
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	uint32_t reg;
	int i;

	ALC_LOCK_ASSERT(sc);
	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->alc_ifp;
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->alc_flags &= ~ALC_FLAG_LINK;
	callout_stop(&sc->alc_tick_ch);
	sc->alc_watchdog_timer = 0;
	alc_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
	/* Disable DMA. */
	reg = CSR_READ_4(sc, ALC_DMA_CFG);
	reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
	reg |= DMA_CFG_SMB_DIS;
	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
	DELAY(1000);
	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);
	/* Disable interrupts which might be touched in taskq handler. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
	/* Disable L0s/L1s */
	alc_aspm(sc, 0, IFM_UNKNOWN);
	/* Reclaim Rx buffers that have been processed. */
	if (sc->alc_cdata.alc_rxhead != NULL)
		m_freem(sc->alc_cdata.alc_rxhead);
	ALC_RXCHAIN_RESET(sc);
	/*
	 * Free Tx/Rx mbufs still in the queues.
	 */
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->alc_cdata.alc_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->alc_cdata.alc_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}
/*
 * Disable the Rx/Tx MAC engines (after first stopping the queues) and
 * poll IDLE_STATUS until both report idle or the timeout expires,
 * complaining on the console if they never do.
 */
static void
alc_stop_mac(struct alc_softc *sc)
{
	uint32_t reg;
	int i;

	alc_stop_queue(sc);
	/* Disable Rx/Tx MAC. */
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
	}
	for (i = ALC_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		device_printf(sc->alc_dev,
		    "could not disable Rx/Tx MAC(0x%08x)!\n", reg);
}
/*
 * Enable the Rx and Tx queue engines.  Only Rx queue 0 is used; the
 * qcfg table maps a queue count to its enable mask, but only qcfg[1]
 * is ever referenced (on non-AR816x parts).
 */
static void
alc_start_queue(struct alc_softc *sc)
{
	uint32_t qcfg[] = {
		0,
		RXQ_CFG_QUEUE0_ENB,
		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
		RXQ_CFG_ENB
	};
	uint32_t cfg;

	ALC_LOCK_ASSERT(sc);

	/* Enable RxQ. */
	cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		cfg &= ~RXQ_CFG_ENB;
		cfg |= qcfg[1];
	} else
		cfg |= RXQ_CFG_QUEUE0_ENB;
	CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
	/* Enable TxQ. */
	cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
	cfg |= TXQ_CFG_ENB;
	CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
}
/*
 * Disable the Rx and Tx queue engines and poll IDLE_STATUS until both
 * report idle or the timeout expires.  AR816x parts clear only the
 * queue-0 enable bit; other parts clear the global enable.
 */
static void
alc_stop_queue(struct alc_softc *sc)
{
	uint32_t reg;
	int i;

	/* Disable RxQ. */
	reg = CSR_READ_4(sc, ALC_RXQ_CFG);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		if ((reg & RXQ_CFG_ENB) != 0) {
			reg &= ~RXQ_CFG_ENB;
			CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
		}
	} else {
		if ((reg & RXQ_CFG_QUEUE0_ENB) != 0) {
			reg &= ~RXQ_CFG_QUEUE0_ENB;
			CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
		}
	}
	/* Disable TxQ. */
	reg = CSR_READ_4(sc, ALC_TXQ_CFG);
	if ((reg & TXQ_CFG_ENB) != 0) {
		reg &= ~TXQ_CFG_ENB;
		CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
	}
	DELAY(40);
	for (i = ALC_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		device_printf(sc->alc_dev,
		    "could not disable RxQ/TxQ (0x%08x)!\n", reg);
}
/*
 * Reset the Tx descriptor ring to a pristine state: zero the
 * descriptor memory, rewind the producer/consumer indices, drop any
 * stale mbuf pointers and push the cleared ring out to the device.
 */
static void
alc_init_tx_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;
	int idx;

	ALC_LOCK_ASSERT(sc);

	rd = &sc->alc_rdata;
	bzero(rd->alc_tx_ring, ALC_TX_RING_SZ);
	sc->alc_cdata.alc_tx_prod = 0;
	sc->alc_cdata.alc_tx_cons = 0;
	sc->alc_cdata.alc_tx_cnt = 0;
	for (idx = 0; idx < ALC_TX_RING_CNT; idx++)
		sc->alc_cdata.alc_txdesc[idx].tx_m = NULL;
	bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
	    sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
}
/*
 * Populate the Rx descriptor ring: zero the ring memory, then allocate
 * and map an mbuf cluster for every descriptor.  Returns ENOBUFS if
 * any buffer allocation fails, 0 on success.
 */
static int
alc_init_rx_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;
	struct alc_rxdesc *rxd;
	int i;

	ALC_LOCK_ASSERT(sc);

	sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
	sc->alc_morework = 0;
	rd = &sc->alc_rdata;
	bzero(rd->alc_rx_ring, ALC_RX_RING_SZ);
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->alc_rx_ring[i];
		if (alc_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}

	/*
	 * Since controller does not update Rx descriptors, driver
	 * does not have to read Rx descriptors back so
	 * BUS_DMASYNC_PREWRITE is enough to ensure coherence.
	 */
	bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
	    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
	/* Let controller know availability of new Rx buffers. */
	CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);

	return (0);
}
/*
 * Reset the Rx return ring: rewind the consumer index, drop any
 * partially assembled Rx mbuf chain and zero the ring memory before
 * syncing it for hardware use.
 */
static void
alc_init_rr_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;

	ALC_LOCK_ASSERT(sc);

	sc->alc_cdata.alc_rr_cons = 0;
	ALC_RXCHAIN_RESET(sc);

	rd = &sc->alc_rdata;
	bzero(rd->alc_rr_ring, ALC_RR_RING_SZ);
	bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
	    sc->alc_cdata.alc_rr_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * Clear the coalescing message block (CMB) and sync it so the
 * hardware sees the zeroed contents.
 */
static void
alc_init_cmb(struct alc_softc *sc)
{

	ALC_LOCK_ASSERT(sc);

	bzero(sc->alc_rdata.alc_cmb, ALC_CMB_SZ);
	bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, sc->alc_cdata.alc_cmb_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * Clear the statistics message block (SMB) and sync it so the
 * hardware sees the zeroed contents.
 */
static void
alc_init_smb(struct alc_softc *sc)
{

	ALC_LOCK_ASSERT(sc);

	bzero(sc->alc_rdata.alc_smb, ALC_SMB_SZ);
	bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * Mirror the interface's VLAN hardware-tagging capability into the
 * MAC configuration register's tag-strip bit.
 */
static void
alc_rxvlan(struct alc_softc *sc)
{
	uint32_t cfg;

	ALC_LOCK_ASSERT(sc);

	cfg = CSR_READ_4(sc, ALC_MAC_CFG) & ~MAC_CFG_VLAN_TAG_STRIP;
	if ((if_getcapenable(sc->alc_ifp) & IFCAP_VLAN_HWTAGGING) != 0)
		cfg |= MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, ALC_MAC_CFG, cfg);
}
/*
 * if_foreach_llmaddr() callback: fold one multicast address into the
 * 64-bit hash filter.  The big-endian CRC32's MSB selects one of the
 * two 32-bit filter words; bits 30-26 select the bit within it.
 */
static u_int
alc_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *filt = arg;
	uint32_t hash;

	hash = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
	filt[hash >> 31] |= 1 << ((hash >> 26) & 0x1f);

	return (1);
}
/*
 * Program the receive filter: broadcast acceptance, promiscuous /
 * all-multicast modes, and the 64-bit multicast hash table (MAR0/1).
 */
static void
alc_rxfilter(struct alc_softc *sc)
{
	if_t ifp;
	uint32_t mchash[2];
	uint32_t rxcfg;

	ALC_LOCK_ASSERT(sc);

	ifp = sc->alc_ifp;
	rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
	if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
		rxcfg |= MAC_CFG_BCAST;
	if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		/* Accept everything; the hash filter is irrelevant. */
		if ((if_getflags(ifp) & IFF_PROMISC) != 0)
			rxcfg |= MAC_CFG_PROMISC;
		if ((if_getflags(ifp) & IFF_ALLMULTI) != 0)
			rxcfg |= MAC_CFG_ALLMULTI;
		mchash[0] = 0xFFFFFFFF;
		mchash[1] = 0xFFFFFFFF;
	} else {
		/* Hash each joined multicast group address. */
		mchash[0] = 0;
		mchash[1] = 0;
		if_foreach_llmaddr(ifp, alc_hash_maddr, mchash);
	}
	CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
	CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
	CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
}
/*
 * Generic sysctl handler that accepts a new integer value only when it
 * lies within [low, high]; out-of-range updates return EINVAL and the
 * stored value is left untouched.
 */
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int err, val;

	if (arg1 == NULL)
		return (EINVAL);
	val = *(int *)arg1;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);
	if (val < low || val > high)
		return (EINVAL);
	*(int *)arg1 = val;

	return (0);
}
/* Sysctl handler bounding the Rx process limit to [ALC_PROC_MIN, ALC_PROC_MAX]. */
static int
sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    ALC_PROC_MIN, ALC_PROC_MAX));
}
/* Sysctl handler bounding interrupt moderation to [ALC_IM_TIMER_MIN, ALC_IM_TIMER_MAX]. */
static int
sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    ALC_IM_TIMER_MIN, ALC_IM_TIMER_MAX));
}
#ifdef DEBUGNET
/*
 * Debugnet setup hook: report the Rx ring depth, the number of
 * in-flight clusters debugnet may use, and the cluster size.
 */
static void
alc_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
{
	struct alc_softc *sc __diagused;

	sc = if_getsoftc(ifp);
	/* Rx buffers must fit in a standard cluster for debugnet. */
	KASSERT(sc->alc_buf_size <= MCLBYTES, ("incorrect cluster size"));

	*nrxr = ALC_RX_RING_CNT;
	*ncl = DEBUGNET_MAX_IN_FLIGHT;
	*clsize = MCLBYTES;
}
/* Debugnet event hook: no driver-specific handling is required. */
static void
alc_debugnet_event(if_t ifp __unused, enum debugnet_ev event __unused)
{
}
/*
 * Debugnet transmit hook: encapsulate the mbuf onto the Tx ring and
 * kick the transmitter directly, bypassing the normal output path.
 * Returns EBUSY unless the interface is running and not OACTIVE.
 */
static int
alc_debugnet_transmit(if_t ifp, struct mbuf *m)
{
	struct alc_softc *sc;
	int error;

	sc = if_getsoftc(ifp);
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	error = alc_encap(sc, &m);
	if (error == 0)
		alc_start_tx(sc);
	return (error);
}
/*
 * Debugnet poll hook: reclaim completed Tx descriptors and process up
 * to 'count' received frames without relying on interrupts.
 */
static int
alc_debugnet_poll(if_t ifp, int count)
{
	struct alc_softc *sc;

	sc = if_getsoftc(ifp);
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	alc_txeof(sc);
	return (alc_rxintr(sc, count));
}
#endif /* DEBUGNET */
diff --git a/sys/dev/ale/if_ale.c b/sys/dev/ale/if_ale.c
index dcab9d10dfa4..5b3ae438810c 100644
--- a/sys/dev/ale/if_ale.c
+++ b/sys/dev/ale/if_ale.c
@@ -1,3073 +1,3067 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* Driver for Atheros AR8121/AR8113/AR8114 PCIe Ethernet. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <dev/ale/if_alereg.h>
#include <dev/ale/if_alevar.h>
/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
/* For more information about Tx checksum offload issues see ale_encap(). */
#define ALE_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
MODULE_DEPEND(ale, pci, 1, 1, 1);
MODULE_DEPEND(ale, ether, 1, 1, 1);
MODULE_DEPEND(ale, miibus, 1, 1, 1);
/* Tunables. */
/* Nonzero disables MSI / MSI-X; presumably consulted when allocating
 * interrupts at attach — TODO confirm against ale_attach(). */
static int msi_disable = 0;
static int msix_disable = 0;
TUNABLE_INT("hw.ale.msi_disable", &msi_disable);
TUNABLE_INT("hw.ale.msix_disable", &msix_disable);
/*
 * Devices supported by this driver.
 */
static const struct ale_dev {
	uint16_t	ale_vendorid;	/* PCI vendor ID */
	uint16_t	ale_deviceid;	/* PCI device ID */
	const char	*ale_name;	/* probe description string */
} ale_devs[] = {
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR81XX,
	    "Atheros AR8121/AR8113/AR8114 PCIe Ethernet" },
};
/* Forward declarations for the driver's file-local functions. */
static int	ale_attach(device_t);
static int	ale_check_boundary(struct ale_softc *);
static int	ale_detach(device_t);
static int	ale_dma_alloc(struct ale_softc *);
static void	ale_dma_free(struct ale_softc *);
static void	ale_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int	ale_encap(struct ale_softc *, struct mbuf **);
static void	ale_get_macaddr(struct ale_softc *);
static void	ale_init(void *);
static void	ale_init_locked(struct ale_softc *);
static void	ale_init_rx_pages(struct ale_softc *);
static void	ale_init_tx_ring(struct ale_softc *);
static void	ale_int_task(void *, int);
static int	ale_intr(void *);
static int	ale_ioctl(if_t, u_long, caddr_t);
static void	ale_mac_config(struct ale_softc *);
static int	ale_miibus_readreg(device_t, int, int);
static void	ale_miibus_statchg(device_t);
static int	ale_miibus_writereg(device_t, int, int, int);
static int	ale_mediachange(if_t);
static void	ale_mediastatus(if_t, struct ifmediareq *);
static void	ale_phy_reset(struct ale_softc *);
static int	ale_probe(device_t);
static void	ale_reset(struct ale_softc *);
static int	ale_resume(device_t);
static void	ale_rx_update_page(struct ale_softc *, struct ale_rx_page **,
    uint32_t, uint32_t *);
static void	ale_rxcsum(struct ale_softc *, struct mbuf *, uint32_t);
static int	ale_rxeof(struct ale_softc *sc, int);
static void	ale_rxfilter(struct ale_softc *);
static void	ale_rxvlan(struct ale_softc *);
static void	ale_setlinkspeed(struct ale_softc *);
static void	ale_setwol(struct ale_softc *);
static int	ale_shutdown(device_t);
static void	ale_start(if_t);
static void	ale_start_locked(if_t);
static void	ale_stats_clear(struct ale_softc *);
static void	ale_stats_update(struct ale_softc *);
static void	ale_stop(struct ale_softc *);
static void	ale_stop_mac(struct ale_softc *);
static int	ale_suspend(device_t);
static void	ale_sysctl_node(struct ale_softc *);
static void	ale_tick(void *);
static void	ale_txeof(struct ale_softc *);
static void	ale_watchdog(struct ale_softc *);
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_ale_proc_limit(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_ale_int_mod(SYSCTL_HANDLER_ARGS);
/* newbus method table wiring the driver into the device framework. */
static device_method_t ale_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		ale_probe),
	DEVMETHOD(device_attach,	ale_attach),
	DEVMETHOD(device_detach,	ale_detach),
	DEVMETHOD(device_shutdown,	ale_shutdown),
	DEVMETHOD(device_suspend,	ale_suspend),
	DEVMETHOD(device_resume,	ale_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	ale_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ale_miibus_writereg),
	DEVMETHOD(miibus_statchg,	ale_miibus_statchg),

	DEVMETHOD_END
};

static driver_t ale_driver = {
	"ale",
	ale_methods,
	sizeof(struct ale_softc)
};

DRIVER_MODULE(ale, pci, ale_driver, NULL, NULL);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, ale, ale_devs,
    nitems(ale_devs));
DRIVER_MODULE(miibus, ale, miibus_driver, NULL, NULL);

/* Single memory BAR holds all controller registers. */
static struct resource_spec ale_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
	{ -1,			0,		0 }
};

/* Legacy INTx is shareable; MSI/MSI-X vectors start at rid 1. */
static struct resource_spec ale_irq_spec_legacy[] = {
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

static struct resource_spec ale_irq_spec_msi[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};

static struct resource_spec ale_irq_spec_msix[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};
/*
 * miibus(4) register read method.  Issues a read command on the MDIO
 * interface and busy-waits (up to ALE_PHY_TIMEOUT polls, 5us apart)
 * for the controller to clear the busy bits.  Returns the 16-bit PHY
 * register value, or 0 on timeout.
 */
static int
ale_miibus_readreg(device_t dev, int phy, int reg)
{
	struct ale_softc *sc = device_get_softc(dev);
	uint32_t mdio;
	int n;

	CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (n = 0; n < ALE_PHY_TIMEOUT; n++) {
		DELAY(5);
		mdio = CSR_READ_4(sc, ALE_MDIO);
		if ((mdio & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			return ((mdio & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
	}

	device_printf(sc->ale_dev, "phy read timeout : %d\n", reg);
	return (0);
}
/*
 * miibus(4) register write method.  Issues a write command on the MDIO
 * interface and busy-waits for completion; a timeout is logged but not
 * reported to the caller (the miibus API expects a 0 return).
 */
static int
ale_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct ale_softc *sc = device_get_softc(dev);
	uint32_t mdio;
	int n;

	CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (n = 0; n < ALE_PHY_TIMEOUT; n++) {
		DELAY(5);
		mdio = CSR_READ_4(sc, ALE_MDIO);
		if ((mdio & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			return (0);
	}

	device_printf(sc->ale_dev, "phy write timeout : %d\n", reg);
	return (0);
}
/*
 * miibus(4) link-state change callback.  Re-derives the cached link
 * state (ALE_FLAG_LINK) from the negotiated media and reprograms the
 * MAC for the resolved speed/duplex/flow-control settings.
 */
static void
ale_miibus_statchg(device_t dev)
{
	struct ale_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	uint32_t reg;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->ale_miibus);
	ifp = sc->ale_ifp;
	/* Ignore callbacks that arrive before the interface is running. */
	if (mii == NULL || ifp == NULL ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	sc->ale_flags &= ~ALE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->ale_flags |= ALE_FLAG_LINK;
			break;
		case IFM_1000_T:
			/* Gigabit link is valid only on non-FE parts. */
			if ((sc->ale_flags & ALE_FLAG_FASTETHER) == 0)
				sc->ale_flags |= ALE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	ale_stop_mac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->ale_flags & ALE_FLAG_LINK) != 0) {
		ale_mac_config(sc);
		/* Reenable Tx/Rx MACs. */
		reg = CSR_READ_4(sc, ALE_MAC_CFG);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
	}
}
/*
 * ifmedia status callback: report the current media status/active
 * settings from the PHY.  Nothing is reported while the interface is
 * administratively down.
 */
static void
ale_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
	struct ale_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii;

	ALE_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		mii = device_get_softc(sc->ale_miibus);
		mii_pollstat(mii);
		ifmr->ifm_status = mii->mii_media_status;
		ifmr->ifm_active = mii->mii_media_active;
	}
	ALE_UNLOCK(sc);
}
/*
 * ifmedia change callback: reset every PHY on the bus and program the
 * newly requested media.  Returns the mii_mediachg() error code.
 */
static int
ale_mediachange(if_t ifp)
{
	struct ale_softc *sc = if_getsoftc(ifp);
	struct mii_softc *phy;
	struct mii_data *mii;
	int rv;

	ALE_LOCK(sc);
	mii = device_get_softc(sc->ale_miibus);
	LIST_FOREACH(phy, &mii->mii_phys, mii_list)
		PHY_RESET(phy);
	rv = mii_mediachg(mii);
	ALE_UNLOCK(sc);

	return (rv);
}
/*
 * Probe method: match the PCI vendor/device pair against the ale_devs
 * table and set the probe description on success.
 */
static int
ale_probe(device_t dev)
{
	const struct ale_dev *entry;
	uint16_t vendor, devid;
	int i;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (i = 0; i < nitems(ale_devs); i++) {
		entry = &ale_devs[i];
		if (entry->ale_vendorid == vendor &&
		    entry->ale_deviceid == devid) {
			device_set_desc(dev, entry->ale_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
/*
 * Read the factory station address into sc->ale_eaddr.
 *
 * If the device exposes a PCI VPD capability, the TWSI engine is told
 * to reload the EEPROM (which reprograms the PAR0/PAR1 address
 * registers); otherwise whatever the registers already hold is used.
 */
static void
ale_get_macaddr(struct ale_softc *sc)
{
	uint32_t ea[2], reg;
	int i, vpdc;

	/* Make sure SPI VPD access is disabled before touching TWSI. */
	reg = CSR_READ_4(sc, ALE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, ALE_SPI_CTRL, reg);
	}

	if (pci_find_cap(sc->ale_dev, PCIY_VPD, &vpdc) == 0) {
		/*
		 * PCI VPD capability found, let TWSI reload EEPROM.
		 * This will set ethernet address of controller.
		 */
		CSR_WRITE_4(sc, ALE_TWSI_CTRL, CSR_READ_4(sc, ALE_TWSI_CTRL) |
		    TWSI_CTRL_SW_LD_START);
		/* Poll up to ~100ms for the reload to complete. */
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, ALE_TWSI_CTRL);
			if ((reg & TWSI_CTRL_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			device_printf(sc->ale_dev,
			    "reloading EEPROM timeout!\n");
	} else {
		if (bootverbose)
			device_printf(sc->ale_dev,
			    "PCI VPD capability not found!\n");
	}

	/* PAR1 holds the two high bytes, PAR0 the four low bytes. */
	ea[0] = CSR_READ_4(sc, ALE_PAR0);
	ea[1] = CSR_READ_4(sc, ALE_PAR1);
	sc->ale_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->ale_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->ale_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->ale_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->ale_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->ale_eaddr[5] = (ea[0] >> 0) & 0xFF;
}
/*
 * Hard-reset the PHY and apply the vendor-recommended debug-port
 * tweaks (hibernation, class A/B, power tuning).  Each tweak is an
 * address/data pair written through the ATPHY debug port: the target
 * register index goes to ATPHY_DBG_ADDR, its value to ATPHY_DBG_DATA.
 *
 * Fix: the "Adjust 1000T power" and "10BT center tap voltage" pairs
 * previously wrote their data values (0x8BBB, 0x2C46) to
 * ATPHY_DBG_ADDR instead of ATPHY_DBG_DATA, so those two tweaks never
 * took effect.  The corrected sequence matches the Linux atl1e
 * driver's atl1e_phy_init().
 */
static void
ale_phy_reset(struct ale_softc *sc)
{

	/* Reset magic from Linux. */
	CSR_WRITE_2(sc, ALE_GPHY_CTRL,
	    GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE | GPHY_CTRL_SEL_ANA_RESET |
	    GPHY_CTRL_PHY_PLL_ON);
	DELAY(1000);
	CSR_WRITE_2(sc, ALE_GPHY_CTRL,
	    GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE |
	    GPHY_CTRL_SEL_ANA_RESET | GPHY_CTRL_PHY_PLL_ON);
	DELAY(1000);

#define	ATPHY_DBG_ADDR		0x1D
#define	ATPHY_DBG_DATA		0x1E

	/* Enable hibernation mode. */
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x0B);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0xBC00);
	/* Set Class A/B for all modes. */
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x00);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x02EF);
	/* Enable 10BT power saving. */
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x12);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x4C04);
	/* Adjust 1000T power. */
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x04);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x8BBB);
	/* 10BT center tap voltage. */
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x05);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x2C46);

#undef	ATPHY_DBG_ADDR
#undef	ATPHY_DBG_DATA
	DELAY(1000);
}
/*
 * Attach method: map the register BAR, reset the PHY/controller,
 * detect the chip variant and its quirks, allocate interrupt vectors
 * (MSI-X > MSI > INTx) and DMA memory, then create the ifnet, MII bus
 * and interrupt handlers.  On any failure ale_detach() unwinds the
 * partially completed setup.
 *
 * Fix: the uninitialized-hardware check compared rxf_len against
 * 0xFFFFFFF (seven F's), so an all-ones Rx FIFO length register was
 * never caught; the constant is corrected to 0xFFFFFFFF.
 */
static int
ale_attach(device_t dev)
{
	struct ale_softc *sc;
	if_t ifp;
	uint16_t burst;
	int error, i, msic, msixc, pmc;
	uint32_t rxf_len, txf_len;

	error = 0;
	sc = device_get_softc(dev);
	sc->ale_dev = dev;

	mtx_init(&sc->ale_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->ale_tick_ch, &sc->ale_mtx, 0);
	NET_TASK_INIT(&sc->ale_int_task, 0, ale_int_task, sc);

	/* Map the device. */
	pci_enable_busmaster(dev);
	sc->ale_res_spec = ale_res_spec_mem;
	sc->ale_irq_spec = ale_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->ale_res_spec, sc->ale_res);
	if (error != 0) {
		device_printf(dev, "cannot allocate memory resources.\n");
		goto fail;
	}

	/* Set PHY address. */
	sc->ale_phyaddr = ALE_PHY_ADDR;

	/* Reset PHY. */
	ale_phy_reset(sc);

	/* Reset the ethernet controller. */
	ale_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->ale_rev = pci_get_revid(dev);
	if (sc->ale_rev >= 0xF0) {
		/* L2E Rev. B. AR8114 */
		sc->ale_flags |= ALE_FLAG_FASTETHER;
	} else {
		if ((CSR_READ_4(sc, ALE_PHY_STATUS) & PHY_STATUS_100M) != 0) {
			/* L1E AR8121 */
			sc->ale_flags |= ALE_FLAG_JUMBO;
		} else {
			/* L2E Rev. A. AR8113 */
			sc->ale_flags |= ALE_FLAG_FASTETHER;
		}
	}

	/*
	 * All known controllers seems to require 4 bytes alignment
	 * of Tx buffers to make Tx checksum offload with custom
	 * checksum generation method work.
	 */
	sc->ale_flags |= ALE_FLAG_TXCSUM_BUG;

	/*
	 * All known controllers seems to have issues on Rx checksum
	 * offload for fragmented IP datagrams.
	 */
	sc->ale_flags |= ALE_FLAG_RXCSUM_BUG;

	/*
	 * Don't use Tx CMB. It is known to cause RRS update failure
	 * under certain circumstances. Typical phenomenon of the
	 * issue would be unexpected sequence number encountered in
	 * Rx handler.
	 */
	sc->ale_flags |= ALE_FLAG_TXCMB_BUG;
	sc->ale_chip_rev = CSR_READ_4(sc, ALE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->ale_rev);
		device_printf(dev, "Chip id/revision : 0x%04x\n",
		    sc->ale_chip_rev);
	}
	txf_len = CSR_READ_4(sc, ALE_SRAM_TX_FIFO_LEN);
	rxf_len = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN);

	/*
	 * Uninitialized hardware returns an invalid chip id/revision
	 * as well as 0xFFFFFFFF for Tx/Rx fifo length.
	 */
	if (sc->ale_chip_rev == 0xFFFF || txf_len == 0xFFFFFFFF ||
	    rxf_len == 0xFFFFFFFF) {
		device_printf(dev, "chip revision : 0x%04x, %u Tx FIFO "
		    "%u Rx FIFO -- not initialized?\n", sc->ale_chip_rev,
		    txf_len, rxf_len);
		error = ENXIO;
		goto fail;
	}
	device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n", txf_len, rxf_len);

	/* Allocate IRQ resources. */
	msixc = pci_msix_count(dev);
	msic = pci_msi_count(dev);
	if (bootverbose) {
		device_printf(dev, "MSIX count : %d\n", msixc);
		device_printf(dev, "MSI count : %d\n", msic);
	}

	/* Prefer MSIX over MSI. */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 && msixc == ALE_MSIX_MESSAGES &&
		    pci_alloc_msix(dev, &msixc) == 0) {
			if (msixc == ALE_MSIX_MESSAGES) {
				device_printf(dev, "Using %d MSIX messages.\n",
				    msixc);
				sc->ale_flags |= ALE_FLAG_MSIX;
				sc->ale_irq_spec = ale_irq_spec_msix;
			} else
				pci_release_msi(dev);
		}
		if (msi_disable == 0 && (sc->ale_flags & ALE_FLAG_MSIX) == 0 &&
		    msic == ALE_MSI_MESSAGES &&
		    pci_alloc_msi(dev, &msic) == 0) {
			if (msic == ALE_MSI_MESSAGES) {
				device_printf(dev, "Using %d MSI messages.\n",
				    msic);
				sc->ale_flags |= ALE_FLAG_MSI;
				sc->ale_irq_spec = ale_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
	}

	error = bus_alloc_resources(dev, sc->ale_irq_spec, sc->ale_irq);
	if (error != 0) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		goto fail;
	}

	/* Get DMA parameters from PCIe device control register. */
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		sc->ale_flags |= ALE_FLAG_PCIE;
		burst = pci_read_config(dev, i + 0x08, 2);
		/* Max read request size. */
		sc->ale_dma_rd_burst = ((burst >> 12) & 0x07) <<
		    DMA_CFG_RD_BURST_SHIFT;
		/* Max payload size. */
		sc->ale_dma_wr_burst = ((burst >> 5) & 0x07) <<
		    DMA_CFG_WR_BURST_SHIFT;
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((burst >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((burst >> 5) & 0x07));
		}
	} else {
		sc->ale_dma_rd_burst = DMA_CFG_RD_BURST_128;
		sc->ale_dma_wr_burst = DMA_CFG_WR_BURST_128;
	}

	/* Create device sysctl node. */
	ale_sysctl_node(sc);

	if ((error = ale_dma_alloc(sc)) != 0)
		goto fail;

	/* Load station address. */
	ale_get_macaddr(sc);

	ifp = sc->ale_ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, ale_ioctl);
	if_setstartfn(ifp, ale_start);
	if_setinitfn(ifp, ale_init);
	if_setsendqlen(ifp, ALE_TX_RING_CNT - 1);
	if_setsendqready(ifp);
	if_setcapabilities(ifp, IFCAP_RXCSUM | IFCAP_TXCSUM | IFCAP_TSO4);
	if_sethwassist(ifp, ALE_CSUM_FEATURES | CSUM_TSO);
	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
		sc->ale_flags |= ALE_FLAG_PMCAP;
		if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST, 0);
	}
	if_setcapenable(ifp, if_getcapabilities(ifp));

	/* Set up MII bus. */
	error = mii_attach(dev, &sc->ale_miibus, ifp, ale_mediachange,
	    ale_mediastatus, BMSR_DEFCAPMASK, sc->ale_phyaddr, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->ale_eaddr);

	/* VLAN capability setup. */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	/*
	 * Even though controllers supported by ale(3) have Rx checksum
	 * offload bug the workaround for fragmented frames seemed to
	 * work so far. However it seems Rx checksum offload does not
	 * work under certain conditions. So disable Rx checksum offload
	 * until I find more clue about it but allow users to override it.
	 */
	if_setcapenablebit(ifp, 0, IFCAP_RXCSUM);

	/* Tell the upper layer(s) we support long frames. */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	/* Create local taskq. */
	sc->ale_tq = taskqueue_create_fast("ale_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->ale_tq);
	if (sc->ale_tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->ale_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->ale_dev));

	if ((sc->ale_flags & ALE_FLAG_MSIX) != 0)
		msic = ALE_MSIX_MESSAGES;
	else if ((sc->ale_flags & ALE_FLAG_MSI) != 0)
		msic = ALE_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		error = bus_setup_intr(dev, sc->ale_irq[i],
		    INTR_TYPE_NET | INTR_MPSAFE, ale_intr, NULL, sc,
		    &sc->ale_intrhand[i]);
		if (error != 0)
			break;
	}
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->ale_tq);
		sc->ale_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		ale_detach(dev);

	return (error);
}
/*
 * Detach method; also used by ale_attach() to unwind a failed attach,
 * so every teardown step must tolerate resources that were never set
 * up.  Order matters: detach the ifnet and stop the hardware first,
 * then drain/free the taskqueue, MII bus, DMA memory, interrupt
 * handlers and finally the bus resources and mutex.
 */
static int
ale_detach(device_t dev)
{
	struct ale_softc *sc;
	if_t ifp;
	int i, msic;

	sc = device_get_softc(dev);

	ifp = sc->ale_ifp;
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		ALE_LOCK(sc);
		ale_stop(sc);
		ALE_UNLOCK(sc);
		callout_drain(&sc->ale_tick_ch);
		taskqueue_drain(sc->ale_tq, &sc->ale_int_task);
	}

	if (sc->ale_tq != NULL) {
		/*
		 * Drained again here so the partial-attach path (not
		 * device_is_attached) is also covered; redundant after
		 * a full detach but harmless.
		 */
		taskqueue_drain(sc->ale_tq, &sc->ale_int_task);
		taskqueue_free(sc->ale_tq);
		sc->ale_tq = NULL;
	}

	if (sc->ale_miibus != NULL) {
		device_delete_child(dev, sc->ale_miibus);
		sc->ale_miibus = NULL;
	}
	bus_generic_detach(dev);
	ale_dma_free(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->ale_ifp = NULL;
	}

	/* Tear down however many vectors the active IRQ spec allocated. */
	if ((sc->ale_flags & ALE_FLAG_MSIX) != 0)
		msic = ALE_MSIX_MESSAGES;
	else if ((sc->ale_flags & ALE_FLAG_MSI) != 0)
		msic = ALE_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		if (sc->ale_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->ale_irq[i],
			    sc->ale_intrhand[i]);
			sc->ale_intrhand[i] = NULL;
		}
	}

	bus_release_resources(dev, sc->ale_irq_spec, sc->ale_irq);
	if ((sc->ale_flags & (ALE_FLAG_MSI | ALE_FLAG_MSIX)) != 0)
		pci_release_msi(dev);
	bus_release_resources(dev, sc->ale_res_spec, sc->ale_res);
	mtx_destroy(&sc->ale_mtx);

	return (0);
}
/* Helpers to export one 32/64-bit hardware statistic as a sysctl leaf. */
#define	ALE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define	ALE_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)

/*
 * Create the per-device sysctl tree: interrupt moderation and Rx
 * process-limit knobs (with device.hints overrides, range-checked
 * back to defaults) plus the full Rx/Tx MAC statistics tree.
 */
static void
ale_sysctl_node(struct ale_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct ale_hw_stats *stats;
	int error;

	stats = &sc->ale_stats;
	ctx = device_get_sysctl_ctx(sc->ale_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ale_dev));

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->ale_int_rx_mod,
	    0, sysctl_hw_ale_int_mod, "I", "ale Rx interrupt moderation");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->ale_int_tx_mod,
	    0, sysctl_hw_ale_int_mod, "I", "ale Tx interrupt moderation");
	/* Pull in device tunables. */
	sc->ale_int_rx_mod = ALE_IM_RX_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->ale_dev),
	    device_get_unit(sc->ale_dev), "int_rx_mod", &sc->ale_int_rx_mod);
	if (error == 0) {
		if (sc->ale_int_rx_mod < ALE_IM_TIMER_MIN ||
		    sc->ale_int_rx_mod > ALE_IM_TIMER_MAX) {
			device_printf(sc->ale_dev, "int_rx_mod value out of "
			    "range; using default: %d\n",
			    ALE_IM_RX_TIMER_DEFAULT);
			sc->ale_int_rx_mod = ALE_IM_RX_TIMER_DEFAULT;
		}
	}
	sc->ale_int_tx_mod = ALE_IM_TX_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->ale_dev),
	    device_get_unit(sc->ale_dev), "int_tx_mod", &sc->ale_int_tx_mod);
	if (error == 0) {
		if (sc->ale_int_tx_mod < ALE_IM_TIMER_MIN ||
		    sc->ale_int_tx_mod > ALE_IM_TIMER_MAX) {
			device_printf(sc->ale_dev, "int_tx_mod value out of "
			    "range; using default: %d\n",
			    ALE_IM_TX_TIMER_DEFAULT);
			sc->ale_int_tx_mod = ALE_IM_TX_TIMER_DEFAULT;
		}
	}
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    &sc->ale_process_limit, 0, sysctl_hw_ale_proc_limit, "I",
	    "max number of Rx events to process");
	/* Pull in device tunables. */
	sc->ale_process_limit = ALE_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->ale_dev),
	    device_get_unit(sc->ale_dev), "process_limit",
	    &sc->ale_process_limit);
	if (error == 0) {
		if (sc->ale_process_limit < ALE_PROC_MIN ||
		    sc->ale_process_limit > ALE_PROC_MAX) {
			device_printf(sc->ale_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", ALE_PROC_DEFAULT);
			sc->ale_process_limit = ALE_PROC_DEFAULT;
		}
	}

	/* Misc statistics. */
	ALE_SYSCTL_STAT_ADD32(ctx, child, "reset_brk_seq",
	    &stats->reset_brk_seq,
	    "Controller resets due to broken Rx sequnce number");

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ATE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_frames, "Good frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->rx_bcast_frames, "Good broadcast frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->rx_mcast_frames, "Good multicast frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause control frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
	    &stats->rx_control_frames, "Control frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->rx_lenerrs, "Frames with length mismatched");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->rx_bytes, "Good octets");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
	    &stats->rx_bcast_bytes, "Good broadcast octets");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
	    &stats->rx_mcast_bytes, "Good multicast octets");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "runts",
	    &stats->rx_runts, "Too short frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "fragments",
	    &stats->rx_fragments, "Fragmented frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->rx_pkts_64, "64 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->rx_pkts_65_127, "65 to 127 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->rx_pkts_128_255, "128 to 255 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->rx_pkts_256_511, "256 to 511 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->rx_pkts_1519_max, "1519 to max frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
	    &stats->rx_pkts_truncated, "Truncated frames due to MTU size");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
	    &stats->rx_fifo_oflows, "FIFO overflows");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs",
	    &stats->rx_rrs_errs, "Return status write-back errors");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
	    &stats->rx_alignerrs, "Alignment errors");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "filtered",
	    &stats->rx_pkts_filtered,
	    "Frames dropped due to address filtering");

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->tx_bcast_frames, "Good broadcast frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->tx_mcast_frames, "Good multicast frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause_frames, "Pause control frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
	    &stats->tx_control_frames, "Control frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "excess_defers",
	    &stats->tx_excess_defer, "Frames with excessive derferrals");
	/*
	 * NOTE(review): "defers" exports the same counter
	 * (tx_excess_defer) as "excess_defers" above; looks like it
	 * should point at a separate deferred-transmission counter in
	 * struct ale_hw_stats -- confirm against if_alevar.h.
	 */
	ALE_SYSCTL_STAT_ADD32(ctx, child, "defers",
	    &stats->tx_excess_defer, "Frames with derferrals");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->tx_bytes, "Good octets");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
	    &stats->tx_bcast_bytes, "Good broadcast octets");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
	    &stats->tx_mcast_bytes, "Good multicast octets");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->tx_pkts_64, "64 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->tx_pkts_65_127, "65 to 127 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->tx_pkts_128_255, "128 to 255 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->tx_pkts_256_511, "256 to 511 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->tx_pkts_1519_max, "1519 to max frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "single_colls",
	    &stats->tx_single_colls, "Single collisions");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
	    &stats->tx_excess_colls, "Excessive collisions");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "underruns",
	    &stats->tx_underrun, "FIFO underruns");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns",
	    &stats->tx_desc_underrun, "Descriptor write-back errors");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->tx_lenerrs, "Frames with length mismatched");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
	    &stats->tx_pkts_truncated, "Truncated frames due to MTU size");
}

#undef ALE_SYSCTL_STAT_ADD32
#undef ALE_SYSCTL_STAT_ADD64
/* Out-parameter carrier for ale_dmamap_cb(). */
struct ale_dmamap_arg {
	bus_addr_t	ale_busaddr;
};

/*
 * bus_dmamap_load() callback: record the single segment's bus address
 * into the caller-supplied ale_dmamap_arg.  Leaves the argument
 * untouched on load error so the caller's 0-sentinel survives.
 */
static void
ale_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ale_dmamap_arg *res = arg;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	res->ale_busaddr = segs[0].ds_addr;
}
/*
* Tx descriptors/RXF0/CMB DMA blocks share ALE_DESC_ADDR_HI register
* which specifies high address region of DMA blocks. Therefore these
* blocks should have the same high address of given 4GB address
* space(i.e. crossing 4GB boundary is not allowed).
*/
/*
 * Verify that the Tx ring, both Rx pages and the Tx/Rx CMBs all live
 * in the same 4GB region (share one ALE_ADDR_HI value) and that none
 * of them individually straddles a 4GB boundary.  Both the original
 * pairwise checks and this single-reference form are equivalent:
 * they pass exactly when every region start and end carries the same
 * high 32 bits.  Returns 0 on success, EFBIG otherwise.
 */
static int
ale_check_boundary(struct ale_softc *sc)
{
	bus_addr_t seg_start[6], seg_end[6], hi;
	int i;

	seg_start[0] = sc->ale_cdata.ale_tx_ring_paddr;
	seg_end[0] = seg_start[0] + ALE_TX_RING_SZ;
	seg_start[1] = sc->ale_cdata.ale_rx_page[0].page_paddr;
	seg_end[1] = seg_start[1] + sc->ale_pagesize;
	seg_start[2] = sc->ale_cdata.ale_rx_page[1].page_paddr;
	seg_end[2] = seg_start[2] + sc->ale_pagesize;
	seg_start[3] = sc->ale_cdata.ale_tx_cmb_paddr;
	seg_end[3] = seg_start[3] + ALE_TX_CMB_SZ;
	seg_start[4] = sc->ale_cdata.ale_rx_page[0].cmb_paddr;
	seg_end[4] = seg_start[4] + ALE_RX_CMB_SZ;
	seg_start[5] = sc->ale_cdata.ale_rx_page[1].cmb_paddr;
	seg_end[5] = seg_start[5] + ALE_RX_CMB_SZ;

	/* The Tx ring end is the reference high address for all blocks. */
	hi = ALE_ADDR_HI(seg_end[0]);
	for (i = 0; i < 6; i++) {
		if (ALE_ADDR_HI(seg_start[i]) != hi ||
		    ALE_ADDR_HI(seg_end[i]) != hi)
			return (EFBIG);
	}

	return (0);
}
/*
 * Allocate all descriptor/control-block DMA memory: the Tx ring, two
 * Rx pages and the Tx/Rx coalescing message blocks, plus the per-Tx-
 * buffer dmamaps.  If the blocks land in different 4GB regions (see
 * ale_check_boundary()) everything is freed and the allocation is
 * retried with a 32-bit address limit ("again:" label).  On error the
 * caller (ale_attach()) is expected to invoke ale_dma_free().
 */
static int
ale_dma_alloc(struct ale_softc *sc)
{
	struct ale_txdesc *txd;
	bus_addr_t lowaddr;
	struct ale_dmamap_arg ctx;
	int error, guard_size, i;

	/* Each Rx page carries guard space for one maximum-size frame. */
	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0)
		guard_size = ALE_JUMBO_FRAMELEN;
	else
		guard_size = ALE_MAX_FRAMELEN;
	sc->ale_pagesize = roundup(guard_size + ALE_RX_PAGE_SZ,
	    ALE_RX_PAGE_ALIGN);
	lowaddr = BUS_SPACE_MAXADDR;
again:
	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->ale_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ale_cdata.ale_parent_tag);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx descriptor ring. */
	error = bus_dma_tag_create(
	    sc->ale_cdata.ale_parent_tag, /* parent */
	    ALE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ale_cdata.ale_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not create Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx pages. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		error = bus_dma_tag_create(
		    sc->ale_cdata.ale_parent_tag, /* parent */
		    ALE_RX_PAGE_ALIGN, 0,	/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    sc->ale_pagesize,		/* maxsize */
		    1,				/* nsegments */
		    sc->ale_pagesize,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->ale_cdata.ale_rx_page[i].page_tag);
		if (error != 0) {
			device_printf(sc->ale_dev,
			    "could not create Rx page %d DMA tag.\n", i);
			goto fail;
		}
	}

	/* Create DMA tag for Tx coalescing message block. */
	error = bus_dma_tag_create(
	    sc->ale_cdata.ale_parent_tag, /* parent */
	    ALE_CMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALE_TX_CMB_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALE_TX_CMB_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ale_cdata.ale_tx_cmb_tag);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not create Tx CMB DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx coalescing message block. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		error = bus_dma_tag_create(
		    sc->ale_cdata.ale_parent_tag, /* parent */
		    ALE_CMB_ALIGN, 0,		/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    ALE_RX_CMB_SZ,		/* maxsize */
		    1,				/* nsegments */
		    ALE_RX_CMB_SZ,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->ale_cdata.ale_rx_page[i].cmb_tag);
		if (error != 0) {
			device_printf(sc->ale_dev,
			    "could not create Rx page %d CMB DMA tag.\n", i);
			goto fail;
		}
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->ale_cdata.ale_tx_ring_tag,
	    (void **)&sc->ale_cdata.ale_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->ale_cdata.ale_tx_ring_map);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	ctx.ale_busaddr = 0;
	error = bus_dmamap_load(sc->ale_cdata.ale_tx_ring_tag,
	    sc->ale_cdata.ale_tx_ring_map, sc->ale_cdata.ale_tx_ring,
	    ALE_TX_RING_SZ, ale_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.ale_busaddr == 0) {
		device_printf(sc->ale_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->ale_cdata.ale_tx_ring_paddr = ctx.ale_busaddr;

	/* Rx pages. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		error = bus_dmamem_alloc(sc->ale_cdata.ale_rx_page[i].page_tag,
		    (void **)&sc->ale_cdata.ale_rx_page[i].page_addr,
		    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &sc->ale_cdata.ale_rx_page[i].page_map);
		if (error != 0) {
			device_printf(sc->ale_dev,
			    "could not allocate DMA'able memory for "
			    "Rx page %d.\n", i);
			goto fail;
		}
		ctx.ale_busaddr = 0;
		error = bus_dmamap_load(sc->ale_cdata.ale_rx_page[i].page_tag,
		    sc->ale_cdata.ale_rx_page[i].page_map,
		    sc->ale_cdata.ale_rx_page[i].page_addr,
		    sc->ale_pagesize, ale_dmamap_cb, &ctx, 0);
		if (error != 0 || ctx.ale_busaddr == 0) {
			device_printf(sc->ale_dev,
			    "could not load DMA'able memory for "
			    "Rx page %d.\n", i);
			goto fail;
		}
		sc->ale_cdata.ale_rx_page[i].page_paddr = ctx.ale_busaddr;
	}

	/* Tx CMB. */
	error = bus_dmamem_alloc(sc->ale_cdata.ale_tx_cmb_tag,
	    (void **)&sc->ale_cdata.ale_tx_cmb,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->ale_cdata.ale_tx_cmb_map);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not allocate DMA'able memory for Tx CMB.\n");
		goto fail;
	}
	ctx.ale_busaddr = 0;
	error = bus_dmamap_load(sc->ale_cdata.ale_tx_cmb_tag,
	    sc->ale_cdata.ale_tx_cmb_map, sc->ale_cdata.ale_tx_cmb,
	    ALE_TX_CMB_SZ, ale_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.ale_busaddr == 0) {
		device_printf(sc->ale_dev,
		    "could not load DMA'able memory for Tx CMB.\n");
		goto fail;
	}
	sc->ale_cdata.ale_tx_cmb_paddr = ctx.ale_busaddr;

	/* Rx CMB. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		error = bus_dmamem_alloc(sc->ale_cdata.ale_rx_page[i].cmb_tag,
		    (void **)&sc->ale_cdata.ale_rx_page[i].cmb_addr,
		    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &sc->ale_cdata.ale_rx_page[i].cmb_map);
		if (error != 0) {
			device_printf(sc->ale_dev, "could not allocate "
			    "DMA'able memory for Rx page %d CMB.\n", i);
			goto fail;
		}
		ctx.ale_busaddr = 0;
		error = bus_dmamap_load(sc->ale_cdata.ale_rx_page[i].cmb_tag,
		    sc->ale_cdata.ale_rx_page[i].cmb_map,
		    sc->ale_cdata.ale_rx_page[i].cmb_addr,
		    ALE_RX_CMB_SZ, ale_dmamap_cb, &ctx, 0);
		if (error != 0 || ctx.ale_busaddr == 0) {
			device_printf(sc->ale_dev, "could not load DMA'able "
			    "memory for Rx page %d CMB.\n", i);
			goto fail;
		}
		sc->ale_cdata.ale_rx_page[i].cmb_paddr = ctx.ale_busaddr;
	}

	/*
	 * Tx descriptors/RXF0/CMB DMA blocks share the same
	 * high address region of 64bit DMA address space.
	 */
	if (lowaddr != BUS_SPACE_MAXADDR_32BIT &&
	    (error = ale_check_boundary(sc)) != 0) {
		device_printf(sc->ale_dev, "4GB boundary crossed, "
		    "switching to 32bit DMA addressing mode.\n");
		ale_dma_free(sc);
		/*
		 * Limit max allowable DMA address space to 32bit
		 * and try again.
		 */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}

	/*
	 * Create Tx buffer parent tag.
	 * AR81xx allows 64bit DMA addressing of Tx buffers so it
	 * needs separate parent DMA tag as parent DMA address space
	 * could be restricted to be within 32bit address space by
	 * 4GB boundary crossing.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->ale_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ale_cdata.ale_buffer_tag);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->ale_cdata.ale_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALE_TSO_MAXSIZE,		/* maxsize */
	    ALE_MAXTXSEGS,		/* nsegments */
	    ALE_TSO_MAXSEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ale_cdata.ale_tx_tag);
	if (error != 0) {
		device_printf(sc->ale_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < ALE_TX_RING_CNT; i++) {
		txd = &sc->ale_cdata.ale_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->ale_cdata.ale_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->ale_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}
/*
 * Release all DMA resources allocated by the attach path.
 *
 * For each object the teardown order is fixed and must be preserved:
 * unload the DMA map (only when a bus address was assigned, i.e. the
 * map was actually loaded), free the DMA'able memory, then destroy
 * the tag.  The parent tags are destroyed last because the child
 * tags reference them.  Every step is guarded by a NULL/0 check so
 * this is safe to call on a partially initialized softc.
 */
static void
ale_dma_free(struct ale_softc *sc)
{
	struct ale_txdesc *txd;
	int i;

	/* Tx buffers: per-descriptor dmamaps, then the buffer tag. */
	if (sc->ale_cdata.ale_tx_tag != NULL) {
		for (i = 0; i < ALE_TX_RING_CNT; i++) {
			txd = &sc->ale_cdata.ale_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->ale_cdata.ale_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->ale_cdata.ale_tx_tag);
		sc->ale_cdata.ale_tx_tag = NULL;
	}
	/* Tx descriptor ring. */
	if (sc->ale_cdata.ale_tx_ring_tag != NULL) {
		/* Non-zero paddr means the map was loaded. */
		if (sc->ale_cdata.ale_tx_ring_paddr != 0)
			bus_dmamap_unload(sc->ale_cdata.ale_tx_ring_tag,
			    sc->ale_cdata.ale_tx_ring_map);
		if (sc->ale_cdata.ale_tx_ring != NULL)
			bus_dmamem_free(sc->ale_cdata.ale_tx_ring_tag,
			    sc->ale_cdata.ale_tx_ring,
			    sc->ale_cdata.ale_tx_ring_map);
		sc->ale_cdata.ale_tx_ring_paddr = 0;
		sc->ale_cdata.ale_tx_ring = NULL;
		bus_dma_tag_destroy(sc->ale_cdata.ale_tx_ring_tag);
		sc->ale_cdata.ale_tx_ring_tag = NULL;
	}
	/* Rx page block. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		if (sc->ale_cdata.ale_rx_page[i].page_tag != NULL) {
			if (sc->ale_cdata.ale_rx_page[i].page_paddr != 0)
				bus_dmamap_unload(
				    sc->ale_cdata.ale_rx_page[i].page_tag,
				    sc->ale_cdata.ale_rx_page[i].page_map);
			if (sc->ale_cdata.ale_rx_page[i].page_addr != NULL)
				bus_dmamem_free(
				    sc->ale_cdata.ale_rx_page[i].page_tag,
				    sc->ale_cdata.ale_rx_page[i].page_addr,
				    sc->ale_cdata.ale_rx_page[i].page_map);
			sc->ale_cdata.ale_rx_page[i].page_paddr = 0;
			sc->ale_cdata.ale_rx_page[i].page_addr = NULL;
			bus_dma_tag_destroy(
			    sc->ale_cdata.ale_rx_page[i].page_tag);
			sc->ale_cdata.ale_rx_page[i].page_tag = NULL;
		}
	}
	/* Rx CMB (one coalescing message block per Rx page). */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		if (sc->ale_cdata.ale_rx_page[i].cmb_tag != NULL) {
			if (sc->ale_cdata.ale_rx_page[i].cmb_paddr != 0)
				bus_dmamap_unload(
				    sc->ale_cdata.ale_rx_page[i].cmb_tag,
				    sc->ale_cdata.ale_rx_page[i].cmb_map);
			if (sc->ale_cdata.ale_rx_page[i].cmb_addr != NULL)
				bus_dmamem_free(
				    sc->ale_cdata.ale_rx_page[i].cmb_tag,
				    sc->ale_cdata.ale_rx_page[i].cmb_addr,
				    sc->ale_cdata.ale_rx_page[i].cmb_map);
			sc->ale_cdata.ale_rx_page[i].cmb_paddr = 0;
			sc->ale_cdata.ale_rx_page[i].cmb_addr = NULL;
			bus_dma_tag_destroy(
			    sc->ale_cdata.ale_rx_page[i].cmb_tag);
			sc->ale_cdata.ale_rx_page[i].cmb_tag = NULL;
		}
	}
	/* Tx CMB. */
	if (sc->ale_cdata.ale_tx_cmb_tag != NULL) {
		if (sc->ale_cdata.ale_tx_cmb_paddr != 0)
			bus_dmamap_unload(sc->ale_cdata.ale_tx_cmb_tag,
			    sc->ale_cdata.ale_tx_cmb_map);
		if (sc->ale_cdata.ale_tx_cmb != NULL)
			bus_dmamem_free(sc->ale_cdata.ale_tx_cmb_tag,
			    sc->ale_cdata.ale_tx_cmb,
			    sc->ale_cdata.ale_tx_cmb_map);
		sc->ale_cdata.ale_tx_cmb_paddr = 0;
		sc->ale_cdata.ale_tx_cmb = NULL;
		bus_dma_tag_destroy(sc->ale_cdata.ale_tx_cmb_tag);
		sc->ale_cdata.ale_tx_cmb_tag = NULL;
	}
	/* Parent tags go last: child tags above referenced them. */
	if (sc->ale_cdata.ale_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->ale_cdata.ale_buffer_tag);
		sc->ale_cdata.ale_buffer_tag = NULL;
	}
	if (sc->ale_cdata.ale_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->ale_cdata.ale_parent_tag);
		sc->ale_cdata.ale_parent_tag = NULL;
	}
}
/*
 * Device shutdown method: treated exactly like suspend — stop the
 * MAC and program wake-on-LAN state before power is removed.
 */
static int
ale_shutdown(device_t dev)
{

	return (ale_suspend(dev));
}
/*
 * Note, this driver resets the link speed to 10/100Mbps by
 * restarting auto-negotiation in the suspend/shutdown phase but we
 * don't know whether that auto-negotiation would succeed or not
 * as the driver has no control after the power off/suspend
 * operation.  If the renegotiation fails, WOL may not work.
 * Running at 1Gbps would draw more power than the 375mA at 3.3V
 * specified by the PCI specification, and that could result in
 * power to the ethernet controller being shut down completely.
 *
 * TODO
 * Save the currently negotiated media speed/duplex/flow-control
 * in the softc and restore the same link again after resuming.
 * PHY handling, such as powering down or resetting to 100Mbps,
 * may be better handled in the suspend method of the phy driver.
 */
static void
ale_setlinkspeed(struct ale_softc *sc)
{
struct mii_data *mii;
int aneg, i;
mii = device_get_softc(sc->ale_miibus);
mii_pollstat(mii);
aneg = 0;
if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
(IFM_ACTIVE | IFM_AVALID)) {
switch IFM_SUBTYPE(mii->mii_media_active) {
case IFM_10_T:
case IFM_100_TX:
return;
case IFM_1000_T:
aneg++;
break;
default:
break;
}
}
ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr, MII_100T2CR, 0);
ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
DELAY(1000);
if (aneg != 0) {
/*
* Poll link state until ale(4) get a 10/100Mbps link.
*/
for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
mii_pollstat(mii);
if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
== (IFM_ACTIVE | IFM_AVALID)) {
switch (IFM_SUBTYPE(
mii->mii_media_active)) {
case IFM_10_T:
case IFM_100_TX:
ale_mac_config(sc);
return;
default:
break;
}
}
ALE_UNLOCK(sc);
pause("alelnk", hz);
ALE_LOCK(sc);
}
if (i == MII_ANEGTICKS_GIGE)
device_printf(sc->ale_dev,
"establishing a link failed, WOL may not work!");
}
/*
* No link, force MAC to have 100Mbps, full-duplex link.
* This is the last resort and may/may not work.
*/
mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
ale_mac_config(sc);
}
/*
 * Program wake-on-LAN state for suspend/shutdown.
 *
 * Without a PCI power-management capability WOL cannot be used, so
 * WOL is disabled and the PHY is forced into power-down.  Otherwise:
 * the link is renegotiated down to 10/100 (gigabit parts only),
 * magic-packet wakeup is armed if enabled, the receiver is left
 * running so wakeup frames can be seen, and PME is requested through
 * the PCI power-management status register.  Caller holds the lock.
 */
static void
ale_setwol(struct ale_softc *sc)
{
	if_t ifp;
	uint32_t reg, pmcs;
	uint16_t pmstat;
	int pmc;

	ALE_LOCK_ASSERT(sc);

	if (pci_find_cap(sc->ale_dev, PCIY_PMG, &pmc) != 0) {
		/* Disable WOL. */
		CSR_WRITE_4(sc, ALE_WOL_CFG, 0);
		reg = CSR_READ_4(sc, ALE_PCIE_PHYMISC);
		reg |= PCIE_PHYMISC_FORCE_RCV_DET;
		CSR_WRITE_4(sc, ALE_PCIE_PHYMISC, reg);
		/* Force PHY power down. */
		CSR_WRITE_2(sc, ALE_GPHY_CTRL,
		    GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN |
		    GPHY_CTRL_HIB_PULSE | GPHY_CTRL_PHY_PLL_ON |
		    GPHY_CTRL_SEL_ANA_RESET | GPHY_CTRL_PHY_IDDQ |
		    GPHY_CTRL_PCLK_SEL_DIS | GPHY_CTRL_PWDOWN_HW);
		return;
	}

	ifp = sc->ale_ifp;
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) {
		/* Gigabit part: drop the link to 10/100 for suspend. */
		if ((sc->ale_flags & ALE_FLAG_FASTETHER) == 0)
			ale_setlinkspeed(sc);
	}

	/* Arm magic-packet wakeup if requested. */
	pmcs = 0;
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
		pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
	CSR_WRITE_4(sc, ALE_WOL_CFG, pmcs);

	/* Restrict the Rx filter; keep the receiver on only for WOL. */
	reg = CSR_READ_4(sc, ALE_MAC_CFG);
	reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI |
	    MAC_CFG_BCAST);
	if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) != 0)
		reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
		reg |= MAC_CFG_RX_ENB;
	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);

	if ((if_getcapenable(ifp) & IFCAP_WOL) == 0) {
		/* WOL disabled, PHY power down. */
		reg = CSR_READ_4(sc, ALE_PCIE_PHYMISC);
		reg |= PCIE_PHYMISC_FORCE_RCV_DET;
		CSR_WRITE_4(sc, ALE_PCIE_PHYMISC, reg);
		CSR_WRITE_2(sc, ALE_GPHY_CTRL,
		    GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN |
		    GPHY_CTRL_HIB_PULSE | GPHY_CTRL_SEL_ANA_RESET |
		    GPHY_CTRL_PHY_IDDQ | GPHY_CTRL_PCLK_SEL_DIS |
		    GPHY_CTRL_PWDOWN_HW);
	}
	/* Request PME. */
	pmstat = pci_read_config(sc->ale_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->ale_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}
/*
 * Device suspend method: quiesce the controller, then arm the
 * wake-on-LAN state so the chip can wake the host if configured.
 */
static int
ale_suspend(device_t dev)
{
	struct ale_softc *sc = device_get_softc(dev);

	ALE_LOCK(sc);
	ale_stop(sc);
	ale_setwol(sc);
	ALE_UNLOCK(sc);

	return (0);
}
/*
 * Device resume method: clear PME state left over from suspend,
 * bring the PHY back out of reset and reinitialize the interface
 * if it was administratively up.
 */
static int
ale_resume(device_t dev)
{
	struct ale_softc *sc;
	if_t ifp;
	uint16_t pm_status;
	int pm_reg;

	sc = device_get_softc(dev);
	ALE_LOCK(sc);
	if (pci_find_cap(sc->ale_dev, PCIY_PMG, &pm_reg) == 0) {
		/* Disable PME and clear any pending PME status. */
		pm_status = pci_read_config(sc->ale_dev,
		    pm_reg + PCIR_POWER_STATUS, 2);
		if ((pm_status & PCIM_PSTAT_PMEENABLE) != 0)
			pci_write_config(sc->ale_dev,
			    pm_reg + PCIR_POWER_STATUS,
			    pm_status & ~PCIM_PSTAT_PMEENABLE, 2);
	}
	/* Reset PHY. */
	ale_phy_reset(sc);
	ifp = sc->ale_ifp;
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		/* Force a full reinitialization. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		ale_init_locked(sc);
	}
	ALE_UNLOCK(sc);

	return (0);
}
/*
 * Encapsulate an mbuf chain onto the Tx descriptor ring.
 *
 * Returns 0 on success, with *m_head loaded on a DMA map and the
 * producer index advanced.  On unrecoverable failure the chain is
 * freed and *m_head is set to NULL.  On the descriptor-full case
 * (ENOBUFS after the overrun check) the chain is left intact so the
 * caller can requeue it.  Caller holds the driver lock.
 */
static int
ale_encap(struct ale_softc *sc, struct mbuf **m_head)
{
	struct ale_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	struct mbuf *m;
	struct ip *ip;
	struct tcphdr *tcp;
	bus_dma_segment_t txsegs[ALE_MAXTXSEGS];
	bus_dmamap_t map;
	uint32_t cflags, hdrlen, ip_off, poff, vtag;
	int error, i, nsegs, prod, si;

	ALE_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	m = *m_head;
	ip = NULL;
	tcp = NULL;
	cflags = vtag = 0;
	ip_off = poff = 0;
	if ((m->m_pkthdr.csum_flags & (ALE_CSUM_FEATURES | CSUM_TSO)) != 0) {
		/*
		 * AR81xx requires offset of TCP/UDP payload in its Tx
		 * descriptor to perform hardware Tx checksum offload.
		 * Additionally, TSO requires IP/TCP header size and
		 * modification of IP/TCP header in order to make TSO
		 * engine work. This kind of operation takes many CPU
		 * cycles on FreeBSD so fast host CPU is required to
		 * get smooth TSO performance.
		 */
		struct ether_header *eh;

		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_NOWAIT);
			/* Release original mbufs. */
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}

		/*
		 * Buggy-controller requires 4 byte aligned Tx buffer
		 * to make custom checksum offload work.
		 */
		if ((sc->ale_flags & ALE_FLAG_TXCSUM_BUG) != 0 &&
		    (m->m_pkthdr.csum_flags & ALE_CSUM_FEATURES) != 0 &&
		    (mtod(m, intptr_t) & 3) != 0) {
			m = m_defrag(*m_head, M_NOWAIT);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}

		/* Locate the IP header, accounting for a VLAN header. */
		ip_off = sizeof(struct ether_header);
		m = m_pullup(m, ip_off);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/*
		 * Check if hardware VLAN insertion is off.
		 * Additional check for LLC/SNAP frame?
		 */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			ip_off = sizeof(struct ether_vlan_header);
			m = m_pullup(m, ip_off);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, ip_off + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		/* poff = offset of the TCP/UDP payload/header. */
		poff = ip_off + (ip->ip_hl << 2);
		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
			/*
			 * XXX
			 * AR81xx requires the first descriptor should
			 * not include any TCP playload for TSO case.
			 * (i.e. ethernet header + IP + TCP header only)
			 * m_pullup(9) above will ensure this too.
			 * However it's not correct if the first mbuf
			 * of the chain does not use cluster.
			 */
			m = m_pullup(m, poff + sizeof(struct tcphdr));
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			/* Re-fetch pointers: m_pullup may move data. */
			ip = (struct ip *)(mtod(m, char *) + ip_off);
			tcp = (struct tcphdr *)(mtod(m, char *) + poff);
			m = m_pullup(m, poff + (tcp->th_off << 2));
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			/*
			 * AR81xx requires IP/TCP header size and offset as
			 * well as TCP pseudo checksum which complicates
			 * TSO configuration. I guess this comes from the
			 * adherence to Microsoft NDIS Large Send
			 * specification which requires insertion of
			 * pseudo checksum by upper stack. The pseudo
			 * checksum that NDIS refers to doesn't include
			 * TCP payload length so ale(4) should recompute
			 * the pseudo checksum here. Hopefully this wouldn't
			 * be much burden on modern CPUs.
			 * Reset IP checksum and recompute TCP pseudo
			 * checksum as NDIS specification said.
			 */
			ip->ip_sum = 0;
			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		}
		*m_head = m;
	}

	/* Remember the starting slot; the dmamap swap below needs it. */
	si = prod = sc->ale_cdata.ale_tx_prod;
	txd = &sc->ale_cdata.ale_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf_sg(sc->ale_cdata.ale_tx_tag, map,
	    *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		/* Too many segments: compact the chain and retry once. */
		m = m_collapse(*m_head, M_NOWAIT, ALE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->ale_cdata.ale_tx_tag, map,
		    *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check descriptor overrun. */
	if (sc->ale_cdata.ale_tx_cnt + nsegs >= ALE_TX_RING_CNT - 3) {
		bus_dmamap_unload(sc->ale_cdata.ale_tx_tag, map);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->ale_cdata.ale_tx_tag, map, BUS_DMASYNC_PREWRITE);

	m = *m_head;
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		/* Request TSO and set MSS. */
		cflags |= ALE_TD_TSO;
		cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << ALE_TD_MSS_SHIFT);
		/* Set IP/TCP header size. */
		cflags |= ip->ip_hl << ALE_TD_IPHDR_LEN_SHIFT;
		cflags |= tcp->th_off << ALE_TD_TCPHDR_LEN_SHIFT;
	} else if ((m->m_pkthdr.csum_flags & ALE_CSUM_FEATURES) != 0) {
		/*
		 * AR81xx supports Tx custom checksum offload feature
		 * that offloads single 16bit checksum computation.
		 * So you can choose one among IP, TCP and UDP.
		 * Normally driver sets checksum start/insertion
		 * position from the information of TCP/UDP frame as
		 * TCP/UDP checksum takes more time than that of IP.
		 * However it seems that custom checksum offload
		 * requires 4 bytes aligned Tx buffers due to hardware
		 * bug.
		 * AR81xx also supports explicit Tx checksum computation
		 * if it is told that the size of IP header and TCP
		 * header(for UDP, the header size does not matter
		 * because it's fixed length). However with this scheme
		 * TSO does not work so you have to choose one either
		 * TSO or explicit Tx checksum offload. I chosen TSO
		 * plus custom checksum offload with work-around which
		 * will cover most common usage for this consumer
		 * ethernet controller. The work-around takes a lot of
		 * CPU cycles if Tx buffer is not aligned on 4 bytes
		 * boundary, though.
		 */
		cflags |= ALE_TD_CXSUM;
		/* Set checksum start offset. */
		cflags |= (poff << ALE_TD_CSUM_PLOADOFFSET_SHIFT);
		/* Set checksum insertion position of TCP/UDP. */
		cflags |= ((poff + m->m_pkthdr.csum_data) <<
		    ALE_TD_CSUM_XSUMOFFSET_SHIFT);
	}

	/* Configure VLAN hardware tag insertion. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		vtag = ALE_TX_VLAN_TAG(m->m_pkthdr.ether_vtag);
		vtag = ((vtag << ALE_TD_VLAN_SHIFT) & ALE_TD_VLAN_MASK);
		cflags |= ALE_TD_INSERT_VLAN_TAG;
	}

	i = 0;
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		/*
		 * Make sure the first fragment contains
		 * only ethernet and IP/TCP header with options.
		 */
		hdrlen = poff + (tcp->th_off << 2);
		desc = &sc->ale_cdata.ale_tx_ring[prod];
		desc->addr = htole64(txsegs[i].ds_addr);
		desc->len = htole32(ALE_TX_BYTES(hdrlen) | vtag);
		desc->flags = htole32(cflags);
		sc->ale_cdata.ale_tx_cnt++;
		ALE_DESC_INC(prod, ALE_TX_RING_CNT);
		if (m->m_len - hdrlen > 0) {
			/* Handle remaining payload of the first fragment. */
			desc = &sc->ale_cdata.ale_tx_ring[prod];
			desc->addr = htole64(txsegs[i].ds_addr + hdrlen);
			desc->len = htole32(ALE_TX_BYTES(m->m_len - hdrlen) |
			    vtag);
			desc->flags = htole32(cflags);
			sc->ale_cdata.ale_tx_cnt++;
			ALE_DESC_INC(prod, ALE_TX_RING_CNT);
		}
		i = 1;
	}
	/* Fill one descriptor per remaining DMA segment. */
	for (; i < nsegs; i++) {
		desc = &sc->ale_cdata.ale_tx_ring[prod];
		desc->addr = htole64(txsegs[i].ds_addr);
		desc->len = htole32(ALE_TX_BYTES(txsegs[i].ds_len) | vtag);
		desc->flags = htole32(cflags);
		sc->ale_cdata.ale_tx_cnt++;
		ALE_DESC_INC(prod, ALE_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->ale_cdata.ale_tx_prod = prod;
	/* Set TSO header on the first descriptor. */
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		desc = &sc->ale_cdata.ale_tx_ring[si];
		desc->flags |= htole32(ALE_TD_TSO_HDR);
	}
	/* Finally set EOP on the last descriptor. */
	prod = (prod + ALE_TX_RING_CNT - 1) % ALE_TX_RING_CNT;
	desc = &sc->ale_cdata.ale_tx_ring[prod];
	desc->flags |= htole32(ALE_TD_EOP);

	/*
	 * Swap dmamap of the first and the last so that the map used
	 * for the load stays with the mbuf stored on the last slot.
	 */
	txd = &sc->ale_cdata.ale_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->ale_cdata.ale_tx_ring_tag,
	    sc->ale_cdata.ale_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
/*
 * ifnet transmit-start entry point: take the driver lock and hand
 * the work off to the locked transmit path.
 */
static void
ale_start(if_t ifp)
{
	struct ale_softc *sc = if_getsoftc(ifp);

	ALE_LOCK(sc);
	ale_start_locked(ifp);
	ALE_UNLOCK(sc);
}
/*
 * Drain the interface send queue onto the Tx descriptor ring.
 * Caller holds the driver lock.  Kicks the hardware producer index
 * and arms the watchdog when at least one frame was queued.
 */
static void
ale_start_locked(if_t ifp)
{
	struct ale_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = if_getsoftc(ifp);

	ALE_LOCK_ASSERT(sc);

	/* Reclaim transmitted frames. */
	if (sc->ale_cdata.ale_tx_cnt >= ALE_TX_DESC_HIWAT)
		ale_txeof(sc);

	/* Nothing to do unless running, not stalled and link is up. */
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->ale_flags & ALE_FLAG_LINK) == 0)
		return;

	for (enq = 0; !if_sendq_empty(ifp); ) {
		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (ale_encap(sc, &m_head)) {
			/* NULL m_head means the chain was freed. */
			if (m_head == NULL)
				break;
			if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Kick. */
		CSR_WRITE_4(sc, ALE_MBOX_TPD_PROD_IDX,
		    sc->ale_cdata.ale_tx_prod);
		/* Set a timeout in case the chip goes out to lunch. */
		sc->ale_watchdog_timer = ALE_TX_TIMEOUT;
	}
}
/*
 * Per-tick Tx watchdog: when the timer armed by the transmit path
 * expires without ale_txeof() disarming it, count an output error
 * and reinitialize the controller.  Caller holds the driver lock.
 */
static void
ale_watchdog(struct ale_softc *sc)
{
	if_t ifp;

	ALE_LOCK_ASSERT(sc);

	/* Not armed, or armed but not yet expired. */
	if (sc->ale_watchdog_timer == 0)
		return;
	if (--sc->ale_watchdog_timer != 0)
		return;

	ifp = sc->ale_ifp;
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if ((sc->ale_flags & ALE_FLAG_LINK) == 0) {
		if_printf(ifp, "watchdog timeout (lost link)\n");
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		ale_init_locked(sc);
		return;
	}
	if_printf(ifp, "watchdog timeout -- resetting\n");
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	ale_init_locked(sc);
	if (!if_sendq_empty(ifp))
		ale_start_locked(ifp);
}
/*
 * Handle interface ioctls: MTU changes, interface flag changes,
 * multicast list updates, media selection and capability toggles.
 * Everything else is handed to ether_ioctl().
 */
static int
ale_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct ale_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		/* Jumbo MTU only on parts flagged ALE_FLAG_JUMBO. */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ALE_JUMBO_MTU ||
		    ((sc->ale_flags & ALE_FLAG_JUMBO) == 0 &&
		    ifr->ifr_mtu > ETHERMTU))
			error = EINVAL;
		else if (if_getmtu(ifp) != ifr->ifr_mtu) {
			ALE_LOCK(sc);
			if_setmtu(ifp, ifr->ifr_mtu);
			/* Reinitialize so the new MTU takes effect. */
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				ale_init_locked(sc);
			}
			ALE_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		ALE_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				/*
				 * Only PROMISC/ALLMULTI changes need a
				 * filter update; anything else is a no-op
				 * while running.
				 */
				if (((if_getflags(ifp) ^ sc->ale_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					ale_rxfilter(sc);
			} else {
				ale_init_locked(sc);
			}
		} else {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
				ale_stop(sc);
		}
		sc->ale_if_flags = if_getflags(ifp);
		ALE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		ALE_LOCK(sc);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			ale_rxfilter(sc);
		ALE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->ale_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		ALE_LOCK(sc);
		/* mask = set of capabilities being toggled. */
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
			if_togglecapenable(ifp, IFCAP_TXCSUM);
			/* Keep if_hwassist in sync with the toggle. */
			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
				if_sethwassistbits(ifp, ALE_CSUM_FEATURES, 0);
			else
				if_sethwassistbits(ifp, 0, ALE_CSUM_FEATURES);
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if ((mask & IFCAP_TSO4) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
			if_togglecapenable(ifp, IFCAP_TSO4);
			if ((if_getcapenable(ifp) & IFCAP_TSO4) != 0)
				if_sethwassistbits(ifp, CSUM_TSO, 0);
			else
				if_sethwassistbits(ifp, 0, CSUM_TSO);
		}
		if ((mask & IFCAP_WOL_MCAST) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL_MCAST) != 0)
			if_togglecapenable(ifp, IFCAP_WOL_MCAST);
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0)
			if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM) != 0)
			if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
			/* VLAN HWTSO requires VLAN hardware tagging. */
			if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
				if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO);
			ale_rxvlan(sc);
		}
		ALE_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
/*
 * Reprogram the MAC configuration register with the speed, duplex
 * and flow-control settings resolved by the MII layer.  Caller
 * holds the driver lock.
 */
static void
ale_mac_config(struct ale_softc *sc)
{
	struct mii_data *mii;
	uint32_t cfg;
	int subtype;

	ALE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->ale_miibus);
	cfg = CSR_READ_4(sc, ALE_MAC_CFG);
	cfg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
	    MAC_CFG_SPEED_MASK);
	/* Resolved link speed. */
	subtype = IFM_SUBTYPE(mii->mii_media_active);
	if (subtype == IFM_1000_T)
		cfg |= MAC_CFG_SPEED_1000;
	else if (subtype == IFM_10_T || subtype == IFM_100_TX)
		cfg |= MAC_CFG_SPEED_10_100;
	/* Duplex and negotiated pause abilities. */
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		cfg |= MAC_CFG_FULL_DUPLEX;
		if ((IFM_OPTIONS(mii->mii_media_active) &
		    IFM_ETH_TXPAUSE) != 0)
			cfg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) &
		    IFM_ETH_RXPAUSE) != 0)
			cfg |= MAC_CFG_RX_FC;
	}
	CSR_WRITE_4(sc, ALE_MAC_CFG, cfg);
}
static void
ale_stats_clear(struct ale_softc *sc)
{
struct smb sb;
uint32_t *reg;
int i;
for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) {
CSR_READ_4(sc, ALE_RX_MIB_BASE + i);
i += sizeof(uint32_t);
}
/* Read Tx statistics. */
for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) {
CSR_READ_4(sc, ALE_TX_MIB_BASE + i);
i += sizeof(uint32_t);
}
}
/*
 * Harvest the hardware MIB counters into the accumulated driver
 * statistics and fold the relevant ones into the ifnet counters.
 * The registers are walked in struct smb layout order (same scheme
 * as ale_stats_clear()).  Caller holds the driver lock.
 */
static void
ale_stats_update(struct ale_softc *sc)
{
	struct ale_hw_stats *stat;
	struct smb sb, *smb;
	if_t ifp;
	uint32_t *reg;
	int i;

	ALE_LOCK_ASSERT(sc);

	ifp = sc->ale_ifp;
	stat = &sc->ale_stats;
	smb = &sb;

	/* Read Rx statistics. */
	for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) {
		*reg = CSR_READ_4(sc, ALE_RX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}
	/* Read Tx statistics. */
	for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) {
		*reg = CSR_READ_4(sc, ALE_TX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}
	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_rrs_errs += smb->rx_rrs_errs;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;
	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;
	/* Update counters in ifnet. */
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, smb->tx_frames);
	/* Multi-collision frames collided at least twice. */
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, smb->tx_single_colls +
	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT);
	if_inc_counter(ifp, IFCOUNTER_OERRORS, smb->tx_late_colls +
	    smb->tx_excess_colls + smb->tx_underrun + smb->tx_pkts_truncated);
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, smb->rx_frames);
	if_inc_counter(ifp, IFCOUNTER_IERRORS,
	    smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
	    smb->rx_alignerrs);
}
/*
 * Interrupt filter routine.  Claims the interrupt only when one of
 * the bits we care about is pending, masks further interrupts and
 * defers the real work to the taskqueue.
 */
static int
ale_intr(void *arg)
{
	struct ale_softc *sc = arg;
	uint32_t pending;

	pending = CSR_READ_4(sc, ALE_INTR_STATUS);
	if ((pending & ALE_INTRS) == 0)
		return (FILTER_STRAY);

	/* Disable interrupts until ale_int_task() re-enables them. */
	CSR_WRITE_4(sc, ALE_INTR_STATUS, INTR_DIS_INT);
	taskqueue_enqueue(sc->ale_tq, &sc->ale_int_task);

	return (FILTER_HANDLED);
}
/*
 * Deferred interrupt handler (taskqueue context).  Processes Rx,
 * resets the chip on DMA errors or a broken Rx sequence number,
 * kicks pending Tx, reschedules itself while work remains and
 * finally re-enables interrupts.
 */
static void
ale_int_task(void *arg, int pending)
{
	struct ale_softc *sc;
	if_t ifp;
	uint32_t status;
	int more;

	sc = (struct ale_softc *)arg;
	status = CSR_READ_4(sc, ALE_INTR_STATUS);
	ALE_LOCK(sc);
	/* Treat leftover Rx work from the last pass as a pending event. */
	if (sc->ale_morework != 0)
		status |= INTR_RX_PKT;
	if ((status & ALE_INTRS) == 0)
		goto done;

	/* Acknowledge interrupts but still disable interrupts. */
	CSR_WRITE_4(sc, ALE_INTR_STATUS, status | INTR_DIS_INT);
	ifp = sc->ale_ifp;
	more = 0;
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		more = ale_rxeof(sc, sc->ale_process_limit);
		if (more == EAGAIN)
			/* Hit the process limit; remember unfinished work. */
			sc->ale_morework = 1;
		else if (more == EIO) {
			/* Rx sequence number mismatch: reset the chip. */
			sc->ale_stats.reset_brk_seq++;
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			ale_init_locked(sc);
			ALE_UNLOCK(sc);
			return;
		}
		if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) != 0) {
			if ((status & INTR_DMA_RD_TO_RST) != 0)
				device_printf(sc->ale_dev,
				    "DMA read error! -- resetting\n");
			if ((status & INTR_DMA_WR_TO_RST) != 0)
				device_printf(sc->ale_dev,
				    "DMA write error! -- resetting\n");
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			ale_init_locked(sc);
			ALE_UNLOCK(sc);
			return;
		}
		if (!if_sendq_empty(ifp))
			ale_start_locked(ifp);
	}

	/* Reschedule ourselves while work is still pending. */
	if (more == EAGAIN ||
	    (CSR_READ_4(sc, ALE_INTR_STATUS) & ALE_INTRS) != 0) {
		ALE_UNLOCK(sc);
		taskqueue_enqueue(sc->ale_tq, &sc->ale_int_task);
		return;
	}

done:
	ALE_UNLOCK(sc);

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0x7FFFFFFF);
}
/*
 * Reclaim mbufs and DMA maps for Tx descriptors the hardware has
 * finished with.  The hardware consumer index comes from the Tx CMB
 * (DMA'd by the controller) or, on parts with the Tx CMB bug, from
 * the TPD consumer index register.  Caller holds the driver lock.
 *
 * Fix: removed the redundant second `prog++` in the loop body — the
 * loop header already increments it and `prog` is only ever tested
 * against zero, so the double increment was dead weight.
 */
static void
ale_txeof(struct ale_softc *sc)
{
	if_t ifp;
	struct ale_txdesc *txd;
	uint32_t cons, prod;
	int prog;

	ALE_LOCK_ASSERT(sc);

	ifp = sc->ale_ifp;
	if (sc->ale_cdata.ale_tx_cnt == 0)
		return;

	bus_dmamap_sync(sc->ale_cdata.ale_tx_ring_tag,
	    sc->ale_cdata.ale_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0) {
		bus_dmamap_sync(sc->ale_cdata.ale_tx_cmb_tag,
		    sc->ale_cdata.ale_tx_cmb_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		prod = *sc->ale_cdata.ale_tx_cmb & TPD_CNT_MASK;
	} else
		prod = CSR_READ_2(sc, ALE_TPD_CONS_IDX);
	cons = sc->ale_cdata.ale_tx_cons;
	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; cons != prod; prog++,
	    ALE_DESC_INC(cons, ALE_TX_RING_CNT)) {
		if (sc->ale_cdata.ale_tx_cnt <= 0)
			break;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		sc->ale_cdata.ale_tx_cnt--;
		txd = &sc->ale_cdata.ale_txdesc[cons];
		if (txd->tx_m != NULL) {
			/* Reclaim transmitted mbufs. */
			bus_dmamap_sync(sc->ale_cdata.ale_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->ale_cdata.ale_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	if (prog > 0) {
		sc->ale_cdata.ale_tx_cons = cons;
		/*
		 * Unarm watchdog timer only when there is no pending
		 * Tx descriptors in queue.
		 */
		if (sc->ale_cdata.ale_tx_cnt == 0)
			sc->ale_watchdog_timer = 0;
	}
}
/*
 * Advance the software consumer within the current Rx page past one
 * received frame.  When the end of the page is reached, hand the
 * page back to the hardware (RXF_VALID) and flip processing to the
 * alternate page; *page and *prod are then updated to the new page
 * and its freshly-synced producer index.
 */
static void
ale_rx_update_page(struct ale_softc *sc, struct ale_rx_page **page,
    uint32_t length, uint32_t *prod)
{
	struct ale_rx_page *rx_page;

	rx_page = *page;
	/* Update consumer position: frame data plus its Rx return status,
	 * rounded up to the page alignment unit. */
	rx_page->cons += roundup(length + sizeof(struct rx_rs),
	    ALE_RX_PAGE_ALIGN);
	if (rx_page->cons >= ALE_RX_PAGE_SZ) {
		/*
		 * End of Rx page reached, let hardware reuse
		 * this page.
		 */
		rx_page->cons = 0;
		*rx_page->cmb_addr = 0;
		bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		CSR_WRITE_1(sc, ALE_RXF0_PAGE0 + sc->ale_cdata.ale_rx_curp,
		    RXF_VALID);
		/* Switch to alternate Rx page. */
		sc->ale_cdata.ale_rx_curp ^= 1;
		rx_page = *page =
		    &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp];
		/* Page flipped, sync CMB and Rx page. */
		bus_dmamap_sync(rx_page->page_tag, rx_page->page_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* Sync completed, cache updated producer index. */
		*prod = *rx_page->cmb_addr;
	}
}
/*
 * It seems that the AR81xx controller can compute a partial checksum.
 * The partial checksum value can be used to accelerate checksum
 * computation for fragmented TCP/UDP packets.  The upper network
 * stack already takes advantage of the partial checksum value in the
 * IP reassembly stage.  But I'm not sure of the correctness of the
 * partial hardware checksum assistance due to the lack of a data
 * sheet.  In addition, the Rx feature of the controller that requires
 * copying every frame effectively nullifies one of the nicest offload
 * capabilities of the controller.
 */
/*
 * Translate the hardware Rx checksum status bits into mbuf
 * checksum flags.  On parts with ALE_FLAG_RXCSUM_BUG the TCP/UDP
 * checksum result is only trusted after additional software checks
 * on the IP header (fragments are skipped).
 */
static void
ale_rxcsum(struct ale_softc *sc, struct mbuf *m, uint32_t status)
{
	if_t ifp;
	struct ip *ip;
	char *p;

	ifp = sc->ale_ifp;
	/* Hardware always checks the IP header checksum. */
	m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
	if ((status & ALE_RD_IPCSUM_NOK) == 0)
		m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

	if ((sc->ale_flags & ALE_FLAG_RXCSUM_BUG) == 0) {
		/* Trust the hardware result for non-fragmented TCP/UDP. */
		if (((status & ALE_RD_IPV4_FRAG) == 0) &&
		    ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0) &&
		    ((status & ALE_RD_TCP_UDPCSUM_NOK) == 0)) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	} else {
		if ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0 &&
		    (status & ALE_RD_TCP_UDPCSUM_NOK) == 0) {
			/* Locate the IP header within the frame data. */
			p = mtod(m, char *);
			p += ETHER_HDR_LEN;
			if ((status & ALE_RD_802_3) != 0)
				p += LLC_SNAPFRAMELEN;
			/* VLAN tag still in-line when HW stripping is off. */
			if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0 &&
			    (status & ALE_RD_VLAN) != 0)
				p += ETHER_VLAN_ENCAP_LEN;
			ip = (struct ip *)p;
			/* Skip fragments; their HW checksum is unreliable. */
			if (ip->ip_off != 0 && (status & ALE_RD_IPV4_DF) == 0)
				return;
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	}
	/*
	 * Don't mark bad checksum for TCP/UDP frames
	 * as fragmented frames may always have set
	 * bad checksummed bit of frame status.
	 */
}
/*
 * Process received frames.
 *
 * Walks the current Rx page, handing up to 'count' frames to the
 * network stack.  The hardware-updated CMB word is the producer index;
 * rx_page->cons is the driver's consumer offset within the page.
 * Returns EIO when the per-frame sequence number check fails (the
 * caller is expected to reset the chip), otherwise 0 when 'count' is
 * positive, or EAGAIN when called with a zero budget.
 */
static int
ale_rxeof(struct ale_softc *sc, int count)
{
	struct ale_rx_page *rx_page;
	struct rx_rs *rs;
	if_t ifp;
	struct mbuf *m;
	uint32_t length, prod, seqno, status, vtags;
	int prog;

	ifp = sc->ale_ifp;
	/* Sync both the CMB (producer index) and the page (frame data). */
	rx_page = &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp];
	bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(rx_page->page_tag, rx_page->page_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/*
	 * Don't directly access producer index as hardware may
	 * update it while Rx handler is in progress. It would
	 * be even better if there is a way to let hardware
	 * know how far driver processed its received frames.
	 * Alternatively, hardware could provide a way to disable
	 * CMB updates until driver acknowledges the end of CMB
	 * access.
	 */
	prod = *rx_page->cmb_addr;
	for (prog = 0; prog < count; prog++) {
		if (rx_page->cons >= prod)
			break;
		/* Each frame is prefixed by an rx_rs return status header. */
		rs = (struct rx_rs *)(rx_page->page_addr + rx_page->cons);
		seqno = ALE_RX_SEQNO(le32toh(rs->seqno));
		if (sc->ale_cdata.ale_rx_seqno != seqno) {
			/*
			 * Normally I believe this should not happen unless
			 * severe driver bug or corrupted memory. However
			 * it seems to happen under certain conditions which
			 * is triggered by abrupt Rx events such as initiation
			 * of bulk transfer of remote host. It's not easy to
			 * reproduce this and I doubt it could be related
			 * with FIFO overflow of hardware or activity of Tx
			 * CMB updates. I also remember similar behaviour
			 * seen on RealTek 8139 which uses resembling Rx
			 * scheme.
			 */
			if (bootverbose)
				device_printf(sc->ale_dev,
				    "garbled seq: %u, expected: %u -- "
				    "resetting!\n", seqno,
				    sc->ale_cdata.ale_rx_seqno);
			return (EIO);
		}
		/* Frame received. */
		sc->ale_cdata.ale_rx_seqno++;
		length = ALE_RX_BYTES(le32toh(rs->length));
		status = le32toh(rs->flags);
		if ((status & ALE_RD_ERROR) != 0) {
			/*
			 * We want to pass the following frames to upper
			 * layer regardless of error status of Rx return
			 * status.
			 *
			 *  o IP/TCP/UDP checksum is bad.
			 *  o frame length and protocol specific length
			 *    does not match.
			 */
			if ((status & (ALE_RD_CRC | ALE_RD_CODE |
			    ALE_RD_DRIBBLE | ALE_RD_RUNT | ALE_RD_OFLOW |
			    ALE_RD_TRUNC)) != 0) {
				/* Hard error: drop and advance past it. */
				ale_rx_update_page(sc, &rx_page, length, &prod);
				continue;
			}
		}
		/*
		 * m_devget(9) is major bottle-neck of ale(4)(It comes
		 * from hardware limitation). For jumbo frames we could
		 * get a slightly better performance if driver use
		 * m_getjcl(9) with proper buffer size argument. However
		 * that would make code more complicated and I don't
		 * think users would expect good Rx performance numbers
		 * on these low-end consumer ethernet controller.
		 */
		m = m_devget((char *)(rs + 1), length - ETHER_CRC_LEN,
		    ETHER_ALIGN, ifp, NULL);
		if (m == NULL) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			ale_rx_update_page(sc, &rx_page, length, &prod);
			continue;
		}
		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
		    (status & ALE_RD_IPV4) != 0)
			ale_rxcsum(sc, m, status);
		if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (status & ALE_RD_VLAN) != 0) {
			vtags = ALE_RX_VLAN(le32toh(rs->vtags));
			m->m_pkthdr.ether_vtag = ALE_RX_VLAN_TAG(vtags);
			m->m_flags |= M_VLANTAG;
		}
		/* Pass it to upper layer. */
		/* The softc lock is dropped across if_input(). */
		ALE_UNLOCK(sc);
		if_input(ifp, m);
		ALE_LOCK(sc);
		ale_rx_update_page(sc, &rx_page, length, &prod);
	}

	return (count > 0 ? 0 : EAGAIN);
}
/*
 * Per-second callout, run with the softc lock held (asserted below):
 * drives the MII state machine, refreshes MAC statistics, reclaims
 * completed Tx buffers, runs the watchdog and reschedules itself.
 */
static void
ale_tick(void *arg)
{
	struct ale_softc *sc;
	struct mii_data *mii;

	sc = (struct ale_softc *)arg;

	ALE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->ale_miibus);
	mii_tick(mii);
	ale_stats_update(sc);
	/*
	 * Reclaim Tx buffers that have been transferred. It's not
	 * needed here but it would release allocated mbuf chains
	 * faster and limit the maximum delay to a hz.
	 */
	ale_txeof(sc);
	ale_watchdog(sc);
	callout_reset(&sc->ale_tick_ch, hz, ale_tick, sc);
}
/*
 * Issue a master reset and wait for the chip to settle.
 * First polls for the MASTER_RESET bit to self-clear, then polls the
 * IDLE_STATUS register until all units report idle.  Each poll runs up
 * to ALE_RESET_TIMEOUT iterations with a 10us delay.
 */
static void
ale_reset(struct ale_softc *sc)
{
	uint32_t reg;
	int i;

	/* Initialize PCIe module. From Linux. */
	/* NOTE(review): 0x1008/0x8000 are undocumented magic values. */
	CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);

	CSR_WRITE_4(sc, ALE_MASTER_CFG, MASTER_RESET);
	for (i = ALE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_4(sc, ALE_MASTER_CFG) & MASTER_RESET) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->ale_dev, "master reset timeout!\n");

	for (i = ALE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, ALE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}

	if (i == 0)
		device_printf(sc->ale_dev, "reset timeout(0x%08x)!\n", reg);
}
/*
 * Locked wrapper around ale_init_locked(); registered as the
 * interface's init routine.
 */
static void
ale_init(void *xsc)
{
	struct ale_softc *sc = (struct ale_softc *)xsc;

	ALE_LOCK(sc);
	ale_init_locked(sc);
	ALE_UNLOCK(sc);
}
/*
 * Bring the interface up: stop and reset the chip, rebuild the Tx ring
 * and Rx pages, and reprogram every MAC/DMA/queue register before
 * enabling interrupts and kicking off the MII media change and the
 * per-second callout.  Must be called with the softc lock held
 * (asserted).  Returns early if the interface is already running.
 */
static void
ale_init_locked(struct ale_softc *sc)
{
	if_t ifp;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, rxf_hi, rxf_lo;

	ALE_LOCK_ASSERT(sc);

	ifp = sc->ale_ifp;
	mii = device_get_softc(sc->ale_miibus);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;
	/*
	 * Cancel any pending I/O.
	 */
	ale_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	ale_reset(sc);
	/* Initialize Tx descriptors, DMA memory blocks. */
	ale_init_rx_pages(sc);
	ale_init_tx_ring(sc);

	/* Reprogram the station address. */
	bcopy(if_getlladdr(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, ALE_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, ALE_PAR1, eaddr[0] << 8 | eaddr[1]);
	/*
	 * Clear WOL status and disable all WOL feature as WOL
	 * would interfere Rx operation under normal environments.
	 */
	CSR_READ_4(sc, ALE_WOL_CFG);
	CSR_WRITE_4(sc, ALE_WOL_CFG, 0);
	/*
	 * Set Tx descriptor/RXF0/CMB base addresses. They share
	 * the same high address part of DMAable region.
	 */
	paddr = sc->ale_cdata.ale_tx_ring_paddr;
	CSR_WRITE_4(sc, ALE_TPD_ADDR_HI, ALE_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALE_TPD_ADDR_LO, ALE_ADDR_LO(paddr));
	CSR_WRITE_4(sc, ALE_TPD_CNT,
	    (ALE_TX_RING_CNT << TPD_CNT_SHIFT) & TPD_CNT_MASK);
	/* Set Rx page base address, note we use single queue. */
	paddr = sc->ale_cdata.ale_rx_page[0].page_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_PAGE0_ADDR_LO, ALE_ADDR_LO(paddr));
	paddr = sc->ale_cdata.ale_rx_page[1].page_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_PAGE1_ADDR_LO, ALE_ADDR_LO(paddr));
	/* Set Tx/Rx CMB addresses. */
	paddr = sc->ale_cdata.ale_tx_cmb_paddr;
	CSR_WRITE_4(sc, ALE_TX_CMB_ADDR_LO, ALE_ADDR_LO(paddr));
	paddr = sc->ale_cdata.ale_rx_page[0].cmb_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_CMB0_ADDR_LO, ALE_ADDR_LO(paddr));
	paddr = sc->ale_cdata.ale_rx_page[1].cmb_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_CMB1_ADDR_LO, ALE_ADDR_LO(paddr));
	/* Mark RXF0 is valid. */
	CSR_WRITE_1(sc, ALE_RXF0_PAGE0, RXF_VALID);
	CSR_WRITE_1(sc, ALE_RXF0_PAGE1, RXF_VALID);
	/*
	 * No need to initialize RFX1/RXF2/RXF3. We don't use
	 * multi-queue yet.
	 */

	/* Set Rx page size, excluding guard frame size. */
	CSR_WRITE_4(sc, ALE_RXF_PAGE_SIZE, ALE_RX_PAGE_SZ);
	/* Tell hardware that we're ready to load DMA blocks. */
	CSR_WRITE_4(sc, ALE_DMA_BLOCK, DMA_BLOCK_LOAD);

	/* Set Rx/Tx interrupt trigger threshold. */
	CSR_WRITE_4(sc, ALE_INT_TRIG_THRESH, (1 << INT_TRIG_RX_THRESH_SHIFT) |
	    (4 << INT_TRIG_TX_THRESH_SHIFT));
	/*
	 * XXX
	 * Set interrupt trigger timer, its purpose and relation
	 * with interrupt moderation mechanism is not clear yet.
	 */
	CSR_WRITE_4(sc, ALE_INT_TRIG_TIMER,
	    ((ALE_USECS(10) << INT_TRIG_RX_TIMER_SHIFT) |
	    (ALE_USECS(1000) << INT_TRIG_TX_TIMER_SHIFT)));

	/* Configure interrupt moderation timer. */
	reg = ALE_USECS(sc->ale_int_rx_mod) << IM_TIMER_RX_SHIFT;
	reg |= ALE_USECS(sc->ale_int_tx_mod) << IM_TIMER_TX_SHIFT;
	CSR_WRITE_4(sc, ALE_IM_TIMER, reg);
	reg = CSR_READ_4(sc, ALE_MASTER_CFG);
	reg &= ~(MASTER_CHIP_REV_MASK | MASTER_CHIP_ID_MASK);
	reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
	/* A moderation value of 0 disables the corresponding timer. */
	if (ALE_USECS(sc->ale_int_rx_mod) != 0)
		reg |= MASTER_IM_RX_TIMER_ENB;
	if (ALE_USECS(sc->ale_int_tx_mod) != 0)
		reg |= MASTER_IM_TX_TIMER_ENB;
	CSR_WRITE_4(sc, ALE_MASTER_CFG, reg);
	CSR_WRITE_2(sc, ALE_INTR_CLR_TIMER, ALE_USECS(1000));

	/* Set Maximum frame size of controller. */
	if (if_getmtu(ifp) < ETHERMTU)
		sc->ale_max_frame_size = ETHERMTU;
	else
		sc->ale_max_frame_size = if_getmtu(ifp);
	sc->ale_max_frame_size += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
	    ETHER_CRC_LEN;
	CSR_WRITE_4(sc, ALE_FRAME_SIZE, sc->ale_max_frame_size);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, ALE_IPG_IFG_CFG,
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));

	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, ALE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));

	/* Configure Tx jumbo frame parameters. */
	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) {
		if (if_getmtu(ifp) < ETHERMTU)
			reg = sc->ale_max_frame_size;
		else if (if_getmtu(ifp) < 6 * 1024)
			reg = (sc->ale_max_frame_size * 2) / 3;
		else
			reg = sc->ale_max_frame_size / 2;
		CSR_WRITE_4(sc, ALE_TX_JUMBO_THRESH,
		    roundup(reg, TX_JUMBO_THRESH_UNIT) >>
		    TX_JUMBO_THRESH_UNIT_SHIFT);
	}

	/* Configure TxQ. */
	reg = (128 << (sc->ale_dma_rd_burst >> DMA_CFG_RD_BURST_SHIFT))
	    << TXQ_CFG_TX_FIFO_BURST_SHIFT;
	reg |= (TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
	    TXQ_CFG_TPD_BURST_MASK;
	CSR_WRITE_4(sc, ALE_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE | TXQ_CFG_ENB);

	/* Configure Rx jumbo frame & flow control parameters. */
	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) {
		reg = roundup(sc->ale_max_frame_size, RX_JUMBO_THRESH_UNIT);
		CSR_WRITE_4(sc, ALE_RX_JUMBO_THRESH,
		    (((reg >> RX_JUMBO_THRESH_UNIT_SHIFT) <<
		    RX_JUMBO_THRESH_MASK_SHIFT) & RX_JUMBO_THRESH_MASK) |
		    ((RX_JUMBO_LKAH_DEFAULT << RX_JUMBO_LKAH_SHIFT) &
		    RX_JUMBO_LKAH_MASK));
		reg = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN);
		/* Flow-control watermarks: pause at 70%, resume at 30%. */
		rxf_hi = (reg * 7) / 10;
		rxf_lo = (reg * 3)/ 10;
		CSR_WRITE_4(sc, ALE_RX_FIFO_PAUSE_THRESH,
		    ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
		    ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_HI_MASK));
	}

	/* Disable RSS. */
	CSR_WRITE_4(sc, ALE_RSS_IDT_TABLE0, 0);
	CSR_WRITE_4(sc, ALE_RSS_CPU, 0);

	/* Configure RxQ. */
	CSR_WRITE_4(sc, ALE_RXQ_CFG,
	    RXQ_CFG_ALIGN_32 | RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);

	/* Configure DMA parameters. */
	reg = 0;
	if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0)
		reg |= DMA_CFG_TXCMB_ENB;
	CSR_WRITE_4(sc, ALE_DMA_CFG,
	    DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI | DMA_CFG_RCB_64 |
	    sc->ale_dma_rd_burst | reg |
	    sc->ale_dma_wr_burst | DMA_CFG_RXCMB_ENB |
	    ((DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
	    DMA_CFG_RD_DELAY_CNT_MASK) |
	    ((DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
	    DMA_CFG_WR_DELAY_CNT_MASK));

	/*
	 * Hardware can be configured to issue SMB interrupt based
	 * on programmed interval. Since there is a callout that is
	 * invoked for every hz in driver we use that instead of
	 * relying on periodic SMB interrupt.
	 */
	CSR_WRITE_4(sc, ALE_SMB_STAT_TIMER, ALE_USECS(0));
	/* Clear MAC statistics. */
	ale_stats_clear(sc);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 *  Actual reconfiguration of MAC for resolved speed/duplex
	 *  is followed after detection of link establishment.
	 *  AR81xx always does checksum computation regardless of
	 *  MAC_CFG_RXCSUM_ENB bit. In fact, setting the bit will
	 *  cause Rx handling issue for fragmented IP datagrams due
	 *  to silicon bug.
	 */
	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK);
	if ((sc->ale_flags & ALE_FLAG_FASTETHER) != 0)
		reg |= MAC_CFG_SPEED_10_100;
	else
		reg |= MAC_CFG_SPEED_1000;
	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);

	/* Set up the receive filter. */
	ale_rxfilter(sc);
	ale_rxvlan(sc);

	/* Acknowledge all pending interrupts and clear it. */
	CSR_WRITE_4(sc, ALE_INTR_MASK, ALE_INTRS);
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	sc->ale_flags &= ~ALE_FLAG_LINK;
	/* Switch to the current media. */
	mii_mediachg(mii);

	callout_reset(&sc->ale_tick_ch, hz, ale_tick, sc);
}
/*
 * Bring the interface down: mark it not-running, stop the callout and
 * watchdog, quiesce the queues, DMA engines and MACs, then free any
 * mbufs still held by the Tx ring.  Must be called with the softc lock
 * held (asserted).
 */
static void
ale_stop(struct ale_softc *sc)
{
	if_t ifp;
	struct ale_txdesc *txd;
	uint32_t reg;
	int i;

	ALE_LOCK_ASSERT(sc);
	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->ale_ifp;
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->ale_flags &= ~ALE_FLAG_LINK;
	callout_stop(&sc->ale_tick_ch);
	sc->ale_watchdog_timer = 0;
	ale_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_4(sc, ALE_INTR_MASK, 0);
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);
	/* Disable queue processing and DMA. */
	reg = CSR_READ_4(sc, ALE_TXQ_CFG);
	reg &= ~TXQ_CFG_ENB;
	CSR_WRITE_4(sc, ALE_TXQ_CFG, reg);
	reg = CSR_READ_4(sc, ALE_RXQ_CFG);
	reg &= ~RXQ_CFG_ENB;
	CSR_WRITE_4(sc, ALE_RXQ_CFG, reg);
	reg = CSR_READ_4(sc, ALE_DMA_CFG);
	reg &= ~(DMA_CFG_TXCMB_ENB | DMA_CFG_RXCMB_ENB);
	CSR_WRITE_4(sc, ALE_DMA_CFG, reg);
	/* Let in-flight DMA drain before stopping the MACs. */
	DELAY(1000);
	/* Stop Rx/Tx MACs. */
	ale_stop_mac(sc);
	/* Disable interrupts which might be touched in taskq handler. */
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);

	/*
	 * Free TX mbufs still in the queues.
	 */
	for (i = 0; i < ALE_TX_RING_CNT; i++) {
		txd = &sc->ale_cdata.ale_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->ale_cdata.ale_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->ale_cdata.ale_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}
/*
 * Disable the Tx/Rx MAC engines and poll IDLE_STATUS (up to
 * ALE_TIMEOUT iterations, 10us apart) until the chip reports idle.
 * Must be called with the softc lock held (asserted).
 */
static void
ale_stop_mac(struct ale_softc *sc)
{
	uint32_t reg;
	int i;

	ALE_LOCK_ASSERT(sc);

	reg = CSR_READ_4(sc, ALE_MAC_CFG);
	if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
		CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
	}

	for (i = ALE_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALE_IDLE_STATUS);
		if (reg == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		device_printf(sc->ale_dev,
		    "could not disable Tx/Rx MAC(0x%08x)!\n", reg);
}
/*
 * Reset the Tx ring to an empty state: zero the descriptor ring and
 * Tx CMB, clear per-descriptor mbuf pointers, and pre-sync the DMA
 * maps for device access.  Must be called with the softc lock held
 * (asserted).
 */
static void
ale_init_tx_ring(struct ale_softc *sc)
{
	struct ale_txdesc *txd;
	int i;

	ALE_LOCK_ASSERT(sc);

	sc->ale_cdata.ale_tx_prod = 0;
	sc->ale_cdata.ale_tx_cons = 0;
	sc->ale_cdata.ale_tx_cnt = 0;

	bzero(sc->ale_cdata.ale_tx_ring, ALE_TX_RING_SZ);
	bzero(sc->ale_cdata.ale_tx_cmb, ALE_TX_CMB_SZ);
	for (i = 0; i < ALE_TX_RING_CNT; i++) {
		txd = &sc->ale_cdata.ale_txdesc[i];
		txd->tx_m = NULL;
	}
	*sc->ale_cdata.ale_tx_cmb = 0;
	/* Syncs must follow the CPU writes above. */
	bus_dmamap_sync(sc->ale_cdata.ale_tx_cmb_tag,
	    sc->ale_cdata.ale_tx_cmb_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->ale_cdata.ale_tx_ring_tag,
	    sc->ale_cdata.ale_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * Reset both Rx pages: zero the page buffers and their CMB (producer
 * index) blocks, reset consumer offsets and the expected sequence
 * number, and pre-sync the DMA maps.  Must be called with the softc
 * lock held (asserted).
 */
static void
ale_init_rx_pages(struct ale_softc *sc)
{
	struct ale_rx_page *rx_page;
	int i;

	ALE_LOCK_ASSERT(sc);

	sc->ale_morework = 0;
	sc->ale_cdata.ale_rx_seqno = 0;
	sc->ale_cdata.ale_rx_curp = 0;

	for (i = 0; i < ALE_RX_PAGES; i++) {
		rx_page = &sc->ale_cdata.ale_rx_page[i];
		bzero(rx_page->page_addr, sc->ale_pagesize);
		bzero(rx_page->cmb_addr, ALE_RX_CMB_SZ);
		rx_page->cons = 0;
		*rx_page->cmb_addr = 0;
		bus_dmamap_sync(rx_page->page_tag, rx_page->page_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
/*
 * Program hardware VLAN tag stripping to match the interface's
 * IFCAP_VLAN_HWTAGGING capability.  Must be called with the softc
 * lock held (asserted).
 */
static void
ale_rxvlan(struct ale_softc *sc)
{
	uint32_t cfg;

	ALE_LOCK_ASSERT(sc);

	cfg = CSR_READ_4(sc, ALE_MAC_CFG);
	if ((if_getcapenable(sc->ale_ifp) & IFCAP_VLAN_HWTAGGING) != 0)
		cfg |= MAC_CFG_VLAN_TAG_STRIP;
	else
		cfg &= ~MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, ALE_MAC_CFG, cfg);
}
/*
 * if_foreach_llmaddr() callback: fold one multicast address into the
 * two-word MAR hash table.  The CRC's top bit picks the table word,
 * the next five bits pick the bit within it.
 */
static u_int
ale_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *table = arg;
	uint32_t crc;

	crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
	table[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);

	return (1);
}
/*
 * Program the receive filter (broadcast/promiscuous/allmulti flags and
 * the multicast hash table) from the interface state.  Must be called
 * with the softc lock held (asserted).
 */
static void
ale_rxfilter(struct ale_softc *sc)
{
	if_t ifp;
	uint32_t mchash[2];
	uint32_t rxcfg;

	ALE_LOCK_ASSERT(sc);

	ifp = sc->ale_ifp;

	rxcfg = CSR_READ_4(sc, ALE_MAC_CFG);
	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
	if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
		rxcfg |= MAC_CFG_BCAST;
	if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((if_getflags(ifp) & IFF_PROMISC) != 0)
			rxcfg |= MAC_CFG_PROMISC;
		if ((if_getflags(ifp) & IFF_ALLMULTI) != 0)
			rxcfg |= MAC_CFG_ALLMULTI;
		/* Open the hash filter completely. */
		CSR_WRITE_4(sc, ALE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, ALE_MAR1, 0xFFFFFFFF);
		CSR_WRITE_4(sc, ALE_MAC_CFG, rxcfg);
		return;
	}

	/* Program new filter. */
	bzero(mchash, sizeof(mchash));
	if_foreach_llmaddr(ifp, ale_hash_maddr, &mchash);
	CSR_WRITE_4(sc, ALE_MAR0, mchash[0]);
	CSR_WRITE_4(sc, ALE_MAR1, mchash[1]);
	CSR_WRITE_4(sc, ALE_MAC_CFG, rxcfg);
}
/*
 * Generic sysctl helper: accept a new integer value only when it lies
 * within [low, high]; otherwise return EINVAL.  Read-only requests
 * pass through unchanged.
 */
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int err, v;

	if (arg1 == NULL)
		return (EINVAL);
	v = *(int *)arg1;
	err = sysctl_handle_int(oidp, &v, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);
	if (v >= low && v <= high) {
		*(int *)arg1 = v;
		return (0);
	}
	return (EINVAL);
}
/* Sysctl handler: bound the Rx process limit to [ALE_PROC_MIN, ALE_PROC_MAX]. */
static int
sysctl_hw_ale_proc_limit(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    ALE_PROC_MIN, ALE_PROC_MAX));
}
/*
 * Sysctl handler: bound the interrupt moderation timer to
 * [ALE_IM_TIMER_MIN, ALE_IM_TIMER_MAX].
 */
static int
sysctl_hw_ale_int_mod(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    ALE_IM_TIMER_MIN, ALE_IM_TIMER_MAX));
}
diff --git a/sys/dev/altera/atse/if_atse.c b/sys/dev/altera/atse/if_atse.c
index 923292484207..9e3871cf5779 100644
--- a/sys/dev/altera/atse/if_atse.c
+++ b/sys/dev/altera/atse/if_atse.c
@@ -1,1597 +1,1592 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2012, 2013 Bjoern A. Zeeb
* Copyright (c) 2014 Robert N. M. Watson
* Copyright (c) 2016-2017 Ruslan Bukin <br@bsdpad.com>
* All rights reserved.
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-11-C-0249)
* ("MRC2"), as part of the DARPA MRC research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Altera Triple-Speed Ethernet MegaCore, Function User Guide
* UG-01008-3.0, Software Version: 12.0, June 2012.
* Available at the time of writing at:
* http://www.altera.com/literature/ug/ug_ethernet.pdf
*
* We are using an Marvell E1111 (Alaska) PHY on the DE4. See mii/e1000phy.c.
*/
/*
* XXX-BZ NOTES:
* - ifOutBroadcastPkts are only counted if both ether dst and src are all-1s;
* seems an IP core bug, they count ether broadcasts as multicast. Is this
* still the case?
* - figure out why the TX FIFO fill status and intr did not work as expected.
* - test 100Mbit/s and 10Mbit/s
* - blacklist the one special factory programmed ethernet address (for now
* hardcoded, later from loader?)
* - resolve all XXX, left as reminders to shake out details later
* - Jumbo frame support
*/
#include <sys/cdefs.h>
#include "opt_device_polling.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/jail.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/types.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/bpf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/altera/atse/if_atsereg.h>
#include <dev/xdma/xdma.h>
#define RX_QUEUE_SIZE 4096
#define TX_QUEUE_SIZE 4096
#define NUM_RX_MBUF 512
#define BUFRING_SIZE 8192
#include <machine/cache.h>
/* XXX once we'd do parallel attach, we need a global lock for this. */
#define ATSE_ETHERNET_OPTION_BITS_UNDEF 0
#define ATSE_ETHERNET_OPTION_BITS_READ 1
static int atse_ethernet_option_bits_flag = ATSE_ETHERNET_OPTION_BITS_UNDEF;
static uint8_t atse_ethernet_option_bits[ALTERA_ETHERNET_OPTION_BITS_LEN];
/*
* Softc and critical resource locking.
*/
#define ATSE_LOCK(_sc) mtx_lock(&(_sc)->atse_mtx)
#define ATSE_UNLOCK(_sc) mtx_unlock(&(_sc)->atse_mtx)
#define ATSE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->atse_mtx, MA_OWNED)
#define ATSE_DEBUG
#undef ATSE_DEBUG
#ifdef ATSE_DEBUG
#define DPRINTF(format, ...) printf(format, __VA_ARGS__)
#else
#define DPRINTF(format, ...)
#endif
/*
* Register space access macros.
*/
/*
 * Write a 32-bit CSR.  'reg' is a dword index (scaled by 4 into a byte
 * offset); the value is converted to little-endian before the write.
 * Note the DPRINTF logs the already byte-swapped value.
 */
static inline void
csr_write_4(struct atse_softc *sc, uint32_t reg, uint32_t val4,
    const char *f, const int l)
{

	val4 = htole32(val4);
	DPRINTF("[%s:%d] CSR W %s 0x%08x (0x%08x) = 0x%08x\n", f, l,
	    "atse_mem_res", reg, reg * 4, val4);
	bus_write_4(sc->atse_mem_res, reg * 4, val4);
}
/*
 * Read a 32-bit CSR.  'reg' is a dword index (scaled by 4 into a byte
 * offset); the value is converted from little-endian to host order.
 */
static inline uint32_t
csr_read_4(struct atse_softc *sc, uint32_t reg, const char *f, const int l)
{
	uint32_t val4;

	val4 = le32toh(bus_read_4(sc->atse_mem_res, reg * 4));
	DPRINTF("[%s:%d] CSR R %s 0x%08x (0x%08x) = 0x%08x\n", f, l,
	    "atse_mem_res", reg, reg * 4, val4);

	return (val4);
}
/*
* See page 5-2 that it's all dword offsets and the MS 16 bits need to be zero
* on write and ignored on read.
*/
/*
 * Write a 16-bit PCS/PHY register at dword index 'bmcr + reg'.  The
 * upper 16 bits must be zero on write (see note above), so the value
 * is masked before conversion to little-endian.
 */
static inline void
pxx_write_2(struct atse_softc *sc, bus_addr_t bmcr, uint32_t reg, uint16_t val,
    const char *f, const int l, const char *s)
{
	uint32_t val4;

	val4 = htole32(val & 0x0000ffff);
	/*
	 * %jx expects uintmax_t; bus_addr_t is not promoted to that in a
	 * variadic call, so cast explicitly (undefined behaviour otherwise
	 * once ATSE_DEBUG is enabled).
	 */
	DPRINTF("[%s:%d] %s W %s 0x%08x (0x%08jx) = 0x%08x\n", f, l, s,
	    "atse_mem_res", reg, (uintmax_t)((bmcr + reg) * 4), val4);
	bus_write_4(sc->atse_mem_res, (bmcr + reg) * 4, val4);
}
/*
 * Read a 16-bit PCS/PHY register at dword index 'bmcr + reg'.  The
 * upper 16 bits are ignored on read (see note above).
 */
static inline uint16_t
pxx_read_2(struct atse_softc *sc, bus_addr_t bmcr, uint32_t reg, const char *f,
    const int l, const char *s)
{
	uint32_t val4;
	uint16_t val;

	val4 = bus_read_4(sc->atse_mem_res, (bmcr + reg) * 4);
	val = le32toh(val4) & 0x0000ffff;
	/*
	 * Cast the bus_addr_t expression to uintmax_t for %jx; passing it
	 * un-cast through varargs is undefined behaviour when ATSE_DEBUG
	 * is enabled.
	 */
	DPRINTF("[%s:%d] %s R %s 0x%08x (0x%08jx) = 0x%04x\n", f, l, s,
	    "atse_mem_res", reg, (uintmax_t)((bmcr + reg) * 4), val);

	return (val);
}
#define CSR_WRITE_4(sc, reg, val) \
csr_write_4((sc), (reg), (val), __func__, __LINE__)
#define CSR_READ_4(sc, reg) \
csr_read_4((sc), (reg), __func__, __LINE__)
#define PCS_WRITE_2(sc, reg, val) \
pxx_write_2((sc), sc->atse_bmcr0, (reg), (val), __func__, __LINE__, \
"PCS")
#define PCS_READ_2(sc, reg) \
pxx_read_2((sc), sc->atse_bmcr0, (reg), __func__, __LINE__, "PCS")
#define PHY_WRITE_2(sc, reg, val) \
pxx_write_2((sc), sc->atse_bmcr1, (reg), (val), __func__, __LINE__, \
"PHY")
#define PHY_READ_2(sc, reg) \
pxx_read_2((sc), sc->atse_bmcr1, (reg), __func__, __LINE__, "PHY")
static void atse_tick(void *);
static int atse_detach(device_t);
/*
 * Allocate 'n' cluster mbufs and hand them to the Rx xdma channel as
 * receive buffers.  Returns 0 on success, -1 if an allocation fails
 * (mbufs already enqueued in earlier iterations remain queued).
 */
static int
atse_rx_enqueue(struct atse_softc *sc, uint32_t n)
{
	struct mbuf *m;
	int i;

	for (i = 0; i < n; i++) {
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			device_printf(sc->dev,
			    "%s: Can't alloc rx mbuf\n", __func__);
			return (-1);
		}

		/* Offer the whole cluster to the hardware. */
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
		xdma_enqueue_mbuf(sc->xchan_rx, &m, 0, 4, 4, XDMA_DEV_TO_MEM);
	}

	return (0);
}
/*
 * Tx completion callback from the xdma channel: reap every finished
 * transfer, count errors, free the mbufs and clear OACTIVE so the
 * stack may queue more packets.
 */
static int
atse_xdma_tx_intr(void *arg, xdma_transfer_status_t *status)
{
	xdma_transfer_status_t st;
	struct atse_softc *sc;
	struct mbuf *m;
	if_t ifp;

	sc = arg;

	ATSE_LOCK(sc);

	ifp = sc->atse_ifp;

	while (xdma_dequeue_mbuf(sc->xchan_tx, &m, &st) == 0) {
		if (st.error != 0)
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		sc->txcount--;
	}

	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	ATSE_UNLOCK(sc);

	return (0);
}
/*
 * Rx completion callback from the xdma channel: dequeue every finished
 * buffer, hand good frames to the stack (dropping the softc lock
 * around if_input()), and replenish the channel with as many fresh
 * mbufs as were consumed.
 */
static int
atse_xdma_rx_intr(void *arg, xdma_transfer_status_t *status)
{
	xdma_transfer_status_t st;
	struct atse_softc *sc;
	if_t ifp;
	struct mbuf *m;
	int err;
	uint32_t cnt_processed;

	sc = arg;

	ATSE_LOCK(sc);

	ifp = sc->atse_ifp;

	cnt_processed = 0;
	for (;;) {
		err = xdma_dequeue_mbuf(sc->xchan_rx, &m, &st);
		if (err != 0) {
			break;
		}
		cnt_processed++;

		if (st.error != 0) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			m_freem(m);
			continue;
		}

		/* Trim the mbuf to the actual DMA'd length. */
		m->m_pkthdr.len = m->m_len = st.transferred;
		m->m_pkthdr.rcvif = ifp;
		m_adj(m, ETHER_ALIGN);
		ATSE_UNLOCK(sc);
		if_input(ifp, m);
		ATSE_LOCK(sc);
	}

	/* Refill one buffer per frame consumed above. */
	atse_rx_enqueue(sc, cnt_processed);

	ATSE_UNLOCK(sc);

	return (0);
}
/*
 * Drain the drbr ring into the Tx xdma channel and submit the batch.
 * On enqueue failure the mbuf is put back and draining stops.  Always
 * returns 0.  Called with the softc lock and br_mtx held (see
 * atse_transmit()).
 */
static int
atse_transmit_locked(if_t ifp)
{
	struct atse_softc *sc;
	struct mbuf *m;
	struct buf_ring *br;
	int error;
	int enq;

	sc = if_getsoftc(ifp);
	br = sc->br;

	enq = 0;

	while ((m = drbr_peek(ifp, br)) != NULL) {
		error = xdma_enqueue_mbuf(sc->xchan_tx, &m, 0, 4, 4, XDMA_MEM_TO_DEV);
		if (error != 0) {
			/* No space in request queue available yet. */
			drbr_putback(ifp, br, m);
			break;
		}

		drbr_advance(ifp, br);

		sc->txcount++;
		enq++;

		/* If anyone is interested give them a copy. */
		ETHER_BPF_MTAP(ifp, m);
        }

	if (enq > 0)
		xdma_queue_submit(sc->xchan_tx);

	return (0);
}
/*
 * if_transmit handler: enqueue the mbuf on the drbr ring and, when the
 * interface is running and has link, drain the ring into the Tx xdma
 * channel.
 */
static int
atse_transmit(if_t ifp, struct mbuf *m)
{
	struct atse_softc *sc;
	int error;

	sc = if_getsoftc(ifp);

	ATSE_LOCK(sc);
	mtx_lock(&sc->br_mtx);

	/*
	 * Not up-and-running or no link yet: just queue the packet;
	 * it will be drained once transmission is possible.
	 */
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING ||
	    (sc->atse_flags & ATSE_FLAGS_LINK) == 0) {
		error = drbr_enqueue(ifp, sc->br, m);
		goto out;
	}

	error = drbr_enqueue(ifp, sc->br, m);
	if (error == 0)
		error = atse_transmit_locked(ifp);

out:
	mtx_unlock(&sc->br_mtx);
	ATSE_UNLOCK(sc);

	return (error);
}
/*
 * if_qflush handler.
 *
 * NOTE(review): this is a stub -- it only logs the call and never
 * drains sc->br or the xdma queues, so packets queued by
 * atse_transmit() are not freed here.  TODO: confirm whether a real
 * flush (e.g. drbr_flush() under br_mtx) is intended.
 */
static void
atse_qflush(if_t ifp)
{
	struct atse_softc *sc;

	sc = if_getsoftc(ifp);

	printf("%s\n", __func__);
}
/*
 * Stop the interface: cancel the tick callout, mark the interface
 * down, disable the MAC Tx/Rx datapath and poll (up to 100 * 10us)
 * for the enable bits to clear.  Always returns 0.  Must be called
 * with the softc lock held (asserted).
 */
static int
atse_stop_locked(struct atse_softc *sc)
{
	uint32_t mask, val4;
	if_t ifp;
	int i;

	ATSE_LOCK_ASSERT(sc);

	callout_stop(&sc->atse_tick);

	ifp = sc->atse_ifp;
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));

	/* Disable MAC transmit and receive datapath. */
	mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
	val4 &= ~mask;
	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);

	/* Wait for bits to be cleared; i=100 is excessive. */
	for (i = 0; i < 100; i++) {
		val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
		if ((val4 & mask) == 0) {
			break;
		}
		DELAY(10);
	}

	if ((val4 & mask) != 0) {
		device_printf(sc->atse_dev, "Disabling MAC TX/RX timed out.\n");
		/* Punt. */
	}

	sc->atse_flags &= ~ATSE_FLAGS_LINK;

	return (0);
}
/*
 * if_foreach_llmaddr() callback: fold one multicast address into the
 * 64-entry hash bitmap pointed to by 'arg'.  The slot number 'x' is a
 * 6-bit value whose bit i is the parity of address byte i, so x lies
 * in [0, 63].
 */
static u_int
atse_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint64_t *h = arg;
	uint8_t *addr, x, y;
	int i, j;

	addr = LLADDR(sdl);
	x = 0;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		y = addr[i] & 0x01;
		for (j = 1; j < 8; j++)
			y ^= (addr[i] >> j) & 0x01;
		x |= (y << i);
	}
	/*
	 * The shift must be performed in 64-bit arithmetic: the previous
	 * "1 << x" shifted a 32-bit int, which is undefined behaviour for
	 * x >= 31 and could never set hash slots 32..63.
	 */
	*h |= (1ULL << x);

	return (1);
}
/*
 * Program the receive filter: promiscuous mode from interface flags,
 * and the 64-slot multicast hash table (either fully open for
 * IFF_ALLMULTI or rebuilt from the current multicast list).  Always
 * returns 0.
 */
static int
atse_rxfilter_locked(struct atse_softc *sc)
{
	if_t ifp;
	uint32_t val4;
	int i;

	/* XXX-BZ can we find out if we have the MHASH synthesized? */
	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
	/* For simplicity always hash full 48 bits of addresses. */
	if ((val4 & BASE_CFG_COMMAND_CONFIG_MHASH_SEL) != 0)
		val4 &= ~BASE_CFG_COMMAND_CONFIG_MHASH_SEL;

	ifp = sc->atse_ifp;
	if (if_getflags(ifp) & IFF_PROMISC) {
		val4 |= BASE_CFG_COMMAND_CONFIG_PROMIS_EN;
	} else {
		val4 &= ~BASE_CFG_COMMAND_CONFIG_PROMIS_EN;
	}

	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);

	if (if_getflags(ifp) & IFF_ALLMULTI) {
		/* Accept all multicast addresses. */
		/* NOTE(review): "<=" writes MHASH_LEN+1 slots -- confirm
		 * whether MHASH_LEN is an inclusive bound in the reg map. */
		for (i = 0; i <= MHASH_LEN; i++)
			CSR_WRITE_4(sc, MHASH_START + i, 0x1);
	} else {
		/*
		 * Can hold MHASH_LEN entries.
		 * XXX-BZ bitstring.h would be more general.
		 */
		uint64_t h;

		/*
		 * Re-build and re-program hash table. First build the
		 * bit-field "yes" or "no" for each slot per address, then
		 * do all the programming afterwards.
		 */
		h = 0;
		(void)if_foreach_llmaddr(ifp, atse_hash_maddr, &h);
		for (i = 0; i <= MHASH_LEN; i++) {
			/*
			 * Test the bit with a 64-bit shift: the previous
			 * "1 << i" shifted a 32-bit int, which is undefined
			 * behaviour for i >= 31 and ignored slots 32..63.
			 * Guard i >= 64 since h only has 64 bits.
			 */
			CSR_WRITE_4(sc, MHASH_START + i,
			    (i < 64 && (h & (1ULL << i))) ? 0x01 : 0x00);
		}
	}

	return (0);
}
/*
 * Read the Altera "Ethernet option bits" from the sibling CFI flash
 * device into the global atse_ethernet_option_bits[] buffer.  Done at
 * most once (guarded by atse_ethernet_option_bits_flag).  Returns 0 on
 * success, ENOENT when no "cfi" sibling exists, ENXIO when its memory
 * resource cannot be allocated.
 */
static int
atse_ethernet_option_bits_read_fdt(device_t dev)
{
	struct resource *res;
	device_t fdev;
	int i, rid;

	if (atse_ethernet_option_bits_flag & ATSE_ETHERNET_OPTION_BITS_READ) {
		return (0);
	}

	fdev = device_find_child(device_get_parent(dev), "cfi", 0);
	if (fdev == NULL) {
		return (ENOENT);
	}

	rid = 0;
	res = bus_alloc_resource_any(fdev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (res == NULL) {
		return (ENXIO);
	}

	for (i = 0; i < ALTERA_ETHERNET_OPTION_BITS_LEN; i++) {
		atse_ethernet_option_bits[i] = bus_read_1(res,
		    ALTERA_ETHERNET_OPTION_BITS_OFF + i);
	}

	bus_release_resource(fdev, SYS_RES_MEMORY, rid, res);
	atse_ethernet_option_bits_flag |= ATSE_ETHERNET_OPTION_BITS_READ;

	return (0);
}
/*
 * Fetch the Ethernet option bits (currently only via FDT/CFI),
 * complaining on the console when that fails.  Returns 0 on success or
 * the error from the backend.
 */
static int
atse_ethernet_option_bits_read(device_t dev)
{
	int error;

	error = atse_ethernet_option_bits_read_fdt(dev);
	if (error != 0)
		device_printf(dev,
		    "Cannot read Ethernet addresses from flash.\n");

	return (error);
}
static int
atse_get_eth_address(struct atse_softc *sc)
{
unsigned long hostid;
uint32_t val4;
int unit;
/*
* Make sure to only ever do this once. Otherwise a reset would
* possibly change our ethernet address, which is not good at all.
*/
if (sc->atse_eth_addr[0] != 0x00 || sc->atse_eth_addr[1] != 0x00 ||
sc->atse_eth_addr[2] != 0x00) {
return (0);
}
if ((atse_ethernet_option_bits_flag &
ATSE_ETHERNET_OPTION_BITS_READ) == 0) {
goto get_random;
}
val4 = atse_ethernet_option_bits[0] << 24;
val4 |= atse_ethernet_option_bits[1] << 16;
val4 |= atse_ethernet_option_bits[2] << 8;
val4 |= atse_ethernet_option_bits[3];
/* They chose "safe". */
if (val4 != le32toh(0x00005afe)) {
device_printf(sc->atse_dev, "Magic '5afe' is not safe: 0x%08x. "
"Falling back to random numbers for hardware address.\n",
val4);
goto get_random;
}
sc->atse_eth_addr[0] = atse_ethernet_option_bits[4];
sc->atse_eth_addr[1] = atse_ethernet_option_bits[5];
sc->atse_eth_addr[2] = atse_ethernet_option_bits[6];
sc->atse_eth_addr[3] = atse_ethernet_option_bits[7];
sc->atse_eth_addr[4] = atse_ethernet_option_bits[8];
sc->atse_eth_addr[5] = atse_ethernet_option_bits[9];
/* Handle factory default ethernet address: 00:07:ed:ff:ed:15 */
if (sc->atse_eth_addr[0] == 0x00 && sc->atse_eth_addr[1] == 0x07 &&
sc->atse_eth_addr[2] == 0xed && sc->atse_eth_addr[3] == 0xff &&
sc->atse_eth_addr[4] == 0xed && sc->atse_eth_addr[5] == 0x15) {
device_printf(sc->atse_dev, "Factory programmed Ethernet "
"hardware address blacklisted. Falling back to random "
"address to avoid collisions.\n");
device_printf(sc->atse_dev, "Please re-program your flash.\n");
goto get_random;
}
if (sc->atse_eth_addr[0] == 0x00 && sc->atse_eth_addr[1] == 0x00 &&
sc->atse_eth_addr[2] == 0x00 && sc->atse_eth_addr[3] == 0x00 &&
sc->atse_eth_addr[4] == 0x00 && sc->atse_eth_addr[5] == 0x00) {
device_printf(sc->atse_dev, "All zero's Ethernet hardware "
"address blacklisted. Falling back to random address.\n");
device_printf(sc->atse_dev, "Please re-program your flash.\n");
goto get_random;
}
if (ETHER_IS_MULTICAST(sc->atse_eth_addr)) {
device_printf(sc->atse_dev, "Multicast Ethernet hardware "
"address blacklisted. Falling back to random address.\n");
device_printf(sc->atse_dev, "Please re-program your flash.\n");
goto get_random;
}
/*
* If we find an Altera prefixed address with a 0x0 ending
* adjust by device unit. If not and this is not the first
* Ethernet, go to random.
*/
unit = device_get_unit(sc->atse_dev);
if (unit == 0x00) {
return (0);
}
if (unit > 0x0f) {
device_printf(sc->atse_dev, "We do not support Ethernet "
"addresses for more than 16 MACs. Falling back to "
"random hadware address.\n");
goto get_random;
}
if ((sc->atse_eth_addr[0] & ~0x2) != 0 ||
sc->atse_eth_addr[1] != 0x07 || sc->atse_eth_addr[2] != 0xed ||
(sc->atse_eth_addr[5] & 0x0f) != 0x0) {
device_printf(sc->atse_dev, "Ethernet address not meeting our "
"multi-MAC standards. Falling back to random hadware "
"address.\n");
goto get_random;
}
sc->atse_eth_addr[5] |= (unit & 0x0f);
return (0);
get_random:
/*
* Fall back to random code we also use on bridge(4).
*/
getcredhostid(curthread->td_ucred, &hostid);
if (hostid == 0) {
arc4rand(sc->atse_eth_addr, ETHER_ADDR_LEN, 1);
sc->atse_eth_addr[0] &= ~1;/* clear multicast bit */
sc->atse_eth_addr[0] |= 2; /* set the LAA bit */
} else {
sc->atse_eth_addr[0] = 0x2;
sc->atse_eth_addr[1] = (hostid >> 24) & 0xff;
sc->atse_eth_addr[2] = (hostid >> 16) & 0xff;
sc->atse_eth_addr[3] = (hostid >> 8 ) & 0xff;
sc->atse_eth_addr[4] = hostid & 0xff;
sc->atse_eth_addr[5] = sc->atse_unit & 0xff;
}
return (0);
}
static int
atse_set_eth_address(struct atse_softc *sc, int n)
{
uint32_t v0, v1;
v0 = (sc->atse_eth_addr[3] << 24) | (sc->atse_eth_addr[2] << 16) |
(sc->atse_eth_addr[1] << 8) | sc->atse_eth_addr[0];
v1 = (sc->atse_eth_addr[5] << 8) | sc->atse_eth_addr[4];
if (n & ATSE_ETH_ADDR_DEF) {
CSR_WRITE_4(sc, BASE_CFG_MAC_0, v0);
CSR_WRITE_4(sc, BASE_CFG_MAC_1, v1);
}
if (n & ATSE_ETH_ADDR_SUPP1) {
CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_0_0, v0);
CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_0_1, v1);
}
if (n & ATSE_ETH_ADDR_SUPP2) {
CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_1_0, v0);
CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_1_1, v1);
}
if (n & ATSE_ETH_ADDR_SUPP3) {
CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_2_0, v0);
CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_2_1, v1);
}
if (n & ATSE_ETH_ADDR_SUPP4) {
CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_3_0, v0);
CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_3_1, v1);
}
return (0);
}
/*
 * Full controller reset/initialization per the Altera TSE manual sequence:
 * PCS link timer + SGMII configuration, PCS reset, MAC datapath disable,
 * FIFO thresholds, station address programming, MAC function configuration,
 * MAC software reset, and finally datapath re-enable.
 *
 * Returns 0 on success, ENXIO if any reset/enable poll times out.
 */
static int
atse_reset(struct atse_softc *sc)
{
        uint32_t val4, mask;
        uint16_t val;
        int i;

        /* 1. External PHY Initialization using MDIO. */
        /*
         * We select the right MDIO space in atse_attach() and let MII do
         * anything else.
         */

        /* 2. PCS Configuration Register Initialization. */
        /* a. Set auto negotiation link timer to 1.6ms for SGMII. */
        PCS_WRITE_2(sc, PCS_EXT_LINK_TIMER_0, 0x0D40);
        PCS_WRITE_2(sc, PCS_EXT_LINK_TIMER_1, 0x0003);

        /* b. Configure SGMII. */
        val = PCS_EXT_IF_MODE_SGMII_ENA|PCS_EXT_IF_MODE_USE_SGMII_AN;
        PCS_WRITE_2(sc, PCS_EXT_IF_MODE, val);

        /* c. Enable auto negotiation. */
        /* Ignore Bits 6,8,13; should be set,set,unset. */
        val = PCS_READ_2(sc, PCS_CONTROL);
        val &= ~(PCS_CONTROL_ISOLATE|PCS_CONTROL_POWERDOWN);
        val &= ~PCS_CONTROL_LOOPBACK;           /* Make this a -link1 option? */
        val |= PCS_CONTROL_AUTO_NEGOTIATION_ENABLE;
        PCS_WRITE_2(sc, PCS_CONTROL, val);

        /* d. PCS reset. */
        val = PCS_READ_2(sc, PCS_CONTROL);
        val |= PCS_CONTROL_RESET;
        PCS_WRITE_2(sc, PCS_CONTROL, val);

        /* Wait for reset bit to clear; i=100 is excessive. */
        for (i = 0; i < 100; i++) {
                val = PCS_READ_2(sc, PCS_CONTROL);
                if ((val & PCS_CONTROL_RESET) == 0) {
                        break;
                }
                DELAY(10);
        }
        if ((val & PCS_CONTROL_RESET) != 0) {
                device_printf(sc->atse_dev, "PCS reset timed out.\n");
                return (ENXIO);
        }

        /* 3. MAC Configuration Register Initialization. */
        /* a. Disable MAC transmit and receive datapath. */
        mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
        val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
        val4 &= ~mask;
        /* Samples in the manual do have the SW_RESET bit set here, why? */
        CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);

        /* Wait for bits to be cleared; i=100 is excessive. */
        for (i = 0; i < 100; i++) {
                val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
                if ((val4 & mask) == 0) {
                        break;
                }
                DELAY(10);
        }
        if ((val4 & mask) != 0) {
                device_printf(sc->atse_dev, "Disabling MAC TX/RX timed out.\n");
                return (ENXIO);
        }

        /* b. MAC FIFO configuration. */
        CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_EMPTY, FIFO_DEPTH_TX - 16);
        CSR_WRITE_4(sc, BASE_CFG_TX_ALMOST_FULL, 3);
        CSR_WRITE_4(sc, BASE_CFG_TX_ALMOST_EMPTY, 8);
        CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_EMPTY, FIFO_DEPTH_RX - 16);
        CSR_WRITE_4(sc, BASE_CFG_RX_ALMOST_FULL, 8);
        CSR_WRITE_4(sc, BASE_CFG_RX_ALMOST_EMPTY, 8);
#if 0
        CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_FULL, 16);
        CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_FULL, 16);
#else
        /* For store-and-forward mode, set this threshold to 0. */
        CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_FULL, 0);
        CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_FULL, 0);
#endif

        /* c. MAC address configuration. */
        /* Also initialize supplementary addresses to our primary one. */
        /* XXX-BZ FreeBSD really needs to grow an API for using these. */
        atse_get_eth_address(sc);
        atse_set_eth_address(sc, ATSE_ETH_ADDR_ALL);

        /* d. MAC function configuration. */
        CSR_WRITE_4(sc, BASE_CFG_FRM_LENGTH, 1518);     /* Default. */
        CSR_WRITE_4(sc, BASE_CFG_TX_IPG_LENGTH, 12);
        CSR_WRITE_4(sc, BASE_CFG_PAUSE_QUANT, 0xFFFF);

        val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
        /*
         * If 1000BASE-X/SGMII PCS is initialized, set the ETH_SPEED (bit 3)
         * and ENA_10 (bit 25) in command_config register to 0. If half duplex
         * is reported in the PHY/PCS status register, set the HD_ENA (bit 10)
         * to 1 in command_config register.
         * BZ: We shoot for 1000 instead.
         */
#if 0
        val4 |= BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
#else
        val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
#endif
        val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
#if 0
        /*
         * We do not want to set this, otherwise, we could not even send
         * random raw ethernet frames for various other research. By default
         * FreeBSD will use the right ether source address.
         */
        val4 |= BASE_CFG_COMMAND_CONFIG_TX_ADDR_INS;
#endif
        val4 |= BASE_CFG_COMMAND_CONFIG_PAD_EN;
        val4 &= ~BASE_CFG_COMMAND_CONFIG_CRC_FWD;
#if 0
        val4 |= BASE_CFG_COMMAND_CONFIG_CNTL_FRM_ENA;
#endif
#if 1
        val4 |= BASE_CFG_COMMAND_CONFIG_RX_ERR_DISC;
#endif
        /*
         * Fix: this previously cleared LOOP_ENA in the 16-bit PCS shadow
         * 'val' (a no-op since 'val' is not written back); the intent is
         * clearly to clear the loopback bit in command_config before the
         * write below.
         */
        val4 &= ~BASE_CFG_COMMAND_CONFIG_LOOP_ENA;      /* link0? */
        CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);

        /*
         * Make sure we do not enable 32bit alignment; FreeBSD cannot
         * cope with the additional padding (though we should!?).
         * Also make sure we get the CRC appended.
         */
        val4 = CSR_READ_4(sc, TX_CMD_STAT);
        val4 &= ~(TX_CMD_STAT_OMIT_CRC|TX_CMD_STAT_TX_SHIFT16);
        CSR_WRITE_4(sc, TX_CMD_STAT, val4);

        /*
         * RX_SHIFT16 is deliberately enabled here (net effect of the old
         * clear-then-set; the dead clear has been dropped).  NOTE(review):
         * this contradicts the comment above about 32bit alignment --
         * presumably the RX path adjusts for the 2-byte shift; confirm.
         */
        val4 = CSR_READ_4(sc, RX_CMD_STAT);
        val4 |= RX_CMD_STAT_RX_SHIFT16;
        CSR_WRITE_4(sc, RX_CMD_STAT, val4);

        /* e. Reset MAC. */
        val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
        val4 |= BASE_CFG_COMMAND_CONFIG_SW_RESET;
        CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);

        /* Wait for bits to be cleared; i=100 is excessive. */
        for (i = 0; i < 100; i++) {
                val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
                if ((val4 & BASE_CFG_COMMAND_CONFIG_SW_RESET) == 0) {
                        break;
                }
                DELAY(10);
        }
        if ((val4 & BASE_CFG_COMMAND_CONFIG_SW_RESET) != 0) {
                device_printf(sc->atse_dev, "MAC reset timed out.\n");
                return (ENXIO);
        }

        /* f. Enable MAC transmit and receive datapath. */
        mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
        val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
        val4 |= mask;
        CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);

        /* Wait for bits to be set; i=100 is excessive. */
        for (i = 0; i < 100; i++) {
                val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
                if ((val4 & mask) == mask) {
                        break;
                }
                DELAY(10);
        }
        if ((val4 & mask) != mask) {
                device_printf(sc->atse_dev, "Enabling MAC TX/RX timed out.\n");
                return (ENXIO);
        }

        return (0);
}
/*
 * Bring the interface up (softc lock held).  Refreshes the station address
 * from the ifnet, stops and resets the MAC, reprograms the RX filter,
 * kicks MII media selection and starts the tick callout.  No-op when the
 * interface is already running.
 */
static void
atse_init_locked(struct atse_softc *sc)
{
        if_t ifp;
        struct mii_data *mii;
        uint8_t *eaddr;

        ATSE_LOCK_ASSERT(sc);

        ifp = sc->atse_ifp;
        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
                return;
        }

        /*
         * Must update the ether address if changed. Given we do not handle
         * in atse_ioctl() but it's in the general framework, just always
         * do it here before atse_reset().
         */
        eaddr = if_getlladdr(sc->atse_ifp);
        bcopy(eaddr, &sc->atse_eth_addr, ETHER_ADDR_LEN);

        /* Make things friendly to halt, cleanup, ... */
        atse_stop_locked(sc);
        atse_reset(sc);

        /* ... and fire up the engine again. */
        atse_rxfilter_locked(sc);

        /*
         * NOTE(review): ATSE_FLAGS_LINK is "preserved" here only to be
         * cleared unconditionally a few lines below before mii_mediachg();
         * one of the two operations looks redundant -- confirm intent
         * before changing.
         */
        sc->atse_flags &= ATSE_FLAGS_LINK; /* Preserve. */

        mii = device_get_softc(sc->atse_miibus);

        sc->atse_flags &= ~ATSE_FLAGS_LINK;
        mii_mediachg(mii);

        if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

        /* Arm the 1 Hz watchdog/MII tick. */
        callout_reset(&sc->atse_tick, hz, atse_tick, sc);
}
/*
 * ifnet init callback: acquire the softc lock and run the locked init path.
 */
static void
atse_init(void *xsc)
{
        struct atse_softc *sc = xsc;

        /*
         * XXXRW: There is some argument that we should immediately do RX
         * processing after enabling interrupts, or one may not fire if there
         * are buffered packets.
         */
        ATSE_LOCK(sc);
        atse_init_locked(sc);
        ATSE_UNLOCK(sc);
}
/*
 * ifnet ioctl handler.  Interface flag changes either re-run the RX filter
 * (when only PROMISC/ALLMULTI toggled while running) or do a full (re)init;
 * multicast changes re-filter; media ioctls are forwarded to MII; anything
 * else goes to ether_ioctl().
 */
static int
atse_ioctl(if_t ifp, u_long command, caddr_t data)
{
        struct atse_softc *sc;
        struct mii_data *mii;
        struct ifreq *ifr;
        int error, mask;

        sc = if_getsoftc(ifp);
        ifr = (struct ifreq *)data;
        error = 0;

        switch (command) {
        case SIOCSIFFLAGS:
                ATSE_LOCK(sc);
                if ((if_getflags(ifp) & IFF_UP) != 0) {
                        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
                            ((if_getflags(ifp) ^ sc->atse_if_flags) &
                            (IFF_PROMISC | IFF_ALLMULTI)) != 0)
                                atse_rxfilter_locked(sc);
                        else
                                atse_init_locked(sc);
                } else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
                        atse_stop_locked(sc);
                sc->atse_if_flags = if_getflags(ifp);
                ATSE_UNLOCK(sc);
                break;
        case SIOCSIFCAP:
                /* Capability changes are computed but not acted upon yet. */
                ATSE_LOCK(sc);
                mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
                ATSE_UNLOCK(sc);
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                ATSE_LOCK(sc);
                atse_rxfilter_locked(sc);
                ATSE_UNLOCK(sc);
                break;
        case SIOCGIFMEDIA:
        case SIOCSIFMEDIA:
                mii = device_get_softc(sc->atse_miibus);
                error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
                break;
        default:
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}
/*
 * Periodic (1 Hz) callout, run with the softc lock held: drive the MII
 * tick, poke statchg while we have no link, and re-arm.
 * Removed the unused local 'ifp' that was assigned but never read.
 */
static void
atse_tick(void *xsc)
{
        struct atse_softc *sc;
        struct mii_data *mii;

        sc = (struct atse_softc *)xsc;
        ATSE_LOCK_ASSERT(sc);

        mii = device_get_softc(sc->atse_miibus);
        mii_tick(mii);
        if ((sc->atse_flags & ATSE_FLAGS_LINK) == 0) {
                atse_miibus_statchg(sc->atse_dev);
        }

        callout_reset(&sc->atse_tick, hz, atse_tick, sc);
}
/*
 * Set media options (ifmedia callback): reset every PHY on the bus and let
 * MII apply the currently selected media.  Returns the mii_mediachg()
 * result.
 */
static int
atse_ifmedia_upd(if_t ifp)
{
        struct atse_softc *sc;
        struct mii_data *mii;
        struct mii_softc *miisc;
        int error;

        sc = if_getsoftc(ifp);

        ATSE_LOCK(sc);
        mii = device_get_softc(sc->atse_miibus);
        LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
                PHY_RESET(miisc);
        error = mii_mediachg(mii);
        ATSE_UNLOCK(sc);

        return (error);
}
/*
 * Report current media status (ifmedia callback): poll MII and copy its
 * active/status words into the request.
 */
static void
atse_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
        struct atse_softc *sc;
        struct mii_data *mii;

        sc = if_getsoftc(ifp);

        ATSE_LOCK(sc);
        mii = device_get_softc(sc->atse_miibus);
        mii_pollstat(mii);
        ifmr->ifm_status = mii->mii_media_status;
        ifmr->ifm_active = mii->mii_media_active;
        ATSE_UNLOCK(sc);
}
/*
 * Names and sysctl descriptions for the TSE MAC statistics counter
 * registers.  The table is indexed by CSR register offset, hence the
 * designated initializer at 0x1a (first counter) and the gap before the
 * extended 64-bit upper-half counters at 0x3c; unnamed gap entries are
 * skipped by atse_sysctl_stats_attach().
 */
static struct atse_mac_stats_regs {
const char *name;
const char *descr; /* Mostly copied from Altera datasheet. */
} atse_mac_stats_regs[] = {
[0x1a] =
{ "aFramesTransmittedOK",
"The number of frames that are successfully transmitted including "
"the pause frames." },
{ "aFramesReceivedOK",
"The number of frames that are successfully received including the "
"pause frames." },
{ "aFrameCheckSequenceErrors",
"The number of receive frames with CRC error." },
{ "aAlignmentErrors",
"The number of receive frames with alignment error." },
{ "aOctetsTransmittedOK",
"The lower 32 bits of the number of data and padding octets that "
"are successfully transmitted." },
{ "aOctetsReceivedOK",
"The lower 32 bits of the number of data and padding octets that "
" are successfully received." },
{ "aTxPAUSEMACCtrlFrames",
"The number of pause frames transmitted." },
{ "aRxPAUSEMACCtrlFrames",
"The number received pause frames received." },
{ "ifInErrors",
"The number of errored frames received." },
{ "ifOutErrors",
"The number of transmit frames with either a FIFO overflow error, "
"a FIFO underflow error, or a error defined by the user "
"application." },
{ "ifInUcastPkts",
"The number of valid unicast frames received." },
{ "ifInMulticastPkts",
"The number of valid multicast frames received. The count does "
"not include pause frames." },
{ "ifInBroadcastPkts",
"The number of valid broadcast frames received." },
{ "ifOutDiscards",
"This statistics counter is not in use. The MAC function does not "
"discard frames that are written to the FIFO buffer by the user "
"application." },
{ "ifOutUcastPkts",
"The number of valid unicast frames transmitted." },
{ "ifOutMulticastPkts",
"The number of valid multicast frames transmitted, excluding pause "
"frames." },
{ "ifOutBroadcastPkts",
"The number of valid broadcast frames transmitted." },
{ "etherStatsDropEvents",
"The number of frames that are dropped due to MAC internal errors "
"when FIFO buffer overflow persists." },
{ "etherStatsOctets",
"The lower 32 bits of the total number of octets received. This "
"count includes both good and errored frames." },
{ "etherStatsPkts",
"The total number of good and errored frames received." },
{ "etherStatsUndersizePkts",
"The number of frames received with length less than 64 bytes. "
"This count does not include errored frames." },
{ "etherStatsOversizePkts",
"The number of frames received that are longer than the value "
"configured in the frm_length register. This count does not "
"include errored frames." },
{ "etherStatsPkts64Octets",
"The number of 64-byte frames received. This count includes good "
"and errored frames." },
{ "etherStatsPkts65to127Octets",
"The number of received good and errored frames between the length "
"of 65 and 127 bytes." },
{ "etherStatsPkts128to255Octets",
"The number of received good and errored frames between the length "
"of 128 and 255 bytes." },
{ "etherStatsPkts256to511Octets",
"The number of received good and errored frames between the length "
"of 256 and 511 bytes." },
{ "etherStatsPkts512to1023Octets",
"The number of received good and errored frames between the length "
"of 512 and 1023 bytes." },
{ "etherStatsPkts1024to1518Octets",
"The number of received good and errored frames between the length "
"of 1024 and 1518 bytes." },
{ "etherStatsPkts1519toXOctets",
"The number of received good and errored frames between the length "
"of 1519 and the maximum frame length configured in the frm_length "
"register." },
{ "etherStatsJabbers",
"Too long frames with CRC error." },
{ "etherStatsFragments",
"Too short frames with CRC error." },
/* 0x39 unused, 0x3a/b non-stats. */
[0x3c] =
/* Extended Statistics Counters */
{ "msb_aOctetsTransmittedOK",
"Upper 32 bits of the number of data and padding octets that are "
"successfully transmitted." },
{ "msb_aOctetsReceivedOK",
"Upper 32 bits of the number of data and padding octets that are "
"successfully received." },
{ "msb_etherStatsOctets",
"Upper 32 bits of the total number of octets received. This count "
"includes both good and errored frames." }
};
static int
sysctl_atse_mac_stats_proc(SYSCTL_HANDLER_ARGS)
{
struct atse_softc *sc;
int error, offset, s;
sc = arg1;
offset = arg2;
s = CSR_READ_4(sc, offset);
error = sysctl_handle_int(oidp, &s, 0, req);
if (error || !req->newptr) {
return (error);
}
return (0);
}
/*
 * Names and sysctl descriptions for the software-maintained RX error
 * counters (sc->atse_rx_err[]), indexed by the ATSE_RX_ERR_* constants
 * defined alongside.
 */
static struct atse_rx_err_stats_regs {
const char *name;
const char *descr;
} atse_rx_err_stats_regs[] = {
#define ATSE_RX_ERR_FIFO_THRES_EOP 0 /* FIFO threshold reached, on EOP. */
#define ATSE_RX_ERR_ELEN 1 /* Frame/payload length not valid. */
#define ATSE_RX_ERR_CRC32 2 /* CRC-32 error. */
#define ATSE_RX_ERR_FIFO_THRES_TRUNC 3 /* FIFO thresh., truncated frame. */
#define ATSE_RX_ERR_4 4 /* ? */
#define ATSE_RX_ERR_5 5 /* / */
{ "rx_err_fifo_thres_eop",
"FIFO threshold reached, reported on EOP." },
{ "rx_err_fifo_elen",
"Frame or payload length not valid." },
{ "rx_err_fifo_crc32",
"CRC-32 error." },
{ "rx_err_fifo_thres_trunc",
"FIFO threshold reached, truncated frame" },
{ "rx_err_4",
"?" },
{ "rx_err_5",
"?" },
};
static int
sysctl_atse_rx_err_stats_proc(SYSCTL_HANDLER_ARGS)
{
struct atse_softc *sc;
int error, offset, s;
sc = arg1;
offset = arg2;
s = sc->atse_rx_err[offset];
error = sysctl_handle_int(oidp, &s, 0, req);
if (error || !req->newptr) {
return (error);
}
return (0);
}
/*
 * Register sysctl nodes for the hardware MAC statistics registers and the
 * software RX error counters.  Gap entries in the designated-initializer
 * tables (NULL name or description) are skipped.
 */
static void
atse_sysctl_stats_attach(device_t dev)
{
        struct sysctl_ctx_list *sctx;
        struct sysctl_oid *soid;
        struct atse_softc *sc;
        int i;

        sc = device_get_softc(dev);
        sctx = device_get_sysctl_ctx(dev);
        soid = device_get_sysctl_tree(dev);

        /* MAC statistics; the table index doubles as the register offset. */
        for (i = 0; i < nitems(atse_mac_stats_regs); i++) {
                if (atse_mac_stats_regs[i].name == NULL ||
                    atse_mac_stats_regs[i].descr == NULL)
                        continue;

                SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO,
                    atse_mac_stats_regs[i].name,
                    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
                    sc, i, sysctl_atse_mac_stats_proc, "IU",
                    atse_mac_stats_regs[i].descr);
        }

        /* rx_err[]; the table index is the counter index. */
        for (i = 0; i < ATSE_RX_ERR_MAX; i++) {
                if (atse_rx_err_stats_regs[i].name == NULL ||
                    atse_rx_err_stats_regs[i].descr == NULL)
                        continue;

                SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO,
                    atse_rx_err_stats_regs[i].name,
                    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
                    sc, i, sysctl_atse_rx_err_stats_proc, "IU",
                    atse_rx_err_stats_regs[i].descr);
        }
}
/*
* Generic device handling routines.
*/
/*
 * Attach an atse(4) instance (shared by the nexus and fdt front-ends):
 * acquire the TX/RX xDMA controllers and virtual channels, install their
 * interrupt handlers and scatter/gather configuration, allocate the
 * buf_ring, initialize locks and the tick callout, reset the MAC, create
 * the ifnet and attach MII and ethernet.
 *
 * Fixed: removed stray unified-diff artifact lines ("- if (ifp == NULL)
 * ...") that were not valid C; the deleted NULL check is unnecessary as
 * if_alloc(9) cannot fail.
 */
int
atse_attach(device_t dev)
{
        struct atse_softc *sc;
        if_t ifp;
        uint32_t caps;
        int error;

        sc = device_get_softc(dev);
        sc->dev = dev;

        /* Get xDMA controller */
        sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
        if (sc->xdma_tx == NULL) {
                device_printf(dev, "Can't find DMA controller.\n");
                return (ENXIO);
        }

        /*
         * Only final (EOP) write can be less than "symbols per beat" value
         * so we have to defrag mbuf chain.
         * Chapter 15. On-Chip FIFO Memory Core.
         * Embedded Peripherals IP User Guide.
         */
        caps = XCHAN_CAP_NOSEG;

        /* Alloc xDMA virtual channel. */
        sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, caps);
        if (sc->xchan_tx == NULL) {
                device_printf(dev, "Can't alloc virtual DMA channel.\n");
                return (ENXIO);
        }

        /* Setup interrupt handler. */
        error = xdma_setup_intr(sc->xchan_tx, 0,
            atse_xdma_tx_intr, sc, &sc->ih_tx);
        if (error) {
                device_printf(sc->dev,
                    "Can't setup xDMA interrupt handler.\n");
                return (ENXIO);
        }

        xdma_prep_sg(sc->xchan_tx,
            TX_QUEUE_SIZE,      /* xchan requests queue size */
            MCLBYTES,   /* maxsegsize */
            8,          /* maxnsegs */
            16,         /* alignment */
            0,          /* boundary */
            BUS_SPACE_MAXADDR_32BIT,
            BUS_SPACE_MAXADDR);

        /* Get RX xDMA controller */
        sc->xdma_rx = xdma_ofw_get(sc->dev, "rx");
        if (sc->xdma_rx == NULL) {
                device_printf(dev, "Can't find DMA controller.\n");
                return (ENXIO);
        }

        /* Alloc xDMA virtual channel. */
        sc->xchan_rx = xdma_channel_alloc(sc->xdma_rx, caps);
        if (sc->xchan_rx == NULL) {
                device_printf(dev, "Can't alloc virtual DMA channel.\n");
                return (ENXIO);
        }

        /* Setup interrupt handler. */
        error = xdma_setup_intr(sc->xchan_rx, XDMA_INTR_NET,
            atse_xdma_rx_intr, sc, &sc->ih_rx);
        if (error) {
                device_printf(sc->dev,
                    "Can't setup xDMA interrupt handler.\n");
                return (ENXIO);
        }

        xdma_prep_sg(sc->xchan_rx,
            RX_QUEUE_SIZE,      /* xchan requests queue size */
            MCLBYTES,   /* maxsegsize */
            1,          /* maxnsegs */
            16,         /* alignment */
            0,          /* boundary */
            BUS_SPACE_MAXADDR_32BIT,
            BUS_SPACE_MAXADDR);

        mtx_init(&sc->br_mtx, "buf ring mtx", NULL, MTX_DEF);
        sc->br = buf_ring_alloc(BUFRING_SIZE, M_DEVBUF,
            M_NOWAIT, &sc->br_mtx);
        if (sc->br == NULL) {
                return (ENOMEM);
        }

        atse_ethernet_option_bits_read(dev);

        mtx_init(&sc->atse_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
            MTX_DEF);

        callout_init_mtx(&sc->atse_tick, &sc->atse_mtx, 0);

        /*
         * We are only doing single-PHY with this driver currently. The
         * defaults would be right so that BASE_CFG_MDIO_ADDR0 points to the
         * 1st PHY address (0) apart from the fact that BMCR0 is always
         * the PCS mapping, so we always use BMCR1. See Table 5-1 0xA0-0xBF.
         */
#if 0 /* Always PCS. */
        sc->atse_bmcr0 = MDIO_0_START;
        CSR_WRITE_4(sc, BASE_CFG_MDIO_ADDR0, 0x00);
#endif
        /* Always use matching PHY for atse[0..]. */
        sc->atse_phy_addr = device_get_unit(dev);
        sc->atse_bmcr1 = MDIO_1_START;
        CSR_WRITE_4(sc, BASE_CFG_MDIO_ADDR1, sc->atse_phy_addr);

        /* Reset the adapter. */
        atse_reset(sc);

        /* Setup interface; if_alloc(9) cannot fail. */
        ifp = sc->atse_ifp = if_alloc(IFT_ETHER);
        if_setsoftc(ifp, sc);
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
        if_setioctlfn(ifp, atse_ioctl);
        if_settransmitfn(ifp, atse_transmit);
        if_setqflushfn(ifp, atse_qflush);
        if_setinitfn(ifp, atse_init);
        if_setsendqlen(ifp, ATSE_TX_LIST_CNT - 1);
        if_setsendqready(ifp);

        /* MII setup. */
        error = mii_attach(dev, &sc->atse_miibus, ifp, atse_ifmedia_upd,
            atse_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
        if (error != 0) {
                device_printf(dev, "attaching PHY failed: %d\n", error);
                goto err;
        }

        /* Call media-indepedent attach routine. */
        ether_ifattach(ifp, sc->atse_eth_addr);

        /* Tell the upper layer(s) about vlan mtu support. */
        if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
        if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
        if_setcapenable(ifp, if_getcapabilities(ifp));

err:
        if (error != 0) {
                atse_detach(dev);
        }

        if (error == 0) {
                atse_sysctl_stats_attach(dev);
        }

        /* Prime the RX ring and start DMA. */
        atse_rx_enqueue(sc, NUM_RX_MBUF);
        xdma_queue_submit(sc->xchan_rx);

        return (error);
}
/*
 * Detach the driver instance: stop the interface, drain the tick callout,
 * detach from ether/miibus, free the ifnet, destroy locks and release both
 * xDMA channels/controllers.  Safe to call from a failed atse_attach()
 * (device_is_attached() gates the running-interface teardown).
 */
static int
atse_detach(device_t dev)
{
        struct atse_softc *sc;
        if_t ifp;

        sc = device_get_softc(dev);
        KASSERT(mtx_initialized(&sc->atse_mtx), ("%s: mutex not initialized",
            device_get_nameunit(dev)));
        ifp = sc->atse_ifp;

        /* Only cleanup if attach succeeded. */
        if (device_is_attached(dev)) {
                ATSE_LOCK(sc);
                atse_stop_locked(sc);
                ATSE_UNLOCK(sc);
                callout_drain(&sc->atse_tick);
                ether_ifdetach(ifp);
        }
        if (sc->atse_miibus != NULL) {
                device_delete_child(dev, sc->atse_miibus);
        }

        if (ifp != NULL) {
                if_free(ifp);
        }

        mtx_destroy(&sc->atse_mtx);

        /* Release both xDMA virtual channels and their controllers. */
        xdma_channel_free(sc->xchan_tx);
        xdma_channel_free(sc->xchan_rx);
        xdma_put(sc->xdma_tx);
        xdma_put(sc->xdma_rx);

        return (0);
}
/* Shared between nexus and fdt implementation. */
/* Shared between nexus and fdt implementation. */
void
atse_detach_resources(device_t dev)
{
        struct atse_softc *sc = device_get_softc(dev);

        /* Release the CSR memory window, if it was mapped. */
        if (sc->atse_mem_res == NULL)
                return;
        bus_release_resource(dev, SYS_RES_MEMORY, sc->atse_mem_rid,
            sc->atse_mem_res);
        sc->atse_mem_res = NULL;
}
/*
 * Full detach helper shared by the bus front-ends: run the driver detach
 * and then release bus resources.  Returns the atse_detach() error, if any.
 */
int
atse_detach_dev(device_t dev)
{
        int rv;

        rv = atse_detach(dev);
        if (rv != 0) {
                /* We are basically in undefined state now. */
                device_printf(dev, "atse_detach() failed: %d\n", rv);
                return (rv);
        }

        atse_detach_resources(dev);

        return (0);
}
/*
 * miibus read method.  Only the single hard-coded PHY address selected at
 * attach time is serviced; other addresses read as 0.
 */
int
atse_miibus_readreg(device_t dev, int phy, int reg)
{
        struct atse_softc *sc = device_get_softc(dev);

        /*
         * We currently do not support re-mapping of MDIO space on-the-fly
         * but de-facto hard-code the phy#.
         */
        if (phy != sc->atse_phy_addr)
                return (0);

        return (PHY_READ_2(sc, reg));
}
/*
 * miibus write method.  Writes to any PHY address other than the one
 * hard-coded at attach time are silently dropped.
 */
int
atse_miibus_writereg(device_t dev, int phy, int reg, int data)
{
        struct atse_softc *sc = device_get_softc(dev);

        /*
         * We currently do not support re-mapping of MDIO space on-the-fly
         * but de-facto hard-code the phy#.
         */
        if (phy != sc->atse_phy_addr)
                return (0);

        PHY_WRITE_2(sc, reg, data);

        return (0);
}
/*
 * miibus status-change method: mirror the negotiated MII speed and duplex
 * into the MAC's command_config register and re-enable the TX/RX datapath.
 * Fixed the misleading IFM_OPTIONS(active & IFM_FDX) construct -- it only
 * worked because IFM_FDX lies within IFM_OMASK; the macro now masks the
 * media word itself before the duplex bit is tested.
 */
void
atse_miibus_statchg(device_t dev)
{
        struct atse_softc *sc;
        struct mii_data *mii;
        if_t ifp;
        uint32_t val4;

        sc = device_get_softc(dev);
        ATSE_LOCK_ASSERT(sc);

        mii = device_get_softc(sc->atse_miibus);
        ifp = sc->atse_ifp;
        if (mii == NULL || ifp == NULL ||
            (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
                return;
        }

        val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);

        /* Assume no link. */
        sc->atse_flags &= ~ATSE_FLAGS_LINK;

        if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
            (IFM_ACTIVE | IFM_AVALID)) {
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_10_T:
                        val4 |= BASE_CFG_COMMAND_CONFIG_ENA_10;
                        val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
                        sc->atse_flags |= ATSE_FLAGS_LINK;
                        break;
                case IFM_100_TX:
                        val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
                        val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
                        sc->atse_flags |= ATSE_FLAGS_LINK;
                        break;
                case IFM_1000_T:
                        val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
                        val4 |= BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
                        sc->atse_flags |= ATSE_FLAGS_LINK;
                        break;
                default:
                        break;
                }
        }

        if ((sc->atse_flags & ATSE_FLAGS_LINK) == 0) {
                /* Need to stop the MAC? */
                return;
        }

        /* Half-duplex enable bit is the inverse of the negotiated FDX. */
        if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
                val4 &= ~BASE_CFG_COMMAND_CONFIG_HD_ENA;
        } else {
                val4 |= BASE_CFG_COMMAND_CONFIG_HD_ENA;
        }

        /* flow control? */

        /* Make sure the MAC is activated. */
        val4 |= BASE_CFG_COMMAND_CONFIG_TX_ENA;
        val4 |= BASE_CFG_COMMAND_CONFIG_RX_ENA;

        CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
}
/* Module dependencies: require the ether and miibus KLDs at load time. */
MODULE_DEPEND(atse, ether, 1, 1, 1);
MODULE_DEPEND(atse, miibus, 1, 1, 1);
diff --git a/sys/dev/axgbe/if_axgbe.c b/sys/dev/axgbe/if_axgbe.c
index 65a546f8dae2..b35beca9c886 100644
--- a/sys/dev/axgbe/if_axgbe.c
+++ b/sys/dev/axgbe/if_axgbe.c
@@ -1,615 +1,610 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2016,2017 SoftIron Inc.
* Copyright (c) 2020 Advanced Micro Devices, Inc.
*
* This software was developed by Andrew Turner under
* the sponsorship of SoftIron Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <machine/bus.h>
#include "miibus_if.h"
#include "xgbe.h"
#include "xgbe-common.h"
/* Forward declarations for the newbus methods defined below. */
static device_probe_t axgbe_probe;
static device_attach_t axgbe_attach;
/*
 * Per-device software context.  'prv' must stay the first member: shared
 * xgbe code casts between the softc and struct xgbe_prv_data pointers.
 */
struct axgbe_softc {
/* Must be first */
struct xgbe_prv_data prv;
uint8_t mac_addr[ETHER_ADDR_LEN];
struct ifmedia media;
};
/* FDT compatible strings this driver attaches to. */
static struct ofw_compat_data compat_data[] = {
{ "amd,xgbe-seattle-v1a", true },
{ NULL, false }
};
/* Resource layout for the old binding with a separate PHY node. */
static struct resource_spec old_phy_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE }, /* Rx/Tx regs */
{ SYS_RES_MEMORY, 1, RF_ACTIVE }, /* Integration regs */
{ SYS_RES_MEMORY, 2, RF_ACTIVE }, /* Integration regs */
{ SYS_RES_IRQ, 0, RF_ACTIVE }, /* Interrupt */
{ -1, 0 }
};
static struct resource_spec old_mac_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE }, /* MAC regs */
{ SYS_RES_MEMORY, 1, RF_ACTIVE }, /* PCS regs */
{ SYS_RES_IRQ, 0, RF_ACTIVE }, /* Device interrupt */
/* Per-channel interrupts */
{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_OPTIONAL },
{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_OPTIONAL },
{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_OPTIONAL },
{ SYS_RES_IRQ, 4, RF_ACTIVE | RF_OPTIONAL },
{ -1, 0 }
};
/* Resource layout for the combined (new) binding without a phy-handle. */
static struct resource_spec mac_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE }, /* MAC regs */
{ SYS_RES_MEMORY, 1, RF_ACTIVE }, /* PCS regs */
{ SYS_RES_MEMORY, 2, RF_ACTIVE }, /* Rx/Tx regs */
{ SYS_RES_MEMORY, 3, RF_ACTIVE }, /* Integration regs */
{ SYS_RES_MEMORY, 4, RF_ACTIVE }, /* Integration regs */
{ SYS_RES_IRQ, 0, RF_ACTIVE }, /* Device interrupt */
/* Per-channel and auto-negotiation interrupts */
{ SYS_RES_IRQ, 1, RF_ACTIVE },
{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_OPTIONAL },
{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_OPTIONAL },
{ SYS_RES_IRQ, 4, RF_ACTIVE | RF_OPTIONAL },
{ SYS_RES_IRQ, 5, RF_ACTIVE | RF_OPTIONAL },
{ -1, 0 }
};
/* Hardware-version parameters for the v1 (Seattle) silicon. */
static struct xgbe_version_data xgbe_v1 = {
.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v1,
.xpcs_access = XGBE_XPCS_ACCESS_V1,
.tx_max_fifo_size = 81920,
.rx_max_fifo_size = 81920,
.tx_tstamp_workaround = 1,
};
MALLOC_DEFINE(M_AXGBE, "axgbe", "axgbe data");
/*
 * ifnet init callback: mark the interface running.  All real datapath
 * setup is done elsewhere; this only flips the driver flag once.
 */
static void
axgbe_init(void *p)
{
        struct axgbe_softc *sc = p;
        if_t ifp = sc->prv.netdev;

        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
                if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
}
/*
 * ifnet ioctl handler: validate MTU bounds, accept flag changes as no-ops,
 * forward media ioctls to ifmedia, and punt everything else to
 * ether_ioctl().
 */
static int
axgbe_ioctl(if_t ifp, unsigned long command, caddr_t data)
{
        struct axgbe_softc *sc = if_getsoftc(ifp);
        struct ifreq *ifr = (struct ifreq *)data;
        int error;

        switch (command) {
        case SIOCSIFMTU:
                /* TODO - change it to iflib way */
                error = (ifr->ifr_mtu < ETHERMIN ||
                    ifr->ifr_mtu > ETHERMTU_JUMBO) ? EINVAL : 0;
                break;
        case SIOCSIFFLAGS:
                error = 0;
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
                break;
        default:
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}
/* ifnet qflush callback: flush the default send queue. */
static void
axgbe_qflush(if_t ifp)
{
        if_qflush(ifp);
}
/*
 * ifmedia change callback: translate the selected media word into the
 * PHY speed/autoneg settings (under the AN lock) and restart
 * autonegotiation.  Returns the (negated) phy_config_aneg result.
 */
static int
axgbe_media_change(if_t ifp)
{
        struct axgbe_softc *sc = if_getsoftc(ifp);
        int media;

        sx_xlock(&sc->prv.an_mutex);
        media = sc->media.ifm_cur->ifm_media;
        switch (IFM_SUBTYPE(media)) {
        case IFM_10G_KR:
                sc->prv.phy.speed = SPEED_10000;
                sc->prv.phy.autoneg = AUTONEG_DISABLE;
                break;
        case IFM_2500_KX:
                sc->prv.phy.speed = SPEED_2500;
                sc->prv.phy.autoneg = AUTONEG_DISABLE;
                break;
        case IFM_1000_KX:
                sc->prv.phy.speed = SPEED_1000;
                sc->prv.phy.autoneg = AUTONEG_DISABLE;
                break;
        case IFM_AUTO:
                sc->prv.phy.autoneg = AUTONEG_ENABLE;
                break;
        default:
                break;
        }
        sx_xunlock(&sc->prv.an_mutex);

        return (-sc->prv.phy_if.phy_config_aneg(&sc->prv));
}
/*
 * ifmedia status callback: report link, duplex and speed from the PHY
 * state cached in the private data.  ifm_active is left untouched when
 * there is no link (only IFM_AVALID is reported then).
 */
static void
axgbe_media_status(if_t ifp, struct ifmediareq *ifmr)
{
        struct axgbe_softc *sc = if_getsoftc(ifp);

        ifmr->ifm_status = IFM_AVALID;
        if (!sc->prv.phy.link)
                return;

        ifmr->ifm_status |= IFM_ACTIVE;
        ifmr->ifm_active = IFM_ETHER |
            (sc->prv.phy.duplex == DUPLEX_FULL ? IFM_FDX : IFM_HDX);

        switch (sc->prv.phy.speed) {
        case SPEED_10000:
                ifmr->ifm_active |= IFM_10G_KR;
                break;
        case SPEED_2500:
                ifmr->ifm_active |= IFM_2500_KX;
                break;
        case SPEED_1000:
                ifmr->ifm_active |= IFM_1000_KX;
                break;
        }
}
/*
 * ifnet counter callback: refresh the hardware MMC statistics and map the
 * requested counter onto them; unrecognized counters fall back to the
 * generic ifnet accounting.
 */
static uint64_t
axgbe_get_counter(if_t ifp, ift_counter c)
{
        struct xgbe_prv_data *pdata = if_getsoftc(ifp);
        struct xgbe_mmc_stats *st = &pdata->mmc_stats;

        DBGPR("-->%s\n", __func__);

        pdata->hw_if.read_mmc_stats(pdata);

        switch (c) {
        case IFCOUNTER_IPACKETS:
                return (st->rxframecount_gb);
        case IFCOUNTER_IERRORS:
                /* Total frames minus the good uni/multi/broadcast ones. */
                return (st->rxframecount_gb - st->rxbroadcastframes_g -
                    st->rxmulticastframes_g - st->rxunicastframes_g);
        case IFCOUNTER_OPACKETS:
                return (st->txframecount_gb);
        case IFCOUNTER_OERRORS:
                return (st->txframecount_gb - st->txframecount_g);
        case IFCOUNTER_IBYTES:
                return (st->rxoctetcount_gb);
        case IFCOUNTER_OBYTES:
                return (st->txoctetcount_gb);
        default:
                return (if_get_counter_default(ifp, c));
        }
}
/*
 * Newbus probe: match an enabled FDT node against our compatible table.
 */
static int
axgbe_probe(device_t dev)
{
        if (!ofw_bus_status_okay(dev))
                return (ENXIO);

        if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
                return (ENXIO);

        device_set_desc(dev, "AMD 10 Gigabit Ethernet");

        return (BUS_PROBE_DEFAULT);
}
/*
 * Read an optional FDT property into 'data' (len bytes).  Returns -1 when
 * the property is absent, ENXIO when present but unreadable/invalid, and
 * 0 on success -- callers distinguish "missing" from "broken".
 */
static int
axgbe_get_optional_prop(device_t dev, phandle_t node, const char *name,
    int *data, size_t len)
{
        if (OF_hasprop(node, name) == 0)
                return (-1);

        if (OF_getencprop(node, name, data, len) <= 0) {
                device_printf(dev,"%s property is invalid\n", name);
                return (ENXIO);
        }

        return (0);
}
/*
 * Attach the AMD XGBE MAC.  Resources come from the FDT in one of two
 * layouts: the newer layout keeps all MAC/SerDes registers and IRQs
 * under the MAC node, while the older layout splits the SerDes
 * registers and the AN interrupt into a separate PHY node referenced
 * through the "phy-handle" property.
 */
static int
axgbe_attach(device_t dev)
{
struct axgbe_softc *sc;
if_t ifp;
pcell_t phy_handle;
device_t phydev;
phandle_t node, phy_node;
struct resource *mac_res[11];
struct resource *phy_res[4];
ssize_t len;
int error, i, j;
sc = device_get_softc(dev);
sc->prv.vdata = &xgbe_v1;
node = ofw_bus_get_node(dev);
/* No "phy-handle": new-style DT, everything lives under this node. */
if (OF_getencprop(node, "phy-handle", &phy_handle,
sizeof(phy_handle)) <= 0) {
phy_node = node;
if (bus_alloc_resources(dev, mac_spec, mac_res)) {
device_printf(dev,
"could not allocate phy resources\n");
return (ENXIO);
}
sc->prv.xgmac_res = mac_res[0];
sc->prv.xpcs_res = mac_res[1];
sc->prv.rxtx_res = mac_res[2];
sc->prv.sir0_res = mac_res[3];
sc->prv.sir1_res = mac_res[4];
sc->prv.dev_irq_res = mac_res[5];
sc->prv.per_channel_irq = OF_hasprop(node,
XGBE_DMA_IRQS_PROPERTY);
/* IRQs after the device IRQ (minus the last) are per-DMA-channel. */
for (i = 0, j = 6; j < nitems(mac_res) - 1 &&
mac_res[j + 1] != NULL; i++, j++) {
if (sc->prv.per_channel_irq) {
sc->prv.chan_irq_res[i] = mac_res[j];
}
}
/* The last entry is the auto-negotiation interrupt */
sc->prv.an_irq_res = mac_res[j];
} else {
/* Old-style DT: SerDes resources come from the referenced PHY node. */
phydev = OF_device_from_xref(phy_handle);
phy_node = ofw_bus_get_node(phydev);
if (bus_alloc_resources(phydev, old_phy_spec, phy_res)) {
device_printf(dev,
"could not allocate phy resources\n");
return (ENXIO);
}
if (bus_alloc_resources(dev, old_mac_spec, mac_res)) {
device_printf(dev,
"could not allocate mac resources\n");
return (ENXIO);
}
sc->prv.rxtx_res = phy_res[0];
sc->prv.sir0_res = phy_res[1];
sc->prv.sir1_res = phy_res[2];
sc->prv.an_irq_res = phy_res[3];
sc->prv.xgmac_res = mac_res[0];
sc->prv.xpcs_res = mac_res[1];
sc->prv.dev_irq_res = mac_res[2];
sc->prv.per_channel_irq = OF_hasprop(node,
XGBE_DMA_IRQS_PROPERTY);
if (sc->prv.per_channel_irq) {
for (i = 0, j = 3; i < nitems(sc->prv.chan_irq_res) &&
mac_res[j] != NULL; i++, j++) {
sc->prv.chan_irq_res[i] = mac_res[j];
}
}
}
/* The MAC address must be provided by the DT; there is no fallback. */
if ((len = OF_getproplen(node, "mac-address")) < 0) {
device_printf(dev, "No mac-address property\n");
return (EINVAL);
}
if (len != ETHER_ADDR_LEN)
return (EINVAL);
OF_getprop(node, "mac-address", sc->mac_addr, ETHER_ADDR_LEN);
/* if_alloc() cannot fail; the removed lines below are diff residue. */
sc->prv.netdev = ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "Cannot alloc ifnet\n");
- return (ENXIO);
- }
-
sc->prv.dev = dev;
sc->prv.dmat = bus_get_dma_tag(dev);
sc->prv.phy.advertising = ADVERTISED_10000baseKR_Full |
ADVERTISED_1000baseKX_Full;
/*
* Read the needed properties from the phy node.
*/
/* This is documented as optional, but Linux requires it */
if (OF_getencprop(phy_node, XGBE_SPEEDSET_PROPERTY, &sc->prv.speed_set,
sizeof(sc->prv.speed_set)) <= 0) {
device_printf(dev, "%s property is missing\n",
XGBE_SPEEDSET_PROPERTY);
return (EINVAL);
}
/*
* For each optional SerDes tuning property: error > 0 means the
* property exists but is malformed (fail the attach); error < 0 means
* it is absent, so fall back to the per-speed defaults.
*/
error = axgbe_get_optional_prop(dev, phy_node, XGBE_BLWC_PROPERTY,
sc->prv.serdes_blwc, sizeof(sc->prv.serdes_blwc));
if (error > 0) {
return (error);
} else if (error < 0) {
sc->prv.serdes_blwc[0] = XGBE_SPEED_1000_BLWC;
sc->prv.serdes_blwc[1] = XGBE_SPEED_2500_BLWC;
sc->prv.serdes_blwc[2] = XGBE_SPEED_10000_BLWC;
}
error = axgbe_get_optional_prop(dev, phy_node, XGBE_CDR_RATE_PROPERTY,
sc->prv.serdes_cdr_rate, sizeof(sc->prv.serdes_cdr_rate));
if (error > 0) {
return (error);
} else if (error < 0) {
sc->prv.serdes_cdr_rate[0] = XGBE_SPEED_1000_CDR;
sc->prv.serdes_cdr_rate[1] = XGBE_SPEED_2500_CDR;
sc->prv.serdes_cdr_rate[2] = XGBE_SPEED_10000_CDR;
}
error = axgbe_get_optional_prop(dev, phy_node, XGBE_PQ_SKEW_PROPERTY,
sc->prv.serdes_pq_skew, sizeof(sc->prv.serdes_pq_skew));
if (error > 0) {
return (error);
} else if (error < 0) {
sc->prv.serdes_pq_skew[0] = XGBE_SPEED_1000_PQ;
sc->prv.serdes_pq_skew[1] = XGBE_SPEED_2500_PQ;
sc->prv.serdes_pq_skew[2] = XGBE_SPEED_10000_PQ;
}
error = axgbe_get_optional_prop(dev, phy_node, XGBE_TX_AMP_PROPERTY,
sc->prv.serdes_tx_amp, sizeof(sc->prv.serdes_tx_amp));
if (error > 0) {
return (error);
} else if (error < 0) {
sc->prv.serdes_tx_amp[0] = XGBE_SPEED_1000_TXAMP;
sc->prv.serdes_tx_amp[1] = XGBE_SPEED_2500_TXAMP;
sc->prv.serdes_tx_amp[2] = XGBE_SPEED_10000_TXAMP;
}
error = axgbe_get_optional_prop(dev, phy_node, XGBE_DFE_CFG_PROPERTY,
sc->prv.serdes_dfe_tap_cfg, sizeof(sc->prv.serdes_dfe_tap_cfg));
if (error > 0) {
return (error);
} else if (error < 0) {
sc->prv.serdes_dfe_tap_cfg[0] = XGBE_SPEED_1000_DFE_TAP_CONFIG;
sc->prv.serdes_dfe_tap_cfg[1] = XGBE_SPEED_2500_DFE_TAP_CONFIG;
sc->prv.serdes_dfe_tap_cfg[2] = XGBE_SPEED_10000_DFE_TAP_CONFIG;
}
error = axgbe_get_optional_prop(dev, phy_node, XGBE_DFE_ENA_PROPERTY,
sc->prv.serdes_dfe_tap_ena, sizeof(sc->prv.serdes_dfe_tap_ena));
if (error > 0) {
return (error);
} else if (error < 0) {
sc->prv.serdes_dfe_tap_ena[0] = XGBE_SPEED_1000_DFE_TAP_ENABLE;
sc->prv.serdes_dfe_tap_ena[1] = XGBE_SPEED_2500_DFE_TAP_ENABLE;
sc->prv.serdes_dfe_tap_ena[2] = XGBE_SPEED_10000_DFE_TAP_ENABLE;
}
/* Check if the NIC is DMA coherent */
sc->prv.coherent = OF_hasprop(node, "dma-coherent");
if (sc->prv.coherent) {
sc->prv.arcr = XGBE_DMA_OS_ARCR;
sc->prv.awcr = XGBE_DMA_OS_AWCR;
} else {
sc->prv.arcr = XGBE_DMA_SYS_ARCR;
sc->prv.awcr = XGBE_DMA_SYS_AWCR;
}
/* Create the lock & workqueues */
spin_lock_init(&sc->prv.xpcs_lock);
sc->prv.dev_workqueue = taskqueue_create("axgbe", M_WAITOK,
taskqueue_thread_enqueue, &sc->prv.dev_workqueue);
taskqueue_start_threads(&sc->prv.dev_workqueue, 1, PI_NET,
"axgbe taskq");
/* Set the needed pointers */
xgbe_init_function_ptrs_phy(&sc->prv.phy_if);
xgbe_init_function_ptrs_dev(&sc->prv.hw_if);
xgbe_init_function_ptrs_desc(&sc->prv.desc_if);
sc->prv.vdata->init_function_ptrs_phy_impl(&sc->prv.phy_if);
/* Reset the hardware */
sc->prv.hw_if.exit(&sc->prv);
/* Read the hardware features */
xgbe_get_all_hw_features(&sc->prv);
/* Set default values */
sc->prv.tx_desc_count = XGBE_TX_DESC_CNT;
sc->prv.tx_sf_mode = MTL_TSF_ENABLE;
sc->prv.tx_threshold = MTL_TX_THRESHOLD_64;
sc->prv.tx_osp_mode = DMA_OSP_ENABLE;
sc->prv.rx_desc_count = XGBE_RX_DESC_CNT;
sc->prv.rx_sf_mode = MTL_RSF_DISABLE;
sc->prv.rx_threshold = MTL_RX_THRESHOLD_64;
sc->prv.pbl = DMA_PBL_128;
sc->prv.pause_autoneg = 1;
sc->prv.tx_pause = 1;
sc->prv.rx_pause = 1;
sc->prv.phy_speed = SPEED_UNKNOWN;
sc->prv.power_down = 0;
/* TODO: Limit to min(ncpus, hw rings) */
sc->prv.tx_ring_count = 1;
sc->prv.tx_q_count = 1;
sc->prv.rx_ring_count = 1;
sc->prv.rx_q_count = sc->prv.hw_feat.rx_q_cnt;
/* Init the PHY */
sc->prv.phy_if.phy_init(&sc->prv);
/* Set the coalescing */
xgbe_init_rx_coalesce(&sc->prv);
xgbe_init_tx_coalesce(&sc->prv);
/* Wire up the ifnet and attach to the network stack. */
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setinitfn(ifp, axgbe_init);
if_setsoftc(ifp, sc);
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setioctlfn(ifp, axgbe_ioctl);
/* TODO - change it to iflib way */
if_setqflushfn(ifp, axgbe_qflush);
if_setgetcounterfn(ifp, axgbe_get_counter);
/* TODO: Support HW offload */
if_setcapabilities(ifp, 0);
if_setcapenable(ifp, 0);
if_sethwassist(ifp, 0);
ether_ifattach(ifp, sc->mac_addr);
ifmedia_init(&sc->media, IFM_IMASK, axgbe_media_change,
axgbe_media_status);
#ifdef notyet
ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
#endif
ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
/* Interface starts down; axgbe_init() brings it up. */
set_bit(XGBE_DOWN, &sc->prv.dev_state);
/* TODO - change it to iflib way */
return (0);
}
/* newbus dispatch table for the XGBE MAC driver. */
static device_method_t axgbe_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, axgbe_probe),
DEVMETHOD(device_attach, axgbe_attach),
{ 0, 0 }
};
DEFINE_CLASS_0(axgbe, axgbe_driver, axgbe_methods,
sizeof(struct axgbe_softc));
/* NOTE(review): module registered as "axa" rather than "axgbe" -- confirm intentional. */
DRIVER_MODULE(axa, simplebus, axgbe_driver, 0, 0);
/* FDT "compatible" strings accepted by the standalone XGBE PHY driver. */
static struct ofw_compat_data phy_compat_data[] = {
{ "amd,xgbe-phy-seattle-v1a", true },
{ NULL, false }
};
/*
 * Probe for the standalone XGBE PHY node.
 */
static int
axgbephy_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, phy_compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "AMD 10 Gigabit Ethernet");
	return (BUS_PROBE_DEFAULT);
}
/*
 * Register this PHY node's xref so the MAC driver can resolve its
 * "phy-handle" property back to this device.
 */
static int
axgbephy_attach(device_t dev)
{

	OF_device_register_xref(
	    OF_xref_from_node(ofw_bus_get_node(dev)), dev);
	return (0);
}
/* newbus dispatch table for the standalone XGBE PHY driver. */
static device_method_t axgbephy_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, axgbephy_probe),
DEVMETHOD(device_attach, axgbephy_attach),
{ 0, 0 }
};
DEFINE_CLASS_0(axgbephy, axgbephy_driver, axgbephy_methods, 0);
/*
 * Attach during the resource bus pass -- presumably so the PHY's xref
 * is registered before the MAC driver attaches and looks it up; verify.
 */
EARLY_DRIVER_MODULE(axgbephy, simplebus, axgbephy_driver,
0, 0, BUS_PASS_RESOURCE + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/dev/bce/if_bce.c b/sys/dev/bce/if_bce.c
index 2564d0c1dac6..226fca16ac28 100644
--- a/sys/dev/bce/if_bce.c
+++ b/sys/dev/bce/if_bce.c
@@ -1,11399 +1,11393 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2006-2014 QLogic Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
/*
* The following controllers are supported by this driver:
* BCM5706C A2, A3
* BCM5706S A2, A3
* BCM5708C B1, B2
* BCM5708S B1, B2
* BCM5709C A1, C0
* BCM5709S A1, C0
* BCM5716C C0
* BCM5716S C0
*
* The following controllers are not supported by this driver:
* BCM5706C A0, A1 (pre-production)
* BCM5706S A0, A1 (pre-production)
* BCM5708C A0, B0 (pre-production)
* BCM5708S A0, B0 (pre-production)
* BCM5709C A0 B0, B1, B2 (pre-production)
* BCM5709S A0, B0, B1, B2 (pre-production)
*/
#include "opt_bce.h"
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miidevs.h"
#include <dev/mii/brgphyreg.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include "miibus_if.h"
#include <dev/bce/if_bcereg.h>
#include <dev/bce/if_bcefw.h>
/****************************************************************************/
/* BCE Debug Options */
/****************************************************************************/
#ifdef BCE_DEBUG
/* Debug-build message mask and fault-injection knobs. */
u32 bce_debug = BCE_WARN;
/*
 * NOTE(review): the ratio table below appears to document the failure
 * probability encoded by the *_sim_control values that follow -- confirm
 * against the sim-control users in this file.
 */
/* 0 = Never */
/* 1 = 1 in 2,147,483,648 */
/* 256 = 1 in 8,388,608 */
/* 2048 = 1 in 1,048,576 */
/* 65536 = 1 in 32,768 */
/* 1048576 = 1 in 2,048 */
/* 268435456 = 1 in 8 */
/* 536870912 = 1 in 4 */
/* 1073741824 = 1 in 2 */
/* Controls how often the l2_fhdr frame error check will fail. */
int l2fhdr_error_sim_control = 0;
/* Controls how often the unexpected attention check will fail. */
int unexpected_attention_sim_control = 0;
/* Controls how often to simulate an mbuf allocation failure. */
int mbuf_alloc_failed_sim_control = 0;
/* Controls how often to simulate a DMA mapping failure. */
int dma_map_addr_failed_sim_control = 0;
/* Controls how often to simulate a bootcode failure. */
int bootcode_running_failure_sim_control = 0;
#endif
/****************************************************************************/
/* PCI Device ID Table */
/* */
/* Used by bce_probe() to identify the devices supported by this driver. */
/****************************************************************************/
/* Maximum length of a device description string built from this table. */
#define BCE_DEVDESC_MAX 64
/*
 * PCI match table: entries with specific subvendor/subdevice IDs come
 * first so OEM boards match before the PCI_ANY_ID wildcard rows.
 */
static const struct bce_type bce_devs[] = {
/* BCM5706C Controllers and OEM boards. */
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101,
"HP NC370T Multifunction Gigabit Server Adapter" },
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106,
"HP NC370i Multifunction Gigabit Server Adapter" },
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070,
"HP NC380T PCIe DP Multifunc Gig Server Adapter" },
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709,
"HP NC371i Multifunction Gigabit Server Adapter" },
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID,
"QLogic NetXtreme II BCM5706 1000Base-T" },
/* BCM5706S controllers and OEM boards. */
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
"HP NC370F Multifunction Gigabit Server Adapter" },
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID,
"QLogic NetXtreme II BCM5706 1000Base-SX" },
/* BCM5708C controllers and OEM boards. */
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037,
"HP NC373T PCIe Multifunction Gig Server Adapter" },
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038,
"HP NC373i Multifunction Gigabit Server Adapter" },
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045,
"HP NC374m PCIe Multifunction Adapter" },
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID,
"QLogic NetXtreme II BCM5708 1000Base-T" },
/* BCM5708S controllers and OEM boards. */
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706,
"HP NC373m Multifunction Gigabit Server Adapter" },
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b,
"HP NC373i Multifunction Gigabit Server Adapter" },
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d,
"HP NC373F PCIe Multifunc Giga Server Adapter" },
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID,
"QLogic NetXtreme II BCM5708 1000Base-SX" },
/* BCM5709C controllers and OEM boards. */
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055,
"HP NC382i DP Multifunction Gigabit Server Adapter" },
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059,
"HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID,
"QLogic NetXtreme II BCM5709 1000Base-T" },
/* BCM5709S controllers and OEM boards. */
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d,
"HP NC382m DP 1GbE Multifunction BL-c Adapter" },
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056,
"HP NC382i DP Multifunction Gigabit Server Adapter" },
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID,
"QLogic NetXtreme II BCM5709 1000Base-SX" },
/* BCM5716 controllers and OEM boards. */
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID,
"QLogic NetXtreme II BCM5716 1000Base-T" },
{ 0, 0, 0, 0, NULL }
};
/****************************************************************************/
/* Supported Flash NVRAM device data. */
/****************************************************************************/
/*
 * NVRAM device table.  The leading five words of each entry appear to
 * be NVRAM strapping/configuration register values (see struct
 * flash_spec in if_bcereg.h -- TODO confirm field order), followed by
 * access flags, page geometry, address mask, total size, and a name.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS (BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
#define NONBUFFERED_FLAGS (BCE_NV_WREN)
/* Slow EEPROM */
{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
"EEPROM - slow"},
/* Expansion entry 0001 */
{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 0001"},
/* Saifun SA25F010 (non-buffered flash) */
/* strap, cfg1, & write1 need updates */
{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
"Non-buffered flash (128kB)"},
/* Saifun SA25F020 (non-buffered flash) */
/* strap, cfg1, & write1 need updates */
{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
"Non-buffered flash (256kB)"},
/* Expansion entry 0100 */
{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 0100"},
/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
"Entry 0101: ST M45PE10 (128kB non-buffered)"},
/* Entry 0110: ST M45PE20 (non-buffered flash)*/
{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
"Entry 0110: ST M45PE20 (256kB non-buffered)"},
/* Saifun SA25F005 (non-buffered flash) */
/* strap, cfg1, & write1 need updates */
{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
"Non-buffered flash (64kB)"},
/* Fast EEPROM */
{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
"EEPROM - fast"},
/* Expansion entry 1001 */
{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 1001"},
/* Expansion entry 1010 */
{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 1010"},
/* ATMEL AT45DB011B (buffered flash) */
{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
"Buffered flash (128kB)"},
/* Expansion entry 1100 */
{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 1100"},
/* Expansion entry 1101 */
{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 1101"},
/* Ateml Expansion entry 1110 */
{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
"Entry 1110 (Atmel)"},
/* ATMEL AT45DB021B (buffered flash) */
{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
"Buffered flash (256kB)"},
};
/*
* The BCM5709 controllers transparently handle the
* differences between Atmel 264 byte pages and all
* flash devices which use 256 byte pages, so no
* logical-to-physical mapping is required in the
* driver.
*/
/* Single NVRAM descriptor used for all BCM5709/5716 parts. */
static const struct flash_spec flash_5709 = {
.flags = BCE_NV_BUFFERED,
.page_bits = BCM5709_FLASH_PAGE_BITS,
.page_size = BCM5709_FLASH_PAGE_SIZE,
.addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
.total_size = BUFFERED_FLASH_TOTAL_SIZE * 2,
.name = "5709/5716 buffered flash (256kB)",
};
/****************************************************************************/
/* FreeBSD device entry points. */
/****************************************************************************/
static int bce_probe (device_t);
static int bce_attach (device_t);
static int bce_detach (device_t);
static int bce_shutdown (device_t);
/****************************************************************************/
/* BCE Debug Data Structure Dump Routines */
/****************************************************************************/
#ifdef BCE_DEBUG
static u32 bce_reg_rd (struct bce_softc *, u32);
static void bce_reg_wr (struct bce_softc *, u32, u32);
static void bce_reg_wr16 (struct bce_softc *, u32, u16);
static u32 bce_ctx_rd (struct bce_softc *, u32, u32);
static void bce_dump_enet (struct bce_softc *, struct mbuf *);
static void bce_dump_mbuf (struct bce_softc *, struct mbuf *);
static void bce_dump_tx_mbuf_chain (struct bce_softc *, u16, int);
static void bce_dump_rx_mbuf_chain (struct bce_softc *, u16, int);
static void bce_dump_pg_mbuf_chain (struct bce_softc *, u16, int);
static void bce_dump_txbd (struct bce_softc *,
int, struct tx_bd *);
static void bce_dump_rxbd (struct bce_softc *,
int, struct rx_bd *);
static void bce_dump_pgbd (struct bce_softc *,
int, struct rx_bd *);
static void bce_dump_l2fhdr (struct bce_softc *,
int, struct l2_fhdr *);
static void bce_dump_ctx (struct bce_softc *, u16);
static void bce_dump_ftqs (struct bce_softc *);
static void bce_dump_tx_chain (struct bce_softc *, u16, int);
static void bce_dump_rx_bd_chain (struct bce_softc *, u16, int);
static void bce_dump_pg_chain (struct bce_softc *, u16, int);
static void bce_dump_status_block (struct bce_softc *);
static void bce_dump_stats_block (struct bce_softc *);
static void bce_dump_driver_state (struct bce_softc *);
static void bce_dump_hw_state (struct bce_softc *);
static void bce_dump_shmem_state (struct bce_softc *);
static void bce_dump_mq_regs (struct bce_softc *);
static void bce_dump_bc_state (struct bce_softc *);
static void bce_dump_txp_state (struct bce_softc *, int);
static void bce_dump_rxp_state (struct bce_softc *, int);
static void bce_dump_tpat_state (struct bce_softc *, int);
static void bce_dump_cp_state (struct bce_softc *, int);
static void bce_dump_com_state (struct bce_softc *, int);
static void bce_dump_rv2p_state (struct bce_softc *);
static void bce_breakpoint (struct bce_softc *);
#endif /*BCE_DEBUG */
/****************************************************************************/
/* BCE Register/Memory Access Routines */
/****************************************************************************/
static u32 bce_reg_rd_ind (struct bce_softc *, u32);
static void bce_reg_wr_ind (struct bce_softc *, u32, u32);
static void bce_shmem_wr (struct bce_softc *, u32, u32);
static u32 bce_shmem_rd (struct bce_softc *, u32);
static void bce_ctx_wr (struct bce_softc *, u32, u32, u32);
static int bce_miibus_read_reg (device_t, int, int);
static int bce_miibus_write_reg (device_t, int, int, int);
static void bce_miibus_statchg (device_t);
#ifdef BCE_DEBUG
static int bce_sysctl_nvram_dump(SYSCTL_HANDLER_ARGS);
#ifdef BCE_NVRAM_WRITE_SUPPORT
static int bce_sysctl_nvram_write(SYSCTL_HANDLER_ARGS);
#endif
#endif
/****************************************************************************/
/* BCE NVRAM Access Routines */
/****************************************************************************/
static int bce_acquire_nvram_lock (struct bce_softc *);
static int bce_release_nvram_lock (struct bce_softc *);
static void bce_enable_nvram_access(struct bce_softc *);
static void bce_disable_nvram_access(struct bce_softc *);
static int bce_nvram_read_dword (struct bce_softc *, u32, u8 *, u32);
static int bce_init_nvram (struct bce_softc *);
static int bce_nvram_read (struct bce_softc *, u32, u8 *, int);
static int bce_nvram_test (struct bce_softc *);
#ifdef BCE_NVRAM_WRITE_SUPPORT
static int bce_enable_nvram_write (struct bce_softc *);
static void bce_disable_nvram_write(struct bce_softc *);
static int bce_nvram_erase_page (struct bce_softc *, u32);
static int bce_nvram_write_dword (struct bce_softc *, u32, u8 *, u32);
static int bce_nvram_write (struct bce_softc *, u32, u8 *, int);
#endif
/****************************************************************************/
/* */
/****************************************************************************/
static void bce_get_rx_buffer_sizes(struct bce_softc *, int);
static void bce_get_media (struct bce_softc *);
static void bce_init_media (struct bce_softc *);
static u32 bce_get_rphy_link (struct bce_softc *);
static void bce_dma_map_addr (void *, bus_dma_segment_t *, int, int);
static int bce_dma_alloc (device_t);
static void bce_dma_free (struct bce_softc *);
static void bce_release_resources (struct bce_softc *);
/****************************************************************************/
/* BCE Firmware Synchronization and Load */
/****************************************************************************/
static void bce_fw_cap_init (struct bce_softc *);
static int bce_fw_sync (struct bce_softc *, u32);
static void bce_load_rv2p_fw (struct bce_softc *, const u32 *, u32,
u32);
static void bce_load_cpu_fw (struct bce_softc *,
struct cpu_reg *, struct fw_info *);
static void bce_start_cpu (struct bce_softc *, struct cpu_reg *);
static void bce_halt_cpu (struct bce_softc *, struct cpu_reg *);
static void bce_start_rxp_cpu (struct bce_softc *);
static void bce_init_rxp_cpu (struct bce_softc *);
static void bce_init_txp_cpu (struct bce_softc *);
static void bce_init_tpat_cpu (struct bce_softc *);
static void bce_init_cp_cpu (struct bce_softc *);
static void bce_init_com_cpu (struct bce_softc *);
static void bce_init_cpus (struct bce_softc *);
static void bce_print_adapter_info (struct bce_softc *);
static void bce_probe_pci_caps (device_t, struct bce_softc *);
static void bce_stop (struct bce_softc *);
static int bce_reset (struct bce_softc *, u32);
static int bce_chipinit (struct bce_softc *);
static int bce_blockinit (struct bce_softc *);
static int bce_init_tx_chain (struct bce_softc *);
static void bce_free_tx_chain (struct bce_softc *);
static int bce_get_rx_buf (struct bce_softc *, u16, u16, u32 *);
static int bce_init_rx_chain (struct bce_softc *);
static void bce_fill_rx_chain (struct bce_softc *);
static void bce_free_rx_chain (struct bce_softc *);
static int bce_get_pg_buf (struct bce_softc *, u16, u16);
static int bce_init_pg_chain (struct bce_softc *);
static void bce_fill_pg_chain (struct bce_softc *);
static void bce_free_pg_chain (struct bce_softc *);
static struct mbuf *bce_tso_setup (struct bce_softc *,
struct mbuf **, u16 *);
static int bce_tx_encap (struct bce_softc *, struct mbuf **);
static void bce_start_locked (if_t);
static void bce_start (if_t);
static int bce_ioctl (if_t, u_long, caddr_t);
static uint64_t bce_get_counter (if_t, ift_counter);
static void bce_watchdog (struct bce_softc *);
static int bce_ifmedia_upd (if_t);
static int bce_ifmedia_upd_locked (if_t);
static void bce_ifmedia_sts (if_t, struct ifmediareq *);
static void bce_ifmedia_sts_rphy (struct bce_softc *, struct ifmediareq *);
static void bce_init_locked (struct bce_softc *);
static void bce_init (void *);
static void bce_mgmt_init_locked (struct bce_softc *sc);
static int bce_init_ctx (struct bce_softc *);
static void bce_get_mac_addr (struct bce_softc *);
static void bce_set_mac_addr (struct bce_softc *);
static void bce_phy_intr (struct bce_softc *);
static inline u16 bce_get_hw_rx_cons (struct bce_softc *);
static void bce_rx_intr (struct bce_softc *);
static void bce_tx_intr (struct bce_softc *);
static void bce_disable_intr (struct bce_softc *);
static void bce_enable_intr (struct bce_softc *, int);
static void bce_intr (void *);
static void bce_set_rx_mode (struct bce_softc *);
static void bce_stats_update (struct bce_softc *);
static void bce_tick (void *);
static void bce_pulse (void *);
static void bce_add_sysctls (struct bce_softc *);
/****************************************************************************/
/* FreeBSD device dispatch table. */
/****************************************************************************/
/* newbus + miibus dispatch table for the bce(4) driver. */
static device_method_t bce_methods[] = {
/* Device interface (device_if.h) */
DEVMETHOD(device_probe, bce_probe),
DEVMETHOD(device_attach, bce_attach),
DEVMETHOD(device_detach, bce_detach),
DEVMETHOD(device_shutdown, bce_shutdown),
/* Supported by device interface but not used here. */
/* DEVMETHOD(device_identify, bce_identify), */
/* DEVMETHOD(device_suspend, bce_suspend), */
/* DEVMETHOD(device_resume, bce_resume), */
/* DEVMETHOD(device_quiesce, bce_quiesce), */
/* MII interface (miibus_if.h) */
DEVMETHOD(miibus_readreg, bce_miibus_read_reg),
DEVMETHOD(miibus_writereg, bce_miibus_write_reg),
DEVMETHOD(miibus_statchg, bce_miibus_statchg),
/* Supported by MII interface but not used here. */
/* DEVMETHOD(miibus_linkchg, bce_miibus_linkchg), */
/* DEVMETHOD(miibus_mediainit, bce_miibus_mediainit), */
DEVMETHOD_END
};
static driver_t bce_driver = {
"bce",
bce_methods,
sizeof(struct bce_softc)
};
MODULE_DEPEND(bce, pci, 1, 1, 1);
MODULE_DEPEND(bce, ether, 1, 1, 1);
MODULE_DEPEND(bce, miibus, 1, 1, 1);
DRIVER_MODULE(bce, pci, bce_driver, NULL, NULL);
DRIVER_MODULE(miibus, bce, miibus_driver, NULL, NULL);
/* Export PNP match info from bce_devs (minus the NULL sentinel row). */
MODULE_PNP_INFO("U16:vendor;U16:device;U16:#;U16:#;D:#", pci, bce,
bce_devs, nitems(bce_devs) - 1);
/****************************************************************************/
/* Tunable device values */
/****************************************************************************/
/* Loader-tunable knobs under hw.bce.* */
static SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"bce driver parameters");
/* Allowable values are TRUE or FALSE */
static int bce_verbose = TRUE;
SYSCTL_INT(_hw_bce, OID_AUTO, verbose, CTLFLAG_RDTUN, &bce_verbose, 0,
"Verbose output enable/disable");
/* Allowable values are TRUE or FALSE */
static int bce_tso_enable = TRUE;
SYSCTL_INT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0,
"TSO Enable/Disable");
/* Allowable values are 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
/* ToDo: Add MSI-X support. */
static int bce_msi_enable = 1;
SYSCTL_INT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0,
"MSI-X|MSI|INTx selector");
/*
 * NOTE(review): the variables below are declared int but exported with
 * SYSCTL_UINT -- a signed/unsigned mismatch; confirm and reconcile.
 */
/* Allowable values are 1, 2, 4, 8. */
static int bce_rx_pages = DEFAULT_RX_PAGES;
SYSCTL_UINT(_hw_bce, OID_AUTO, rx_pages, CTLFLAG_RDTUN, &bce_rx_pages, 0,
"Receive buffer descriptor pages (1 page = 255 buffer descriptors)");
/* Allowable values are 1, 2, 4, 8. */
static int bce_tx_pages = DEFAULT_TX_PAGES;
SYSCTL_UINT(_hw_bce, OID_AUTO, tx_pages, CTLFLAG_RDTUN, &bce_tx_pages, 0,
"Transmit buffer descriptor pages (1 page = 255 buffer descriptors)");
/* Allowable values are TRUE or FALSE. */
static int bce_hdr_split = TRUE;
SYSCTL_UINT(_hw_bce, OID_AUTO, hdr_split, CTLFLAG_RDTUN, &bce_hdr_split, 0,
"Frame header/payload splitting Enable/Disable");
/* Allowable values are TRUE or FALSE. */
static int bce_strict_rx_mtu = FALSE;
SYSCTL_UINT(_hw_bce, OID_AUTO, strict_rx_mtu, CTLFLAG_RDTUN,
&bce_strict_rx_mtu, 0,
"Enable/Disable strict RX frame size checking");
/* Allowable values are 0 ... 100 */
#ifdef BCE_DEBUG
/* Generate 1 interrupt for every transmit completion. */
static int bce_tx_quick_cons_trip_int = 1;
#else
/* Generate 1 interrupt for every 20 transmit completions. */
static int bce_tx_quick_cons_trip_int = DEFAULT_TX_QUICK_CONS_TRIP_INT;
#endif
SYSCTL_UINT(_hw_bce, OID_AUTO, tx_quick_cons_trip_int, CTLFLAG_RDTUN,
&bce_tx_quick_cons_trip_int, 0,
"Transmit BD trip point during interrupts");
/* Allowable values are 0 ... 100 */
/* Generate 1 interrupt for every transmit completion. */
#ifdef BCE_DEBUG
static int bce_tx_quick_cons_trip = 1;
#else
/* Generate 1 interrupt for every 20 transmit completions. */
static int bce_tx_quick_cons_trip = DEFAULT_TX_QUICK_CONS_TRIP;
#endif
SYSCTL_UINT(_hw_bce, OID_AUTO, tx_quick_cons_trip, CTLFLAG_RDTUN,
&bce_tx_quick_cons_trip, 0,
"Transmit BD trip point");
/* Allowable values are 0 ... 100 */
#ifdef BCE_DEBUG
/* Generate an interrupt if 0us have elapsed since the last TX completion. */
static int bce_tx_ticks_int = 0;
#else
/* Generate an interrupt if 80us have elapsed since the last TX completion. */
static int bce_tx_ticks_int = DEFAULT_TX_TICKS_INT;
#endif
SYSCTL_UINT(_hw_bce, OID_AUTO, tx_ticks_int, CTLFLAG_RDTUN,
&bce_tx_ticks_int, 0, "Transmit ticks count during interrupt");
/* Allowable values are 0 ... 100 */
#ifdef BCE_DEBUG
/* Generate an interrupt if 0us have elapsed since the last TX completion. */
static int bce_tx_ticks = 0;
#else
/* Generate an interrupt if 80us have elapsed since the last TX completion. */
static int bce_tx_ticks = DEFAULT_TX_TICKS;
#endif
SYSCTL_UINT(_hw_bce, OID_AUTO, tx_ticks, CTLFLAG_RDTUN,
&bce_tx_ticks, 0, "Transmit ticks count");
/* Allowable values are 1 ... 100 */
#ifdef BCE_DEBUG
/* Generate 1 interrupt for every received frame. */
static int bce_rx_quick_cons_trip_int = 1;
#else
/* Generate 1 interrupt for every 6 received frames. */
static int bce_rx_quick_cons_trip_int = DEFAULT_RX_QUICK_CONS_TRIP_INT;
#endif
SYSCTL_UINT(_hw_bce, OID_AUTO, rx_quick_cons_trip_int, CTLFLAG_RDTUN,
&bce_rx_quick_cons_trip_int, 0,
"Receive BD trip point during interrupts");
/* Allowable values are 1 ... 100 */
#ifdef BCE_DEBUG
/* Generate 1 interrupt for every received frame. */
static int bce_rx_quick_cons_trip = 1;
#else
/* Generate 1 interrupt for every 6 received frames. */
static int bce_rx_quick_cons_trip = DEFAULT_RX_QUICK_CONS_TRIP;
#endif
SYSCTL_UINT(_hw_bce, OID_AUTO, rx_quick_cons_trip, CTLFLAG_RDTUN,
&bce_rx_quick_cons_trip, 0,
"Receive BD trip point");
/* Allowable values are 0 ... 100 */
#ifdef BCE_DEBUG
/* Generate an int. if 0us have elapsed since the last received frame. */
static int bce_rx_ticks_int = 0;
#else
/* Generate an int. if 18us have elapsed since the last received frame. */
static int bce_rx_ticks_int = DEFAULT_RX_TICKS_INT;
#endif
SYSCTL_UINT(_hw_bce, OID_AUTO, rx_ticks_int, CTLFLAG_RDTUN,
&bce_rx_ticks_int, 0, "Receive ticks count during interrupt");
/* Allowable values are 0 ... 100 */
#ifdef BCE_DEBUG
/* Generate an int. if 0us have elapsed since the last received frame. */
static int bce_rx_ticks = 0;
#else
/* Generate an int. if 18us have elapsed since the last received frame. */
static int bce_rx_ticks = DEFAULT_RX_TICKS;
#endif
SYSCTL_UINT(_hw_bce, OID_AUTO, rx_ticks, CTLFLAG_RDTUN,
&bce_rx_ticks, 0, "Receive ticks count");
/****************************************************************************/
/* Device probe function. */
/* */
/* Compares the device to the driver's list of supported devices and */
/* reports back to the OS whether this is the right driver for the device. */
/* */
/* Returns: */
/* BUS_PROBE_DEFAULT on success, positive value on failure. */
/****************************************************************************/
static int
bce_probe(device_t dev)
{
	const struct bce_type *t;
	struct bce_softc *sc;
	u32 revid;
	u16 vid = 0, did = 0, svid = 0, sdid = 0;

	t = bce_devs;

	sc = device_get_softc(dev);
	sc->bce_unit = device_get_unit(dev);
	sc->bce_dev = dev;

	/* Get the data for the device to be probed. */
	vid  = pci_get_vendor(dev);
	did  = pci_get_device(dev);
	svid = pci_get_subvendor(dev);
	sdid = pci_get_subdevice(dev);

	DBPRINT(sc, BCE_EXTREME_LOAD,
	    "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
	    "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);

	/* Look through the list of known devices for a match. */
	while (t->bce_name != NULL) {
		if ((vid == t->bce_vid) && (did == t->bce_did) &&
		    ((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
		    ((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
			/*
			 * Read the revision ID once instead of issuing two
			 * identical config space reads; both the stepping
			 * letter and the metal revision come from it.
			 */
			revid = pci_read_config(dev, PCIR_REVID, 4);
			device_set_descf(dev, "%s (%c%d)", t->bce_name,
			    (((revid & 0xf0) >> 4) + 'A'), (revid & 0xf));
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}
	return (ENXIO);
}
/****************************************************************************/
/* Adapter information print function. */
/* */
/* Prints the adapter's ASIC ID, bus configuration, firmware versions, */
/* buffer counts, feature flags and interrupt coalescing settings when */
/* verbose output is enabled. */
/* */
/* Returns: */
/* None. */
/****************************************************************************/
static void
bce_print_adapter_info(struct bce_softc *sc)
{
/* Counts how many feature flags have been printed so far; used to decide
 * when a '|' separator is needed between flag names. */
int i = 0;
DBENTER(BCE_VERBOSE_LOAD);
if (bce_verbose || bootverbose) {
BCE_PRINTF("ASIC (0x%08X); ", sc->bce_chipid);
/* Chip stepping: bits 15:12 are the letter, bits 11:4 the number. */
printf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >>
12) + 'A', ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
/* Bus info. */
if (sc->bce_flags & BCE_PCIE_FLAG) {
printf("Bus (PCIe x%d, ", sc->link_width);
switch (sc->link_speed) {
case 1: printf("2.5Gbps); "); break;
case 2: printf("5Gbps); "); break;
default: printf("Unknown link speed); ");
}
} else {
printf("Bus (PCI%s, %s, %dMHz); ",
((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
((sc->bce_flags & BCE_PCI_32BIT_FLAG) ?
"32-bit" : "64-bit"), sc->bus_speed_mhz);
}
/* Firmware version and device features. */
/* PG (page) buffers are only used when header splitting is enabled. */
printf("B/C (%s); Bufs (RX:%d;TX:%d;PG:%d); Flags (",
sc->bce_bc_ver, sc->rx_pages, sc->tx_pages,
(bce_hdr_split == TRUE ? sc->pg_pages: 0));
if (bce_hdr_split == TRUE) {
printf("SPLT");
i++;
}
if (sc->bce_flags & BCE_USING_MSI_FLAG) {
if (i > 0) printf("|");
printf("MSI"); i++;
}
if (sc->bce_flags & BCE_USING_MSIX_FLAG) {
if (i > 0) printf("|");
printf("MSI-X"); i++;
}
if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) {
if (i > 0) printf("|");
printf("2.5G"); i++;
}
if (sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) {
if (i > 0) printf("|");
printf("Remote PHY(%s)",
sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG ?
"FIBER" : "TP"); i++;
}
/* When management firmware is enabled its version closes the list. */
if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
if (i > 0) printf("|");
printf("MFW); MFW (%s)\n", sc->bce_mfw_ver);
} else {
printf(")\n");
}
/* Coalescing parameters as validated by bce_set_tunables(). */
printf("Coal (RX:%d,%d,%d,%d; TX:%d,%d,%d,%d)\n",
sc->bce_rx_quick_cons_trip_int,
sc->bce_rx_quick_cons_trip,
sc->bce_rx_ticks_int,
sc->bce_rx_ticks,
sc->bce_tx_quick_cons_trip_int,
sc->bce_tx_quick_cons_trip,
sc->bce_tx_ticks_int,
sc->bce_tx_ticks);
}
DBEXIT(BCE_VERBOSE_LOAD);
}
/****************************************************************************/
/* PCI Capabilities Probe Function. */
/* */
/* Walks the PCI capabilities list for the device to find what features */
/* are supported. */
/* */
/* Returns: */
/* None. */
/****************************************************************************/
static void
bce_probe_pci_caps(device_t dev, struct bce_softc *sc)
{
	u32 cap_reg;

	DBENTER(BCE_VERBOSE_LOAD);

	/* Record PCI-X capability, if present. */
	if (pci_find_cap(dev, PCIY_PCIX, &cap_reg) == 0 && cap_reg != 0)
		sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;

	/*
	 * Record PCI Express capability, if present, and snapshot the
	 * negotiated link speed and width from the link status register.
	 */
	if (pci_find_cap(dev, PCIY_EXPRESS, &cap_reg) == 0 && cap_reg != 0) {
		u16 link_status = pci_read_config(dev, cap_reg + 0x12, 2);

		DBPRINT(sc, BCE_INFO_LOAD, "PCIe link_status = "
		    "0x%08X\n", link_status);
		sc->link_speed = link_status & 0xf;
		sc->link_width = (link_status >> 4) & 0x3f;
		sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
		sc->bce_flags |= BCE_PCIE_FLAG;
	}

	/* Record MSI capability, if present. */
	if (pci_find_cap(dev, PCIY_MSI, &cap_reg) == 0 && cap_reg != 0)
		sc->bce_cap_flags |= BCE_MSI_CAPABLE_FLAG;

	/* Record MSI-X capability, if present. */
	if (pci_find_cap(dev, PCIY_MSIX, &cap_reg) == 0 && cap_reg != 0)
		sc->bce_cap_flags |= BCE_MSIX_CAPABLE_FLAG;

	DBEXIT(BCE_VERBOSE_LOAD);
}
/****************************************************************************/
/* Load and validate user tunable settings. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_set_tunables(struct bce_softc *sc)
{
/* Set sysctl values for RX page count. */
/* Only power-of-two page counts from 1 to 8 are accepted. */
switch (bce_rx_pages) {
case 1:
/* fall-through */
case 2:
/* fall-through */
case 4:
/* fall-through */
case 8:
sc->rx_pages = bce_rx_pages;
break;
default:
sc->rx_pages = DEFAULT_RX_PAGES;
BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
"hw.bce.rx_pages! Setting default of %d.\n",
__FILE__, __LINE__, bce_rx_pages, DEFAULT_RX_PAGES);
}
/* ToDo: Consider allowing user setting for pg_pages. */
/* Page-split buffer pages scale with RX pages, capped at MAX_PG_PAGES. */
sc->pg_pages = min((sc->rx_pages * 4), MAX_PG_PAGES);
/* Set sysctl values for TX page count. */
switch (bce_tx_pages) {
case 1:
/* fall-through */
case 2:
/* fall-through */
case 4:
/* fall-through */
case 8:
sc->tx_pages = bce_tx_pages;
break;
default:
sc->tx_pages = DEFAULT_TX_PAGES;
BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
"hw.bce.tx_pages! Setting default of %d.\n",
__FILE__, __LINE__, bce_tx_pages, DEFAULT_TX_PAGES);
}
/*
 * Validate the TX trip point (i.e. the number of
 * TX completions before a status block update is
 * generated and an interrupt is asserted.
 */
if (bce_tx_quick_cons_trip_int <= 100) {
sc->bce_tx_quick_cons_trip_int =
bce_tx_quick_cons_trip_int;
} else {
BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
"hw.bce.tx_quick_cons_trip_int! Setting default of %d.\n",
__FILE__, __LINE__, bce_tx_quick_cons_trip_int,
DEFAULT_TX_QUICK_CONS_TRIP_INT);
sc->bce_tx_quick_cons_trip_int =
DEFAULT_TX_QUICK_CONS_TRIP_INT;
}
if (bce_tx_quick_cons_trip <= 100) {
sc->bce_tx_quick_cons_trip =
bce_tx_quick_cons_trip;
} else {
BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
"hw.bce.tx_quick_cons_trip! Setting default of %d.\n",
__FILE__, __LINE__, bce_tx_quick_cons_trip,
DEFAULT_TX_QUICK_CONS_TRIP);
sc->bce_tx_quick_cons_trip =
DEFAULT_TX_QUICK_CONS_TRIP;
}
/*
 * Validate the TX ticks count (i.e. the maximum amount
 * of time to wait after the last TX completion has
 * occurred before a status block update is generated
 * and an interrupt is asserted.
 */
if (bce_tx_ticks_int <= 100) {
sc->bce_tx_ticks_int =
bce_tx_ticks_int;
} else {
BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
"hw.bce.tx_ticks_int! Setting default of %d.\n",
__FILE__, __LINE__, bce_tx_ticks_int,
DEFAULT_TX_TICKS_INT);
sc->bce_tx_ticks_int =
DEFAULT_TX_TICKS_INT;
}
if (bce_tx_ticks <= 100) {
sc->bce_tx_ticks =
bce_tx_ticks;
} else {
BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
"hw.bce.tx_ticks! Setting default of %d.\n",
__FILE__, __LINE__, bce_tx_ticks,
DEFAULT_TX_TICKS);
sc->bce_tx_ticks =
DEFAULT_TX_TICKS;
}
/*
 * Validate the RX trip point (i.e. the number of
 * RX frames received before a status block update is
 * generated and an interrupt is asserted.
 */
if (bce_rx_quick_cons_trip_int <= 100) {
sc->bce_rx_quick_cons_trip_int =
bce_rx_quick_cons_trip_int;
} else {
BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
"hw.bce.rx_quick_cons_trip_int! Setting default of %d.\n",
__FILE__, __LINE__, bce_rx_quick_cons_trip_int,
DEFAULT_RX_QUICK_CONS_TRIP_INT);
sc->bce_rx_quick_cons_trip_int =
DEFAULT_RX_QUICK_CONS_TRIP_INT;
}
if (bce_rx_quick_cons_trip <= 100) {
sc->bce_rx_quick_cons_trip =
bce_rx_quick_cons_trip;
} else {
BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
"hw.bce.rx_quick_cons_trip! Setting default of %d.\n",
__FILE__, __LINE__, bce_rx_quick_cons_trip,
DEFAULT_RX_QUICK_CONS_TRIP);
sc->bce_rx_quick_cons_trip =
DEFAULT_RX_QUICK_CONS_TRIP;
}
/*
 * Validate the RX ticks count (i.e. the maximum amount
 * of time to wait after the last RX frame has been
 * received before a status block update is generated
 * and an interrupt is asserted.
 */
if (bce_rx_ticks_int <= 100) {
sc->bce_rx_ticks_int = bce_rx_ticks_int;
} else {
BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
"hw.bce.rx_ticks_int! Setting default of %d.\n",
__FILE__, __LINE__, bce_rx_ticks_int,
DEFAULT_RX_TICKS_INT);
sc->bce_rx_ticks_int = DEFAULT_RX_TICKS_INT;
}
if (bce_rx_ticks <= 100) {
sc->bce_rx_ticks = bce_rx_ticks;
} else {
BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
"hw.bce.rx_ticks! Setting default of %d.\n",
__FILE__, __LINE__, bce_rx_ticks,
DEFAULT_RX_TICKS);
sc->bce_rx_ticks = DEFAULT_RX_TICKS;
}
/* Disabling both RX ticks and RX trips will prevent interrupts. */
/* Note these checks read the raw tunables; an out-of-range value was
 * already replaced by a non-zero default above, so that case is safe. */
if ((bce_rx_quick_cons_trip == 0) && (bce_rx_ticks == 0)) {
BCE_PRINTF("%s(%d): Cannot set both hw.bce.rx_ticks and "
"hw.bce.rx_quick_cons_trip to 0. Setting default values.\n",
__FILE__, __LINE__);
sc->bce_rx_ticks = DEFAULT_RX_TICKS;
sc->bce_rx_quick_cons_trip = DEFAULT_RX_QUICK_CONS_TRIP;
}
/* Disabling both TX ticks and TX trips will prevent interrupts. */
if ((bce_tx_quick_cons_trip == 0) && (bce_tx_ticks == 0)) {
BCE_PRINTF("%s(%d): Cannot set both hw.bce.tx_ticks and "
"hw.bce.tx_quick_cons_trip to 0. Setting default values.\n",
__FILE__, __LINE__);
sc->bce_tx_ticks = DEFAULT_TX_TICKS;
sc->bce_tx_quick_cons_trip = DEFAULT_TX_QUICK_CONS_TRIP;
}
}
/****************************************************************************/
/* Device attach function. */
/* */
/* Allocates device resources, performs secondary chip identification, */
/* resets and initializes the hardware, and initializes driver instance */
/* variables. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
static int
bce_attach(device_t dev)
{
struct bce_softc *sc;
if_t ifp;
u32 val;
/* rc holds the eventual return value; 0 until a failure path sets it. */
int count, error, rc = 0, rid;
sc = device_get_softc(dev);
sc->bce_dev = dev;
DBENTER(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
sc->bce_unit = device_get_unit(dev);
/* Set initial device and PHY flags */
sc->bce_flags = 0;
sc->bce_phy_flags = 0;
/* Validate the hw.bce.* tunables and cache them in the softc. */
bce_set_tunables(sc);
pci_enable_busmaster(dev);
/* Allocate PCI memory resources. */
rid = PCIR_BAR(0);
sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&rid, RF_ACTIVE);
if (sc->bce_res_mem == NULL) {
BCE_PRINTF("%s(%d): PCI memory allocation failed\n",
__FILE__, __LINE__);
rc = ENXIO;
goto bce_attach_fail;
}
/* Get various resource handles. */
sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res_mem);
bce_probe_pci_caps(dev, sc);
rid = 1;
/* count tracks how many message-signaled vectors were allocated;
 * 0 after this section means fall back to a legacy INTx interrupt. */
count = 0;
/* MSI-X support is compiled out pending the ToDo above. */
#if 0
/* Try allocating MSI-X interrupts. */
if ((sc->bce_cap_flags & BCE_MSIX_CAPABLE_FLAG) &&
(bce_msi_enable >= 2) &&
((sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&rid, RF_ACTIVE)) != NULL)) {
msi_needed = count = 1;
if (((error = pci_alloc_msix(dev, &count)) != 0) ||
(count != msi_needed)) {
BCE_PRINTF("%s(%d): MSI-X allocation failed! Requested = %d,"
"Received = %d, error = %d\n", __FILE__, __LINE__,
msi_needed, count, error);
count = 0;
pci_release_msi(dev);
bus_release_resource(dev, SYS_RES_MEMORY, rid,
sc->bce_res_irq);
sc->bce_res_irq = NULL;
} else {
DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI-X interrupt.\n",
__FUNCTION__);
sc->bce_flags |= BCE_USING_MSIX_FLAG;
}
}
#endif
/* Try allocating a MSI interrupt. */
if ((sc->bce_cap_flags & BCE_MSI_CAPABLE_FLAG) &&
(bce_msi_enable >= 1) && (count == 0)) {
count = 1;
if ((error = pci_alloc_msi(dev, &count)) != 0) {
BCE_PRINTF("%s(%d): MSI allocation failed! "
"error = %d\n", __FILE__, __LINE__, error);
count = 0;
pci_release_msi(dev);
} else {
DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI "
"interrupt.\n", __FUNCTION__);
sc->bce_flags |= BCE_USING_MSI_FLAG;
if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)
sc->bce_flags |= BCE_ONE_SHOT_MSI_FLAG;
rid = 1;
}
}
/* Try allocating a legacy interrupt. */
if (count == 0) {
DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using INTx interrupt.\n",
__FUNCTION__);
rid = 0;
}
/* Legacy (count == 0) interrupts must be shareable; MSI must not be. */
sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
&rid, RF_ACTIVE | (count != 0 ? 0 : RF_SHAREABLE));
/* Report any IRQ allocation errors. */
if (sc->bce_res_irq == NULL) {
BCE_PRINTF("%s(%d): PCI map interrupt failed!\n",
__FILE__, __LINE__);
rc = ENXIO;
goto bce_attach_fail;
}
/* Initialize mutex for the current device instance. */
BCE_LOCK_INIT(sc, device_get_nameunit(dev));
/*
* Configure byte swap and enable indirect register access.
* Rely on CPU to do target byte swapping on big endian systems.
* Access to registers outside of PCI configuration space are not
* valid until this is done.
*/
pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
/* Save ASIC revision info. */
sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);
/* Weed out any non-production controller revisions. */
switch(BCE_CHIP_ID(sc)) {
case BCE_CHIP_ID_5706_A0:
case BCE_CHIP_ID_5706_A1:
case BCE_CHIP_ID_5708_A0:
case BCE_CHIP_ID_5708_B0:
case BCE_CHIP_ID_5709_A0:
case BCE_CHIP_ID_5709_B0:
case BCE_CHIP_ID_5709_B1:
case BCE_CHIP_ID_5709_B2:
BCE_PRINTF("%s(%d): Unsupported controller "
"revision (%c%d)!\n", __FILE__, __LINE__,
(((pci_read_config(dev, PCIR_REVID, 4) &
0xf0) >> 4) + 'A'), (pci_read_config(dev,
PCIR_REVID, 4) & 0xf));
rc = ENODEV;
goto bce_attach_fail;
}
/*
* The embedded PCIe to PCI-X bridge (EPB)
* in the 5708 cannot address memory above
* 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
*/
if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
else
sc->max_bus_addr = BUS_SPACE_MAXADDR;
/*
* Find the base address for shared memory access.
* Newer versions of bootcode use a signature and offset
* while older versions use a fixed address.
*/
val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
/* Multi-port devices use different offsets in shared memory. */
sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0 +
(pci_get_function(sc->bce_dev) << 2));
else
sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): bce_shmem_base = 0x%08X\n",
__FUNCTION__, sc->bce_shmem_base);
/* Fetch the bootcode revision. */
/* Decode three packed version bytes into a dotted "a.b.c" string,
 * suppressing leading zeroes within each component. */
val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
for (int i = 0, j = 0; i < 3; i++) {
u8 num;
num = (u8) (val >> (24 - (i * 8)));
for (int k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
if (num >= k || !skip0 || k == 1) {
sc->bce_bc_ver[j++] = (num / k) + '0';
skip0 = 0;
}
}
if (i != 2)
sc->bce_bc_ver[j++] = '.';
}
/* Check if any management firmware is enabled. */
val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
/* Allow time for firmware to enter the running state. */
/* Poll up to 30 x 10ms = 300ms. */
for (int i = 0; i < 30; i++) {
val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
if (val & BCE_CONDITION_MFW_RUN_MASK)
break;
DELAY(10000);
}
/* Check if management firmware is running. */
val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
val &= BCE_CONDITION_MFW_RUN_MASK;
if ((val != BCE_CONDITION_MFW_RUN_UNKNOWN) &&
(val != BCE_CONDITION_MFW_RUN_NONE)) {
u32 addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);
int i = 0;
/* Read the management firmware version string. */
for (int j = 0; j < 3; j++) {
val = bce_reg_rd_ind(sc, addr + j * 4);
val = bswap32(val);
memcpy(&sc->bce_mfw_ver[i], &val, 4);
i += 4;
}
} else {
/* May cause firmware synchronization timeouts. */
BCE_PRINTF("%s(%d): Management firmware enabled "
"but not running!\n", __FILE__, __LINE__);
strcpy(sc->bce_mfw_ver, "NOT RUNNING!");
/* ToDo: Any action the driver should take? */
}
}
/* Get PCI bus information (speed and type). */
val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
u32 clkreg;
sc->bce_flags |= BCE_PCIX_FLAG;
clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);
clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
switch (clkreg) {
case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
sc->bus_speed_mhz = 133;
break;
case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
sc->bus_speed_mhz = 100;
break;
case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
sc->bus_speed_mhz = 66;
break;
case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
sc->bus_speed_mhz = 50;
break;
case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
sc->bus_speed_mhz = 33;
break;
}
} else {
if (val & BCE_PCICFG_MISC_STATUS_M66EN)
sc->bus_speed_mhz = 66;
else
sc->bus_speed_mhz = 33;
}
if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
sc->bce_flags |= BCE_PCI_32BIT_FLAG;
/* Find the media type for the adapter. */
bce_get_media(sc);
/* Reset controller and announce to bootcode that driver is present. */
if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
BCE_PRINTF("%s(%d): Controller reset failed!\n",
__FILE__, __LINE__);
rc = ENXIO;
goto bce_attach_fail;
}
/* Initialize the controller. */
if (bce_chipinit(sc)) {
BCE_PRINTF("%s(%d): Controller initialization failed!\n",
__FILE__, __LINE__);
rc = ENXIO;
goto bce_attach_fail;
}
/* Perform NVRAM test. */
if (bce_nvram_test(sc)) {
BCE_PRINTF("%s(%d): NVRAM test failed!\n",
__FILE__, __LINE__);
rc = ENXIO;
goto bce_attach_fail;
}
/* Fetch the permanent Ethernet MAC address. */
bce_get_mac_addr(sc);
/* Update statistics once every second. */
/* 1000000us with the low byte masked off (999936us); presumably the
 * hardware ignores the low bits of this field — TODO confirm. */
sc->bce_stats_ticks = 1000000 & 0xffff00;
/* Store data needed by PHY driver for backplane applications */
sc->bce_shared_hw_cfg = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
sc->bce_port_hw_cfg = bce_shmem_rd(sc, BCE_PORT_HW_CFG_CONFIG);
/* Allocate DMA memory resources. */
if (bce_dma_alloc(dev)) {
BCE_PRINTF("%s(%d): DMA resource allocation failed!\n",
__FILE__, __LINE__);
rc = ENXIO;
goto bce_attach_fail;
}
/* Allocate an ifnet structure. */
ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
/* NOTE(review): the '-' lines below are removed by this diff,
 * presumably because if_alloc(9) can no longer fail — confirm. */
- if (ifp == NULL) {
- BCE_PRINTF("%s(%d): Interface allocation failed!\n",
- __FILE__, __LINE__);
- rc = ENXIO;
- goto bce_attach_fail;
- }
/* Initialize the ifnet interface. */
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setioctlfn(ifp, bce_ioctl);
if_setstartfn(ifp, bce_start);
if_setgetcounterfn(ifp, bce_get_counter);
if_setinitfn(ifp, bce_init);
if_setmtu(ifp, ETHERMTU);
if (bce_tso_enable) {
if_sethwassist(ifp, BCE_IF_HWASSIST | CSUM_TSO);
if_setcapabilities(ifp, BCE_IF_CAPABILITIES | IFCAP_TSO4 |
IFCAP_VLAN_HWTSO);
} else {
if_sethwassist(ifp, BCE_IF_HWASSIST);
if_setcapabilities(ifp, BCE_IF_CAPABILITIES);
}
if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0)
if_setcapabilitiesbit(ifp, IFCAP_LINKSTATE, 0);
if_setcapenable(ifp, if_getcapabilities(ifp));
/*
* Assume standard mbuf sizes for buffer allocation.
* This may change later if the MTU size is set to
* something other than 1500.
*/
bce_get_rx_buffer_sizes(sc,
(ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN));
/* Recalculate our buffer allocation sizes. */
if_setsendqlen(ifp, USABLE_TX_BD_ALLOC);
if_setsendqready(ifp);
if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
if_setbaudrate(ifp, IF_Mbps(2500ULL));
else
if_setbaudrate(ifp, IF_Mbps(1000));
/* Handle any special PHY initialization for SerDes PHYs. */
bce_init_media(sc);
if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) {
/* Remote PHY: manage media in the driver instead of via miibus. */
ifmedia_init(&sc->bce_ifmedia, IFM_IMASK, bce_ifmedia_upd,
bce_ifmedia_sts);
/*
* We can't manually override remote PHY's link and assume
* PHY port configuration(Fiber or TP) is not changed after
* device attach. This may not be correct though.
*/
if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) != 0) {
if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) {
ifmedia_add(&sc->bce_ifmedia,
IFM_ETHER | IFM_2500_SX, 0, NULL);
ifmedia_add(&sc->bce_ifmedia,
IFM_ETHER | IFM_2500_SX | IFM_FDX, 0, NULL);
}
ifmedia_add(&sc->bce_ifmedia,
IFM_ETHER | IFM_1000_SX, 0, NULL);
ifmedia_add(&sc->bce_ifmedia,
IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL);
} else {
ifmedia_add(&sc->bce_ifmedia,
IFM_ETHER | IFM_10_T, 0, NULL);
ifmedia_add(&sc->bce_ifmedia,
IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
ifmedia_add(&sc->bce_ifmedia,
IFM_ETHER | IFM_100_TX, 0, NULL);
ifmedia_add(&sc->bce_ifmedia,
IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
ifmedia_add(&sc->bce_ifmedia,
IFM_ETHER | IFM_1000_T, 0, NULL);
ifmedia_add(&sc->bce_ifmedia,
IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
}
ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&sc->bce_ifmedia, IFM_ETHER | IFM_AUTO);
sc->bce_ifmedia.ifm_media = sc->bce_ifmedia.ifm_cur->ifm_media;
} else {
/* MII child bus by attaching the PHY. */
rc = mii_attach(dev, &sc->bce_miibus, ifp, bce_ifmedia_upd,
bce_ifmedia_sts, BMSR_DEFCAPMASK, sc->bce_phy_addr,
MII_OFFSET_ANY, MIIF_DOPAUSE);
if (rc != 0) {
BCE_PRINTF("%s(%d): attaching PHYs failed\n", __FILE__,
__LINE__);
goto bce_attach_fail;
}
}
/* Attach to the Ethernet interface list. */
ether_ifattach(ifp, sc->eaddr);
callout_init_mtx(&sc->bce_tick_callout, &sc->bce_mtx, 0);
callout_init_mtx(&sc->bce_pulse_callout, &sc->bce_mtx, 0);
/* Hookup IRQ last. */
rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_TYPE_NET | INTR_MPSAFE,
NULL, bce_intr, sc, &sc->bce_intrhand);
if (rc) {
BCE_PRINTF("%s(%d): Failed to setup IRQ!\n",
__FILE__, __LINE__);
/* ether_ifattach() already ran, so a full detach is required. */
bce_detach(dev);
goto bce_attach_exit;
}
/*
* At this point we've acquired all the resources
* we need to run so there's no turning back, we're
* cleared for launch.
*/
/* Print some important debugging info. */
DBRUNMSG(BCE_INFO, bce_dump_driver_state(sc));
/* Add the supported sysctls to the kernel. */
bce_add_sysctls(sc);
BCE_LOCK(sc);
/*
* The chip reset earlier notified the bootcode that
* a driver is present. We now need to start our pulse
* routine so that the bootcode is reminded that we're
* still running.
*/
bce_pulse(sc);
bce_mgmt_init_locked(sc);
BCE_UNLOCK(sc);
/* Finally, print some useful adapter info */
bce_print_adapter_info(sc);
DBPRINT(sc, BCE_FATAL, "%s(): sc = %p\n",
__FUNCTION__, sc);
goto bce_attach_exit;
bce_attach_fail:
bce_release_resources(sc);
bce_attach_exit:
DBEXIT(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
return(rc);
}
/****************************************************************************/
/* Device detach function. */
/* */
/* Stops the controller, resets the controller, and releases resources. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
static int
bce_detach(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	if_t ifp;
	u32 msg;

	DBENTER(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET);

	ifp = sc->bce_ifp;

	/* Quiesce the controller under the lock. */
	BCE_LOCK(sc);
	/* Stop the pulse so the bootcode can go to driver absent state. */
	callout_stop(&sc->bce_pulse_callout);
	bce_stop(sc);
	/* Pick the unload message based on whether WoL is available. */
	msg = (sc->bce_flags & BCE_NO_WOL_FLAG) ?
	    BCE_DRV_MSG_CODE_UNLOAD_LNK_DN : BCE_DRV_MSG_CODE_UNLOAD;
	bce_reset(sc, msg);
	BCE_UNLOCK(sc);

	ether_ifdetach(ifp);

	/* If we have a child device on the MII bus remove it too. */
	if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0)
		ifmedia_removeall(&sc->bce_ifmedia);
	else {
		bus_generic_detach(dev);
		device_delete_child(dev, sc->bce_miibus);
	}

	/* Release all remaining resources. */
	bce_release_resources(sc);

	DBEXIT(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET);
	return (0);
}
/****************************************************************************/
/* Device shutdown function. */
/* */
/* Stops and resets the controller. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
static int
bce_shutdown(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	u32 msg;

	DBENTER(BCE_VERBOSE);

	/* Stop the controller and tell the bootcode we are unloading. */
	BCE_LOCK(sc);
	bce_stop(sc);
	msg = (sc->bce_flags & BCE_NO_WOL_FLAG) ?
	    BCE_DRV_MSG_CODE_UNLOAD_LNK_DN : BCE_DRV_MSG_CODE_UNLOAD;
	bce_reset(sc, msg);
	BCE_UNLOCK(sc);

	DBEXIT(BCE_VERBOSE);
	return (0);
}
#ifdef BCE_DEBUG
/****************************************************************************/
/* Register read. */
/* */
/* Returns: */
/* The value of the register. */
/****************************************************************************/
static u32
bce_reg_rd(struct bce_softc *sc, u32 offset)
{
	u32 val;

	/* Read the register, then trace the access. */
	val = REG_RD(sc, offset);
	DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
	    __FUNCTION__, offset, val);
	return (val);
}
/****************************************************************************/
/* Register write (16 bit). */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_reg_wr16(struct bce_softc *sc, u32 offset, u16 val)
{
	/* Trace the access before touching the hardware. */
	DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%04X\n",
	    __FUNCTION__, offset, val);

	REG_WR16(sc, offset, val);
}
/****************************************************************************/
/* Register write. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_reg_wr(struct bce_softc *sc, u32 offset, u32 val)
{
	/* Trace the access before touching the hardware. */
	DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
	    __FUNCTION__, offset, val);

	REG_WR(sc, offset, val);
}
#endif
/****************************************************************************/
/* Indirect register read. */
/* */
/* Reads NetXtreme II registers using an index/data register pair in PCI */
/* configuration space. Using this mechanism avoids issues with posted */
/* reads but is much slower than memory-mapped I/O. */
/* */
/* Returns: */
/* The value of the register. */
/****************************************************************************/
static u32
bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
{
	device_t dev = sc->bce_dev;
	u32 val;

	/* Point the register window at the target, then read through it. */
	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
	val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
#ifdef BCE_DEBUG
	DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
	    __FUNCTION__, offset, val);
#endif
	return (val);
}
/****************************************************************************/
/* Indirect register write. */
/* */
/* Writes NetXtreme II registers using an index/data register pair in PCI */
/* configuration space. Using this mechanism avoids issues with posted */
/* writes but is much slower than memory-mapped I/O. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
{
	device_t dev = sc->bce_dev;

	DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
	    __FUNCTION__, offset, val);

	/* Point the register window at the target, then write the data. */
	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
}
/****************************************************************************/
/* Shared memory write. */
/* */
/* Writes NetXtreme II shared memory region. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_shmem_wr(struct bce_softc *sc, u32 offset, u32 val)
{
	/* Shared memory lives at bce_shmem_base in the indirect space. */
	u32 shmem_addr = sc->bce_shmem_base + offset;

	DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): Writing 0x%08X to "
	    "0x%08X\n", __FUNCTION__, val, offset);

	bce_reg_wr_ind(sc, shmem_addr, val);
}
/****************************************************************************/
/* Shared memory read. */
/* */
/* Reads NetXtreme II shared memory region. */
/* */
/* Returns: */
/* The 32 bit value read. */
/****************************************************************************/
static u32
bce_shmem_rd(struct bce_softc *sc, u32 offset)
{
	u32 val;

	/* Shared memory lives at bce_shmem_base in the indirect space. */
	val = bce_reg_rd_ind(sc, sc->bce_shmem_base + offset);
	DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): Reading 0x%08X from "
	    "0x%08X\n", __FUNCTION__, val, offset);

	return (val);
}
#ifdef BCE_DEBUG
/****************************************************************************/
/* Context memory read. */
/* */
/* The NetXtreme II controller uses context memory to track connection */
/* information for L2 and higher network protocols. */
/* */
/* Returns: */
/* The requested 32 bit value of context memory. */
/****************************************************************************/
static u32
bce_ctx_rd(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset)
{
u32 idx, offset, retry_cnt = 5, val;
/* Debug builds only: flag unaligned offsets or out-of-range CIDs. */
DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 ||
cid_addr & CTX_MASK), BCE_PRINTF("%s(): Invalid CID "
"address: 0x%08X.\n", __FUNCTION__, cid_addr));
offset = ctx_offset + cid_addr;
if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
/* 5709: post a context read request, then poll until the
 * READ_REQ bit self-clears to signal completion. */
REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_READ_REQ));
for (idx = 0; idx < retry_cnt; idx++) {
val = REG_RD(sc, BCE_CTX_CTX_CTRL);
if ((val & BCE_CTX_CTX_CTRL_READ_REQ) == 0)
break;
DELAY(5);
}
/* Still busy after all retries: report, then read the data
 * register anyway (it may hold stale contents). */
if (val & BCE_CTX_CTX_CTRL_READ_REQ)
BCE_PRINTF("%s(%d); Unable to read CTX memory: "
"cid_addr = 0x%08X, offset = 0x%08X!\n",
__FILE__, __LINE__, cid_addr, ctx_offset);
val = REG_RD(sc, BCE_CTX_CTX_DATA);
} else {
/* Older chips: latch the context address, then read the data. */
REG_WR(sc, BCE_CTX_DATA_ADR, offset);
val = REG_RD(sc, BCE_CTX_DATA);
}
DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
"val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, val);
return(val);
}
#endif
/****************************************************************************/
/* Context memory write. */
/* */
/* The NetXtreme II controller uses context memory to track connection */
/* information for L2 and higher network protocols. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset, u32 ctx_val)
{
u32 idx, offset = ctx_offset + cid_addr;
u32 val, retry_cnt = 5;
DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
"val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, ctx_val);
/* Debug builds only: flag unaligned offsets or out-of-range CIDs. */
DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || cid_addr & CTX_MASK),
BCE_PRINTF("%s(): Invalid CID address: 0x%08X.\n",
__FUNCTION__, cid_addr));
if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
/* 5709: load the data register first, post a write request,
 * then poll until the WRITE_REQ bit self-clears. */
REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));
for (idx = 0; idx < retry_cnt; idx++) {
val = REG_RD(sc, BCE_CTX_CTX_CTRL);
if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
break;
DELAY(5);
}
/* Still busy after all retries: the write may not have landed. */
if (val & BCE_CTX_CTX_CTRL_WRITE_REQ)
BCE_PRINTF("%s(%d); Unable to write CTX memory: "
"cid_addr = 0x%08X, offset = 0x%08X!\n",
__FILE__, __LINE__, cid_addr, ctx_offset);
} else {
/* Older chips: latch the context address, then write the data. */
REG_WR(sc, BCE_CTX_DATA_ADR, offset);
REG_WR(sc, BCE_CTX_DATA, ctx_val);
}
}
/****************************************************************************/
/* PHY register read. */
/* */
/* Implements register reads on the MII bus. */
/* */
/* Returns: */
/* The value of the register. */
/****************************************************************************/
static int
bce_miibus_read_reg(device_t dev, int phy, int reg)
{
struct bce_softc *sc;
u32 val;
int i;
sc = device_get_softc(dev);
/*
* The 5709S PHY is an IEEE Clause 45 PHY
* with special mappings to work with IEEE
* Clause 22 register accesses.
*/
if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
reg += 0x10;
}
/* Hardware autopolling competes with software MDIO access, so
 * temporarily disable it before issuing the read. */
if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
/* Read back to flush the posted write, then let it settle. */
REG_RD(sc, BCE_EMAC_MDIO_MODE);
DELAY(40);
}
/* Build and launch the MDIO read; START_BUSY self-clears on done. */
val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
BCE_EMAC_MDIO_COMM_START_BUSY;
REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
DELAY(10);
val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
DELAY(5);
/* Completed: keep only the 16-bit data field. */
val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
val &= BCE_EMAC_MDIO_COMM_DATA;
break;
}
}
/* After the DATA mask above START_BUSY can only remain set when the
 * poll loop timed out without ever seeing the transaction finish. */
if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
BCE_PRINTF("%s(%d): Error: PHY read timeout! phy = %d, "
"reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
val = 0x0;
} else {
val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
}
/* Restore autopolling if it was enabled on entry. */
if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
REG_RD(sc, BCE_EMAC_MDIO_MODE);
DELAY(40);
}
DB_PRINT_PHY_REG(reg, val);
return (val & 0xffff);
}
/****************************************************************************/
/* PHY register write. */
/* */
/* Implements register writes on the MII bus. */
/* */
/* Returns: */
/* 0 for success (the write is not verified). */
/****************************************************************************/
static int
bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
struct bce_softc *sc;
u32 val1;
int i;
sc = device_get_softc(dev);
DB_PRINT_PHY_REG(reg, val);
/*
* The 5709S PHY is an IEEE Clause 45 PHY
* with special mappings to work with IEEE
* Clause 22 register accesses.
*/
if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
reg += 0x10;
}
/* Hardware autopolling competes with software MDIO access, so
 * temporarily disable it before issuing the write. */
if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
/* Read back to flush the posted write, then let it settle. */
REG_RD(sc, BCE_EMAC_MDIO_MODE);
DELAY(40);
}
/* Build and launch the MDIO write; START_BUSY self-clears on done. */
val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
DELAY(10);
val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
DELAY(5);
break;
}
}
if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
BCE_PRINTF("%s(%d): PHY write timeout!\n",
__FILE__, __LINE__);
/* Restore autopolling if it was enabled on entry. */
if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
REG_RD(sc, BCE_EMAC_MDIO_MODE);
DELAY(40);
}
/* Always reports success, even on timeout. */
return 0;
}
/****************************************************************************/
/* MII bus status change. */
/* */
/* Called by the MII bus driver when the PHY establishes link to set the */
/* MAC interface registers. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_miibus_statchg(device_t dev)
{
struct bce_softc *sc;
struct mii_data *mii;
struct ifmediareq ifmr;
int media_active, media_status, val;
sc = device_get_softc(dev);
DBENTER(BCE_VERBOSE_PHY);
/* Fetch the current media state, either from the remote-PHY
 * firmware interface or from the local MII layer. */
if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) {
bzero(&ifmr, sizeof(ifmr));
bce_ifmedia_sts_rphy(sc, &ifmr);
media_active = ifmr.ifm_active;
media_status = ifmr.ifm_status;
} else {
mii = device_get_softc(sc->bce_miibus);
media_active = mii->mii_media_active;
media_status = mii->mii_media_status;
}
/* Ignore invalid media status. */
if ((media_status & (IFM_ACTIVE | IFM_AVALID)) !=
(IFM_ACTIVE | IFM_AVALID))
goto bce_miibus_statchg_exit;
/* Start from EMAC_MODE with all link-dependent bits cleared. */
val = REG_RD(sc, BCE_EMAC_MODE);
val &= ~(BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX |
BCE_EMAC_MODE_MAC_LOOP | BCE_EMAC_MODE_FORCE_LINK |
BCE_EMAC_MODE_25G);
/* Set MII or GMII interface based on the PHY speed. */
switch (IFM_SUBTYPE(media_active)) {
case IFM_10_T:
/* 5706 lacks a dedicated 10Mb MII mode and falls through. */
if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
DBPRINT(sc, BCE_INFO_PHY,
"Enabling 10Mb interface.\n");
val |= BCE_EMAC_MODE_PORT_MII_10;
break;
}
/* fall-through */
case IFM_100_TX:
DBPRINT(sc, BCE_INFO_PHY, "Enabling MII interface.\n");
val |= BCE_EMAC_MODE_PORT_MII;
break;
case IFM_2500_SX:
/* 2.5G adds the 25G mode bit on top of GMII. */
DBPRINT(sc, BCE_INFO_PHY, "Enabling 2.5G MAC mode.\n");
val |= BCE_EMAC_MODE_25G;
/* fall-through */
case IFM_1000_T:
case IFM_1000_SX:
DBPRINT(sc, BCE_INFO_PHY, "Enabling GMII interface.\n");
val |= BCE_EMAC_MODE_PORT_GMII;
break;
default:
DBPRINT(sc, BCE_INFO_PHY, "Unknown link speed, enabling "
"default GMII interface.\n");
val |= BCE_EMAC_MODE_PORT_GMII;
}
/* Set half or full duplex based on PHY settings. */
if ((IFM_OPTIONS(media_active) & IFM_FDX) == 0) {
DBPRINT(sc, BCE_INFO_PHY,
"Setting Half-Duplex interface.\n");
val |= BCE_EMAC_MODE_HALF_DUPLEX;
} else
DBPRINT(sc, BCE_INFO_PHY,
"Setting Full-Duplex interface.\n");
REG_WR(sc, BCE_EMAC_MODE, val);
/* Program RX flow control to match the negotiated pause settings. */
if ((IFM_OPTIONS(media_active) & IFM_ETH_RXPAUSE) != 0) {
DBPRINT(sc, BCE_INFO_PHY,
"%s(): Enabling RX flow control.\n", __FUNCTION__);
BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
sc->bce_flags |= BCE_USING_RX_FLOW_CONTROL;
} else {
DBPRINT(sc, BCE_INFO_PHY,
"%s(): Disabling RX flow control.\n", __FUNCTION__);
BCE_CLRBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
sc->bce_flags &= ~BCE_USING_RX_FLOW_CONTROL;
}
/* Program TX flow control likewise. */
if ((IFM_OPTIONS(media_active) & IFM_ETH_TXPAUSE) != 0) {
DBPRINT(sc, BCE_INFO_PHY,
"%s(): Enabling TX flow control.\n", __FUNCTION__);
BCE_SETBIT(sc, BCE_EMAC_TX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
sc->bce_flags |= BCE_USING_TX_FLOW_CONTROL;
} else {
DBPRINT(sc, BCE_INFO_PHY,
"%s(): Disabling TX flow control.\n", __FUNCTION__);
BCE_CLRBIT(sc, BCE_EMAC_TX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
sc->bce_flags &= ~BCE_USING_TX_FLOW_CONTROL;
}
/* ToDo: Update watermarks in bce_init_rx_context(). */
bce_miibus_statchg_exit:
DBEXIT(BCE_VERBOSE_PHY);
}
/****************************************************************************/
/* Acquire NVRAM lock. */
/* */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
/* Locks 0 and 1 are reserved (lock 1 is used by the firmware); lock 2 */
/* is for use by the driver. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
static int
bce_acquire_nvram_lock(struct bce_softc *sc)
{
	u32 arb;
	int count, rc = 0;

	DBENTER(BCE_VERBOSE_NVRAM);

	/* Request the driver's NVRAM arbitration lock (lock 2). */
	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);

	/* Poll until the hardware grants the lock or we give up. */
	for (count = 0; count < NVRAM_TIMEOUT_COUNT; count++) {
		arb = REG_RD(sc, BCE_NVM_SW_ARB);
		if (arb & BCE_NVM_SW_ARB_ARB_ARB2)
			break;
		DELAY(5);
	}

	if (count >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
		rc = EBUSY;
	}

	DBEXIT(BCE_VERBOSE_NVRAM);
	return (rc);
}
/****************************************************************************/
/* Release NVRAM lock. */
/* */
/* When the caller is finished accessing NVRAM the lock must be released. */
/* Locks 0 and 1 are reserved (lock 1 is used by the firmware); lock 2 */
/* is for use by the driver. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
static int
bce_release_nvram_lock(struct bce_softc *sc)
{
	u32 arb;
	int count, rc = 0;

	DBENTER(BCE_VERBOSE_NVRAM);

	/* Hand the driver's arbitration lock (lock 2) back to hardware. */
	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);

	/* Poll until the grant bit clears or we give up. */
	for (count = 0; count < NVRAM_TIMEOUT_COUNT; count++) {
		arb = REG_RD(sc, BCE_NVM_SW_ARB);
		if (!(arb & BCE_NVM_SW_ARB_ARB_ARB2))
			break;
		DELAY(5);
	}

	if (count >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n");
		rc = EBUSY;
	}

	DBEXIT(BCE_VERBOSE_NVRAM);
	return (rc);
}
#ifdef BCE_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Enable NVRAM write access. */
/* */
/* Before writing to NVRAM the caller must enable NVRAM writes. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
static int
bce_enable_nvram_write(struct bce_softc *sc)
{
	u32 val;
	int rc = 0;

	DBENTER(BCE_VERBOSE_NVRAM);

	/* Allow PCI-initiated writes to reach the NVRAM interface. */
	val = REG_RD(sc, BCE_MISC_CFG);
	REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);

	/* Non-buffered flash additionally needs an explicit WREN command. */
	if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
		int j;

		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);

		/* Wait for the WREN command to complete. */
		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			DELAY(5);
			val = REG_RD(sc, BCE_NVM_COMMAND);
			if (val & BCE_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT) {
			DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
			rc = EBUSY;
		}
	}

	/* Fix: this exit path used DBENTER(), which skewed the debug
	 * entry/exit trace nesting; it must be DBEXIT(). */
	DBEXIT(BCE_VERBOSE_NVRAM);
	return (rc);
}
/****************************************************************************/
/* Disable NVRAM write access. */
/* */
/* When the caller is finished writing to NVRAM write access must be */
/* disabled. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_disable_nvram_write(struct bce_softc *sc)
{
	u32 misc_cfg;

	DBENTER(BCE_VERBOSE_NVRAM);

	/* Clear the NVRAM write-enable bit (re-arm write protection). */
	misc_cfg = REG_RD(sc, BCE_MISC_CFG);
	REG_WR(sc, BCE_MISC_CFG, misc_cfg & ~BCE_MISC_CFG_NVM_WR_EN);

	DBEXIT(BCE_VERBOSE_NVRAM);
}
#endif
/****************************************************************************/
/* Enable NVRAM access. */
/* */
/* Before accessing NVRAM for read or write operations the caller must */
/* enable NVRAM access. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_enable_nvram_access(struct bce_softc *sc)
{
	u32 access;

	DBENTER(BCE_VERBOSE_NVRAM);

	access = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
	    access | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);

	DBEXIT(BCE_VERBOSE_NVRAM);
}
/****************************************************************************/
/* Disable NVRAM access. */
/* */
/* When the caller is finished accessing NVRAM access must be disabled. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_disable_nvram_access(struct bce_softc *sc)
{
	u32 access;

	DBENTER(BCE_VERBOSE_NVRAM);

	access = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
	/* Disable both bits, even after read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
	    access & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));

	DBEXIT(BCE_VERBOSE_NVRAM);
}
#ifdef BCE_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Erase NVRAM page before writing. */
/* */
/* Non-buffered flash parts require that a page be erased before it is */
/* written. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
static int
bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
{
	int count, rc = 0;

	DBENTER(BCE_VERBOSE_NVRAM);

	/* Buffered flash doesn't require an erase. */
	if (sc->bce_flash_info->flags & BCE_NV_BUFFERED)
		goto bce_nvram_erase_page_exit;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_ERASE |
	    BCE_NVM_COMMAND_WR | BCE_NVM_COMMAND_DOIT);

	/* Wait for completion. */
	for (count = 0; count < NVRAM_TIMEOUT_COUNT; count++) {
		DELAY(5);
		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
			break;
	}

	if (count >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
		rc = EBUSY;
	}

bce_nvram_erase_page_exit:
	DBEXIT(BCE_VERBOSE_NVRAM);
	return (rc);
}
#endif /* BCE_NVRAM_WRITE_SUPPORT */
/****************************************************************************/
/* Read a dword (32 bits) from NVRAM. */
/* */
/* Read a 32 bit word from NVRAM. The caller is assumed to have already */
/* obtained the NVRAM lock and enabled the controller for NVRAM access. */
/* */
/* Returns: */
/* 0 on success and the 32 bit value read, positive value on failure. */
/****************************************************************************/
static int
bce_nvram_read_dword(struct bce_softc *sc,
u32 offset, u8 *ret_val, u32 cmd_flags)
{
u32 cmd;
int i, rc = 0;
DBENTER(BCE_EXTREME_NVRAM);
/* Build the command word. */
cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
/* Calculate the offset for buffered flash if translation is used. */
if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
offset = ((offset / sc->bce_flash_info->page_size) <<
sc->bce_flash_info->page_bits) +
(offset % sc->bce_flash_info->page_size);
}
/*
* Clear the DONE bit separately, set the address to read,
* and issue the read.
*/
REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
REG_WR(sc, BCE_NVM_COMMAND, cmd);
/* Wait for completion. */
for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
u32 val;
DELAY(5);
val = REG_RD(sc, BCE_NVM_COMMAND);
if (val & BCE_NVM_COMMAND_DONE) {
val = REG_RD(sc, BCE_NVM_READ);
/* NVRAM contents are big-endian; convert, then hand the
 * caller the four raw bytes via memcpy (ret_val need not
 * be dword-aligned). */
val = bce_be32toh(val);
memcpy(ret_val, &val, 4);
break;
}
}
/* Check for errors. */
if (i >= NVRAM_TIMEOUT_COUNT) {
BCE_PRINTF("%s(%d): Timeout error reading NVRAM at "
"offset 0x%08X!\n", __FILE__, __LINE__, offset);
rc = EBUSY;
}
DBEXIT(BCE_EXTREME_NVRAM);
return(rc);
}
#ifdef BCE_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Write a dword (32 bits) to NVRAM. */
/* */
/* Write a 32 bit word to NVRAM. The caller is assumed to have already */
/* obtained the NVRAM lock, enabled the controller for NVRAM access, and */
/* enabled NVRAM write access. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
static int
bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
u32 cmd_flags)
{
u32 cmd, val32;
int j, rc = 0;
DBENTER(BCE_VERBOSE_NVRAM);
/* Build the command word. */
cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
/* Calculate the offset for buffered flash if translation is used. */
if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
offset = ((offset / sc->bce_flash_info->page_size) <<
sc->bce_flash_info->page_bits) +
(offset % sc->bce_flash_info->page_size);
}
/*
* Clear the DONE bit separately, convert NVRAM data to big-endian,
* set the NVRAM address to write, and issue the write command
*/
REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
/* memcpy tolerates an unaligned source buffer. */
memcpy(&val32, val, 4);
val32 = htobe32(val32);
REG_WR(sc, BCE_NVM_WRITE, val32);
REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
REG_WR(sc, BCE_NVM_COMMAND, cmd);
/* Wait for completion. */
for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
DELAY(5);
if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
break;
}
if (j >= NVRAM_TIMEOUT_COUNT) {
BCE_PRINTF("%s(%d): Timeout error writing NVRAM at "
"offset 0x%08X\n", __FILE__, __LINE__, offset);
rc = EBUSY;
}
DBEXIT(BCE_VERBOSE_NVRAM);
return (rc);
}
#endif /* BCE_NVRAM_WRITE_SUPPORT */
/****************************************************************************/
/* Initialize NVRAM access. */
/* */
/* Identify the NVRAM device in use and prepare the NVRAM interface to */
/* access that device. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
static int
bce_init_nvram(struct bce_softc *sc)
{
u32 val;
int j, entry_count, rc = 0;
const struct flash_spec *flash;
DBENTER(BCE_VERBOSE_NVRAM);
/* 5709 always uses a fixed flash description; skip detection. */
if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
sc->bce_flash_info = &flash_5709;
goto bce_init_nvram_get_flash_size;
}
/* Determine the selected interface. */
val = REG_RD(sc, BCE_NVM_CFG1);
entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
/*
* Flash reconfiguration is required to support additional
* NVRAM devices not directly supported in hardware.
* Check if the flash interface was reconfigured
* by the bootcode.
*/
if (val & 0x40000000) {
/* Flash interface reconfigured by bootcode. */
DBPRINT(sc,BCE_INFO_LOAD,
"bce_init_nvram(): Flash WAS reconfigured.\n");
/* Match on the backup strap bits already programmed in CFG1. */
for (j = 0, flash = &flash_table[0]; j < entry_count;
j++, flash++) {
if ((val & FLASH_BACKUP_STRAP_MASK) ==
(flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
sc->bce_flash_info = flash;
break;
}
}
} else {
/* Flash interface not yet reconfigured. */
u32 mask;
DBPRINT(sc, BCE_INFO_LOAD, "%s(): Flash was NOT reconfigured.\n",
__FUNCTION__);
/* Bit 23 selects which strap field identifies the device. */
if (val & (1 << 23))
mask = FLASH_BACKUP_STRAP_MASK;
else
mask = FLASH_STRAP_MASK;
/* Look for the matching NVRAM device configuration data. */
for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
/* Check if the device matches any of the known devices. */
if ((val & mask) == (flash->strapping & mask)) {
/* Found a device match. */
sc->bce_flash_info = flash;
/* Request access to the flash interface. */
/* NOTE(review): this early return skips DBEXIT(), so the
 * debug entry/exit trace is left unbalanced on failure. */
if ((rc = bce_acquire_nvram_lock(sc)) != 0)
return rc;
/* Reconfigure the flash interface. */
bce_enable_nvram_access(sc);
REG_WR(sc, BCE_NVM_CFG1, flash->config1);
REG_WR(sc, BCE_NVM_CFG2, flash->config2);
REG_WR(sc, BCE_NVM_CFG3, flash->config3);
REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
bce_disable_nvram_access(sc);
bce_release_nvram_lock(sc);
break;
}
}
}
/* Check if a matching device was found. */
if (j == entry_count) {
sc->bce_flash_info = NULL;
BCE_PRINTF("%s(%d): Unknown Flash NVRAM found!\n",
__FILE__, __LINE__);
DBEXIT(BCE_VERBOSE_NVRAM);
return (ENODEV);
}
bce_init_nvram_get_flash_size:
/* Write the flash config data to the shared memory interface. */
val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2);
val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
/* Prefer the size the bootcode reports; fall back to the table. */
if (val)
sc->bce_flash_size = val;
else
sc->bce_flash_size = sc->bce_flash_info->total_size;
DBPRINT(sc, BCE_INFO_LOAD, "%s(): Found %s, size = 0x%08X\n",
__FUNCTION__, sc->bce_flash_info->name,
sc->bce_flash_info->total_size);
DBEXIT(BCE_VERBOSE_NVRAM);
return rc;
}
/****************************************************************************/
/* Read an arbitrary range of data from NVRAM. */
/* */
/* Prepares the NVRAM interface for access and reads the requested data */
/* into the supplied buffer. */
/* */
/* Returns: */
/* 0 on success and the data read, positive value on failure. */
/****************************************************************************/
static int
bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
    int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	DBENTER(BCE_VERBOSE_NVRAM);

	if (buf_size == 0)
		goto bce_nvram_read_exit;

	/* Request access to the flash interface. */
	if ((rc = bce_acquire_nvram_lock(sc)) != 0)
		goto bce_nvram_read_exit;

	/* Enable access to flash interface */
	bce_enable_nvram_access(sc);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	/* Handle a read that starts on a non-dword boundary: read the
	 * containing dword into a bounce buffer and copy the tail. */
	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BCE_NVM_COMMAND_FIRST;
		}

		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);

		/*
		 * Fix: on error, fall through to the locked-exit path so
		 * flash access is disabled and the NVRAM arbitration lock
		 * is released, instead of returning with both still held.
		 */
		if (rc)
			goto bce_nvram_read_locked_exit;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}

	/* Round a ragged tail up to a full dword; the last 'extra'
	 * bytes of the final dword are discarded below. */
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		if (cmd_flags)
			cmd_flags = BCE_NVM_COMMAND_LAST;
		else
			cmd_flags = BCE_NVM_COMMAND_FIRST |
			    BCE_NVM_COMMAND_LAST;

		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BCE_NVM_COMMAND_FIRST;

		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		/* Stream full dwords directly into the caller's buffer. */
		while (len32 > 4 && rc == 0) {
			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			goto bce_nvram_read_locked_exit;

		/* Read the final dword into a bounce buffer and trim the
		 * 'extra' padding bytes off the copy. */
		cmd_flags = BCE_NVM_COMMAND_LAST;
		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

bce_nvram_read_locked_exit:
	/* Disable access to flash interface and release the lock. */
	bce_disable_nvram_access(sc);
	bce_release_nvram_lock(sc);

bce_nvram_read_exit:
	DBEXIT(BCE_VERBOSE_NVRAM);
	return rc;
}
#ifdef BCE_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Write an arbitrary range of data from NVRAM. */
/* */
/* Prepares the NVRAM interface for write access and writes the requested */
/* data from the supplied buffer. The caller is responsible for */
/* calculating any appropriate CRCs. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
static int
bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf,
    int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4];
	int rc = 0;
	int align_start, align_end;

	DBENTER(BCE_VERBOSE_NVRAM);

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Widen the range to dword alignment at the front, preserving
	 * the existing bytes that precede the caller's data. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if ((rc = bce_nvram_read(sc, offset32, start, 4)))
			goto bce_nvram_write_exit;
	}

	/* Likewise widen the tail to a full dword. */
	if (len32 & 3) {
		if ((len32 > 4) || !align_start) {
			align_end = 4 - (len32 & 3);
			len32 += align_end;
			if ((rc = bce_nvram_read(sc, offset32 + len32 - 4,
			    end, 4))) {
				goto bce_nvram_write_exit;
			}
		}
	}

	/* Merge the caller's data with the preserved edge bytes into a
	 * temporary dword-aligned buffer. */
	if (align_start || align_end) {
		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
		if (buf == NULL) {
			rc = ENOMEM;
			goto bce_nvram_write_exit;
		}
		if (align_start) {
			memcpy(buf, start, 4);
		}
		if (align_end) {
			memcpy(buf + len32 - 4, end, 4);
		}
		memcpy(buf + align_start, data_buf, buf_size);
	}

	/* Process one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;
		u8 flash_buffer[264];

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % sc->bce_flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + sc->bce_flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
		    (offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bce_acquire_nvram_lock(sc)) != 0)
			goto bce_nvram_write_exit;

		/* Enable access to flash interface */
		bce_enable_nvram_access(sc);

		cmd_flags = BCE_NVM_COMMAND_FIRST;
		if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
				if (j == (sc->bce_flash_info->page_size - 4)) {
					cmd_flags |= BCE_NVM_COMMAND_LAST;
				}
				rc = bce_nvram_read_dword(sc,
				    page_start + j,
				    &flash_buffer[j],
				    cmd_flags);
				if (rc)
					goto bce_nvram_write_locked_exit;
				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bce_enable_nvram_write(sc)) != 0)
			goto bce_nvram_write_locked_exit;

		/* Erase the page */
		if ((rc = bce_nvram_erase_page(sc, page_start)) != 0)
			goto bce_nvram_write_locked_exit;

		/* Re-enable the write again for the actual write */
		bce_enable_nvram_write(sc);

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
			for (addr = page_start; addr < data_start;
			    addr += 4, i += 4) {
				rc = bce_nvram_write_dword(sc, addr,
				    &flash_buffer[i], cmd_flags);
				if (rc != 0)
					goto bce_nvram_write_locked_exit;
				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end.
		 * Fix: advance the flash_buffer byte index 'i' by 4 (one
		 * dword) per iteration, matching the two write-back loops,
		 * so the trailing restore below uses the correct offsets
		 * (previously 'i++', which skewed the index on non-buffered
		 * flash; the Linux bnx2 driver uses 'i += 4' here). */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
			    ((sc->bce_flash_info->flags & BCE_NV_BUFFERED) &&
			    (addr == data_end - 4))) {
				cmd_flags |= BCE_NVM_COMMAND_LAST;
			}
			rc = bce_nvram_write_dword(sc, addr, buf,
			    cmd_flags);
			if (rc != 0)
				goto bce_nvram_write_locked_exit;
			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
			    addr += 4, i += 4) {
				if (addr == page_end-4) {
					cmd_flags = BCE_NVM_COMMAND_LAST;
				}
				rc = bce_nvram_write_dword(sc, addr,
				    &flash_buffer[i], cmd_flags);
				if (rc != 0)
					goto bce_nvram_write_locked_exit;
				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bce_disable_nvram_write(sc);

		/* Disable access to flash interface */
		bce_disable_nvram_access(sc);
		bce_release_nvram_lock(sc);

		/* Increment written */
		written += data_end - data_start;
	}

	goto bce_nvram_write_exit;

bce_nvram_write_locked_exit:
	bce_disable_nvram_write(sc);
	bce_disable_nvram_access(sc);
	bce_release_nvram_lock(sc);

bce_nvram_write_exit:
	if (align_start || align_end)
		free(buf, M_DEVBUF);

	DBEXIT(BCE_VERBOSE_NVRAM);
	return (rc);
}
#endif /* BCE_NVRAM_WRITE_SUPPORT */
/****************************************************************************/
/* Verifies that NVRAM is accessible and contains valid data. */
/* */
/* Reads the configuration data from NVRAM and verifies that the CRC is */
/* correct. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
static int
bce_nvram_test(struct bce_softc *sc)
{
	u32 buf[BCE_NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	DBENTER(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);

	/*
	 * Check that the device NVRAM is valid by reading
	 * the magic value at offset 0.
	 */
	if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0) {
		BCE_PRINTF("%s(%d): Unable to read NVRAM!\n",
		    __FILE__, __LINE__);
		goto bce_nvram_test_exit;
	}

	/*
	 * Verify that offset 0 of the NVRAM contains
	 * a valid magic number.
	 */
	magic = bce_be32toh(buf[0]);
	if (magic != BCE_NVRAM_MAGIC) {
		rc = ENODEV;
		BCE_PRINTF("%s(%d): Invalid NVRAM magic value! "
		    "Expected: 0x%08X, Found: 0x%08X\n",
		    __FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
		goto bce_nvram_test_exit;
	}

	/*
	 * Verify that the device NVRAM includes valid
	 * configuration data.
	 */
	if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0) {
		BCE_PRINTF("%s(%d): Unable to read manufacturing "
		    "Information from NVRAM!\n", __FILE__, __LINE__);
		goto bce_nvram_test_exit;
	}

	/* Manufacturing section CRC must leave the well-known residual. */
	csum = ether_crc32_le(data, 0x100);
	if (csum != BCE_CRC32_RESIDUAL) {
		rc = ENODEV;
		BCE_PRINTF("%s(%d): Invalid manufacturing information "
		    "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
		    __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
		goto bce_nvram_test_exit;
	}

	/* Feature configuration section CRC check; note: the "Found"
	 * format specifier was previously "08%08X" (typo for "0x%08X"). */
	csum = ether_crc32_le(data + 0x100, 0x100);
	if (csum != BCE_CRC32_RESIDUAL) {
		rc = ENODEV;
		BCE_PRINTF("%s(%d): Invalid feature configuration "
		    "information NVRAM CRC! Expected: 0x%08X, "
		    "Found: 0x%08X\n", __FILE__, __LINE__,
		    BCE_CRC32_RESIDUAL, csum);
	}

bce_nvram_test_exit:
	DBEXIT(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
	return rc;
}
/****************************************************************************/
/* Calculates the size of the buffers to allocate based on the MTU. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_get_rx_buffer_sizes(struct bce_softc *sc, int mtu)
{
DBENTER(BCE_VERBOSE_LOAD);
/* Use a single allocation type when header splitting enabled. */
if (bce_hdr_split == TRUE) {
sc->rx_bd_mbuf_alloc_size = MHLEN;
/* Make sure offset is 16 byte aligned for hardware. */
sc->rx_bd_mbuf_align_pad =
roundup2(MSIZE - MHLEN, 16) - (MSIZE - MHLEN);
sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
sc->rx_bd_mbuf_align_pad;
} else {
if ((mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
ETHER_CRC_LEN) > MCLBYTES) {
/* Setup for jumbo RX buffer allocations. */
sc->rx_bd_mbuf_alloc_size = MJUM9BYTES;
sc->rx_bd_mbuf_align_pad =
roundup2(MJUM9BYTES, 16) - MJUM9BYTES;
sc->rx_bd_mbuf_data_len =
sc->rx_bd_mbuf_alloc_size -
sc->rx_bd_mbuf_align_pad;
} else {
/* Setup for standard RX buffer allocations. */
sc->rx_bd_mbuf_alloc_size = MCLBYTES;
sc->rx_bd_mbuf_align_pad =
roundup2(MCLBYTES, 16) - MCLBYTES;
sc->rx_bd_mbuf_data_len =
sc->rx_bd_mbuf_alloc_size -
sc->rx_bd_mbuf_align_pad;
}
}
// DBPRINT(sc, BCE_INFO_LOAD,
DBPRINT(sc, BCE_WARN,
"%s(): rx_bd_mbuf_alloc_size = %d, rx_bd_mbuf_data_len = %d, "
"rx_bd_mbuf_align_pad = %d\n", __FUNCTION__,
sc->rx_bd_mbuf_alloc_size, sc->rx_bd_mbuf_data_len,
sc->rx_bd_mbuf_align_pad);
DBEXIT(BCE_VERBOSE_LOAD);
}
/****************************************************************************/
/* Identifies the current media type of the controller and sets the PHY */
/* address. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_get_media(struct bce_softc *sc)
{
u32 val;
DBENTER(BCE_VERBOSE_PHY);
/* Assume PHY address for copper controllers. */
sc->bce_phy_addr = 1;
if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
u32 val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
u32 bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
u32 strap;
/*
* The BCM5709S is software configurable
* for Copper or SerDes operation.
*/
if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded "
"for copper.\n");
goto bce_get_media_exit;
} else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded "
"for dual media.\n");
sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
goto bce_get_media_exit;
}
if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
strap = (val &
BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
else
strap = (val &
BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
if (pci_get_function(sc->bce_dev) == 0) {
switch (strap) {
case 0x4:
case 0x5:
case 0x6:
DBPRINT(sc, BCE_INFO_LOAD,
"BCM5709 s/w configured for SerDes.\n");
sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
break;
default:
DBPRINT(sc, BCE_INFO_LOAD,
"BCM5709 s/w configured for Copper.\n");
break;
}
} else {
switch (strap) {
case 0x1:
case 0x2:
case 0x4:
DBPRINT(sc, BCE_INFO_LOAD,
"BCM5709 s/w configured for SerDes.\n");
sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
break;
default:
DBPRINT(sc, BCE_INFO_LOAD,
"BCM5709 s/w configured for Copper.\n");
break;
}
}
} else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT)
sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
sc->bce_flags |= BCE_NO_WOL_FLAG;
if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)
sc->bce_phy_flags |= BCE_PHY_IEEE_CLAUSE_45_FLAG;
if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
/* 5708S/09S/16S use a separate PHY for SerDes. */
sc->bce_phy_addr = 2;
val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
if (val & BCE_SHARED_HW_CFG_PHY_2_5G) {
sc->bce_phy_flags |=
BCE_PHY_2_5G_CAPABLE_FLAG;
DBPRINT(sc, BCE_INFO_LOAD, "Found 2.5Gb "
"capable adapter\n");
}
}
} else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708))
sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;
bce_get_media_exit:
DBPRINT(sc, (BCE_INFO_LOAD | BCE_INFO_PHY),
"Using PHY address %d.\n", sc->bce_phy_addr);
DBEXIT(BCE_VERBOSE_PHY);
}
/****************************************************************************/
/* Performs PHY initialization required before MII drivers access the */
/* device. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_init_media(struct bce_softc *sc)
{
	/*
	 * Only run for IEEE Clause 45 PHYs (5709S/5716S) that are NOT
	 * remotely managed; all other PHYs need no setup before mii(4)
	 * attaches.
	 */
	if ((sc->bce_phy_flags & (BCE_PHY_IEEE_CLAUSE_45_FLAG |
	    BCE_PHY_REMOTE_CAP_FLAG)) == BCE_PHY_IEEE_CLAUSE_45_FLAG) {
		/*
		 * Configure 5709S/5716S PHYs to use traditional IEEE
		 * Clause 22 method. Otherwise we have no way to attach
		 * the PHY in mii(4) layer. PHY specific configuration
		 * is done in mii layer.
		 */

		/* Select auto-negotiation MMD of the PHY. */
		bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr,
		    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT);
		bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr,
		    BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD);

		/* Set IEEE0 block of AN MMD (assumed in brgphy(4) code). */
		bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr,
		    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0);
	}
}
/****************************************************************************/
/* Free any DMA memory owned by the driver. */
/* */
/* Scans through each data structure that requires DMA memory and frees */
/* the memory if allocated. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_dma_free(struct bce_softc *sc)
{
	int i;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX);

	/*
	 * Each resource is torn down in the mandatory busdma order:
	 * bus_dmamap_unload() first, then bus_dmamem_free() (which also
	 * releases the map created by bus_dmamem_alloc()), and finally
	 * bus_dma_tag_destroy().  Every pointer/paddr is cleared after
	 * release so this function is safe to call on a partially
	 * initialized softc (e.g. from a failed bce_dma_alloc()).
	 */

	/* Free, unmap, and destroy the status block. */
	if (sc->status_block_paddr != 0) {
		bus_dmamap_unload(sc->status_tag, sc->status_map);
		sc->status_block_paddr = 0;
	}

	if (sc->status_block != NULL) {
		bus_dmamem_free(sc->status_tag, sc->status_block,
		    sc->status_map);
		sc->status_block = NULL;
	}

	if (sc->status_tag != NULL) {
		bus_dma_tag_destroy(sc->status_tag);
		sc->status_tag = NULL;
	}

	/* Free, unmap, and destroy the statistics block. */
	if (sc->stats_block_paddr != 0) {
		bus_dmamap_unload(sc->stats_tag, sc->stats_map);
		sc->stats_block_paddr = 0;
	}

	if (sc->stats_block != NULL) {
		bus_dmamem_free(sc->stats_tag, sc->stats_block,
		    sc->stats_map);
		sc->stats_block = NULL;
	}

	if (sc->stats_tag != NULL) {
		bus_dma_tag_destroy(sc->stats_tag);
		sc->stats_tag = NULL;
	}

	/*
	 * Free, unmap and destroy all context memory pages.  Only the
	 * 5709/5716 family caches context memory in host memory, so
	 * these resources exist only for that chip (see bce_dma_alloc()).
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		for (i = 0; i < sc->ctx_pages; i++ ) {
			if (sc->ctx_paddr[i] != 0) {
				bus_dmamap_unload(sc->ctx_tag,
				    sc->ctx_map[i]);
				sc->ctx_paddr[i] = 0;
			}

			if (sc->ctx_block[i] != NULL) {
				bus_dmamem_free(sc->ctx_tag,
				    sc->ctx_block[i], sc->ctx_map[i]);
				sc->ctx_block[i] = NULL;
			}
		}

		/* Destroy the context memory tag. */
		if (sc->ctx_tag != NULL) {
			bus_dma_tag_destroy(sc->ctx_tag);
			sc->ctx_tag = NULL;
		}
	}

	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
	for (i = 0; i < sc->tx_pages; i++ ) {
		if (sc->tx_bd_chain_paddr[i] != 0) {
			bus_dmamap_unload(sc->tx_bd_chain_tag,
			    sc->tx_bd_chain_map[i]);
			sc->tx_bd_chain_paddr[i] = 0;
		}

		if (sc->tx_bd_chain[i] != NULL) {
			bus_dmamem_free(sc->tx_bd_chain_tag,
			    sc->tx_bd_chain[i], sc->tx_bd_chain_map[i]);
			sc->tx_bd_chain[i] = NULL;
		}
	}

	/* Destroy the TX buffer descriptor tag. */
	if (sc->tx_bd_chain_tag != NULL) {
		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
		sc->tx_bd_chain_tag = NULL;
	}

	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
	for (i = 0; i < sc->rx_pages; i++ ) {
		if (sc->rx_bd_chain_paddr[i] != 0) {
			bus_dmamap_unload(sc->rx_bd_chain_tag,
			    sc->rx_bd_chain_map[i]);
			sc->rx_bd_chain_paddr[i] = 0;
		}

		if (sc->rx_bd_chain[i] != NULL) {
			bus_dmamem_free(sc->rx_bd_chain_tag,
			    sc->rx_bd_chain[i], sc->rx_bd_chain_map[i]);
			sc->rx_bd_chain[i] = NULL;
		}
	}

	/* Destroy the RX buffer descriptor tag. */
	if (sc->rx_bd_chain_tag != NULL) {
		bus_dma_tag_destroy(sc->rx_bd_chain_tag);
		sc->rx_bd_chain_tag = NULL;
	}

	/*
	 * Free, unmap and destroy all page buffer descriptor chain pages.
	 * Page (pg) chains exist only when header splitting is enabled.
	 */
	if (bce_hdr_split == TRUE) {
		for (i = 0; i < sc->pg_pages; i++ ) {
			if (sc->pg_bd_chain_paddr[i] != 0) {
				bus_dmamap_unload(sc->pg_bd_chain_tag,
				    sc->pg_bd_chain_map[i]);
				sc->pg_bd_chain_paddr[i] = 0;
			}

			if (sc->pg_bd_chain[i] != NULL) {
				bus_dmamem_free(sc->pg_bd_chain_tag,
				    sc->pg_bd_chain[i],
				    sc->pg_bd_chain_map[i]);
				sc->pg_bd_chain[i] = NULL;
			}
		}

		/* Destroy the page buffer descriptor tag. */
		if (sc->pg_bd_chain_tag != NULL) {
			bus_dma_tag_destroy(sc->pg_bd_chain_tag);
			sc->pg_bd_chain_tag = NULL;
		}
	}

	/*
	 * Unload and destroy the TX mbuf maps.  These maps were created
	 * with bus_dmamap_create() and so are destroyed explicitly here.
	 */
	for (i = 0; i < MAX_TX_BD_AVAIL; i++) {
		if (sc->tx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->tx_mbuf_tag,
			    sc->tx_mbuf_map[i]);
			bus_dmamap_destroy(sc->tx_mbuf_tag,
			    sc->tx_mbuf_map[i]);
			sc->tx_mbuf_map[i] = NULL;
		}
	}

	/* Destroy the TX mbuf tag. */
	if (sc->tx_mbuf_tag != NULL) {
		bus_dma_tag_destroy(sc->tx_mbuf_tag);
		sc->tx_mbuf_tag = NULL;
	}

	/* Unload and destroy the RX mbuf maps. */
	for (i = 0; i < MAX_RX_BD_AVAIL; i++) {
		if (sc->rx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->rx_mbuf_tag,
			    sc->rx_mbuf_map[i]);
			bus_dmamap_destroy(sc->rx_mbuf_tag,
			    sc->rx_mbuf_map[i]);
			sc->rx_mbuf_map[i] = NULL;
		}
	}

	/* Destroy the RX mbuf tag. */
	if (sc->rx_mbuf_tag != NULL) {
		bus_dma_tag_destroy(sc->rx_mbuf_tag);
		sc->rx_mbuf_tag = NULL;
	}

	/* Unload and destroy the page mbuf maps. */
	if (bce_hdr_split == TRUE) {
		for (i = 0; i < MAX_PG_BD_AVAIL; i++) {
			if (sc->pg_mbuf_map[i] != NULL) {
				bus_dmamap_unload(sc->pg_mbuf_tag,
				    sc->pg_mbuf_map[i]);
				bus_dmamap_destroy(sc->pg_mbuf_tag,
				    sc->pg_mbuf_map[i]);
				sc->pg_mbuf_map[i] = NULL;
			}
		}

		/* Destroy the page mbuf tag. */
		if (sc->pg_mbuf_tag != NULL) {
			bus_dma_tag_destroy(sc->pg_mbuf_tag);
			sc->pg_mbuf_tag = NULL;
		}
	}

	/* Destroy the parent tag last, after all of its children. */
	if (sc->parent_tag != NULL) {
		bus_dma_tag_destroy(sc->parent_tag);
		sc->parent_tag = NULL;
	}

	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX);
}
/****************************************************************************/
/* Get DMA memory from the OS. */
/* */
/* Validates that the OS has provided DMA buffers in response to a */
/* bus_dmamap_load() call and saves the physical address of those buffers. */
/* When the callback is used the OS will return 0 for the mapping function */
/* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any */
/* failures back to the caller. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr;

	paddr = arg;

	KASSERT(nseg == 1, ("%s(): Too many segments returned (%d)!",
	    __FUNCTION__, nseg));

	/* Simulate a mapping failure. */
	DBRUNIF(DB_RANDOMTRUE(dma_map_addr_failed_sim_control),
	    error = ENOMEM);

	/* ToDo: How to increment debug sim_count variable here? */

	/*
	 * Report the segment's physical address to the caller, or zero
	 * when the mapping failed so the caller can detect the error.
	 */
	*paddr = (error != 0) ? 0 : segs->ds_addr;
}
/****************************************************************************/
/* Allocate any DMA memory needed by the driver. */
/* */
/* Allocates DMA memory needed for the various global structures needed by */
/* hardware. */
/* */
/* Memory alignment requirements: */
/* +-----------------+----------+----------+----------+----------+ */
/* | | 5706 | 5708 | 5709 | 5716 | */
/* +-----------------+----------+----------+----------+----------+ */
/* |Status Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */
/* |Statistics Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */
/* |RX Buffers | 16 bytes | 16 bytes | 16 bytes | 16 bytes | */
/* |PG Buffers | none | none | none | none | */
/* |TX Buffers | none | none | none | none | */
/* |Chain Pages(1) | 4KiB | 4KiB | 4KiB | 4KiB | */
/* |Context Memory | | | | | */
/* +-----------------+----------+----------+----------+----------+ */
/* */
/* (1) Must align with CPU page size (BCM_PAGE_SZIE). */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_dma_alloc(device_t dev)
{
	struct bce_softc *sc;
	int i, error, rc = 0;
	bus_size_t max_size, max_seg_size;
	int max_segments;

	sc = device_get_softc(dev);

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, BCE_DMA_BOUNDARY,
	    sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->parent_tag)) {
		BCE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n",
		    __FILE__, __LINE__);
		rc = ENOMEM;
		goto bce_dma_alloc_exit;
	}

	/*
	 * Create a DMA tag for the status block, allocate and clear the
	 * memory, map the memory into DMA space, and fetch the physical
	 * address of the block.
	 */
	if (bus_dma_tag_create(sc->parent_tag, BCE_DMA_ALIGN,
	    BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR,
	    NULL, NULL, BCE_STATUS_BLK_SZ, 1, BCE_STATUS_BLK_SZ,
	    0, NULL, NULL, &sc->status_tag)) {
		BCE_PRINTF("%s(%d): Could not allocate status block "
		    "DMA tag!\n", __FILE__, __LINE__);
		rc = ENOMEM;
		goto bce_dma_alloc_exit;
	}

	if (bus_dmamem_alloc(sc->status_tag, (void **)&sc->status_block,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->status_map)) {
		BCE_PRINTF("%s(%d): Could not allocate status block "
		    "DMA memory!\n", __FILE__, __LINE__);
		rc = ENOMEM;
		goto bce_dma_alloc_exit;
	}

	error = bus_dmamap_load(sc->status_tag, sc->status_map,
	    sc->status_block, BCE_STATUS_BLK_SZ, bce_dma_map_addr,
	    &sc->status_block_paddr, BUS_DMA_NOWAIT);

	if (error || sc->status_block_paddr == 0) {
		BCE_PRINTF("%s(%d): Could not map status block "
		    "DMA memory!\n", __FILE__, __LINE__);
		rc = ENOMEM;
		goto bce_dma_alloc_exit;
	}

	DBPRINT(sc, BCE_INFO_LOAD, "%s(): status_block_paddr = 0x%jX\n",
	    __FUNCTION__, (uintmax_t) sc->status_block_paddr);

	/*
	 * Create a DMA tag for the statistics block, allocate and clear the
	 * memory, map the memory into DMA space, and fetch the physical
	 * address of the block.
	 */
	if (bus_dma_tag_create(sc->parent_tag, BCE_DMA_ALIGN,
	    BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR,
	    NULL, NULL, BCE_STATS_BLK_SZ, 1, BCE_STATS_BLK_SZ,
	    0, NULL, NULL, &sc->stats_tag)) {
		BCE_PRINTF("%s(%d): Could not allocate statistics block "
		    "DMA tag!\n", __FILE__, __LINE__);
		rc = ENOMEM;
		goto bce_dma_alloc_exit;
	}

	if (bus_dmamem_alloc(sc->stats_tag, (void **)&sc->stats_block,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->stats_map)) {
		BCE_PRINTF("%s(%d): Could not allocate statistics block "
		    "DMA memory!\n", __FILE__, __LINE__);
		rc = ENOMEM;
		goto bce_dma_alloc_exit;
	}

	error = bus_dmamap_load(sc->stats_tag, sc->stats_map,
	    sc->stats_block, BCE_STATS_BLK_SZ, bce_dma_map_addr,
	    &sc->stats_block_paddr, BUS_DMA_NOWAIT);

	if (error || sc->stats_block_paddr == 0) {
		BCE_PRINTF("%s(%d): Could not map statistics block "
		    "DMA memory!\n", __FILE__, __LINE__);
		rc = ENOMEM;
		goto bce_dma_alloc_exit;
	}

	DBPRINT(sc, BCE_INFO_LOAD, "%s(): stats_block_paddr = 0x%jX\n",
	    __FUNCTION__, (uintmax_t) sc->stats_block_paddr);

	/* BCM5709 uses host memory as cache for context memory. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (sc->ctx_pages == 0)
			sc->ctx_pages = 1;

		DBRUNIF((sc->ctx_pages > 512),
		    BCE_PRINTF("%s(%d): Too many CTX pages! %d > 512\n",
		    __FILE__, __LINE__, sc->ctx_pages));

		/*
		 * Create a DMA tag for the context pages,
		 * allocate and clear the memory, map the
		 * memory into DMA space, and fetch the
		 * physical address of the block.
		 */
		if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE,
		    BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR,
		    NULL, NULL, BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE,
		    0, NULL, NULL, &sc->ctx_tag)) {
			BCE_PRINTF("%s(%d): Could not allocate CTX "
			    "DMA tag!\n", __FILE__, __LINE__);
			rc = ENOMEM;
			goto bce_dma_alloc_exit;
		}

		for (i = 0; i < sc->ctx_pages; i++) {
			if (bus_dmamem_alloc(sc->ctx_tag,
			    (void **)&sc->ctx_block[i],
			    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
			    &sc->ctx_map[i])) {
				BCE_PRINTF("%s(%d): Could not allocate CTX "
				    "DMA memory!\n", __FILE__, __LINE__);
				rc = ENOMEM;
				goto bce_dma_alloc_exit;
			}

			error = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i],
			    sc->ctx_block[i], BCM_PAGE_SIZE, bce_dma_map_addr,
			    &sc->ctx_paddr[i], BUS_DMA_NOWAIT);

			if (error || sc->ctx_paddr[i] == 0) {
				BCE_PRINTF("%s(%d): Could not map CTX "
				    "DMA memory!\n", __FILE__, __LINE__);
				rc = ENOMEM;
				goto bce_dma_alloc_exit;
			}

			DBPRINT(sc, BCE_INFO_LOAD, "%s(): ctx_paddr[%d] "
			    "= 0x%jX\n", __FUNCTION__, i,
			    (uintmax_t) sc->ctx_paddr[i]);
		}
	}

	/*
	 * Create a DMA tag for the TX buffer descriptor chain,
	 * allocate and clear the memory, and fetch the
	 * physical address of the block.
	 */
	if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, BCE_DMA_BOUNDARY,
	    sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL,
	    BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ, 0,
	    NULL, NULL, &sc->tx_bd_chain_tag)) {
		BCE_PRINTF("%s(%d): Could not allocate TX descriptor "
		    "chain DMA tag!\n", __FILE__, __LINE__);
		rc = ENOMEM;
		goto bce_dma_alloc_exit;
	}

	for (i = 0; i < sc->tx_pages; i++) {
		if (bus_dmamem_alloc(sc->tx_bd_chain_tag,
		    (void **)&sc->tx_bd_chain[i],
		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &sc->tx_bd_chain_map[i])) {
			BCE_PRINTF("%s(%d): Could not allocate TX descriptor "
			    "chain DMA memory!\n", __FILE__, __LINE__);
			rc = ENOMEM;
			goto bce_dma_alloc_exit;
		}

		error = bus_dmamap_load(sc->tx_bd_chain_tag,
		    sc->tx_bd_chain_map[i], sc->tx_bd_chain[i],
		    BCE_TX_CHAIN_PAGE_SZ, bce_dma_map_addr,
		    &sc->tx_bd_chain_paddr[i], BUS_DMA_NOWAIT);

		if (error || sc->tx_bd_chain_paddr[i] == 0) {
			BCE_PRINTF("%s(%d): Could not map TX descriptor "
			    "chain DMA memory!\n", __FILE__, __LINE__);
			rc = ENOMEM;
			goto bce_dma_alloc_exit;
		}

		DBPRINT(sc, BCE_INFO_LOAD, "%s(): tx_bd_chain_paddr[%d] = "
		    "0x%jX\n", __FUNCTION__, i,
		    (uintmax_t) sc->tx_bd_chain_paddr[i]);
	}

	/* Check the required size before mapping to conserve resources. */
	if (bce_tso_enable) {
		max_size = BCE_TSO_MAX_SIZE;
		max_segments = BCE_MAX_SEGMENTS;
		max_seg_size = BCE_TSO_MAX_SEG_SIZE;
	} else {
		max_size = MCLBYTES * BCE_MAX_SEGMENTS;
		max_segments = BCE_MAX_SEGMENTS;
		max_seg_size = MCLBYTES;
	}

	/* Create a DMA tag for TX mbufs. */
	if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY,
	    sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, max_size,
	    max_segments, max_seg_size, 0, NULL, NULL, &sc->tx_mbuf_tag)) {
		BCE_PRINTF("%s(%d): Could not allocate TX mbuf DMA tag!\n",
		    __FILE__, __LINE__);
		rc = ENOMEM;
		goto bce_dma_alloc_exit;
	}

	/* Create DMA maps for the TX mbufs clusters. */
	for (i = 0; i < TOTAL_TX_BD_ALLOC; i++) {
		if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT,
		    &sc->tx_mbuf_map[i])) {
			BCE_PRINTF("%s(%d): Unable to create TX mbuf DMA "
			    "map!\n", __FILE__, __LINE__);
			rc = ENOMEM;
			goto bce_dma_alloc_exit;
		}
	}

	/*
	 * Create a DMA tag for the RX buffer descriptor chain,
	 * allocate and clear the memory, and fetch the physical
	 * address of the blocks.
	 *
	 * Fix: the lowaddr/highaddr arguments were previously passed in
	 * reverse order (BUS_SPACE_MAXADDR, sc->max_bus_addr), unlike
	 * every other tag created in this function, which silently
	 * disabled the bus address restriction for this tag.
	 */
	if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE,
	    BCE_DMA_BOUNDARY, sc->max_bus_addr,
	    BUS_SPACE_MAXADDR, NULL, NULL,
	    BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
	    0, NULL, NULL, &sc->rx_bd_chain_tag)) {
		BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain "
		    "DMA tag!\n", __FILE__, __LINE__);
		rc = ENOMEM;
		goto bce_dma_alloc_exit;
	}

	for (i = 0; i < sc->rx_pages; i++) {
		if (bus_dmamem_alloc(sc->rx_bd_chain_tag,
		    (void **)&sc->rx_bd_chain[i],
		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &sc->rx_bd_chain_map[i])) {
			BCE_PRINTF("%s(%d): Could not allocate RX descriptor "
			    "chain DMA memory!\n", __FILE__, __LINE__);
			rc = ENOMEM;
			goto bce_dma_alloc_exit;
		}

		error = bus_dmamap_load(sc->rx_bd_chain_tag,
		    sc->rx_bd_chain_map[i], sc->rx_bd_chain[i],
		    BCE_RX_CHAIN_PAGE_SZ, bce_dma_map_addr,
		    &sc->rx_bd_chain_paddr[i], BUS_DMA_NOWAIT);

		if (error || sc->rx_bd_chain_paddr[i] == 0) {
			BCE_PRINTF("%s(%d): Could not map RX descriptor "
			    "chain DMA memory!\n", __FILE__, __LINE__);
			rc = ENOMEM;
			goto bce_dma_alloc_exit;
		}

		DBPRINT(sc, BCE_INFO_LOAD, "%s(): rx_bd_chain_paddr[%d] = "
		    "0x%jX\n", __FUNCTION__, i,
		    (uintmax_t) sc->rx_bd_chain_paddr[i]);
	}

	/*
	 * Create a DMA tag for RX mbufs.
	 */
	if (bce_hdr_split == TRUE)
		max_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ?
		    MCLBYTES : sc->rx_bd_mbuf_alloc_size);
	else
		max_size = MJUM9BYTES;

	DBPRINT(sc, BCE_INFO_LOAD, "%s(): Creating rx_mbuf_tag "
	    "(max size = 0x%jX)\n", __FUNCTION__, (uintmax_t)max_size);

	if (bus_dma_tag_create(sc->parent_tag, BCE_RX_BUF_ALIGN,
	    BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL,
	    max_size, 1, max_size, 0, NULL, NULL, &sc->rx_mbuf_tag)) {
		BCE_PRINTF("%s(%d): Could not allocate RX mbuf DMA tag!\n",
		    __FILE__, __LINE__);
		rc = ENOMEM;
		goto bce_dma_alloc_exit;
	}

	/* Create DMA maps for the RX mbuf clusters. */
	for (i = 0; i < TOTAL_RX_BD_ALLOC; i++) {
		if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
		    &sc->rx_mbuf_map[i])) {
			BCE_PRINTF("%s(%d): Unable to create RX mbuf "
			    "DMA map!\n", __FILE__, __LINE__);
			rc = ENOMEM;
			goto bce_dma_alloc_exit;
		}
	}

	if (bce_hdr_split == TRUE) {
		/*
		 * Create a DMA tag for the page buffer descriptor chain,
		 * allocate and clear the memory, and fetch the physical
		 * address of the blocks.
		 *
		 * Fix: lowaddr/highaddr order corrected here as well
		 * (was BUS_SPACE_MAXADDR, sc->max_bus_addr).
		 */
		if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE,
		    BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR,
		    NULL, NULL, BCE_PG_CHAIN_PAGE_SZ, 1, BCE_PG_CHAIN_PAGE_SZ,
		    0, NULL, NULL, &sc->pg_bd_chain_tag)) {
			BCE_PRINTF("%s(%d): Could not allocate page descriptor "
			    "chain DMA tag!\n", __FILE__, __LINE__);
			rc = ENOMEM;
			goto bce_dma_alloc_exit;
		}

		for (i = 0; i < sc->pg_pages; i++) {
			if (bus_dmamem_alloc(sc->pg_bd_chain_tag,
			    (void **)&sc->pg_bd_chain[i],
			    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
			    &sc->pg_bd_chain_map[i])) {
				BCE_PRINTF("%s(%d): Could not allocate page "
				    "descriptor chain DMA memory!\n",
				    __FILE__, __LINE__);
				rc = ENOMEM;
				goto bce_dma_alloc_exit;
			}

			error = bus_dmamap_load(sc->pg_bd_chain_tag,
			    sc->pg_bd_chain_map[i], sc->pg_bd_chain[i],
			    BCE_PG_CHAIN_PAGE_SZ, bce_dma_map_addr,
			    &sc->pg_bd_chain_paddr[i], BUS_DMA_NOWAIT);

			if (error || sc->pg_bd_chain_paddr[i] == 0) {
				BCE_PRINTF("%s(%d): Could not map page descriptor "
				    "chain DMA memory!\n", __FILE__, __LINE__);
				rc = ENOMEM;
				goto bce_dma_alloc_exit;
			}

			DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_chain_paddr[%d] = "
			    "0x%jX\n", __FUNCTION__, i,
			    (uintmax_t) sc->pg_bd_chain_paddr[i]);
		}

		/*
		 * Create a DMA tag for page mbufs.
		 */
		if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY,
		    sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
		    1, MCLBYTES, 0, NULL, NULL, &sc->pg_mbuf_tag)) {
			BCE_PRINTF("%s(%d): Could not allocate page mbuf "
			    "DMA tag!\n", __FILE__, __LINE__);
			rc = ENOMEM;
			goto bce_dma_alloc_exit;
		}

		/* Create DMA maps for the page mbuf clusters. */
		for (i = 0; i < TOTAL_PG_BD_ALLOC; i++) {
			if (bus_dmamap_create(sc->pg_mbuf_tag, BUS_DMA_NOWAIT,
			    &sc->pg_mbuf_map[i])) {
				BCE_PRINTF("%s(%d): Unable to create page mbuf "
				    "DMA map!\n", __FILE__, __LINE__);
				rc = ENOMEM;
				goto bce_dma_alloc_exit;
			}
		}
	}

bce_dma_alloc_exit:
	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
	return(rc);
}
/****************************************************************************/
/* Release all resources used by the driver. */
/* */
/* Releases all resources acquired by the driver including interrupts, */
/* interrupt handler, interfaces, mutexes, and DMA memory. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_release_resources(struct bce_softc *sc)
{
	device_t dev;

	DBENTER(BCE_VERBOSE_RESET);

	dev = sc->bce_dev;

	/* DMA memory first; it may still be referenced by the hardware. */
	bce_dma_free(sc);

	/* Tear down the interrupt handler before releasing the IRQ itself. */
	if (sc->bce_intrhand != NULL) {
		DBPRINT(sc, BCE_INFO_RESET, "Removing interrupt handler.\n");
		bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
	}

	if (sc->bce_res_irq != NULL) {
		DBPRINT(sc, BCE_INFO_RESET, "Releasing IRQ.\n");
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->bce_res_irq), sc->bce_res_irq);
	}

	/* Return any MSI/MSI-X vectors allocated during attach. */
	if (sc->bce_flags & (BCE_USING_MSI_FLAG | BCE_USING_MSIX_FLAG)) {
		DBPRINT(sc, BCE_INFO_RESET, "Releasing MSI/MSI-X vector.\n");
		pci_release_msi(dev);
	}

	/* Release the memory-mapped register BAR. */
	if (sc->bce_res_mem != NULL) {
		DBPRINT(sc, BCE_INFO_RESET, "Releasing PCI memory.\n");
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    sc->bce_res_mem);
	}

	if (sc->bce_ifp != NULL) {
		DBPRINT(sc, BCE_INFO_RESET, "Releasing IF.\n");
		if_free(sc->bce_ifp);
	}

	/* Safe on partially attached devices: only destroy if created. */
	if (mtx_initialized(&sc->bce_mtx))
		BCE_LOCK_DESTROY(sc);

	DBEXIT(BCE_VERBOSE_RESET);
}
/****************************************************************************/
/* Firmware synchronization. */
/* */
/* Before performing certain events such as a chip reset, synchronize with */
/* the firmware first. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_fw_sync(struct bce_softc *sc, u32 msg_data)
{
	int i, rc = 0;
	u32 val;

	DBENTER(BCE_VERBOSE_RESET);

	/* Don't waste any time if we've timed out before. */
	if (sc->bce_fw_timed_out == TRUE) {
		rc = EBUSY;
		goto bce_fw_sync_exit;
	}

	/*
	 * Increment the message sequence number.  The sequence occupies
	 * the low bits of the message word (BCE_DRV_MSG_SEQ); the caller
	 * supplies the message code in the upper bits.
	 */
	sc->bce_fw_wr_seq++;
	msg_data |= sc->bce_fw_wr_seq;

	DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "bce_fw_sync(): msg_data = "
	    "0x%08X\n", msg_data);

	/* Send the message to the bootcode driver mailbox. */
	bce_shmem_wr(sc, BCE_DRV_MB, msg_data);

	/*
	 * Wait for the bootcode to acknowledge the message by polling
	 * (1ms per iteration, up to FW_ACK_TIME_OUT_MS).  The bootcode
	 * echoes the sequence number in the ack field when done.
	 */
	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
		/* Check for a response in the bootcode firmware mailbox. */
		val = bce_shmem_rd(sc, BCE_FW_MB);
		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
			break;
		DELAY(1000);
	}

	/*
	 * If we've timed out, tell bootcode that we've stopped waiting.
	 * WAIT0 messages are exempt: those are allowed to go unacked.
	 */
	if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) &&
	    ((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) {
		BCE_PRINTF("%s(%d): Firmware synchronization timeout! "
		    "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data);

		msg_data &= ~BCE_DRV_MSG_CODE;
		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;

		bce_shmem_wr(sc, BCE_DRV_MB, msg_data);

		/* Latch the timeout so later calls fail fast (see above). */
		sc->bce_fw_timed_out = TRUE;
		rc = EBUSY;
	}

bce_fw_sync_exit:
	DBEXIT(BCE_VERBOSE_RESET);
	return (rc);
}
/****************************************************************************/
/* Load Receive Virtual 2 Physical (RV2P) processor firmware. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_load_rv2p_fw(struct bce_softc *sc, const u32 *rv2p_code,
	u32 rv2p_code_len, u32 rv2p_proc)
{
	int i;
	u32 val;

	DBENTER(BCE_VERBOSE_RESET);

	/*
	 * Set the page size used by RV2P.  NOTE(review): the macro is
	 * defined elsewhere; presumably it patches the max-BD-per-page
	 * constant inside the PROC2 firmware image before download —
	 * confirm against its definition.
	 */
	if (rv2p_proc == RV2P_PROC2) {
		BCE_RV2P_PROC2_CHG_MAX_BD_PAGE(USABLE_RX_BD_PER_PAGE);
	}

	/*
	 * Each RV2P instruction is 64 bits: write the high word, then the
	 * low word, then latch it into instruction slot i/8 via the
	 * ADDR_CMD register.  'i' counts bytes, hence the step of 8.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
		rv2p_code++;
		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
	}

	DBEXIT(BCE_VERBOSE_RESET);
}
/****************************************************************************/
/* Load RISC processor firmware. */
/* */
/* Loads firmware from the file if_bcefw.h into the scratchpad memory */
/* associated with a particular processor. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/* Copy one firmware section into a RISC processor's scratchpad memory.     */
/* A NULL section pointer means the section is absent and is skipped.       */
static void
bce_load_cpu_fw_section(struct bce_softc *sc, u32 offset, const u32 *data,
    u32 len)
{
	u32 j;

	if (data == NULL)
		return;

	/* Sections are word (32-bit) streams; len is in bytes. */
	for (j = 0; j < (len / 4); j++, offset += 4)
		REG_WR_IND(sc, offset, data[j]);
}

static void
bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
    struct fw_info *fw)
{
	DBENTER(BCE_VERBOSE_RESET);

	/* The CPU must be halted while its scratchpad is rewritten. */
	bce_halt_cpu(sc, cpu_reg);

	/*
	 * Each section's load address is given in the CPU's MIPS view;
	 * translate it into the scratchpad window before writing.  The
	 * five identical copy loops were factored into one helper.
	 */

	/* Load the Text area. */
	bce_load_cpu_fw_section(sc, cpu_reg->spad_base +
	    (fw->text_addr - cpu_reg->mips_view_base), fw->text, fw->text_len);

	/* Load the Data area. */
	bce_load_cpu_fw_section(sc, cpu_reg->spad_base +
	    (fw->data_addr - cpu_reg->mips_view_base), fw->data, fw->data_len);

	/* Load the SBSS area. */
	bce_load_cpu_fw_section(sc, cpu_reg->spad_base +
	    (fw->sbss_addr - cpu_reg->mips_view_base), fw->sbss, fw->sbss_len);

	/* Load the BSS area. */
	bce_load_cpu_fw_section(sc, cpu_reg->spad_base +
	    (fw->bss_addr - cpu_reg->mips_view_base), fw->bss, fw->bss_len);

	/* Load the Read-Only area. */
	bce_load_cpu_fw_section(sc, cpu_reg->spad_base +
	    (fw->rodata_addr - cpu_reg->mips_view_base), fw->rodata,
	    fw->rodata_len);

	/* Clear the pre-fetch instruction and set the FW start address. */
	REG_WR_IND(sc, cpu_reg->inst, 0);
	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);

	DBEXIT(BCE_VERBOSE_RESET);
}
/****************************************************************************/
/* Starts the RISC processor. */
/* */
/* Assumes the CPU starting address has already been set. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
{
	u32 mode;

	DBENTER(BCE_VERBOSE_RESET);

	/*
	 * Release the CPU from halt: clear pending state, then write the
	 * mode register with the halt bit removed.
	 */
	mode = REG_RD_IND(sc, cpu_reg->mode) & ~cpu_reg->mode_value_halt;
	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(sc, cpu_reg->mode, mode);

	DBEXIT(BCE_VERBOSE_RESET);
}
/****************************************************************************/
/* Halts the RISC processor. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
{
	u32 mode;

	DBENTER(BCE_VERBOSE_RESET);

	/* Set the halt bit in the mode register, then clear pending state. */
	mode = REG_RD_IND(sc, cpu_reg->mode) | cpu_reg->mode_value_halt;
	REG_WR_IND(sc, cpu_reg->mode, mode);
	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);

	DBEXIT(BCE_VERBOSE_RESET);
}
/****************************************************************************/
/* Initialize the RX CPU. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_start_rxp_cpu(struct bce_softc *sc)
{
struct cpu_reg cpu_reg;
DBENTER(BCE_VERBOSE_RESET);
cpu_reg.mode = BCE_RXP_CPU_MODE;
cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
cpu_reg.state = BCE_RXP_CPU_STATE;
cpu_reg.state_value_clear = 0xffffff;
cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
cpu_reg.spad_base = BCE_RXP_SCRATCH;
cpu_reg.mips_view_base = 0x8000000;
DBPRINT(sc, BCE_INFO_RESET, "Starting RX firmware.\n");
bce_start_cpu(sc, &cpu_reg);
DBEXIT(BCE_VERBOSE_RESET);
}
/****************************************************************************/
/* Initialize the RX CPU. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_init_rxp_cpu(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	DBENTER(BCE_VERBOSE_RESET);

	/* Register map for the RX processor (RXP). */
	cpu_reg.mode = BCE_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/*
	 * Select the firmware image: the b09 image for the 5709/5716
	 * family, the b06 image for 5706/5708.  The tables come from
	 * if_bcefw.h.
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		fw.ver_major = bce_RXP_b09FwReleaseMajor;
		fw.ver_minor = bce_RXP_b09FwReleaseMinor;
		fw.ver_fix = bce_RXP_b09FwReleaseFix;
		fw.start_addr = bce_RXP_b09FwStartAddr;

		fw.text_addr = bce_RXP_b09FwTextAddr;
		fw.text_len = bce_RXP_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bce_RXP_b09FwText;

		fw.data_addr = bce_RXP_b09FwDataAddr;
		fw.data_len = bce_RXP_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bce_RXP_b09FwData;

		fw.sbss_addr = bce_RXP_b09FwSbssAddr;
		fw.sbss_len = bce_RXP_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_RXP_b09FwSbss;

		fw.bss_addr = bce_RXP_b09FwBssAddr;
		fw.bss_len = bce_RXP_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_RXP_b09FwBss;

		fw.rodata_addr = bce_RXP_b09FwRodataAddr;
		fw.rodata_len = bce_RXP_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_RXP_b09FwRodata;
	} else {
		fw.ver_major = bce_RXP_b06FwReleaseMajor;
		fw.ver_minor = bce_RXP_b06FwReleaseMinor;
		fw.ver_fix = bce_RXP_b06FwReleaseFix;
		fw.start_addr = bce_RXP_b06FwStartAddr;

		fw.text_addr = bce_RXP_b06FwTextAddr;
		fw.text_len = bce_RXP_b06FwTextLen;
		fw.text_index = 0;
		fw.text = bce_RXP_b06FwText;

		fw.data_addr = bce_RXP_b06FwDataAddr;
		fw.data_len = bce_RXP_b06FwDataLen;
		fw.data_index = 0;
		fw.data = bce_RXP_b06FwData;

		fw.sbss_addr = bce_RXP_b06FwSbssAddr;
		fw.sbss_len = bce_RXP_b06FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bce_RXP_b06FwSbss;

		fw.bss_addr = bce_RXP_b06FwBssAddr;
		fw.bss_len = bce_RXP_b06FwBssLen;
		fw.bss_index = 0;
		fw.bss = bce_RXP_b06FwBss;

		fw.rodata_addr = bce_RXP_b06FwRodataAddr;
		fw.rodata_len = bce_RXP_b06FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bce_RXP_b06FwRodata;
	}

	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);

	/*
	 * Delay RXP start until initialization is complete.
	 * (bce_start_rxp_cpu() is called separately later; the other
	 * processors are started immediately after loading.)
	 */

	DBEXIT(BCE_VERBOSE_RESET);
}
/****************************************************************************/
/* Initialize the TX CPU. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_init_txp_cpu(struct bce_softc *sc)
{
struct cpu_reg cpu_reg;
struct fw_info fw;
DBENTER(BCE_VERBOSE_RESET);
cpu_reg.mode = BCE_TXP_CPU_MODE;
cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
cpu_reg.state = BCE_TXP_CPU_STATE;
cpu_reg.state_value_clear = 0xffffff;
cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
cpu_reg.spad_base = BCE_TXP_SCRATCH;
cpu_reg.mips_view_base = 0x8000000;
if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
fw.ver_major = bce_TXP_b09FwReleaseMajor;
fw.ver_minor = bce_TXP_b09FwReleaseMinor;
fw.ver_fix = bce_TXP_b09FwReleaseFix;
fw.start_addr = bce_TXP_b09FwStartAddr;
fw.text_addr = bce_TXP_b09FwTextAddr;
fw.text_len = bce_TXP_b09FwTextLen;
fw.text_index = 0;
fw.text = bce_TXP_b09FwText;
fw.data_addr = bce_TXP_b09FwDataAddr;
fw.data_len = bce_TXP_b09FwDataLen;
fw.data_index = 0;
fw.data = bce_TXP_b09FwData;
fw.sbss_addr = bce_TXP_b09FwSbssAddr;
fw.sbss_len = bce_TXP_b09FwSbssLen;
fw.sbss_index = 0;
fw.sbss = bce_TXP_b09FwSbss;
fw.bss_addr = bce_TXP_b09FwBssAddr;
fw.bss_len = bce_TXP_b09FwBssLen;
fw.bss_index = 0;
fw.bss = bce_TXP_b09FwBss;
fw.rodata_addr = bce_TXP_b09FwRodataAddr;
fw.rodata_len = bce_TXP_b09FwRodataLen;
fw.rodata_index = 0;
fw.rodata = bce_TXP_b09FwRodata;
} else {
fw.ver_major = bce_TXP_b06FwReleaseMajor;
fw.ver_minor = bce_TXP_b06FwReleaseMinor;
fw.ver_fix = bce_TXP_b06FwReleaseFix;
fw.start_addr = bce_TXP_b06FwStartAddr;
fw.text_addr = bce_TXP_b06FwTextAddr;
fw.text_len = bce_TXP_b06FwTextLen;
fw.text_index = 0;
fw.text = bce_TXP_b06FwText;
fw.data_addr = bce_TXP_b06FwDataAddr;
fw.data_len = bce_TXP_b06FwDataLen;
fw.data_index = 0;
fw.data = bce_TXP_b06FwData;
fw.sbss_addr = bce_TXP_b06FwSbssAddr;
fw.sbss_len = bce_TXP_b06FwSbssLen;
fw.sbss_index = 0;
fw.sbss = bce_TXP_b06FwSbss;
fw.bss_addr = bce_TXP_b06FwBssAddr;
fw.bss_len = bce_TXP_b06FwBssLen;
fw.bss_index = 0;
fw.bss = bce_TXP_b06FwBss;
fw.rodata_addr = bce_TXP_b06FwRodataAddr;
fw.rodata_len = bce_TXP_b06FwRodataLen;
fw.rodata_index = 0;
fw.rodata = bce_TXP_b06FwRodata;
}
DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
bce_load_cpu_fw(sc, &cpu_reg, &fw);
bce_start_cpu(sc, &cpu_reg);
DBEXIT(BCE_VERBOSE_RESET);
}
/****************************************************************************/
/* Initialize the TPAT CPU. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_init_tpat_cpu(struct bce_softc *sc)
{
struct cpu_reg cpu_reg;
struct fw_info fw;
DBENTER(BCE_VERBOSE_RESET);
cpu_reg.mode = BCE_TPAT_CPU_MODE;
cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
cpu_reg.state = BCE_TPAT_CPU_STATE;
cpu_reg.state_value_clear = 0xffffff;
cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
cpu_reg.spad_base = BCE_TPAT_SCRATCH;
cpu_reg.mips_view_base = 0x8000000;
if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
fw.ver_major = bce_TPAT_b09FwReleaseMajor;
fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
fw.ver_fix = bce_TPAT_b09FwReleaseFix;
fw.start_addr = bce_TPAT_b09FwStartAddr;
fw.text_addr = bce_TPAT_b09FwTextAddr;
fw.text_len = bce_TPAT_b09FwTextLen;
fw.text_index = 0;
fw.text = bce_TPAT_b09FwText;
fw.data_addr = bce_TPAT_b09FwDataAddr;
fw.data_len = bce_TPAT_b09FwDataLen;
fw.data_index = 0;
fw.data = bce_TPAT_b09FwData;
fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
fw.sbss_len = bce_TPAT_b09FwSbssLen;
fw.sbss_index = 0;
fw.sbss = bce_TPAT_b09FwSbss;
fw.bss_addr = bce_TPAT_b09FwBssAddr;
fw.bss_len = bce_TPAT_b09FwBssLen;
fw.bss_index = 0;
fw.bss = bce_TPAT_b09FwBss;
fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
fw.rodata_len = bce_TPAT_b09FwRodataLen;
fw.rodata_index = 0;
fw.rodata = bce_TPAT_b09FwRodata;
} else {
fw.ver_major = bce_TPAT_b06FwReleaseMajor;
fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
fw.ver_fix = bce_TPAT_b06FwReleaseFix;
fw.start_addr = bce_TPAT_b06FwStartAddr;
fw.text_addr = bce_TPAT_b06FwTextAddr;
fw.text_len = bce_TPAT_b06FwTextLen;
fw.text_index = 0;
fw.text = bce_TPAT_b06FwText;
fw.data_addr = bce_TPAT_b06FwDataAddr;
fw.data_len = bce_TPAT_b06FwDataLen;
fw.data_index = 0;
fw.data = bce_TPAT_b06FwData;
fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
fw.sbss_len = bce_TPAT_b06FwSbssLen;
fw.sbss_index = 0;
fw.sbss = bce_TPAT_b06FwSbss;
fw.bss_addr = bce_TPAT_b06FwBssAddr;
fw.bss_len = bce_TPAT_b06FwBssLen;
fw.bss_index = 0;
fw.bss = bce_TPAT_b06FwBss;
fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
fw.rodata_len = bce_TPAT_b06FwRodataLen;
fw.rodata_index = 0;
fw.rodata = bce_TPAT_b06FwRodata;
}
DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
bce_load_cpu_fw(sc, &cpu_reg, &fw);
bce_start_cpu(sc, &cpu_reg);
DBEXIT(BCE_VERBOSE_RESET);
}
/****************************************************************************/
/* Initialize the CP CPU. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_init_cp_cpu(struct bce_softc *sc)
{
struct cpu_reg cpu_reg;
struct fw_info fw;
DBENTER(BCE_VERBOSE_RESET);
cpu_reg.mode = BCE_CP_CPU_MODE;
cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
cpu_reg.state = BCE_CP_CPU_STATE;
cpu_reg.state_value_clear = 0xffffff;
cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
cpu_reg.spad_base = BCE_CP_SCRATCH;
cpu_reg.mips_view_base = 0x8000000;
if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
fw.ver_major = bce_CP_b09FwReleaseMajor;
fw.ver_minor = bce_CP_b09FwReleaseMinor;
fw.ver_fix = bce_CP_b09FwReleaseFix;
fw.start_addr = bce_CP_b09FwStartAddr;
fw.text_addr = bce_CP_b09FwTextAddr;
fw.text_len = bce_CP_b09FwTextLen;
fw.text_index = 0;
fw.text = bce_CP_b09FwText;
fw.data_addr = bce_CP_b09FwDataAddr;
fw.data_len = bce_CP_b09FwDataLen;
fw.data_index = 0;
fw.data = bce_CP_b09FwData;
fw.sbss_addr = bce_CP_b09FwSbssAddr;
fw.sbss_len = bce_CP_b09FwSbssLen;
fw.sbss_index = 0;
fw.sbss = bce_CP_b09FwSbss;
fw.bss_addr = bce_CP_b09FwBssAddr;
fw.bss_len = bce_CP_b09FwBssLen;
fw.bss_index = 0;
fw.bss = bce_CP_b09FwBss;
fw.rodata_addr = bce_CP_b09FwRodataAddr;
fw.rodata_len = bce_CP_b09FwRodataLen;
fw.rodata_index = 0;
fw.rodata = bce_CP_b09FwRodata;
} else {
fw.ver_major = bce_CP_b06FwReleaseMajor;
fw.ver_minor = bce_CP_b06FwReleaseMinor;
fw.ver_fix = bce_CP_b06FwReleaseFix;
fw.start_addr = bce_CP_b06FwStartAddr;
fw.text_addr = bce_CP_b06FwTextAddr;
fw.text_len = bce_CP_b06FwTextLen;
fw.text_index = 0;
fw.text = bce_CP_b06FwText;
fw.data_addr = bce_CP_b06FwDataAddr;
fw.data_len = bce_CP_b06FwDataLen;
fw.data_index = 0;
fw.data = bce_CP_b06FwData;
fw.sbss_addr = bce_CP_b06FwSbssAddr;
fw.sbss_len = bce_CP_b06FwSbssLen;
fw.sbss_index = 0;
fw.sbss = bce_CP_b06FwSbss;
fw.bss_addr = bce_CP_b06FwBssAddr;
fw.bss_len = bce_CP_b06FwBssLen;
fw.bss_index = 0;
fw.bss = bce_CP_b06FwBss;
fw.rodata_addr = bce_CP_b06FwRodataAddr;
fw.rodata_len = bce_CP_b06FwRodataLen;
fw.rodata_index = 0;
fw.rodata = bce_CP_b06FwRodata;
}
DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n");
bce_load_cpu_fw(sc, &cpu_reg, &fw);
bce_start_cpu(sc, &cpu_reg);
DBEXIT(BCE_VERBOSE_RESET);
}
/****************************************************************************/
/* Initialize the COM CPU. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_init_com_cpu(struct bce_softc *sc)
{
struct cpu_reg cpu_reg;
struct fw_info fw;
DBENTER(BCE_VERBOSE_RESET);
cpu_reg.mode = BCE_COM_CPU_MODE;
cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
cpu_reg.state = BCE_COM_CPU_STATE;
cpu_reg.state_value_clear = 0xffffff;
cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
cpu_reg.spad_base = BCE_COM_SCRATCH;
cpu_reg.mips_view_base = 0x8000000;
if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
fw.ver_major = bce_COM_b09FwReleaseMajor;
fw.ver_minor = bce_COM_b09FwReleaseMinor;
fw.ver_fix = bce_COM_b09FwReleaseFix;
fw.start_addr = bce_COM_b09FwStartAddr;
fw.text_addr = bce_COM_b09FwTextAddr;
fw.text_len = bce_COM_b09FwTextLen;
fw.text_index = 0;
fw.text = bce_COM_b09FwText;
fw.data_addr = bce_COM_b09FwDataAddr;
fw.data_len = bce_COM_b09FwDataLen;
fw.data_index = 0;
fw.data = bce_COM_b09FwData;
fw.sbss_addr = bce_COM_b09FwSbssAddr;
fw.sbss_len = bce_COM_b09FwSbssLen;
fw.sbss_index = 0;
fw.sbss = bce_COM_b09FwSbss;
fw.bss_addr = bce_COM_b09FwBssAddr;
fw.bss_len = bce_COM_b09FwBssLen;
fw.bss_index = 0;
fw.bss = bce_COM_b09FwBss;
fw.rodata_addr = bce_COM_b09FwRodataAddr;
fw.rodata_len = bce_COM_b09FwRodataLen;
fw.rodata_index = 0;
fw.rodata = bce_COM_b09FwRodata;
} else {
fw.ver_major = bce_COM_b06FwReleaseMajor;
fw.ver_minor = bce_COM_b06FwReleaseMinor;
fw.ver_fix = bce_COM_b06FwReleaseFix;
fw.start_addr = bce_COM_b06FwStartAddr;
fw.text_addr = bce_COM_b06FwTextAddr;
fw.text_len = bce_COM_b06FwTextLen;
fw.text_index = 0;
fw.text = bce_COM_b06FwText;
fw.data_addr = bce_COM_b06FwDataAddr;
fw.data_len = bce_COM_b06FwDataLen;
fw.data_index = 0;
fw.data = bce_COM_b06FwData;
fw.sbss_addr = bce_COM_b06FwSbssAddr;
fw.sbss_len = bce_COM_b06FwSbssLen;
fw.sbss_index = 0;
fw.sbss = bce_COM_b06FwSbss;
fw.bss_addr = bce_COM_b06FwBssAddr;
fw.bss_len = bce_COM_b06FwBssLen;
fw.bss_index = 0;
fw.bss = bce_COM_b06FwBss;
fw.rodata_addr = bce_COM_b06FwRodataAddr;
fw.rodata_len = bce_COM_b06FwRodataLen;
fw.rodata_index = 0;
fw.rodata = bce_COM_b06FwRodata;
}
DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
bce_load_cpu_fw(sc, &cpu_reg, &fw);
bce_start_cpu(sc, &cpu_reg);
DBEXIT(BCE_VERBOSE_RESET);
}
/****************************************************************************/
/* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs. */
/* */
/* Loads the firmware for each CPU and starts the CPU. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_init_cpus(struct bce_softc *sc)
{
	DBENTER(BCE_VERBOSE_RESET);

	/*
	 * Load the RV2P processor firmware first.  Three images exist:
	 * one for the 5706/5708 family, one for 5709 Ax steppings, and
	 * one for later 5709 steppings.
	 */
	if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5709) {
		bce_load_rv2p_fw(sc, bce_rv2p_proc1,
		    sizeof(bce_rv2p_proc1), RV2P_PROC1);
		bce_load_rv2p_fw(sc, bce_rv2p_proc2,
		    sizeof(bce_rv2p_proc2), RV2P_PROC2);
	} else if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
		bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
		    sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
		bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
		    sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
	} else {
		bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
		    sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
		bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
		    sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
	}

	/* Load (and, except for RXP, start) each on-chip RISC processor. */
	bce_init_rxp_cpu(sc);
	bce_init_txp_cpu(sc);
	bce_init_tpat_cpu(sc);
	bce_init_com_cpu(sc);
	bce_init_cp_cpu(sc);

	DBEXIT(BCE_VERBOSE_RESET);
}
/****************************************************************************/
/* Initialize context memory. */
/* */
/* Clears the memory associated with each Context ID (CID). */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_init_ctx(struct bce_softc *sc)
{
	u32 offset, val, vcid_addr;
	int i, j, rc, retry_cnt;

	/* Default to success; the 5706/5708 path cannot fail. */
	rc = 0;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);

	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		retry_cnt = CTX_INIT_RETRY_COUNT;

		DBPRINT(sc, BCE_INFO_CTX, "Initializing 5709 context.\n");

		/*
		 * BCM5709 context memory may be cached
		 * in host memory so prepare the host memory
		 * for access.
		 */
		/*
		 * NOTE(review): (1 << 12) is an undocumented command bit
		 * here — confirm its meaning against the 5709 programming
		 * reference.  Bits 16+ encode the host page size.
		 */
		val = BCE_CTX_COMMAND_ENABLED |
		    BCE_CTX_COMMAND_MEM_INIT | (1 << 12);
		val |= (BCM_PAGE_BITS - 8) << 16;
		REG_WR(sc, BCE_CTX_COMMAND, val);

		/* Wait for mem init command to complete. */
		for (i = 0; i < retry_cnt; i++) {
			val = REG_RD(sc, BCE_CTX_COMMAND);
			if (!(val & BCE_CTX_COMMAND_MEM_INIT))
				break;
			DELAY(2);
		}

		/* Timed out: MEM_INIT never self-cleared. */
		if ((val & BCE_CTX_COMMAND_MEM_INIT) != 0) {
			BCE_PRINTF("%s(): Context memory initialization failed!\n",
			    __FUNCTION__);
			rc = EBUSY;
			goto init_ctx_fail;
		}

		/* Point each page-table entry at a host context page. */
		for (i = 0; i < sc->ctx_pages; i++) {
			/* Set the physical address of the context memory. */
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
			    BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
			    BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
			    BCE_ADDR_HI(sc->ctx_paddr[i]));
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL, i |
			    BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);

			/* Verify the context memory write was successful. */
			for (j = 0; j < retry_cnt; j++) {
				val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
				if ((val &
				    BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
					break;
				DELAY(5);
			}

			/* Timed out: WRITE_REQ never self-cleared. */
			if ((val & BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) != 0) {
				BCE_PRINTF("%s(): Failed to initialize "
				    "context page %d!\n", __FUNCTION__, i);
				rc = EBUSY;
				goto init_ctx_fail;
			}
		}
	} else {
		DBPRINT(sc, BCE_INFO, "Initializing 5706/5708 context.\n");

		/*
		 * For the 5706/5708, context memory is local to
		 * the controller, so initialize the controller
		 * context memory.
		 */
		/* Walk downward from CID 96 and zero each context. */
		vcid_addr = GET_CID_ADDR(96);
		while (vcid_addr) {
			vcid_addr -= PHY_CTX_SIZE;

			REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);

			for(offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
				CTX_WR(sc, 0x00, offset, 0);
			}

			REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
		}
	}

	/* Despite the label name, this is also the normal exit path. */
init_ctx_fail:
	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
	return (rc);
}
/****************************************************************************/
/* Fetch the permanent MAC address of the controller. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_get_mac_addr(struct bce_softc *sc)
{
	u32 mac_hi, mac_lo;

	DBENTER(BCE_VERBOSE_RESET);

	/*
	 * The NetXtreme II bootcode populates various NIC
	 * power-on and runtime configuration items in a
	 * shared memory area.  The factory configured MAC
	 * address is available from both NVRAM and the
	 * shared memory area so we'll read the value from
	 * shared memory for speed.
	 */
	mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER);
	mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER);

	if (mac_lo == 0 && mac_hi == 0) {
		/* An all-zero address is not usable; leave eaddr alone. */
		BCE_PRINTF("%s(%d): Invalid Ethernet address!\n",
		    __FILE__, __LINE__);
	} else {
		/* Bytes 0-1 come from the upper word, 2-5 from the lower. */
		sc->eaddr[0] = (u_char)(mac_hi >> 8);
		sc->eaddr[1] = (u_char)(mac_hi);
		sc->eaddr[2] = (u_char)(mac_lo >> 24);
		sc->eaddr[3] = (u_char)(mac_lo >> 16);
		sc->eaddr[4] = (u_char)(mac_lo >> 8);
		sc->eaddr[5] = (u_char)(mac_lo);
	}

	DBPRINT(sc, BCE_INFO_MISC, "Permanent Ethernet "
	    "address = %6D\n", sc->eaddr, ":");

	DBEXIT(BCE_VERBOSE_RESET);
}
/****************************************************************************/
/* Program the MAC address. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_set_mac_addr(struct bce_softc *sc)
{
u32 val;
u8 *mac_addr = sc->eaddr;
/* ToDo: Add support for setting multiple MAC addresses. */
DBENTER(BCE_VERBOSE_RESET);
DBPRINT(sc, BCE_INFO_MISC, "Setting Ethernet address = "
"%6D\n", sc->eaddr, ":");
val = (mac_addr[0] << 8) | mac_addr[1];
REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
(mac_addr[4] << 8) | mac_addr[5];
REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
DBEXIT(BCE_VERBOSE_RESET);
}
/****************************************************************************/
/* Stop the controller. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_stop(struct bce_softc *sc)
{
	if_t ifp;

	DBENTER(BCE_VERBOSE_RESET);

	/* Caller must hold the driver lock; we tear down shared state. */
	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Cancel the periodic tick callout. */
	callout_stop(&sc->bce_tick_callout);

	/* Disable the transmit/receive blocks. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
	/* Read back to flush the write, then let the blocks settle. */
	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(20);

	bce_disable_intr(sc);

	/* Free RX buffers. */
	if (bce_hdr_split == TRUE) {
		/* Header-split mode also maintains a page chain. */
		bce_free_pg_chain(sc);
	}
	bce_free_rx_chain(sc);

	/* Free TX buffers. */
	bce_free_tx_chain(sc);

	sc->watchdog_timer = 0;

	sc->bce_link_up = FALSE;

	/* Mark the interface down so the stack stops queueing packets. */
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));

	DBEXIT(BCE_VERBOSE_RESET);
}
/****************************************************************************/
/* Reset the controller.                                                    */
/*                                                                          */
/* Notifies the firmware of the impending reset (reset_code), performs the */
/* chip/core reset, then waits for the firmware to re-initialize.  EMAC    */
/* link-mode bits are preserved across the reset for ASF/IPMI.             */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_reset(struct bce_softc *sc, u32 reset_code)
{
	u32 emac_mode_save, val;
	int i, rc = 0;
	static const u32 emac_mode_mask = BCE_EMAC_MODE_PORT |
	    BCE_EMAC_MODE_HALF_DUPLEX | BCE_EMAC_MODE_25G;

	DBENTER(BCE_VERBOSE_RESET);

	DBPRINT(sc, BCE_VERBOSE_RESET, "%s(): reset_code = 0x%08X\n",
	    __FUNCTION__, reset_code);

	/*
	 * If ASF/IPMI is operational, then the EMAC Mode register already
	 * contains appropriate values for the link settings that have
	 * been auto-negotiated.  Resetting the chip will clobber those
	 * values.  Save the important bits so we can restore them after
	 * the reset.
	 */
	emac_mode_save = REG_RD(sc, BCE_EMAC_MODE) & emac_mode_mask;

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
	    BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	    BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	    BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	    BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	/* Read back to flush the disable before the settle delay. */
	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Disable DMA */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
		val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
	}

	/* Assume bootcode is running. */
	sc->bce_fw_timed_out = FALSE;
	sc->bce_drv_cardiac_arrest = FALSE;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc)
		goto bce_reset_exit;

	/* Set a firmware reminder that this is a soft reset. */
	bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE, BCE_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BCE_MISC_ID);

	/* Chip reset. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		/* 5709 uses a dedicated software-reset command register. */
		REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
		REG_RD(sc, BCE_MISC_COMMAND);
		DELAY(5);

		/* Re-establish register-window access after the reset. */
		val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
	} else {
		/* 5706/5708 request core reset via PCI config space. */
		val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
		REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);

		/* Allow up to 30us for reset to complete. */
		for (i = 0; i < 10; i++) {
			val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
			if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
				break;
			}
			DELAY(10);
		}

		/* Check that reset completed successfully. */
		if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			BCE_PRINTF("%s(%d): Reset failed!\n",
			    __FILE__, __LINE__);
			rc = EBUSY;
			goto bce_reset_exit;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		BCE_PRINTF("%s(%d): Byte swap is incorrect!\n",
		    __FILE__, __LINE__);
		rc = ENODEV;
		goto bce_reset_exit;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bce_fw_timed_out = FALSE;
	sc->bce_drv_cardiac_arrest = FALSE;

	/* Wait for the firmware to finish its initialization. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc)
		BCE_PRINTF("%s(%d): Firmware did not complete "
		    "initialization!\n", __FILE__, __LINE__);

	/* Get firmware capabilities. */
	bce_fw_cap_init(sc);

bce_reset_exit:
	/* Restore EMAC Mode bits needed to keep ASF/IPMI running. */
	if (reset_code == BCE_DRV_MSG_CODE_RESET) {
		val = REG_RD(sc, BCE_EMAC_MODE);
		val = (val & ~emac_mode_mask) | emac_mode_save;
		REG_WR(sc, BCE_EMAC_MODE, val);
	}

	DBEXIT(BCE_VERBOSE_RESET);
	return (rc);
}
/****************************************************************************/
/* First-stage chip initialization after reset.                             */
/*                                                                          */
/* Configures DMA, initializes context memory, loads the on-chip CPU       */
/* firmware, prepares NVRAM access and sets up the MQ/RV2P/TBDR blocks.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_chipinit(struct bce_softc *sc)
{
	u32 val;
	int rc = 0;

	DBENTER(BCE_VERBOSE_RESET);

	bce_disable_intr(sc);

	/*
	 * Initialize DMA byte/word swapping, configure the number of DMA
	 * channels and PCI clock compensation delay.
	 */
	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
	    BCE_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
	    BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	    BCE_DMA_CONFIG_CNTL_WORD_SWAP |
	    DMA_READ_CHANS << 12 |
	    DMA_WRITE_CHANS << 16;

	/* NOTE(review): (0x2 << 20) — undocumented field; confirm meaning. */
	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;

	if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;

	/*
	 * This setting resolves a problem observed on certain Intel PCI
	 * chipsets that cannot handle multiple outstanding DMA operations.
	 * See errata E9_5706A1_65.
	 */
	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
	    (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
	    !(sc->bce_flags & BCE_PCIX_FLAG))
		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(sc, BCE_DMA_CONFIG, val);

	/* Enable the RX_V2P and Context state machines before access. */
	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
	    BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	    BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	    BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. */
	if ((rc = bce_init_ctx(sc)) != 0)
		goto bce_chipinit_exit;

	/* Initialize the on-boards CPUs */
	bce_init_cpus(sc);

	/* Enable management frames (NC-SI) to flow to the MCP. */
	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
		val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) | BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
		REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
	}

	/* Prepare NVRAM for access. */
	if ((rc = bce_init_nvram(sc)) != 0)
		goto bce_chipinit_exit;

	/* Set the kernel bypass block size */
	val = REG_RD(sc, BCE_MQ_CONFIG);
	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;

	/* Enable bins used on the 5709. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
		/* A1 stepping needs the halt workaround enabled. */
		if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
			val |= BCE_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(sc, BCE_MQ_CONFIG, val);

	/* The kernel-bypass window starts just past the mailbox area. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);

	/* Set the page size and clear the RV2P processor stall bits. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BCE_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(sc, BCE_TBDR_CONFIG);
	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
	/* NOTE(review): trailing 0x40 bit is undocumented here — confirm. */
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(sc, BCE_TBDR_CONFIG, val);

	/* Set the perfect match control register to default. */
	REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);

bce_chipinit_exit:
	DBEXIT(BCE_VERBOSE_RESET);

	return(rc);
}
/****************************************************************************/
/* Initialize the controller in preparation to send/receive traffic. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_blockinit(struct bce_softc *sc)
{
	u32 reg, val;
	int rc = 0;

	DBENTER(BCE_VERBOSE_RESET);

	/* Load the hardware default MAC address. */
	bce_set_mac_addr(sc);

	/*
	 * Set the Ethernet backoff seed value.  The fold of address bytes
	 * (some unshifted) matches the reference driver's seed formula.
	 */
	val = sc->eaddr[0] + (sc->eaddr[1] << 8) +
	    (sc->eaddr[2] << 16) + (sc->eaddr[3]        ) +
	    (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);

	sc->last_status_idx = 0;
	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;

	/* Set up link change interrupt generation. */
	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);

	/* Program the physical address of the status block. */
	REG_WR(sc, BCE_HC_STATUS_ADDR_L,
	    BCE_ADDR_LO(sc->status_block_paddr));
	REG_WR(sc, BCE_HC_STATUS_ADDR_H,
	    BCE_ADDR_HI(sc->status_block_paddr));

	/* Program the physical address of the statistics block. */
	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
	    BCE_ADDR_LO(sc->stats_block_paddr));
	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
	    BCE_ADDR_HI(sc->stats_block_paddr));

	/*
	 * Program various host coalescing parameters.
	 * Trip points control how many BDs should be ready before generating
	 * an interrupt while ticks control how long a BD can sit in the chain
	 * before generating an interrupt.
	 */
	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
	    (sc->bce_tx_quick_cons_trip_int << 16) |
	    sc->bce_tx_quick_cons_trip);
	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
	    (sc->bce_rx_quick_cons_trip_int << 16) |
	    sc->bce_rx_quick_cons_trip);
	REG_WR(sc, BCE_HC_TX_TICKS,
	    (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
	REG_WR(sc, BCE_HC_RX_TICKS,
	    (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
	REG_WR(sc, BCE_HC_STATS_TICKS, sc->bce_stats_ticks & 0xffff00);
	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	/* Not used for L2. */
	REG_WR(sc, BCE_HC_COMP_PROD_TRIP, 0);
	REG_WR(sc, BCE_HC_COM_TICKS, 0);
	REG_WR(sc, BCE_HC_CMD_TICKS, 0);

	/* Configure the Host Coalescing block. */
	val = BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
	    BCE_HC_CONFIG_COLLECT_STATS;

#if 0
	/* ToDo: Add MSI-X support. */
	if (sc->bce_flags & BCE_USING_MSIX_FLAG) {
		u32 base = ((BCE_TX_VEC - 1) * BCE_HC_SB_CONFIG_SIZE) +
		    BCE_HC_SB_CONFIG_1;

		REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL);

		REG_WR(sc, base, BCE_HC_SB_CONFIG_1_TX_TMR_MODE |
		    BCE_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
		    (sc->tx_quick_cons_trip_int << 16) |
		    sc->tx_quick_cons_trip);

		REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
		    (sc->tx_ticks_int << 16) | sc->tx_ticks);

		val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
	}

	/*
	 * Tell the HC block to automatically set the
	 * INT_MASK bit after an MSI/MSI-X interrupt
	 * is generated so the driver doesn't have to.
	 */
	if (sc->bce_flags & BCE_ONE_SHOT_MSI_FLAG)
		val |= BCE_HC_CONFIG_ONE_SHOT;

	/* Set the MSI-X status blocks to 128 byte boundaries. */
	if (sc->bce_flags & BCE_USING_MSIX_FLAG)
		val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
#endif

	REG_WR(sc, BCE_HC_CONFIG, val);

	/* Clear the internal statistics counters. */
	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);

	/* Verify that bootcode is running. */
	reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE);

	DBRUNIF(DB_RANDOMTRUE(bootcode_running_failure_sim_control),
	    BCE_PRINTF("%s(%d): Simulating bootcode failure.\n",
	    __FILE__, __LINE__);
	    reg = 0);

	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
		/* Fixed format string: was "08%08X" (missing 'x'). */
		BCE_PRINTF("%s(%d): Bootcode not running! Found: 0x%08X, "
		    "Expected: 0x%08X\n", __FILE__, __LINE__,
		    (reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK),
		    BCE_DEV_INFO_SIGNATURE_MAGIC);
		rc = ENODEV;
		goto bce_blockinit_exit;
	}

	/* Enable DMA */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
		val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
	}

	/* Allow bootcode to apply additional fixes before enabling MAC. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 |
	    BCE_DRV_MSG_CODE_RESET);

	/* Enable link state change interrupt generation. */
	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Enable the RXP. */
	bce_start_rxp_cpu(sc);

	/* Disable management frames (NC-SI) from flowing to the MCP. */
	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
		val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) &
		    ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
		REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
	}

	/* Enable all remaining blocks in the MAC. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)
		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
		    BCE_MISC_ENABLE_DEFAULT_XI);
	else
		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
		    BCE_MISC_ENABLE_DEFAULT);

	/* Read back to flush the enables, then let the blocks settle. */
	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
	DELAY(20);

	/* Save the current host coalescing block settings. */
	sc->hc_command = REG_RD(sc, BCE_HC_COMMAND);

bce_blockinit_exit:
	DBEXIT(BCE_VERBOSE_RESET);

	return (rc);
}
/****************************************************************************/
/* Encapsulate an mbuf into the rx_bd chain. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_get_rx_buf(struct bce_softc *sc, u16 prod, u16 chain_prod, u32 *prod_bseq)
{
	bus_dma_segment_t segs[1];
	struct mbuf *m_new = NULL;
	struct rx_bd *rxbd;
	int nsegs, error, rc = 0;
#ifdef BCE_DEBUG
	u16 debug_chain_prod = chain_prod;
#endif

	DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);

	/* Make sure the inputs are valid. */
	DBRUNIF((chain_prod > MAX_RX_BD_ALLOC),
	    BCE_PRINTF("%s(%d): RX producer out of range: "
	    "0x%04X > 0x%04X\n", __FILE__, __LINE__,
	    chain_prod, (u16)MAX_RX_BD_ALLOC));

	DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, "
	    "chain_prod = 0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__,
	    prod, chain_prod, *prod_bseq);

	/* Update some debug statistic counters */
	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
	    sc->rx_low_watermark = sc->free_rx_bd);
	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd),
	    sc->rx_empty_count++);

	/* Simulate an mbuf allocation failure. */
	/* (Debug builds only; the goto lives inside the macro argument.) */
	DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control),
	    sc->mbuf_alloc_failed_count++;
	    sc->mbuf_alloc_failed_sim_count++;
	    rc = ENOBUFS;
	    goto bce_get_rx_buf_exit);

	/* This is a new mbuf allocation. */
	/* Header-split mode uses a plain header mbuf; the payload lives
	 * in the page chain.  Otherwise allocate one jumbo cluster. */
	if (bce_hdr_split == TRUE)
		MGETHDR(m_new, M_NOWAIT, MT_DATA);
	else
		m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
		    sc->rx_bd_mbuf_alloc_size);

	if (m_new == NULL) {
		sc->mbuf_alloc_failed_count++;
		rc = ENOBUFS;
		goto bce_get_rx_buf_exit;
	}

	DBRUN(sc->debug_rx_mbuf_alloc++);

	/* Make sure we have a valid packet header. */
	M_ASSERTPKTHDR(m_new);

	/* Initialize the mbuf size and pad if necessary for alignment. */
	m_new->m_pkthdr.len = m_new->m_len = sc->rx_bd_mbuf_alloc_size;
	m_adj(m_new, sc->rx_bd_mbuf_align_pad);

	/* ToDo: Consider calling m_fragment() to test error handling. */

	/* Map the mbuf cluster into device memory. */
	error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag,
	    sc->rx_mbuf_map[chain_prod], m_new, segs, &nsegs, BUS_DMA_NOWAIT);

	/* Handle any mapping errors. */
	if (error) {
		BCE_PRINTF("%s(%d): Error mapping mbuf into RX "
		    "chain (%d)!\n", __FILE__, __LINE__, error);

		sc->dma_map_addr_rx_failed_count++;
		m_freem(m_new);

		DBRUN(sc->debug_rx_mbuf_alloc--);

		rc = ENOBUFS;
		goto bce_get_rx_buf_exit;
	}

	/* All mbufs must map to a single segment. */
	KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!",
	    __FUNCTION__, nsegs));

	/* Setup the rx_bd for the segment. */
	rxbd = &sc->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)];

	/* Descriptor fields are little-endian on the wire. */
	rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[0].ds_addr));
	rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[0].ds_addr));
	rxbd->rx_bd_len = htole32(segs[0].ds_len);
	rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
	/* Advance the running byte-sequence count by the mapped length. */
	*prod_bseq += segs[0].ds_len;

	/* Save the mbuf and update our counter. */
	sc->rx_mbuf_ptr[chain_prod] = m_new;
	sc->free_rx_bd -= nsegs;

	DBRUNMSG(BCE_INSANE_RECV,
	    bce_dump_rx_mbuf_chain(sc, debug_chain_prod, nsegs));

	DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, "
	    "chain_prod = 0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__, prod,
	    chain_prod, *prod_bseq);

bce_get_rx_buf_exit:
	DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);

	return(rc);
}
/****************************************************************************/
/* Encapsulate an mbuf cluster into the page chain. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_get_pg_buf(struct bce_softc *sc, u16 prod, u16 prod_idx)
{
	bus_dma_segment_t segs[1];
	struct mbuf *m_new = NULL;
	struct rx_bd *pgbd;
	int error, nsegs, rc = 0;
#ifdef BCE_DEBUG
	u16 debug_prod_idx = prod_idx;
#endif

	DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);

	/* Make sure the inputs are valid. */
	DBRUNIF((prod_idx > MAX_PG_BD_ALLOC),
	    BCE_PRINTF("%s(%d): page producer out of range: "
	    "0x%04X > 0x%04X\n", __FILE__, __LINE__,
	    prod_idx, (u16)MAX_PG_BD_ALLOC));
	DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, "
	    "chain_prod = 0x%04X\n", __FUNCTION__, prod, prod_idx);

	/* Update counters if we've hit a new low or run out of pages. */
	DBRUNIF((sc->free_pg_bd < sc->pg_low_watermark),
	    sc->pg_low_watermark = sc->free_pg_bd);
	DBRUNIF((sc->free_pg_bd == sc->max_pg_bd), sc->pg_empty_count++);

	/* Simulate an mbuf allocation failure. */
	DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control),
	    sc->mbuf_alloc_failed_count++;
	    sc->mbuf_alloc_failed_sim_count++;
	    rc = ENOBUFS;
	    goto bce_get_pg_buf_exit);

	/* This is a new mbuf allocation. */
	m_new = m_getcl(M_NOWAIT, MT_DATA, 0);
	if (m_new == NULL) {
		sc->mbuf_alloc_failed_count++;
		rc = ENOBUFS;
		goto bce_get_pg_buf_exit;
	}

	DBRUN(sc->debug_pg_mbuf_alloc++);

	m_new->m_len = MCLBYTES;

	/* ToDo: Consider calling m_fragment() to test error handling. */

	/* Map the mbuf cluster into device memory. */
	error = bus_dmamap_load_mbuf_sg(sc->pg_mbuf_tag,
	    sc->pg_mbuf_map[prod_idx], m_new, segs, &nsegs, BUS_DMA_NOWAIT);

	/* Handle any mapping errors. */
	if (error) {
		/* Include the error code, as bce_get_rx_buf() does. */
		BCE_PRINTF("%s(%d): Error mapping mbuf into page chain "
		    "(%d)!\n", __FILE__, __LINE__, error);
		m_freem(m_new);
		DBRUN(sc->debug_pg_mbuf_alloc--);
		rc = ENOBUFS;
		goto bce_get_pg_buf_exit;
	}

	/* All mbufs must map to a single segment. */
	KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!",
	    __FUNCTION__, nsegs));

	/* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREREAD) here? */

	/*
	 * The page chain uses the same rx_bd data structure
	 * as the receive chain but doesn't require a byte sequence (bseq).
	 */
	pgbd = &sc->pg_bd_chain[PG_PAGE(prod_idx)][PG_IDX(prod_idx)];

	pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[0].ds_addr));
	pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[0].ds_addr));
	pgbd->rx_bd_len = htole32(MCLBYTES);
	pgbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);

	/* Save the mbuf and update our counter. */
	sc->pg_mbuf_ptr[prod_idx] = m_new;
	sc->free_pg_bd--;

	DBRUNMSG(BCE_INSANE_RECV,
	    bce_dump_pg_mbuf_chain(sc, debug_prod_idx, 1));
	DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, "
	    "prod_idx = 0x%04X\n", __FUNCTION__, prod, prod_idx);

bce_get_pg_buf_exit:
	DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);

	return(rc);
}
/****************************************************************************/
/* Initialize the TX context memory. */
/* */
/* Returns: */
/* Nothing */
/****************************************************************************/
static void
bce_init_tx_context(struct bce_softc *sc)
{
	u32 val;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
	/* Initialize the context ID for an L2 TX chain. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		/* The 5709 uses the "_XI" context register layout. */
		/* Set the CID type to support an L2 connection. */
		val = BCE_L2CTX_TX_TYPE_TYPE_L2_XI |
		    BCE_L2CTX_TX_TYPE_SIZE_L2_XI;
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE_XI, val);
		/*
		 * NOTE(review): (8 << 16) presumably encodes a BD count in
		 * the upper half of the command-type word — confirm against
		 * the NetXtreme II programming guide.
		 */
		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2_XI | (8 << 16);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_CMD_TYPE_XI, val);
		/* Point the hardware to the first page in the chain. */
		val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
		val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
	} else {
		/* Older controllers use the non-XI register layout. */
		/* Set the CID type to support an L2 connection. */
		val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val);
		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE, val);
		/* Point the hardware to the first page in the chain. */
		val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
		val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
	}
	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
}
/****************************************************************************/
/* Allocate memory and initialize the TX data structures. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_init_tx_chain(struct bce_softc *sc)
{
	struct tx_bd *txbd;
	int i, rc = 0;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD);

	/* Start with an empty chain: all producer/consumer state at zero. */
	sc->tx_prod = 0;
	sc->tx_cons = 0;
	sc->tx_prod_bseq = 0;
	sc->used_tx_bd = 0;
	sc->max_tx_bd = USABLE_TX_BD_ALLOC;
	DBRUN(sc->tx_hi_watermark = 0);
	DBRUN(sc->tx_full_count = 0);

	/*
	 * The NetXtreme II BD chain is a ring of chain pages: the final
	 * BD of every page holds the bus address of the next page, and
	 * the last page links back to the first.
	 */
	for (i = 0; i < sc->tx_pages; i++) {
		int next = (i == sc->tx_pages - 1) ? 0 : i + 1;

		/* The last entry of each page is the next-page pointer. */
		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
		txbd->tx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[next]));
		txbd->tx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[next]));
	}

	/* Tell the controller where the chain lives. */
	bce_init_tx_context(sc);

	DBRUNMSG(BCE_INSANE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD_ALLOC));
	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD);

	return (rc);
}
/****************************************************************************/
/* Free memory and clear the TX data structures. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_free_tx_chain(struct bce_softc *sc)
{
	int i;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD);

	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
	for (i = 0; i < MAX_TX_BD_AVAIL; i++) {
		if (sc->tx_mbuf_ptr[i] != NULL) {
			if (sc->tx_mbuf_map[i] != NULL)
				bus_dmamap_sync(sc->tx_mbuf_tag,
				    sc->tx_mbuf_map[i],
				    BUS_DMASYNC_POSTWRITE);
			m_freem(sc->tx_mbuf_ptr[i]);
			sc->tx_mbuf_ptr[i] = NULL;
			DBRUN(sc->debug_tx_mbuf_alloc--);
		}
	}

	/*
	 * Clear each TX chain page. Guard against pages that were never
	 * allocated (e.g. teardown after a failed attach), the same way
	 * bce_free_rx_chain() already does.
	 */
	for (i = 0; i < sc->tx_pages; i++)
		if (sc->tx_bd_chain[i] != NULL)
			bzero((char *)sc->tx_bd_chain[i],
			    BCE_TX_CHAIN_PAGE_SZ);

	sc->used_tx_bd = 0;

	/* Check if we lost any mbufs in the process. */
	DBRUNIF((sc->debug_tx_mbuf_alloc),
	    BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs "
	    "from tx chain!\n", __FILE__, __LINE__,
	    sc->debug_tx_mbuf_alloc));

	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD);
}
/****************************************************************************/
/* Initialize the RX context memory. */
/* */
/* Returns: */
/* Nothing */
/****************************************************************************/
static void
bce_init_rx_context(struct bce_softc *sc)
{
	u32 val;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX);
	/* Init the type, size, and BD cache levels for the RX context. */
	val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	    BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 |
	    (0x02 << BCE_L2CTX_RX_BD_PRE_READ_SHIFT);
	/*
	 * Set the level for generating pause frames
	 * when the number of available rx_bd's gets
	 * too low (the low watermark) and the level
	 * when pause frames can be stopped (the high
	 * watermark).
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Only use a low watermark when TX flow control is on. */
		if (sc->bce_flags & BCE_USING_TX_FLOW_CONTROL) {
			lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
		} else {
			lo_water = 0;
		}
		/* The low watermark must lie below the usable BD count. */
		if (lo_water >= USABLE_RX_BD_ALLOC) {
			lo_water = 0;
		}
		/* High watermark is one quarter of the usable RX BDs. */
		hi_water = USABLE_RX_BD_ALLOC / 4;
		/* Disable the low watermark unless it is below the high. */
		if (hi_water <= lo_water) {
			lo_water = 0;
		}
		/* Scale both values into hardware units (see *_SCALE). */
		lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
		hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;
		/* Clamp to the 4-bit high watermark field. */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= (lo_water << BCE_L2CTX_RX_LO_WATER_MARK_SHIFT) |
		    (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
	}
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_CTX_TYPE, val);
	/* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		val = REG_RD(sc, BCE_MQ_MAP_L2_5);
		REG_WR(sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
	}
	/* Point the hardware to the first page in the chain. */
	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_HI, val);
	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_LO, val);
	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX);
}
/****************************************************************************/
/* Allocate memory and initialize the RX data structures. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_init_rx_chain(struct bce_softc *sc)
{
	struct rx_bd *rxbd;
	int i, rc = 0;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);

	/* Start with an empty chain: all producer/consumer state at zero. */
	sc->rx_prod = 0;
	sc->rx_cons = 0;
	sc->rx_prod_bseq = 0;
	sc->free_rx_bd = USABLE_RX_BD_ALLOC;
	sc->max_rx_bd = USABLE_RX_BD_ALLOC;

	/*
	 * Link the chain pages into a ring: the last BD of every page
	 * points at the next page, and the final page wraps to the first.
	 */
	for (i = 0; i < sc->rx_pages; i++) {
		int next = (i == sc->rx_pages - 1) ? 0 : i + 1;

		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
		rxbd->rx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[next]));
		rxbd->rx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[next]));
	}

	/* Fill up the RX chain. */
	bce_fill_rx_chain(sc);

	DBRUN(sc->rx_low_watermark = USABLE_RX_BD_ALLOC);
	DBRUN(sc->rx_empty_count = 0);

	/* Hand the chain pages over to the device. */
	for (i = 0; i < sc->rx_pages; i++)
		bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i],
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Program the RX context registers. */
	bce_init_rx_context(sc);

	DBRUNMSG(BCE_EXTREME_RECV,
	    bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD_ALLOC));
	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);

	/* ToDo: Are there possible failure modes here? */

	return (rc);
}
/****************************************************************************/
/* Add mbufs to the RX chain until its full or an mbuf allocation error */
/* occurs. */
/* */
/* Returns: */
/* Nothing */
/****************************************************************************/
static void
bce_fill_rx_chain(struct bce_softc *sc)
{
	u16 prod, prod_idx;
	u32 prod_bseq;

	DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);

	/* Get the RX chain producer indices. */
	prod = sc->rx_prod;
	prod_bseq = sc->rx_prod_bseq;

	/* Keep filling the RX chain until it's full. */
	while (sc->free_rx_bd > 0) {
		prod_idx = RX_CHAIN_IDX(prod);
		if (bce_get_rx_buf(sc, prod, prod_idx, &prod_bseq)) {
			/* Bail out if we can't add an mbuf to the chain. */
			break;
		}
		prod = NEXT_RX_BD(prod);
	}

	/* Save the RX chain producer indices. */
	sc->rx_prod = prod;
	sc->rx_prod_bseq = prod_bseq;

	/*
	 * We should never end up pointing to a next page pointer.
	 * (Fixed: the debug printf previously referenced the undeclared
	 * identifier "rx_prod", breaking BCE_DEBUG builds; use the local
	 * "prod" instead.)
	 */
	DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
	    BCE_PRINTF("%s(): Invalid rx_prod value: 0x%04X\n",
	    __FUNCTION__, prod));

	/* Write the mailbox and tell the chip about the waiting rx_bd's. */
	REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX, prod);
	REG_WR(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ, prod_bseq);

	DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);
}
/****************************************************************************/
/* Free memory and clear the RX data structures. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_free_rx_chain(struct bce_softc *sc)
{
	int i;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);

	/* Release every mbuf still parked in the RX chain. */
	for (i = 0; i < MAX_RX_BD_AVAIL; i++) {
		if (sc->rx_mbuf_ptr[i] == NULL)
			continue;
		if (sc->rx_mbuf_map[i] != NULL)
			bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
			    BUS_DMASYNC_POSTREAD);
		m_freem(sc->rx_mbuf_ptr[i]);
		sc->rx_mbuf_ptr[i] = NULL;
		DBRUN(sc->debug_rx_mbuf_alloc--);
	}

	/* Zero any chain pages that were actually allocated. */
	for (i = 0; i < sc->rx_pages; i++) {
		if (sc->rx_bd_chain[i] != NULL)
			bzero((char *)sc->rx_bd_chain[i],
			    BCE_RX_CHAIN_PAGE_SZ);
	}

	sc->free_rx_bd = sc->max_rx_bd;

	/* Check if we lost any mbufs in the process. */
	DBRUNIF((sc->debug_rx_mbuf_alloc),
	    BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from rx chain!\n",
	    __FUNCTION__, sc->debug_rx_mbuf_alloc));

	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
}
/****************************************************************************/
/* Allocate memory and initialize the page data structures. */
/* Assumes that bce_init_rx_chain() has not already been called. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_init_pg_chain(struct bce_softc *sc)
{
	struct rx_bd *pgbd;
	int i, rc = 0;
	u32 val;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);

	/* Initialize the page producer and consumer indices. */
	sc->pg_prod = 0;
	sc->pg_cons = 0;
	sc->free_pg_bd = USABLE_PG_BD_ALLOC;
	sc->max_pg_bd = USABLE_PG_BD_ALLOC;
	DBRUN(sc->pg_low_watermark = sc->max_pg_bd);
	DBRUN(sc->pg_empty_count = 0);

	/* Initialize the page next pointer chain entries. */
	for (i = 0; i < sc->pg_pages; i++) {
		int j;

		/* The last BD of each page is the next-page pointer. */
		pgbd = &sc->pg_bd_chain[i][USABLE_PG_BD_PER_PAGE];
		/* Check if we've reached the last page. */
		if (i == (sc->pg_pages - 1))
			j = 0;
		else
			j = i + 1;
		/* Setup the chain page pointers. */
		pgbd->rx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(sc->pg_bd_chain_paddr[j]));
		pgbd->rx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j]));
	}

	/* Setup the MQ BIN mapping for host_pg_bidx. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)
		REG_WR(sc, BCE_MQ_MAP_L2_3, BCE_MQ_MAP_L2_3_DEFAULT);

	/*
	 * NOTE(review): BCE_L2CTX_RX_PG_BUF_SIZE is written twice — first
	 * cleared to 0, then programmed with the real value below.
	 * Presumably the clear is deliberate; confirm against the
	 * NetXtreme II programming guide before "simplifying" this.
	 */
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, 0);

	/* Configure the rx_bd and page chain mbuf cluster size. */
	val = (sc->rx_bd_mbuf_data_len << 16) | MCLBYTES;
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, val);

	/* Configure the context reserved for jumbo support. */
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_RBDC_KEY,
	    BCE_L2CTX_RX_RBDC_JUMBO_KEY);

	/* Point the hardware to the first page in the page chain. */
	val = BCE_ADDR_HI(sc->pg_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_HI, val);
	val = BCE_ADDR_LO(sc->pg_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_LO, val);

	/* Fill up the page chain. */
	bce_fill_pg_chain(sc);

	/* Hand the page chain pages over to the device. */
	for (i = 0; i < sc->pg_pages; i++) {
		bus_dmamap_sync(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i],
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	DBRUNMSG(BCE_EXTREME_RECV,
	    bce_dump_pg_chain(sc, 0, TOTAL_PG_BD_ALLOC));
	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);
	return(rc);
}
/****************************************************************************/
/* Add mbufs to the page chain until its full or an mbuf allocation error */
/* occurs. */
/* */
/* Returns: */
/* Nothing */
/****************************************************************************/
static void
bce_fill_pg_chain(struct bce_softc *sc)
{
	u16 prod, prod_idx;

	DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);

	/* Get the page chain producer index. */
	prod = sc->pg_prod;

	/* Keep filling the page chain until it's full. */
	while (sc->free_pg_bd > 0) {
		prod_idx = PG_CHAIN_IDX(prod);
		if (bce_get_pg_buf(sc, prod, prod_idx)) {
			/* Bail out if we can't add an mbuf to the chain. */
			break;
		}
		prod = NEXT_PG_BD(prod);
	}

	/* Save the page chain producer index. */
	sc->pg_prod = prod;

	/*
	 * We should never end up pointing to a next page pointer.
	 * (Fixed: the debug printf previously referenced the undeclared
	 * identifier "pg_prod", breaking BCE_DEBUG builds; use the local
	 * "prod" instead.)
	 */
	DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
	    BCE_PRINTF("%s(): Invalid pg_prod value: 0x%04X\n",
	    __FUNCTION__, prod));

	/*
	 * Write the mailbox and tell the chip about
	 * the new rx_bd's in the page chain.
	 */
	REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_PG_BDIDX,
	    prod);

	DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);
}
/****************************************************************************/
/* Free memory and clear the page chain data structures. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_free_pg_chain(struct bce_softc *sc)
{
	int i;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);

	/* Free any mbufs still in the mbuf page chain. */
	for (i = 0; i < MAX_PG_BD_AVAIL; i++) {
		if (sc->pg_mbuf_ptr[i] != NULL) {
			if (sc->pg_mbuf_map[i] != NULL)
				bus_dmamap_sync(sc->pg_mbuf_tag,
				    sc->pg_mbuf_map[i],
				    BUS_DMASYNC_POSTREAD);
			m_freem(sc->pg_mbuf_ptr[i]);
			sc->pg_mbuf_ptr[i] = NULL;
			DBRUN(sc->debug_pg_mbuf_alloc--);
		}
	}

	/*
	 * Clear each page chain page. Guard against pages that were never
	 * allocated (e.g. teardown after a failed attach), the same way
	 * bce_free_rx_chain() already does.
	 */
	for (i = 0; i < sc->pg_pages; i++)
		if (sc->pg_bd_chain[i] != NULL)
			bzero((char *)sc->pg_bd_chain[i],
			    BCE_PG_CHAIN_PAGE_SZ);

	sc->free_pg_bd = sc->max_pg_bd;

	/* Check if we lost any mbufs in the process. */
	DBRUNIF((sc->debug_pg_mbuf_alloc),
	    BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from page chain!\n",
	    __FUNCTION__, sc->debug_pg_mbuf_alloc));

	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
}
/*
 * Build the BCE_NETLINK_* advertisement word for IFM_AUTO from the link
 * settings the remote PHY firmware publishes in shared memory: mirror every
 * advertised speed, add pause capability if any full-duplex speed is
 * offered, and add the remote-app/wirespeed bits on copper ports.
 */
static u32
bce_get_rphy_link(struct bce_softc *sc)
{
	static const u32 half_bits[] = {
		BCE_NETLINK_SPEED_10HALF, BCE_NETLINK_SPEED_100HALF,
		BCE_NETLINK_SPEED_1000HALF, BCE_NETLINK_SPEED_2500HALF
	};
	static const u32 full_bits[] = {
		BCE_NETLINK_SPEED_10FULL, BCE_NETLINK_SPEED_100FULL,
		BCE_NETLINK_SPEED_1000FULL, BCE_NETLINK_SPEED_2500FULL
	};
	u32 advertise, link;
	int full_cnt, i;

	advertise = 0;
	full_cnt = 0;

	/* Fiber and copper ports publish their link word separately. */
	if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) != 0)
		link = bce_shmem_rd(sc, BCE_RPHY_SERDES_LINK);
	else
		link = bce_shmem_rd(sc, BCE_RPHY_COPPER_LINK);

	if (link & BCE_NETLINK_ANEG_ENB)
		advertise |= BCE_NETLINK_ANEG_ENB;

	/* Mirror each advertised speed; count full-duplex capabilities. */
	for (i = 0; i < (int)(sizeof(half_bits) / sizeof(half_bits[0])); i++) {
		if (link & half_bits[i])
			advertise |= half_bits[i];
		if (link & full_bits[i]) {
			advertise |= full_bits[i];
			full_cnt++;
		}
	}

	/* Any full-duplex speed implies pause-frame capability. */
	if (full_cnt != 0)
		advertise |= BCE_NETLINK_FC_PAUSE_SYM |
		    BCE_NETLINK_FC_PAUSE_ASYM;
	if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0)
		advertise |= BCE_NETLINK_PHY_APP_REMOTE |
		    BCE_NETLINK_ETH_AT_WIRESPEED;

	return (advertise);
}
/****************************************************************************/
/* Set media options. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_ifmedia_upd(if_t ifp)
{
	struct bce_softc *sc;
	int rc;

	sc = if_getsoftc(ifp);

	DBENTER(BCE_VERBOSE);

	/* All the real work happens in the locked variant. */
	BCE_LOCK(sc);
	rc = bce_ifmedia_upd_locked(ifp);
	BCE_UNLOCK(sc);

	DBEXIT(BCE_VERBOSE);
	return (rc);
}
/****************************************************************************/
/* Set media options. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_ifmedia_upd_locked(if_t ifp)
{
	struct bce_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii;
	struct mii_softc *miisc;
	struct ifmedia *ifm;
	u32 link;
	int error, fdx;

	DBENTER(BCE_VERBOSE_PHY);

	error = 0;
	BCE_LOCK_ASSERT(sc);

	/* Assume link down; it is re-evaluated from the actual media state. */
	sc->bce_link_up = FALSE;
	if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) {
		/*
		 * Remote-PHY controller: translate the requested ifmedia
		 * word into a BCE_NETLINK_* link word and hand it to the
		 * firmware via shared memory.
		 *
		 * NOTE(review): the early "return (EINVAL)" paths below skip
		 * the DBEXIT() trace at the bottom — harmless at runtime,
		 * but entry/exit traces will not balance in debug builds.
		 */
		ifm = &sc->bce_ifmedia;
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return (EINVAL);
		link = 0;
		/* Did the caller request full-duplex operation? */
		fdx = IFM_OPTIONS(ifm->ifm_media) & IFM_FDX;
		switch(IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			/*
			 * Check advertised link of remote PHY by reading
			 * BCE_RPHY_SERDES_LINK or BCE_RPHY_COPPER_LINK.
			 * Always use the same link type of remote PHY.
			 */
			link = bce_get_rphy_link(sc);
			break;
		case IFM_2500_SX:
			/* 2.5G needs a fiber port with 2.5G capability. */
			if ((sc->bce_phy_flags &
			    (BCE_PHY_REMOTE_PORT_FIBER_FLAG |
			    BCE_PHY_2_5G_CAPABLE_FLAG)) == 0)
				return (EINVAL);
			/*
			 * XXX
			 * Have to enable forced 2.5Gbps configuration.
			 */
			if (fdx != 0)
				link |= BCE_NETLINK_SPEED_2500FULL;
			else
				link |= BCE_NETLINK_SPEED_2500HALF;
			break;
		case IFM_1000_SX:
			/* 1000SX is only valid on a fiber port. */
			if ((sc->bce_phy_flags &
			    BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0)
				return (EINVAL);
			/*
			 * XXX
			 * Have to disable 2.5Gbps configuration.
			 */
			if (fdx != 0)
				link = BCE_NETLINK_SPEED_1000FULL;
			else
				link = BCE_NETLINK_SPEED_1000HALF;
			break;
		case IFM_1000_T:
			/* Twisted-pair media is invalid on a fiber port. */
			if (sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG)
				return (EINVAL);
			if (fdx != 0)
				link = BCE_NETLINK_SPEED_1000FULL;
			else
				link = BCE_NETLINK_SPEED_1000HALF;
			break;
		case IFM_100_TX:
			if (sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG)
				return (EINVAL);
			if (fdx != 0)
				link = BCE_NETLINK_SPEED_100FULL;
			else
				link = BCE_NETLINK_SPEED_100HALF;
			break;
		case IFM_10_T:
			if (sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG)
				return (EINVAL);
			if (fdx != 0)
				link = BCE_NETLINK_SPEED_10FULL;
			else
				link = BCE_NETLINK_SPEED_10HALF;
			break;
		default:
			return (EINVAL);
		}
		/* For forced (non-AUTO) media, add pause/wirespeed bits. */
		if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
			/*
			 * XXX
			 * Advertise pause capability for full-duplex media.
			 */
			if (fdx != 0)
				link |= BCE_NETLINK_FC_PAUSE_SYM |
				    BCE_NETLINK_FC_PAUSE_ASYM;
			if ((sc->bce_phy_flags &
			    BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0)
				link |= BCE_NETLINK_PHY_APP_REMOTE |
				    BCE_NETLINK_ETH_AT_WIRESPEED;
		}
		/* Pass the link word to the firmware and issue the command. */
		bce_shmem_wr(sc, BCE_MB_ARGS_0, link);
		error = bce_fw_sync(sc, BCE_DRV_MSG_CODE_CMD_SET_LINK);
	} else {
		/* Local PHY: reset each PHY and let mii(4) set the media. */
		mii = device_get_softc(sc->bce_miibus);
		/* Make sure the MII bus has been enumerated. */
		if (mii) {
			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
				PHY_RESET(miisc);
			error = mii_mediachg(mii);
		}
	}
	DBEXIT(BCE_VERBOSE_PHY);
	return (error);
}
/*
 * Report media status for a remote-PHY controller by translating the
 * firmware-maintained BCE_LINK_STATUS word into ifmedia words.
 * Called with the softc lock held.
 */
static void
bce_ifmedia_sts_rphy(struct bce_softc *sc, struct ifmediareq *ifmr)
{
	if_t ifp;
	u32 link;

	ifp = sc->bce_ifp;
	BCE_LOCK_ASSERT(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* Read the firmware-reported link status from shared memory. */
	link = bce_shmem_rd(sc, BCE_LINK_STATUS);
	/* XXX Handle heart beat status? */
	if ((link & BCE_LINK_STATUS_LINK_UP) != 0)
		ifmr->ifm_status |= IFM_ACTIVE;
	else {
		/* Link down: report IFM_NONE and a zero baudrate. */
		ifmr->ifm_active |= IFM_NONE;
		if_setbaudrate(ifp, 0);
		return;
	}

	/*
	 * Map the speed/duplex bits to ifmedia words. 1G reports 1000_T
	 * on copper ports and 1000_SX on fiber ports; 2.5G is only valid
	 * on fiber ports.
	 */
	switch (link & BCE_LINK_STATUS_SPEED_MASK) {
	case BCE_LINK_STATUS_10HALF:
		ifmr->ifm_active |= IFM_10_T | IFM_HDX;
		if_setbaudrate(ifp, IF_Mbps(10UL));
		break;
	case BCE_LINK_STATUS_10FULL:
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		if_setbaudrate(ifp, IF_Mbps(10UL));
		break;
	case BCE_LINK_STATUS_100HALF:
		ifmr->ifm_active |= IFM_100_TX | IFM_HDX;
		if_setbaudrate(ifp, IF_Mbps(100UL));
		break;
	case BCE_LINK_STATUS_100FULL:
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		if_setbaudrate(ifp, IF_Mbps(100UL));
		break;
	case BCE_LINK_STATUS_1000HALF:
		if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0)
			ifmr->ifm_active |= IFM_1000_T | IFM_HDX;
		else
			ifmr->ifm_active |= IFM_1000_SX | IFM_HDX;
		if_setbaudrate(ifp, IF_Mbps(1000UL));
		break;
	case BCE_LINK_STATUS_1000FULL:
		if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0)
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		else
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
		if_setbaudrate(ifp, IF_Mbps(1000UL));
		break;
	case BCE_LINK_STATUS_2500HALF:
		/* 2.5G on a copper port should not happen; report no media. */
		if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0) {
			ifmr->ifm_active |= IFM_NONE;
			return;
		} else
			ifmr->ifm_active |= IFM_2500_SX | IFM_HDX;
		if_setbaudrate(ifp, IF_Mbps(2500UL));
		break;
	case BCE_LINK_STATUS_2500FULL:
		if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0) {
			ifmr->ifm_active |= IFM_NONE;
			return;
		} else
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
		if_setbaudrate(ifp, IF_Mbps(2500UL));
		break;
	default:
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	/* Report the negotiated flow-control state. */
	if ((link & BCE_LINK_STATUS_RX_FC_ENABLED) != 0)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if ((link & BCE_LINK_STATUS_TX_FC_ENABLED) != 0)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
}
/****************************************************************************/
/* Reports current media status. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct bce_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);

	DBENTER(BCE_VERBOSE_PHY);

	BCE_LOCK(sc);

	/* Report nothing for an interface that is down. */
	if ((if_getflags(ifp) & IFF_UP) == 0) {
		BCE_UNLOCK(sc);
		return;
	}

	if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) {
		/* Remote PHY: status comes from firmware shared memory. */
		bce_ifmedia_sts_rphy(sc, ifmr);
	} else {
		/* Local PHY: let mii(4) poll the hardware. */
		mii = device_get_softc(sc->bce_miibus);
		mii_pollstat(mii);
		ifmr->ifm_active = mii->mii_media_active;
		ifmr->ifm_status = mii->mii_media_status;
	}

	BCE_UNLOCK(sc);

	DBEXIT(BCE_VERBOSE_PHY);
}
/****************************************************************************/
/* Handles PHY generated interrupt events. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_phy_intr(struct bce_softc *sc)
{
	u32 new_link_state, old_link_state;

	DBENTER(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR);

	DBRUN(sc->phy_interrupts++);

	/*
	 * The attention bits hold the current link state; the "ack" bits
	 * hold the state the driver last acknowledged. A mismatch means
	 * the link changed since the previous interrupt.
	 */
	new_link_state = sc->status_block->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = sc->status_block->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE;

	/* Handle any changes if the link state has changed. */
	if (new_link_state != old_link_state) {
		/* Update the status_attn_bits_ack field. */
		if (new_link_state) {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now UP.\n",
			    __FUNCTION__);
		} else {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now DOWN.\n",
			    __FUNCTION__);
		}

		/* Remote PHY: notify the network stack immediately. */
		if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) {
			if (new_link_state) {
				if (bootverbose)
					if_printf(sc->bce_ifp, "link UP\n");
				if_link_state_change(sc->bce_ifp,
				    LINK_STATE_UP);
			} else {
				if (bootverbose)
					if_printf(sc->bce_ifp, "link DOWN\n");
				if_link_state_change(sc->bce_ifp,
				    LINK_STATE_DOWN);
			}
		}
		/*
		 * Assume link is down and allow
		 * tick routine to update the state
		 * based on the actual media state.
		 */
		sc->bce_link_up = FALSE;
		/* Run the tick routine now instead of waiting for it. */
		callout_stop(&sc->bce_tick_callout);
		bce_tick(sc);
	}

	/* Acknowledge the link change interrupt. */
	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);

	DBEXIT(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR);
}
/****************************************************************************/
/* Reads the receive consumer value from the status block (skipping over */
/* chain page pointer if necessary). */
/* */
/* Returns: */
/* hw_cons */
/****************************************************************************/
static inline u16
bce_get_hw_rx_cons(struct bce_softc *sc)
{
	u16 cons;

	/* Make sure the status block snapshot is read after the barrier. */
	rmb();
	cons = sc->status_block->status_rx_quick_consumer_index0;

	/* Step over the next-page pointer entry at the end of each page. */
	if ((cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		cons++;

	return (cons);
}
/****************************************************************************/
/* Handles received frame interrupt events. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_rx_intr(struct bce_softc *sc)
{
if_t ifp = sc->bce_ifp;
struct l2_fhdr *l2fhdr;
struct ether_vlan_header *vh;
unsigned int pkt_len;
u16 sw_rx_cons, sw_rx_cons_idx, hw_rx_cons;
u32 status;
unsigned int rem_len;
u16 sw_pg_cons, sw_pg_cons_idx;
DBENTER(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
DBRUN(sc->interrupts_rx++);
DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): rx_prod = 0x%04X, "
"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
__FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
/* Prepare the RX chain pages to be accessed by the host CPU. */
for (int i = 0; i < sc->rx_pages; i++)
bus_dmamap_sync(sc->rx_bd_chain_tag,
sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTREAD);
/* Prepare the page chain pages to be accessed by the host CPU. */
if (bce_hdr_split == TRUE) {
for (int i = 0; i < sc->pg_pages; i++)
bus_dmamap_sync(sc->pg_bd_chain_tag,
sc->pg_bd_chain_map[i], BUS_DMASYNC_POSTREAD);
}
/* Get the hardware's view of the RX consumer index. */
hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
/* Get working copies of the driver's view of the consumer indices. */
sw_rx_cons = sc->rx_cons;
sw_pg_cons = sc->pg_cons;
/* Update some debug statistics counters */
DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
sc->rx_low_watermark = sc->free_rx_bd);
DBRUNIF((sc->free_rx_bd == sc->max_rx_bd),
sc->rx_empty_count++);
/* Scan through the receive chain as long as there is work to do */
/* ToDo: Consider setting a limit on the number of packets processed. */
rmb();
while (sw_rx_cons != hw_rx_cons) {
struct mbuf *m0;
/* Convert the producer/consumer indices to an actual rx_bd index. */
sw_rx_cons_idx = RX_CHAIN_IDX(sw_rx_cons);
/* Unmap the mbuf from DMA space. */
bus_dmamap_sync(sc->rx_mbuf_tag,
sc->rx_mbuf_map[sw_rx_cons_idx],
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->rx_mbuf_tag,
sc->rx_mbuf_map[sw_rx_cons_idx]);
/* Remove the mbuf from the RX chain. */
m0 = sc->rx_mbuf_ptr[sw_rx_cons_idx];
sc->rx_mbuf_ptr[sw_rx_cons_idx] = NULL;
DBRUN(sc->debug_rx_mbuf_alloc--);
sc->free_rx_bd++;
/*
* Frames received on the NetXteme II are prepended
* with an l2_fhdr structure which provides status
* information about the received frame (including
* VLAN tags and checksum info). The frames are
* also automatically adjusted to word align the IP
* header (i.e. two null bytes are inserted before
* the Ethernet header). As a result the data
* DMA'd by the controller into the mbuf looks
* like this:
*
* +---------+-----+---------------------+-----+
* | l2_fhdr | pad | packet data | FCS |
* +---------+-----+---------------------+-----+
*
* The l2_fhdr needs to be checked and skipped and
* the FCS needs to be stripped before sending the
* packet up the stack.
*/
l2fhdr = mtod(m0, struct l2_fhdr *);
/* Get the packet data + FCS length and the status. */
pkt_len = l2fhdr->l2_fhdr_pkt_len;
status = l2fhdr->l2_fhdr_status;
/*
* Skip over the l2_fhdr and pad, resulting in the
* following data in the mbuf:
* +---------------------+-----+
* | packet data | FCS |
* +---------------------+-----+
*/
m_adj(m0, sizeof(struct l2_fhdr) + ETHER_ALIGN);
/*
* When split header mode is used, an ethernet frame
* may be split across the receive chain and the
* page chain. If that occurs an mbuf cluster must be
* reassembled from the individual mbuf pieces.
*/
if (bce_hdr_split == TRUE) {
/*
* Check whether the received frame fits in a single
* mbuf or not (i.e. packet data + FCS <=
* sc->rx_bd_mbuf_data_len bytes).
*/
if (pkt_len > m0->m_len) {
/*
* The received frame is larger than a single mbuf.
* If the frame was a TCP frame then only the TCP
* header is placed in the mbuf, the remaining
* payload (including FCS) is placed in the page
* chain, the SPLIT flag is set, and the header
* length is placed in the IP checksum field.
* If the frame is not a TCP frame then the mbuf
* is filled and the remaining bytes are placed
* in the page chain.
*/
DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a large "
"packet.\n", __FUNCTION__);
DBRUN(sc->split_header_frames_rcvd++);
/*
* When the page chain is enabled and the TCP
* header has been split from the TCP payload,
* the ip_xsum structure will reflect the length
* of the TCP header, not the IP checksum. Set
* the packet length of the mbuf accordingly.
*/
if (status & L2_FHDR_STATUS_SPLIT) {
m0->m_len = l2fhdr->l2_fhdr_ip_xsum;
DBRUN(sc->split_header_tcp_frames_rcvd++);
}
rem_len = pkt_len - m0->m_len;
/* Pull mbufs off the page chain for any remaining data. */
while (rem_len > 0) {
struct mbuf *m_pg;
sw_pg_cons_idx = PG_CHAIN_IDX(sw_pg_cons);
/* Remove the mbuf from the page chain. */
m_pg = sc->pg_mbuf_ptr[sw_pg_cons_idx];
sc->pg_mbuf_ptr[sw_pg_cons_idx] = NULL;
DBRUN(sc->debug_pg_mbuf_alloc--);
sc->free_pg_bd++;
/* Unmap the page chain mbuf from DMA space. */
bus_dmamap_sync(sc->pg_mbuf_tag,
sc->pg_mbuf_map[sw_pg_cons_idx],
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->pg_mbuf_tag,
sc->pg_mbuf_map[sw_pg_cons_idx]);
/* Adjust the mbuf length. */
if (rem_len < m_pg->m_len) {
/* The mbuf chain is complete. */
m_pg->m_len = rem_len;
rem_len = 0;
} else {
/* More packet data is waiting. */
rem_len -= m_pg->m_len;
}
/* Concatenate the mbuf cluster to the mbuf. */
m_cat(m0, m_pg);
sw_pg_cons = NEXT_PG_BD(sw_pg_cons);
}
/* Set the total packet length. */
m0->m_pkthdr.len = pkt_len;
} else {
/*
* The received packet is small and fits in a
* single mbuf (i.e. the l2_fhdr + pad + packet +
* FCS <= MHLEN). In other words, the packet is
* 154 bytes or less in size.
*/
DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a small "
"packet.\n", __FUNCTION__);
/* Set the total packet length. */
m0->m_pkthdr.len = m0->m_len = pkt_len;
}
} else
/* Set the total packet length. */
m0->m_pkthdr.len = m0->m_len = pkt_len;
/* Remove the trailing Ethernet FCS. */
m_adj(m0, -ETHER_CRC_LEN);
/* Check that the resulting mbuf chain is valid. */
DBRUN(m_sanity(m0, FALSE));
DBRUNIF(((m0->m_len < ETHER_HDR_LEN) |
(m0->m_pkthdr.len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)),
BCE_PRINTF("Invalid Ethernet frame size!\n");
m_print(m0, 128));
DBRUNIF(DB_RANDOMTRUE(l2fhdr_error_sim_control),
sc->l2fhdr_error_sim_count++;
status = status | L2_FHDR_ERRORS_PHY_DECODE);
/* Check the received frame for errors. */
if (status & (L2_FHDR_ERRORS_BAD_CRC |
L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
L2_FHDR_ERRORS_TOO_SHORT | L2_FHDR_ERRORS_GIANT_FRAME)) {
/* Log the error and release the mbuf. */
sc->l2fhdr_error_count++;
m_freem(m0);
m0 = NULL;
goto bce_rx_intr_next_rx;
}
/* Send the packet to the appropriate interface. */
m0->m_pkthdr.rcvif = ifp;
/* Assume no hardware checksum. */
m0->m_pkthdr.csum_flags = 0;
/* Validate the checksum if offload enabled. */
if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
/* Check for an IP datagram. */
if (!(status & L2_FHDR_STATUS_SPLIT) &&
(status & L2_FHDR_STATUS_IP_DATAGRAM)) {
m0->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
DBRUN(sc->csum_offload_ip++);
/* Check if the IP checksum is valid. */
if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
m0->m_pkthdr.csum_flags |=
CSUM_IP_VALID;
}
/* Check for a valid TCP/UDP frame. */
if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
L2_FHDR_STATUS_UDP_DATAGRAM)) {
/* Check for a good TCP/UDP checksum. */
if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
DBRUN(sc->csum_offload_tcp_udp++);
m0->m_pkthdr.csum_data =
l2fhdr->l2_fhdr_tcp_udp_xsum;
m0->m_pkthdr.csum_flags |=
(CSUM_DATA_VALID
| CSUM_PSEUDO_HDR);
}
}
}
/* Attach the VLAN tag. */
if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
!(sc->rx_mode & BCE_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
DBRUN(sc->vlan_tagged_frames_rcvd++);
if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
DBRUN(sc->vlan_tagged_frames_stripped++);
m0->m_pkthdr.ether_vtag =
l2fhdr->l2_fhdr_vlan_tag;
m0->m_flags |= M_VLANTAG;
} else {
/*
* bce(4) controllers can't disable VLAN
* tag stripping if management firmware
* (ASF/IPMI/UMP) is running. So we always
* strip VLAN tag and manually reconstruct
* the VLAN frame by appending stripped
* VLAN tag in driver if VLAN tag stripping
* was disabled.
*
* TODO: LLC SNAP handling.
*/
bcopy(mtod(m0, uint8_t *),
mtod(m0, uint8_t *) - ETHER_VLAN_ENCAP_LEN,
ETHER_ADDR_LEN * 2);
m0->m_data -= ETHER_VLAN_ENCAP_LEN;
vh = mtod(m0, struct ether_vlan_header *);
vh->evl_encap_proto = htons(ETHERTYPE_VLAN);
vh->evl_tag = htons(l2fhdr->l2_fhdr_vlan_tag);
m0->m_pkthdr.len += ETHER_VLAN_ENCAP_LEN;
m0->m_len += ETHER_VLAN_ENCAP_LEN;
}
}
/* Increment received packet statistics. */
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
bce_rx_intr_next_rx:
sw_rx_cons = NEXT_RX_BD(sw_rx_cons);
/* If we have a packet, pass it up the stack */
if (m0) {
/* Make sure we don't lose our place when we release the lock. */
sc->rx_cons = sw_rx_cons;
sc->pg_cons = sw_pg_cons;
BCE_UNLOCK(sc);
if_input(ifp, m0);
BCE_LOCK(sc);
/* Recover our place. */
sw_rx_cons = sc->rx_cons;
sw_pg_cons = sc->pg_cons;
}
/* Refresh hw_cons to see if there's new work */
if (sw_rx_cons == hw_rx_cons)
hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
}
/* No new packets. Refill the page chain. */
if (bce_hdr_split == TRUE) {
sc->pg_cons = sw_pg_cons;
bce_fill_pg_chain(sc);
}
/* No new packets. Refill the RX chain. */
sc->rx_cons = sw_rx_cons;
bce_fill_rx_chain(sc);
/* Prepare the page chain pages to be accessed by the NIC. */
for (int i = 0; i < sc->rx_pages; i++)
bus_dmamap_sync(sc->rx_bd_chain_tag,
sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
if (bce_hdr_split == TRUE) {
for (int i = 0; i < sc->pg_pages; i++)
bus_dmamap_sync(sc->pg_bd_chain_tag,
sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
}
DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): rx_prod = 0x%04X, "
"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
__FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
DBEXIT(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
}
/****************************************************************************/
/* Reads the transmit consumer value from the status block (skipping over */
/* chain page pointer if necessary). */
/* */
/* Returns: */
/* hw_cons */
/****************************************************************************/
static inline u16
bce_get_hw_tx_cons(struct bce_softc *sc)
{
	u16 cons;

	/* Order this status block read after any preceding DMA sync. */
	mb();

	cons = sc->status_block->status_tx_quick_consumer_index0;

	/*
	 * The last entry of each chain page holds a pointer to the next
	 * page rather than a usable tx_bd; step over that slot.
	 */
	if ((cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		cons++;

	return (cons);
}
/****************************************************************************/
/* Handles transmit completion interrupt events. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_tx_intr(struct bce_softc *sc)
{
	if_t ifp = sc->bce_ifp;
	u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;

	DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR);
	DBRUN(sc->interrupts_tx++);
	DBPRINT(sc, BCE_EXTREME_SEND, "%s(enter): tx_prod = 0x%04X, "
	    "tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq);

	/* Caller must hold the softc lock; we modify the TX chain state. */
	BCE_LOCK_ASSERT(sc);

	/* Get the hardware's view of the TX consumer index. */
	hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
	sw_tx_cons = sc->tx_cons;

	/* Prevent speculative reads of the status block. */
	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
	    BUS_SPACE_BARRIER_READ);

	/* Cycle through any completed TX chain page entries. */
	while (sw_tx_cons != hw_tx_cons) {
#ifdef BCE_DEBUG
		struct tx_bd *txbd = NULL;
#endif
		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);

		DBPRINT(sc, BCE_INFO_SEND,
		    "%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
		    "sw_tx_chain_cons = 0x%04X\n",
		    __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);

		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD_ALLOC),
		    BCE_PRINTF("%s(%d): TX chain consumer out of range! "
		    " 0x%04X > 0x%04X\n", __FILE__, __LINE__, sw_tx_chain_cons,
		    (int) MAX_TX_BD_ALLOC);
		    bce_breakpoint(sc));

		DBRUN(txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
		    [TX_IDX(sw_tx_chain_cons)]);

		DBRUNIF((txbd == NULL),
		    BCE_PRINTF("%s(%d): Unexpected NULL tx_bd[0x%04X]!\n",
		    __FILE__, __LINE__, sw_tx_chain_cons);
		    bce_breakpoint(sc));

		DBRUNMSG(BCE_INFO_SEND, BCE_PRINTF("%s(): ", __FUNCTION__);
		    bce_dump_txbd(sc, sw_tx_chain_cons, txbd));

		/*
		 * Free the associated mbuf. Remember
		 * that only the last tx_bd of a packet
		 * has an mbuf pointer and DMA map.
		 */
		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
			/* Validate that this is the last tx_bd. */
			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
			    BCE_PRINTF("%s(%d): tx_bd END flag not set but "
			    "txmbuf == NULL!\n", __FILE__, __LINE__);
			    bce_breakpoint(sc));

			DBRUNMSG(BCE_INFO_SEND,
			    BCE_PRINTF("%s(): Unloading map/freeing mbuf "
			    "from tx_bd[0x%04X]\n", __FUNCTION__,
			    sw_tx_chain_cons));

			/* Unmap the mbuf. */
			bus_dmamap_unload(sc->tx_mbuf_tag,
			    sc->tx_mbuf_map[sw_tx_chain_cons]);

			/* Free the mbuf. */
			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
			DBRUN(sc->debug_tx_mbuf_alloc--);

			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}

		sc->used_tx_bd--;
		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);

		/* Refresh hw_cons to see if there's new work. */
		hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);

		/* Prevent speculative reads of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);
	}

	/* Clear the TX timeout timer. */
	sc->watchdog_timer = 0;

	/* Clear the tx hardware queue full flag. */
	if (sc->used_tx_bd < sc->max_tx_bd) {
		DBRUNIF((if_getdrvflags(ifp) & IFF_DRV_OACTIVE),
		    DBPRINT(sc, BCE_INFO_SEND,
		    "%s(): Open TX chain! %d/%d (used/total)\n",
		    __FUNCTION__, sc->used_tx_bd, sc->max_tx_bd));
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}

	sc->tx_cons = sw_tx_cons;

	DBPRINT(sc, BCE_EXTREME_SEND, "%s(exit): tx_prod = 0x%04X, "
	    "tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq);

	DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR);
}
/****************************************************************************/
/* Disables interrupt generation. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_disable_intr(struct bce_softc *sc)
{
	DBENTER(BCE_VERBOSE_INTR);

	/* Mask interrupt generation at the controller. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	/* Read back to flush the posted write before returning. */
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);

	DBEXIT(BCE_VERBOSE_INTR);
}
/****************************************************************************/
/* Enables interrupt generation. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_enable_intr(struct bce_softc *sc, int coal_now)
{
	DBENTER(BCE_VERBOSE_INTR);

	/*
	 * Acknowledge up to the last processed status index while
	 * interrupts are still masked, then repeat the ack with the
	 * mask bit cleared to re-enable interrupt generation.
	 * NOTE(review): this two-write sequence appears required by
	 * the controller — do not reorder or merge the writes.
	 */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	    BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
	    BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);

	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	    BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/* Force an immediate interrupt (whether there is new data or not). */
	if (coal_now)
		REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);

	DBEXIT(BCE_VERBOSE_INTR);
}
/****************************************************************************/
/* Handles controller initialization. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_init_locked(struct bce_softc *sc)
{
	if_t ifp;
	u32 ether_mtu = 0;

	DBENTER(BCE_VERBOSE_RESET);

	/* Caller must hold the softc lock for the whole init sequence. */
	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Check if the driver is still running and bail out if it is. */
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		goto bce_init_locked_exit;

	/* Quiesce, reset and re-initialize the controller, in order. */
	bce_stop(sc);

	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
		BCE_PRINTF("%s(%d): Controller reset failed!\n",
		    __FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	if (bce_chipinit(sc)) {
		BCE_PRINTF("%s(%d): Controller initialization failed!\n",
		    __FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	if (bce_blockinit(sc)) {
		BCE_PRINTF("%s(%d): Block initialization failed!\n",
		    __FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	/* Load our MAC address. */
	bcopy(if_getlladdr(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
	bce_set_mac_addr(sc);

	/* Without header splitting the RX buffer size tracks the MTU. */
	if (bce_hdr_split == FALSE)
		bce_get_rx_buffer_sizes(sc, if_getmtu(ifp));

	/*
	 * Calculate and program the hardware Ethernet MTU
	 * size. Be generous on the receive if we have room
	 * and allowed by the user.
	 */
	if (bce_strict_rx_mtu == TRUE)
		ether_mtu = if_getmtu(ifp);
	else {
		if (bce_hdr_split == TRUE) {
			if (if_getmtu(ifp) <= sc->rx_bd_mbuf_data_len + MCLBYTES)
				ether_mtu = sc->rx_bd_mbuf_data_len +
				    MCLBYTES;
			else
				ether_mtu = if_getmtu(ifp);
		} else {
			if (if_getmtu(ifp) <= sc->rx_bd_mbuf_data_len)
				ether_mtu = sc->rx_bd_mbuf_data_len;
			else
				ether_mtu = if_getmtu(ifp);
		}
	}

	/* Account for the Ethernet header, a VLAN tag and the FCS. */
	ether_mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;

	DBPRINT(sc, BCE_INFO_MISC, "%s(): setting h/w mtu = %d\n",
	    __FUNCTION__, ether_mtu);

	/* Program the mtu, enabling jumbo frame support if necessary. */
	if (ether_mtu > (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN))
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
		    min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
		    BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
	else
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);

	/* Program appropriate promiscuous/multicast filtering. */
	bce_set_rx_mode(sc);

	if (bce_hdr_split == TRUE) {
		/* Init page buffer descriptor chain. */
		bce_init_pg_chain(sc);
	}

	/* Init RX buffer descriptor chain. */
	bce_init_rx_chain(sc);

	/* Init TX buffer descriptor chain. */
	bce_init_tx_chain(sc);

	/* Enable host interrupts. */
	bce_enable_intr(sc, 1);

	bce_ifmedia_upd_locked(ifp);

	/* Let the OS know the driver is up and running. */
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	/* Start the periodic tick timer (watchdog, stats, link checks). */
	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);

bce_init_locked_exit:
	DBEXIT(BCE_VERBOSE_RESET);
}
/****************************************************************************/
/* Initialize the controller just enough so that any management firmware */
/* running on the device will continue to operate correctly. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_mgmt_init_locked(struct bce_softc *sc)
{
	if_t ifp;

	DBENTER(BCE_VERBOSE_RESET);

	BCE_LOCK_ASSERT(sc);

	/* Bail out if management firmware is not running. */
	if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) {
		DBPRINT(sc, BCE_VERBOSE_SPECIAL,
		    "No management firmware running...\n");
		goto bce_mgmt_init_locked_exit;
	}

	ifp = sc->bce_ifp;

	/* Enable all critical blocks in the MAC. */
	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
	/* Read back to flush the posted write, then let the MAC settle. */
	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
	DELAY(20);

	/* Re-apply the current media settings. */
	bce_ifmedia_upd_locked(ifp);

bce_mgmt_init_locked_exit:
	DBEXIT(BCE_VERBOSE_RESET);
}
/****************************************************************************/
/* Handles controller initialization when called from an unlocked routine. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_init(void *xsc)
{
	struct bce_softc *sc;

	sc = xsc;

	DBENTER(BCE_VERBOSE_RESET);

	/* Take the softc lock and defer to the locked init path. */
	BCE_LOCK(sc);
	bce_init_locked(sc);
	BCE_UNLOCK(sc);

	DBEXIT(BCE_VERBOSE_RESET);
}
/****************************************************************************/
/* Modifies an mbuf for TSO on the hardware. */
/* */
/* Returns: */
/* Pointer to a modified mbuf. */
/****************************************************************************/
/*
 * Prepare an mbuf chain for TSO: make it writable, pull the Ethernet,
 * IP and TCP headers (including options) into the leading mbuf, clear
 * the IP length/checksum fields for the hardware, and encode the LSO
 * flag plus the IP+TCP option length into *flags.
 *
 * Returns the (possibly replaced) chain head on success, or NULL on
 * failure with *m_head set to NULL and the chain freed.
 */
static struct mbuf *
bce_tso_setup(struct bce_softc *sc, struct mbuf **m_head, u16 *flags)
{
	struct mbuf *m;
	struct ether_header *eh;
	struct ip *ip;
	struct tcphdr *th;
	u16 etype;
	int hdr_len __unused, ip_len __unused, ip_hlen = 0, tcp_hlen = 0;

	DBRUN(sc->tso_frames_requested++);

	ip_len = 0;

	/* Controller may modify mbuf chains. */
	if (M_WRITABLE(*m_head) == 0) {
		m = m_dup(*m_head, M_NOWAIT);
		m_freem(*m_head);
		if (m == NULL) {
			sc->mbuf_alloc_failed_count++;
			*m_head = NULL;
			return (NULL);
		}
		*m_head = m;
	}

	/*
	 * For TSO the controller needs two pieces of info,
	 * the MSS and the IP+TCP options length.
	 */
	m = m_pullup(*m_head, sizeof(struct ether_header) + sizeof(struct ip));
	if (m == NULL) {
		*m_head = NULL;
		return (NULL);
	}
	/*
	 * m_pullup() may allocate a new leading mbuf; keep the caller's
	 * head pointer in sync so later pullups and the final return do
	 * not reference a stale chain head.
	 */
	*m_head = m;
	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);

	/* Check for supported TSO Ethernet types (only IPv4 for now) */
	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(m->m_data + sizeof(struct ether_header));

		/* TSO only supported for TCP protocol. */
		if (ip->ip_p != IPPROTO_TCP) {
			BCE_PRINTF("%s(%d): TSO enabled for non-TCP frame!.\n",
			    __FILE__, __LINE__);
			m_freem(*m_head);
			*m_head = NULL;
			return (NULL);
		}

		/* Get IP header length in bytes (min 20) */
		ip_hlen = ip->ip_hl << 2;
		m = m_pullup(*m_head, sizeof(struct ether_header) + ip_hlen +
		    sizeof(struct tcphdr));
		if (m == NULL) {
			*m_head = NULL;
			return (NULL);
		}
		*m_head = m;

		/* Get the TCP header length in bytes (min 20) */
		ip = (struct ip *)(m->m_data + sizeof(struct ether_header));
		th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
		tcp_hlen = (th->th_off << 2);

		/* Make sure all IP/TCP options live in the same buffer. */
		m = m_pullup(*m_head, sizeof(struct ether_header)+ ip_hlen +
		    tcp_hlen);
		if (m == NULL) {
			*m_head = NULL;
			return (NULL);
		}
		*m_head = m;

		/* Clear IP header length and checksum, will be calc'd by h/w. */
		ip = (struct ip *)(m->m_data + sizeof(struct ether_header));
		ip_len = ip->ip_len;
		ip->ip_len = 0;
		ip->ip_sum = 0;
		break;
	case ETHERTYPE_IPV6:
		BCE_PRINTF("%s(%d): TSO over IPv6 not supported!.\n",
		    __FILE__, __LINE__);
		m_freem(*m_head);
		*m_head = NULL;
		return (NULL);
		/* NOT REACHED */
	default:
		BCE_PRINTF("%s(%d): TSO enabled for unsupported protocol!.\n",
		    __FILE__, __LINE__);
		m_freem(*m_head);
		*m_head = NULL;
		return (NULL);
	}

	hdr_len = sizeof(struct ether_header) + ip_hlen + tcp_hlen;

	DBPRINT(sc, BCE_EXTREME_SEND, "%s(): hdr_len = %d, e_hlen = %d, "
	    "ip_hlen = %d, tcp_hlen = %d, ip_len = %d\n",
	    __FUNCTION__, hdr_len, (int) sizeof(struct ether_header), ip_hlen,
	    tcp_hlen, ip_len);

	/* Set the LSO flag in the TX BD */
	*flags |= TX_BD_FLAGS_SW_LSO;

	/* Set the length of IP + TCP options (in 32 bit words) */
	*flags |= (((ip_hlen + tcp_hlen - sizeof(struct ip) -
	    sizeof(struct tcphdr)) >> 2) << 8);

	DBRUN(sc->tso_frames_completed++);

	return (*m_head);
}
/****************************************************************************/
/* Encapsultes an mbuf cluster into the tx_bd chain structure and makes the */
/* memory visible to the controller. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/* Modified: */
/* m_head: May be set to NULL if MBUF is excessively fragmented. */
/****************************************************************************/
static int
bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head)
{
	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
	bus_dmamap_t map;
	struct tx_bd *txbd = NULL;
	struct mbuf *m0;
	u16 prod, chain_prod, mss = 0, vlan_tag = 0, flags = 0;
	u32 prod_bseq;

#ifdef BCE_DEBUG
	u16 debug_prod;
#endif

	int i, error, nsegs, rc = 0;

	DBENTER(BCE_VERBOSE_SEND);

	/* Make sure we have room in the TX chain. */
	if (sc->used_tx_bd >= sc->max_tx_bd)
		goto bce_tx_encap_exit;

	/* Transfer any checksum offload flags to the bd. */
	m0 = *m_head;
	if (m0->m_pkthdr.csum_flags) {
		if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
			/* TSO setup may replace the mbuf chain. */
			m0 = bce_tso_setup(sc, m_head, &flags);
			if (m0 == NULL) {
				DBRUN(sc->tso_frames_failed++);
				goto bce_tx_encap_exit;
			}
			mss = htole16(m0->m_pkthdr.tso_segsz);
		} else {
			if (m0->m_pkthdr.csum_flags & CSUM_IP)
				flags |= TX_BD_FLAGS_IP_CKSUM;

			if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
				flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
		}
	}

	/* Transfer any VLAN tags to the bd. */
	if (m0->m_flags & M_VLANTAG) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = m0->m_pkthdr.ether_vtag;
	}

	/* Map the mbuf into DMAable memory. */
	prod = sc->tx_prod;
	chain_prod = TX_CHAIN_IDX(prod);
	map = sc->tx_mbuf_map[chain_prod];

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
	    segs, &nsegs, BUS_DMA_NOWAIT);

	/* Check if the DMA mapping was successful */
	if (error == EFBIG) {
		/* The mbuf chain has too many fragments for one mapping. */
		sc->mbuf_frag_count++;

		/* Try to defrag the mbuf. */
		m0 = m_collapse(*m_head, M_NOWAIT, BCE_MAX_SEGMENTS);
		if (m0 == NULL) {
			/* Defrag was unsuccessful */
			m_freem(*m_head);
			*m_head = NULL;
			sc->mbuf_alloc_failed_count++;
			rc = ENOBUFS;
			goto bce_tx_encap_exit;
		}

		/* Defrag was successful, try mapping again */
		*m_head = m0;
		error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag,
		    map, m0, segs, &nsegs, BUS_DMA_NOWAIT);

		/* Still getting an error after a defrag. */
		if (error == ENOMEM) {
			/* Insufficient DMA buffers available. */
			sc->dma_map_addr_tx_failed_count++;
			rc = error;
			goto bce_tx_encap_exit;
		} else if (error != 0) {
			/* Release it and return an error. */
			BCE_PRINTF("%s(%d): Unknown error mapping mbuf into "
			    "TX chain!\n", __FILE__, __LINE__);
			m_freem(m0);
			*m_head = NULL;
			sc->dma_map_addr_tx_failed_count++;
			rc = ENOBUFS;
			goto bce_tx_encap_exit;
		}
	} else if (error == ENOMEM) {
		/* Insufficient DMA buffers available. */
		sc->dma_map_addr_tx_failed_count++;
		rc = error;
		goto bce_tx_encap_exit;
	} else if (error != 0) {
		m_freem(m0);
		*m_head = NULL;
		sc->dma_map_addr_tx_failed_count++;
		rc = error;
		goto bce_tx_encap_exit;
	}

	/* Make sure there's room in the chain */
	if (nsegs > (sc->max_tx_bd - sc->used_tx_bd)) {
		bus_dmamap_unload(sc->tx_mbuf_tag, map);
		rc = ENOBUFS;
		goto bce_tx_encap_exit;
	}

	/* prod points to an empty tx_bd at this point. */
	prod_bseq = sc->tx_prod_bseq;

#ifdef BCE_DEBUG
	debug_prod = chain_prod;
#endif

	DBPRINT(sc, BCE_INFO_SEND,
	    "%s(start): prod = 0x%04X, chain_prod = 0x%04X, "
	    "prod_bseq = 0x%08X\n",
	    __FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd for
	 * the mbuf.
	 */
	for (i = 0; i < nsegs ; i++) {
		chain_prod = TX_CHAIN_IDX(prod);
		txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)]
		    [TX_IDX(chain_prod)];

		txbd->tx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(segs[i].ds_addr));
		txbd->tx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(segs[i].ds_addr));
		/*
		 * NOTE(review): mss was already converted with htole16()
		 * above and is shifted here inside htole32() while the
		 * length uses htole16() — on big-endian hosts the mixed
		 * conversions look inconsistent; confirm the intended
		 * layout of tx_bd_mss_nbytes against the chip spec.
		 */
		txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
		    htole16(segs[i].ds_len);
		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
		txbd->tx_bd_flags = htole16(flags);
		prod_bseq += segs[i].ds_len;
		if (i == 0)
			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
		prod = NEXT_TX_BD(prod);
	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);

	DBRUNMSG(BCE_EXTREME_SEND,
	    bce_dump_tx_chain(sc, debug_prod, nsegs));

	/*
	 * Ensure that the mbuf pointer for this transmission
	 * is placed at the array index of the last
	 * descriptor in this chain. This is done
	 * because a single map is used for all
	 * segments of the mbuf and we don't want to
	 * unload the map before all of the segments
	 * have been freed.
	 */
	sc->tx_mbuf_ptr[chain_prod] = m0;
	sc->used_tx_bd += nsegs;

	/* Update some debug statistic counters */
	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
	    sc->tx_hi_watermark = sc->used_tx_bd);
	DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
	DBRUNIF(sc->debug_tx_mbuf_alloc++);

	DBRUNMSG(BCE_EXTREME_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, 1));

	/* prod points to the next free tx_bd at this point. */
	sc->tx_prod = prod;
	sc->tx_prod_bseq = prod_bseq;

	/* Tell the chip about the waiting TX frames. */
	REG_WR16(sc, MB_GET_CID_ADDR(TX_CID) +
	    BCE_L2MQ_TX_HOST_BIDX, sc->tx_prod);
	REG_WR(sc, MB_GET_CID_ADDR(TX_CID) +
	    BCE_L2MQ_TX_HOST_BSEQ, sc->tx_prod_bseq);

bce_tx_encap_exit:
	DBEXIT(BCE_VERBOSE_SEND);

	return(rc);
}
/****************************************************************************/
/* Main transmit routine when called from another routine with a lock. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_start_locked(if_t ifp)
{
	struct bce_softc *sc = if_getsoftc(ifp);
	struct mbuf *m_head = NULL;
	int count = 0;
	u16 tx_prod, tx_chain_prod __unused;

	DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);

	BCE_LOCK_ASSERT(sc);

	/* prod points to the next free tx_bd. */
	tx_prod = sc->tx_prod;
	tx_chain_prod = TX_CHAIN_IDX(tx_prod);

	DBPRINT(sc, BCE_INFO_SEND,
	    "%s(enter): tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
	    "tx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);

	/* If there's no link or the transmit queue is empty then just exit. */
	if (sc->bce_link_up == FALSE) {
		DBPRINT(sc, BCE_INFO_SEND, "%s(): No link.\n",
		    __FUNCTION__);
		goto bce_start_locked_exit;
	}

	if (if_sendq_empty(ifp)) {
		DBPRINT(sc, BCE_INFO_SEND, "%s(): Transmit queue empty.\n",
		    __FUNCTION__);
		goto bce_start_locked_exit;
	}

	/*
	 * Keep adding entries while there is space in the ring.
	 */
	while (sc->used_tx_bd < sc->max_tx_bd) {
		/* Check for any frames to send. */
		m_head = if_dequeue(ifp);

		/* Stop when the transmit queue is empty. */
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, place the mbuf back at the
		 * head of the queue and set the OACTIVE flag
		 * to wait for the NIC to drain the chain.
		 */
		if (bce_tx_encap(sc, &m_head)) {
			/* m_head may have been freed on error (NULL). */
			if (m_head != NULL)
				if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			DBPRINT(sc, BCE_INFO_SEND,
			    "TX chain is closed for business! Total "
			    "tx_bd used = %d\n", sc->used_tx_bd);
			break;
		}

		count++;

		/* Send a copy of the frame to any BPF listeners. */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	/* Exit if no packets were dequeued. */
	if (count == 0) {
		DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were "
		    "dequeued\n", __FUNCTION__);
		goto bce_start_locked_exit;
	}

	DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): Inserted %d frames into "
	    "send queue.\n", __FUNCTION__, count);

	/* Set the tx timeout. */
	sc->watchdog_timer = BCE_TX_TIMEOUT;

	DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_ctx(sc, TX_CID));
	DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_mq_regs(sc));

bce_start_locked_exit:
	DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
}
/****************************************************************************/
/* Main transmit routine when called from another routine without a lock. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_start(if_t ifp)
{
	struct bce_softc *sc;

	sc = if_getsoftc(ifp);

	DBENTER(BCE_VERBOSE_SEND);

	/* Take the softc lock and defer to the locked transmit path. */
	BCE_LOCK(sc);
	bce_start_locked(ifp);
	BCE_UNLOCK(sc);

	DBEXIT(BCE_VERBOSE_SEND);
}
/****************************************************************************/
/* Handles any IOCTL calls from the operating system. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct bce_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int mask, error = 0;

	DBENTER(BCE_VERBOSE_MISC);

	switch(command) {
	/* Set the interface MTU. */
	case SIOCSIFMTU:
		/* Check that the MTU setting is supported. */
		if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
		    (ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
			error = EINVAL;
			break;
		}

		DBPRINT(sc, BCE_INFO_MISC,
		    "SIOCSIFMTU: Changing MTU from %d to %d\n",
		    (int) if_getmtu(ifp), (int) ifr->ifr_mtu);

		BCE_LOCK(sc);
		if_setmtu(ifp, ifr->ifr_mtu);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			/* Re-init so the hardware picks up the new MTU. */
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			bce_init_locked(sc);
		}
		BCE_UNLOCK(sc);
		break;

	/* Set interface flags. */
	case SIOCSIFFLAGS:
		DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Received SIOCSIFFLAGS\n");

		BCE_LOCK(sc);

		/* Check if the interface is up. */
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/* Change promiscuous/multicast flags as necessary. */
				bce_set_rx_mode(sc);
			} else {
				/* Start the HW */
				bce_init_locked(sc);
			}
		} else {
			/* The interface is down, check if driver is running. */
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				bce_stop(sc);

				/* If MFW is running, restart the controller a bit. */
				if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
					bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
					bce_chipinit(sc);
					bce_mgmt_init_locked(sc);
				}
			}
		}

		BCE_UNLOCK(sc);
		break;

	/* Add/Delete multicast address */
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		DBPRINT(sc, BCE_VERBOSE_MISC,
		    "Received SIOCADDMULTI/SIOCDELMULTI\n");

		BCE_LOCK(sc);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			bce_set_rx_mode(sc);
		BCE_UNLOCK(sc);

		break;

	/* Set/Get Interface media */
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		DBPRINT(sc, BCE_VERBOSE_MISC,
		    "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");
		/* Remote-capable PHYs are managed directly by the driver. */
		if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0)
			error = ifmedia_ioctl(ifp, ifr, &sc->bce_ifmedia,
			    command);
		else {
			mii = device_get_softc(sc->bce_miibus);
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
			    command);
		}
		break;

	/* Set interface capability */
	case SIOCSIFCAP:
		/* mask holds the capability bits that changed. */
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		DBPRINT(sc, BCE_INFO_MISC,
		    "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);

		/* Toggle the TX checksum capabilities enable flag. */
		if (mask & IFCAP_TXCSUM &&
		    if_getcapabilities(ifp) & IFCAP_TXCSUM) {
			if_togglecapenable(ifp, IFCAP_TXCSUM);
			if (IFCAP_TXCSUM & if_getcapenable(ifp))
				if_sethwassistbits(ifp, BCE_IF_HWASSIST, 0);
			else
				if_sethwassistbits(ifp, 0, BCE_IF_HWASSIST);
		}

		/* Toggle the RX checksum capabilities enable flag. */
		if (mask & IFCAP_RXCSUM &&
		    if_getcapabilities(ifp) & IFCAP_RXCSUM)
			if_togglecapenable(ifp, IFCAP_RXCSUM);

		/* Toggle the TSO capabilities enable flag. */
		if (bce_tso_enable && (mask & IFCAP_TSO4) &&
		    if_getcapabilities(ifp) & IFCAP_TSO4) {
			if_togglecapenable(ifp, IFCAP_TSO4);
			if (IFCAP_TSO4 & if_getcapenable(ifp))
				if_sethwassistbits(ifp, CSUM_TSO, 0);
			else
				if_sethwassistbits(ifp, 0, CSUM_TSO);
		}

		if (mask & IFCAP_VLAN_HWCSUM &&
		    if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM)
			if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);

		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);

		/*
		 * Don't actually disable VLAN tag stripping as
		 * management firmware (ASF/IPMI/UMP) requires the
		 * feature. If VLAN tag stripping is disabled driver
		 * will manually reconstruct the VLAN frame by
		 * appending stripped VLAN tag.
		 */
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING)) {
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
			/* HWTSO depends on HWTAGGING; drop it when off. */
			if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING)
			    == 0)
				if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO);
		}

		VLAN_CAPABILITIES(ifp);
		break;
	default:
		/* We don't know how to handle the IOCTL, pass it on. */
		error = ether_ioctl(ifp, command, data);
		break;
	}

	DBEXIT(BCE_VERBOSE_MISC);

	return(error);
}
/****************************************************************************/
/* Transmit timeout handler. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_watchdog(struct bce_softc *sc)
{
	uint32_t status;

	DBENTER(BCE_EXTREME_SEND);

	BCE_LOCK_ASSERT(sc);

	status = 0;
	/* If the watchdog timer hasn't expired then just exit. */
	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
		goto bce_watchdog_exit;

	status = REG_RD(sc, BCE_EMAC_RX_STATUS);
	/* If pause frames are active then don't reset the hardware. */
	if ((sc->bce_flags & BCE_USING_RX_FLOW_CONTROL) != 0) {
		if ((status & BCE_EMAC_RX_STATUS_FFED) != 0) {
			/*
			 * If link partner has us in XOFF state then wait for
			 * the condition to clear.
			 */
			sc->watchdog_timer = BCE_TX_TIMEOUT;
			goto bce_watchdog_exit;
		} else if ((status & BCE_EMAC_RX_STATUS_FF_RECEIVED) != 0 &&
		    (status & BCE_EMAC_RX_STATUS_N_RECEIVED) != 0) {
			/*
			 * If we're not currently XOFF'ed but have recently
			 * been XOFF'd/XON'd then assume that's delaying TX
			 * this time around.
			 */
			sc->watchdog_timer = BCE_TX_TIMEOUT;
			goto bce_watchdog_exit;
		}
		/*
		 * Any other condition is unexpected and the controller
		 * should be reset.
		 */
	}

	BCE_PRINTF("%s(%d): Watchdog timeout occurred, resetting!\n",
	    __FILE__, __LINE__);

	DBRUNMSG(BCE_INFO,
	    bce_dump_driver_state(sc);
	    bce_dump_status_block(sc);
	    bce_dump_stats_block(sc);
	    bce_dump_ftqs(sc);
	    bce_dump_txp_state(sc, 0);
	    bce_dump_rxp_state(sc, 0);
	    bce_dump_tpat_state(sc, 0);
	    bce_dump_cp_state(sc, 0);
	    bce_dump_com_state(sc, 0));

	DBRUN(bce_breakpoint(sc));

	/* Force a full re-initialization of the controller. */
	if_setdrvflagbits(sc->bce_ifp, 0, IFF_DRV_RUNNING);

	bce_init_locked(sc);
	sc->watchdog_timeouts++;

bce_watchdog_exit:
	/*
	 * NOTE(review): writes the previously read RX status value back
	 * (0 when the timer had not expired) — presumably this clears
	 * latched flow-control status bits; confirm against the register
	 * specification.
	 */
	REG_WR(sc, BCE_EMAC_RX_STATUS, status);
	DBEXIT(BCE_EXTREME_SEND);
}
/*
* Interrupt handler.
*/
/****************************************************************************/
/* Main interrupt entry point. Verifies that the controller generated the */
/* interrupt and then calls a separate routine for handle the various */
/* interrupt causes (PHY, TX, RX). */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_intr(void *xsc)
{
	struct bce_softc *sc;
	if_t ifp;
	u32 status_attn_bits;
	u16 hw_rx_cons, hw_tx_cons;

	sc = xsc;
	ifp = sc->bce_ifp;

	DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
	DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
	DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_stats_block(sc));

	BCE_LOCK(sc);

	DBRUN(sc->interrupts_generated++);

	/* Synchronize before we read from interface's status block */
	bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_POSTREAD);

	/*
	 * If the hardware status block index matches the last value read
	 * by the driver and we haven't asserted our interrupt then there's
	 * nothing to do. This may only happen in case of INTx due to the
	 * interrupt arriving at the CPU before the status block is updated.
	 */
	if ((sc->bce_flags & (BCE_USING_MSI_FLAG | BCE_USING_MSIX_FLAG)) == 0 &&
	    sc->status_block->status_idx == sc->last_status_idx &&
	    (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
	    BCE_PCICFG_MISC_STATUS_INTA_VALUE)) {
		DBPRINT(sc, BCE_VERBOSE_INTR, "%s(): Spurious interrupt.\n",
		    __FUNCTION__);
		goto bce_intr_exit;
	}

	/* Ack the interrupt and stop others from occurring. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	    BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	    BCE_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Check if the hardware has finished any work. */
	hw_rx_cons = bce_get_hw_rx_cons(sc);
	hw_tx_cons = bce_get_hw_tx_cons(sc);

	/* Keep processing data as long as there is work to do. */
	for (;;) {
		status_attn_bits = sc->status_block->status_attn_bits;

		/* Debug aid: randomly inject a fake attention condition. */
		DBRUNIF(DB_RANDOMTRUE(unexpected_attention_sim_control),
		    BCE_PRINTF("Simulating unexpected status attention "
		    "bit set.");
		    sc->unexpected_attention_sim_count++;
		    status_attn_bits = status_attn_bits |
		    STATUS_ATTN_BITS_PARITY_ERROR);

		/* Was it a link change interrupt? */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
		    (sc->status_block->status_attn_bits_ack &
		    STATUS_ATTN_BITS_LINK_STATE)) {
			bce_phy_intr(sc);

			/* Clear transient updates during link state change. */
			REG_WR(sc, BCE_HC_COMMAND, sc->hc_command |
			    BCE_HC_COMMAND_COAL_NOW_WO_INT);
			/* Read back to flush the posted write. */
			REG_RD(sc, BCE_HC_COMMAND);
		}

		/* If any other attention is asserted, the chip is toast. */
		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
		    (sc->status_block->status_attn_bits_ack &
		    ~STATUS_ATTN_BITS_LINK_STATE))) {
			sc->unexpected_attention_count++;

			BCE_PRINTF("%s(%d): Fatal attention detected: "
			    "0x%08X\n", __FILE__, __LINE__,
			    sc->status_block->status_attn_bits);

			DBRUNMSG(BCE_FATAL,
			    if (unexpected_attention_sim_control == 0)
				bce_breakpoint(sc));

			/* Reinitialize the controller and bail out. */
			bce_init_locked(sc);
			goto bce_intr_exit;
		}

		/* Check for any completed RX frames. */
		if (hw_rx_cons != sc->hw_rx_cons)
			bce_rx_intr(sc);

		/* Check for any completed TX frames. */
		if (hw_tx_cons != sc->hw_tx_cons)
			bce_tx_intr(sc);

		/* Save status block index value for the next interrupt. */
		sc->last_status_idx = sc->status_block->status_idx;

		/*
		 * Prevent speculative reads from getting
		 * ahead of the status block.
		 */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);

		/*
		 * If there's no work left then exit the
		 * interrupt service routine.
		 */
		hw_rx_cons = bce_get_hw_rx_cons(sc);
		hw_tx_cons = bce_get_hw_tx_cons(sc);

		if ((hw_rx_cons == sc->hw_rx_cons) &&
		    (hw_tx_cons == sc->hw_tx_cons))
			break;
	}

	bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_PREREAD);

	/* Re-enable interrupts. */
	bce_enable_intr(sc, 0);

	/* Handle any frames that arrived while handling the interrupt. */
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
	    !if_sendq_empty(ifp))
		bce_start_locked(ifp);

bce_intr_exit:
	BCE_UNLOCK(sc);

	DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
}
/****************************************************************************/
/* Programs the various packet receive modes (broadcast and multicast). */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/*
 * if_foreach_llmaddr() callback: fold one multicast link-layer address
 * into the 256-bit hash table accumulated in *arg (8 x 32-bit words).
 * Always returns 1 so the caller counts every address visited.
 */
static u_int
bce_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	u32 *table = arg;
	int bit;

	/* Use the low byte of the little-endian CRC32 of the address. */
	bit = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN) & 0xFF;

	/* Top 3 bits pick the register, bottom 5 the bit within it. */
	table[(bit & 0xE0) >> 5] |= 1 << (bit & 0x1F);

	return (1);
}
static void
bce_set_rx_mode(struct bce_softc *sc)
{
	if_t ifp;
	u32 hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	u32 rx_mode, sort_mode;
	int i;

	DBENTER(BCE_VERBOSE_MISC);

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Initialize receive mode default settings. */
	rx_mode = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
	    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;

	/*
	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled.
	 */
	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
	    (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;

	/*
	 * Check for promiscuous, all multicast, or selected
	 * multicast address filtering.
	 */
	if (if_getflags(ifp) & IFF_PROMISC) {
		DBPRINT(sc, BCE_INFO_MISC, "Enabling promiscuous mode.\n");

		/* Enable promiscuous mode. */
		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
	} else if (if_getflags(ifp) & IFF_ALLMULTI) {
		DBPRINT(sc, BCE_INFO_MISC, "Enabling all multicast mode.\n");

		/* Enable all multicast addresses. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
			    0xffffffff);
		}
		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		DBPRINT(sc, BCE_INFO_MISC, "Enabling selective multicast mode.\n");

		/* Build the 256-bit hash filter from the multicast list. */
		if_foreach_llmaddr(ifp, bce_hash_maddr, hashes);

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);

		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BCE_VERBOSE_MISC, "Enabling new receive mode: "
		    "0x%08X\n", rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
	}

	/* Disable and clear the existing sort before enabling a new sort. */
	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);

	DBEXIT(BCE_VERBOSE_MISC);
}
/****************************************************************************/
/* Called periodically to update statistics from the controller's */
/* statistics block. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_stats_update(struct bce_softc *sc)
{
	struct statistics_block *stats;

	DBENTER(BCE_EXTREME_MISC);

	/* Make the DMA'd statistics block visible to the CPU. */
	bus_dmamap_sync(sc->stats_tag, sc->stats_map, BUS_DMASYNC_POSTREAD);

	stats = (struct statistics_block *) sc->stats_block;

	/*
	 * Update the sysctl statistics from the
	 * hardware statistics.
	 */

	/* 64-bit octet/packet counters are split into hi/lo register pairs. */
	sc->stat_IfHCInOctets =
	    ((u64) stats->stat_IfHCInOctets_hi << 32) +
	    (u64) stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
	    ((u64) stats->stat_IfHCInBadOctets_hi << 32) +
	    (u64) stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
	    ((u64) stats->stat_IfHCOutOctets_hi << 32) +
	    (u64) stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
	    ((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
	    (u64) stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
	    ((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
	    (u64) stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
	    ((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
	    (u64) stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
	    ((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
	    (u64) stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
	    ((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
	    (u64) stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
	    ((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
	    (u64) stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
	    ((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
	    (u64) stats->stat_IfHCOutBroadcastPkts_lo;

	/* ToDo: Preserve counters beyond 32 bits? */
	/* ToDo: Read the statistics from auto-clear regs? */

	/* The remaining counters are plain 32-bit shadows. */
	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
	    stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors =
	    stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
	    stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
	    stats->stat_Dot3StatsSingleCollisionFrames;

	sc->stat_Dot3StatsMultipleCollisionFrames =
	    stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
	    stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
	    stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions =
	    stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions =
	    stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments =
	    stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers =
	    stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts =
	    stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOversizePkts =
	    stats->stat_EtherStatsOversizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
	    stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
	    stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived =
	    stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived =
	    stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent =
	    stats->stat_OutXonSent;

	sc->stat_OutXoffSent =
	    stats->stat_OutXoffSent;

	sc->stat_FlowControlDone =
	    stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
	    stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered =
	    stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
	    stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards =
	    stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards =
	    stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards =
	    stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit =
	    stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
	    stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards =
	    stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards =
	    stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
	    stats->stat_CatchupInRuleCheckerP4Hit;

	/*
	 * NOTE(review): 0x120084 is an undocumented indirect register;
	 * presumably the COM processor's "no buffers" firmware counter
	 * (it is cleared at the same address in bce_sysctl_stats_clear()).
	 */
	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);

	/* ToDo: Add additional statistics? */

	DBEXIT(BCE_EXTREME_MISC);
}
/*
 * if_get_counter() method: derive interface counters from the driver's
 * shadow copies of the hardware statistics (see bce_stats_update()).
 */
static uint64_t
bce_get_counter(if_t ifp, ift_counter cnt)
{
	struct bce_softc *sc = if_getsoftc(ifp);
	uint64_t total;

	switch (cnt) {
	case IFCOUNTER_COLLISIONS:
		return (sc->stat_EtherStatsCollisions);
	case IFCOUNTER_IERRORS:
		/* Sum every category of dropped or damaged RX frame. */
		total = sc->stat_EtherStatsUndersizePkts;
		total += sc->stat_EtherStatsOversizePkts;
		total += sc->stat_IfInMBUFDiscards;
		total += sc->stat_Dot3StatsAlignmentErrors;
		total += sc->stat_Dot3StatsFCSErrors;
		total += sc->stat_IfInRuleCheckerDiscards;
		total += sc->stat_IfInFTQDiscards;
		total += sc->l2fhdr_error_count;
		total += sc->com_no_buffers;
		return (total);
	case IFCOUNTER_OERRORS:
		total = sc->stat_Dot3StatsExcessiveCollisions;
		total += sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
		total += sc->stat_Dot3StatsLateCollisions;
		total += sc->watchdog_timeouts;
		/*
		 * Certain controllers don't report
		 * carrier sense errors correctly.
		 * See errata E11_5708CA0_1165.
		 */
		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706 &&
		    BCE_CHIP_ID(sc) != BCE_CHIP_ID_5708_A0)
			total += sc->stat_Dot3StatsCarrierSenseErrors;
		return (total);
	default:
		/* Everything else comes from the generic ifnet counters. */
		return (if_get_counter_default(ifp, cnt));
	}
}
/****************************************************************************/
/* Periodic function to notify the bootcode that the driver is still */
/* present. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_pulse(void *xsc)
{
	struct bce_softc *sc = xsc;
	u32 msg;

	DBENTER(BCE_EXTREME_MISC);

	/* Runs from the callout with the softc lock held. */
	BCE_LOCK_ASSERT(sc);

	/* Tell the firmware that the driver is still running. */
	msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
	bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg);

	/* Update the bootcode condition. */
	sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);

	/* Report whether the bootcode still knows the driver is running. */
	if (bce_verbose || bootverbose) {
		if (sc->bce_drv_cardiac_arrest == FALSE) {
			/* Warn once when the bootcode loses track of us. */
			if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) {
				sc->bce_drv_cardiac_arrest = TRUE;
				BCE_PRINTF("%s(): Warning: bootcode "
				    "thinks driver is absent! "
				    "(bc_state = 0x%08X)\n",
				    __FUNCTION__, sc->bc_state);
			}
		} else {
			/*
			 * Not supported by all bootcode versions.
			 * (v5.0.11+ and v5.2.1+) Older bootcode
			 * will require the driver to reset the
			 * controller to clear this condition.
			 */
			if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) {
				sc->bce_drv_cardiac_arrest = FALSE;
				BCE_PRINTF("%s(): Bootcode found the "
				    "driver pulse! (bc_state = 0x%08X)\n",
				    __FUNCTION__, sc->bc_state);
			}
		}
	}

	/* Schedule the next pulse. */
	callout_reset(&sc->bce_pulse_callout, hz, bce_pulse, sc);

	DBEXIT(BCE_EXTREME_MISC);
}
/****************************************************************************/
/* Periodic function to perform maintenance tasks. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_tick(void *xsc)
{
	struct bce_softc *sc = xsc;
	struct mii_data *mii;
	if_t ifp;
	struct ifmediareq ifmr;

	ifp = sc->bce_ifp;

	DBENTER(BCE_EXTREME_MISC);

	/* Runs from the callout with the softc lock held. */
	BCE_LOCK_ASSERT(sc);

	/* Schedule the next tick. */
	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);

	/* Update the statistics from the hardware statistics block. */
	bce_stats_update(sc);

	/* Ensure page and RX chains get refilled in low-memory situations. */
	if (bce_hdr_split == TRUE)
		bce_fill_pg_chain(sc);
	bce_fill_rx_chain(sc);

	/* Check that chip hasn't hung. */
	bce_watchdog(sc);

	/* If link is already up then we're done. */
	if (sc->bce_link_up == TRUE)
		goto bce_tick_exit;

	/* Link is down. Check what the PHY's doing. */
	if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) {
		/* Remote PHY: query link state through the firmware. */
		bzero(&ifmr, sizeof(ifmr));
		bce_ifmedia_sts_rphy(sc, &ifmr);
		if ((ifmr.ifm_status & (IFM_ACTIVE | IFM_AVALID)) ==
		    (IFM_ACTIVE | IFM_AVALID)) {
			sc->bce_link_up = TRUE;
			bce_miibus_statchg(sc->bce_dev);
		}
	} else {
		/* Local PHY: poll through the MII layer. */
		mii = device_get_softc(sc->bce_miibus);
		mii_tick(mii);

		/* Check if the link has come up. */
		if ((mii->mii_media_status & IFM_ACTIVE) &&
		    (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)) {
			DBPRINT(sc, BCE_VERBOSE_MISC, "%s(): Link up!\n",
			    __FUNCTION__);

			sc->bce_link_up = TRUE;

			if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_2500_SX) &&
			    (bce_verbose || bootverbose))
				BCE_PRINTF("Gigabit link up!\n");
		}
	}

	if (sc->bce_link_up == TRUE) {
		/* Now that link is up, handle any outstanding TX traffic. */
		if (!if_sendq_empty(ifp)) {
			DBPRINT(sc, BCE_VERBOSE_MISC, "%s(): Found "
			    "pending TX traffic.\n", __FUNCTION__);
			bce_start_locked(ifp);
		}
	}

bce_tick_exit:
	DBEXIT(BCE_EXTREME_MISC);
}
/****************************************************************************/
/* Negotiates optional firmware capabilities (VLAN tag retention, remote    */
/* PHY) through the shared-memory capability mailbox and acknowledges the   */
/* ones the driver supports.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_fw_cap_init(struct bce_softc *sc)
{
	u32 ack, cap, link;

	ack = 0;
	cap = bce_shmem_rd(sc, BCE_FW_CAP_MB);

	/* Only proceed when the firmware advertises a valid signature. */
	if ((cap & BCE_FW_CAP_SIGNATURE_MAGIC_MASK) !=
	    BCE_FW_CAP_SIGNATURE_MAGIC)
		return;

	/* Acknowledge the VLAN-tag retention capability when offered. */
	if ((cap & (BCE_FW_CAP_MFW_KEEP_VLAN | BCE_FW_CAP_BC_KEEP_VLAN)) ==
	    (BCE_FW_CAP_MFW_KEEP_VLAN | BCE_FW_CAP_BC_KEEP_VLAN))
		ack |= BCE_DRV_ACK_CAP_SIGNATURE_MAGIC |
		    BCE_FW_CAP_MFW_KEEP_VLAN | BCE_FW_CAP_BC_KEEP_VLAN;

	/* SerDes ports may hand PHY management over to the firmware. */
	if ((sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) != 0 &&
	    (cap & BCE_FW_CAP_REMOTE_PHY_CAP) != 0) {
		sc->bce_phy_flags &= ~BCE_PHY_REMOTE_PORT_FIBER_FLAG;
		sc->bce_phy_flags |= BCE_PHY_REMOTE_CAP_FLAG;

		/* Record whether the remote port is currently fiber. */
		link = bce_shmem_rd(sc, BCE_LINK_STATUS);
		if ((link & BCE_LINK_STATUS_SERDES_LINK) != 0)
			sc->bce_phy_flags |= BCE_PHY_REMOTE_PORT_FIBER_FLAG;

		ack |= BCE_DRV_ACK_CAP_SIGNATURE_MAGIC |
		    BCE_FW_CAP_REMOTE_PHY_CAP;
	}

	/* Tell the firmware which capabilities we accepted. */
	if (ack != 0)
		bce_shmem_wr(sc, BCE_DRV_ACK_CAP_MB, ack);
}
#ifdef BCE_DEBUG
/****************************************************************************/
/* Allows the driver state to be dumped through the sysctl interface. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
{
int error;
int result;
struct bce_softc *sc;
result = -1;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || !req->newptr)
return (error);
if (result == 1) {
sc = (struct bce_softc *)arg1;
bce_dump_driver_state(sc);
}
return error;
}
/****************************************************************************/
/* Allows the hardware state to be dumped through the sysctl interface. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
{
int error;
int result;
struct bce_softc *sc;
result = -1;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || !req->newptr)
return (error);
if (result == 1) {
sc = (struct bce_softc *)arg1;
bce_dump_hw_state(sc);
}
return error;
}
/****************************************************************************/
/* Allows the status block to be dumped through the sysctl interface. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_sysctl_status_block(SYSCTL_HANDLER_ARGS)
{
int error;
int result;
struct bce_softc *sc;
result = -1;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || !req->newptr)
return (error);
if (result == 1) {
sc = (struct bce_softc *)arg1;
bce_dump_status_block(sc);
}
return error;
}
/****************************************************************************/
/* Allows the stats block to be dumped through the sysctl interface. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_sysctl_stats_block(SYSCTL_HANDLER_ARGS)
{
int error;
int result;
struct bce_softc *sc;
result = -1;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || !req->newptr)
return (error);
if (result == 1) {
sc = (struct bce_softc *)arg1;
bce_dump_stats_block(sc);
}
return error;
}
/****************************************************************************/
/* Allows the stat counters to be cleared without unloading/reloading the */
/* driver. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_sysctl_stats_clear(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct bce_softc *sc;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	/* Nothing to do on error or when the sysctl was only read. */
	if (error || !req->newptr)
		return (error);

	/* Writing a 1 clears all statistics. */
	if (result == 1) {
		sc = (struct bce_softc *)arg1;
		struct statistics_block *stats;

		/* Zero the DMA'd statistics block shared with the chip. */
		stats = (struct statistics_block *) sc->stats_block;
		bzero(stats, sizeof(struct statistics_block));
		bus_dmamap_sync(sc->stats_tag, sc->stats_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Clear the internal H/W statistics counters. */
		REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);

		/* Reset the driver maintained statistics. */
		sc->interrupts_rx =
		    sc->interrupts_tx = 0;
		sc->tso_frames_requested =
		    sc->tso_frames_completed =
		    sc->tso_frames_failed = 0;
		sc->rx_empty_count =
		    sc->tx_full_count = 0;
		/* Watermarks reset to their extremes, not zero. */
		sc->rx_low_watermark = USABLE_RX_BD_ALLOC;
		sc->tx_hi_watermark = 0;
		sc->l2fhdr_error_count =
		    sc->l2fhdr_error_sim_count = 0;
		sc->mbuf_alloc_failed_count =
		    sc->mbuf_alloc_failed_sim_count = 0;
		sc->dma_map_addr_rx_failed_count =
		    sc->dma_map_addr_tx_failed_count = 0;
		sc->mbuf_frag_count = 0;
		sc->csum_offload_tcp_udp =
		    sc->csum_offload_ip = 0;
		sc->vlan_tagged_frames_rcvd =
		    sc->vlan_tagged_frames_stripped = 0;
		sc->split_header_frames_rcvd =
		    sc->split_header_tcp_frames_rcvd = 0;

		/* Clear firmware maintained statistics. */
		REG_WR_IND(sc, 0x120084, 0);
	}

	return error;
}
/****************************************************************************/
/* Allows the shared memory contents to be dumped through the sysctl . */
/* interface. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_sysctl_shmem_state(SYSCTL_HANDLER_ARGS)
{
int error;
int result;
struct bce_softc *sc;
result = -1;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || !req->newptr)
return (error);
if (result == 1) {
sc = (struct bce_softc *)arg1;
bce_dump_shmem_state(sc);
}
return error;
}
/****************************************************************************/
/* Allows the bootcode state to be dumped through the sysctl interface. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_sysctl_bc_state(SYSCTL_HANDLER_ARGS)
{
int error;
int result;
struct bce_softc *sc;
result = -1;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || !req->newptr)
return (error);
if (result == 1) {
sc = (struct bce_softc *)arg1;
bce_dump_bc_state(sc);
}
return error;
}
/****************************************************************************/
/* Provides a sysctl interface to allow dumping the RX BD chain. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_sysctl_dump_rx_bd_chain(SYSCTL_HANDLER_ARGS)
{
int error;
int result;
struct bce_softc *sc;
result = -1;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || !req->newptr)
return (error);
if (result == 1) {
sc = (struct bce_softc *)arg1;
bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD_ALLOC);
}
return error;
}
/****************************************************************************/
/* Provides a sysctl interface to allow dumping the RX MBUF chain. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_sysctl_dump_rx_mbuf_chain(SYSCTL_HANDLER_ARGS)
{
int error;
int result;
struct bce_softc *sc;
result = -1;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || !req->newptr)
return (error);
if (result == 1) {
sc = (struct bce_softc *)arg1;
bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD_ALLOC);
}
return error;
}
/****************************************************************************/
/* Provides a sysctl interface to allow dumping the TX chain. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
{
int error;
int result;
struct bce_softc *sc;
result = -1;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || !req->newptr)
return (error);
if (result == 1) {
sc = (struct bce_softc *)arg1;
bce_dump_tx_chain(sc, 0, TOTAL_TX_BD_ALLOC);
}
return error;
}
/****************************************************************************/
/* Provides a sysctl interface to allow dumping the page chain. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_sysctl_dump_pg_chain(SYSCTL_HANDLER_ARGS)
{
int error;
int result;
struct bce_softc *sc;
result = -1;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || !req->newptr)
return (error);
if (result == 1) {
sc = (struct bce_softc *)arg1;
bce_dump_pg_chain(sc, 0, TOTAL_PG_BD_ALLOC);
}
return error;
}
/****************************************************************************/
/* Provides a sysctl interface to allow reading arbitrary NVRAM offsets in */
/* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_sysctl_nvram_read(SYSCTL_HANDLER_ARGS)
{
struct bce_softc *sc = (struct bce_softc *)arg1;
int error;
u32 result;
u32 val[1];
u8 *data = (u8 *) val;
result = -1;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || (req->newptr == NULL))
return (error);
error = bce_nvram_read(sc, result, data, 4);
BCE_PRINTF("offset 0x%08X = 0x%08X\n", result, bce_be32toh(val[0]));
return (error);
}
/****************************************************************************/
/* Provides a sysctl interface to allow reading arbitrary registers in the */
/* device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
{
struct bce_softc *sc = (struct bce_softc *)arg1;
int error;
u32 val, result;
result = -1;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || (req->newptr == NULL))
return (error);
/* Make sure the register is accessible. */
if (result < 0x8000) {
val = REG_RD(sc, result);
BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
} else if (result < 0x0280000) {
val = REG_RD_IND(sc, result);
BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
}
return (error);
}
/****************************************************************************/
/* Provides a sysctl interface to allow reading arbitrary PHY registers in */
/* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS)
{
struct bce_softc *sc;
device_t dev;
int error, result;
u16 val;
result = -1;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || (req->newptr == NULL))
return (error);
/* Make sure the register is accessible. */
if (result < 0x20) {
sc = (struct bce_softc *)arg1;
dev = sc->bce_dev;
val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result);
BCE_PRINTF("phy 0x%02X = 0x%04X\n", result, val);
}
return (error);
}
/****************************************************************************/
/* Provides a sysctl interface for dumping the nvram contents. */
/* DO NOT ENABLE ON PRODUCTION SYSTEMS! */
/* */
/* Returns: */
/* 0 for success, positive errno for failure. */
/****************************************************************************/
static int
bce_sysctl_nvram_dump(SYSCTL_HANDLER_ARGS)
{
struct bce_softc *sc = (struct bce_softc *)arg1;
int error, i;
if (sc->nvram_buf == NULL)
sc->nvram_buf = malloc(sc->bce_flash_size,
M_TEMP, M_ZERO | M_WAITOK);
error = 0;
if (req->oldlen == sc->bce_flash_size) {
for (i = 0; i < sc->bce_flash_size && error == 0; i++)
error = bce_nvram_read(sc, i, &sc->nvram_buf[i], 1);
}
if (error == 0)
error = SYSCTL_OUT(req, sc->nvram_buf, sc->bce_flash_size);
return error;
}
#ifdef BCE_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Provides a sysctl interface for writing to nvram. */
/* DO NOT ENABLE ON PRODUCTION SYSTEMS! */
/* */
/* Returns: */
/* 0 for success, positive errno for failure. */
/****************************************************************************/
/*
 * Sysctl handler: copies in a full flash image from userland and programs
 * it into NVRAM when its length exactly matches the flash size
 * (debug builds with BCE_NVRAM_WRITE_SUPPORT only).
 */
static int
bce_sysctl_nvram_write(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = (struct bce_softc *)arg1;
	int error;

	/* Lazily allocate (or re-zero) a buffer for the incoming image. */
	if (sc->nvram_buf == NULL)
		sc->nvram_buf = malloc(sc->bce_flash_size,
		    M_TEMP, M_ZERO | M_WAITOK);
	else
		bzero(sc->nvram_buf, sc->bce_flash_size);

	error = SYSCTL_IN(req, sc->nvram_buf, sc->bce_flash_size);
	/*
	 * Bug fix: the original test was inverted (error == 0), which
	 * returned on a *successful* copyin — making the write below
	 * unreachable — and fell through to program the flash with
	 * uninitialized data when the copyin *failed*.
	 */
	if (error != 0)
		return (error);

	/* Only program the flash when a complete image was supplied. */
	if (req->newlen == sc->bce_flash_size)
		error = bce_nvram_write(sc, 0, sc->nvram_buf,
		    sc->bce_flash_size);

	return (error);
}
#endif
/****************************************************************************/
/* Provides a sysctl interface to allow reading a CID. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_sysctl_dump_ctx(SYSCTL_HANDLER_ARGS)
{
struct bce_softc *sc;
int error, result;
result = -1;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || (req->newptr == NULL))
return (error);
/* Make sure the register is accessible. */
if (result <= TX_CID) {
sc = (struct bce_softc *)arg1;
bce_dump_ctx(sc, result);
}
return (error);
}
/****************************************************************************/
/* Provides a sysctl interface to forcing the driver to dump state and */
/* enter the debugger. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
{
int error;
int result;
struct bce_softc *sc;
result = -1;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || !req->newptr)
return (error);
if (result == 1) {
sc = (struct bce_softc *)arg1;
bce_breakpoint(sc);
}
return error;
}
#endif
/****************************************************************************/
/* Adds any sysctl parameters for tuning or debugging purposes. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static void
bce_add_sysctls(struct bce_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;

	DBENTER(BCE_VERBOSE_MISC);

	ctx = device_get_sysctl_ctx(sc->bce_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));

	/*
	 * Driver-maintained error/fault-injection counters.  The *_sim_*
	 * nodes exist only in debug kernels and let the operator force
	 * failure paths; the remaining nodes are plain counters.
	 */
#ifdef BCE_DEBUG
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "l2fhdr_error_sim_control",
	    CTLFLAG_RW, &l2fhdr_error_sim_control,
	    0, "Debug control to force l2fhdr errors");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "l2fhdr_error_sim_count",
	    CTLFLAG_RD, &sc->l2fhdr_error_sim_count,
	    0, "Number of simulated l2_fhdr errors");
#endif

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "l2fhdr_error_count",
	    CTLFLAG_RD, &sc->l2fhdr_error_count,
	    0, "Number of l2_fhdr errors");

#ifdef BCE_DEBUG
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "mbuf_alloc_failed_sim_control",
	    CTLFLAG_RW, &mbuf_alloc_failed_sim_control,
	    0, "Debug control to force mbuf allocation failures");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "mbuf_alloc_failed_sim_count",
	    CTLFLAG_RD, &sc->mbuf_alloc_failed_sim_count,
	    0, "Number of simulated mbuf cluster allocation failures");
#endif

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "mbuf_alloc_failed_count",
	    CTLFLAG_RD, &sc->mbuf_alloc_failed_count,
	    0, "Number of mbuf allocation failures");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "mbuf_frag_count",
	    CTLFLAG_RD, &sc->mbuf_frag_count,
	    0, "Number of fragmented mbufs");

#ifdef BCE_DEBUG
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "dma_map_addr_failed_sim_control",
	    CTLFLAG_RW, &dma_map_addr_failed_sim_control,
	    0, "Debug control to force DMA mapping failures");

	/* ToDo: Figure out how to update this value in bce_dma_map_addr(). */
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "dma_map_addr_failed_sim_count",
	    CTLFLAG_RD, &sc->dma_map_addr_failed_sim_count,
	    0, "Number of simulated DMA mapping failures");
#endif

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "dma_map_addr_rx_failed_count",
	    CTLFLAG_RD, &sc->dma_map_addr_rx_failed_count,
	    0, "Number of RX DMA mapping failures");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "dma_map_addr_tx_failed_count",
	    CTLFLAG_RD, &sc->dma_map_addr_tx_failed_count,
	    0, "Number of TX DMA mapping failures");

#ifdef BCE_DEBUG
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "unexpected_attention_sim_control",
	    CTLFLAG_RW, &unexpected_attention_sim_control,
	    0, "Debug control to simulate unexpected attentions");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "unexpected_attention_sim_count",
	    CTLFLAG_RW, &sc->unexpected_attention_sim_count,
	    0, "Number of simulated unexpected attentions");
#endif

	/* RW so the operator can zero the counter from userland. */
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "unexpected_attention_count",
	    CTLFLAG_RW, &sc->unexpected_attention_count,
	    0, "Number of unexpected attentions");

#ifdef BCE_DEBUG
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "debug_bootcode_running_failure",
	    CTLFLAG_RW, &bootcode_running_failure_sim_control,
	    0, "Debug control to force bootcode running failures");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "rx_low_watermark",
	    CTLFLAG_RD, &sc->rx_low_watermark,
	    0, "Lowest level of free rx_bd's");

	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
	    "rx_empty_count",
	    CTLFLAG_RD, &sc->rx_empty_count,
	    "Number of times the RX chain was empty");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "tx_hi_watermark",
	    CTLFLAG_RD, &sc->tx_hi_watermark,
	    0, "Highest level of used tx_bd's");

	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
	    "tx_full_count",
	    CTLFLAG_RD, &sc->tx_full_count,
	    "Number of times the TX chain was full");

	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
	    "tso_frames_requested",
	    CTLFLAG_RD, &sc->tso_frames_requested,
	    "Number of TSO frames requested");

	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
	    "tso_frames_completed",
	    CTLFLAG_RD, &sc->tso_frames_completed,
	    "Number of TSO frames completed");

	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
	    "tso_frames_failed",
	    CTLFLAG_RD, &sc->tso_frames_failed,
	    "Number of TSO frames failed");

	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
	    "csum_offload_ip",
	    CTLFLAG_RD, &sc->csum_offload_ip,
	    "Number of IP checksum offload frames");

	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
	    "csum_offload_tcp_udp",
	    CTLFLAG_RD, &sc->csum_offload_tcp_udp,
	    "Number of TCP/UDP checksum offload frames");

	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
	    "vlan_tagged_frames_rcvd",
	    CTLFLAG_RD, &sc->vlan_tagged_frames_rcvd,
	    "Number of VLAN tagged frames received");

	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
	    "vlan_tagged_frames_stripped",
	    CTLFLAG_RD, &sc->vlan_tagged_frames_stripped,
	    "Number of VLAN tagged frames stripped");

	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
	    "interrupts_rx",
	    CTLFLAG_RD, &sc->interrupts_rx,
	    "Number of RX interrupts");

	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
	    "interrupts_tx",
	    CTLFLAG_RD, &sc->interrupts_tx,
	    "Number of TX interrupts");

	/* Header-split statistics only make sense when the feature is on. */
	if (bce_hdr_split == TRUE) {
		SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
		    "split_header_frames_rcvd",
		    CTLFLAG_RD, &sc->split_header_frames_rcvd,
		    "Number of split header frames received");

		SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
		    "split_header_tcp_frames_rcvd",
		    CTLFLAG_RD, &sc->split_header_tcp_frames_rcvd,
		    "Number of split header TCP frames received");
	}

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "nvram_dump", CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    (void *)sc, 0,
	    bce_sysctl_nvram_dump, "S", "");

#ifdef BCE_NVRAM_WRITE_SUPPORT
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "nvram_write", CTLTYPE_OPAQUE | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
	    (void *)sc, 0,
	    bce_sysctl_nvram_write, "S", "");
#endif
#endif /* BCE_DEBUG */

	/*
	 * Hardware statistics block counters.  Note: the first node name
	 * uses "Hc" (not "HC") — a historical typo that is kept because
	 * the sysctl name is a userland-visible interface.
	 */
	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
	    "stat_IfHcInOctets",
	    CTLFLAG_RD, &sc->stat_IfHCInOctets,
	    "Bytes received");

	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
	    "stat_IfHCInBadOctets",
	    CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
	    "Bad bytes received");

	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
	    "stat_IfHCOutOctets",
	    CTLFLAG_RD, &sc->stat_IfHCOutOctets,
	    "Bytes sent");

	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
	    "stat_IfHCOutBadOctets",
	    CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
	    "Bad bytes sent");

	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
	    "stat_IfHCInUcastPkts",
	    CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
	    "Unicast packets received");

	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
	    "stat_IfHCInMulticastPkts",
	    CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
	    "Multicast packets received");

	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
	    "stat_IfHCInBroadcastPkts",
	    CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
	    "Broadcast packets received");

	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
	    "stat_IfHCOutUcastPkts",
	    CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
	    "Unicast packets sent");

	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
	    "stat_IfHCOutMulticastPkts",
	    CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
	    "Multicast packets sent");

	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
	    "stat_IfHCOutBroadcastPkts",
	    CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
	    "Broadcast packets sent");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
	    CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
	    0, "Internal MAC transmit errors");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_Dot3StatsCarrierSenseErrors",
	    CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
	    0, "Carrier sense errors");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_Dot3StatsFCSErrors",
	    CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
	    0, "Frame check sequence errors");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_Dot3StatsAlignmentErrors",
	    CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
	    0, "Alignment errors");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_Dot3StatsSingleCollisionFrames",
	    CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
	    0, "Single Collision Frames");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_Dot3StatsMultipleCollisionFrames",
	    CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
	    0, "Multiple Collision Frames");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_Dot3StatsDeferredTransmissions",
	    CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
	    0, "Deferred Transmissions");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_Dot3StatsExcessiveCollisions",
	    CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
	    0, "Excessive Collisions");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_Dot3StatsLateCollisions",
	    CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
	    0, "Late Collisions");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsCollisions",
	    CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
	    0, "Collisions");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsFragments",
	    CTLFLAG_RD, &sc->stat_EtherStatsFragments,
	    0, "Fragments");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsJabbers",
	    CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
	    0, "Jabbers");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsUndersizePkts",
	    CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
	    0, "Undersize packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsOversizePkts",
	    CTLFLAG_RD, &sc->stat_EtherStatsOversizePkts,
	    0, "Oversize packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsRx64Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
	    0, "Bytes received in 64 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsRx65Octetsto127Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
	    0, "Bytes received in 65 to 127 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsRx128Octetsto255Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
	    0, "Bytes received in 128 to 255 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsRx256Octetsto511Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
	    0, "Bytes received in 256 to 511 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsRx512Octetsto1023Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
	    0, "Bytes received in 512 to 1023 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsRx1024Octetsto1522Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
	    0, "Bytes received in 1024 to 1522 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsRx1523Octetsto9022Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
	    0, "Bytes received in 1523 to 9022 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsTx64Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
	    0, "Bytes sent in 64 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsTx65Octetsto127Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
	    0, "Bytes sent in 65 to 127 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsTx128Octetsto255Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
	    0, "Bytes sent in 128 to 255 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsTx256Octetsto511Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
	    0, "Bytes sent in 256 to 511 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsTx512Octetsto1023Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
	    0, "Bytes sent in 512 to 1023 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsTx1024Octetsto1522Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
	    0, "Bytes sent in 1024 to 1522 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_EtherStatsPktsTx1523Octetsto9022Octets",
	    CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
	    0, "Bytes sent in 1523 to 9022 byte packets");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_XonPauseFramesReceived",
	    CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
	    0, "XON pause frames received");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_XoffPauseFramesReceived",
	    CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
	    0, "XOFF pause frames received");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_OutXonSent",
	    CTLFLAG_RD, &sc->stat_OutXonSent,
	    0, "XON pause frames sent");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_OutXoffSent",
	    CTLFLAG_RD, &sc->stat_OutXoffSent,
	    0, "XOFF pause frames sent");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_FlowControlDone",
	    CTLFLAG_RD, &sc->stat_FlowControlDone,
	    0, "Flow control done");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_MacControlFramesReceived",
	    CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
	    0, "MAC control frames received");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_XoffStateEntered",
	    CTLFLAG_RD, &sc->stat_XoffStateEntered,
	    0, "XOFF state entered");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_IfInFramesL2FilterDiscards",
	    CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
	    0, "Received L2 packets discarded");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_IfInRuleCheckerDiscards",
	    CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
	    0, "Received packets discarded by rule");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_IfInFTQDiscards",
	    CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
	    0, "Received packet FTQ discards");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_IfInMBUFDiscards",
	    CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
	    0, "Received packets discarded due to lack "
	    "of controller buffer memory");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_IfInRuleCheckerP4Hit",
	    CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
	    0, "Received packets rule checker hits");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_CatchupInRuleCheckerDiscards",
	    CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
	    0, "Received packets discarded in Catchup path");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_CatchupInFTQDiscards",
	    CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
	    0, "Received packets discarded in FTQ in Catchup path");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_CatchupInMBUFDiscards",
	    CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
	    0, "Received packets discarded in controller "
	    "buffer memory in Catchup path");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "stat_CatchupInRuleCheckerP4Hit",
	    CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
	    0, "Received packets rule checker hits in Catchup path");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "com_no_buffers",
	    CTLFLAG_RD, &sc->com_no_buffers,
	    0, "Valid packets received but no RX buffers available");

	/* Debug-only procedure nodes for state dumps and pokes. */
#ifdef BCE_DEBUG
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "driver_state", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)sc, 0,
	    bce_sysctl_driver_state, "I", "Driver state information");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "hw_state", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)sc, 0,
	    bce_sysctl_hw_state, "I", "Hardware state information");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "status_block", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)sc, 0,
	    bce_sysctl_status_block, "I", "Dump status block");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "stats_block", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)sc, 0,
	    bce_sysctl_stats_block, "I", "Dump statistics block");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "stats_clear", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)sc, 0,
	    bce_sysctl_stats_clear, "I", "Clear statistics block");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "shmem_state", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)sc, 0,
	    bce_sysctl_shmem_state, "I", "Shared memory state information");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "bc_state", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)sc, 0,
	    bce_sysctl_bc_state, "I", "Bootcode state information");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "dump_rx_bd_chain", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)sc, 0,
	    bce_sysctl_dump_rx_bd_chain, "I", "Dump RX BD chain");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "dump_rx_mbuf_chain", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)sc, 0,
	    bce_sysctl_dump_rx_mbuf_chain, "I", "Dump RX MBUF chain");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)sc, 0,
	    bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");

	if (bce_hdr_split == TRUE) {
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
		    "dump_pg_chain",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
		    (void *)sc, 0,
		    bce_sysctl_dump_pg_chain, "I", "Dump page chain");
	}

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "dump_ctx", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)sc, 0,
	    bce_sysctl_dump_ctx, "I", "Dump context memory");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "breakpoint", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)sc, 0,
	    bce_sysctl_breakpoint, "I", "Driver breakpoint");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "reg_read", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)sc, 0,
	    bce_sysctl_reg_read, "I", "Register read");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "nvram_read", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)sc, 0,
	    bce_sysctl_nvram_read, "I", "NVRAM read");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "phy_read", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)sc, 0,
	    bce_sysctl_phy_read, "I", "PHY register read");
#endif

	DBEXIT(BCE_VERBOSE_MISC);
}
/****************************************************************************/
/* BCE Debug Routines */
/****************************************************************************/
#ifdef BCE_DEBUG
/****************************************************************************/
/* Freezes the controller to allow for a cohesive state dump. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_freeze_controller(struct bce_softc *sc)
{

	/* Halt every internal state machine so a dump is self-consistent. */
	REG_WR(sc, BCE_MISC_COMMAND,
	    REG_RD(sc, BCE_MISC_COMMAND) | BCE_MISC_COMMAND_DISABLE_ALL);
}
/****************************************************************************/
/* Unfreezes the controller after a freeze operation. This may not always */
/* work and the controller will require a reset! */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_unfreeze_controller(struct bce_softc *sc)
{

	/* Re-enable the internal state machines after a freeze. */
	REG_WR(sc, BCE_MISC_COMMAND,
	    REG_RD(sc, BCE_MISC_COMMAND) | BCE_MISC_COMMAND_ENABLE_ALL);
}
/****************************************************************************/
/* Prints out Ethernet frame information from an mbuf. */
/* */
/* Partially decode an Ethernet frame to look at some important headers. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/*
 * Partially decode an Ethernet frame held in 'm': Ethernet (with optional
 * 802.1Q encapsulation), then IPv4/TCP/UDP/ICMP or ARP headers.  Debug
 * output only; the mbuf is not modified.
 */
static __attribute__ ((noinline)) void
bce_dump_enet(struct bce_softc *sc, struct mbuf *m)
{
	struct ether_vlan_header *eh;
	u16 etype;		/* Ethertype after VLAN de-encapsulation. */
	int ehlen;		/* Ethernet header length actually present. */
	struct ip *ip;
	struct tcphdr *th;
	struct udphdr *uh;
	struct arphdr *ah;

	BCE_PRINTF(
	    "-----------------------------"
	    " Frame Decode "
	    "-----------------------------\n");

	/*
	 * NOTE(review): assumes the Ethernet header (and any decoded L3/L4
	 * headers below) is contiguous in the first mbuf — confirm callers
	 * only pass pulled-up frames.
	 */
	eh = mtod(m, struct ether_vlan_header *);

	/* Handle VLAN encapsulation if present. */
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehlen = ETHER_HDR_LEN;
	}

	/* ToDo: Add VLAN output. */
	/* %6D is the kernel printf(9) byte-array format (MAC addresses). */
	BCE_PRINTF("enet: dest = %6D, src = %6D, type = 0x%04X, hlen = %d\n",
	    eh->evl_dhost, ":", eh->evl_shost, ":", etype, ehlen);

	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(m->m_data + ehlen);
		BCE_PRINTF("--ip: dest = 0x%08X , src = 0x%08X, "
		    "len = %d bytes, protocol = 0x%02X, xsum = 0x%04X\n",
		    ntohl(ip->ip_dst.s_addr), ntohl(ip->ip_src.s_addr),
		    ntohs(ip->ip_len), ip->ip_p, ntohs(ip->ip_sum));

		switch (ip->ip_p) {
		case IPPROTO_TCP:
			/* ip_hl is in 32-bit words; << 2 converts to bytes. */
			th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
			BCE_PRINTF("-tcp: dest = %d, src = %d, hlen = "
			    "%d bytes, flags = 0x%b, csum = 0x%04X\n",
			    ntohs(th->th_dport), ntohs(th->th_sport),
			    (th->th_off << 2), th->th_flags,
			    "\20\10CWR\07ECE\06URG\05ACK\04PSH\03RST"
			    "\02SYN\01FIN", ntohs(th->th_sum));
			break;
		case IPPROTO_UDP:
			uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
			BCE_PRINTF("-udp: dest = %d, src = %d, len = %d "
			    "bytes, csum = 0x%04X\n", ntohs(uh->uh_dport),
			    ntohs(uh->uh_sport), ntohs(uh->uh_ulen),
			    ntohs(uh->uh_sum));
			break;
		case IPPROTO_ICMP:
			/* ICMP contents are not decoded further. */
			BCE_PRINTF("icmp:\n");
			break;
		default:
			BCE_PRINTF("----: Other IP protocol.\n");
		}
		break;
	case ETHERTYPE_IPV6:
		BCE_PRINTF("ipv6: No decode supported.\n");
		break;
	case ETHERTYPE_ARP:
		BCE_PRINTF("-arp: ");
		ah = (struct arphdr *) (m->m_data + ehlen);
		switch (ntohs(ah->ar_op)) {
		case ARPOP_REVREQUEST:
			printf("reverse ARP request\n");
			break;
		case ARPOP_REVREPLY:
			printf("reverse ARP reply\n");
			break;
		case ARPOP_REQUEST:
			printf("ARP request\n");
			break;
		case ARPOP_REPLY:
			printf("ARP reply\n");
			break;
		default:
			printf("other ARP operation\n");
		}
		break;
	default:
		BCE_PRINTF("----: Other protocol.\n");
	}

	BCE_PRINTF(
	    "-----------------------------"
	    "--------------"
	    "-----------------------------\n");
}
/****************************************************************************/
/* Prints out information about an mbuf. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/*
 * Walk an mbuf chain and print each mbuf's length, flags, data pointer,
 * packet-header fields (first mbuf), and external-storage type.
 * NULL input is reported rather than dereferenced.
 *
 * Fix: the EXT_MOD_TYPE case previously printed the misspelled label
 * "EXT_MDD_TYPE".
 */
static __attribute__ ((noinline)) void
bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
{
	struct mbuf *mp = m;

	if (m == NULL) {
		BCE_PRINTF("mbuf: null pointer\n");
		return;
	}

	while (mp) {
		/* %b is the kernel printf(9) bit-field decode format. */
		BCE_PRINTF("mbuf: %p, m_len = %d, m_flags = 0x%b, "
		    "m_data = %p\n", mp, mp->m_len, mp->m_flags,
		    "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", mp->m_data);

		if (mp->m_flags & M_PKTHDR) {
			BCE_PRINTF("- m_pkthdr: len = %d, flags = 0x%b, "
			    "csum_flags = %b\n", mp->m_pkthdr.len,
			    mp->m_flags, M_FLAG_PRINTF,
			    mp->m_pkthdr.csum_flags, CSUM_BITS);
		}

		if (mp->m_flags & M_EXT) {
			BCE_PRINTF("- m_ext: %p, ext_size = %d, type = ",
			    mp->m_ext.ext_buf, mp->m_ext.ext_size);
			switch (mp->m_ext.ext_type) {
			case EXT_CLUSTER:
				printf("EXT_CLUSTER\n"); break;
			case EXT_SFBUF:
				printf("EXT_SFBUF\n"); break;
			case EXT_JUMBO9:
				printf("EXT_JUMBO9\n"); break;
			case EXT_JUMBO16:
				printf("EXT_JUMBO16\n"); break;
			case EXT_PACKET:
				printf("EXT_PACKET\n"); break;
			case EXT_MBUF:
				printf("EXT_MBUF\n"); break;
			case EXT_NET_DRV:
				printf("EXT_NET_DRV\n"); break;
			case EXT_MOD_TYPE:
				printf("EXT_MOD_TYPE\n"); break;
			case EXT_DISPOSABLE:
				printf("EXT_DISPOSABLE\n"); break;
			case EXT_EXTREF:
				printf("EXT_EXTREF\n"); break;
			default:
				printf("UNKNOWN\n");
			}
		}

		mp = mp->m_next;
	}
}
/****************************************************************************/
/* Prints out the mbufs in the TX mbuf chain. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/*
 * Dump 'count' TX mbufs starting at ring slot 'chain_prod', following the
 * chain through page-boundary entries.
 */
static __attribute__ ((noinline)) void
bce_dump_tx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
{

	BCE_PRINTF(
	    "----------------------------"
	    " tx mbuf data "
	    "----------------------------\n");

	while (count-- > 0) {
		BCE_PRINTF("txmbuf[0x%04X]\n", chain_prod);
		bce_dump_mbuf(sc, sc->tx_mbuf_ptr[chain_prod]);
		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}
/****************************************************************************/
/* Prints out the mbufs in the RX mbuf chain. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/*
 * Dump 'count' RX mbufs starting at ring slot 'chain_prod', following the
 * chain through page-boundary entries.
 */
static __attribute__ ((noinline)) void
bce_dump_rx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
{

	BCE_PRINTF(
	    "----------------------------"
	    " rx mbuf data "
	    "----------------------------\n");

	while (count-- > 0) {
		BCE_PRINTF("rxmbuf[0x%04X]\n", chain_prod);
		bce_dump_mbuf(sc, sc->rx_mbuf_ptr[chain_prod]);
		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}
/****************************************************************************/
/* Prints out the mbufs in the mbuf page chain. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/*
 * Dump 'count' page-chain mbufs starting at ring slot 'chain_prod',
 * following the chain through page-boundary entries.
 */
static __attribute__ ((noinline)) void
bce_dump_pg_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
{

	BCE_PRINTF(
	    "----------------------------"
	    " pg mbuf data "
	    "----------------------------\n");

	while (count-- > 0) {
		BCE_PRINTF("pgmbuf[0x%04X]\n", chain_prod);
		bce_dump_mbuf(sc, sc->pg_mbuf_ptr[chain_prod]);
		chain_prod = PG_CHAIN_IDX(NEXT_PG_BD(chain_prod));
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}
/****************************************************************************/
/* Prints out a tx_bd structure. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
{
int i = 0;
if (idx > MAX_TX_BD_ALLOC)
/* Index out of range. */
BCE_PRINTF("tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
/* TX Chain page pointer. */
BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
"pointer\n", idx, txbd->tx_bd_haddr_hi,
txbd->tx_bd_haddr_lo);
else {
/* Normal tx_bd entry. */
BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, "
"mss_nbytes = 0x%08X, vlan tag = 0x%04X, flags = "
"0x%04X (", idx, txbd->tx_bd_haddr_hi,
txbd->tx_bd_haddr_lo, txbd->tx_bd_mss_nbytes,
txbd->tx_bd_vlan_tag, txbd->tx_bd_flags);
if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT) {
if (i>0)
printf("|");
printf("CONN_FAULT");
i++;
}
if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM) {
if (i>0)
printf("|");
printf("TCP_UDP_CKSUM");
i++;
}
if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM) {
if (i>0)
printf("|");
printf("IP_CKSUM");
i++;
}
if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG) {
if (i>0)
printf("|");
printf("VLAN");
i++;
}
if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW) {
if (i>0)
printf("|");
printf("COAL_NOW");
i++;
}
if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC) {
if (i>0)
printf("|");
printf("DONT_GEN_CRC");
i++;
}
if (txbd->tx_bd_flags & TX_BD_FLAGS_START) {
if (i>0)
printf("|");
printf("START");
i++;
}
if (txbd->tx_bd_flags & TX_BD_FLAGS_END) {
if (i>0)
printf("|");
printf("END");
i++;
}
if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO) {
if (i>0)
printf("|");
printf("LSO");
i++;
}
if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD) {
if (i>0)
printf("|");
printf("SW_OPTION=%d", ((txbd->tx_bd_flags &
TX_BD_FLAGS_SW_OPTION_WORD) >> 8)); i++;
}
if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS) {
if (i>0)
printf("|");
printf("SW_FLAGS");
i++;
}
if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP) {
if (i>0)
printf("|");
printf("SNAP)");
} else {
printf(")\n");
}
}
}
/****************************************************************************/
/* Prints out a rx_bd structure. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/*
 * Decode and print a single rx_bd: invalid index, chain-page pointer, or
 * a normal receive buffer descriptor.
 */
static __attribute__ ((noinline)) void
bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
{

	/* Reject indices beyond the allocated chain. */
	if (idx > MAX_RX_BD_ALLOC) {
		BCE_PRINTF("rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
		return;
	}

	/* The last usable entry on each page links to the next page. */
	if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) {
		BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
		    "pointer\n", idx, rxbd->rx_bd_haddr_hi,
		    rxbd->rx_bd_haddr_lo);
		return;
	}

	/* Ordinary receive buffer descriptor. */
	BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
	    "0x%08X, flags = 0x%08X\n", idx, rxbd->rx_bd_haddr_hi,
	    rxbd->rx_bd_haddr_lo, rxbd->rx_bd_len,
	    rxbd->rx_bd_flags);
}
/****************************************************************************/
/* Prints out a rx_bd structure in the page chain. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/*
 * Decode and print a single page-chain descriptor (shares the rx_bd
 * layout): invalid index, chain-page pointer, or a normal entry.
 */
static __attribute__ ((noinline)) void
bce_dump_pgbd(struct bce_softc *sc, int idx, struct rx_bd *pgbd)
{

	/* Reject indices beyond the allocated chain. */
	if (idx > MAX_PG_BD_ALLOC) {
		BCE_PRINTF("pg_bd[0x%04X]: Invalid pg_bd index!\n", idx);
		return;
	}

	/* The last usable entry on each page links to the next page. */
	if ((idx & USABLE_PG_BD_PER_PAGE) == USABLE_PG_BD_PER_PAGE) {
		BCE_PRINTF("px_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
		    idx, pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo);
		return;
	}

	/* Ordinary page-chain descriptor. */
	BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
	    "flags = 0x%08X\n", idx,
	    pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo,
	    pgbd->rx_bd_len, pgbd->rx_bd_flags);
}
/****************************************************************************/
/* Prints out a l2_fhdr structure. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/*
 * Print the fields of one received-frame header (l2_fhdr): status bits
 * (decoded with BCE_L2FHDR_PRINTFB via the kernel %b format), packet
 * length, VLAN tag, and the hardware-computed checksums.
 */
static __attribute__ ((noinline)) void
bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
{
	BCE_PRINTF("l2_fhdr[0x%04X]: status = 0x%b, "
	    "pkt_len = %d, vlan = 0x%04x, ip_xsum/hdr_len = 0x%04X, "
	    "tcp_udp_xsum = 0x%04X\n", idx,
	    l2fhdr->l2_fhdr_status, BCE_L2FHDR_PRINTFB,
	    l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag,
	    l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum);
}
/****************************************************************************/
/* Prints out context memory info. (Only useful for CID 0 to 16.) */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/*
 * Print decoded context memory for the given connection ID.  Only the RX
 * and TX CIDs are decoded field-by-field; any CID above TX_CID is
 * rejected.  A raw 0x300-byte hex dump of the context follows in all
 * accepted cases.
 */
static __attribute__ ((noinline)) void
bce_dump_ctx(struct bce_softc *sc, u16 cid)
{
	if (cid > TX_CID) {
		BCE_PRINTF(" Unknown CID\n");
		return;
	}

	BCE_PRINTF(
	    "----------------------------"
	    " CTX Data "
	    "----------------------------\n");

	BCE_PRINTF(" 0x%04X - (CID) Context ID\n", cid);

	if (cid == RX_CID) {
		/* Receive-context fields: producer/consumer indices,
		 * byte sequences, and the BD/page chain addresses. */
		BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BDIDX) host rx "
		    "producer index\n",
		    CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_BDIDX));
		BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BSEQ) host "
		    "byte sequence\n", CTX_RD(sc, GET_CID_ADDR(cid),
		    BCE_L2CTX_RX_HOST_BSEQ));
		BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BSEQ) h/w byte sequence\n",
		    CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BSEQ));
		BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_HI) h/w buffer "
		    "descriptor address\n",
		    CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_HI));
		BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_LO) h/w buffer "
		    "descriptor address\n",
		    CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_LO));
		BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDIDX) h/w rx consumer "
		    "index\n", CTX_RD(sc, GET_CID_ADDR(cid),
		    BCE_L2CTX_RX_NX_BDIDX));
		BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_PG_BDIDX) host page "
		    "producer index\n", CTX_RD(sc, GET_CID_ADDR(cid),
		    BCE_L2CTX_RX_HOST_PG_BDIDX));
		BCE_PRINTF(" 0x%08X - (L2CTX_RX_PG_BUF_SIZE) host rx_bd/page "
		    "buffer size\n", CTX_RD(sc, GET_CID_ADDR(cid),
		    BCE_L2CTX_RX_PG_BUF_SIZE));
		BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_HI) h/w page "
		    "chain address\n", CTX_RD(sc, GET_CID_ADDR(cid),
		    BCE_L2CTX_RX_NX_PG_BDHADDR_HI));
		BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_LO) h/w page "
		    "chain address\n", CTX_RD(sc, GET_CID_ADDR(cid),
		    BCE_L2CTX_RX_NX_PG_BDHADDR_LO));
		BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDIDX) h/w page "
		    "consumer index\n", CTX_RD(sc, GET_CID_ADDR(cid),
		    BCE_L2CTX_RX_NX_PG_BDIDX));
	} else if (cid == TX_CID) {
		/* The 5709 (Xinan) uses different TX context offsets. */
		if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
			BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE_XI) ctx type\n",
			    CTX_RD(sc, GET_CID_ADDR(cid),
			    BCE_L2CTX_TX_TYPE_XI));
			BCE_PRINTF(" 0x%08X - (L2CTX_CMD_TX_TYPE_XI) ctx "
			    "cmd\n", CTX_RD(sc, GET_CID_ADDR(cid),
			    BCE_L2CTX_TX_CMD_TYPE_XI));
			BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI_XI) "
			    "h/w buffer descriptor address\n",
			    CTX_RD(sc, GET_CID_ADDR(cid),
			    BCE_L2CTX_TX_TBDR_BHADDR_HI_XI));
			BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO_XI) "
			    "h/w buffer descriptor address\n",
			    CTX_RD(sc, GET_CID_ADDR(cid),
			    BCE_L2CTX_TX_TBDR_BHADDR_LO_XI));
			BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX_XI) "
			    "host producer index\n",
			    CTX_RD(sc, GET_CID_ADDR(cid),
			    BCE_L2CTX_TX_HOST_BIDX_XI));
			BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ_XI) "
			    "host byte sequence\n",
			    CTX_RD(sc, GET_CID_ADDR(cid),
			    BCE_L2CTX_TX_HOST_BSEQ_XI));
		} else {
			BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE) ctx type\n",
			    CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TYPE));
			BCE_PRINTF(" 0x%08X - (L2CTX_TX_CMD_TYPE) ctx cmd\n",
			    CTX_RD(sc, GET_CID_ADDR(cid),
			    BCE_L2CTX_TX_CMD_TYPE));
			BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI) "
			    "h/w buffer descriptor address\n",
			    CTX_RD(sc, GET_CID_ADDR(cid),
			    BCE_L2CTX_TX_TBDR_BHADDR_HI));
			BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO) "
			    "h/w buffer descriptor address\n",
			    CTX_RD(sc, GET_CID_ADDR(cid),
			    BCE_L2CTX_TX_TBDR_BHADDR_LO));
			BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX) host "
			    "producer index\n", CTX_RD(sc, GET_CID_ADDR(cid),
			    BCE_L2CTX_TX_HOST_BIDX));
			BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ) host byte "
			    "sequence\n", CTX_RD(sc, GET_CID_ADDR(cid),
			    BCE_L2CTX_TX_HOST_BSEQ));
		}
	}

	/* Raw hex dump of the first 0x300 bytes, 16 bytes per line. */
	BCE_PRINTF(
	    "----------------------------"
	    " Raw CTX "
	    "----------------------------\n");

	for (int i = 0x0; i < 0x300; i += 0x10) {
		BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
		    CTX_RD(sc, GET_CID_ADDR(cid), i),
		    CTX_RD(sc, GET_CID_ADDR(cid), i + 0x4),
		    CTX_RD(sc, GET_CID_ADDR(cid), i + 0x8),
		    CTX_RD(sc, GET_CID_ADDR(cid), i + 0xc));
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}
/****************************************************************************/
/* Prints out the FTQ data. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_ftqs(struct bce_softc *sc)
{
    u32 cmd, ctl, cur_depth, max_depth, valid_cnt, val;

    BCE_PRINTF(
        "----------------------------"
        " FTQ Data "
        "----------------------------\n");
    BCE_PRINTF(" FTQ Command Control Depth_Now "
        "Max_Depth Valid_Cnt \n");
    BCE_PRINTF(" ------- ---------- ---------- ---------- "
        "---------- ----------\n");

    /*
     * Setup the generic statistic counters for the FTQ valid count.
     * Each GEN_SEL register selects four queues (one per byte) whose
     * valid counts then appear in the GEN_STAT0..15 registers below.
     */
    val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT << 24) |
        (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT << 16) |
        (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT << 8) |
        (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT);
    REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val);

    val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT << 24) |
        (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT << 16) |
        (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT << 8) |
        (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT);
    REG_WR(sc, BCE_HC_STAT_GEN_SEL_1, val);

    val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT << 24) |
        (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT << 16) |
        (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT << 8) |
        (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT);
    REG_WR(sc, BCE_HC_STAT_GEN_SEL_2, val);

    val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT << 24) |
        (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT << 16) |
        (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT << 8) |
        (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT);
    REG_WR(sc, BCE_HC_STAT_GEN_SEL_3, val);

    /* Input queue to the Receive Lookup state machine */
    cmd = REG_RD(sc, BCE_RLUP_FTQ_CMD);
    ctl = REG_RD(sc, BCE_RLUP_FTQ_CTL);
    cur_depth = (ctl & BCE_RLUP_FTQ_CTL_CUR_DEPTH) >> 22;
    max_depth = (ctl & BCE_RLUP_FTQ_CTL_MAX_DEPTH) >> 12;
    valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
    BCE_PRINTF(" RLUP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
        cmd, ctl, cur_depth, max_depth, valid_cnt);

    /* Input queue to the Receive Processor */
    cmd = REG_RD_IND(sc, BCE_RXP_FTQ_CMD);
    ctl = REG_RD_IND(sc, BCE_RXP_FTQ_CTL);
    cur_depth = (ctl & BCE_RXP_FTQ_CTL_CUR_DEPTH) >> 22;
    max_depth = (ctl & BCE_RXP_FTQ_CTL_MAX_DEPTH) >> 12;
    valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
    BCE_PRINTF(" RXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
        cmd, ctl, cur_depth, max_depth, valid_cnt);

    /* Input queue to the Receive Processor (CFTQ) */
    cmd = REG_RD_IND(sc, BCE_RXP_CFTQ_CMD);
    ctl = REG_RD_IND(sc, BCE_RXP_CFTQ_CTL);
    cur_depth = (ctl & BCE_RXP_CFTQ_CTL_CUR_DEPTH) >> 22;
    max_depth = (ctl & BCE_RXP_CFTQ_CTL_MAX_DEPTH) >> 12;
    valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
    BCE_PRINTF(" RXPC 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
        cmd, ctl, cur_depth, max_depth, valid_cnt);

    /* Input queue to the Receive Virtual to Physical state machine */
    cmd = REG_RD(sc, BCE_RV2P_PFTQ_CMD);
    ctl = REG_RD(sc, BCE_RV2P_PFTQ_CTL);
    cur_depth = (ctl & BCE_RV2P_PFTQ_CTL_CUR_DEPTH) >> 22;
    max_depth = (ctl & BCE_RV2P_PFTQ_CTL_MAX_DEPTH) >> 12;
    valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3);
    BCE_PRINTF(" RV2PP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
        cmd, ctl, cur_depth, max_depth, valid_cnt);

    /* Input queue to the Receive Virtual to Physical state machine (MFTQ) */
    cmd = REG_RD(sc, BCE_RV2P_MFTQ_CMD);
    ctl = REG_RD(sc, BCE_RV2P_MFTQ_CTL);
    cur_depth = (ctl & BCE_RV2P_MFTQ_CTL_CUR_DEPTH) >> 22;
    max_depth = (ctl & BCE_RV2P_MFTQ_CTL_MAX_DEPTH) >> 12;
    valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT4);
    BCE_PRINTF(" RV2PM 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
        cmd, ctl, cur_depth, max_depth, valid_cnt);

    /* Input queue to the Receive Virtual to Physical state machine (TFTQ) */
    cmd = REG_RD(sc, BCE_RV2P_TFTQ_CMD);
    ctl = REG_RD(sc, BCE_RV2P_TFTQ_CTL);
    cur_depth = (ctl & BCE_RV2P_TFTQ_CTL_CUR_DEPTH) >> 22;
    max_depth = (ctl & BCE_RV2P_TFTQ_CTL_MAX_DEPTH) >> 12;
    valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT5);
    BCE_PRINTF(" RV2PT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
        cmd, ctl, cur_depth, max_depth, valid_cnt);

    /* Input queue to the Receive DMA state machine */
    cmd = REG_RD(sc, BCE_RDMA_FTQ_CMD);
    ctl = REG_RD(sc, BCE_RDMA_FTQ_CTL);
    cur_depth = (ctl & BCE_RDMA_FTQ_CTL_CUR_DEPTH) >> 22;
    max_depth = (ctl & BCE_RDMA_FTQ_CTL_MAX_DEPTH) >> 12;
    valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT6);
    BCE_PRINTF(" RDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
        cmd, ctl, cur_depth, max_depth, valid_cnt);

    /* Input queue to the Transmit Scheduler state machine */
    cmd = REG_RD(sc, BCE_TSCH_FTQ_CMD);
    ctl = REG_RD(sc, BCE_TSCH_FTQ_CTL);
    cur_depth = (ctl & BCE_TSCH_FTQ_CTL_CUR_DEPTH) >> 22;
    max_depth = (ctl & BCE_TSCH_FTQ_CTL_MAX_DEPTH) >> 12;
    valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT7);
    BCE_PRINTF(" TSCH 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
        cmd, ctl, cur_depth, max_depth, valid_cnt);

    /* Input queue to the Transmit Buffer Descriptor state machine */
    cmd = REG_RD(sc, BCE_TBDR_FTQ_CMD);
    ctl = REG_RD(sc, BCE_TBDR_FTQ_CTL);
    cur_depth = (ctl & BCE_TBDR_FTQ_CTL_CUR_DEPTH) >> 22;
    max_depth = (ctl & BCE_TBDR_FTQ_CTL_MAX_DEPTH) >> 12;
    valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT8);
    BCE_PRINTF(" TBDR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
        cmd, ctl, cur_depth, max_depth, valid_cnt);

    /* Input queue to the Transmit Processor */
    cmd = REG_RD_IND(sc, BCE_TXP_FTQ_CMD);
    ctl = REG_RD_IND(sc, BCE_TXP_FTQ_CTL);
    cur_depth = (ctl & BCE_TXP_FTQ_CTL_CUR_DEPTH) >> 22;
    max_depth = (ctl & BCE_TXP_FTQ_CTL_MAX_DEPTH) >> 12;
    valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT9);
    BCE_PRINTF(" TXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
        cmd, ctl, cur_depth, max_depth, valid_cnt);

    /* Input queue to the Transmit DMA state machine */
    cmd = REG_RD(sc, BCE_TDMA_FTQ_CMD);
    ctl = REG_RD(sc, BCE_TDMA_FTQ_CTL);
    cur_depth = (ctl & BCE_TDMA_FTQ_CTL_CUR_DEPTH) >> 22;
    max_depth = (ctl & BCE_TDMA_FTQ_CTL_MAX_DEPTH) >> 12;
    valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT10);
    BCE_PRINTF(" TDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
        cmd, ctl, cur_depth, max_depth, valid_cnt);

    /* Input queue to the Transmit Patch-Up Processor */
    cmd = REG_RD_IND(sc, BCE_TPAT_FTQ_CMD);
    ctl = REG_RD_IND(sc, BCE_TPAT_FTQ_CTL);
    cur_depth = (ctl & BCE_TPAT_FTQ_CTL_CUR_DEPTH) >> 22;
    max_depth = (ctl & BCE_TPAT_FTQ_CTL_MAX_DEPTH) >> 12;
    valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT11);
    BCE_PRINTF(" TPAT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
        cmd, ctl, cur_depth, max_depth, valid_cnt);

    /* Input queue to the Transmit Assembler state machine */
    cmd = REG_RD_IND(sc, BCE_TAS_FTQ_CMD);
    ctl = REG_RD_IND(sc, BCE_TAS_FTQ_CTL);
    cur_depth = (ctl & BCE_TAS_FTQ_CTL_CUR_DEPTH) >> 22;
    max_depth = (ctl & BCE_TAS_FTQ_CTL_MAX_DEPTH) >> 12;
    valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT12);
    BCE_PRINTF(" TAS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
        cmd, ctl, cur_depth, max_depth, valid_cnt);

    /* Input queue to the Completion Processor (COMXQ) */
    cmd = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CMD);
    ctl = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CTL);
    cur_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_CUR_DEPTH) >> 22;
    max_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_MAX_DEPTH) >> 12;
    valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT13);
    BCE_PRINTF(" COMX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
        cmd, ctl, cur_depth, max_depth, valid_cnt);

    /* Input queue to the Completion Processor (COMTQ) */
    cmd = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CMD);
    ctl = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CTL);
    cur_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_CUR_DEPTH) >> 22;
    max_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_MAX_DEPTH) >> 12;
    valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT14);
    BCE_PRINTF(" COMT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
        cmd, ctl, cur_depth, max_depth, valid_cnt);

    /*
     * Input queue to the Completion Processor (COMQ).
     * Bug fix: this row was previously labeled " COMX", duplicating
     * the COMXQ row above even though the values come from COMQ.
     */
    cmd = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CMD);
    ctl = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CTL);
    cur_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_CUR_DEPTH) >> 22;
    max_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_MAX_DEPTH) >> 12;
    valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT15);
    BCE_PRINTF(" COM 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
        cmd, ctl, cur_depth, max_depth, valid_cnt);

    /* Setup the generic statistic counters for the FTQ valid count. */
    val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT << 16) |
        (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT << 8) |
        (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT);
    if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)
        val = val |
            (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCSQ_VALID_CNT_XI <<
            24);
    REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val);

    /* Input queue to the Management Control Processor */
    cmd = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CMD);
    ctl = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CTL);
    cur_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_CUR_DEPTH) >> 22;
    max_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_MAX_DEPTH) >> 12;
    valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
    BCE_PRINTF(" MCP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
        cmd, ctl, cur_depth, max_depth, valid_cnt);

    /* Input queue to the Command Processor */
    cmd = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CMD);
    ctl = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CTL);
    cur_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_CUR_DEPTH) >> 22;
    max_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_MAX_DEPTH) >> 12;
    valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
    BCE_PRINTF(" CP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
        cmd, ctl, cur_depth, max_depth, valid_cnt);

    /* Input queue to the Completion Scheduler state machine */
    cmd = REG_RD(sc, BCE_CSCH_CH_FTQ_CMD);
    ctl = REG_RD(sc, BCE_CSCH_CH_FTQ_CTL);
    cur_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_CUR_DEPTH) >> 22;
    max_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_MAX_DEPTH) >> 12;
    valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
    BCE_PRINTF(" CS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
        cmd, ctl, cur_depth, max_depth, valid_cnt);

    if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
        /* Input queue to the RV2P Command Scheduler (5709 only) */
        cmd = REG_RD(sc, BCE_RV2PCSR_FTQ_CMD);
        ctl = REG_RD(sc, BCE_RV2PCSR_FTQ_CTL);
        cur_depth = (ctl & 0xFFC00000) >> 22;
        max_depth = (ctl & 0x003FF000) >> 12;
        valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3);
        BCE_PRINTF(" RV2PCSR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
            cmd, ctl, cur_depth, max_depth, valid_cnt);
    }

    BCE_PRINTF(
        "----------------------------"
        "----------------"
        "----------------------------\n");
}
/****************************************************************************/
/* Prints out the TX chain. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_tx_chain(struct bce_softc *sc, u16 tx_prod, int count)
{
    int remaining;

    /* Describe the geometry of the tx_bd chain before decoding entries. */
    BCE_PRINTF(
        "----------------------------"
        " tx_bd chain "
        "----------------------------\n");
    BCE_PRINTF("page size = 0x%08X, tx chain pages = 0x%08X\n",
        (u32) BCM_PAGE_SIZE, (u32) sc->tx_pages);
    BCE_PRINTF("tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
        (u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);
    BCE_PRINTF("total tx_bd = 0x%08X\n", (u32) TOTAL_TX_BD_ALLOC);
    BCE_PRINTF(
        "----------------------------"
        " tx_bd data "
        "----------------------------\n");

    /* Decode 'count' TX buffer descriptors, walking forward from tx_prod. */
    for (remaining = count; remaining > 0; remaining--) {
        struct tx_bd *txbd =
            &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];

        bce_dump_txbd(sc, tx_prod, txbd);
        tx_prod++;
    }

    BCE_PRINTF(
        "----------------------------"
        "----------------"
        "----------------------------\n");
}
/****************************************************************************/
/* Prints out the RX chain. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_rx_bd_chain(struct bce_softc *sc, u16 rx_prod, int count)
{
    int remaining;

    /* Describe the geometry of the rx_bd chain before decoding entries. */
    BCE_PRINTF(
        "----------------------------"
        " rx_bd chain "
        "----------------------------\n");
    BCE_PRINTF("page size = 0x%08X, rx chain pages = 0x%08X\n",
        (u32) BCM_PAGE_SIZE, (u32) sc->rx_pages);
    BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
        (u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);
    BCE_PRINTF("total rx_bd = 0x%08X\n", (u32) TOTAL_RX_BD_ALLOC);
    BCE_PRINTF(
        "----------------------------"
        " rx_bd data "
        "----------------------------\n");

    /*
     * Decode 'count' RX buffer descriptors; the index wraps via
     * RX_CHAIN_IDX() so the walk stays inside the chain.
     */
    for (remaining = count; remaining > 0; remaining--) {
        struct rx_bd *rxbd =
            &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];

        bce_dump_rxbd(sc, rx_prod, rxbd);
        rx_prod = RX_CHAIN_IDX(rx_prod + 1);
    }

    BCE_PRINTF(
        "----------------------------"
        "----------------"
        "----------------------------\n");
}
/****************************************************************************/
/* Prints out the page chain. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_pg_chain(struct bce_softc *sc, u16 pg_prod, int count)
{
    int remaining;

    /* Describe the geometry of the page chain before decoding entries. */
    BCE_PRINTF(
        "----------------------------"
        " page chain "
        "----------------------------\n");
    BCE_PRINTF("page size = 0x%08X, pg chain pages = 0x%08X\n",
        (u32) BCM_PAGE_SIZE, (u32) sc->pg_pages);
    BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
        (u32) TOTAL_PG_BD_PER_PAGE, (u32) USABLE_PG_BD_PER_PAGE);
    BCE_PRINTF("total pg_bd = 0x%08X\n", (u32) TOTAL_PG_BD_ALLOC);
    BCE_PRINTF(
        "----------------------------"
        " page data "
        "----------------------------\n");

    /*
     * Decode 'count' page buffer descriptors; the index wraps via
     * PG_CHAIN_IDX() so the walk stays inside the chain.
     */
    for (remaining = count; remaining > 0; remaining--) {
        struct rx_bd *pgbd =
            &sc->pg_bd_chain[PG_PAGE(pg_prod)][PG_IDX(pg_prod)];

        bce_dump_pgbd(sc, pg_prod, pgbd);
        pg_prod = PG_CHAIN_IDX(pg_prod + 1);
    }

    BCE_PRINTF(
        "----------------------------"
        "----------------"
        "----------------------------\n");
}
/*
 * Print a non-zero RX quick consumer index from the status block 'sblk'
 * (which must be in scope at the expansion site), together with its
 * translation to a chain index.  NOTE(review): expands to a bare `if'
 * with a trailing semicolon — callers may invoke it without one, and a
 * do-while(0) wrapper would break such call sites.
 */
#define BCE_PRINT_RX_CONS(arg) \
if (sblk->status_rx_quick_consumer_index##arg) \
BCE_PRINTF("0x%04X(0x%04X) - rx_quick_consumer_index%d\n", \
sblk->status_rx_quick_consumer_index##arg, (u16) \
RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index##arg), \
arg);
/*
 * Same as BCE_PRINT_RX_CONS() but for the TX quick consumer indices,
 * translated via TX_CHAIN_IDX().
 */
#define BCE_PRINT_TX_CONS(arg) \
if (sblk->status_tx_quick_consumer_index##arg) \
BCE_PRINTF("0x%04X(0x%04X) - tx_quick_consumer_index%d\n", \
sblk->status_tx_quick_consumer_index##arg, (u16) \
TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index##arg), \
arg);
/****************************************************************************/
/* Prints out the status block from host memory. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/*
 * Dump the host-memory status block: attention bits, the status index,
 * and every non-zero RX/TX quick consumer index.
 */
static __attribute__ ((noinline)) void
bce_dump_status_block(struct bce_softc *sc)
{
struct status_block *sblk;
/* Sync the DMA buffer so the host sees the device's latest writes. */
bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_POSTREAD);
sblk = sc->status_block;
BCE_PRINTF(
"----------------------------"
" Status Block "
"----------------------------\n");
/* These indices are used for normal L2 drivers. */
BCE_PRINTF(" 0x%08X - attn_bits\n",
sblk->status_attn_bits);
BCE_PRINTF(" 0x%08X - attn_bits_ack\n",
sblk->status_attn_bits_ack);
BCE_PRINT_RX_CONS(0);
/* No trailing ';' here: relies on the semicolon inside the macro. */
BCE_PRINT_TX_CONS(0)
BCE_PRINTF(" 0x%04X - status_idx\n", sblk->status_idx);
/* These indices are not used for normal L2 drivers. */
BCE_PRINT_RX_CONS(1); BCE_PRINT_RX_CONS(2); BCE_PRINT_RX_CONS(3);
BCE_PRINT_RX_CONS(4); BCE_PRINT_RX_CONS(5); BCE_PRINT_RX_CONS(6);
BCE_PRINT_RX_CONS(7); BCE_PRINT_RX_CONS(8); BCE_PRINT_RX_CONS(9);
BCE_PRINT_RX_CONS(10); BCE_PRINT_RX_CONS(11); BCE_PRINT_RX_CONS(12);
BCE_PRINT_RX_CONS(13); BCE_PRINT_RX_CONS(14); BCE_PRINT_RX_CONS(15);
BCE_PRINT_TX_CONS(1); BCE_PRINT_TX_CONS(2); BCE_PRINT_TX_CONS(3);
/* Completion/command queue indices, printed only when either is set. */
if (sblk->status_completion_producer_index ||
sblk->status_cmd_consumer_index)
BCE_PRINTF("com_prod = 0x%08X, cmd_cons = 0x%08X\n",
sblk->status_completion_producer_index,
sblk->status_cmd_consumer_index);
BCE_PRINTF(
"----------------------------"
"----------------"
"----------------------------\n");
}
/*
 * Print a 64-bit statistic (hi:lo register pair) from the stats block
 * 'sblk' in scope at the expansion site, but only when it is non-zero.
 * NOTE(review): expands to a bare `if' with its own trailing semicolon.
 */
#define BCE_PRINT_64BIT_STAT(arg) \
if (sblk->arg##_lo || sblk->arg##_hi) \
BCE_PRINTF("0x%08X:%08X : %s\n", sblk->arg##_hi, \
sblk->arg##_lo, #arg);
/* Same, for a single 32-bit statistic. */
#define BCE_PRINT_32BIT_STAT(arg) \
if (sblk->arg) \
BCE_PRINTF(" 0x%08X : %s\n", \
sblk->arg, #arg);
/****************************************************************************/
/* Prints out the statistics block from host memory. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/*
 * Dump every non-zero counter from the host-memory statistics block.
 * Zero-valued statistics are suppressed by the BCE_PRINT_*_STAT macros.
 */
static __attribute__ ((noinline)) void
bce_dump_stats_block(struct bce_softc *sc)
{
struct statistics_block *sblk;
/* Sync the DMA buffer so the host sees the device's latest writes. */
bus_dmamap_sync(sc->stats_tag, sc->stats_map, BUS_DMASYNC_POSTREAD);
sblk = sc->stats_block;
BCE_PRINTF(
"---------------"
" Stats Block (All Stats Not Shown Are 0) "
"---------------\n");
/* 64-bit octet/packet counters. */
BCE_PRINT_64BIT_STAT(stat_IfHCInOctets);
BCE_PRINT_64BIT_STAT(stat_IfHCInBadOctets);
BCE_PRINT_64BIT_STAT(stat_IfHCOutOctets);
BCE_PRINT_64BIT_STAT(stat_IfHCOutBadOctets);
BCE_PRINT_64BIT_STAT(stat_IfHCInUcastPkts);
BCE_PRINT_64BIT_STAT(stat_IfHCInBroadcastPkts);
BCE_PRINT_64BIT_STAT(stat_IfHCInMulticastPkts);
BCE_PRINT_64BIT_STAT(stat_IfHCOutUcastPkts);
BCE_PRINT_64BIT_STAT(stat_IfHCOutBroadcastPkts);
BCE_PRINT_64BIT_STAT(stat_IfHCOutMulticastPkts);
/* 32-bit error, collision, size-bucket, and flow-control counters. */
BCE_PRINT_32BIT_STAT(
stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
BCE_PRINT_32BIT_STAT(stat_Dot3StatsCarrierSenseErrors);
BCE_PRINT_32BIT_STAT(stat_Dot3StatsFCSErrors);
BCE_PRINT_32BIT_STAT(stat_Dot3StatsAlignmentErrors);
BCE_PRINT_32BIT_STAT(stat_Dot3StatsSingleCollisionFrames);
BCE_PRINT_32BIT_STAT(stat_Dot3StatsMultipleCollisionFrames);
BCE_PRINT_32BIT_STAT(stat_Dot3StatsDeferredTransmissions);
BCE_PRINT_32BIT_STAT(stat_Dot3StatsExcessiveCollisions);
BCE_PRINT_32BIT_STAT(stat_Dot3StatsLateCollisions);
BCE_PRINT_32BIT_STAT(stat_EtherStatsCollisions);
BCE_PRINT_32BIT_STAT(stat_EtherStatsFragments);
BCE_PRINT_32BIT_STAT(stat_EtherStatsJabbers);
BCE_PRINT_32BIT_STAT(stat_EtherStatsUndersizePkts);
BCE_PRINT_32BIT_STAT(stat_EtherStatsOversizePkts);
BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx64Octets);
BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx65Octetsto127Octets);
BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx128Octetsto255Octets);
BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx256Octetsto511Octets);
BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx512Octetsto1023Octets);
BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx1024Octetsto1522Octets);
BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx1523Octetsto9022Octets);
BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx64Octets);
BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx65Octetsto127Octets);
BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx128Octetsto255Octets);
BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx256Octetsto511Octets);
BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx512Octetsto1023Octets);
BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx1024Octetsto1522Octets);
BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx1523Octetsto9022Octets);
BCE_PRINT_32BIT_STAT(stat_XonPauseFramesReceived);
BCE_PRINT_32BIT_STAT(stat_XoffPauseFramesReceived);
BCE_PRINT_32BIT_STAT(stat_OutXonSent);
BCE_PRINT_32BIT_STAT(stat_OutXoffSent);
BCE_PRINT_32BIT_STAT(stat_FlowControlDone);
BCE_PRINT_32BIT_STAT(stat_MacControlFramesReceived);
BCE_PRINT_32BIT_STAT(stat_XoffStateEntered);
BCE_PRINT_32BIT_STAT(stat_IfInFramesL2FilterDiscards);
BCE_PRINT_32BIT_STAT(stat_IfInRuleCheckerDiscards);
BCE_PRINT_32BIT_STAT(stat_IfInFTQDiscards);
BCE_PRINT_32BIT_STAT(stat_IfInMBUFDiscards);
BCE_PRINT_32BIT_STAT(stat_IfInRuleCheckerP4Hit);
BCE_PRINT_32BIT_STAT(stat_CatchupInRuleCheckerDiscards);
BCE_PRINT_32BIT_STAT(stat_CatchupInFTQDiscards);
BCE_PRINT_32BIT_STAT(stat_CatchupInMBUFDiscards);
BCE_PRINT_32BIT_STAT(stat_CatchupInRuleCheckerP4Hit);
BCE_PRINTF(
"----------------------------"
"----------------"
"----------------------------\n");
}
/****************************************************************************/
/* Prints out a summary of the driver state. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/*
 * Dump a summary of the driver's software state: key virtual addresses
 * (split into hi:lo 32-bit halves for printing), interrupt counters,
 * and the TX/RX/page chain producer/consumer bookkeeping.
 */
static __attribute__ ((noinline)) void
bce_dump_driver_state(struct bce_softc *sc)
{
u32 val_hi, val_lo;
BCE_PRINTF(
"-----------------------------"
" Driver State "
"-----------------------------\n");
/* Pointers are printed as hi:lo pairs to cover 64-bit kernels. */
val_hi = BCE_ADDR_HI(sc);
val_lo = BCE_ADDR_LO(sc);
BCE_PRINTF("0x%08X:%08X - (sc) driver softc structure virtual "
"address\n", val_hi, val_lo);
val_hi = BCE_ADDR_HI(sc->bce_vhandle);
val_lo = BCE_ADDR_LO(sc->bce_vhandle);
BCE_PRINTF("0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual "
"address\n", val_hi, val_lo);
val_hi = BCE_ADDR_HI(sc->status_block);
val_lo = BCE_ADDR_LO(sc->status_block);
BCE_PRINTF("0x%08X:%08X - (sc->status_block) status block "
"virtual address\n", val_hi, val_lo);
val_hi = BCE_ADDR_HI(sc->stats_block);
val_lo = BCE_ADDR_LO(sc->stats_block);
BCE_PRINTF("0x%08X:%08X - (sc->stats_block) statistics block "
"virtual address\n", val_hi, val_lo);
val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
BCE_PRINTF("0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain "
"virtual address\n", val_hi, val_lo);
val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
BCE_PRINTF("0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain "
"virtual address\n", val_hi, val_lo);
/* The page chain exists only in header-split mode. */
if (bce_hdr_split == TRUE) {
val_hi = BCE_ADDR_HI(sc->pg_bd_chain);
val_lo = BCE_ADDR_LO(sc->pg_bd_chain);
BCE_PRINTF("0x%08X:%08X - (sc->pg_bd_chain) page chain "
"virtual address\n", val_hi, val_lo);
}
val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
BCE_PRINTF("0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain "
"virtual address\n", val_hi, val_lo);
val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
BCE_PRINTF("0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain "
"virtual address\n", val_hi, val_lo);
if (bce_hdr_split == TRUE) {
val_hi = BCE_ADDR_HI(sc->pg_mbuf_ptr);
val_lo = BCE_ADDR_LO(sc->pg_mbuf_ptr);
BCE_PRINTF("0x%08X:%08X - (sc->pg_mbuf_ptr) page mbuf chain "
"virtual address\n", val_hi, val_lo);
}
/* Interrupt accounting. */
BCE_PRINTF(" 0x%016llX - (sc->interrupts_generated) "
"h/w intrs\n",
(long long unsigned int) sc->interrupts_generated);
BCE_PRINTF(" 0x%016llX - (sc->interrupts_rx) "
"rx interrupts handled\n",
(long long unsigned int) sc->interrupts_rx);
BCE_PRINTF(" 0x%016llX - (sc->interrupts_tx) "
"tx interrupts handled\n",
(long long unsigned int) sc->interrupts_tx);
BCE_PRINTF(" 0x%016llX - (sc->phy_interrupts) "
"phy interrupts handled\n",
(long long unsigned int) sc->phy_interrupts);
BCE_PRINTF(" 0x%08X - (sc->last_status_idx) "
"status block index\n", sc->last_status_idx);
/* TX chain bookkeeping: raw index plus chain-relative index. */
BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_prod) tx producer "
"index\n", sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod));
BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_cons) tx consumer "
"index\n", sc->tx_cons, (u16) TX_CHAIN_IDX(sc->tx_cons));
BCE_PRINTF(" 0x%08X - (sc->tx_prod_bseq) tx producer "
"byte seq index\n", sc->tx_prod_bseq);
BCE_PRINTF(" 0x%08X - (sc->debug_tx_mbuf_alloc) tx "
"mbufs allocated\n", sc->debug_tx_mbuf_alloc);
BCE_PRINTF(" 0x%08X - (sc->used_tx_bd) used "
"tx_bd's\n", sc->used_tx_bd);
BCE_PRINTF(" 0x%04X/0x%04X - (sc->tx_hi_watermark)/"
"(sc->max_tx_bd)\n", sc->tx_hi_watermark, sc->max_tx_bd);
/* RX chain bookkeeping. */
BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_prod) rx producer "
"index\n", sc->rx_prod, (u16) RX_CHAIN_IDX(sc->rx_prod));
BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_cons) rx consumer "
"index\n", sc->rx_cons, (u16) RX_CHAIN_IDX(sc->rx_cons));
BCE_PRINTF(" 0x%08X - (sc->rx_prod_bseq) rx producer "
"byte seq index\n", sc->rx_prod_bseq);
BCE_PRINTF(" 0x%04X/0x%04X - (sc->rx_low_watermark)/"
"(sc->max_rx_bd)\n", sc->rx_low_watermark, sc->max_rx_bd);
BCE_PRINTF(" 0x%08X - (sc->debug_rx_mbuf_alloc) rx "
"mbufs allocated\n", sc->debug_rx_mbuf_alloc);
BCE_PRINTF(" 0x%08X - (sc->free_rx_bd) free "
"rx_bd's\n", sc->free_rx_bd);
/* Page chain bookkeeping (header-split mode). */
if (bce_hdr_split == TRUE) {
BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_prod) page producer "
"index\n", sc->pg_prod, (u16) PG_CHAIN_IDX(sc->pg_prod));
BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_cons) page consumer "
"index\n", sc->pg_cons, (u16) PG_CHAIN_IDX(sc->pg_cons));
BCE_PRINTF(" 0x%08X - (sc->debug_pg_mbuf_alloc) page "
"mbufs allocated\n", sc->debug_pg_mbuf_alloc);
}
/*
 * NOTE(review): free_pg_bd and pg_low_watermark are printed even when
 * bce_hdr_split is FALSE, unlike the other page-chain stats above —
 * confirm whether these should also be inside the guard.
 */
BCE_PRINTF(" 0x%08X - (sc->free_pg_bd) free page "
"rx_bd's\n", sc->free_pg_bd);
BCE_PRINTF(" 0x%04X/0x%04X - (sc->pg_low_watermark)/"
"(sc->max_pg_bd)\n", sc->pg_low_watermark, sc->max_pg_bd);
BCE_PRINTF(" 0x%08X - (sc->mbuf_alloc_failed_count) "
"mbuf alloc failures\n", sc->mbuf_alloc_failed_count);
BCE_PRINTF(" 0x%08X - (sc->bce_flags) "
"bce mac flags\n", sc->bce_flags);
BCE_PRINTF(" 0x%08X - (sc->bce_phy_flags) "
"bce phy flags\n", sc->bce_phy_flags);
BCE_PRINTF(
"----------------------------"
"----------------"
"----------------------------\n");
}
/****************************************************************************/
/* Prints out the hardware state through a summary of important registers, */
/* followed by a complete register dump. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/*
 * Dump a summary of important hardware status registers, followed by a
 * raw dump of the register space from 0x400 to 0x8000.
 */
static __attribute__ ((noinline)) void
bce_dump_hw_state(struct bce_softc *sc)
{
u32 val;
BCE_PRINTF(
"----------------------------"
" Hardware State "
"----------------------------\n");
BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver);
/* Status registers reachable through direct (memory-mapped) reads. */
val = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
BCE_PRINTF("0x%08X - (0x%06X) misc_enable_status_bits\n",
val, BCE_MISC_ENABLE_STATUS_BITS);
val = REG_RD(sc, BCE_DMA_STATUS);
BCE_PRINTF("0x%08X - (0x%06X) dma_status\n",
val, BCE_DMA_STATUS);
val = REG_RD(sc, BCE_CTX_STATUS);
BCE_PRINTF("0x%08X - (0x%06X) ctx_status\n",
val, BCE_CTX_STATUS);
val = REG_RD(sc, BCE_EMAC_STATUS);
BCE_PRINTF("0x%08X - (0x%06X) emac_status\n",
val, BCE_EMAC_STATUS);
val = REG_RD(sc, BCE_RPM_STATUS);
BCE_PRINTF("0x%08X - (0x%06X) rpm_status\n",
val, BCE_RPM_STATUS);
/* ToDo: Create a #define for this constant. */
val = REG_RD(sc, 0x2004);
BCE_PRINTF("0x%08X - (0x%06X) rlup_status\n",
val, 0x2004);
val = REG_RD(sc, BCE_RV2P_STATUS);
BCE_PRINTF("0x%08X - (0x%06X) rv2p_status\n",
val, BCE_RV2P_STATUS);
/* ToDo: Create a #define for this constant. */
val = REG_RD(sc, 0x2c04);
BCE_PRINTF("0x%08X - (0x%06X) rdma_status\n",
val, 0x2c04);
val = REG_RD(sc, BCE_TBDR_STATUS);
BCE_PRINTF("0x%08X - (0x%06X) tbdr_status\n",
val, BCE_TBDR_STATUS);
val = REG_RD(sc, BCE_TDMA_STATUS);
BCE_PRINTF("0x%08X - (0x%06X) tdma_status\n",
val, BCE_TDMA_STATUS);
val = REG_RD(sc, BCE_HC_STATUS);
BCE_PRINTF("0x%08X - (0x%06X) hc_status\n",
val, BCE_HC_STATUS);
/* Per-CPU state registers require indirect reads. */
val = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n",
val, BCE_TXP_CPU_STATE);
val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n",
val, BCE_TPAT_CPU_STATE);
val = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n",
val, BCE_RXP_CPU_STATE);
val = REG_RD_IND(sc, BCE_COM_CPU_STATE);
BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n",
val, BCE_COM_CPU_STATE);
val = REG_RD_IND(sc, BCE_MCP_CPU_STATE);
BCE_PRINTF("0x%08X - (0x%06X) mcp_cpu_state\n",
val, BCE_MCP_CPU_STATE);
val = REG_RD_IND(sc, BCE_CP_CPU_STATE);
BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n",
val, BCE_CP_CPU_STATE);
BCE_PRINTF(
"----------------------------"
"----------------"
"----------------------------\n");
BCE_PRINTF(
"----------------------------"
" Register Dump "
"----------------------------\n");
/* Raw dump, four 32-bit words per line. */
for (int i = 0x400; i < 0x8000; i += 0x10) {
BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
}
BCE_PRINTF(
"----------------------------"
"----------------"
"----------------------------\n");
}
/****************************************************************************/
/* Prints out the contents of shared memory which is used for host driver */
/* to bootcode firmware communication. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_shmem_state(struct bce_softc *sc)
{
    /*
     * Dump the driver/bootcode shared memory area.
     * Bug fix: the opening banner previously read " Hardware State ",
     * an apparent copy-paste from bce_dump_hw_state(); label this dump
     * as shared-memory state instead.
     */
    BCE_PRINTF(
        "----------------------------"
        " Shared Memory State "
        "----------------------------\n");
    BCE_PRINTF("0x%08X - Shared memory base address\n",
        sc->bce_shmem_base);
    BCE_PRINTF("%s - bootcode version\n",
        sc->bce_bc_ver);
    BCE_PRINTF(
        "----------------------------"
        " Shared Mem "
        "----------------------------\n");

    /* Raw dump of the first 0x200 bytes, four 32-bit words per line. */
    for (int i = 0x0; i < 0x200; i += 0x10) {
        BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
            i, bce_shmem_rd(sc, i), bce_shmem_rd(sc, i + 0x4),
            bce_shmem_rd(sc, i + 0x8), bce_shmem_rd(sc, i + 0xC));
    }

    BCE_PRINTF(
        "----------------------------"
        "----------------"
        "----------------------------\n");
}
/****************************************************************************/
/* Prints out the mailbox queue registers. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_mq_regs(struct bce_softc *sc)
{
    int offset;

    /* Banner for the mailbox queue register dump. */
    BCE_PRINTF(
        "----------------------------"
        " MQ Regs "
        "----------------------------\n");
    BCE_PRINTF(
        "----------------------------"
        "----------------"
        "----------------------------\n");

    /* Dump the MQ register range, four 32-bit words per line. */
    offset = 0x3c00;
    while (offset < 0x4000) {
        BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
            offset, REG_RD(sc, offset), REG_RD(sc, offset + 0x4),
            REG_RD(sc, offset + 0x8), REG_RD(sc, offset + 0xC));
        offset += 0x10;
    }

    BCE_PRINTF(
        "----------------------------"
        "----------------"
        "----------------------------\n");
}
/****************************************************************************/
/* Prints out the bootcode state. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/*
 * Dump the bootcode state words kept in shared memory: reset type,
 * state, condition, and debug command.
 */
static __attribute__ ((noinline)) void
bce_dump_bc_state(struct bce_softc *sc)
{
u32 val;
BCE_PRINTF(
"----------------------------"
" Bootcode State "
"----------------------------\n");
BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver);
val = bce_shmem_rd(sc, BCE_BC_RESET_TYPE);
BCE_PRINTF("0x%08X - (0x%06X) reset_type\n",
val, BCE_BC_RESET_TYPE);
val = bce_shmem_rd(sc, BCE_BC_STATE);
BCE_PRINTF("0x%08X - (0x%06X) state\n",
val, BCE_BC_STATE);
val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
BCE_PRINTF("0x%08X - (0x%06X) condition\n",
val, BCE_BC_STATE_CONDITION);
val = bce_shmem_rd(sc, BCE_BC_STATE_DEBUG_CMD);
BCE_PRINTF("0x%08X - (0x%06X) debug_cmd\n",
val, BCE_BC_STATE_DEBUG_CMD);
BCE_PRINTF(
"----------------------------"
"----------------"
"----------------------------\n");
}
/****************************************************************************/
/* Prints out the TXP processor state. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static __attribute__ ((noinline)) void
bce_dump_txp_state(struct bce_softc *sc, int regs)
{
    u32 val;
    u32 fw_version[4];

    BCE_PRINTF(
        "----------------------------"
        " TXP State "
        "----------------------------\n");

    /*
     * Read the 12-byte firmware version string from TXP scratch RAM.
     * Bug fix: NUL-terminate the buffer before passing it to %s so the
     * print cannot read past the end of a 12-byte, unterminated array.
     */
    for (int i = 0; i < 3; i++)
        fw_version[i] = htonl(REG_RD_IND(sc,
            (BCE_TXP_SCRATCH + 0x10 + i * 4)));
    fw_version[3] = 0;
    BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);

    val = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
    BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_mode\n",
        val, BCE_TXP_CPU_MODE);
    val = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
    BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n",
        val, BCE_TXP_CPU_STATE);
    val = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
    BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_event_mask\n",
        val, BCE_TXP_CPU_EVENT_MASK);

    if (regs) {
        BCE_PRINTF(
            "----------------------------"
            " Register Dump "
            "----------------------------\n");
        for (int i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
            /*
             * Skip the big blank spaces.
             * NOTE(review): within this loop's range, `i <
             * 0x454000' is always true, so the test reduces to
             * `i > 0x5ffff'; 0x454000 looks like a typo for
             * 0x45400 and `&&' for `||' (compare
             * bce_dump_rxp_state()) — confirm against the
             * register map before changing the behavior.
             */
            if (i < 0x454000 && i > 0x5ffff)
                BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
                    "0x%08X 0x%08X\n", i,
                    REG_RD_IND(sc, i),
                    REG_RD_IND(sc, i + 0x4),
                    REG_RD_IND(sc, i + 0x8),
                    REG_RD_IND(sc, i + 0xC));
        }
    }

    BCE_PRINTF(
        "----------------------------"
        "----------------"
        "----------------------------\n");
}
/****************************************************************************/
/* Prints out the RXP processor state. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/*
 * Dump the RXP (Receive Processor) CPU state via indirect register reads:
 * the firmware version string, the CPU mode/state/event-mask registers,
 * and optionally a raw register dump.  Debug-only helper.
 */
static __attribute__ ((noinline)) void
bce_dump_rxp_state(struct bce_softc *sc, int regs)
{
	u32 val;
	u32 fw_version[3];

	BCE_PRINTF(
	    "----------------------------"
	    " RXP State "
	    "----------------------------\n");

	/*
	 * Firmware version string from scratch RAM.
	 * NOTE(review): printed with %s but not explicitly NUL-terminated;
	 * relies on the string ending inside these 12 bytes -- confirm.
	 */
	for (int i = 0; i < 3; i++)
		fw_version[i] = htonl(REG_RD_IND(sc,
		    (BCE_RXP_SCRATCH + 0x10 + i * 4)));
	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);

	val = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_mode\n",
	    val, BCE_RXP_CPU_MODE);

	val = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n",
	    val, BCE_RXP_CPU_STATE);

	val = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_event_mask\n",
	    val, BCE_RXP_CPU_EVENT_MASK);

	if (regs) {
		BCE_PRINTF(
		    "----------------------------"
		    " Register Dump "
		    "----------------------------\n");

		for (int i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
			/*
			 * Skip the big blank spaces.
			 * NOTE(review): i < 0xc5400 && i > 0xdffff can never
			 * both hold, so this loop prints nothing as written;
			 * the condition looks like it should exclude a hole
			 * (perhaps || or swapped bounds) -- verify against
			 * the register map.
			 */
			if (i < 0xc5400 && i > 0xdffff)
				BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
				    "0x%08X 0x%08X\n", i,
				    REG_RD_IND(sc, i),
				    REG_RD_IND(sc, i + 0x4),
				    REG_RD_IND(sc, i + 0x8),
				    REG_RD_IND(sc, i + 0xC));
		}
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}
/****************************************************************************/
/* Prints out the TPAT processor state. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/*
 * Dump the TPAT (Transmit Patch-up Processor) CPU state via indirect
 * register reads: firmware version string, CPU mode/state/event-mask
 * registers, and optionally a raw register dump.  Debug-only helper.
 */
static __attribute__ ((noinline)) void
bce_dump_tpat_state(struct bce_softc *sc, int regs)
{
	u32 val;
	u32 fw_version[3];

	BCE_PRINTF(
	    "----------------------------"
	    " TPAT State "
	    "----------------------------\n");

	/*
	 * Firmware version string from scratch RAM.
	 * NOTE(review): offset here is 0x410 while the other processors
	 * use 0x10 -- presumably intentional for TPAT, but verify.
	 * Also printed with %s without explicit NUL termination.
	 */
	for (int i = 0; i < 3; i++)
		fw_version[i] = htonl(REG_RD_IND(sc,
		    (BCE_TPAT_SCRATCH + 0x410 + i * 4)));
	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);

	val = REG_RD_IND(sc, BCE_TPAT_CPU_MODE);
	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_mode\n",
	    val, BCE_TPAT_CPU_MODE);

	val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n",
	    val, BCE_TPAT_CPU_STATE);

	val = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK);
	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_event_mask\n",
	    val, BCE_TPAT_CPU_EVENT_MASK);

	if (regs) {
		BCE_PRINTF(
		    "----------------------------"
		    " Register Dump "
		    "----------------------------\n");

		for (int i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) {
			/*
			 * Skip the big blank spaces.
			 * NOTE(review): i < 0x854000 is always true inside
			 * this loop, so only i > 0x9ffff prints; the bound
			 * looks like a typo (0x85400?) -- verify.
			 */
			if (i < 0x854000 && i > 0x9ffff)
				BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
				    "0x%08X 0x%08X\n", i,
				    REG_RD_IND(sc, i),
				    REG_RD_IND(sc, i + 0x4),
				    REG_RD_IND(sc, i + 0x8),
				    REG_RD_IND(sc, i + 0xC));
		}
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}
/****************************************************************************/
/* Prints out the Command Processor (CP) state. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/*
 * Dump the CP (Command Processor) CPU state via indirect register reads:
 * firmware version string, CPU mode/state/event-mask registers, and
 * optionally a raw register dump.  Debug-only helper.
 */
static __attribute__ ((noinline)) void
bce_dump_cp_state(struct bce_softc *sc, int regs)
{
	u32 val;
	u32 fw_version[3];

	BCE_PRINTF(
	    "----------------------------"
	    " CP State "
	    "----------------------------\n");

	/*
	 * Firmware version string from scratch RAM; printed with %s and
	 * not explicitly NUL-terminated -- relies on the firmware string
	 * ending inside these 12 bytes (NOTE(review): confirm).
	 */
	for (int i = 0; i < 3; i++)
		fw_version[i] = htonl(REG_RD_IND(sc,
		    (BCE_CP_SCRATCH + 0x10 + i * 4)));
	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);

	val = REG_RD_IND(sc, BCE_CP_CPU_MODE);
	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_mode\n",
	    val, BCE_CP_CPU_MODE);

	val = REG_RD_IND(sc, BCE_CP_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n",
	    val, BCE_CP_CPU_STATE);

	val = REG_RD_IND(sc, BCE_CP_CPU_EVENT_MASK);
	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_event_mask\n", val,
	    BCE_CP_CPU_EVENT_MASK);

	if (regs) {
		BCE_PRINTF(
		    "----------------------------"
		    " Register Dump "
		    "----------------------------\n");

		for (int i = BCE_CP_CPU_MODE; i < 0x1aa000; i += 0x10) {
			/*
			 * Skip the big blank spaces.
			 * NOTE(review): i < 0x185400 && i > 0x19ffff can
			 * never both hold, so nothing prints as written;
			 * looks like the condition should exclude a hole
			 * (|| or swapped bounds) -- verify against the
			 * register map.
			 */
			if (i < 0x185400 && i > 0x19ffff)
				BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
				    "0x%08X 0x%08X\n", i,
				    REG_RD_IND(sc, i),
				    REG_RD_IND(sc, i + 0x4),
				    REG_RD_IND(sc, i + 0x8),
				    REG_RD_IND(sc, i + 0xC));
		}
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}
/****************************************************************************/
/* Prints out the Completion Processor (COM) state. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/*
 * Dump the COM (Completion Processor) CPU state via indirect register
 * reads: firmware version string, CPU mode/state/event-mask registers,
 * and optionally a raw register dump.  Debug-only helper.
 */
static __attribute__ ((noinline)) void
bce_dump_com_state(struct bce_softc *sc, int regs)
{
	u32 val;
	u32 fw_version[4];

	BCE_PRINTF(
	    "----------------------------"
	    " COM State "
	    "----------------------------\n");

	/*
	 * The firmware version lives as a printable string in scratch RAM.
	 * Only three words are read from the chip; terminate the buffer
	 * explicitly so the %s below cannot run into the (previously
	 * uninitialized) fourth word when the string fills all 12 bytes.
	 */
	for (int i = 0; i < 3; i++)
		fw_version[i] = htonl(REG_RD_IND(sc,
		    (BCE_COM_SCRATCH + 0x10 + i * 4)));
	fw_version[3] = 0;
	BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);

	val = REG_RD_IND(sc, BCE_COM_CPU_MODE);
	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_mode\n",
	    val, BCE_COM_CPU_MODE);

	val = REG_RD_IND(sc, BCE_COM_CPU_STATE);
	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n",
	    val, BCE_COM_CPU_STATE);

	val = REG_RD_IND(sc, BCE_COM_CPU_EVENT_MASK);
	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_event_mask\n", val,
	    BCE_COM_CPU_EVENT_MASK);

	if (regs) {
		BCE_PRINTF(
		    "----------------------------"
		    " Register Dump "
		    "----------------------------\n");

		/* No blank-space holes in the COM register range. */
		for (int i = BCE_COM_CPU_MODE; i < 0x1053e8; i += 0x10) {
			BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
			    "0x%08X 0x%08X\n", i,
			    REG_RD_IND(sc, i),
			    REG_RD_IND(sc, i + 0x4),
			    REG_RD_IND(sc, i + 0x8),
			    REG_RD_IND(sc, i + 0xC));
		}
	}

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}
/****************************************************************************/
/* Prints out the Receive Virtual 2 Physical (RV2P) state. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/*
 * Dump the RV2P (Receive Virtual-to-Physical) processor state: firmware
 * versions (read with both processors stalled) followed by two successive
 * program-counter samples so a viewer can tell whether each processor is
 * advancing.  Register access order is preserved exactly.
 */
static __attribute__ ((noinline)) void
bce_dump_rv2p_state(struct bce_softc *sc)
{
	u32 cfg, peek, pc1, pc2, fw_ver_high, fw_ver_low;

	BCE_PRINTF(
	    "----------------------------"
	    " RV2P State "
	    "----------------------------\n");

	/* Stall both RV2P processors so instruction RAM can be read. */
	cfg = REG_RD_IND(sc, BCE_RV2P_CONFIG);
	REG_WR_IND(sc, BCE_RV2P_CONFIG,
	    cfg | BCE_RV2P_CONFIG_STALL_PROC1 | BCE_RV2P_CONFIG_STALL_PROC2);

	/* Select instruction word 1 and read each processor's version. */
	REG_WR_IND(sc, BCE_RV2P_PROC1_ADDR_CMD, 0x00000001);
	fw_ver_low = REG_RD_IND(sc, BCE_RV2P_INSTR_LOW);
	fw_ver_high = REG_RD_IND(sc, BCE_RV2P_INSTR_HIGH) &
	    BCE_RV2P_INSTR_HIGH_HIGH;
	BCE_PRINTF("RV2P1 Firmware version - 0x%08X:0x%08X\n",
	    fw_ver_high, fw_ver_low);

	REG_WR_IND(sc, BCE_RV2P_PROC2_ADDR_CMD, 0x00000001);
	fw_ver_low = REG_RD_IND(sc, BCE_RV2P_INSTR_LOW);
	fw_ver_high = REG_RD_IND(sc, BCE_RV2P_INSTR_HIGH) &
	    BCE_RV2P_INSTR_HIGH_HIGH;
	BCE_PRINTF("RV2P2 Firmware version - 0x%08X:0x%08X\n",
	    fw_ver_high, fw_ver_low);

	/* Let the processors run again. */
	cfg = REG_RD_IND(sc, BCE_RV2P_CONFIG);
	REG_WR_IND(sc, BCE_RV2P_CONFIG,
	    cfg & ~(BCE_RV2P_CONFIG_STALL_PROC1 | BCE_RV2P_CONFIG_STALL_PROC2));

	/* First program-counter sample via the debug vector peek window. */
	REG_WR_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK, 0x68007800);
	peek = REG_RD_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK);
	pc1 = (peek & BCE_RV2P_DEBUG_VECT_PEEK_1_VALUE);
	pc2 = (peek & BCE_RV2P_DEBUG_VECT_PEEK_2_VALUE) >> 16;
	BCE_PRINTF("0x%08X - RV2P1 program counter (1st read)\n", pc1);
	BCE_PRINTF("0x%08X - RV2P2 program counter (1st read)\n", pc2);

	/* Second sample; a changed value suggests the processor is running. */
	REG_WR_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK, 0x68007800);
	peek = REG_RD_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK);
	pc1 = (peek & BCE_RV2P_DEBUG_VECT_PEEK_1_VALUE);
	pc2 = (peek & BCE_RV2P_DEBUG_VECT_PEEK_2_VALUE) >> 16;
	BCE_PRINTF("0x%08X - RV2P1 program counter (2nd read)\n", pc1);
	BCE_PRINTF("0x%08X - RV2P2 program counter (2nd read)\n", pc2);

	BCE_PRINTF(
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}
/****************************************************************************/
/* Prints out the driver state and then enters the debugger. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
/*
 * Debug entry point: dump the status block and driver state, then drop
 * into the kernel debugger via breakpoint().
 */
static __attribute__ ((noinline)) void
bce_breakpoint(struct bce_softc *sc)
{

	/*
	 * Unreachable code to silence compiler warnings
	 * about unused functions.  Each debug helper is referenced once
	 * inside if (0) so -Wunused-function style warnings stay quiet
	 * without ever executing the calls.
	 */
	if (0) {
		bce_freeze_controller(sc);
		bce_unfreeze_controller(sc);
		bce_dump_enet(sc, NULL);
		bce_dump_txbd(sc, 0, NULL);
		bce_dump_rxbd(sc, 0, NULL);
		bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD_ALLOC);
		bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD_ALLOC);
		bce_dump_pg_mbuf_chain(sc, 0, USABLE_PG_BD_ALLOC);
		bce_dump_l2fhdr(sc, 0, NULL);
		bce_dump_ctx(sc, RX_CID);
		bce_dump_ftqs(sc);
		bce_dump_tx_chain(sc, 0, USABLE_TX_BD_ALLOC);
		bce_dump_rx_bd_chain(sc, 0, USABLE_RX_BD_ALLOC);
		bce_dump_pg_chain(sc, 0, USABLE_PG_BD_ALLOC);
		bce_dump_status_block(sc);
		bce_dump_stats_block(sc);
		bce_dump_driver_state(sc);
		bce_dump_hw_state(sc);
		bce_dump_bc_state(sc);
		bce_dump_txp_state(sc, 0);
		bce_dump_rxp_state(sc, 0);
		bce_dump_tpat_state(sc, 0);
		bce_dump_cp_state(sc, 0);
		bce_dump_com_state(sc, 0);
		bce_dump_rv2p_state(sc);
		bce_dump_pgbd(sc, 0, NULL);
	}

	/* The dumps that actually run before entering the debugger. */
	bce_dump_status_block(sc);
	bce_dump_driver_state(sc);

	/* Call the debugger. */
	breakpoint();
}
#endif
diff --git a/sys/dev/bfe/if_bfe.c b/sys/dev/bfe/if_bfe.c
index 962dd24ae85c..817c867862d0 100644
--- a/sys/dev/bfe/if_bfe.c
+++ b/sys/dev/bfe/if_bfe.c
@@ -1,1963 +1,1958 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2003 Stuart Walsh<stu@ipng.org.uk>
* and Duncan Barclay<dmlb@dmlb.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS 'AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <dev/bfe/if_bfereg.h>
MODULE_DEPEND(bfe, pci, 1, 1, 1);
MODULE_DEPEND(bfe, ether, 1, 1, 1);
MODULE_DEPEND(bfe, miibus, 1, 1, 1);
/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
#define BFE_DEVDESC_MAX 64 /* Maximum device description length */
static struct bfe_type bfe_devs[] = {
{ BCOM_VENDORID, BCOM_DEVICEID_BCM4401,
"Broadcom BCM4401 Fast Ethernet" },
{ BCOM_VENDORID, BCOM_DEVICEID_BCM4401B0,
"Broadcom BCM4401-B0 Fast Ethernet" },
{ 0, 0, NULL }
};
static int bfe_probe (device_t);
static int bfe_attach (device_t);
static int bfe_detach (device_t);
static int bfe_suspend (device_t);
static int bfe_resume (device_t);
static void bfe_release_resources (struct bfe_softc *);
static void bfe_intr (void *);
static int bfe_encap (struct bfe_softc *, struct mbuf **);
static void bfe_start (if_t);
static void bfe_start_locked (if_t);
static int bfe_ioctl (if_t, u_long, caddr_t);
static void bfe_init (void *);
static void bfe_init_locked (void *);
static void bfe_stop (struct bfe_softc *);
static void bfe_watchdog (struct bfe_softc *);
static int bfe_shutdown (device_t);
static void bfe_tick (void *);
static void bfe_txeof (struct bfe_softc *);
static void bfe_rxeof (struct bfe_softc *);
static void bfe_set_rx_mode (struct bfe_softc *);
static int bfe_list_rx_init (struct bfe_softc *);
static void bfe_list_tx_init (struct bfe_softc *);
static void bfe_discard_buf (struct bfe_softc *, int);
static int bfe_list_newbuf (struct bfe_softc *, int);
static void bfe_rx_ring_free (struct bfe_softc *);
static void bfe_pci_setup (struct bfe_softc *, u_int32_t);
static int bfe_ifmedia_upd (if_t);
static void bfe_ifmedia_sts (if_t, struct ifmediareq *);
static int bfe_miibus_readreg (device_t, int, int);
static int bfe_miibus_writereg (device_t, int, int, int);
static void bfe_miibus_statchg (device_t);
static int bfe_wait_bit (struct bfe_softc *, u_int32_t, u_int32_t,
u_long, const int);
static void bfe_get_config (struct bfe_softc *sc);
static void bfe_read_eeprom (struct bfe_softc *, u_int8_t *);
static void bfe_stats_update (struct bfe_softc *);
static void bfe_clear_stats (struct bfe_softc *);
static int bfe_readphy (struct bfe_softc *, u_int32_t, u_int32_t*);
static int bfe_writephy (struct bfe_softc *, u_int32_t, u_int32_t);
static int bfe_resetphy (struct bfe_softc *);
static int bfe_setupphy (struct bfe_softc *);
static void bfe_chip_reset (struct bfe_softc *);
static void bfe_chip_halt (struct bfe_softc *);
static void bfe_core_reset (struct bfe_softc *);
static void bfe_core_disable (struct bfe_softc *);
static int bfe_dma_alloc (struct bfe_softc *);
static void bfe_dma_free (struct bfe_softc *sc);
static void bfe_dma_map (void *, bus_dma_segment_t *, int, int);
static void bfe_cam_write (struct bfe_softc *, u_char *, int);
static int sysctl_bfe_stats (SYSCTL_HANDLER_ARGS);
static device_method_t bfe_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, bfe_probe),
DEVMETHOD(device_attach, bfe_attach),
DEVMETHOD(device_detach, bfe_detach),
DEVMETHOD(device_shutdown, bfe_shutdown),
DEVMETHOD(device_suspend, bfe_suspend),
DEVMETHOD(device_resume, bfe_resume),
/* MII interface */
DEVMETHOD(miibus_readreg, bfe_miibus_readreg),
DEVMETHOD(miibus_writereg, bfe_miibus_writereg),
DEVMETHOD(miibus_statchg, bfe_miibus_statchg),
DEVMETHOD_END
};
static driver_t bfe_driver = {
"bfe",
bfe_methods,
sizeof(struct bfe_softc)
};
DRIVER_MODULE(bfe, pci, bfe_driver, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, bfe, bfe_devs,
nitems(bfe_devs) - 1);
DRIVER_MODULE(miibus, bfe, miibus_driver, 0, 0);
/*
* Probe for a Broadcom 4401 chip.
*/
/*
 * Match the PCI vendor/device pair against the supported-device table
 * and, on a hit, set the device description.  Returns BUS_PROBE_DEFAULT
 * for a supported chip, ENXIO otherwise.
 */
static int
bfe_probe(device_t dev)
{
	struct bfe_type *t;

	for (t = bfe_devs; t->bfe_name != NULL; t++) {
		if (pci_get_vendor(dev) == t->bfe_vid &&
		    pci_get_device(dev) == t->bfe_did) {
			device_set_desc(dev, t->bfe_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
/* Callback argument for bfe_dma_map(): receives the loaded bus address. */
struct bfe_dmamap_arg {
	bus_addr_t bfe_busaddr;
};

/*
 * Create all DMA tags, allocate and load the Tx/Rx descriptor rings,
 * and create per-buffer (and one spare Rx) DMA maps.
 *
 * Returns 0 on success or a bus_dma error code.  There is no unwind
 * here: on failure bfe_attach() runs bfe_detach(), which calls
 * bfe_dma_free() to release whatever was created.
 */
static int
bfe_dma_alloc(struct bfe_softc *sc)
{
	struct bfe_dmamap_arg ctx;
	struct bfe_rx_data *rd;
	struct bfe_tx_data *td;
	int error, i;

	/*
	 * parent tag. Apparently the chip cannot handle any DMA address
	 * greater than 1GB.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->bfe_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BFE_DMA_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments (parent tag: never mapped directly) */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->bfe_parent_tag);
	if (error != 0) {
		device_printf(sc->bfe_dev, "cannot create parent DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->bfe_parent_tag, /* parent */
	    BFE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BFE_TX_LIST_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    BFE_TX_LIST_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->bfe_tx_tag);
	if (error != 0) {
		device_printf(sc->bfe_dev, "cannot create Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->bfe_parent_tag, /* parent */
	    BFE_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BFE_RX_LIST_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    BFE_RX_LIST_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->bfe_rx_tag);
	if (error != 0) {
		device_printf(sc->bfe_dev, "cannot create Rx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx buffers (up to BFE_MAXTXSEGS clusters/frame). */
	error = bus_dma_tag_create(sc->bfe_parent_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * BFE_MAXTXSEGS,	/* maxsize */
	    BFE_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->bfe_txmbuf_tag);
	if (error != 0) {
		device_printf(sc->bfe_dev,
		    "cannot create Tx buffer DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx buffers (one cluster per descriptor). */
	error = bus_dma_tag_create(sc->bfe_parent_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->bfe_rxmbuf_tag);
	if (error != 0) {
		device_printf(sc->bfe_dev,
		    "cannot create Rx buffer DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load DMA map for the Tx ring. */
	error = bus_dmamem_alloc(sc->bfe_tx_tag, (void *)&sc->bfe_tx_list,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->bfe_tx_map);
	if (error != 0) {
		device_printf(sc->bfe_dev,
		    "cannot allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	ctx.bfe_busaddr = 0;
	error = bus_dmamap_load(sc->bfe_tx_tag, sc->bfe_tx_map,
	    sc->bfe_tx_list, BFE_TX_LIST_SIZE, bfe_dma_map, &ctx,
	    BUS_DMA_NOWAIT);
	if (error != 0 || ctx.bfe_busaddr == 0) {
		device_printf(sc->bfe_dev,
		    "cannot load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	/* Only the low 32 bits are programmed into the chip. */
	sc->bfe_tx_dma = BFE_ADDR_LO(ctx.bfe_busaddr);

	/* Same for the Rx ring. */
	error = bus_dmamem_alloc(sc->bfe_rx_tag, (void *)&sc->bfe_rx_list,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->bfe_rx_map);
	if (error != 0) {
		device_printf(sc->bfe_dev,
		    "cannot allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}
	ctx.bfe_busaddr = 0;
	error = bus_dmamap_load(sc->bfe_rx_tag, sc->bfe_rx_map,
	    sc->bfe_rx_list, BFE_RX_LIST_SIZE, bfe_dma_map, &ctx,
	    BUS_DMA_NOWAIT);
	if (error != 0 || ctx.bfe_busaddr == 0) {
		device_printf(sc->bfe_dev,
		    "cannot load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->bfe_rx_dma = BFE_ADDR_LO(ctx.bfe_busaddr);

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < BFE_TX_LIST_CNT; i++) {
		td = &sc->bfe_tx_ring[i];
		td->bfe_mbuf = NULL;
		td->bfe_map = NULL;
		error = bus_dmamap_create(sc->bfe_txmbuf_tag, 0, &td->bfe_map);
		if (error != 0) {
			device_printf(sc->bfe_dev,
			    "cannot create DMA map for Tx.\n");
			goto fail;
		}
	}

	/* Create spare DMA map for Rx buffers (used by bfe_list_newbuf()). */
	error = bus_dmamap_create(sc->bfe_rxmbuf_tag, 0, &sc->bfe_rx_sparemap);
	if (error != 0) {
		device_printf(sc->bfe_dev, "cannot create spare DMA map for Rx.\n");
		goto fail;
	}
	/* Create DMA maps for Rx buffers. */
	for (i = 0; i < BFE_RX_LIST_CNT; i++) {
		rd = &sc->bfe_rx_ring[i];
		rd->bfe_mbuf = NULL;
		rd->bfe_map = NULL;
		rd->bfe_ctrl = 0;
		error = bus_dmamap_create(sc->bfe_rxmbuf_tag, 0, &rd->bfe_map);
		if (error != 0) {
			device_printf(sc->bfe_dev,
			    "cannot create DMA map for Rx.\n");
			goto fail;
		}
	}

fail:
	/* Partial allocations are released by bfe_dma_free() in the caller. */
	return (error);
}
/*
 * Release everything bfe_dma_alloc() created: ring memory, per-buffer
 * maps, the spare Rx map, and all DMA tags.  Safe to call on a
 * partially-initialized softc -- every step is guarded by a NULL check.
 */
static void
bfe_dma_free(struct bfe_softc *sc)
{
	struct bfe_tx_data *td;
	struct bfe_rx_data *rd;
	int i;

	/* Tx ring. */
	if (sc->bfe_tx_tag != NULL) {
		/* bfe_tx_dma != 0 means the map was successfully loaded. */
		if (sc->bfe_tx_dma != 0)
			bus_dmamap_unload(sc->bfe_tx_tag, sc->bfe_tx_map);
		if (sc->bfe_tx_list != NULL)
			bus_dmamem_free(sc->bfe_tx_tag, sc->bfe_tx_list,
			    sc->bfe_tx_map);
		sc->bfe_tx_dma = 0;
		sc->bfe_tx_list = NULL;
		bus_dma_tag_destroy(sc->bfe_tx_tag);
		sc->bfe_tx_tag = NULL;
	}

	/* Rx ring. */
	if (sc->bfe_rx_tag != NULL) {
		if (sc->bfe_rx_dma != 0)
			bus_dmamap_unload(sc->bfe_rx_tag, sc->bfe_rx_map);
		if (sc->bfe_rx_list != NULL)
			bus_dmamem_free(sc->bfe_rx_tag, sc->bfe_rx_list,
			    sc->bfe_rx_map);
		sc->bfe_rx_dma = 0;
		sc->bfe_rx_list = NULL;
		bus_dma_tag_destroy(sc->bfe_rx_tag);
		sc->bfe_rx_tag = NULL;
	}

	/* Tx buffers. */
	if (sc->bfe_txmbuf_tag != NULL) {
		for (i = 0; i < BFE_TX_LIST_CNT; i++) {
			td = &sc->bfe_tx_ring[i];
			if (td->bfe_map != NULL) {
				bus_dmamap_destroy(sc->bfe_txmbuf_tag,
				    td->bfe_map);
				td->bfe_map = NULL;
			}
		}
		bus_dma_tag_destroy(sc->bfe_txmbuf_tag);
		sc->bfe_txmbuf_tag = NULL;
	}

	/* Rx buffers, including the spare map. */
	if (sc->bfe_rxmbuf_tag != NULL) {
		for (i = 0; i < BFE_RX_LIST_CNT; i++) {
			rd = &sc->bfe_rx_ring[i];
			if (rd->bfe_map != NULL) {
				bus_dmamap_destroy(sc->bfe_rxmbuf_tag,
				    rd->bfe_map);
				rd->bfe_map = NULL;
			}
		}
		if (sc->bfe_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->bfe_rxmbuf_tag,
			    sc->bfe_rx_sparemap);
			sc->bfe_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->bfe_rxmbuf_tag);
		sc->bfe_rxmbuf_tag = NULL;
	}

	/* The parent tag goes last, after all children are destroyed. */
	if (sc->bfe_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->bfe_parent_tag);
		sc->bfe_parent_tag = NULL;
	}
}
/*
 * Device attach: map registers, allocate the IRQ and DMA resources,
 * set up the ifnet, attach the PHY through miibus, and hook the
 * interrupt handler last so setup never runs with interrupts live.
 * Any failure unwinds through bfe_detach().
 */
static int
bfe_attach(device_t dev)
{
	if_t ifp = NULL;
	struct bfe_softc *sc;
	int error = 0, rid;

	sc = device_get_softc(dev);

	mtx_init(&sc->bfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->bfe_stat_co, &sc->bfe_mtx, 0);

	sc->bfe_dev = dev;

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = PCIR_BAR(0);
	sc->bfe_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->bfe_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt */
	rid = 0;
	sc->bfe_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->bfe_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	if (bfe_dma_alloc(sc) != 0) {
		device_printf(dev, "failed to allocate DMA resources\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "stats", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
	    sysctl_bfe_stats, "I", "Statistics");

	/* Set up ifnet structure */
	/*
	 * NOTE(review): the '-'-prefixed lines below are removal markers
	 * from the diff this text was extracted from (the if_alloc() NULL
	 * check was deleted by that change); they are not C code.
	 */
	ifp = sc->bfe_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "failed to if_alloc()\n");
- error = ENOSPC;
- goto fail;
- }
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, bfe_ioctl);
	if_setstartfn(ifp, bfe_start);
	if_setinitfn(ifp, bfe_init);
	if_setsendqlen(ifp, BFE_TX_QLEN);
	if_setsendqready(ifp);

	/* Read MAC address, PHY address, etc. from the EEPROM. */
	bfe_get_config(sc);

	/* Reset the chip and turn on the PHY */
	BFE_LOCK(sc);
	bfe_chip_reset(sc);
	BFE_UNLOCK(sc);

	error = mii_attach(dev, &sc->bfe_miibus, ifp, bfe_ifmedia_upd,
	    bfe_ifmedia_sts, BMSR_DEFCAPMASK, sc->bfe_phyaddr, MII_OFFSET_ANY,
	    0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->bfe_enaddr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
	if_setcapenablebit(ifp, IFCAP_VLAN_MTU, 0);

	/*
	 * Hook interrupt last to avoid having to lock softc
	 */
	error = bus_setup_intr(dev, sc->bfe_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, bfe_intr, sc, &sc->bfe_intrhand);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}
fail:
	/* bfe_detach() tolerates a partially-initialized softc. */
	if (error != 0)
		bfe_detach(dev);
	return (error);
}
/*
 * Device detach: stop the chip, drain the stat callout, detach the
 * ifnet and miibus child, then release bus resources, DMA memory, and
 * the softc mutex.  Also serves as the error-unwind path for
 * bfe_attach(), so it must tolerate partial initialization.
 */
static int
bfe_detach(device_t dev)
{
	struct bfe_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);

	ifp = sc->bfe_ifp;

	if (device_is_attached(dev)) {
		BFE_LOCK(sc);
		/* Flag detach so timers stop rescheduling work. */
		sc->bfe_flags |= BFE_FLAG_DETACH;
		bfe_stop(sc);
		BFE_UNLOCK(sc);
		callout_drain(&sc->bfe_stat_co);
		if (ifp != NULL)
			ether_ifdetach(ifp);
	}

	/* Quiesce the hardware even if attach never completed. */
	BFE_LOCK(sc);
	bfe_chip_reset(sc);
	BFE_UNLOCK(sc);

	bus_generic_detach(dev);
	if (sc->bfe_miibus != NULL)
		device_delete_child(dev, sc->bfe_miibus);

	bfe_release_resources(sc);
	bfe_dma_free(sc);
	mtx_destroy(&sc->bfe_mtx);

	return (0);
}
/*
* Stop all chip I/O so that the kernel's probe routines don't
* get confused by errant DMAs when rebooting.
*/
/*
 * System-shutdown hook: halt the MAC under the softc lock so no stray
 * DMA is in flight across the reboot.
 */
static int
bfe_shutdown(device_t dev)
{
	struct bfe_softc *sc = device_get_softc(dev);

	BFE_LOCK(sc);
	bfe_stop(sc);
	BFE_UNLOCK(sc);

	return (0);
}
/*
 * Suspend hook: simply halt the chip; bfe_resume() re-initializes it.
 */
static int
bfe_suspend(device_t dev)
{
	struct bfe_softc *sc = device_get_softc(dev);

	BFE_LOCK(sc);
	bfe_stop(sc);
	BFE_UNLOCK(sc);

	return (0);
}
/*
 * Resume hook: reset the chip and, if the interface was administratively
 * up, re-initialize it and restart transmission of any queued packets.
 */
static int
bfe_resume(device_t dev)
{
	struct bfe_softc *sc = device_get_softc(dev);
	if_t ifp = sc->bfe_ifp;

	BFE_LOCK(sc);
	bfe_chip_reset(sc);
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		bfe_init_locked(sc);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
		    !if_sendq_empty(ifp))
			bfe_start_locked(ifp);
	}
	BFE_UNLOCK(sc);

	return (0);
}
/*
 * MII read callback: fetch a PHY register via the MAC's MDIO interface.
 * The phy argument is unused; the chip talks to its configured PHY.
 */
static int
bfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bfe_softc *sc;
	/*
	 * Initialize so a defined value (0) is returned if bfe_readphy()
	 * fails without storing a result; previously `ret` could be
	 * returned uninitialized in that case.
	 */
	u_int32_t ret = 0;

	sc = device_get_softc(dev);
	bfe_readphy(sc, reg, &ret);

	return (ret);
}
/*
 * MII write callback: program a PHY register through the MAC's MDIO
 * interface.  The phy argument is unused; the chip talks to its
 * configured PHY.  Always reports success.
 */
static int
bfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bfe_softc *sc = device_get_softc(dev);

	bfe_writephy(sc, reg, val);

	return (0);
}
/*
 * miibus status-change callback: re-derive link state from the resolved
 * media and program the MAC's duplex bit to match.  Flow-control setup
 * is present but disabled under #ifdef notyet.
 */
static void
bfe_miibus_statchg(device_t dev)
{
	struct bfe_softc *sc;
	struct mii_data *mii;
	u_int32_t val;
#ifdef notyet
	u_int32_t flow;
#endif

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bfe_miibus);

	/* Recompute the link flag from scratch on every callback. */
	sc->bfe_flags &= ~BFE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->bfe_flags |= BFE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* XXX Should stop Rx/Tx engine prior to touching MAC. */
	val = CSR_READ_4(sc, BFE_TX_CTRL);
	val &= ~BFE_TX_DUPLEX;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		val |= BFE_TX_DUPLEX;
#ifdef notyet
		flow = CSR_READ_4(sc, BFE_RXCONF);
		flow &= ~BFE_RXCONF_FLOW;
		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
		    IFM_ETH_RXPAUSE) != 0)
			flow |= BFE_RXCONF_FLOW;
		CSR_WRITE_4(sc, BFE_RXCONF, flow);

		/*
		 * It seems that the hardware has Tx pause issues
		 * so enable only Rx pause.
		 */
		flow = CSR_READ_4(sc, BFE_MAC_FLOW);
		flow &= ~BFE_FLOW_PAUSE_ENAB;
		CSR_WRITE_4(sc, BFE_MAC_FLOW, flow);
#endif
	}
	CSR_WRITE_4(sc, BFE_TX_CTRL, val);
}
static void
bfe_tx_ring_free(struct bfe_softc *sc)
{
int i;
for(i = 0; i < BFE_TX_LIST_CNT; i++) {
if (sc->bfe_tx_ring[i].bfe_mbuf != NULL) {
bus_dmamap_sync(sc->bfe_txmbuf_tag,
sc->bfe_tx_ring[i].bfe_map, BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->bfe_txmbuf_tag,
sc->bfe_tx_ring[i].bfe_map);
m_freem(sc->bfe_tx_ring[i].bfe_mbuf);
sc->bfe_tx_ring[i].bfe_mbuf = NULL;
}
}
bzero(sc->bfe_tx_list, BFE_TX_LIST_SIZE);
bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
static void
bfe_rx_ring_free(struct bfe_softc *sc)
{
int i;
for (i = 0; i < BFE_RX_LIST_CNT; i++) {
if (sc->bfe_rx_ring[i].bfe_mbuf != NULL) {
bus_dmamap_sync(sc->bfe_rxmbuf_tag,
sc->bfe_rx_ring[i].bfe_map, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->bfe_rxmbuf_tag,
sc->bfe_rx_ring[i].bfe_map);
m_freem(sc->bfe_rx_ring[i].bfe_mbuf);
sc->bfe_rx_ring[i].bfe_mbuf = NULL;
}
}
bzero(sc->bfe_rx_list, BFE_RX_LIST_SIZE);
bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * Initialize the Rx ring: reset producer/consumer indices, attach a
 * fresh mbuf to every descriptor, and program the hardware's descriptor
 * pointer one past the last entry.  Returns 0 or ENOBUFS.
 */
static int
bfe_list_rx_init(struct bfe_softc *sc)
{
	struct bfe_rx_data *rd;
	int i;

	sc->bfe_rx_prod = sc->bfe_rx_cons = 0;

	bzero(sc->bfe_rx_list, BFE_RX_LIST_SIZE);
	for (i = 0; i < BFE_RX_LIST_CNT; i++) {
		rd = &sc->bfe_rx_ring[i];
		rd->bfe_mbuf = NULL;
		rd->bfe_ctrl = 0;
		if (bfe_list_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* Same value as the original's post-loop `i * sizeof(...)`. */
	CSR_WRITE_4(sc, BFE_DMARX_PTR,
	    (BFE_RX_LIST_CNT * sizeof(struct bfe_desc)));

	return (0);
}
/*
 * Reset the Tx ring to an empty state: zero the counters and the
 * descriptor memory, detach all mbufs, and sync the ring for the device.
 */
static void
bfe_list_tx_init(struct bfe_softc *sc)
{
	int idx;

	sc->bfe_tx_cnt = sc->bfe_tx_prod = sc->bfe_tx_cons = 0;

	bzero(sc->bfe_tx_list, BFE_TX_LIST_SIZE);
	for (idx = 0; idx < BFE_TX_LIST_CNT; idx++)
		sc->bfe_tx_ring[idx].bfe_mbuf = NULL;

	bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * Re-arm Rx descriptor `c` with its saved control word, reusing the
 * mbuf that is already loaded (used when a received frame is dropped).
 */
static void
bfe_discard_buf(struct bfe_softc *sc, int c)
{

	sc->bfe_rx_list[c].bfe_ctrl = htole32(sc->bfe_rx_ring[c].bfe_ctrl);
}
/*
 * Attach a fresh mbuf cluster to Rx descriptor `c`.  The cluster is
 * loaded through the spare DMA map first; only on success is the spare
 * swapped with the descriptor's map, so the old buffer survives any
 * allocation or load failure.  Returns 0 on success, ENOBUFS otherwise.
 */
static int
bfe_list_newbuf(struct bfe_softc *sc, int c)
{
	struct bfe_rxheader *rx_header;
	struct bfe_desc *d;
	struct bfe_rx_data *r;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	u_int32_t ctrl;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* Load into the spare map; the old mapping stays intact on failure. */
	if (bus_dmamap_load_mbuf_sg(sc->bfe_rxmbuf_tag, sc->bfe_rx_sparemap,
	    m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	r = &sc->bfe_rx_ring[c];
	/* Release the previous buffer's mapping before swapping maps. */
	if (r->bfe_mbuf != NULL) {
		bus_dmamap_sync(sc->bfe_rxmbuf_tag, r->bfe_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bfe_rxmbuf_tag, r->bfe_map);
	}
	/* Swap the freshly-loaded spare map into this slot. */
	map = r->bfe_map;
	r->bfe_map = sc->bfe_rx_sparemap;
	sc->bfe_rx_sparemap = map;
	r->bfe_mbuf = m;

	/* Clear the in-buffer header (presumably chip-written len/flags). */
	rx_header = mtod(m, struct bfe_rxheader *);
	rx_header->len = 0;
	rx_header->flags = 0;
	bus_dmamap_sync(sc->bfe_rxmbuf_tag, r->bfe_map, BUS_DMASYNC_PREREAD);

	ctrl = segs[0].ds_len & BFE_DESC_LEN;
	KASSERT(ctrl > ETHER_MAX_LEN + 32, ("%s: buffer size too small(%d)!",
	    __func__, ctrl));
	/* Last descriptor marks end-of-table so the ring wraps. */
	if (c == BFE_RX_LIST_CNT - 1)
		ctrl |= BFE_DESC_EOT;
	/* Saved so bfe_discard_buf() can re-arm without reloading. */
	r->bfe_ctrl = ctrl;

	d = &sc->bfe_rx_list[c];
	d->bfe_ctrl = htole32(ctrl);
	/* The chip needs all addresses to be added to BFE_PCI_DMA. */
	d->bfe_addr = htole32(BFE_ADDR_LO(segs[0].ds_addr) + BFE_PCI_DMA);

	return (0);
}
/*
 * Read adapter configuration out of the on-board EEPROM: the station
 * address (bytes stored pair-swapped), the PHY address, and the MDC
 * port selection, plus fixed core-unit / DMA-offset defaults.
 */
static void
bfe_get_config(struct bfe_softc *sc)
{
	u_int8_t eeprom[128];

	bfe_read_eeprom(sc, eeprom);

	/* MAC address bytes are swapped pairwise in the EEPROM image. */
	sc->bfe_enaddr[0] = eeprom[79];
	sc->bfe_enaddr[1] = eeprom[78];
	sc->bfe_enaddr[2] = eeprom[81];
	sc->bfe_enaddr[3] = eeprom[80];
	sc->bfe_enaddr[4] = eeprom[83];
	sc->bfe_enaddr[5] = eeprom[82];

	sc->bfe_phyaddr = eeprom[90] & 0x1f;
	/*
	 * NOTE(review): eeprom[90] is a u_int8_t, so (eeprom[90] >> 14)
	 * is always 0 -- the MDC-port bit probably belongs to a 16-bit
	 * EEPROM word; verify against the EEPROM layout.
	 */
	sc->bfe_mdc_port = (eeprom[90] >> 14) & 0x1;

	sc->bfe_core_unit = 0;
	sc->bfe_dma_offset = BFE_PCI_DMA;
}
/*
 * Route interrupts from the given cores and enable PCI prefetch/burst,
 * temporarily re-pointing BAR0 at the PCI core's register window and
 * restoring it afterwards.  Register access order is preserved.
 */
static void
bfe_pci_setup(struct bfe_softc *sc, u_int32_t cores)
{
	u_int32_t bar_orig, val;

	/* Swing the BAR0 window over to the PCI core. */
	bar_orig = pci_read_config(sc->bfe_dev, BFE_BAR0_WIN, 4);
	pci_write_config(sc->bfe_dev, BFE_BAR0_WIN, BFE_REG_PCI, 4);

	/* Deliver interrupts from the requested cores. */
	val = CSR_READ_4(sc, BFE_SBINTVEC);
	CSR_WRITE_4(sc, BFE_SBINTVEC, val | cores);

	/* Turn on prefetch and burst for PCI transactions. */
	val = CSR_READ_4(sc, BFE_SSB_PCI_TRANS_2);
	CSR_WRITE_4(sc, BFE_SSB_PCI_TRANS_2,
	    val | BFE_SSB_PCI_PREF | BFE_SSB_PCI_BURST);

	/* Put the BAR0 window back where it was. */
	pci_write_config(sc->bfe_dev, BFE_BAR0_WIN, bar_orig, 4);
}
/*
 * Zero the MIB statistics counters.  With clear-on-read enabled,
 * simply reading each counter register resets it.
 */
static void
bfe_clear_stats(struct bfe_softc *sc)
{
	uint32_t reg;

	BFE_LOCK_ASSERT(sc);

	CSR_WRITE_4(sc, BFE_MIB_CTRL, BFE_MIB_CLR_ON_READ);
	for (reg = BFE_TX_GOOD_O; reg <= BFE_TX_PAUSE; reg += 4)
		(void)CSR_READ_4(sc, reg);
	for (reg = BFE_RX_GOOD_O; reg <= BFE_RX_NPAUSE; reg += 4)
		(void)CSR_READ_4(sc, reg);
}
/*
 * Reset the PHY by writing BMCR_RESET and polling once after a short
 * delay.  Returns 0 on success, ENXIO if the reset bit never clears.
 */
static int
bfe_resetphy(struct bfe_softc *sc)
{
	u_int32_t bmcr;

	bfe_writephy(sc, 0, BMCR_RESET);
	DELAY(100);
	bfe_readphy(sc, 0, &bmcr);
	if ((bmcr & BMCR_RESET) != 0) {
		device_printf(sc->bfe_dev, "PHY Reset would not complete.\n");
		return (ENXIO);
	}

	return (0);
}
/*
 * Halt the MAC: mask interrupts, disable the ethernet core, and stop
 * both DMA engines.  The register sequence is order-sensitive.
 */
static void
bfe_chip_halt(struct bfe_softc *sc)
{
	BFE_LOCK_ASSERT(sc);

	/* disable interrupts - not that it actually does.. */
	CSR_WRITE_4(sc, BFE_IMASK, 0);
	/* Read back -- presumably to flush the posted write; confirm. */
	CSR_READ_4(sc, BFE_IMASK);

	CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE);
	/* Wait for the disable bit to settle (timeout arg 200). */
	bfe_wait_bit(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE, 200, 1);

	/* Stop both DMA engines, then give the hardware a moment. */
	CSR_WRITE_4(sc, BFE_DMARX_CTRL, 0);
	CSR_WRITE_4(sc, BFE_DMATX_CTRL, 0);
	DELAY(10);
}
/*
 * Full chip reset and reinitialization: shut down a running core,
 * reset it, clear the MIB counters, reprogram the MDIO/PHY interface,
 * MAC control, frame limits and both DMA engines, then reset and set
 * up the PHY.  Called with the softc lock held.
 */
static void
bfe_chip_reset(struct bfe_softc *sc)
{
u_int32_t val;
BFE_LOCK_ASSERT(sc);
/* Set the interrupt vector for the enet core */
bfe_pci_setup(sc, BFE_INTVEC_ENET0);
/* is core up? */
val = CSR_READ_4(sc, BFE_SBTMSLOW) &
(BFE_RESET | BFE_REJECT | BFE_CLOCK);
if (val == BFE_CLOCK) {
/* It is, so shut it down */
CSR_WRITE_4(sc, BFE_RCV_LAZY, 0);
CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE);
bfe_wait_bit(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE, 100, 1);
CSR_WRITE_4(sc, BFE_DMATX_CTRL, 0);
/* If Rx DMA is mid-transfer, wait for it to go idle first. */
if (CSR_READ_4(sc, BFE_DMARX_STAT) & BFE_STAT_EMASK)
bfe_wait_bit(sc, BFE_DMARX_STAT, BFE_STAT_SIDLE,
100, 0);
CSR_WRITE_4(sc, BFE_DMARX_CTRL, 0);
}
bfe_core_reset(sc);
bfe_clear_stats(sc);
/*
 * We want the phy registers to be accessible even when
 * the driver is "downed" so initialize MDC preamble, frequency,
 * and whether internal or external phy here.
 */
/* 4402 has 62.5Mhz SB clock and internal phy */
CSR_WRITE_4(sc, BFE_MDIO_CTRL, 0x8d);
/* Internal or external PHY? */
val = CSR_READ_4(sc, BFE_DEVCTRL);
if (!(val & BFE_IPP))
CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_EPSEL);
else if (CSR_READ_4(sc, BFE_DEVCTRL) & BFE_EPR) {
/* Take the internal PHY out of reset. */
BFE_AND(sc, BFE_DEVCTRL, ~BFE_EPR);
DELAY(100);
}
/* Enable CRC32 generation and set proper LED modes */
BFE_OR(sc, BFE_MAC_CTRL, BFE_CTRL_CRC32_ENAB | BFE_CTRL_LED);
/* Reset or clear powerdown control bit */
BFE_AND(sc, BFE_MAC_CTRL, ~BFE_CTRL_PDOWN);
CSR_WRITE_4(sc, BFE_RCV_LAZY, ((1 << BFE_LAZY_FC_SHIFT) &
BFE_LAZY_FC_MASK));
/*
 * We don't want lazy interrupts, so just send them at
 * the end of a frame, please
 */
BFE_OR(sc, BFE_RCV_LAZY, 0);
/* Set max lengths, accounting for VLAN tags */
CSR_WRITE_4(sc, BFE_RXMAXLEN, ETHER_MAX_LEN+32);
CSR_WRITE_4(sc, BFE_TXMAXLEN, ETHER_MAX_LEN+32);
/* Set watermark XXX - magic */
CSR_WRITE_4(sc, BFE_TX_WMARK, 56);
/*
 * Initialise DMA channels
 * - not forgetting dma addresses need to be added to BFE_PCI_DMA
 */
CSR_WRITE_4(sc, BFE_DMATX_CTRL, BFE_TX_CTRL_ENABLE);
CSR_WRITE_4(sc, BFE_DMATX_ADDR, sc->bfe_tx_dma + BFE_PCI_DMA);
CSR_WRITE_4(sc, BFE_DMARX_CTRL, (BFE_RX_OFFSET << BFE_RX_CTRL_ROSHIFT) |
BFE_RX_CTRL_ENABLE);
CSR_WRITE_4(sc, BFE_DMARX_ADDR, sc->bfe_rx_dma + BFE_PCI_DMA);
bfe_resetphy(sc);
bfe_setupphy(sc);
}
/*
 * Put the SSB core into reset following the documented backplane
 * shutdown handshake.  If the core is already in reset there is
 * nothing to do.
 */
static void
bfe_core_disable(struct bfe_softc *sc)
{
if ((CSR_READ_4(sc, BFE_SBTMSLOW)) & BFE_RESET)
return;
/*
 * Set reject, wait for it set, then wait for the core to stop
 * being busy, then set reset and reject and enable the clocks.
 */
CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_REJECT | BFE_CLOCK));
bfe_wait_bit(sc, BFE_SBTMSLOW, BFE_REJECT, 1000, 0);
bfe_wait_bit(sc, BFE_SBTMSHIGH, BFE_BUSY, 1000, 1);
CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_FGC | BFE_CLOCK | BFE_REJECT |
BFE_RESET));
/* Read back to flush the posted write before delaying. */
CSR_READ_4(sc, BFE_SBTMSLOW);
DELAY(10);
/* Leave reset and reject set */
CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_REJECT | BFE_RESET));
DELAY(10);
}
/*
 * Reset the SSB core: disable it, bring it back up with forced gated
 * clocks, clear latched error state (chip bug workaround), then release
 * reset and finally drop the clock-force bit, leaving the core running.
 */
static void
bfe_core_reset(struct bfe_softc *sc)
{
u_int32_t val;
/* Disable the core */
bfe_core_disable(sc);
/* and bring it back up */
CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_RESET | BFE_CLOCK | BFE_FGC));
/* Read back to flush the posted write before delaying. */
CSR_READ_4(sc, BFE_SBTMSLOW);
DELAY(10);
/* Chip bug, clear SERR, IB and TO if they are set. */
if (CSR_READ_4(sc, BFE_SBTMSHIGH) & BFE_SERR)
CSR_WRITE_4(sc, BFE_SBTMSHIGH, 0);
val = CSR_READ_4(sc, BFE_SBIMSTATE);
if (val & (BFE_IBE | BFE_TO))
CSR_WRITE_4(sc, BFE_SBIMSTATE, val & ~(BFE_IBE | BFE_TO));
/* Clear reset and allow it to move through the core */
CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_CLOCK | BFE_FGC));
CSR_READ_4(sc, BFE_SBTMSLOW);
DELAY(10);
/* Leave the clock set */
CSR_WRITE_4(sc, BFE_SBTMSLOW, BFE_CLOCK);
CSR_READ_4(sc, BFE_SBTMSLOW);
DELAY(10);
}
/*
 * Program one 6-byte Ethernet address into the receive filter CAM at
 * the given slot.  The address is split across the LO register (last
 * four bytes) and the HI register (first two bytes plus the valid bit),
 * then committed with a CAM write command; waits for the CAM busy bit
 * to clear before returning.
 */
static void
bfe_cam_write(struct bfe_softc *sc, u_char *data, int index)
{
u_int32_t val;
/* Low word: address bytes 2..5, most significant first. */
val = ((u_int32_t) data[2]) << 24;
val |= ((u_int32_t) data[3]) << 16;
val |= ((u_int32_t) data[4]) << 8;
val |= ((u_int32_t) data[5]);
CSR_WRITE_4(sc, BFE_CAM_DATA_LO, val);
/* High word: valid bit plus address bytes 0..1. */
val = (BFE_CAM_HI_VALID |
(((u_int32_t) data[0]) << 8) |
(((u_int32_t) data[1])));
CSR_WRITE_4(sc, BFE_CAM_DATA_HI, val);
CSR_WRITE_4(sc, BFE_CAM_CTRL, (BFE_CAM_WRITE |
((u_int32_t) index << BFE_CAM_INDEX_SHIFT)));
bfe_wait_bit(sc, BFE_CAM_CTRL, BFE_CAM_BUSY, 10000, 1);
}
/*
 * if_foreach_llmaddr() callback: program one link-level multicast
 * address into the CAM.  Slot 0 holds the station address, so the
 * cnt'th multicast address goes into slot cnt + 1.
 */
static u_int
bfe_write_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct bfe_softc *sc;

	sc = arg;
	bfe_cam_write(sc, LLADDR(sdl), cnt + 1);
	return (1);
}
/*
 * Program the receive filter from the interface flags: promiscuous
 * mode, broadcast acceptance, and either the all-multicast bit or the
 * individual multicast addresses via the CAM.  Slot 0 of the CAM always
 * holds the station address.  Called with the softc lock held.
 */
static void
bfe_set_rx_mode(struct bfe_softc *sc)
{
if_t ifp = sc->bfe_ifp;
u_int32_t val;
BFE_LOCK_ASSERT(sc);
val = CSR_READ_4(sc, BFE_RXCONF);
if (if_getflags(ifp) & IFF_PROMISC)
val |= BFE_RXCONF_PROMISC;
else
val &= ~BFE_RXCONF_PROMISC;
/* DBCAST *disables* broadcast reception, hence the inverted sense. */
if (if_getflags(ifp) & IFF_BROADCAST)
val &= ~BFE_RXCONF_DBCAST;
else
val |= BFE_RXCONF_DBCAST;
/* Disable the CAM while reprogramming it. */
CSR_WRITE_4(sc, BFE_CAM_CTRL, 0);
bfe_cam_write(sc, if_getlladdr(sc->bfe_ifp), 0);
if (if_getflags(ifp) & IFF_ALLMULTI)
val |= BFE_RXCONF_ALLMULTI;
else {
val &= ~BFE_RXCONF_ALLMULTI;
if_foreach_llmaddr(ifp, bfe_write_maddr, sc);
}
CSR_WRITE_4(sc, BFE_RXCONF, val);
BFE_OR(sc, BFE_CAM_CTRL, BFE_CAM_ENABLE);
}
/*
 * bus_dmamap_load() callback: record the single physical segment
 * address into the caller-supplied bfe_dmamap_arg.  A failed load
 * (error != 0) leaves the argument untouched.
 */
static void
bfe_dma_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct bfe_dmamap_arg *parg;

	if (error != 0)
		return;
	KASSERT(nseg == 1, ("%s : %d segments returned!", __func__, nseg));
	parg = arg;
	parg->bfe_busaddr = segs[0].ds_addr;
}
/*
 * Release bus resources acquired during attach, in reverse order of
 * acquisition.  Each step is skipped if that resource was never
 * obtained, so this is safe to call from a partially failed attach.
 */
static void
bfe_release_resources(struct bfe_softc *sc)
{
if (sc->bfe_intrhand != NULL)
bus_teardown_intr(sc->bfe_dev, sc->bfe_irq, sc->bfe_intrhand);
if (sc->bfe_irq != NULL)
bus_release_resource(sc->bfe_dev, SYS_RES_IRQ, 0, sc->bfe_irq);
if (sc->bfe_res != NULL)
bus_release_resource(sc->bfe_dev, SYS_RES_MEMORY, PCIR_BAR(0),
sc->bfe_res);
if (sc->bfe_ifp != NULL)
if_free(sc->bfe_ifp);
}
/*
 * Copy the 128-byte EEPROM (SPROM) shadow into the caller's buffer.
 * The shadow starts at register offset 4096; each 32-bit register read
 * yields one 16-bit word of EEPROM data.
 */
static void
bfe_read_eeprom(struct bfe_softc *sc, u_int8_t *data)
{
	u_int16_t *dst;
	long off;

	dst = (u_int16_t *)data;
	for (off = 0; off < 128; off += 2)
		dst[off / 2] = CSR_READ_4(sc, 4096 + off);
}
/*
 * Poll a register until the given bit reaches the desired state.
 * When 'clear' is non-zero we wait for the bit to drop, otherwise for
 * it to rise.  Polls up to 'timeout' times with a 10us pause between
 * reads.  Returns 0 on success, -1 (with a console complaint) if the
 * bit never reached the wanted state.
 */
static int
bfe_wait_bit(struct bfe_softc *sc, u_int32_t reg, u_int32_t bit,
u_long timeout, const int clear)
{
	u_int32_t v;
	u_long n;

	for (n = 0; n < timeout; n++) {
		v = CSR_READ_4(sc, reg);
		if (clear) {
			if ((v & bit) == 0)
				break;
		} else {
			if ((v & bit) != 0)
				break;
		}
		DELAY(10);
	}
	if (n == timeout) {
		device_printf(sc->bfe_dev,
		    "BUG! Timeout waiting for bit %08x of register "
		    "%x to %s.\n", bit, reg, (clear ? "clear" : "set"));
		return (-1);
	}
	return (0);
}
/*
 * Read a PHY register over MDIO.  Issues the read command, waits for
 * the MII interrupt status bit to signal completion, then extracts the
 * data field.  Returns the bfe_wait_bit() status (0 on success, -1 on
 * timeout); *val is written unconditionally.
 */
static int
bfe_readphy(struct bfe_softc *sc, u_int32_t reg, u_int32_t *val)
{
int err;
/* Clear MII ISR */
CSR_WRITE_4(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII);
/* Compose and issue the MDIO read frame. */
CSR_WRITE_4(sc, BFE_MDIO_DATA, (BFE_MDIO_SB_START |
(BFE_MDIO_OP_READ << BFE_MDIO_OP_SHIFT) |
(sc->bfe_phyaddr << BFE_MDIO_PMD_SHIFT) |
(reg << BFE_MDIO_RA_SHIFT) |
(BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT)));
err = bfe_wait_bit(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 100, 0);
*val = CSR_READ_4(sc, BFE_MDIO_DATA) & BFE_MDIO_DATA_DATA;
return (err);
}
/*
 * Write a PHY register over MDIO.  Issues the write command with the
 * data folded into the frame and waits for the MII interrupt status
 * bit to signal completion.  Returns 0 on success, -1 on timeout.
 */
static int
bfe_writephy(struct bfe_softc *sc, u_int32_t reg, u_int32_t val)
{
int status;
/* Clear the MII interrupt status before starting the transaction. */
CSR_WRITE_4(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII);
CSR_WRITE_4(sc, BFE_MDIO_DATA, (BFE_MDIO_SB_START |
(BFE_MDIO_OP_WRITE << BFE_MDIO_OP_SHIFT) |
(sc->bfe_phyaddr << BFE_MDIO_PMD_SHIFT) |
(reg << BFE_MDIO_RA_SHIFT) |
(BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT) |
(val & BFE_MDIO_DATA_DATA)));
status = bfe_wait_bit(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 100, 0);
return (status);
}
/*
 * XXX - I think this is handled by the PHY driver, but it can't hurt to do it
 * twice
 */
/*
 * Configure vendor-specific PHY registers 26 and 27: enable the
 * activity LED and select traffic-meter LED mode.  Always returns 0.
 */
static int
bfe_setupphy(struct bfe_softc *sc)
{
u_int32_t val;
/* Enable activity LED */
bfe_readphy(sc, 26, &val);
bfe_writephy(sc, 26, val & 0x7fff);
/* Re-read; the result is intentionally discarded. */
bfe_readphy(sc, 26, &val);
/* Enable traffic meter LED mode */
bfe_readphy(sc, 27, &val);
bfe_writephy(sc, 27, val | (1 << 6));
return (0);
}
/*
 * Harvest the hardware MIB counters into the softc's accumulated
 * statistics and the ifnet counters.  The MIB registers clear on read
 * (BFE_MIB_CLR_ON_READ), so each value read is a delta since the last
 * harvest.  The order of the register reads below must match the
 * MIB_* index layout of the mib[] array.  Called with the lock held,
 * once a second from bfe_tick().
 */
static void
bfe_stats_update(struct bfe_softc *sc)
{
struct bfe_hw_stats *stats;
if_t ifp;
uint32_t mib[BFE_MIB_CNT];
uint32_t reg, *val;
BFE_LOCK_ASSERT(sc);
val = mib;
CSR_WRITE_4(sc, BFE_MIB_CTRL, BFE_MIB_CLR_ON_READ);
/* Snapshot Tx then Rx counter ranges in register order. */
for (reg = BFE_TX_GOOD_O; reg <= BFE_TX_PAUSE; reg += 4)
*val++ = CSR_READ_4(sc, reg);
for (reg = BFE_RX_GOOD_O; reg <= BFE_RX_NPAUSE; reg += 4)
*val++ = CSR_READ_4(sc, reg);
ifp = sc->bfe_ifp;
stats = &sc->bfe_stats;
/* Tx stat. */
stats->tx_good_octets += mib[MIB_TX_GOOD_O];
stats->tx_good_frames += mib[MIB_TX_GOOD_P];
stats->tx_octets += mib[MIB_TX_O];
stats->tx_frames += mib[MIB_TX_P];
stats->tx_bcast_frames += mib[MIB_TX_BCAST];
stats->tx_mcast_frames += mib[MIB_TX_MCAST];
stats->tx_pkts_64 += mib[MIB_TX_64];
stats->tx_pkts_65_127 += mib[MIB_TX_65_127];
stats->tx_pkts_128_255 += mib[MIB_TX_128_255];
stats->tx_pkts_256_511 += mib[MIB_TX_256_511];
stats->tx_pkts_512_1023 += mib[MIB_TX_512_1023];
stats->tx_pkts_1024_max += mib[MIB_TX_1024_MAX];
stats->tx_jabbers += mib[MIB_TX_JABBER];
stats->tx_oversize_frames += mib[MIB_TX_OSIZE];
stats->tx_frag_frames += mib[MIB_TX_FRAG];
stats->tx_underruns += mib[MIB_TX_URUNS];
stats->tx_colls += mib[MIB_TX_TCOLS];
stats->tx_single_colls += mib[MIB_TX_SCOLS];
stats->tx_multi_colls += mib[MIB_TX_MCOLS];
stats->tx_excess_colls += mib[MIB_TX_ECOLS];
stats->tx_late_colls += mib[MIB_TX_LCOLS];
stats->tx_deferrals += mib[MIB_TX_DEFERED];
stats->tx_carrier_losts += mib[MIB_TX_CLOST];
stats->tx_pause_frames += mib[MIB_TX_PAUSE];
/* Rx stat. */
stats->rx_good_octets += mib[MIB_RX_GOOD_O];
stats->rx_good_frames += mib[MIB_RX_GOOD_P];
stats->rx_octets += mib[MIB_RX_O];
stats->rx_frames += mib[MIB_RX_P];
stats->rx_bcast_frames += mib[MIB_RX_BCAST];
stats->rx_mcast_frames += mib[MIB_RX_MCAST];
stats->rx_pkts_64 += mib[MIB_RX_64];
stats->rx_pkts_65_127 += mib[MIB_RX_65_127];
stats->rx_pkts_128_255 += mib[MIB_RX_128_255];
stats->rx_pkts_256_511 += mib[MIB_RX_256_511];
stats->rx_pkts_512_1023 += mib[MIB_RX_512_1023];
stats->rx_pkts_1024_max += mib[MIB_RX_1024_MAX];
stats->rx_jabbers += mib[MIB_RX_JABBER];
stats->rx_oversize_frames += mib[MIB_RX_OSIZE];
stats->rx_frag_frames += mib[MIB_RX_FRAG];
stats->rx_missed_frames += mib[MIB_RX_MISS];
stats->rx_crc_align_errs += mib[MIB_RX_CRCA];
stats->rx_runts += mib[MIB_RX_USIZE];
stats->rx_crc_errs += mib[MIB_RX_CRC];
stats->rx_align_errs += mib[MIB_RX_ALIGN];
stats->rx_symbol_errs += mib[MIB_RX_SYM];
stats->rx_pause_frames += mib[MIB_RX_PAUSE];
stats->rx_control_frames += mib[MIB_RX_NPAUSE];
/* Update counters in ifnet. */
if_inc_counter(ifp, IFCOUNTER_OPACKETS, (u_long)mib[MIB_TX_GOOD_P]);
if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (u_long)mib[MIB_TX_TCOLS]);
if_inc_counter(ifp, IFCOUNTER_OERRORS, (u_long)mib[MIB_TX_URUNS] +
(u_long)mib[MIB_TX_ECOLS] +
(u_long)mib[MIB_TX_DEFERED] +
(u_long)mib[MIB_TX_CLOST]);
if_inc_counter(ifp, IFCOUNTER_IPACKETS, (u_long)mib[MIB_RX_GOOD_P]);
if_inc_counter(ifp, IFCOUNTER_IERRORS, mib[MIB_RX_JABBER] +
mib[MIB_RX_MISS] +
mib[MIB_RX_CRCA] +
mib[MIB_RX_USIZE] +
mib[MIB_RX_CRC] +
mib[MIB_RX_ALIGN] +
mib[MIB_RX_SYM]);
}
/*
 * Transmit-completion handler: reclaim descriptors the chip has
 * consumed, unload and free the associated mbufs, and clear OACTIVE /
 * the watchdog as appropriate.  Called with the softc lock held.
 */
static void
bfe_txeof(struct bfe_softc *sc)
{
struct bfe_tx_data *r;
if_t ifp;
int i, chipidx;
BFE_LOCK_ASSERT(sc);
ifp = sc->bfe_ifp;
/* Convert the chip's current descriptor byte offset to an index. */
chipidx = CSR_READ_4(sc, BFE_DMATX_STAT) & BFE_STAT_CDMASK;
chipidx /= sizeof(struct bfe_desc);
i = sc->bfe_tx_cons;
/* Nothing completed since last time. */
if (i == chipidx)
return;
bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
/* Go through the mbufs and free those that have been transmitted */
for (; i != chipidx; BFE_INC(i, BFE_TX_LIST_CNT)) {
r = &sc->bfe_tx_ring[i];
sc->bfe_tx_cnt--;
/* Only the last descriptor of a frame carries the mbuf. */
if (r->bfe_mbuf == NULL)
continue;
bus_dmamap_sync(sc->bfe_txmbuf_tag, r->bfe_map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->bfe_txmbuf_tag, r->bfe_map);
m_freem(r->bfe_mbuf);
r->bfe_mbuf = NULL;
}
if (i != sc->bfe_tx_cons) {
/* we freed up some mbufs */
sc->bfe_tx_cons = i;
if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
}
/* Ring fully drained: disarm the transmit watchdog. */
if (sc->bfe_tx_cnt == 0)
sc->bfe_watchdog_timer = 0;
}
/* Pass a received packet up the stack */
/*
 * Receive-completion handler: walk the ring from the consumer index to
 * the chip's current position, replace each filled buffer with a fresh
 * one, strip the hardware-written Rx header and CRC, and hand good
 * frames to the network stack (dropping the lock around if_input()).
 * Called with the softc lock held.
 */
static void
bfe_rxeof(struct bfe_softc *sc)
{
struct mbuf *m;
if_t ifp;
struct bfe_rxheader *rxheader;
struct bfe_rx_data *r;
int cons, prog;
u_int32_t status, current, len, flags;
BFE_LOCK_ASSERT(sc);
cons = sc->bfe_rx_cons;
status = CSR_READ_4(sc, BFE_DMARX_STAT);
/* Convert the chip's descriptor byte offset to a ring index. */
current = (status & BFE_STAT_CDMASK) / sizeof(struct bfe_desc);
ifp = sc->bfe_ifp;
bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
for (prog = 0; current != cons; prog++,
BFE_INC(cons, BFE_RX_LIST_CNT)) {
r = &sc->bfe_rx_ring[cons];
m = r->bfe_mbuf;
/*
 * Rx status should be read from mbuf such that we can't
 * delay bus_dmamap_sync(9). This hardware limitation
 * results in inefficient mbuf usage as bfe(4) couldn't
 * reuse mapped buffer from errored frame.
 */
if (bfe_list_newbuf(sc, cons) != 0) {
/* No replacement mbuf: recycle the old one in place. */
if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
bfe_discard_buf(sc, cons);
continue;
}
/* The chip prepends a status header to the frame data. */
rxheader = mtod(m, struct bfe_rxheader*);
len = le16toh(rxheader->len);
flags = le16toh(rxheader->flags);
/* Remove CRC bytes. */
len -= ETHER_CRC_LEN;
/* flag an error and try again */
if ((len > ETHER_MAX_LEN+32) || (flags & BFE_RX_FLAG_ERRORS)) {
m_freem(m);
continue;
}
/* Make sure to skip header bytes written by hardware. */
m_adj(m, BFE_RX_OFFSET);
m->m_len = m->m_pkthdr.len = len;
m->m_pkthdr.rcvif = ifp;
/* if_input() may sleep/recurse; it must run unlocked. */
BFE_UNLOCK(sc);
if_input(ifp, m);
BFE_LOCK(sc);
}
if (prog > 0) {
sc->bfe_rx_cons = cons;
bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
}
/*
 * Interrupt handler: acknowledge and dispatch Rx/Tx completions,
 * reinitialize the chip on error interrupts (stopping it outright on
 * descriptor errors), then kick the transmitter if packets are queued.
 */
static void
bfe_intr(void *xsc)
{
struct bfe_softc *sc = xsc;
if_t ifp;
u_int32_t istat;
ifp = sc->bfe_ifp;
BFE_LOCK(sc);
istat = CSR_READ_4(sc, BFE_ISTAT);
/*
 * Defer unsolicited interrupts - This is necessary because setting the
 * chips interrupt mask register to 0 doesn't actually stop the
 * interrupts
 */
istat &= BFE_IMASK_DEF;
/* Ack the events we will handle; read back to flush the write. */
CSR_WRITE_4(sc, BFE_ISTAT, istat);
CSR_READ_4(sc, BFE_ISTAT);
/* not expecting this interrupt, disregard it */
if (istat == 0 || (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
BFE_UNLOCK(sc);
return;
}
/* A packet was received */
if (istat & BFE_ISTAT_RX)
bfe_rxeof(sc);
/* A packet was sent */
if (istat & BFE_ISTAT_TX)
bfe_txeof(sc);
if (istat & BFE_ISTAT_ERRORS) {
/* Descriptor errors are fatal: stop the interface. */
if (istat & BFE_ISTAT_DSCE) {
device_printf(sc->bfe_dev, "Descriptor Error\n");
bfe_stop(sc);
BFE_UNLOCK(sc);
return;
}
if (istat & BFE_ISTAT_DPE) {
device_printf(sc->bfe_dev,
"Descriptor Protocol Error\n");
bfe_stop(sc);
BFE_UNLOCK(sc);
return;
}
/* Other errors: restart the chip. */
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
bfe_init_locked(sc);
}
/* We have packets pending, fire them out */
if (!if_sendq_empty(ifp))
bfe_start_locked(ifp);
BFE_UNLOCK(sc);
}
/*
 * Map an mbuf chain into the Tx descriptor ring.  On EFBIG the chain is
 * collapsed and the load retried once.  Descriptors are written with
 * length/IOC first; EOF is set on the last and SOF on the first
 * descriptor only after the rest are filled in, so the chip never sees
 * a partially built frame.  The dmamap is swapped between the first and
 * last ring slots so the mbuf and its map stay together on the slot
 * where bfe_txeof() will free them.
 *
 * Returns 0 on success; ENOMEM/EIO free *m_head and NULL it, ENOBUFS
 * leaves *m_head intact for the caller to requeue.  Called with the
 * softc lock held.
 */
static int
bfe_encap(struct bfe_softc *sc, struct mbuf **m_head)
{
struct bfe_desc *d;
struct bfe_tx_data *r, *r1;
struct mbuf *m;
bus_dmamap_t map;
bus_dma_segment_t txsegs[BFE_MAXTXSEGS];
uint32_t cur, si;
int error, i, nsegs;
BFE_LOCK_ASSERT(sc);
M_ASSERTPKTHDR((*m_head));
/* si = start index of this frame, cur walks the ring. */
si = cur = sc->bfe_tx_prod;
r = &sc->bfe_tx_ring[cur];
error = bus_dmamap_load_mbuf_sg(sc->bfe_txmbuf_tag, r->bfe_map, *m_head,
txsegs, &nsegs, 0);
if (error == EFBIG) {
/* Too many segments: compact the chain and retry once. */
m = m_collapse(*m_head, M_NOWAIT, BFE_MAXTXSEGS);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;
return (ENOMEM);
}
*m_head = m;
error = bus_dmamap_load_mbuf_sg(sc->bfe_txmbuf_tag, r->bfe_map,
*m_head, txsegs, &nsegs, 0);
if (error != 0) {
m_freem(*m_head);
*m_head = NULL;
return (error);
}
} else if (error != 0)
return (error);
if (nsegs == 0) {
m_freem(*m_head);
*m_head = NULL;
return (EIO);
}
/* Not enough free descriptors; caller will requeue the mbuf. */
if (sc->bfe_tx_cnt + nsegs > BFE_TX_LIST_CNT - 1) {
bus_dmamap_unload(sc->bfe_txmbuf_tag, r->bfe_map);
return (ENOBUFS);
}
for (i = 0; i < nsegs; i++) {
d = &sc->bfe_tx_list[cur];
d->bfe_ctrl = htole32(txsegs[i].ds_len & BFE_DESC_LEN);
d->bfe_ctrl |= htole32(BFE_DESC_IOC);
if (cur == BFE_TX_LIST_CNT - 1)
/*
 * Tell the chip to wrap to the start of
 * the descriptor list.
 */
d->bfe_ctrl |= htole32(BFE_DESC_EOT);
/* The chip needs all addresses to be added to BFE_PCI_DMA. */
d->bfe_addr = htole32(BFE_ADDR_LO(txsegs[i].ds_addr) +
BFE_PCI_DMA);
BFE_INC(cur, BFE_TX_LIST_CNT);
}
/* Update producer index. */
sc->bfe_tx_prod = cur;
/* Set EOF on the last descriptor. */
cur = (cur + BFE_TX_LIST_CNT - 1) % BFE_TX_LIST_CNT;
d = &sc->bfe_tx_list[cur];
d->bfe_ctrl |= htole32(BFE_DESC_EOF);
/* Lastly set SOF on the first descriptor to avoid races. */
d = &sc->bfe_tx_list[si];
d->bfe_ctrl |= htole32(BFE_DESC_SOF);
/* Swap maps so the mbuf's map lives on the last (freeing) slot. */
r1 = &sc->bfe_tx_ring[cur];
map = r->bfe_map;
r->bfe_map = r1->bfe_map;
r1->bfe_map = map;
r1->bfe_mbuf = *m_head;
sc->bfe_tx_cnt += nsegs;
bus_dmamap_sync(sc->bfe_txmbuf_tag, map, BUS_DMASYNC_PREWRITE);
return (0);
}
/*
 * Set up to transmit a packet.
 *
 * ifnet if_start entry point: take the softc lock and hand off to
 * bfe_start_locked().  The softc is looked up once instead of the
 * original's two casted if_getsoftc() evaluations inside the lock
 * macros.
 */
static void
bfe_start(if_t ifp)
{
	struct bfe_softc *sc;

	sc = if_getsoftc(ifp);
	BFE_LOCK(sc);
	bfe_start_locked(ifp);
	BFE_UNLOCK(sc);
}
/*
 * Set up to transmit a packet. The softc is already locked.
 */
/*
 * Dequeue packets from the interface send queue into the Tx ring until
 * the queue is empty or the ring fills, BPF-tapping each queued frame,
 * then kick the DMA engine and arm the transmit watchdog.
 */
static void
bfe_start_locked(if_t ifp)
{
struct bfe_softc *sc;
struct mbuf *m_head;
int queued;
sc = if_getsoftc(ifp);
BFE_LOCK_ASSERT(sc);
/*
 * Not much point trying to send if the link is down
 * or we have nothing to send.
 */
if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
IFF_DRV_RUNNING || (sc->bfe_flags & BFE_FLAG_LINK) == 0)
return;
for (queued = 0; !if_sendq_empty(ifp) &&
sc->bfe_tx_cnt < BFE_TX_LIST_CNT - 1;) {
m_head = if_dequeue(ifp);
if (m_head == NULL)
break;
/*
 * Pack the data into the tx ring. If we dont have
 * enough room, let the chip drain the ring.
 */
if (bfe_encap(sc, &m_head)) {
/* m_head == NULL means bfe_encap() already freed it. */
if (m_head == NULL)
break;
if_sendq_prepend(ifp, m_head);
if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
break;
}
queued++;
/*
 * If there's a BPF listener, bounce a copy of this frame
 * to him.
 */
BPF_MTAP(ifp, m_head);
}
if (queued) {
bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Transmit - twice due to apparent hardware bug */
CSR_WRITE_4(sc, BFE_DMATX_PTR,
sc->bfe_tx_prod * sizeof(struct bfe_desc));
/*
 * XXX It seems the following write is not necessary
 * to kick Tx command. What might be required would be
 * a way flushing PCI posted write. Reading the register
 * back ensures the flush operation. In addition,
 * hardware will execute PCI posted write in the long
 * run and watchdog timer for the kick command was set
 * to 5 seconds. Therefore I think the second write
 * access is not necessary or could be replaced with
 * read operation.
 */
CSR_WRITE_4(sc, BFE_DMATX_PTR,
sc->bfe_tx_prod * sizeof(struct bfe_desc));
/*
 * Set a timeout in case the chip goes out to lunch.
 */
sc->bfe_watchdog_timer = 5;
}
}
/*
 * ifnet if_init entry point: take the softc lock and run the locked
 * initialization.  The void * argument is cast once instead of the
 * original's repeated casts inside the lock macros.
 */
static void
bfe_init(void *xsc)
{
	struct bfe_softc *sc;

	sc = (struct bfe_softc *)xsc;
	BFE_LOCK(sc);
	bfe_init_locked(sc);
	BFE_UNLOCK(sc);
}
/*
 * Bring the interface up: stop and reset the chip, (re)build the Rx
 * and Tx rings, program the receive filter, enable the MAC and
 * interrupts, restart media negotiation and start the stat/watchdog
 * callout.  A no-op if the interface is already running.  Called with
 * the softc lock held.
 */
static void
bfe_init_locked(void *xsc)
{
struct bfe_softc *sc = (struct bfe_softc*)xsc;
if_t ifp = sc->bfe_ifp;
struct mii_data *mii;
BFE_LOCK_ASSERT(sc);
mii = device_get_softc(sc->bfe_miibus);
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
return;
bfe_stop(sc);
bfe_chip_reset(sc);
if (bfe_list_rx_init(sc) == ENOBUFS) {
device_printf(sc->bfe_dev,
"%s: Not enough memory for list buffers\n", __func__);
bfe_stop(sc);
return;
}
bfe_list_tx_init(sc);
bfe_set_rx_mode(sc);
/* Enable the chip and core */
BFE_OR(sc, BFE_ENET_CTRL, BFE_ENET_ENABLE);
/* Enable interrupts */
CSR_WRITE_4(sc, BFE_IMASK, BFE_IMASK_DEF);
/* Clear link state and change media. */
sc->bfe_flags &= ~BFE_FLAG_LINK;
mii_mediachg(mii);
if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
callout_reset(&sc->bfe_stat_co, hz, bfe_tick, sc);
}
/*
 * Set media options.
 */
/*
 * ifmedia change callback: reset every attached PHY and restart media
 * selection.  Returns the mii_mediachg() status.
 */
static int
bfe_ifmedia_upd(if_t ifp)
{
struct bfe_softc *sc;
struct mii_data *mii;
struct mii_softc *miisc;
int error;
sc = if_getsoftc(ifp);
BFE_LOCK(sc);
mii = device_get_softc(sc->bfe_miibus);
/* Reset all PHYs so negotiation starts from a clean state. */
LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
PHY_RESET(miisc);
error = mii_mediachg(mii);
BFE_UNLOCK(sc);
return (error);
}
/*
 * Report current media status.
 *
 * ifmedia status callback: poll the PHY and copy the active media and
 * link status into the request under the softc lock.
 */
static void
bfe_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct bfe_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	BFE_LOCK(sc);
	mii = device_get_softc(sc->bfe_miibus);
	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	BFE_UNLOCK(sc);
}
/*
 * ifnet ioctl handler: interface flag changes (up/down, promiscuous),
 * multicast list updates and media ioctls; everything else is passed
 * to ether_ioctl().
 */
static int
bfe_ioctl(if_t ifp, u_long command, caddr_t data)
{
struct bfe_softc *sc = if_getsoftc(ifp);
struct ifreq *ifr = (struct ifreq *) data;
struct mii_data *mii;
int error = 0;
switch (command) {
case SIOCSIFFLAGS:
BFE_LOCK(sc);
if (if_getflags(ifp) & IFF_UP) {
/* Already running: just reprogram the Rx filter. */
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
bfe_set_rx_mode(sc);
else if ((sc->bfe_flags & BFE_FLAG_DETACH) == 0)
bfe_init_locked(sc);
} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
bfe_stop(sc);
BFE_UNLOCK(sc);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
BFE_LOCK(sc);
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
bfe_set_rx_mode(sc);
BFE_UNLOCK(sc);
break;
case SIOCGIFMEDIA:
case SIOCSIFMEDIA:
/* Media ioctls are delegated to the MII layer. */
mii = device_get_softc(sc->bfe_miibus);
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
break;
default:
error = ether_ioctl(ifp, command, data);
break;
}
return (error);
}
/*
 * Transmit watchdog, run once a second from bfe_tick(): when the timer
 * is armed and counts down to zero without bfe_txeof() disarming it,
 * assume the chip wedged and reinitialize the interface.  Called with
 * the softc lock held.
 */
static void
bfe_watchdog(struct bfe_softc *sc)
{
if_t ifp;
BFE_LOCK_ASSERT(sc);
/*
 * Timer disarmed (0), or armed and still counting down after the
 * decrement -- nothing to do yet.  Only a 1 -> 0 transition falls
 * through to the reset below.
 */
if (sc->bfe_watchdog_timer == 0 || --sc->bfe_watchdog_timer)
return;
ifp = sc->bfe_ifp;
device_printf(sc->bfe_dev, "watchdog timeout -- resetting\n");
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
/* Clear RUNNING so bfe_init_locked() performs a full restart. */
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
bfe_init_locked(sc);
if (!if_sendq_empty(ifp))
bfe_start_locked(ifp);
}
/*
 * One-second periodic callout: drive the MII state machine, harvest
 * the hardware statistics, run the transmit watchdog, and reschedule
 * ourselves.  Runs with the softc lock held (MTX-safe callout).
 */
static void
bfe_tick(void *xsc)
{
	struct bfe_softc *sc;
	struct mii_data *mii;

	sc = xsc;
	BFE_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->bfe_miibus);
	mii_tick(mii);
	bfe_stats_update(sc);
	bfe_watchdog(sc);
	callout_reset(&sc->bfe_stat_co, hz, bfe_tick, sc);
}
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
/*
 * Called with the softc lock held.  Marks the interface down, cancels
 * the periodic callout and watchdog, halts the chip, and releases all
 * ring buffers.
 */
static void
bfe_stop(struct bfe_softc *sc)
{
if_t ifp;
BFE_LOCK_ASSERT(sc);
ifp = sc->bfe_ifp;
if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
sc->bfe_flags &= ~BFE_FLAG_LINK;
callout_stop(&sc->bfe_stat_co);
sc->bfe_watchdog_timer = 0;
bfe_chip_halt(sc);
bfe_tx_ring_free(sc);
bfe_rx_ring_free(sc);
}
/*
 * Sysctl handler that dumps the accumulated hardware MIB counters to
 * the console.  The node is effectively write-only: storing the value
 * 1 triggers the dump; reads and any other written value do nothing.
 *
 * Fixes versus the previous revision:
 *  - "Transmit underruns" printed stats->tx_colls and "Transmit total
 *    collisions" printed stats->tx_single_colls (copy/paste bug); they
 *    now print tx_underruns and tx_colls respectively.
 *  - All %ju arguments are cast to uintmax_t as C99 requires, instead
 *    of a mix of uintmax_t and uint64_t.
 */
static int
sysctl_bfe_stats(SYSCTL_HANDLER_ARGS)
{
	struct bfe_softc *sc;
	struct bfe_hw_stats *stats;
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	/* Reads, and failed writes, are no-ops. */
	if (error != 0 || req->newptr == NULL)
		return (error);
	/* Only the magic value 1 triggers the dump. */
	if (result != 1)
		return (error);
	sc = (struct bfe_softc *)arg1;
	stats = &sc->bfe_stats;
	printf("%s statistics:\n", device_get_nameunit(sc->bfe_dev));
	printf("Transmit good octets : %ju\n",
	    (uintmax_t)stats->tx_good_octets);
	printf("Transmit good frames : %ju\n",
	    (uintmax_t)stats->tx_good_frames);
	printf("Transmit octets : %ju\n",
	    (uintmax_t)stats->tx_octets);
	printf("Transmit frames : %ju\n",
	    (uintmax_t)stats->tx_frames);
	printf("Transmit broadcast frames : %ju\n",
	    (uintmax_t)stats->tx_bcast_frames);
	printf("Transmit multicast frames : %ju\n",
	    (uintmax_t)stats->tx_mcast_frames);
	printf("Transmit frames 64 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_64);
	printf("Transmit frames 65 to 127 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_65_127);
	printf("Transmit frames 128 to 255 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_128_255);
	printf("Transmit frames 256 to 511 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_256_511);
	printf("Transmit frames 512 to 1023 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_512_1023);
	printf("Transmit frames 1024 to max bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_1024_max);
	printf("Transmit jabber errors : %u\n", stats->tx_jabbers);
	printf("Transmit oversized frames : %ju\n",
	    (uintmax_t)stats->tx_oversize_frames);
	printf("Transmit fragmented frames : %ju\n",
	    (uintmax_t)stats->tx_frag_frames);
	/* Was tx_colls / tx_single_colls -- copy/paste bug fixed. */
	printf("Transmit underruns : %u\n", stats->tx_underruns);
	printf("Transmit total collisions : %u\n", stats->tx_colls);
	printf("Transmit single collisions : %u\n", stats->tx_single_colls);
	printf("Transmit multiple collisions : %u\n", stats->tx_multi_colls);
	printf("Transmit excess collisions : %u\n", stats->tx_excess_colls);
	printf("Transmit late collisions : %u\n", stats->tx_late_colls);
	printf("Transmit deferrals : %u\n", stats->tx_deferrals);
	printf("Transmit carrier losts : %u\n", stats->tx_carrier_losts);
	printf("Transmit pause frames : %u\n", stats->tx_pause_frames);
	printf("Receive good octets : %ju\n",
	    (uintmax_t)stats->rx_good_octets);
	printf("Receive good frames : %ju\n",
	    (uintmax_t)stats->rx_good_frames);
	printf("Receive octets : %ju\n",
	    (uintmax_t)stats->rx_octets);
	printf("Receive frames : %ju\n",
	    (uintmax_t)stats->rx_frames);
	printf("Receive broadcast frames : %ju\n",
	    (uintmax_t)stats->rx_bcast_frames);
	printf("Receive multicast frames : %ju\n",
	    (uintmax_t)stats->rx_mcast_frames);
	printf("Receive frames 64 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_64);
	printf("Receive frames 65 to 127 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_65_127);
	printf("Receive frames 128 to 255 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_128_255);
	printf("Receive frames 256 to 511 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_256_511);
	printf("Receive frames 512 to 1023 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_512_1023);
	printf("Receive frames 1024 to max bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_1024_max);
	printf("Receive jabber errors : %u\n", stats->rx_jabbers);
	printf("Receive oversized frames : %ju\n",
	    (uintmax_t)stats->rx_oversize_frames);
	printf("Receive fragmented frames : %ju\n",
	    (uintmax_t)stats->rx_frag_frames);
	printf("Receive missed frames : %u\n", stats->rx_missed_frames);
	printf("Receive CRC align errors : %u\n", stats->rx_crc_align_errs);
	printf("Receive undersized frames : %u\n", stats->rx_runts);
	printf("Receive CRC errors : %u\n", stats->rx_crc_errs);
	printf("Receive align errors : %u\n", stats->rx_align_errs);
	printf("Receive symbol errors : %u\n", stats->rx_symbol_errs);
	printf("Receive pause frames : %u\n", stats->rx_pause_frames);
	printf("Receive control frames : %u\n", stats->rx_control_frames);
	return (error);
}
diff --git a/sys/dev/bge/if_bge.c b/sys/dev/bge/if_bge.c
index 2662568c850a..23259179cc62 100644
--- a/sys/dev/bge/if_bge.c
+++ b/sys/dev/bge/if_bge.c
@@ -1,6850 +1,6845 @@
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) 2001 Wind River Systems
* Copyright (c) 1997, 1998, 1999, 2001
* Bill Paul <wpaul@windriver.com>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
/*
* Broadcom BCM57xx(x)/BCM590x NetXtreme and NetLink family Ethernet driver
*
* The Broadcom BCM5700 is based on technology originally developed by
* Alteon Networks as part of the Tigon I and Tigon II Gigabit Ethernet
* MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
* two on-board MIPS R4000 CPUs and can have as much as 16MB of external
* SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
* frames, highly configurable RX filtering, and 16 RX and TX queues
* (which, along with RX filter rules, can be used for QOS applications).
* Other features, such as TCP segmentation, may be available as part
* of value-added firmware updates. Unlike the Tigon I and Tigon II,
* firmware images can be stored in hardware and need not be compiled
* into the driver.
*
* The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
* function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus.
*
* The BCM5701 is a single-chip solution incorporating both the BCM5700
* MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
* does not support external SSRAM.
*
* Broadcom also produces a variation of the BCM5700 under the "Altima"
* brand name, which is functionally similar but lacks PCI-X support.
*
* Without external SSRAM, you can only have at most 4 TX rings,
* and the use of the mini RX ring is disabled. This seems to imply
* that these features are simply not available on the BCM5701. As a
* result, this driver does not implement any support for the mini RX
* ring.
*/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/debugnet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miidevs.h"
#include <dev/mii/brgphyreg.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/bge/if_bgereg.h>
#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
MODULE_DEPEND(bge, pci, 1, 1, 1);
MODULE_DEPEND(bge, ether, 1, 1, 1);
MODULE_DEPEND(bge, miibus, 1, 1, 1);
/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
/*
* Various supported device vendors/types and their names. Note: the
* spec seems to indicate that the hardware still has Alteon's vendor
* ID burned into it, though it will always be overridden by the vendor
* ID in the EEPROM. Just to be safe, we cover all possibilities.
*/
/*
 * Table of PCI vendor/device ID pairs claimed by this driver.
 * Terminated by a { 0, 0 } sentinel; also exported for PNP matching
 * via MODULE_PNP_INFO() below.
 */
static const struct bge_type {
	uint16_t bge_vid;	/* PCI vendor ID */
	uint16_t bge_did;	/* PCI device ID */
} bge_devs[] = {
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5700 },
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5701 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1000 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1002 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC9100 },
	{ APPLE_VENDORID,	APPLE_DEVICE_BCM5701 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5700 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5701 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705K },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5717 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5717C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5718 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5719 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5720 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5721 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5722 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5723 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5725 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5727 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5756 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5761 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5761E },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5761S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5761SE },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5762 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5764 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5781 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5782 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5784 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5785F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5785G },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5786 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5788 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5789 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901A2 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5903M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5906 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5906M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57760 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57761 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57762 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57764 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57765 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57766 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57767 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57780 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57781 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57782 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57785 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57786 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57787 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57788 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57790 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57791 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57795 },
	{ SK_VENDORID,		SK_DEVICEID_ALTIMA },
	{ TC_VENDORID,		TC_DEVICEID_3C996 },
	{ FJTSU_VENDORID,	FJTSU_DEVICEID_PW008GE4 },
	{ FJTSU_VENDORID,	FJTSU_DEVICEID_PW008GE5 },
	{ 0, 0 }		/* sentinel */
};
/*
 * Map PCI vendor IDs to human-readable names for the probe string.
 * NULL-terminated.
 */
static const struct bge_vendor {
	uint16_t	v_id;	/* PCI vendor ID */
	const char	*v_name;
} bge_vendors[] = {
	{ ALTEON_VENDORID,	"Alteon" },
	{ ALTIMA_VENDORID,	"Altima" },
	{ APPLE_VENDORID,	"Apple" },
	{ BCOM_VENDORID,	"Broadcom" },
	{ SK_VENDORID,		"SysKonnect" },
	{ TC_VENDORID,		"3Com" },
	{ FJTSU_VENDORID,	"Fujitsu" },
	{ 0, NULL }		/* sentinel */
};
/*
 * Map full chip IDs (ASIC revision + stepping) to human-readable
 * names, used when reporting the exact silicon revision.
 * NULL-terminated.
 */
static const struct bge_revision {
	uint32_t	br_chipid;	/* full chip ID */
	const char	*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0,	"BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1,	"BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0,	"BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1,	"BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2,	"BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3,	"BCM5700 B3" },
	{ BGE_CHIPID_BCM5700_ALTIMA,	"BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0,	"BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0,	"BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0,	"BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2,	"BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5,	"BCM5701 B5" },
	{ BGE_CHIPID_BCM5703_A0,	"BCM5703 A0" },
	{ BGE_CHIPID_BCM5703_A1,	"BCM5703 A1" },
	{ BGE_CHIPID_BCM5703_A2,	"BCM5703 A2" },
	{ BGE_CHIPID_BCM5703_A3,	"BCM5703 A3" },
	{ BGE_CHIPID_BCM5703_B0,	"BCM5703 B0" },
	{ BGE_CHIPID_BCM5704_A0,	"BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1,	"BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2,	"BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3,	"BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0,	"BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0,	"BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1,	"BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2,	"BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3,	"BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0,	"BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1,	"BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3,	"BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0,	"BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1,	"BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0,	"BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1,	"BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2,	"BCM5750 C2" },
	{ BGE_CHIPID_BCM5714_A0,	"BCM5714 A0" },
	{ BGE_CHIPID_BCM5752_A0,	"BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1,	"BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2,	"BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_B0,	"BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3,	"BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0,	"BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1,	"BCM5715 A1" },
	{ BGE_CHIPID_BCM5715_A3,	"BCM5715 A3" },
	{ BGE_CHIPID_BCM5717_A0,	"BCM5717 A0" },
	{ BGE_CHIPID_BCM5717_B0,	"BCM5717 B0" },
	{ BGE_CHIPID_BCM5717_C0,	"BCM5717 C0" },
	{ BGE_CHIPID_BCM5719_A0,	"BCM5719 A0" },
	{ BGE_CHIPID_BCM5720_A0,	"BCM5720 A0" },
	{ BGE_CHIPID_BCM5755_A0,	"BCM5755 A0" },
	{ BGE_CHIPID_BCM5755_A1,	"BCM5755 A1" },
	{ BGE_CHIPID_BCM5755_A2,	"BCM5755 A2" },
	{ BGE_CHIPID_BCM5722_A0,	"BCM5722 A0" },
	{ BGE_CHIPID_BCM5761_A0,	"BCM5761 A0" },
	{ BGE_CHIPID_BCM5761_A1,	"BCM5761 A1" },
	{ BGE_CHIPID_BCM5762_A0,	"BCM5762 A0" },
	{ BGE_CHIPID_BCM5784_A0,	"BCM5784 A0" },
	{ BGE_CHIPID_BCM5784_A1,	"BCM5784 A1" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_CHIPID_BCM5787_A0,	"BCM5754/5787 A0" },
	{ BGE_CHIPID_BCM5787_A1,	"BCM5754/5787 A1" },
	{ BGE_CHIPID_BCM5787_A2,	"BCM5754/5787 A2" },
	{ BGE_CHIPID_BCM5906_A1,	"BCM5906 A1" },
	{ BGE_CHIPID_BCM5906_A2,	"BCM5906 A2" },
	{ BGE_CHIPID_BCM57765_A0,	"BCM57765 A0" },
	{ BGE_CHIPID_BCM57765_B0,	"BCM57765 B0" },
	{ BGE_CHIPID_BCM57780_A0,	"BCM57780 A0" },
	{ BGE_CHIPID_BCM57780_A1,	"BCM57780 A1" },
	{ 0, NULL }		/* sentinel */
};
/*
* Some defaults for major revisions, so that newer steppings
* that we don't know about have a shot at working.
*/
/* Fallback names keyed on the ASIC (major) revision only, consulted
 * when the exact stepping is absent from bge_revisions[] above. */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700,		"unknown BCM5700" },
	{ BGE_ASICREV_BCM5701,		"unknown BCM5701" },
	{ BGE_ASICREV_BCM5703,		"unknown BCM5703" },
	{ BGE_ASICREV_BCM5704,		"unknown BCM5704" },
	{ BGE_ASICREV_BCM5705,		"unknown BCM5705" },
	{ BGE_ASICREV_BCM5750,		"unknown BCM5750" },
	{ BGE_ASICREV_BCM5714_A0,	"unknown BCM5714" },
	{ BGE_ASICREV_BCM5752,		"unknown BCM5752" },
	{ BGE_ASICREV_BCM5780,		"unknown BCM5780" },
	{ BGE_ASICREV_BCM5714,		"unknown BCM5714" },
	{ BGE_ASICREV_BCM5755,		"unknown BCM5755" },
	{ BGE_ASICREV_BCM5761,		"unknown BCM5761" },
	{ BGE_ASICREV_BCM5784,		"unknown BCM5784" },
	{ BGE_ASICREV_BCM5785,		"unknown BCM5785" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_ASICREV_BCM5787,		"unknown BCM5754/5787" },
	{ BGE_ASICREV_BCM5906,		"unknown BCM5906" },
	{ BGE_ASICREV_BCM57765,		"unknown BCM57765" },
	{ BGE_ASICREV_BCM57766,		"unknown BCM57766" },
	{ BGE_ASICREV_BCM57780,		"unknown BCM57780" },
	{ BGE_ASICREV_BCM5717,		"unknown BCM5717" },
	{ BGE_ASICREV_BCM5719,		"unknown BCM5719" },
	{ BGE_ASICREV_BCM5720,		"unknown BCM5720" },
	{ BGE_ASICREV_BCM5762,		"unknown BCM5762" },
	{ 0, NULL }		/* sentinel */
};
/* Chip-family predicates, testing feature flags set at attach time. */
#define	BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_FLAG_JUMBO)
#define	BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
#define	BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5705_PLUS)
#define	BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
#define	BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_575X_PLUS)
#define	BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5755_PLUS)
#define	BGE_IS_5717_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5717_PLUS)
#define	BGE_IS_57765_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_57765_PLUS)
/* Probe/identification helpers. */
static uint32_t bge_chipid(device_t);
static const struct bge_vendor * bge_lookup_vendor(uint16_t);
static const struct bge_revision * bge_lookup_rev(uint32_t);

/* Pluggable strategies for obtaining the station address. */
typedef int	(*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);

/* Newbus device interface. */
static int bge_probe(device_t);
static int bge_attach(device_t);
static int bge_detach(device_t);
static int bge_suspend(device_t);
static int bge_resume(device_t);
static void bge_release_resources(struct bge_softc *);

/* DMA setup/teardown. */
static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int bge_dma_alloc(struct bge_softc *);
static void bge_dma_free(struct bge_softc *);
static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
    bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
static void bge_devinfo(struct bge_softc *);
static int bge_mbox_reorder(struct bge_softc *);

/* Station-address retrieval (firmware, memory, NVRAM, EEPROM). */
static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr(struct bge_softc *, uint8_t[]);

/* Datapath: TX/RX completion, statistics, encapsulation. */
static void bge_txeof(struct bge_softc *, uint16_t);
static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
static int bge_rxeof(struct bge_softc *, uint16_t, int);
static void bge_asf_driver_up (struct bge_softc *);
static void bge_tick(void *);
static void bge_stats_clear_regs(struct bge_softc *);
static void bge_stats_update(struct bge_softc *);
static void bge_stats_update_regs(struct bge_softc *);
static struct mbuf *bge_check_short_dma(struct mbuf *);
static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
    uint16_t *, uint16_t *);
static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);

/* Interrupt handlers and ifnet entry points. */
static void bge_intr(void *);
static int bge_msi_intr(void *);
static void bge_intr_task(void *, int);
static void bge_start(if_t);
static void bge_start_locked(if_t);
static void bge_start_tx(struct bge_softc *, uint32_t);
static int bge_ioctl(if_t, u_long, caddr_t);
static void bge_init_locked(struct bge_softc *);
static void bge_init(void *);
static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);
static void bge_stop(struct bge_softc *);
static void bge_watchdog(struct bge_softc *);
static int bge_shutdown(device_t);
static int bge_ifmedia_upd_locked(if_t);
static int bge_ifmedia_upd(if_t);
static void bge_ifmedia_sts(if_t, struct ifmediareq *);
static uint64_t bge_get_counter(if_t, ift_counter);

/* NVRAM/EEPROM access. */
static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);

/* RX filter programming. */
static void bge_setpromisc(struct bge_softc *);
static void bge_setmulti(struct bge_softc *);
static void bge_setvlan(struct bge_softc *);

/* RX buffer and ring management. */
static __inline void bge_rxreuse_std(struct bge_softc *, int);
static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
static int bge_newbuf_std(struct bge_softc *, int);
static int bge_newbuf_jumbo(struct bge_softc *, int);
static int bge_init_rx_ring_std(struct bge_softc *);
static void bge_free_rx_ring_std(struct bge_softc *);
static int bge_init_rx_ring_jumbo(struct bge_softc *);
static void bge_free_rx_ring_jumbo(struct bge_softc *);
static void bge_free_tx_ring(struct bge_softc *);
static int bge_init_tx_ring(struct bge_softc *);

/* Chip/block initialization and register access helpers. */
static int bge_chipinit(struct bge_softc *);
static int bge_blockinit(struct bge_softc *);
static uint32_t bge_dma_swap_options(struct bge_softc *);
static int bge_has_eaddr(struct bge_softc *);
static uint32_t bge_readmem_ind(struct bge_softc *, int);
static void bge_writemem_ind(struct bge_softc *, int, int);
static void bge_writembx(struct bge_softc *, int, int);
#ifdef notdef
static uint32_t bge_readreg_ind(struct bge_softc *, int);
#endif
static void bge_writemem_direct(struct bge_softc *, int, int);
static void bge_writereg_ind(struct bge_softc *, int, int);

/* MII bus interface. */
static int bge_miibus_readreg(device_t, int, int);
static int bge_miibus_writereg(device_t, int, int, int);
static void bge_miibus_statchg(device_t);
#ifdef DEVICE_POLLING
static int bge_poll(if_t ifp, enum poll_cmd cmd, int count);
#endif

/* Reset/firmware handshake. */
#define	BGE_RESET_SHUTDOWN	0
#define	BGE_RESET_START		1
#define	BGE_RESET_SUSPEND	2
static void bge_sig_post_reset(struct bge_softc *, int);
static void bge_sig_legacy(struct bge_softc *, int);
static void bge_sig_pre_reset(struct bge_softc *, int);
static void bge_stop_fw(struct bge_softc *);
static int bge_reset(struct bge_softc *);
static void bge_link_upd(struct bge_softc *);

/* APE (management firmware) support. */
static void bge_ape_lock_init(struct bge_softc *);
static void bge_ape_read_fw_ver(struct bge_softc *);
static int bge_ape_lock(struct bge_softc *, int);
static void bge_ape_unlock(struct bge_softc *, int);
static void bge_ape_send_event(struct bge_softc *, uint32_t);
static void bge_ape_driver_state_change(struct bge_softc *, int);

/*
 * The BGE_REGISTER_DEBUG option is only for low-level debugging.  It may
 * leak information to untrusted users.  It is also known to cause alignment
 * traps on certain architectures.
 */
#ifdef BGE_REGISTER_DEBUG
static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
static int bge_sysctl_ape_read(SYSCTL_HANDLER_ARGS);
static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
#endif
static void bge_add_sysctls(struct bge_softc *);
static void bge_add_sysctl_stats_regs(struct bge_softc *,
    struct sysctl_ctx_list *, struct sysctl_oid_list *);
static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
    struct sysctl_oid_list *);
static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
/* Register debugnet (kernel crash-dump-over-network) hooks. */
DEBUGNET_DEFINE(bge);

static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t bge_driver = {
	"bge",
	bge_methods,
	sizeof(struct bge_softc)
};

/* Attach to the PCI bus; export the device table for PNP matching. */
DRIVER_MODULE(bge, pci, bge_driver, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device", pci, bge, bge_devs,
    nitems(bge_devs) - 1);
DRIVER_MODULE(miibus, bge, miibus_driver, 0, 0);

/* Tunable: allow ASF management firmware mode if the NIC supports it. */
static int bge_allow_asf = 1;

static SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "BGE driver parameters");
SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RDTUN, &bge_allow_asf, 0,
    "Allow ASF mode if available");
/*
 * Report whether the adapter is expected to provide a valid
 * Ethernet station address.  Currently always true.
 */
static int
bge_has_eaddr(struct bge_softc *sc)
{
	return (1);
}
/*
 * Read a 32-bit word of NIC-local memory indirectly, through the
 * PCI config-space memory window (BGE_PCI_MEMWIN_BASEADDR /
 * BGE_PCI_MEMWIN_DATA).  The window base is restored to 0 when done.
 */
static uint32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	device_t pdev;
	uint32_t word;

	/*
	 * The BCM5906 cannot reach the statistics block or the
	 * send rings 1-4 region through the memory window.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return (0);

	pdev = sc->bge_dev;
	pci_write_config(pdev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	word = pci_read_config(pdev, BGE_PCI_MEMWIN_DATA, 4);
	pci_write_config(pdev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
	return (word);
}
/*
 * Write a 32-bit word of NIC-local memory indirectly, through the
 * PCI config-space memory window.  The window base is restored to 0
 * when done.
 */
static void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
	device_t pdev;

	/* Same BCM5906 window restriction as bge_readmem_ind(). */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return;

	pdev = sc->bge_dev;
	pci_write_config(pdev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(pdev, BGE_PCI_MEMWIN_DATA, val, 4);
	pci_write_config(pdev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}
#ifdef notdef
/*
 * Read a NIC register indirectly through the PCI config-space
 * register window.  Currently unused (compiled out).
 */
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	device_t dev;

	dev = sc->bge_dev;
	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif
/*
 * Write a NIC register indirectly through the PCI config-space
 * register window (BGE_PCI_REG_BASEADDR / BGE_PCI_REG_DATA).
 */
static void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	pci_write_config(sc->bge_dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(sc->bge_dev, BGE_PCI_REG_DATA, val, 4);
}
/*
 * Write NIC memory directly through the memory-mapped BAR
 * (no config-space window indirection).
 */
static void
bge_writemem_direct(struct bge_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}
/*
 * Write a mailbox register.  The BCM5906 places its mailboxes in the
 * low-priority register range, so rebase the offset accordingly.  On
 * chips that reorder mailbox writes, read the register back to flush
 * the write before returning.
 */
static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
	if ((sc->bge_flags & BGE_FLAG_MBOX_REORDER) != 0)
		CSR_READ_4(sc, off);
}
/*
* Clear all stale locks and select the lock for this driver instance.
*/
/*
 * Clear all stale locks and select the lock for this driver instance.
 *
 * The APE lock-grant registers live at a different base on the 5761
 * than on later chips.  A write to a grant register releases the lock
 * bits written.  The per-driver bit is DRIVER0 for function 0 and
 * (1 << function) otherwise, except for the PHY locks which always
 * use DRIVER0.
 */
static void
bge_ape_lock_init(struct bge_softc *sc)
{
	uint32_t bit, regbase;
	int i;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
		regbase = BGE_APE_LOCK_GRANT;
	else
		regbase = BGE_APE_PER_LOCK_GRANT;

	/* Clear any stale locks. */
	for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
		switch (i) {
		case BGE_APE_LOCK_PHY0:
		case BGE_APE_LOCK_PHY1:
		case BGE_APE_LOCK_PHY2:
		case BGE_APE_LOCK_PHY3:
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
			break;
		default:
			if (sc->bge_func_addr == 0)
				bit = BGE_APE_LOCK_GRANT_DRIVER0;
			else
				bit = (1 << sc->bge_func_addr);
		}
		/* Grant registers are 4 bytes apart, indexed by lock number. */
		APE_WRITE_4(sc, regbase + 4 * i, bit);
	}

	/* Select the PHY lock based on the device's function number. */
	switch (sc->bge_func_addr) {
	case 0:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0;
		break;
	case 1:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1;
		break;
	case 2:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2;
		break;
	case 3:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3;
		break;
	default:
		device_printf(sc->bge_dev,
		    "PHY lock not supported on this function\n");
	}
}
/*
* Check for APE firmware, set flags, and print version info.
*/
/*
 * Check for APE firmware, set flags, and print version info.
 *
 * Sets BGE_MFW_ON_APE (and a firmware-type flag) in bge_mfw_flags when
 * a ready APE firmware image is detected via its shared-memory
 * signature and status registers.
 */
static void
bge_ape_read_fw_ver(struct bge_softc *sc)
{
	const char *fwtype;
	uint32_t apedata, features;

	/* Check for a valid APE signature in shared memory. */
	apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
	if (apedata != BGE_APE_SEG_SIG_MAGIC) {
		sc->bge_mfw_flags &= ~ BGE_MFW_ON_APE;
		return;
	}

	/* Check if APE firmware is running. */
	apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
	if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
		device_printf(sc->bge_dev, "APE signature found "
		    "but FW status not ready! 0x%08x\n", apedata);
		return;
	}

	sc->bge_mfw_flags |= BGE_MFW_ON_APE;

	/* Fetch the APE firmware type and version. */
	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
	features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
	if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
		fwtype = "NCSI";
	} else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
		fwtype = "DASH";
	} else
		fwtype = "UNKN";

	/* Print the APE firmware version. */
	device_printf(sc->bge_dev, "APE FW version: %s v%d.%d.%d.%d\n",
	    fwtype,
	    (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
	    (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
	    (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
	    (apedata & BGE_APE_FW_VERSION_BLDMSK));
}
/*
 * Acquire an APE hardware lock.  A no-op (success) when no APE
 * firmware is present.  Returns 0 on success, EINVAL for an unknown
 * lock number, or EBUSY if the lock could not be obtained within
 * roughly one second (20000 polls at 50us each).
 */
static int
bge_ape_lock(struct bge_softc *sc, int locknum)
{
	uint32_t bit, gnt, req, status;
	int i, off;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return (0);

	/* Lock request/grant registers have different bases. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5761) {
		req = BGE_APE_LOCK_REQ;
		gnt = BGE_APE_LOCK_GRANT;
	} else {
		req = BGE_APE_PER_LOCK_REQ;
		gnt = BGE_APE_PER_LOCK_GRANT;
	}

	/* Request/grant registers are 4 bytes apart per lock number. */
	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		/* Lock required when using GPIO. */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
			return (0);
		if (sc->bge_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << sc->bge_func_addr);
		break;
	case BGE_APE_LOCK_GRC:
		/* Lock required to reset the device. */
		if (sc->bge_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << sc->bge_func_addr);
		break;
	case BGE_APE_LOCK_MEM:
		/* Lock required when accessing certain APE memory. */
		if (sc->bge_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << sc->bge_func_addr);
		break;
	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		/* Lock required when accessing PHYs. */
		bit = BGE_APE_LOCK_REQ_DRIVER0;
		break;
	default:
		return (EINVAL);
	}

	/* Request a lock. */
	APE_WRITE_4(sc, req + off, bit);

	/* Wait up to 1 second to acquire lock. */
	for (i = 0; i < 20000; i++) {
		status = APE_READ_4(sc, gnt + off);
		if (status == bit)
			break;
		DELAY(50);
	}

	/* Handle any errors. */
	if (status != bit) {
		device_printf(sc->bge_dev, "APE lock %d request failed! "
		    "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
		    locknum, req + off, bit & 0xFFFF, gnt + off,
		    status & 0xFFFF);
		/* Revoke the lock request. */
		APE_WRITE_4(sc, gnt + off, bit);
		return (EBUSY);
	}

	return (0);
}
/*
 * Release a previously acquired APE lock by writing our bit to the
 * grant register.  Mirrors the lock-number/bit selection logic of
 * bge_ape_lock(); silently ignores unknown lock numbers.
 */
static void
bge_ape_unlock(struct bge_softc *sc, int locknum)
{
	uint32_t bit, gnt;
	int off;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
		gnt = BGE_APE_LOCK_GRANT;
	else
		gnt = BGE_APE_PER_LOCK_GRANT;

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		/* The 5761 never took the GPIO lock in the first place. */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
			return;
		if (sc->bge_func_addr == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << sc->bge_func_addr);
		break;
	case BGE_APE_LOCK_GRC:
		if (sc->bge_func_addr == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << sc->bge_func_addr);
		break;
	case BGE_APE_LOCK_MEM:
		if (sc->bge_func_addr == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << sc->bge_func_addr);
		break;
	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		bit = BGE_APE_LOCK_GRANT_DRIVER0;
		break;
	default:
		return;
	}

	APE_WRITE_4(sc, gnt + off, bit);
}
/*
* Send an event to the APE firmware.
*/
/*
 * Send an event to the APE firmware.
 *
 * Polls up to ~1ms (10 tries, 100us apart) for the previous event to
 * be consumed, then posts the new event under the APE memory lock and
 * rings the APE doorbell.  Logs a warning on timeout.
 */
static void
bge_ape_send_event(struct bge_softc *sc, uint32_t event)
{
	uint32_t apedata;
	int i;

	/* NCSI does not support APE events. */
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	/* Wait up to 1ms for APE to service previous event. */
	for (i = 10; i > 0; i--) {
		if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
			break;
		apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
		if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
			/* Previous event consumed; post ours and ring. */
			APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
			    BGE_APE_EVENT_STATUS_EVENT_PENDING);
			bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
			APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
			break;
		}
		bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
		DELAY(100);
	}

	if (i == 0)
		device_printf(sc->bge_dev, "APE event 0x%08x send timed out\n",
		    event);
}
/*
 * Inform the APE firmware of a driver state transition (start,
 * shutdown, or suspend) via the host shared-memory area and an APE
 * event.  No-op when APE firmware is absent or `kind' is unknown.
 */
static void
bge_ape_driver_state_change(struct bge_softc *sc, int kind)
{
	uint32_t apedata, event;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	switch (kind) {
	case BGE_RESET_START:
		/* If this is the first load, clear the load counter. */
		apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
		if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC)
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
		else {
			apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
		}
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
		    BGE_APE_HOST_SEG_SIG_MAGIC);
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
		    BGE_APE_HOST_SEG_LEN_MAGIC);

		/* Add some version info if bge(4) supports it. */
		APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
		    BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
		APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
		    BGE_APE_HOST_BEHAV_NO_PHYLOCK);
		APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
		    BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_START);
		event = BGE_APE_EVENT_STATUS_STATE_START;
		break;
	case BGE_RESET_SHUTDOWN:
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_UNLOAD);
		event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case BGE_RESET_SUSPEND:
		event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
	    BGE_APE_EVENT_STATUS_STATE_CHNGE);
}
/*
* Map a single buffer address.
*/
/*
 * bus_dmamap_load() callback for single-buffer mappings: stash the
 * bus address of the lone segment in the caller-supplied context.
 */
static void
bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct bge_dmamap_arg *ctx = arg;

	if (error != 0)
		return;
	KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
	ctx->bge_busaddr = segs->ds_addr;
}
/*
 * Read one byte from NVRAM at `addr', storing it in *dest.
 * Returns 0 on success, 1 on arbitration or command timeout.
 *
 * Sequence: acquire the software arbitration lock, enable NVRAM
 * access, issue a word-aligned read command, wait for completion,
 * extract the requested byte, then restore access state and release
 * the lock.
 */
static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return (1);

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	/* Reads are word-granular; mask off the low address bits. */
	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		if_printf(sc->bge_ifp, "nvram read timed out\n");
		return (1);
	}

	/* Get result: byte-swap the word and pick out the wanted byte. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return (0);
}
/*
* Read a sequence of bytes from NVRAM.
*/
/*
 * Read `cnt' bytes from NVRAM starting at `off' into `dest'.
 * Only the BCM5906 exposes NVRAM this way; other chips fail
 * immediately.  Returns 0 on success, 1 on failure.
 */
static int
bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	uint8_t val;
	int i;

	if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
		return (1);

	val = 0;
	for (i = 0; i < cnt; i++) {
		if (bge_nvram_getbyte(sc, off + i, &val) != 0)
			return (1);
		dest[i] = val;
	}
	return (0);
}
/*
* Read a byte of data stored in the EEPROM at address 'addr.' The
* BCM570x supports both the traditional bitbang interface and an
* auto access interface for reading the EEPROM. We use the auto
* access method.
*/
/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.  Returns 0 on success, 1 on timeout.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		device_printf(sc->bge_dev, "EEPROM read timed out\n");
		return (1);
	}

	/* Get result: select the requested byte out of the 32-bit word. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}
/*
* Read a sequence of bytes from the EEPROM.
*/
/*
 * Read `cnt' bytes from the EEPROM starting at `off' into `dest'.
 * Returns 0 on success, 1 if any byte read fails.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	uint8_t val;
	int i;

	val = 0;
	for (i = 0; i < cnt; i++) {
		if (bge_eeprom_getbyte(sc, off + i, &val) != 0)
			return (1);
		dest[i] = val;
	}
	return (0);
}
/*
 * MII bus read method.  Takes the PHY APE lock (when APE firmware is
 * present), temporarily disables MI autopolling, and issues a read
 * through the MI communication register.  Returns the 16-bit register
 * value, or 0 on timeout, read failure, or if the APE lock could not
 * be taken.
 */
static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return (0);

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	/* Poll for the PHY register access to complete. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if ((val & BGE_MICOMM_BUSY) == 0) {
			/* Re-read after a short settle to get the data. */
			DELAY(5);
			val = CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev,
		    "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
		    phy, reg, val);
		val = 0;
	}

	/* Restore the autopoll bit if necessary. */
	if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
		DELAY(80);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	if (val & BGE_MICOMM_READFAIL)
		return (0);

	return (val & 0xFFFF);
}
/*
 * MII bus write method.  Mirrors bge_miibus_readreg(): APE PHY lock,
 * autopoll suspend, MI communication register write, poll for
 * completion, restore.  Always returns 0 (the miibus write method
 * has no meaningful error return); timeouts are only logged.
 */
static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc;
	int i;

	sc = device_get_softc(dev);

	/* The 5906 PHY does not tolerate writes to these registers. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
		return (0);

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return (0);

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			DELAY(5);
			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
			break;
		}
	}

	/* Restore the autopoll bit if necessary. */
	if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
		DELAY(80);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	if (i == BGE_TIMEOUT)
		device_printf(sc->bge_dev,
		    "PHY write timed out (phy %d, reg %d, val 0x%04x)\n",
		    phy, reg, val);

	return (0);
}
/*
 * MII bus status-change method.  Recomputes sc->bge_link from the
 * current media status, then reprograms MAC port mode (MII/GMII),
 * duplex, and TX/RX flow control to match the negotiated link.
 * Does nothing when the interface is not running or the link is down.
 */
static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc;
	struct mii_data *mii;
	uint32_t mac_mode, rx_mode, tx_mode;

	sc = device_get_softc(dev);
	if ((if_getdrvflags(sc->bge_ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->bge_miibus);

	/* Derive link state from the media status and active subtype. */
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->bge_link = 1;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_2500_SX:
			/* The 5906 is fast ethernet only. */
			if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
				sc->bge_link = 1;
			else
				sc->bge_link = 0;
			break;
		default:
			sc->bge_link = 0;
			break;
		}
	} else
		sc->bge_link = 0;
	if (sc->bge_link == 0)
		return;

	/*
	 * APE firmware touches these registers to keep the MAC
	 * connected to the outside world. Try to keep the
	 * accesses atomic.
	 */

	/* Set the port mode (MII/GMII) to match the link speed. */
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
	tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
	rx_mode = CSR_READ_4(sc, BGE_RX_MODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		mac_mode |= BGE_PORTMODE_GMII;
	else
		mac_mode |= BGE_PORTMODE_MII;

	/* Set MAC flow control behavior to match link flow control settings. */
	tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
	rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
	} else
		mac_mode |= BGE_MACMODE_HALF_DUPLEX;

	CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode);
	DELAY(40);
	CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
	CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
}
/*
 * Initialize a standard receive ring descriptor.
 *
 * Allocates a fresh receive mbuf for slot 'i' of the standard RX
 * producer ring (a 9k jumbo cluster when BGE_FLAG_JUMBO_STD is set and
 * the MTU requires frames larger than a regular cluster, otherwise an
 * MCLBYTES cluster), DMA-loads it via the spare map and installs it in
 * the slot and the hardware descriptor at index sc->bge_std.
 * Returns 0 on success, or ENOBUFS / a bus_dma error; on failure the
 * mbuf previously installed in the slot is left untouched.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i)
{
	struct mbuf *m;
	struct bge_rx_bd *r;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int error, nsegs;

	if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
	    (if_getmtu(sc->bge_ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
		if (m == NULL)
			return (ENOBUFS);
		m->m_len = m->m_pkthdr.len = MJUM9BYTES;
	} else {
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return (ENOBUFS);
		m->m_len = m->m_pkthdr.len = MCLBYTES;
	}
	/* Align the IP header unless the chip has the RX alignment bug. */
	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m, ETHER_ALIGN);
	/* Load into the spare map first so a failure leaves the slot intact. */
	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
	    sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}
	/* Tear down the DMA mapping of the mbuf being replaced, if any. */
	if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
		bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
		    sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
		    sc->bge_cdata.bge_rx_std_dmamap[i]);
	}
	/* Swap the spare map into the slot; the old map becomes the spare. */
	map = sc->bge_cdata.bge_rx_std_dmamap[i];
	sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
	sc->bge_cdata.bge_rx_std_sparemap = map;
	sc->bge_cdata.bge_rx_std_chain[i] = m;
	sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
	/* Fill in the hardware RX buffer descriptor. */
	r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = segs[0].ds_len;
	r->bge_idx = i;
	bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
	return (0);
}
/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 *
 * The 9k cluster may be DMA-loaded as up to BGE_NSEG_JUMBO segments,
 * so slot 'i' is described with an extended RX buffer descriptor
 * carrying up to four address/length pairs.  Returns 0 on success, or
 * ENOBUFS / a bus_dma error; on failure the mbuf previously installed
 * in the slot is left untouched.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i)
{
	bus_dma_segment_t segs[BGE_NSEG_JUMBO];
	bus_dmamap_t map;
	struct bge_extrx_bd *r;
	struct mbuf *m;
	int error, nsegs;

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	if (m_cljget(m, M_NOWAIT, MJUM9BYTES) == NULL) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
	/* Align the IP header unless the chip has the RX alignment bug. */
	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m, ETHER_ALIGN);
	/* Load into the spare map first so a failure leaves the slot intact. */
	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}
	/* Tear down the DMA mapping of the mbuf being replaced, if any. */
	if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
		bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
		    sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
		    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
	}
	/* Swap the spare map into the slot; the old map becomes the spare. */
	map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
	sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
	    sc->bge_cdata.bge_rx_jumbo_sparemap;
	sc->bge_cdata.bge_rx_jumbo_sparemap = map;
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
	sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
	sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
	sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
	sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
	/*
	 * Fill in the extended RX buffer descriptor.
	 */
	r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
	r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
	r->bge_idx = i;
	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
	/* Record each DMA segment; cases intentionally fall through. */
	switch (nsegs) {
	case 4:
		r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
		r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
		r->bge_len3 = segs[3].ds_len;
		sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
		/* FALLTHROUGH */
	case 3:
		r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
		r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
		r->bge_len2 = segs[2].ds_len;
		sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
		/* FALLTHROUGH */
	case 2:
		r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
		r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
		r->bge_len1 = segs[1].ds_len;
		sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
		/* FALLTHROUGH */
	case 1:
		r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
		r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
		r->bge_len0 = segs[0].ds_len;
		sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
		break;
	default:
		panic("%s: %d segments\n", __func__, nsegs);
	}
	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
	return (0);
}
/*
 * Populate the standard RX producer ring: zero the ring, attach a
 * fresh mbuf to every slot, then hand the fully stocked ring to the
 * chip by publishing the producer index.  Returns 0 on success or the
 * error from the first failed buffer allocation.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int rv, slot;

	bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
	sc->bge_std = 0;
	for (slot = 0; slot < BGE_STD_RX_RING_CNT; slot++) {
		rv = bge_newbuf_std(sc, slot);
		if (rv != 0)
			return (rv);
		BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
	}
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
	sc->bge_std = 0;
	/* Tell the hardware every descriptor in the ring is ready. */
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
	return (0);
}
/*
 * Release every mbuf still attached to the standard RX ring and clear
 * the corresponding hardware descriptors.
 */
static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	struct mbuf *m;
	int slot;

	for (slot = 0; slot < BGE_STD_RX_RING_CNT; slot++) {
		m = sc->bge_cdata.bge_rx_std_chain[slot];
		if (m != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[slot],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[slot]);
			m_freem(m);
			sc->bge_cdata.bge_rx_std_chain[slot] = NULL;
		}
		bzero(&sc->bge_ldata.bge_rx_std_ring[slot],
		    sizeof(struct bge_rx_bd));
	}
}
/*
 * Populate and enable the jumbo RX producer ring: zero the ring,
 * attach a 9k buffer to every slot, program the ring's RCB for
 * extended buffer descriptors and publish the producer index.
 * Returns 0 on success or the error from the first failed allocation.
 */
static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	int rv, slot;

	bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
	sc->bge_jumbo = 0;
	for (slot = 0; slot < BGE_JUMBO_RX_RING_CNT; slot++) {
		rv = bge_newbuf_jumbo(sc, slot);
		if (rv != 0)
			return (rv);
		BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
	}
	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
	    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
	sc->bge_jumbo = 0;
	/* Enable the jumbo receive producer ring. */
	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags =
	    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
	return (0);
}
/*
 * Release every mbuf still attached to the jumbo RX ring and clear
 * the corresponding extended hardware descriptors.
 */
static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	struct mbuf *m;
	int slot;

	for (slot = 0; slot < BGE_JUMBO_RX_RING_CNT; slot++) {
		m = sc->bge_cdata.bge_rx_jumbo_chain[slot];
		if (m != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[slot],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[slot]);
			m_freem(m);
			sc->bge_cdata.bge_rx_jumbo_chain[slot] = NULL;
		}
		bzero(&sc->bge_ldata.bge_rx_jumbo_ring[slot],
		    sizeof(struct bge_extrx_bd));
	}
}
/*
 * Free any mbufs still queued on the TX ring and clear the ring
 * descriptors.  A no-op when the ring was never allocated.
 */
static void
bge_free_tx_ring(struct bge_softc *sc)
{
	struct mbuf *m;
	int slot;

	if (sc->bge_ldata.bge_tx_ring == NULL)
		return;

	for (slot = 0; slot < BGE_TX_RING_CNT; slot++) {
		m = sc->bge_cdata.bge_tx_chain[slot];
		if (m != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
			    sc->bge_cdata.bge_tx_dmamap[slot],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
			    sc->bge_cdata.bge_tx_dmamap[slot]);
			m_freem(m);
			sc->bge_cdata.bge_tx_chain[slot] = NULL;
		}
		bzero(&sc->bge_ldata.bge_tx_ring[slot],
		    sizeof(struct bge_tx_bd));
	}
}
/*
 * Reset the transmit ring bookkeeping and hand an empty host-memory
 * send ring to the chip.  Always returns 0.
 */
static int
bge_init_tx_ring(struct bge_softc *sc)
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
	    sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);

	/* Initialize transmit producer index for host-memory send ring. */
	sc->bge_tx_prodidx = 0;
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* 5700 b2 errata: repeat the mailbox write on BX-rev parts. */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	/* 5700 b2 errata: repeat the mailbox write on BX-rev parts. */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return (0);
}
/*
 * Sync the chip's RX-promiscuous bit with the interface's IFF_PROMISC
 * flag.  Caller must hold the softc lock.
 */
static void
bge_setpromisc(struct bge_softc *sc)
{
	int promisc;

	BGE_LOCK_ASSERT(sc);

	promisc = (if_getflags(sc->bge_ifp) & IFF_PROMISC) != 0;
	if (promisc)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
}
/*
 * if_foreach_llmaddr() callback: fold one link-level multicast
 * address into the 128-bit hash filter accumulated in 'arg' (four
 * uint32_t words).  Returns 1 so every visited address is counted.
 */
static u_int
bge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *filt = arg;
	int bit;

	/* Hash is the low 7 bits of the little-endian CRC32 of the MAC. */
	bit = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN) & 0x7F;
	filt[(bit & 0x60) >> 5] |= 1 << (bit & 0x1F);
	return (1);
}
/*
 * Program the chip's multicast hash filter.  In allmulti/promiscuous
 * mode the filter is opened completely; otherwise the four hash
 * registers are rebuilt from the interface's multicast address list.
 * Caller must hold the softc lock.
 */
static void
bge_setmulti(struct bge_softc *sc)
{
	uint32_t filt[4] = { 0, 0, 0, 0 };
	if_t ifp;
	int reg;

	BGE_LOCK_ASSERT(sc);
	ifp = sc->bge_ifp;

	if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		/* Accept all multicast frames. */
		for (reg = 0; reg < 4; reg++)
			CSR_WRITE_4(sc, BGE_MAR0 + (reg * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (reg = 0; reg < 4; reg++)
		CSR_WRITE_4(sc, BGE_MAR0 + (reg * 4), 0);

	/* Accumulate the hash bits for every address, then program them. */
	if_foreach_llmaddr(ifp, bge_hash_maddr, filt);
	for (reg = 0; reg < 4; reg++)
		CSR_WRITE_4(sc, BGE_MAR0 + (reg * 4), filt[reg]);
}
/*
 * Sync hardware VLAN tag stripping with the interface capabilities:
 * when IFCAP_VLAN_HWTAGGING is enabled the "keep VLAN tag" diagnostic
 * bit is cleared so the chip strips tags, otherwise it is set so tags
 * stay in the frame.  Caller must hold the softc lock.
 */
static void
bge_setvlan(struct bge_softc *sc)
{
	int hwtag;

	BGE_LOCK_ASSERT(sc);

	hwtag = (if_getcapenable(sc->bge_ifp) & IFCAP_VLAN_HWTAGGING) != 0;
	if (hwtag)
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
	else
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
}
/*
 * Notify ASF/IPMI firmware that a chip reset is about to happen:
 * write the driver-alive magic, post the per-event driver state when
 * the new handshake protocol is in use, and inform the APE for
 * start/suspend events.
 */
static void
bge_sig_pre_reset(struct bge_softc *sc, int type)
{
	/*
	 * Some chips don't like this so only do this if ASF is enabled
	 */
	if (sc->bge_asf_mode != 0)
		bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);

	if ((sc->bge_asf_mode & ASF_NEW_HANDSHAKE) != 0) {
		if (type == BGE_RESET_START)
			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
			    BGE_FW_DRV_STATE_START);
		else if (type == BGE_RESET_SHUTDOWN)
			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
			    BGE_FW_DRV_STATE_UNLOAD);
		else if (type == BGE_RESET_SUSPEND)
			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
			    BGE_FW_DRV_STATE_SUSPEND);
	}

	if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND)
		bge_ape_driver_state_change(sc, type);
}
/*
 * Notify ASF/IPMI firmware that a chip reset has completed, and
 * inform the APE after a shutdown reset.
 */
static void
bge_sig_post_reset(struct bge_softc *sc, int type)
{
	if ((sc->bge_asf_mode & ASF_NEW_HANDSHAKE) != 0) {
		if (type == BGE_RESET_START)
			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
			    BGE_FW_DRV_STATE_START_DONE);
		else if (type == BGE_RESET_SHUTDOWN)
			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
			    BGE_FW_DRV_STATE_UNLOAD_DONE);
	}

	if (type == BGE_RESET_SHUTDOWN)
		bge_ape_driver_state_change(sc, type);
}
/*
 * Legacy (pre-new-handshake) firmware notification: post the driver
 * state for start/shutdown events when ASF is enabled.
 */
static void
bge_sig_legacy(struct bge_softc *sc, int type)
{
	if (sc->bge_asf_mode == 0)
		return;

	if (type == BGE_RESET_START)
		bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
		    BGE_FW_DRV_STATE_START);
	else if (type == BGE_RESET_SHUTDOWN)
		bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
		    BGE_FW_DRV_STATE_UNLOAD);
}
/*
 * Pause the ASF/IPMI firmware: post a PAUSE command in the firmware
 * command mailbox, raise the RX CPU driver event and poll (up to
 * 100 * 10us) for the firmware to acknowledge by clearing the event.
 */
static void
bge_stop_fw(struct bge_softc *sc)
{
	int tries;

	if (sc->bge_asf_mode == 0)
		return;

	bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
	CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
	    CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);
	for (tries = 0; tries < 100; tries++) {
		if ((CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
		    BGE_RX_CPU_DRV_EVENT) == 0)
			break;
		DELAY(10);
	}
}
/*
 * Build the byte/word-swap bits for the mode control register.  The
 * non-frame byte swap is added only when the host is big-endian.
 */
static uint32_t
bge_dma_swap_options(struct bge_softc *sc)
{
	uint32_t opts = BGE_MODECTL_WORDSWAP_NONFRAME |
	    BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;

#if BYTE_ORDER == BIG_ENDIAN
	opts |= BGE_MODECTL_BYTESWAP_NONFRAME;
#endif
	return (opts);
}
/*
 * Do endian, PCI and DMA initialization.
 *
 * Programs host byte order, clears the statistics and status blocks
 * in NIC-internal memory, configures the PCI DMA read/write control
 * register for the bus type in use (PCIe / PCI-X / conventional PCI,
 * with several chip-revision-specific workarounds) and establishes
 * the general mode control value.  Always returns 0.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	uint32_t dma_rw_ctl, misc_ctl, mode_ctl;
	uint16_t val;
	int i;

	/* Set endianness before we access any non-PCI registers. */
	misc_ctl = BGE_INIT;
	if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
		misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Likewise clear the status block in NIC-internal memory. */
	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
		/*
		 * Fix data corruption caused by non-qword write with WB.
		 * Fix master abort in PCI mode.
		 * Fix PCI latency timer.
		 */
		val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
		val |= (1 << 10) | (1 << 12) | (1 << 13);
		pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM57765 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM57766) {
		/*
		 * For the 57766 and non Ax versions of 57765, bootcode
		 * needs to setup the PCIE Fast Training Sequence (FTS)
		 * value to prevent transmit hangs.
		 */
		if (sc->bge_chiprev != BGE_CHIPREV_57765_AX) {
			CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL,
			    CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL) |
			    BGE_CPMU_PADRNG_CTL_RDIV2);
		}
	}

	/*
	 * Set up the PCI DMA control register.  The watermark values
	 * below are bus- and chip-revision-specific.
	 */
	dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
	    BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		/* PCI Express: write watermark depends on max payload. */
		if (sc->bge_mps >= 256)
			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
		else
			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
	} else if (sc->bge_flags & BGE_FLAG_PCIX) {
		if (BGE_IS_5714_FAMILY(sc)) {
			/* 256 bytes for read and write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
			dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
			    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
			    BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
			/*
			 * In the BCM5703, the DMA read watermark should
			 * be set to less than or equal to the maximum
			 * memory read byte count of the PCI-X command
			 * register.
			 */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			/* 1536 bytes for read, 384 bytes for write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
		} else {
			/* 384 bytes for read and write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
			    0x0F;
		}
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			/* Set ONE_DMA_AT_ONCE for hardware workaround. */
			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
			if (tmp == 6 || tmp == 7)
				dma_rw_ctl |=
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;

			/* Set PCI-X DMA write workaround. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
		}
	} else {
		/* Conventional PCI bus: 256 bytes for read and write. */
		dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
		    BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
		if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
		    sc->bge_asicrev != BGE_ASICREV_BCM5750)
			dma_rw_ctl |= 0x0F;
	}
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5701)
		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
		    BGE_PCIDMARWCTL_ASRT_ALL_BE;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	if (BGE_IS_5717_PLUS(sc)) {
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
			dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
		/*
		 * Enable HW workaround for controllers that misinterpret
		 * a status tag update and leave interrupts permanently
		 * disabled.
		 */
		if (!BGE_IS_57765_PLUS(sc) &&
		    sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
		    sc->bge_asicrev != BGE_ASICREV_BCM5762)
			dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
	}
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	mode_ctl = bge_dma_swap_options(sc);
	if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5762) {
		/* Retain Host-2-BMC settings written by APE firmware. */
		mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
		    (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
		    BGE_MODECTL_WORDSWAP_B2HRX_DATA |
		    BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
	}
	mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
	    BGE_MODECTL_TX_NO_PHDR_CSUM;

	/*
	 * BCM5701 B5 have a bug causing data corruption when using
	 * 64-bit DMA reads, which can be terminated early and then
	 * completed later as 32-bit accesses, in combination with
	 * certain bridges.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
	    sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
		mode_ctl |= BGE_MODECTL_FORCE_PCI32;

	/*
	 * Tell the firmware the driver is running
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		mode_ctl |= BGE_MODECTL_STACKUP;

	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

	/* Set the timer prescaler (always 66 MHz). */
	CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);

	/* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		DELAY(40);	/* XXX */

		/* Put PHY into ready state */
		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
		DELAY(40);
	}

	return (0);
}
static int
bge_blockinit(struct bge_softc *sc)
{
struct bge_rcb *rcb;
bus_size_t vrcb;
caddr_t lladdr;
bge_hostaddr taddr;
uint32_t dmactl, rdmareg, val;
int i, limit;
/*
* Initialize the memory window pointer register so that
* we can access the first 32K of internal NIC RAM. This will
* allow us to set up the TX send ring RCBs and the RX return
* ring RCBs, plus other things which live in NIC memory.
*/
CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
/* Note: the BCM5704 has a smaller mbuf space than other chips. */
if (!(BGE_IS_5705_PLUS(sc))) {
/* Configure mbuf memory pool */
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
else
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
/* Configure DMA resource pool */
CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
BGE_DMA_DESCRIPTORS);
CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
}
/* Configure mbuf pool watermarks */
if (BGE_IS_5717_PLUS(sc)) {
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
if (if_getmtu(sc->bge_ifp) > ETHERMTU) {
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
} else {
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
}
} else if (!BGE_IS_5705_PLUS(sc)) {
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
} else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
} else {
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
}
/* Configure DMA resource watermarks */
CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
/* Enable buffer manager */
val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
/*
* Change the arbitration algorithm of TXMBUF read request to
* round-robin instead of priority based for BCM5719. When
* TXFIFO is almost empty, RDMA will hold its request until
* TXFIFO is not almost empty.
*/
if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
val |= BGE_BMANMODE_NO_TX_UNDERRUN;
CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
/* Poll for buffer manager start indication */
for (i = 0; i < BGE_TIMEOUT; i++) {
DELAY(10);
if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
break;
}
if (i == BGE_TIMEOUT) {
device_printf(sc->bge_dev, "buffer manager failed to start\n");
return (ENXIO);
}
/* Enable flow-through queues */
CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
/* Wait until queue initialization is complete */
for (i = 0; i < BGE_TIMEOUT; i++) {
DELAY(10);
if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
break;
}
if (i == BGE_TIMEOUT) {
device_printf(sc->bge_dev, "flow-through queue init failed\n");
return (ENXIO);
}
/*
* Summary of rings supported by the controller:
*
* Standard Receive Producer Ring
* - This ring is used to feed receive buffers for "standard"
* sized frames (typically 1536 bytes) to the controller.
*
* Jumbo Receive Producer Ring
* - This ring is used to feed receive buffers for jumbo sized
* frames (i.e. anything bigger than the "standard" frames)
* to the controller.
*
* Mini Receive Producer Ring
* - This ring is used to feed receive buffers for "mini"
* sized frames to the controller.
* - This feature required external memory for the controller
* but was never used in a production system. Should always
* be disabled.
*
* Receive Return Ring
* - After the controller has placed an incoming frame into a
* receive buffer that buffer is moved into a receive return
* ring. The driver is then responsible to passing the
* buffer up to the stack. Many versions of the controller
* support multiple RR rings.
*
* Send Ring
* - This ring is used for outgoing frames. Many versions of
* the controller support multiple send rings.
*/
/* Initialize the standard receive producer ring control block. */
rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
rcb->bge_hostaddr.bge_addr_lo =
BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
rcb->bge_hostaddr.bge_addr_hi =
BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
if (BGE_IS_5717_PLUS(sc)) {
/*
* Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
* Bits 15-2 : Maximum RX frame size
* Bit 1 : 1 = Ring Disabled, 0 = Ring ENabled
* Bit 0 : Reserved
*/
rcb->bge_maxlen_flags =
BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
} else if (BGE_IS_5705_PLUS(sc)) {
/*
* Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
* Bits 15-2 : Reserved (should be 0)
* Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
* Bit 0 : Reserved
*/
rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
} else {
/*
* Ring size is always XXX entries
* Bits 31-16: Maximum RX frame size
* Bits 15-2 : Reserved (should be 0)
* Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
* Bit 0 : Reserved
*/
rcb->bge_maxlen_flags =
BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
}
if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
sc->bge_asicrev == BGE_ASICREV_BCM5720)
rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
else
rcb->bge_nicaddr = BGE_STD_RX_RINGS;
/* Write the standard receive producer ring control block. */
CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
/* Reset the standard receive producer ring producer index. */
bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
/*
* Initialize the jumbo RX producer ring control
* block. We set the 'ring disabled' bit in the
* flags field until we're actually ready to start
* using this ring (i.e. once we set the MTU
* high enough to require it).
*/
if (BGE_IS_JUMBO_CAPABLE(sc)) {
rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
/* Get the jumbo receive producer ring RCB parameters. */
rcb->bge_hostaddr.bge_addr_lo =
BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
rcb->bge_hostaddr.bge_addr_hi =
BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
sc->bge_cdata.bge_rx_jumbo_ring_map,
BUS_DMASYNC_PREREAD);
rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
sc->bge_asicrev == BGE_ASICREV_BCM5720)
rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
else
rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
rcb->bge_hostaddr.bge_addr_hi);
CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
rcb->bge_hostaddr.bge_addr_lo);
/* Program the jumbo receive producer ring RCB parameters. */
CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
rcb->bge_maxlen_flags);
CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
/* Reset the jumbo receive producer ring producer index. */
bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
}
/* Disable the mini receive producer ring RCB. */
if (BGE_IS_5700_FAMILY(sc)) {
rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
rcb->bge_maxlen_flags =
BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
rcb->bge_maxlen_flags);
/* Reset the mini receive producer ring producer index. */
bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
}
/* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
(CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
}
/*
* The BD ring replenish thresholds control how often the
* hardware fetches new BD's from the producer rings in host
* memory. Setting the value too low on a busy system can
* starve the hardware and reduce the throughput.
*
* Set the BD ring replenish thresholds. The recommended
* values are 1/8th the number of descriptors allocated to
* each ring.
* XXX The 5754 requires a lower threshold, so it might be a
* requirement of all 575x family chips. The Linux driver sets
* the lower threshold for all 5705 family chips as well, but there
* are reports that it might not need to be so strict.
*
* XXX Linux does some extra fiddling here for the 5906 parts as
* well.
*/
if (BGE_IS_5705_PLUS(sc))
val = 8;
else
val = BGE_STD_RX_RING_CNT / 8;
CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
if (BGE_IS_JUMBO_CAPABLE(sc))
CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
BGE_JUMBO_RX_RING_CNT/8);
if (BGE_IS_5717_PLUS(sc)) {
CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
}
/*
* Disable all send rings by setting the 'ring disabled' bit
* in the flags field of all the TX send ring control blocks,
* located in NIC memory.
*/
if (!BGE_IS_5705_PLUS(sc))
/* 5700 to 5704 had 16 send rings. */
limit = BGE_TX_RINGS_EXTSSRAM_MAX;
else if (BGE_IS_57765_PLUS(sc) ||
sc->bge_asicrev == BGE_ASICREV_BCM5762)
limit = 2;
else if (BGE_IS_5717_PLUS(sc))
limit = 4;
else
limit = 1;
vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
for (i = 0; i < limit; i++) {
RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
vrcb += sizeof(struct bge_rcb);
}
/* Configure send ring RCB 0 (we use only the first ring) */
vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
sc->bge_asicrev == BGE_ASICREV_BCM5720)
RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
else
RCB_WRITE_4(sc, vrcb, bge_nicaddr,
BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
/*
* Disable all receive return rings by setting the
* 'ring disabled' bit in the flags field of all the receive
* return ring control blocks, located in NIC memory.
*/
if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
sc->bge_asicrev == BGE_ASICREV_BCM5720) {
/* Should be 17, use 16 until we get an SRAM map. */
limit = 16;
} else if (!BGE_IS_5705_PLUS(sc))
limit = BGE_RX_RINGS_MAX;
else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
sc->bge_asicrev == BGE_ASICREV_BCM5762 ||
BGE_IS_57765_PLUS(sc))
limit = 4;
else
limit = 1;
/* Disable all receive return rings. */
vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
for (i = 0; i < limit; i++) {
RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
BGE_RCB_FLAG_RING_DISABLED);
RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
(i * (sizeof(uint64_t))), 0);
vrcb += sizeof(struct bge_rcb);
}
/*
* Set up receive return ring 0. Note that the NIC address
* for RX return rings is 0x0. The return rings live entirely
* within the host, so the nicaddr field in the RCB isn't used.
*/
vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
lladdr = if_getlladdr(sc->bge_ifp);
/* Set random backoff seed for TX */
CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
(lladdr[0] + lladdr[1] +
lladdr[2] + lladdr[3] +
lladdr[4] + lladdr[5]) &
BGE_TX_BACKOFF_SEED_MASK);
/* Set inter-packet gap */
val = 0x2620;
if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
sc->bge_asicrev == BGE_ASICREV_BCM5762)
val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
(BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
/*
* Specify which ring to use for packets that don't match
* any RX rules.
*/
CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
/*
* Configure number of RX lists. One interrupt distribution
* list, sixteen active lists, one bad frames class.
*/
CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
/* Initialize RX list placement stats mask. */
CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
/* Disable host coalescing until we get it set up */
CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
/* Poll to make sure it's shut down. */
for (i = 0; i < BGE_TIMEOUT; i++) {
DELAY(10);
if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
break;
}
if (i == BGE_TIMEOUT) {
device_printf(sc->bge_dev,
"host coalescing engine failed to idle\n");
return (ENXIO);
}
/* Set up host coalescing defaults */
CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
if (!(BGE_IS_5705_PLUS(sc))) {
CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
}
CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
/* Set up address of statistics block */
if (!(BGE_IS_5705_PLUS(sc))) {
CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
}
/* Set up address of status block */
CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
/* Set up status block size. */
if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
val = BGE_STATBLKSZ_FULL;
bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
} else {
val = BGE_STATBLKSZ_32BYTE;
bzero(sc->bge_ldata.bge_status_block, 32);
}
bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
sc->bge_cdata.bge_status_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Turn on host coalescing state machine */
CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
/* Turn on RX BD completion state machine and enable attentions */
CSR_WRITE_4(sc, BGE_RBDC_MODE,
BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
/* Turn on RX list placement state machine */
CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
/* Turn on RX list selector state machine. */
if (!(BGE_IS_5705_PLUS(sc)))
CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
/* Turn on DMA, clear stats. */
val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
BGE_MACMODE_FRMHDR_DMA_ENB;
if (sc->bge_flags & BGE_FLAG_TBI)
val |= BGE_PORTMODE_TBI;
else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
val |= BGE_PORTMODE_GMII;
else
val |= BGE_PORTMODE_MII;
/* Allow APE to send/receive frames. */
if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
CSR_WRITE_4(sc, BGE_MAC_MODE, val);
DELAY(40);
/* Set misc. local control, enable interrupts on attentions */
BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
#ifdef notdef
/* Assert GPIO pins for PHY reset */
BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
#endif
/* Turn on DMA completion state machine */
if (!(BGE_IS_5705_PLUS(sc)))
CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
/* Enable host coalescing bug fix. */
if (BGE_IS_5755_PLUS(sc))
val |= BGE_WDMAMODE_STATUS_TAG_FIX;
/* Request larger DMA burst size to get better performance. */
if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
val |= BGE_WDMAMODE_BURST_ALL_DATA;
/* Turn on write DMA state machine */
CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
DELAY(40);
/* Turn on read DMA state machine */
val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
sc->bge_asicrev == BGE_ASICREV_BCM57780)
val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
if (sc->bge_flags & BGE_FLAG_PCIE)
val |= BGE_RDMAMODE_FIFO_LONG_BURST;
if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
val |= BGE_RDMAMODE_TSO4_ENABLE;
if (sc->bge_flags & BGE_FLAG_TSO3 ||
sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
sc->bge_asicrev == BGE_ASICREV_BCM57780)
val |= BGE_RDMAMODE_TSO6_ENABLE;
}
if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
sc->bge_asicrev == BGE_ASICREV_BCM5762) {
val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
BGE_RDMAMODE_H2BNC_VLAN_DET;
/*
* Allow multiple outstanding read requests from
* non-LSO read DMA engine.
*/
val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
}
if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
BGE_IS_5717_PLUS(sc) || BGE_IS_57765_PLUS(sc)) {
if (sc->bge_asicrev == BGE_ASICREV_BCM5762)
rdmareg = BGE_RDMA_RSRVCTRL_REG2;
else
rdmareg = BGE_RDMA_RSRVCTRL;
dmactl = CSR_READ_4(sc, rdmareg);
/*
* Adjust tx margin to prevent TX data corruption and
* fix internal FIFO overflow.
*/
if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 ||
sc->bge_asicrev == BGE_ASICREV_BCM5762) {
dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
BGE_RDMA_RSRVCTRL_TXMRGN_320B;
}
/*
* Enable fix for read DMA FIFO overruns.
* The fix is to limit the number of RX BDs
the hardware would fetch at a time.
*/
CSR_WRITE_4(sc, rdmareg, dmactl |
BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
}
if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
} else if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
/*
* Allow 4KB burst length reads for non-LSO frames.
* Enable 512B burst length reads for buffer descriptors.
*/
CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
} else if (sc->bge_asicrev == BGE_ASICREV_BCM5762) {
CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2,
CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) |
BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
}
CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
DELAY(40);
if (sc->bge_flags & BGE_FLAG_RDMA_BUG) {
for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) {
val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4);
if ((val & 0xFFFF) > BGE_FRAMELEN)
break;
if (((val >> 16) & 0xFFFF) > BGE_FRAMELEN)
break;
}
if (i != BGE_NUM_RDMA_CHANNELS / 2) {
val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
val |= BGE_RDMA_TX_LENGTH_WA_5719;
else
val |= BGE_RDMA_TX_LENGTH_WA_5720;
CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
}
}
/* Turn on RX data completion state machine */
CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
/* Turn on RX BD initiator state machine */
CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
/* Turn on RX data and RX BD initiator state machine */
CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
/* Turn on Mbuf cluster free state machine */
if (!(BGE_IS_5705_PLUS(sc)))
CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
/* Turn on send BD completion state machine */
CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
/* Turn on send data completion state machine */
val = BGE_SDCMODE_ENABLE;
if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
val |= BGE_SDCMODE_CDELAY;
CSR_WRITE_4(sc, BGE_SDC_MODE, val);
/* Turn on send data initiator state machine */
if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
BGE_SDIMODE_HW_LSO_PRE_DMA);
else
CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
/* Turn on send BD initiator state machine */
CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
/* Turn on send BD selector state machine */
CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
/* ack/clear link change events */
CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
BGE_MACSTAT_LINK_CHANGED);
CSR_WRITE_4(sc, BGE_MI_STS, 0);
/*
* Enable attention when the link has changed state for
* devices that use auto polling.
*/
if (sc->bge_flags & BGE_FLAG_TBI) {
CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
} else {
if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
DELAY(80);
}
if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
BGE_EVTENB_MI_INTERRUPT);
}
/*
* Clear any pending link state attention.
* Otherwise some link state change events may be lost until attention
* is cleared by bge_intr() -> bge_link_upd() sequence.
* It's not necessary on newer BCM chips - perhaps enabling link
* state change attentions implies clearing pending attention.
*/
CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
BGE_MACSTAT_LINK_CHANGED);
/* Enable link state change attentions. */
BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
return (0);
}
/*
 * Translate a chip id into its human-readable revision entry.
 * An exact chip-id match is preferred; otherwise fall back to a
 * match on the ASIC (major) revision alone.  Returns NULL when
 * the id is unknown to both tables.
 */
static const struct bge_revision *
bge_lookup_rev(uint32_t chipid)
{
	const struct bge_revision *rev;

	/* Exact chip-id match first. */
	for (rev = bge_revisions; rev->br_name != NULL; rev++)
		if (rev->br_chipid == chipid)
			return (rev);

	/* Fall back to the ASIC (major) revision. */
	for (rev = bge_majorrevs; rev->br_name != NULL; rev++)
		if (rev->br_chipid == BGE_ASICREV(chipid))
			return (rev);

	return (NULL);
}
/*
 * Look up a PCI vendor id in the bge_vendors table.
 * Returns the matching entry, or NULL if the vendor is unknown.
 */
static const struct bge_vendor *
bge_lookup_vendor(uint16_t vid)
{
	const struct bge_vendor *vend;

	for (vend = bge_vendors; vend->v_name != NULL; vend++) {
		if (vend->v_id == vid)
			return (vend);
	}
	return (NULL);
}
/*
 * Determine the chip id of the controller.  Older parts expose it in
 * the high bits of BGE_PCI_MISC_CTL; newer parts set a sentinel ASIC
 * revision there and publish the real id in a device-specific
 * product-id register instead.
 */
static uint32_t
bge_chipid(device_t dev)
{
	uint32_t chipid;

	chipid = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
	    BGE_PCIMISCCTL_ASICREV_SHIFT;
	if (BGE_ASICREV(chipid) != BGE_ASICREV_USE_PRODID_REG)
		return (chipid);

	/*
	 * Find the ASIC revision in a product-id register; which
	 * register holds it depends on the device id.
	 */
	switch (pci_get_device(dev)) {
	case BCOM_DEVICEID_BCM5717C:
		/* 5717 C0 seems to belong to the 5720 line. */
		chipid = BGE_CHIPID_BCM5720_A0;
		break;
	case BCOM_DEVICEID_BCM5717:
	case BCOM_DEVICEID_BCM5718:
	case BCOM_DEVICEID_BCM5719:
	case BCOM_DEVICEID_BCM5720:
	case BCOM_DEVICEID_BCM5725:
	case BCOM_DEVICEID_BCM5727:
	case BCOM_DEVICEID_BCM5762:
	case BCOM_DEVICEID_BCM57764:
	case BCOM_DEVICEID_BCM57767:
	case BCOM_DEVICEID_BCM57787:
		chipid = pci_read_config(dev,
		    BGE_PCI_GEN2_PRODID_ASICREV, 4);
		break;
	case BCOM_DEVICEID_BCM57761:
	case BCOM_DEVICEID_BCM57762:
	case BCOM_DEVICEID_BCM57765:
	case BCOM_DEVICEID_BCM57766:
	case BCOM_DEVICEID_BCM57781:
	case BCOM_DEVICEID_BCM57782:
	case BCOM_DEVICEID_BCM57785:
	case BCOM_DEVICEID_BCM57786:
	case BCOM_DEVICEID_BCM57791:
	case BCOM_DEVICEID_BCM57795:
		chipid = pci_read_config(dev,
		    BGE_PCI_GEN15_PRODID_ASICREV, 4);
		break;
	default:
		chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 4);
	}
	return (chipid);
}
/*
* Probe for a Broadcom chip. Check the PCI vendor and device IDs
* against our list and return its name if we find a match.
*
* Note that since the Broadcom controller contains VPD support, we
* try to get the device name string from the controller itself instead
* of the compiled-in string. It guarantees we'll always announce the
* right product name. We fall back to the compiled-in string when
* VPD is unavailable or corrupt.
*/
/*
 * Match the PCI vendor/device id pair against the bge_devs table and,
 * on a hit, build the device description.  The product name is taken
 * from on-board VPD when available, otherwise composed from the
 * vendor and revision tables.
 */
static int
bge_probe(device_t dev)
{
	char model[64];
	const struct bge_revision *br;
	const char *pname;
	struct bge_softc *sc;
	const struct bge_type *t;
	const struct bge_vendor *v;
	uint32_t id;
	uint16_t did, vid;

	sc = device_get_softc(dev);
	sc->bge_dev = dev;
	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (t = bge_devs; t->bge_vid != 0; t++) {
		if (vid != t->bge_vid || did != t->bge_did)
			continue;
		id = bge_chipid(dev);
		br = bge_lookup_rev(id);
		if (bge_has_eaddr(sc) &&
		    pci_get_vpd_ident(dev, &pname) == 0) {
			/* Prefer the product name stored in VPD. */
			snprintf(model, sizeof(model), "%s", pname);
		} else {
			v = bge_lookup_vendor(vid);
			snprintf(model, sizeof(model), "%s %s",
			    v != NULL ? v->v_name : "Unknown",
			    br != NULL ? br->br_name :
			    "NetXtreme/NetLink Ethernet Controller");
		}
		device_set_descf(dev, "%s, %sASIC rev. %#08x",
		    model, br != NULL ? "" : "unknown ", id);
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}
/*
 * Release every DMA resource created by bge_dma_alloc(): per-buffer
 * maps, the mbuf DMA tags, the descriptor rings, the status and
 * statistics blocks, and finally the buffer and parent tags.
 *
 * Each resource is NULL-checked before destruction, so this is safe
 * to call on a partially-initialized softc (e.g. from an attach
 * failure path).  Ordering matters: maps are destroyed before their
 * tags, and each ring is unloaded and freed before its tag goes away.
 */
static void
bge_dma_free(struct bge_softc *sc)
{
	int i;

	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i]);
	}
	if (sc->bge_cdata.bge_rx_std_sparemap)
		bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
		    sc->bge_cdata.bge_rx_std_sparemap);

	/* Destroy DMA maps for jumbo RX buffers. */
	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
	}
	if (sc->bge_cdata.bge_rx_jumbo_sparemap)
		bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
		    sc->bge_cdata.bge_rx_jumbo_sparemap);

	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i]);
	}

	/* All maps are gone; the mbuf tags can follow. */
	if (sc->bge_cdata.bge_rx_mtag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
	if (sc->bge_cdata.bge_mtag_jumbo)
		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag_jumbo);
	if (sc->bge_cdata.bge_tx_mtag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);

	/* Destroy standard RX ring: unload, free memory, destroy tag. */
	if (sc->bge_ldata.bge_rx_std_ring_paddr)
		bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_cdata.bge_rx_std_ring_map);
	if (sc->bge_ldata.bge_rx_std_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_ldata.bge_rx_std_ring,
		    sc->bge_cdata.bge_rx_std_ring_map);
	if (sc->bge_cdata.bge_rx_std_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);

	/* Destroy jumbo RX ring. */
	if (sc->bge_ldata.bge_rx_jumbo_ring_paddr)
		bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map);
	if (sc->bge_ldata.bge_rx_jumbo_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_ldata.bge_rx_jumbo_ring,
		    sc->bge_cdata.bge_rx_jumbo_ring_map);
	if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);

	/* Destroy RX return ring. */
	if (sc->bge_ldata.bge_rx_return_ring_paddr)
		bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_cdata.bge_rx_return_ring_map);
	if (sc->bge_ldata.bge_rx_return_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_ldata.bge_rx_return_ring,
		    sc->bge_cdata.bge_rx_return_ring_map);
	if (sc->bge_cdata.bge_rx_return_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);

	/* Destroy TX ring. */
	if (sc->bge_ldata.bge_tx_ring_paddr)
		bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_cdata.bge_tx_ring_map);
	if (sc->bge_ldata.bge_tx_ring)
		bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_ldata.bge_tx_ring,
		    sc->bge_cdata.bge_tx_ring_map);
	if (sc->bge_cdata.bge_tx_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);

	/* Destroy status block. */
	if (sc->bge_ldata.bge_status_block_paddr)
		bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
		    sc->bge_cdata.bge_status_map);
	if (sc->bge_ldata.bge_status_block)
		bus_dmamem_free(sc->bge_cdata.bge_status_tag,
		    sc->bge_ldata.bge_status_block,
		    sc->bge_cdata.bge_status_map);
	if (sc->bge_cdata.bge_status_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);

	/* Destroy statistics block. */
	if (sc->bge_ldata.bge_stats_paddr)
		bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
		    sc->bge_cdata.bge_stats_map);
	if (sc->bge_ldata.bge_stats)
		bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
		    sc->bge_ldata.bge_stats,
		    sc->bge_cdata.bge_stats_map);
	if (sc->bge_cdata.bge_stats_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);

	if (sc->bge_cdata.bge_buffer_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);

	/* Destroy the parent tag. */
	if (sc->bge_cdata.bge_parent_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
}
/*
 * Create one DMA-able ring/block: a tag of 'maxsize' bytes aligned to
 * 'alignment', zeroed coherent memory, and a loaded map.  The tag,
 * kernel virtual address, map, and bus address are returned through
 * 'tag', 'ring', 'map' and 'paddr'; 'msg' names the ring in error
 * messages.
 *
 * If the controller suffers from the 4GB boundary DMA bug and the
 * loaded region happens to straddle a 4GB boundary, the allocation is
 * discarded and retried with the DMA address space limited to 32 bits,
 * which makes a crossing impossible.
 *
 * Returns 0 on success or ENOMEM on failure.  On failure any
 * already-created resources remain recorded through the out pointers;
 * the caller is presumably expected to clean up via bge_dma_free() --
 * confirm against the attach error path.
 */
static int
bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
    bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
    bus_addr_t *paddr, const char *msg)
{
	struct bge_dmamap_arg ctx;
	bus_addr_t lowaddr;
	bus_size_t ring_end;
	int error;

	lowaddr = BUS_SPACE_MAXADDR;
again:
	/* One contiguous segment of exactly 'maxsize' bytes. */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
	    NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
	if (error != 0) {
		device_printf(sc->bge_dev,
		    "could not create %s dma tag\n", msg);
		return (ENOMEM);
	}
	/* Allocate DMA'able memory for ring. */
	error = bus_dmamem_alloc(*tag, (void **)ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
	if (error != 0) {
		device_printf(sc->bge_dev,
		    "could not allocate DMA'able memory for %s\n", msg);
		return (ENOMEM);
	}
	/* Load the address of the ring. */
	ctx.bge_busaddr = 0;
	error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
	    &ctx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->bge_dev,
		    "could not load DMA'able memory for %s\n", msg);
		return (ENOMEM);
	}
	*paddr = ctx.bge_busaddr;
	ring_end = *paddr + maxsize;
	if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
	    BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
		/*
		 * 4GB boundary crossed.  Limit maximum allowable DMA
		 * address space to 32bit and try again.
		 */
		bus_dmamap_unload(*tag, *map);
		bus_dmamem_free(*tag, *ring, *map);
		bus_dma_tag_destroy(*tag);
		if (bootverbose)
			device_printf(sc->bge_dev, "4GB boundary crossed, "
			    "limit DMA address space to 32bit for %s\n", msg);
		*ring = NULL;
		*tag = NULL;
		*map = NULL;
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}
	return (0);
}
/*
 * Create all DMA resources used by the driver: the parent tag, the
 * standard/return/TX (and optionally jumbo) descriptor rings, the
 * status and statistics blocks, the buffer parent tag, the TX/RX mbuf
 * tags, and one DMA map per ring slot plus a spare map per RX ring.
 *
 * Returns 0 on success or an errno.  Partially-created resources are
 * left recorded in the softc; presumably the caller releases them via
 * bge_dma_free() on failure -- confirm against the attach error path.
 */
static int
bge_dma_alloc(struct bge_softc *sc)
{
	bus_addr_t lowaddr;
	bus_size_t boundary, sbsz, rxmaxsegsz, txsegsz, txmaxsegsz;
	int i, error;

	lowaddr = BUS_SPACE_MAXADDR;
	/* 5714/5715 bridge bug: restrict DMA addresses to 40 bits. */
	if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
		lowaddr = BGE_DMA_MAXADDR;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
	    1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
	    NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
	    0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
	if (error != 0) {
		device_printf(sc->bge_dev,
		    "could not allocate parent dma tag\n");
		return (ENOMEM);
	}

	/* Create tag for standard RX ring. */
	error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
	    &sc->bge_cdata.bge_rx_std_ring_tag,
	    (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
	    &sc->bge_cdata.bge_rx_std_ring_map,
	    &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
	if (error)
		return (error);

	/* Create tag for RX return ring. */
	error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
	    &sc->bge_cdata.bge_rx_return_ring_tag,
	    (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
	    &sc->bge_cdata.bge_rx_return_ring_map,
	    &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
	if (error)
		return (error);

	/* Create tag for TX ring. */
	error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
	    &sc->bge_cdata.bge_tx_ring_tag,
	    (uint8_t **)&sc->bge_ldata.bge_tx_ring,
	    &sc->bge_cdata.bge_tx_ring_map,
	    &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
	if (error)
		return (error);

	/*
	 * Create tag for status block.
	 * Because we only use single Tx/Rx/Rx return ring, use
	 * minimum status block size except BCM5700 AX/BX which
	 * seems to want to see full status block size regardless
	 * of configured number of ring.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
		sbsz = BGE_STATUS_BLK_SZ;
	else
		sbsz = 32;
	error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
	    &sc->bge_cdata.bge_status_tag,
	    (uint8_t **)&sc->bge_ldata.bge_status_block,
	    &sc->bge_cdata.bge_status_map,
	    &sc->bge_ldata.bge_status_block_paddr, "status block");
	if (error)
		return (error);

	/* Create tag for statistics block. */
	error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
	    &sc->bge_cdata.bge_stats_tag,
	    (uint8_t **)&sc->bge_ldata.bge_stats,
	    &sc->bge_cdata.bge_stats_map,
	    &sc->bge_ldata.bge_stats_paddr, "statistics block");
	if (error)
		return (error);

	/* Create tag for jumbo RX ring. */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
		    &sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
		    &sc->bge_cdata.bge_rx_jumbo_ring_map,
		    &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
		if (error)
			return (error);
	}

	/* Create parent tag for buffers. */
	boundary = 0;
	if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
		boundary = BGE_DMA_BNDRY;
		/*
		 * XXX
		 * watchdog timeout issue was observed on BCM5704 which
		 * lives behind PCI-X bridge(e.g AMD 8131 PCI-X bridge).
		 * Both limiting DMA address space to 32bits and flushing
		 * mailbox write seem to address the issue.
		 */
		if (sc->bge_pcixcap != 0)
			lowaddr = BUS_SPACE_MAXADDR_32BIT;
	}
	error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
	    1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
	    NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
	    0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
	if (error != 0) {
		device_printf(sc->bge_dev,
		    "could not allocate buffer dma tag\n");
		return (ENOMEM);
	}

	/* Create tag for Tx mbufs: larger limits when TSO is usable. */
	if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
		txsegsz = BGE_TSOSEG_SZ;
		txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
	} else {
		txsegsz = MCLBYTES;
		txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
	}
	error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
	    &sc->bge_cdata.bge_tx_mtag);
	if (error) {
		device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
		return (ENOMEM);
	}

	/* Create tag for Rx mbufs: 9k clusters when jumbo-std is set. */
	if (sc->bge_flags & BGE_FLAG_JUMBO_STD)
		rxmaxsegsz = MJUM9BYTES;
	else
		rxmaxsegsz = MCLBYTES;
	error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1,
	    rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
	if (error) {
		device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
		return (ENOMEM);
	}

	/* Create DMA maps for RX buffers (spare map plus one per slot). */
	error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
	    &sc->bge_cdata.bge_rx_std_sparemap);
	if (error) {
		device_printf(sc->bge_dev,
		    "can't create spare DMA map for RX\n");
		return (ENOMEM);
	}
	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
		    &sc->bge_cdata.bge_rx_std_dmamap[i]);
		if (error) {
			device_printf(sc->bge_dev,
			    "can't create DMA map for RX\n");
			return (ENOMEM);
		}
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
		    &sc->bge_cdata.bge_tx_dmamap[i]);
		if (error) {
			device_printf(sc->bge_dev,
			    "can't create DMA map for TX\n");
			return (ENOMEM);
		}
	}

	/* Create tags for jumbo RX buffers. */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
		    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
		    NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
		    0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
		if (error) {
			device_printf(sc->bge_dev,
			    "could not allocate jumbo dma tag\n");
			return (ENOMEM);
		}
		/* Create DMA maps for jumbo RX buffers. */
		error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
		    0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
		if (error) {
			device_printf(sc->bge_dev,
			    "can't create spare DMA map for jumbo RX\n");
			return (ENOMEM);
		}
		for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
			error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
			    0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
			if (error) {
				device_printf(sc->bge_dev,
				    "can't create DMA map for jumbo RX\n");
				return (ENOMEM);
			}
		}
	}

	return (0);
}
/*
* Return true if this device has more than one port.
*/
/*
 * Return 1 if another PCI function exists at the same
 * domain/bus/slot as this device (i.e. the controller exposes
 * more than one port), 0 otherwise.
 */
static int
bge_has_multiple_ports(struct bge_softc *sc)
{
	device_t dev = sc->bge_dev;
	u_int bus, dom, func, probe, slot;

	dom = pci_get_domain(dev);
	bus = pci_get_bus(dev);
	slot = pci_get_slot(dev);
	func = pci_get_function(dev);
	for (probe = 0; probe <= PCI_FUNCMAX; probe++) {
		if (probe == func)
			continue;
		if (pci_find_dbsf(dom, bus, slot, probe) != NULL)
			return (1);
	}
	return (0);
}
/*
* Return true if MSI can be used with this device.
*/
/*
 * Return 1 if MSI may be used with this controller, 0 otherwise.
 * MSI is refused when disabled by configuration (sc->bge_msi, set
 * elsewhere), when the kernel is built with DEVICE_POLLING, and on
 * chip revisions with known MSI problems.
 */
static int
bge_can_use_msi(struct bge_softc *sc)
{
	int can_use_msi = 0;

	/* MSI disabled by configuration. */
	if (sc->bge_msi == 0)
		return (0);

	/* Disable MSI for polling(4). */
#ifdef DEVICE_POLLING
	/* NB: everything below is unreachable in a polling kernel. */
	return (0);
#endif
	switch (sc->bge_asicrev) {
	case BGE_ASICREV_BCM5714_A0:
	case BGE_ASICREV_BCM5714:
		/*
		 * Apparently, MSI doesn't work when these chips are
		 * configured in single-port mode.
		 */
		if (bge_has_multiple_ports(sc))
			can_use_msi = 1;
		break;
	case BGE_ASICREV_BCM5750:
		/* 5750 AX/BX revisions are excluded. */
		if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
		    sc->bge_chiprev != BGE_CHIPREV_5750_BX)
			can_use_msi = 1;
		break;
	case BGE_ASICREV_BCM5784:
		/*
		 * Prevent infinite "watchdog timeout" errors
		 * in some MacBook Pro and make it work out-of-the-box.
		 */
		if (sc->bge_chiprev == BGE_CHIPREV_5784_AX)
			break;
		/* FALLTHROUGH */
	default:
		/* Any other 575X-or-newer chip is considered MSI-safe. */
		if (BGE_IS_575X_PLUS(sc))
			can_use_msi = 1;
	}
	return (can_use_msi);
}
/*
 * Walk up the PCI hierarchy looking for bridges known to reorder
 * posted writes to the mailbox registers (which manifests as
 * watchdog timeouts from out-of-order TX completions).  Returns 1
 * if such a bridge is found, so the caller can enable the MBOX
 * reordering workaround; 0 otherwise.
 */
static int
bge_mbox_reorder(struct bge_softc *sc)
{
	/* Lists of PCI bridges that are known to reorder mailbox writes. */
	static const struct mbox_reorder {
		const uint16_t vendor;
		const uint16_t device;
		const char *desc;
	} mbox_reorder_lists[] = {
		{ 0x1022, 0x7450, "AMD-8131 PCI-X Bridge" },
	};
	devclass_t pci, pcib;
	device_t bus, dev;
	int i;

	pci = devclass_find("pci");
	pcib = devclass_find("pcib");
	dev = sc->bge_dev;
	bus = device_get_parent(dev);
	for (;;) {
		/*
		 * Step up one level: 'dev' becomes the bridge above the
		 * current bus, 'bus' the bus that bridge sits on.  Stop
		 * once the pcib/pci alternation ends (top of hierarchy).
		 */
		dev = device_get_parent(bus);
		bus = device_get_parent(dev);
		if (device_get_devclass(dev) != pcib)
			break;
		if (device_get_devclass(bus) != pci)
			break;
		/* Check this bridge against the known-bad list. */
		for (i = 0; i < nitems(mbox_reorder_lists); i++) {
			if (pci_get_vendor(dev) ==
			    mbox_reorder_lists[i].vendor &&
			    pci_get_device(dev) ==
			    mbox_reorder_lists[i].device) {
				device_printf(sc->bge_dev,
				    "enabling MBOX workaround for %s\n",
				    mbox_reorder_lists[i].desc);
				return (1);
			}
		}
	}
	return (0);
}
/*
 * Print a one-line summary of the controller: chip id, ASIC and chip
 * revisions, and the bus type/speed it is attached by (PCI-E, PCI-X,
 * or plain PCI).
 */
static void
bge_devinfo(struct bge_softc *sc)
{
	uint32_t cfg, clk;

	device_printf(sc->bge_dev,
	    "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; ",
	    sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev);
	if (sc->bge_flags & BGE_FLAG_PCIE)
		printf("PCI-E\n");
	else if (sc->bge_flags & BGE_FLAG_PCIX) {
		printf("PCI-X ");
		cfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
		if (cfg == BGE_MISCCFG_BOARD_ID_5704CIOBE)
			clk = 133;
		else {
			/*
			 * Decode the PCI-X clock from the low 5 bits of
			 * the clock-control register; encodings not in
			 * the table below are printed as the raw value.
			 */
			clk = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
			switch (clk) {
			case 0:
				clk = 33;
				break;
			case 2:
				clk = 50;
				break;
			case 4:
				clk = 66;
				break;
			case 6:
				clk = 100;
				break;
			case 7:
				clk = 133;
				break;
			}
		}
		printf("%u MHz\n", clk);
	} else {
		/* Plain PCI (possibly a PCI-X capable chip in PCI mode). */
		if (sc->bge_pcixcap != 0)
			printf("PCI on PCI-X ");
		else
			printf("PCI ");
		cfg = pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4);
		if (cfg & BGE_PCISTATE_PCI_BUSSPEED)
			clk = 66;
		else
			clk = 33;
		if (cfg & BGE_PCISTATE_32BIT_BUS)
			printf("%u MHz; 32bit\n", clk);
		else
			printf("%u MHz; 64bit\n", clk);
	}
}
static int
bge_attach(device_t dev)
{
if_t ifp;
struct bge_softc *sc;
uint32_t hwcfg = 0, misccfg, pcistate;
u_char eaddr[ETHER_ADDR_LEN];
int capmask, error, reg, rid, trys;
sc = device_get_softc(dev);
sc->bge_dev = dev;
BGE_LOCK_INIT(sc, device_get_nameunit(dev));
NET_TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
pci_enable_busmaster(dev);
/*
* Allocate control/status registers.
*/
rid = PCIR_BAR(0);
sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (sc->bge_res == NULL) {
device_printf (sc->bge_dev, "couldn't map BAR0 memory\n");
error = ENXIO;
goto fail;
}
/* Save various chip information. */
sc->bge_func_addr = pci_get_function(dev);
sc->bge_chipid = bge_chipid(dev);
sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
/* Set default PHY address. */
sc->bge_phy_addr = 1;
/*
* PHY address mapping for various devices.
*
* | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
* ---------+-------+-------+-------+-------+
* BCM57XX | 1 | X | X | X |
* BCM5704 | 1 | X | 1 | X |
* BCM5717 | 1 | 8 | 2 | 9 |
* BCM5719 | 1 | 8 | 2 | 9 |
* BCM5720 | 1 | 8 | 2 | 9 |
*
* | F2 Cu | F2 Sr | F3 Cu | F3 Sr |
* ---------+-------+-------+-------+-------+
* BCM57XX | X | X | X | X |
* BCM5704 | X | X | X | X |
* BCM5717 | X | X | X | X |
* BCM5719 | 3 | 10 | 4 | 11 |
* BCM5720 | X | X | X | X |
*
* Other addresses may respond but they are not
* IEEE compliant PHYs and should be ignored.
*/
if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
sc->bge_asicrev == BGE_ASICREV_BCM5720) {
if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) {
if (CSR_READ_4(sc, BGE_SGDIG_STS) &
BGE_SGDIGSTS_IS_SERDES)
sc->bge_phy_addr = sc->bge_func_addr + 8;
else
sc->bge_phy_addr = sc->bge_func_addr + 1;
} else {
if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
BGE_CPMU_PHY_STRAP_IS_SERDES)
sc->bge_phy_addr = sc->bge_func_addr + 8;
else
sc->bge_phy_addr = sc->bge_func_addr + 1;
}
}
if (bge_has_eaddr(sc))
sc->bge_flags |= BGE_FLAG_EADDR;
/* Save chipset family. */
switch (sc->bge_asicrev) {
case BGE_ASICREV_BCM5762:
case BGE_ASICREV_BCM57765:
case BGE_ASICREV_BCM57766:
sc->bge_flags |= BGE_FLAG_57765_PLUS;
/* FALLTHROUGH */
case BGE_ASICREV_BCM5717:
case BGE_ASICREV_BCM5719:
case BGE_ASICREV_BCM5720:
sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
BGE_FLAG_JUMBO_FRAME;
if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
sc->bge_asicrev == BGE_ASICREV_BCM5720) {
/*
* Enable work around for DMA engine miscalculation
* of TXMBUF available space.
*/
sc->bge_flags |= BGE_FLAG_RDMA_BUG;
if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
/* Jumbo frame on BCM5719 A0 does not work. */
sc->bge_flags &= ~BGE_FLAG_JUMBO;
}
}
break;
case BGE_ASICREV_BCM5755:
case BGE_ASICREV_BCM5761:
case BGE_ASICREV_BCM5784:
case BGE_ASICREV_BCM5785:
case BGE_ASICREV_BCM5787:
case BGE_ASICREV_BCM57780:
sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
BGE_FLAG_5705_PLUS;
break;
case BGE_ASICREV_BCM5700:
case BGE_ASICREV_BCM5701:
case BGE_ASICREV_BCM5703:
case BGE_ASICREV_BCM5704:
sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
break;
case BGE_ASICREV_BCM5714_A0:
case BGE_ASICREV_BCM5780:
case BGE_ASICREV_BCM5714:
sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD;
/* FALLTHROUGH */
case BGE_ASICREV_BCM5750:
case BGE_ASICREV_BCM5752:
case BGE_ASICREV_BCM5906:
sc->bge_flags |= BGE_FLAG_575X_PLUS;
/* FALLTHROUGH */
case BGE_ASICREV_BCM5705:
sc->bge_flags |= BGE_FLAG_5705_PLUS;
break;
}
/* Identify chips with APE processor. */
switch (sc->bge_asicrev) {
case BGE_ASICREV_BCM5717:
case BGE_ASICREV_BCM5719:
case BGE_ASICREV_BCM5720:
case BGE_ASICREV_BCM5761:
case BGE_ASICREV_BCM5762:
sc->bge_flags |= BGE_FLAG_APE;
break;
}
/* Chips with APE need BAR2 access for APE registers/memory. */
if ((sc->bge_flags & BGE_FLAG_APE) != 0) {
rid = PCIR_BAR(2);
sc->bge_res2 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (sc->bge_res2 == NULL) {
device_printf (sc->bge_dev,
"couldn't map BAR2 memory\n");
error = ENXIO;
goto fail;
}
/* Enable APE register/memory access by host driver. */
pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
pcistate |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
pci_write_config(dev, BGE_PCI_PCISTATE, pcistate, 4);
bge_ape_lock_init(sc);
bge_ape_read_fw_ver(sc);
}
/* Add SYSCTLs, requires the chipset family to be set. */
bge_add_sysctls(sc);
/* Identify the chips that use an CPMU. */
if (BGE_IS_5717_PLUS(sc) ||
sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
sc->bge_asicrev == BGE_ASICREV_BCM57780)
sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
else
sc->bge_mi_mode = BGE_MIMODE_BASE;
/* Enable auto polling for BCM570[0-5]. */
if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
/*
* All Broadcom controllers have 4GB boundary DMA bug.
* Whenever an address crosses a multiple of the 4GB boundary
* (including 4GB, 8Gb, 12Gb, etc.) and makes the transition
* from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA
* state machine will lockup and cause the device to hang.
*/
sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
/* BCM5755 or higher and BCM5906 have short DMA bug. */
if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
/*
* BCM5719 cannot handle DMA requests for DMA segments that
* have larger than 4KB in size. However the maximum DMA
* segment size created in DMA tag is 4KB for TSO, so we
* wouldn't encounter the issue here.
*/
if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG;
misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
misccfg == BGE_MISCCFG_BOARD_ID_5788M)
sc->bge_flags |= BGE_FLAG_5788;
}
capmask = BMSR_DEFCAPMASK;
if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
(misccfg == 0x4000 || misccfg == 0x8000)) ||
(sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
pci_get_vendor(dev) == BCOM_VENDORID &&
(pci_get_device(dev) == BCOM_DEVICEID_BCM5901 ||
pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 ||
pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) ||
(pci_get_vendor(dev) == BCOM_VENDORID &&
(pci_get_device(dev) == BCOM_DEVICEID_BCM5751F ||
pci_get_device(dev) == BCOM_DEVICEID_BCM5753F ||
pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) ||
pci_get_device(dev) == BCOM_DEVICEID_BCM57790 ||
pci_get_device(dev) == BCOM_DEVICEID_BCM57791 ||
pci_get_device(dev) == BCOM_DEVICEID_BCM57795 ||
sc->bge_asicrev == BGE_ASICREV_BCM5906) {
/* These chips are 10/100 only. */
capmask &= ~BMSR_EXTSTAT;
sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
}
/*
* Some controllers seem to require a special firmware to use
* TSO. But the firmware is not available to FreeBSD and Linux
* claims that the TSO performed by the firmware is slower than
* hardware based TSO. Moreover the firmware based TSO has one
* known bug which can't handle TSO if Ethernet header + IP/TCP
* header is greater than 80 bytes. A workaround for the TSO
bug exists but it seems to be more expensive than not using
TSO at all. Some hardware also has the TSO bug so limit
the TSO to the controllers that are not affected by TSO issues
* (e.g. 5755 or higher).
*/
if (BGE_IS_5717_PLUS(sc)) {
/* BCM5717 requires different TSO configuration. */
sc->bge_flags |= BGE_FLAG_TSO3;
if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
/* TSO on BCM5719 A0 does not work. */
sc->bge_flags &= ~BGE_FLAG_TSO3;
}
} else if (BGE_IS_5755_PLUS(sc)) {
/*
* BCM5754 and BCM5787 shares the same ASIC id so
* explicit device id check is required.
* Due to unknown reason TSO does not work on BCM5755M.
*/
if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
sc->bge_flags |= BGE_FLAG_TSO;
}
/*
* Check if this is a PCI-X or PCI Express device.
*/
if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
/*
* Found a PCI Express capabilities register, this
* must be a PCI Express device.
*/
sc->bge_flags |= BGE_FLAG_PCIE;
sc->bge_expcap = reg;
/* Extract supported maximum payload size. */
sc->bge_mps = pci_read_config(dev, sc->bge_expcap +
PCIER_DEVICE_CAP, 2);
sc->bge_mps = 128 << (sc->bge_mps & PCIEM_CAP_MAX_PAYLOAD);
if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
sc->bge_asicrev == BGE_ASICREV_BCM5720)
sc->bge_expmrq = 2048;
else
sc->bge_expmrq = 4096;
pci_set_max_read_req(dev, sc->bge_expmrq);
} else {
/*
* Check if the device is in PCI-X Mode.
* (This bit is not valid on PCI Express controllers.)
*/
if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0)
sc->bge_pcixcap = reg;
if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
BGE_PCISTATE_PCI_BUSMODE) == 0)
sc->bge_flags |= BGE_FLAG_PCIX;
}
/*
* The 40bit DMA bug applies to the 5714/5715 controllers and is
* not actually a MAC controller bug but an issue with the embedded
* PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
*/
if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
sc->bge_flags |= BGE_FLAG_40BIT_BUG;
/*
* Some PCI-X bridges are known to trigger write reordering to
* the mailbox registers. Typical phenomena is watchdog timeouts
* caused by out-of-order TX completions. Enable workaround for
* PCI-X devices that live behind these bridges.
* Note, PCI-X controllers can run in PCI mode so we can't use
* BGE_FLAG_PCIX flag to detect PCI-X controllers.
*/
if (sc->bge_pcixcap != 0 && bge_mbox_reorder(sc) != 0)
sc->bge_flags |= BGE_FLAG_MBOX_REORDER;
/*
* Allocate the interrupt, using MSI if possible. These devices
* support 8 MSI messages, but only the first one is used in
* normal operation.
*/
rid = 0;
if (pci_find_cap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
sc->bge_msicap = reg;
reg = 1;
if (bge_can_use_msi(sc) && pci_alloc_msi(dev, &reg) == 0) {
rid = 1;
sc->bge_flags |= BGE_FLAG_MSI;
}
}
/*
* All controllers except BCM5700 supports tagged status but
* we use tagged status only for MSI case on BCM5717. Otherwise
* MSI on BCM5717 does not work.
*/
#ifndef DEVICE_POLLING
if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
#endif
sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_ACTIVE | (rid != 0 ? 0 : RF_SHAREABLE));
if (sc->bge_irq == NULL) {
device_printf(sc->bge_dev, "couldn't map interrupt\n");
error = ENXIO;
goto fail;
}
bge_devinfo(sc);
sc->bge_asf_mode = 0;
/* No ASF if APE present. */
if ((sc->bge_flags & BGE_FLAG_APE) == 0) {
if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
BGE_SRAM_DATA_SIG_MAGIC)) {
if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) &
BGE_HWCFG_ASF) {
sc->bge_asf_mode |= ASF_ENABLE;
sc->bge_asf_mode |= ASF_STACKUP;
if (BGE_IS_575X_PLUS(sc))
sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
}
}
}
bge_stop_fw(sc);
bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
if (bge_reset(sc)) {
device_printf(sc->bge_dev, "chip reset failed\n");
error = ENXIO;
goto fail;
}
bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
if (bge_chipinit(sc)) {
device_printf(sc->bge_dev, "chip initialization failed\n");
error = ENXIO;
goto fail;
}
error = bge_get_eaddr(sc, eaddr);
if (error) {
device_printf(sc->bge_dev,
"failed to read station address\n");
error = ENXIO;
goto fail;
}
/* 5705 limits RX return ring to 512 entries. */
if (BGE_IS_5717_PLUS(sc))
sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
else if (BGE_IS_5705_PLUS(sc))
sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
else
sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
if (bge_dma_alloc(sc)) {
device_printf(sc->bge_dev,
"failed to allocate DMA resources\n");
error = ENXIO;
goto fail;
}
/* Set default tuneable values. */
sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
sc->bge_rx_coal_ticks = 150;
sc->bge_tx_coal_ticks = 150;
sc->bge_rx_max_coal_bds = 10;
sc->bge_tx_max_coal_bds = 10;
/* Initialize checksum features to use. */
sc->bge_csum_features = BGE_CSUM_FEATURES;
if (sc->bge_forced_udpcsum != 0)
sc->bge_csum_features |= CSUM_UDP;
/* Set up ifnet structure */
ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(sc->bge_dev, "failed to if_alloc()\n");
- error = ENXIO;
- goto fail;
- }
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setioctlfn(ifp, bge_ioctl);
if_setstartfn(ifp, bge_start);
if_setinitfn(ifp, bge_init);
if_setgetcounterfn(ifp, bge_get_counter);
if_setsendqlen(ifp, BGE_TX_RING_CNT - 1);
if_setsendqready(ifp);
if_sethwassist(ifp, sc->bge_csum_features);
if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
IFCAP_VLAN_MTU);
if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
if_sethwassistbits(ifp, CSUM_TSO, 0);
if_setcapabilitiesbit(ifp, IFCAP_TSO4 | IFCAP_VLAN_HWTSO, 0);
}
#ifdef IFCAP_VLAN_HWCSUM
if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
#endif
if_setcapenable(ifp, if_getcapabilities(ifp));
#ifdef DEVICE_POLLING
if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif
/*
* 5700 B0 chips do not support checksumming correctly due
* to hardware bugs.
*/
if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
if_setcapabilitiesbit(ifp, 0, IFCAP_HWCSUM);
if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
if_sethwassist(ifp, 0);
}
/*
* Figure out what sort of media we have by checking the
* hardware config word in the first 32k of NIC internal memory,
* or fall back to examining the EEPROM if necessary.
* Note: on some BCM5700 cards, this value appears to be unset.
* If that's the case, we have to rely on identifying the NIC
* by its PCI subsystem ID, as we do below for the SysKonnect
* SK-9D41.
*/
if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)
hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
(sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
sizeof(hwcfg))) {
device_printf(sc->bge_dev, "failed to read EEPROM\n");
error = ENXIO;
goto fail;
}
hwcfg = ntohl(hwcfg);
}
/* The SysKonnect SK-9D41 is a 1000baseSX card. */
if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
if (BGE_IS_5705_PLUS(sc)) {
sc->bge_flags |= BGE_FLAG_MII_SERDES;
sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
} else
sc->bge_flags |= BGE_FLAG_TBI;
}
/* Set various PHY bug flags. */
if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
sc->bge_chiprev == BGE_CHIPREV_5704_AX)
sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
if (pci_get_subvendor(dev) == DELL_VENDORID)
sc->bge_phy_flags |= BGE_PHY_NO_3LED;
if ((BGE_IS_5705_PLUS(sc)) &&
sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
sc->bge_asicrev != BGE_ASICREV_BCM57780 &&
!BGE_IS_5717_PLUS(sc)) {
if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
sc->bge_asicrev == BGE_ASICREV_BCM5787) {
if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
} else
sc->bge_phy_flags |= BGE_PHY_BER_BUG;
}
/*
* Don't enable Ethernet@WireSpeed for the 5700 or the
* 5705 A0 and A1 chips.
*/
if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
(sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
(sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
sc->bge_chipid != BGE_CHIPID_BCM5705_A1)))
sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
if (sc->bge_flags & BGE_FLAG_TBI) {
ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
bge_ifmedia_sts);
ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
0, NULL);
ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
} else {
/*
* Do transceiver setup and tell the firmware the
* driver is down so we can try to get access the
* probe if ASF is running. Retry a couple of times
* if we get a conflict with the ASF firmware accessing
* the PHY.
*/
trys = 0;
BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
again:
bge_asf_driver_up(sc);
error = mii_attach(dev, &sc->bge_miibus, ifp,
(ifm_change_cb_t)bge_ifmedia_upd,
(ifm_stat_cb_t)bge_ifmedia_sts, capmask, sc->bge_phy_addr,
MII_OFFSET_ANY, MIIF_DOPAUSE);
if (error != 0) {
if (trys++ < 4) {
device_printf(sc->bge_dev, "Try again\n");
bge_miibus_writereg(sc->bge_dev,
sc->bge_phy_addr, MII_BMCR, BMCR_RESET);
goto again;
}
device_printf(sc->bge_dev, "attaching PHYs failed\n");
goto fail;
}
/*
* Now tell the firmware we are going up after probing the PHY
*/
if (sc->bge_asf_mode & ASF_STACKUP)
BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
}
/*
* When using the BCM5701 in PCI-X mode, data corruption has
* been observed in the first few bytes of some received packets.
* Aligning the packet buffer in memory eliminates the corruption.
* Unfortunately, this misaligns the packet payloads. On platforms
* which do not support unaligned accesses, we will realign the
* payloads by copying the received packets.
*/
if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
sc->bge_flags & BGE_FLAG_PCIX)
sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
/*
* Call MI attach routine.
*/
ether_ifattach(ifp, eaddr);
/* Tell upper layer we support long frames. */
if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
/*
* Hookup IRQ last.
*/
if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
/* Take advantage of single-shot MSI. */
CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
~BGE_MSIMODE_ONE_SHOT_DISABLE);
sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->bge_tq);
if (sc->bge_tq == NULL) {
device_printf(dev, "could not create taskqueue.\n");
ether_ifdetach(ifp);
error = ENOMEM;
goto fail;
}
error = taskqueue_start_threads(&sc->bge_tq, 1, PI_NET,
"%s taskq", device_get_nameunit(sc->bge_dev));
if (error != 0) {
device_printf(dev, "could not start threads.\n");
ether_ifdetach(ifp);
goto fail;
}
error = bus_setup_intr(dev, sc->bge_irq,
INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
&sc->bge_intrhand);
} else
error = bus_setup_intr(dev, sc->bge_irq,
INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
&sc->bge_intrhand);
if (error) {
ether_ifdetach(ifp);
device_printf(sc->bge_dev, "couldn't set up irq\n");
goto fail;
}
/* Attach driver debugnet methods. */
DEBUGNET_SET(ifp, bge);
fail:
if (error)
bge_detach(dev);
return (error);
}
/*
 * Device detach routine.  Also invoked from the bge_attach() failure
 * path, so every step must tolerate a partially initialized softc.
 * Teardown order: polling deregistration, interface detach + chip
 * stop, taskqueue drain, media/PHY children, then the raw resources.
 */
static int
bge_detach(device_t dev)
{
	struct bge_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	ifp = sc->bge_ifp;
#ifdef DEVICE_POLLING
	/*
	 * When called from the attach failure path, attach may have
	 * failed before if_alloc(), leaving bge_ifp NULL; guard the
	 * dereference.
	 */
	if (ifp != NULL && (if_getcapenable(ifp) & IFCAP_POLLING) != 0)
		ether_poll_deregister(ifp);
#endif
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		/* Stop the chip under the lock, then drain the callout. */
		BGE_LOCK(sc);
		bge_stop(sc);
		BGE_UNLOCK(sc);
		callout_drain(&sc->bge_stat_ch);
	}
	/* Wait for any pending interrupt task (MSI path) to finish. */
	if (sc->bge_tq)
		taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
	if (sc->bge_flags & BGE_FLAG_TBI)
		ifmedia_removeall(&sc->bge_ifmedia);
	else if (sc->bge_miibus != NULL) {
		bus_generic_detach(dev);
		device_delete_child(dev, sc->bge_miibus);
	}
	bge_release_resources(sc);
	return (0);
}
/*
 * Free all resources acquired during attach: taskqueue, interrupt
 * handler, IRQ (and MSI allocation), memory BARs, ifnet, DMA memory
 * and the softc mutex.  Safe to call from a partially completed
 * attach; each teardown is guarded by a NULL/initialized check.
 */
static void
bge_release_resources(struct bge_softc *sc)
{
	device_t dev;
	dev = sc->bge_dev;
	if (sc->bge_tq != NULL)
		taskqueue_free(sc->bge_tq);
	/* Tear down the handler before releasing the IRQ resource. */
	if (sc->bge_intrhand != NULL)
		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
	if (sc->bge_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->bge_irq), sc->bge_irq);
		/* No-op when MSI was never allocated. */
		pci_release_msi(dev);
	}
	if (sc->bge_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->bge_res), sc->bge_res);
	/* Second BAR (APE shared memory) may not exist on all chips. */
	if (sc->bge_res2 != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->bge_res2), sc->bge_res2);
	if (sc->bge_ifp != NULL)
		if_free(sc->bge_ifp);
	bge_dma_free(sc);
	if (mtx_initialized(&sc->bge_mtx)) /* XXX */
		BGE_LOCK_DESTROY(sc);
}
/*
 * Perform a global chip reset and restore enough PCI/host state for
 * the controller to be reinitialized.  The exact sequence of register
 * accesses below follows the Broadcom programming requirements (and,
 * where marked, the Broadcom Linux driver); do not reorder.
 *
 * Returns 0 on success, 1 if the post-reset firmware/VCPU handshake
 * times out.
 */
static int
bge_reset(struct bge_softc *sc)
{
	device_t dev;
	uint32_t cachesize, command, mac_mode, mac_mode_mask, reset, val;
	void (*write_op)(struct bge_softc *, int, int);
	uint16_t devctl;
	int i;
	dev = sc->bge_dev;
	/* Preserve MAC mode bits (port mode/duplex, APE RX/TX enables). */
	mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
		mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;
	/*
	 * Select how to write BGE_MISC_CFG: direct for PCIe, indirect
	 * for others, register-indirect for older/5714/5906 parts.
	 */
	if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
	    (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
		if (sc->bge_flags & BGE_FLAG_PCIE)
			write_op = bge_writemem_direct;
		else
			write_op = bge_writemem_ind;
	} else
		write_op = bge_writereg_ind;
	/* Acquire the NVRAM arbitration lock before resetting. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5701) {
		CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
		for (i = 0; i < 8000; i++) {
			if (CSR_READ_4(sc, BGE_NVRAM_SWARB) &
			    BGE_NVRAMSWARB_GNT1)
				break;
			DELAY(20);
		}
		if (i == 8000) {
			if (bootverbose)
				device_printf(dev, "NVRAM lock timedout!\n");
		}
	}
	/* Take APE lock when performing reset. */
	bge_ape_lock(sc, BGE_APE_LOCK_GRC);
	/* Save some important PCI state. */
	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
	command = pci_read_config(dev, BGE_PCI_CMD, 4);
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
	/* Disable fastboot on controllers that support it. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
	    BGE_IS_5755_PLUS(sc)) {
		if (bootverbose)
			device_printf(dev, "Disabling fastboot\n");
		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
	}
	/*
	 * Write the magic number to SRAM at offset 0xB50.
	 * When firmware finishes its initialization it will
	 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
	 */
	bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
	reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
	/* XXX: Broadcom Linux driver. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		if (sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
		    (sc->bge_flags & BGE_FLAG_5717_PLUS) == 0) {
			if (CSR_READ_4(sc, 0x7E2C) == 0x60)	/* PCIE 1.0 */
				CSR_WRITE_4(sc, 0x7E2C, 0x20);
		}
		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
			/* Prevent PCIE link training during global reset */
			CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
			reset |= 1 << 29;
		}
	}
	/* 5906 has an on-die VCPU that must be told a driver reset is coming. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		val = CSR_READ_4(sc, BGE_VCPU_STATUS);
		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
		    val | BGE_VCPU_STATUS_DRV_RESET);
		val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
		    val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
	}
	/*
	 * Set GPHY Power Down Override to leave GPHY
	 * powered up in D0 uninitialized.
	 */
	if (BGE_IS_5705_PLUS(sc) &&
	    (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0)
		reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
	/* Issue global reset */
	write_op(sc, BGE_MISC_CFG, reset);
	/* PCIe needs a much longer settle time after the core reset. */
	if (sc->bge_flags & BGE_FLAG_PCIE)
		DELAY(100 * 1000);
	else
		DELAY(1000);
	/* XXX: Broadcom Linux driver. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
			DELAY(500000); /* wait for link training to complete */
			val = pci_read_config(dev, 0xC4, 4);
			pci_write_config(dev, 0xC4, val | (1 << 15), 4);
		}
		devctl = pci_read_config(dev,
		    sc->bge_expcap + PCIER_DEVICE_CTL, 2);
		/* Clear enable no snoop and disable relaxed ordering. */
		devctl &= ~(PCIEM_CTL_RELAXED_ORD_ENABLE |
		    PCIEM_CTL_NOSNOOP_ENABLE);
		pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_CTL,
		    devctl, 2);
		/* Restore max read request size wiped by the reset. */
		pci_set_max_read_req(dev, sc->bge_expmrq);
		/* Clear error status. */
		pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_STA,
		    PCIEM_STA_CORRECTABLE_ERROR |
		    PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR |
		    PCIEM_STA_UNSUPPORTED_REQ, 2);
	}
	/* Reset some of the PCI state that got zapped by reset. */
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
	val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
	    (sc->bge_flags & BGE_FLAG_PCIX) != 0)
		val |= BGE_PCISTATE_RETRY_SAME_DMA;
	/* Allow the APE firmware write access it expects. */
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
		val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config(dev, BGE_PCI_PCISTATE, val, 4);
	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
	pci_write_config(dev, BGE_PCI_CMD, command, 4);
	/*
	 * Disable PCI-X relaxed ordering to ensure status block update
	 * comes first then packet buffer DMA. Otherwise driver may
	 * read stale status block.
	 */
	if (sc->bge_flags & BGE_FLAG_PCIX) {
		devctl = pci_read_config(dev,
		    sc->bge_pcixcap + PCIXR_COMMAND, 2);
		devctl &= ~PCIXM_COMMAND_ERO;
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
			devctl &= ~PCIXM_COMMAND_MAX_READ;
			devctl |= PCIXM_COMMAND_MAX_READ_2048;
		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
			    PCIXM_COMMAND_MAX_READ);
			devctl |= PCIXM_COMMAND_MAX_READ_2048;
		}
		pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
		    devctl, 2);
	}
	/* Re-enable MSI, if necessary, and enable the memory arbiter. */
	if (BGE_IS_5714_FAMILY(sc)) {
		/* This chip disables MSI on reset. */
		if (sc->bge_flags & BGE_FLAG_MSI) {
			val = pci_read_config(dev,
			    sc->bge_msicap + PCIR_MSI_CTRL, 2);
			pci_write_config(dev,
			    sc->bge_msicap + PCIR_MSI_CTRL,
			    val | PCIM_MSICTRL_MSI_ENABLE, 2);
			val = CSR_READ_4(sc, BGE_MSI_MODE);
			CSR_WRITE_4(sc, BGE_MSI_MODE,
			    val | BGE_MSIMODE_ENABLE);
		}
		val = CSR_READ_4(sc, BGE_MARB_MODE);
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
	} else
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	/* Fix up byte swapping. */
	CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc));
	/* Restore the MAC mode bits saved at entry. */
	val = CSR_READ_4(sc, BGE_MAC_MODE);
	val = (val & ~mac_mode_mask) | mac_mode;
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
	DELAY(40);
	bge_ape_unlock(sc, BGE_APE_LOCK_GRC);
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		/* 5906: wait for the internal VCPU to finish its init. */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
			if (val & BGE_VCPU_STATUS_INIT_DONE)
				break;
			DELAY(100);
		}
		if (i == BGE_TIMEOUT) {
			device_printf(dev, "reset timed out\n");
			return (1);
		}
	} else {
		/*
		 * Poll until we see the 1's complement of the magic number.
		 * This indicates that the firmware initialization is complete.
		 * We expect this to fail if no chip containing the Ethernet
		 * address is fitted though.
		 */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			DELAY(10);
			val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
			if (val == ~BGE_SRAM_FW_MB_MAGIC)
				break;
		}
		if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
			device_printf(dev,
			    "firmware handshake timed out, found 0x%08x\n",
			    val);
		/* BCM57765 A0 needs additional time before accessing. */
		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
			DELAY(10 * 1000);	/* XXX */
	}
	/*
	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to insure the SERDES drive level is set
	 * to 1.2V.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
	    sc->bge_flags & BGE_FLAG_TBI) {
		val = CSR_READ_4(sc, BGE_SERDES_CFG);
		val = (val & ~0xFFF) | 0x880;
		CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
	}
	/* XXX: Broadcom Linux driver. */
	if (sc->bge_flags & BGE_FLAG_PCIE &&
	    !BGE_IS_5717_PLUS(sc) &&
	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5785) {
		/* Enable Data FIFO protection. */
		val = CSR_READ_4(sc, 0x7C00);
		CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
	}
	if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
		BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
		    CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	return (0);
}
/*
 * Requeue standard-ring mbuf slot `i` at the current producer index
 * after an RX error or an mbuf allocation failure, reusing the
 * previously recorded segment length.
 */
static __inline void
bge_rxreuse_std(struct bge_softc *sc, int i)
{
	struct bge_rx_bd *bd;

	bd = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
	bd->bge_flags = BGE_RXBDFLAG_END;
	bd->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
	bd->bge_idx = i;
	BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
}
/*
 * Requeue jumbo-ring mbuf slot `i` at the current producer index
 * after an RX error or an mbuf allocation failure.  Jumbo buffers
 * are described by up to four segments, so all four recorded segment
 * lengths are restored.
 */
static __inline void
bge_rxreuse_jumbo(struct bge_softc *sc, int i)
{
	struct bge_extrx_bd *bd;

	bd = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
	bd->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
	bd->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
	bd->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
	bd->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
	bd->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
	bd->bge_idx = i;
	BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
}
/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 *
 * `rx_prod' is the hardware producer index from the status block;
 * `holdlck' non-zero means the caller holds the softc lock and it
 * must be dropped around if_input().  Returns the number of packets
 * passed up the stack.
 */
static int
bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
{
	if_t ifp;
	int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
	uint16_t rx_cons;
	rx_cons = sc->bge_rx_saved_considx;
	/* Nothing to do. */
	if (rx_cons == rx_prod)
		return (rx_npkts);
	ifp = sc->bge_ifp;
	/* Sync descriptor rings before the CPU looks at them. */
	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
	/* Only sync the jumbo ring when the MTU actually requires it. */
	if (BGE_IS_JUMBO_CAPABLE(sc) &&
	    if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
	while (rx_cons != rx_prod) {
		struct bge_rx_bd *cur_rx;
		uint32_t rxidx;
		struct mbuf *m = NULL;
		uint16_t vlan_tag = 0;
		int have_tag = 0;
#ifdef DEVICE_POLLING
		/* Honor the polling budget set by bge_poll(). */
		if (if_getcapenable(ifp) & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
		rxidx = cur_rx->bge_idx;
		BGE_INC(rx_cons, sc->bge_return_ring_cnt);
		if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING &&
		    cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->bge_vlan_tag;
		}
		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			jumbocnt++;
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			/* Bad frame: recycle the buffer in place. */
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				bge_rxreuse_jumbo(sc, rxidx);
				continue;
			}
			/* Refill failed: recycle and count the drop. */
			if (bge_newbuf_jumbo(sc, rxidx) != 0) {
				bge_rxreuse_jumbo(sc, rxidx);
				if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
				continue;
			}
			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
		} else {
			stdcnt++;
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				bge_rxreuse_std(sc, rxidx);
				continue;
			}
			if (bge_newbuf_std(sc, rxidx) != 0) {
				bge_rxreuse_std(sc, rxidx);
				if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
				continue;
			}
			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
		}
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
#ifndef __NO_STRICT_ALIGNMENT
		/*
		 * For architectures with strict alignment we must make sure
		 * the payload is aligned.
		 */
		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
			    cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}
#endif
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;
		if (if_getcapenable(ifp) & IFCAP_RXCSUM)
			bge_rxcsum(sc, cur_rx, m);
		/*
		 * If we received a packet with a vlan tag,
		 * attach that information to the packet.
		 */
		if (have_tag) {
			m->m_pkthdr.ether_vtag = vlan_tag;
			m->m_flags |= M_VLANTAG;
		}
		/* Drop the lock around if_input() when the caller holds it. */
		if (holdlck != 0) {
			BGE_UNLOCK(sc);
			if_input(ifp, m);
			BGE_LOCK(sc);
		} else
			if_input(ifp, m);
		rx_npkts++;
		/* The interface may have been stopped while unlocked. */
		if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
			return (rx_npkts);
	}
	/* Hand the refilled rings back to the hardware. */
	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
	if (stdcnt > 0)
		bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
	if (jumbocnt > 0)
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
	sc->bge_rx_saved_considx = rx_cons;
	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
	if (stdcnt)
		bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
		    BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
	if (jumbocnt)
		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
		    BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
#ifdef notyet
	/*
	 * This register wraps very quickly under heavy packet drops.
	 * If you need correct statistics, you can enable this check.
	 */
	if (BGE_IS_5705_PLUS(sc))
		if_incierrors(ifp, CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS));
#endif
	return (rx_npkts);
}
/*
 * Translate the hardware receive checksum results in `cur_rx' into
 * mbuf csum_flags/csum_data.  The 5717+ family reports checksum
 * status differently (error flag instead of a raw IP checksum value,
 * IPv6 indication) from the older controllers.
 */
static void
bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
{

	if (BGE_IS_5717_PLUS(sc)) {
		/* Hardware checksum offload covers IPv4 frames only. */
		if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) != 0)
			return;
		if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0) {
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((cur_rx->bge_error_flag &
			    BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		}
		if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) != 0) {
			m->m_pkthdr.csum_data = cur_rx->bge_tcp_udp_csum;
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		}
		return;
	}
	/* Pre-5717 path: a good IP checksum reads back as 0xFFFF. */
	if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0) {
		m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
	}
	if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) != 0 &&
	    m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
		m->m_pkthdr.csum_data = cur_rx->bge_tcp_udp_csum;
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	}
}
/*
 * Reclaim completed transmit descriptors up to the hardware consumer
 * index `tx_cons': unload DMA maps, free mbufs, decrement the TX
 * count, and clear OACTIVE/watchdog state.  Called with the softc
 * lock held.
 */
static void
bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
{
	struct bge_tx_bd *cur_tx;
	if_t ifp;
	BGE_LOCK_ASSERT(sc);
	/* Nothing to do. */
	if (sc->bge_tx_saved_considx == tx_cons)
		return;
	ifp = sc->bge_ifp;
	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
	    sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->bge_tx_saved_considx != tx_cons) {
		uint32_t idx;
		idx = sc->bge_tx_saved_considx;
		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
		/* Count one output packet per END descriptor (last segment). */
		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		/* Only the descriptor holding the mbuf has a chain entry. */
		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
			    sc->bge_cdata.bge_tx_dmamap[idx],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
			    sc->bge_cdata.bge_tx_dmamap[idx]);
			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
			sc->bge_cdata.bge_tx_chain[idx] = NULL;
		}
		sc->bge_txcnt--;
		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
	}
	/* Ring has room again; cancel the watchdog if it is now empty. */
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	if (sc->bge_txcnt == 0)
		sc->bge_timer = 0;
}
#ifdef DEVICE_POLLING
/*
 * polling(4) handler: read the status block, note link events,
 * then process up to `count' received packets and reap completed
 * transmissions.  Returns the number of RX packets handled.
 */
static int
bge_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct bge_softc *sc = if_getsoftc(ifp);
	uint16_t rx_prod, tx_cons;
	uint32_t statusword;
	int rx_npkts = 0;
	BGE_LOCK(sc);
	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
		BGE_UNLOCK(sc);
		return (rx_npkts);
	}
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/* Fetch updates from the status block. */
	rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
	tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
	statusword = sc->bge_ldata.bge_status_block->bge_status;
	/* Clear the status so the next pass only sees the changes. */
	sc->bge_ldata.bge_status_block->bge_status = 0;
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
	if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
		sc->bge_link_evt++;
	if (cmd == POLL_AND_CHECK_STATUS)
		if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
		    sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
			bge_link_upd(sc);
	/* rxcycles is the per-call RX budget consumed by bge_rxeof(). */
	sc->rxcycles = count;
	rx_npkts = bge_rxeof(sc, rx_prod, 1);
	/* bge_rxeof() drops the lock; the interface may have stopped. */
	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
		BGE_UNLOCK(sc);
		return (rx_npkts);
	}
	bge_txeof(sc, tx_cons);
	if (!if_sendq_empty(ifp))
		bge_start_locked(ifp);
	BGE_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */
/*
 * MSI interrupt filter.  The MSI vector is not shared and the
 * controller has already masked further interrupts (single-shot
 * mode), so all real work can simply be deferred to the taskqueue.
 */
static int
bge_msi_intr(void *arg)
{
	struct bge_softc *sc = arg;

	taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
	return (FILTER_HANDLED);
}
/*
 * Taskqueue handler for the MSI interrupt path: snapshot the status
 * block, process link changes, re-enable interrupts via the mailbox
 * (writing the status tag when tagged status is in use), then run RX
 * and TX completion.
 */
static void
bge_intr_task(void *arg, int pending)
{
	struct bge_softc *sc;
	if_t ifp;
	uint32_t status, status_tag;
	uint16_t rx_prod, tx_cons;
	sc = (struct bge_softc *)arg;
	ifp = sc->bge_ifp;
	BGE_LOCK(sc);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		BGE_UNLOCK(sc);
		return;
	}
	/* Get updated status block. */
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/* Save producer/consumer indices. */
	rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
	tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
	status = sc->bge_ldata.bge_status_block->bge_status;
	status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
	/* Dirty the status flag. */
	sc->bge_ldata.bge_status_block->bge_status = 0;
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* Without tagged status, ack with 0 instead of the tag value. */
	if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
		status_tag = 0;
	if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
		bge_link_upd(sc);
	/* Let controller work. */
	bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
	    sc->bge_rx_saved_considx != rx_prod) {
		/* Check RX return ring producer/consumer. */
		BGE_UNLOCK(sc);
		bge_rxeof(sc, rx_prod, 0);
		BGE_LOCK(sc);
	}
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
		/* Check TX ring producer/consumer. */
		bge_txeof(sc, tx_cons);
		if (!if_sendq_empty(ifp))
			bge_start_locked(ifp);
	}
	BGE_UNLOCK(sc);
}
/*
 * Legacy (non-MSI) interrupt handler: ack the interrupt first,
 * read link status and the status block indices, then run RX/TX
 * completion and restart transmission if needed.
 */
static void
bge_intr(void *xsc)
{
	struct bge_softc *sc;
	if_t ifp;
	uint32_t statusword;
	uint16_t rx_prod, tx_cons;
	sc = xsc;
	BGE_LOCK(sc);
	ifp = sc->bge_ifp;
#ifdef DEVICE_POLLING
	/* polling(4) owns the device; ignore stray interrupts. */
	if (if_getcapenable(ifp) & IFCAP_POLLING) {
		BGE_UNLOCK(sc);
		return;
	}
#endif
	/*
	 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
	 * disable interrupts by writing nonzero like we used to, since with
	 * our current organization this just gives complications and
	 * pessimizations for re-enabling interrupts. We used to have races
	 * instead of the necessary complications. Disabling interrupts
	 * would just reduce the chance of a status update while we are
	 * running (by switching to the interrupt-mode coalescence
	 * parameters), but this chance is already very low so it is more
	 * efficient to get another interrupt than prevent it.
	 *
	 * We do the ack first to ensure another interrupt if there is a
	 * status update after the ack. We don't check for the status
	 * changing later because it is more efficient to get another
	 * interrupt than prevent it, not quite as above (not checking is
	 * a smaller optimization than not toggling the interrupt enable,
	 * since checking doesn't involve PCI accesses and toggling require
	 * the status check). So toggling would probably be a pessimization
	 * even with MSI. It would only be needed for using a task queue.
	 */
	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
	/*
	 * Do the mandatory PCI flush as well as get the link status.
	 */
	statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
	/* Make sure the descriptor ring indexes are coherent. */
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
	tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
	/* Clear the status word so the next update is detectable. */
	sc->bge_ldata.bge_status_block->bge_status = 0;
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* 5700 pre-B2 chips always check link; others only on events. */
	if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
	    statusword || sc->bge_link_evt)
		bge_link_upd(sc);
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
		/* Check RX return ring producer/consumer. */
		bge_rxeof(sc, rx_prod, 1);
	}
	/* bge_rxeof() drops the lock; re-check RUNNING before TX reap. */
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
		/* Check TX ring producer/consumer. */
		bge_txeof(sc, tx_cons);
	}
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
	    !if_sendq_empty(ifp))
		bge_start_locked(ifp);
	BGE_UNLOCK(sc);
}
/*
 * Send the ASF firmware a "driver alive" heartbeat.  Called once per
 * second from bge_tick(); the countdown throttles the actual mailbox
 * write to roughly every 2 seconds.  No-op unless ASF stack-up mode
 * was detected at attach time.
 */
static void
bge_asf_driver_up(struct bge_softc *sc)
{

	if ((sc->bge_asf_mode & ASF_STACKUP) == 0)
		return;
	if (sc->bge_asf_count != 0) {
		sc->bge_asf_count--;
		return;
	}
	sc->bge_asf_count = 2;
	bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
	    BGE_FW_CMD_DRV_ALIVE);
	bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
	bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
	    BGE_FW_HB_TIMEOUT_SEC);
	/* Kick the RX CPU so the firmware notices the new command. */
	CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
	    CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
	    BGE_RX_CPU_DRV_EVENT);
}
/*
 * Once-per-second callout: update statistics, tick the PHY (or force
 * a link poll in TBI mode), send the ASF heartbeat and run the
 * watchdog.  Runs with the softc lock held via callout_init_mtx.
 */
static void
bge_tick(void *xsc)
{
	struct bge_softc *sc = xsc;
	struct mii_data *mii = NULL;
	BGE_LOCK_ASSERT(sc);
	/* Synchronize with possible callout reset/stop. */
	if (callout_pending(&sc->bge_stat_ch) ||
	    !callout_active(&sc->bge_stat_ch))
		return;
	/* 5705+ parts expose stats via registers, older via SRAM block. */
	if (BGE_IS_5705_PLUS(sc))
		bge_stats_update_regs(sc);
	else
		bge_stats_update(sc);
	/* XXX Add APE heartbeat check here? */
	if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
		mii = device_get_softc(sc->bge_miibus);
		/*
		 * Do not touch PHY if we have link up. This could break
		 * IPMI/ASF mode or produce extra input errors
		 * (extra errors was reported for bcm5701 & bcm5704).
		 */
		if (!sc->bge_link)
			mii_tick(mii);
	} else {
		/*
		 * Since in TBI mode auto-polling can't be used we should poll
		 * link status manually. Here we register pending link event
		 * and trigger interrupt.
		 */
#ifdef DEVICE_POLLING
		/* In polling mode we poll link state in bge_poll(). */
		if (!(if_getcapenable(sc->bge_ifp) & IFCAP_POLLING))
#endif
		{
			sc->bge_link_evt++;
			/* 5700/5788 use the local-ctl bit to raise an intr. */
			if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
			    sc->bge_flags & BGE_FLAG_5788)
				BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
			else
				BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
		}
	}
	bge_asf_driver_up(sc);
	bge_watchdog(sc);
	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
}
/*
 * Accumulate MAC statistics from hardware registers into the softc's
 * bge_mac_stats structure (used on 5705+ controllers, which lack the
 * NIC-memory statistics block).  The register reads appear to be
 * read-to-clear, so each read is added to the running total.
 */
static void
bge_stats_update_regs(struct bge_softc *sc)
{
	struct bge_mac_stats *stats;
	uint32_t val;

	stats = &sc->bge_mac_stats;

	/* Transmit-side MAC statistics. */
	stats->ifHCOutOctets +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
	stats->etherStatsCollisions +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
	stats->outXonSent +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
	stats->outXoffSent +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
	stats->dot3StatsInternalMacTransmitErrors +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
	stats->dot3StatsSingleCollisionFrames +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
	stats->dot3StatsMultipleCollisionFrames +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
	stats->dot3StatsDeferredTransmissions +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
	stats->dot3StatsExcessiveCollisions +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
	stats->dot3StatsLateCollisions +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
	stats->ifHCOutUcastPkts +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
	stats->ifHCOutMulticastPkts +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
	stats->ifHCOutBroadcastPkts +=
	    CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);

	/* Receive-side MAC statistics. */
	stats->ifHCInOctets +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
	stats->etherStatsFragments +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
	stats->ifHCInUcastPkts +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
	stats->ifHCInMulticastPkts +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
	stats->ifHCInBroadcastPkts +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
	stats->dot3StatsFCSErrors +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
	stats->dot3StatsAlignmentErrors +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
	stats->xonPauseFramesReceived +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
	stats->xoffPauseFramesReceived +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
	stats->macControlFramesReceived +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
	stats->xoffStateEntered +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
	stats->dot3StatsFramesTooLong +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
	stats->etherStatsJabbers +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
	stats->etherStatsUndersizePkts +=
	    CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);

	/* Receive list placement local statistics. */
	stats->FramesDroppedDueToFilters +=
	    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
	stats->DmaWriteQueueFull +=
	    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
	stats->DmaWriteHighPriQueueFull +=
	    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
	stats->NoMoreRxBDs +=
	    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
	/*
	 * XXX
	 * Unlike other controllers, BGE_RXLP_LOCSTAT_IFIN_DROPS
	 * counter of BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0
	 * includes number of unwanted multicast frames. This comes
	 * from silicon bug and known workaround to get rough(not
	 * exact) counter is to enable interrupt on MBUF low water
	 * attention. This can be accomplished by setting
	 * BGE_HCCMODE_ATTN bit of BGE_HCC_MODE,
	 * BGE_BMANMODE_LOMBUF_ATTN bit of BGE_BMAN_MODE and
	 * BGE_MODECTL_FLOWCTL_ATTN_INTR bit of BGE_MODE_CTL.
	 * However that change would generate more interrupts and
	 * there are still possibilities of losing multiple frames
	 * during BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling.
	 * Given that the workaround still would not get correct
	 * counter I don't think it's worth to implement it. So
	 * ignore reading the counter on controllers that have the
	 * silicon bug.
	 */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5720_A0)
		stats->InputDiscards +=
		    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
	stats->InputErrors +=
	    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
	stats->RecvThresholdHit +=
	    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);

	if (sc->bge_flags & BGE_FLAG_RDMA_BUG) {
		/*
		 * If controller transmitted more than BGE_NUM_RDMA_CHANNELS
		 * frames, it's safe to disable workaround for DMA engine's
		 * miscalculation of TXMBUF space.
		 */
		if (stats->ifHCOutUcastPkts + stats->ifHCOutMulticastPkts +
		    stats->ifHCOutBroadcastPkts > BGE_NUM_RDMA_CHANNELS) {
			val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
				val &= ~BGE_RDMA_TX_LENGTH_WA_5719;
			else
				val &= ~BGE_RDMA_TX_LENGTH_WA_5720;
			CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
			sc->bge_flags &= ~BGE_FLAG_RDMA_BUG;
		}
	}
}
/*
 * Clear the hardware MAC statistics counters by reading each register
 * and discarding the value (the counters appear to be read-to-clear;
 * bge_stats_update_regs() accumulates these same registers).  Called
 * from bge_init_locked() so a fresh interface start begins at zero.
 */
static void
bge_stats_clear_regs(struct bge_softc *sc)
{

	/* Transmit-side counters. */
	CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
	CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);

	/* Receive-side counters. */
	CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
	CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);

	/* Receive list placement local counters. */
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
	CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
}
/*
 * Update interface counters from the NIC-memory statistics block
 * (pre-5705 controllers).  Each hardware counter is free-running, so
 * the delta since the previous call is computed against the values
 * cached in the softc and fed to if_inc_counter().
 */
static void
bge_stats_update(struct bge_softc *sc)
{
	if_t ifp;
	bus_size_t stats;
	uint32_t cnt;	/* current register value */

	ifp = sc->bge_ifp;

	stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;

/* Read the low 32 bits of a 64-bit stat out of the shared-memory window. */
#define	READ_STAT(sc, stats, stat) \
	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))

	cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, cnt - sc->bge_tx_collisions);
	sc->bge_tx_collisions = cnt;

	cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
	if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_nobds);
	sc->bge_rx_nobds = cnt;
	cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
	if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_inerrs);
	sc->bge_rx_inerrs = cnt;
	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
	if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_discards);
	sc->bge_rx_discards = cnt;

	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
	if_inc_counter(ifp, IFCOUNTER_OERRORS, cnt - sc->bge_tx_discards);
	sc->bge_tx_discards = cnt;

#undef	READ_STAT
}
/*
 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
 * but when such padded frames employ the bge IP/TCP checksum offload,
 * the hardware checksum assist gives incorrect results (possibly
 * from incorporating its own padding into the UDP/TCP checksum; who knows).
 * If we pad such runts with zeros, the onboard checksum comes out correct.
 *
 * Returns 0 on success or ENOBUFS if a needed extra mbuf could not be
 * allocated; in the latter case the chain is left unmodified and the
 * caller retains ownership.
 */
static __inline int
bge_cksum_pad(struct mbuf *m)
{
	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
	struct mbuf *last;

	/* If there's only the packet-header and we can pad there, use it. */
	if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
	    M_TRAILINGSPACE(m) >= padlen) {
		last = m;
	} else {
		/*
		 * Walk packet chain to find last mbuf. We will either
		 * pad there, or append a new mbuf and pad it.
		 */
		for (last = m; last->m_next != NULL; last = last->m_next);
		if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
			/* Allocate new empty mbuf, pad it. Compact later. */
			struct mbuf *n;

			MGET(n, M_NOWAIT, MT_DATA);
			if (n == NULL)
				return (ENOBUFS);
			n->m_len = 0;
			last->m_next = n;
			last = n;
		}
	}

	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
	last->m_len += padlen;
	m->m_pkthdr.len += padlen;

	return (0);
}
/*
 * Work around a silicon bug: the controller may hang if a single frame
 * contains two back-to-back send BDs with eight or fewer total bytes.
 * Scan the chain for two adjacent short (< 8 byte) mbufs and, when
 * found, defragment the frame into a single buffer.
 *
 * Returns the (possibly replaced) chain, or NULL if defragmentation
 * failed, in which case the original chain has been freed.
 */
static struct mbuf *
bge_check_short_dma(struct mbuf *m)
{
	struct mbuf *frag;
	int shortrun;

	shortrun = 0;
	for (frag = m; frag != NULL; frag = frag->m_next) {
		if (frag->m_len >= 8) {
			/* A long fragment breaks any run of short ones. */
			shortrun = 0;
			continue;
		}
		if (++shortrun > 1)
			break;
	}

	if (shortrun > 1) {
		frag = m_defrag(m, M_NOWAIT);
		if (frag == NULL)
			m_freem(m);
		return (frag);
	}
	return (m);
}
/*
 * Prepare an IPv4/TCP frame for hardware TSO: make the chain writable,
 * pull the Ethernet/IP/TCP headers into the first mbuf, zero the IP
 * checksum and TCP pseudo-checksum, and encode the MSS and header
 * length into *mss/*flags in the descriptor format the ASIC expects.
 * Returns the (possibly replaced) mbuf chain or NULL on failure; on
 * failure the original chain has been freed.
 */
static struct mbuf *
bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
    uint16_t *flags)
{
	struct ip *ip;
	struct tcphdr *tcp;
	struct mbuf *n;
	uint16_t hlen;
	uint32_t poff;

	if (M_WRITABLE(m) == 0) {
		/* Get a writable copy. */
		n = m_dup(m, M_NOWAIT);
		m_freem(m);
		if (n == NULL)
			return (NULL);
		m = n;
	}
	m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
	if (m == NULL)
		return (NULL);
	ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
	poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
	m = m_pullup(m, poff + sizeof(struct tcphdr));
	if (m == NULL)
		return (NULL);
	tcp = (struct tcphdr *)(mtod(m, char *) + poff);
	/* Pull up TCP options too; th_off covers them. */
	m = m_pullup(m, poff + (tcp->th_off << 2));
	if (m == NULL)
		return (NULL);
	/*
	 * It seems controller doesn't modify IP length and TCP pseudo
	 * checksum. These checksum computed by upper stack should be 0.
	 */
	*mss = m->m_pkthdr.tso_segsz;
	/* Re-derive header pointers: m_pullup() may have moved the data. */
	ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
	ip->ip_sum = 0;
	ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
	/* Clear pseudo checksum computed by TCP stack. */
	tcp = (struct tcphdr *)(mtod(m, char *) + poff);
	tcp->th_sum = 0;
	/*
	 * Broadcom controllers uses different descriptor format for
	 * TSO depending on ASIC revision. Due to TSO-capable firmware
	 * license issue and lower performance of firmware based TSO
	 * we only support hardware based TSO.
	 */
	/* Calculate header length, incl. TCP/IP options, in 32 bit units. */
	hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
	if (sc->bge_flags & BGE_FLAG_TSO3) {
		/*
		 * For BCM5717 and newer controllers, hardware based TSO
		 * uses the 14 lower bits of the bge_mss field to store the
		 * MSS and the upper 2 bits to store the lowest 2 bits of
		 * the IP/TCP header length. The upper 6 bits of the header
		 * length are stored in the bge_flags[14:10,4] field. Jumbo
		 * frames are supported.
		 */
		*mss |= ((hlen & 0x3) << 14);
		*flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
	} else {
		/*
		 * For BCM5755 and newer controllers, hardware based TSO uses
		 * the lower 11 bits to store the MSS and the upper 5 bits to
		 * store the IP/TCP header length. Jumbo frames are not
		 * supported.
		 */
		*mss |= (hlen << 11);
	}
	return (m);
}
/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 *
 * On success the chain is owned by the TX ring and *txidx is advanced
 * past the descriptors used.  On most failures the chain is freed and
 * *m_head set to NULL; for transient DMA-load or ring-full errors the
 * chain is left intact so the caller can requeue it.
 */
static int
bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
{
	bus_dma_segment_t	segs[BGE_NSEG_NEW];
	bus_dmamap_t		map;
	struct bge_tx_bd	*d;
	struct mbuf		*m = *m_head;
	uint32_t		idx = *txidx;
	uint16_t		csum_flags, mss, vlan_tag;
	int			nsegs, i, error;

	csum_flags = 0;
	mss = 0;
	vlan_tag = 0;
	/* Avoid the short-DMA hang on affected chips (see bge_check_short_dma). */
	if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
	    m->m_next != NULL) {
		*m_head = bge_check_short_dma(m);
		if (*m_head == NULL)
			return (ENOBUFS);
		m = *m_head;
	}
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		*m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
		if (*m_head == NULL)
			return (ENOBUFS);
		csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
		    BGE_TXBDFLAG_CPU_POST_DMA;
	} else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
			/* Zero-pad runts so hardware checksum assist is correct. */
			if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
			    (error = bge_cksum_pad(m)) != 0) {
				m_freem(m);
				*m_head = NULL;
				return (error);
			}
		}
	}

	if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
		if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
		    m->m_pkthdr.len > ETHER_MAX_LEN)
			csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
		if (sc->bge_forced_collapse > 0 &&
		    (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
			/*
			 * Forcedly collapse mbuf chains to overcome hardware
			 * limitation which only support a single outstanding
			 * DMA read operation.
			 */
			if (sc->bge_forced_collapse == 1)
				m = m_defrag(m, M_NOWAIT);
			else
				m = m_collapse(m, M_NOWAIT,
				    sc->bge_forced_collapse);
			/* Collapse is best-effort; fall back to the original chain. */
			if (m == NULL)
				m = *m_head;
			*m_head = m;
		}
	}

	map = sc->bge_cdata.bge_tx_dmamap[idx];
	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments; collapse the chain and retry once. */
		m = m_collapse(m, M_NOWAIT, BGE_NSEG_NEW);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
		    m, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error) {
			m_freem(m);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);

	/* Check if we have enough free send BDs. */
	if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
		bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);

	if (m->m_flags & M_VLANTAG) {
		csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
		vlan_tag = m->m_pkthdr.ether_vtag;
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5762 &&
	    (m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		/*
		 * 5725 family of devices corrupts TSO packets when TSO DMA
		 * buffers cross into regions which are within MSS bytes of
		 * a 4GB boundary. If we encounter the condition, drop the
		 * packet.
		 */
		for (i = 0; ; i++) {
			d = &sc->bge_ldata.bge_tx_ring[idx];
			d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
			d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
			d->bge_len = segs[i].ds_len;
			/* 32-bit wraparound here means we crossed the 4GB line. */
			if (d->bge_addr.bge_addr_lo + segs[i].ds_len + mss <
			    d->bge_addr.bge_addr_lo)
				break;
			d->bge_flags = csum_flags;
			d->bge_vlan_tag = vlan_tag;
			d->bge_mss = mss;
			if (i == nsegs - 1)
				break;
			BGE_INC(idx, BGE_TX_RING_CNT);
		}
		if (i != nsegs - 1) {
			/* Boundary hit mid-frame: unwind the map and drop. */
			bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
			m_freem(*m_head);
			*m_head = NULL;
			return (EIO);
		}
	} else {
		for (i = 0; ; i++) {
			d = &sc->bge_ldata.bge_tx_ring[idx];
			d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
			d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
			d->bge_len = segs[i].ds_len;
			d->bge_flags = csum_flags;
			d->bge_vlan_tag = vlan_tag;
			d->bge_mss = mss;
			if (i == nsegs - 1)
				break;
			BGE_INC(idx, BGE_TX_RING_CNT);
		}
	}

	/* Mark the last segment as end of packet... */
	d->bge_flags |= BGE_TXBDFLAG_END;

	/*
	 * Insure that the map for this transmission
	 * is placed at the array index of the last descriptor
	 * in this chain.
	 */
	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
	sc->bge_cdata.bge_tx_dmamap[idx] = map;
	sc->bge_cdata.bge_tx_chain[idx] = m;
	sc->bge_txcnt += nsegs;
	BGE_INC(idx, BGE_TX_RING_CNT);
	*txidx = idx;

	return (0);
}
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 * Caller must hold the driver lock.
 */
static void
bge_start_locked(if_t ifp)
{
	struct bge_softc *sc;
	struct mbuf *m_head;
	uint32_t prodidx;
	int count;

	sc = if_getsoftc(ifp);
	BGE_LOCK_ASSERT(sc);

	/* Nothing to do without link or a running, non-stalled interface. */
	if (!sc->bge_link ||
	    (if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	prodidx = sc->bge_tx_prodidx;

	for (count = 0; !if_sendq_empty(ifp);) {
		/* Keep 16 BDs of headroom so the ring never completely fills. */
		if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (bge_encap(sc, &m_head, &prodidx)) {
			/* Chain was consumed/freed by bge_encap(); give up. */
			if (m_head == NULL)
				break;
			/* Transient failure: requeue and stall the queue. */
			if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		++count;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		bpf_mtap_if(ifp, m_head);
	}

	/* Only touch the mailbox if something was actually queued. */
	if (count > 0)
		bge_start_tx(sc, prodidx);
}
/*
 * Hand newly queued descriptors to the chip: sync the TX ring, write
 * the producer index mailbox, and arm the transmit watchdog.
 */
static void
bge_start_tx(struct bge_softc *sc, uint32_t prodidx)
{

	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
	    sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
	/* Transmit. */
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);

	sc->bge_tx_prodidx = prodidx;

	/* Set a timeout in case the chip goes out to lunch. */
	sc->bge_timer = BGE_TX_TIMEOUT;
}
/*
 * Transmit entry point registered with the interface layer; simply
 * acquires the driver lock and defers to bge_start_locked().
 */
static void
bge_start(if_t ifp)
{
	struct bge_softc *sc = if_getsoftc(ifp);

	BGE_LOCK(sc);
	bge_start_locked(ifp);
	BGE_UNLOCK(sc);
}
/*
 * Bring the interface up: reset and reinitialize the chip, program MAC
 * address and filters, set up the RX/TX rings, enable the MAC, and
 * start the periodic tick.  No-op if the interface is already running.
 * Caller must hold the driver lock.
 */
static void
bge_init_locked(struct bge_softc *sc)
{
	if_t ifp;
	uint16_t *m;
	uint32_t mode;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	/* Cancel pending I/O and flush buffers. */
	bge_stop(sc);

	/* Full firmware-coordinated reset sequence. */
	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_START);
	bge_reset(sc);
	bge_sig_legacy(sc, BGE_RESET_START);
	bge_sig_post_reset(sc, BGE_RESET_START);

	bge_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	if (bge_blockinit(sc)) {
		device_printf(sc->bge_dev, "initialization failure\n");
		return;
	}

	ifp = sc->bge_ifp;

	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, if_getmtu(ifp) +
	    ETHER_HDR_LEN + ETHER_CRC_LEN +
	    (if_getcapenable(ifp) & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));

	/* Load our MAC address. */
	m = (uint16_t *)if_getlladdr(sc->bge_ifp);
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	/* Program promiscuous mode. */
	bge_setpromisc(sc);

	/* Program multicast filter. */
	bge_setmulti(sc);

	/* Program VLAN tag stripping. */
	bge_setvlan(sc);

	/* Override UDP checksum offloading. */
	if (sc->bge_forced_udpcsum == 0)
		sc->bge_csum_features &= ~CSUM_UDP;
	else
		sc->bge_csum_features |= CSUM_UDP;
	if (if_getcapabilities(ifp) & IFCAP_TXCSUM &&
	    if_getcapenable(ifp) & IFCAP_TXCSUM) {
		if_sethwassistbits(ifp, 0, (BGE_CSUM_FEATURES | CSUM_UDP));
		if_sethwassistbits(ifp, sc->bge_csum_features, 0);
	}

	/* Init RX ring. */
	if (bge_init_rx_ring_std(sc) != 0) {
		device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
		bge_stop(sc);
		return;
	}

	/*
	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
	 * memory to insure that the chip has in fact read the first
	 * entry of the ring.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
		uint32_t		v, i;
		for (i = 0; i < 10; i++) {
			DELAY(20);
			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
			if (v == (MCLBYTES - ETHER_ALIGN))
				break;
		}
		if (i == 10)
			device_printf (sc->bge_dev,
			    "5705 A0 chip failed to load RX ring\n");
	}

	/* Init jumbo RX ring. */
	if (BGE_IS_JUMBO_CAPABLE(sc) &&
	    if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN)) {
		if (bge_init_rx_ring_jumbo(sc) != 0) {
			device_printf(sc->bge_dev,
			    "no memory for jumbo Rx buffers.\n");
			bge_stop(sc);
			return;
		}
	}

	/* Init our RX return ring index. */
	sc->bge_rx_saved_considx = 0;

	/* Init our RX/TX stat counters. */
	sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;

	/* Init TX ring. */
	bge_init_tx_ring(sc);

	/* Enable TX MAC state machine lockup fix. */
	mode = CSR_READ_4(sc, BGE_TX_MODE);
	if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
		mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5762) {
		/* Preserve the chip's current JMB_FRM_LEN/CNT_DN settings. */
		mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
		mode |= CSR_READ_4(sc, BGE_TX_MODE) &
		    (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
	}
	/* Turn on transmitter. */
	CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
	DELAY(100);

	/* Turn on receiver. */
	mode = CSR_READ_4(sc, BGE_RX_MODE);
	if (BGE_IS_5755_PLUS(sc))
		mode |= BGE_RXMODE_IPV6_ENABLE;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5762)
		mode |= BGE_RXMODE_IPV4_FRAG_FIX;
	CSR_WRITE_4(sc,BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
	DELAY(10);

	/*
	 * Set the number of good frames to receive after RX MBUF
	 * Low Watermark has been reached. After the RX MAC receives
	 * this number of frames, it will drop subsequent incoming
	 * frames until the MBUF High Watermark is reached.
	 */
	if (BGE_IS_57765_PLUS(sc))
		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
	else
		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);

	/* Clear MAC statistics. */
	if (BGE_IS_5705_PLUS(sc))
		bge_stats_clear_regs(sc);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (if_getcapenable(ifp) & IFCAP_POLLING) {
		BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
		    BGE_PCIMISCCTL_MASK_PCI_INTR);
		bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
	} else
#endif

	/* Enable host interrupts. */
	{
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
	}

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	bge_ifmedia_upd_locked(ifp);

	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
}
/*
 * if_init entry point: acquire the driver lock and perform the real
 * initialization in bge_init_locked().
 */
static void
bge_init(void *xsc)
{
	struct bge_softc *sc;

	sc = xsc;
	BGE_LOCK(sc);
	bge_init_locked(sc);
	BGE_UNLOCK(sc);
}
/*
 * Set media options.  Unlocked wrapper around bge_ifmedia_upd_locked().
 */
static int
bge_ifmedia_upd(if_t ifp)
{
	struct bge_softc *sc;
	int error;

	sc = if_getsoftc(ifp);
	BGE_LOCK(sc);
	error = bge_ifmedia_upd_locked(ifp);
	BGE_UNLOCK(sc);

	return (error);
}
/*
 * Program the selected media.  For TBI (1000baseX fiber) cards the MAC
 * is configured directly; for copper the request is handed to the PHY
 * via mii(4).  Returns 0 on success or EINVAL for unsupported media.
 * Caller must hold the driver lock.
 */
static int
bge_ifmedia_upd_locked(if_t ifp)
{
	struct bge_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii;
	struct mii_softc *miisc;
	struct ifmedia *ifm;

	BGE_LOCK_ASSERT(sc);

	ifm = &sc->bge_ifmedia;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return (EINVAL);
		switch(IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			/*
			 * The BCM5704 ASIC appears to have a special
			 * mechanism for programming the autoneg
			 * advertisement registers in TBI mode.
			 */
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
				uint32_t sgdig;
				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
				if (sgdig & BGE_SGDIGSTS_DONE) {
					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
					sgdig |= BGE_SGDIGCFG_AUTO |
					    BGE_SGDIGCFG_PAUSE_CAP |
					    BGE_SGDIGCFG_ASYM_PAUSE;
					CSR_WRITE_4(sc, BGE_SGDIG_CFG,
					    sgdig | BGE_SGDIGCFG_SEND);
					DELAY(5);
					/* Re-arm without the SEND bit. */
					CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
				}
			}
			break;
		case IFM_1000_SX:
			/* Fixed 1000baseSX: just set the duplex bit. */
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			DELAY(40);
			break;
		default:
			return (EINVAL);
		}
		return (0);
	}

	sc->bge_link_evt++;
	mii = device_get_softc(sc->bge_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	mii_mediachg(mii);

	/*
	 * Force an interrupt so that we will call bge_link_upd
	 * if needed and clear any pending link state attention.
	 * Without this we are not getting any further interrupts
	 * for link state changes and thus will not UP the link and
	 * not be able to send in bge_start_locked. The only
	 * way to get things working was to receive a packet and
	 * get an RX intr.
	 * bge_tick should help for fiber cards and we might not
	 * need to do this here if BGE_FLAG_TBI is set but as
	 * we poll for fiber anyway it should not harm.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
	    sc->bge_flags & BGE_FLAG_5788)
		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
	else
		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);

	return (0);
}
/*
 * Report current media status.  For TBI cards the status is derived
 * from MAC registers; for copper it is obtained from the PHY via
 * mii_pollstat().
 */
static void
bge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct bge_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii;

	BGE_LOCK(sc);

	/* Report nothing while the interface is administratively down. */
	if ((if_getflags(ifp) & IFF_UP) == 0) {
		BGE_UNLOCK(sc);
		return;
	}
	if (sc->bge_flags & BGE_FLAG_TBI) {
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		/* PCS sync indicates link on fiber. */
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED)
			ifmr->ifm_status |= IFM_ACTIVE;
		else {
			ifmr->ifm_active |= IFM_NONE;
			BGE_UNLOCK(sc);
			return;
		}
		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
		BGE_UNLOCK(sc);
		return;
	}

	mii = device_get_softc(sc->bge_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	BGE_UNLOCK(sc);
}
/*
 * Interface ioctl handler: MTU changes, up/down and filter flags,
 * multicast list updates, media selection, and capability toggles
 * (checksum offload, TSO, VLAN features, DEVICE_POLLING).
 * Unhandled commands fall through to ether_ioctl().
 */
static int
bge_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct bge_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int flags, mask, error = 0;

	switch (command) {
	case SIOCSIFMTU:
		/* Range-check against jumbo capability before accepting. */
		if (BGE_IS_JUMBO_CAPABLE(sc) ||
		    (sc->bge_flags & BGE_FLAG_JUMBO_STD)) {
			if (ifr->ifr_mtu < ETHERMIN ||
			    ifr->ifr_mtu > BGE_JUMBO_MTU) {
				error = EINVAL;
				break;
			}
		} else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) {
			error = EINVAL;
			break;
		}
		BGE_LOCK(sc);
		if (if_getmtu(ifp) != ifr->ifr_mtu) {
			if_setmtu(ifp, ifr->ifr_mtu);
			/* Reinitialize so the new MTU reaches the hardware. */
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				bge_init_locked(sc);
			}
		}
		BGE_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		BGE_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two. Similarly for ALLMULTI.
			 */
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				flags = if_getflags(ifp) ^ sc->bge_if_flags;
				if (flags & IFF_PROMISC)
					bge_setpromisc(sc);
				if (flags & IFF_ALLMULTI)
					bge_setmulti(sc);
			} else
				bge_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				bge_stop(sc);
			}
		}
		sc->bge_if_flags = if_getflags(ifp);
		BGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			BGE_LOCK(sc);
			bge_setmulti(sc);
			BGE_UNLOCK(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* TBI cards own their media; copper defers to the PHY. */
		if (sc->bge_flags & BGE_FLAG_TBI) {
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->bge_ifmedia, command);
		} else {
			mii = device_get_softc(sc->bge_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		}
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				/* Register first; only mask interrupts on success. */
				error = ether_poll_register(bge_poll, ifp);
				if (error)
					return (error);
				BGE_LOCK(sc);
				BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
				    BGE_PCIMISCCTL_MASK_PCI_INTR);
				bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
				BGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				BGE_LOCK(sc);
				BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
				    BGE_PCIMISCCTL_MASK_PCI_INTR);
				bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
				BGE_UNLOCK(sc);
			}
		}
#endif
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
			if_togglecapenable(ifp, IFCAP_TXCSUM);
			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
				if_sethwassistbits(ifp,
				    sc->bge_csum_features, 0);
			else
				if_sethwassistbits(ifp, 0,
				    sc->bge_csum_features);
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if ((mask & IFCAP_TSO4) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
			if_togglecapenable(ifp, IFCAP_TSO4);
			if ((if_getcapenable(ifp) & IFCAP_TSO4) != 0)
				if_sethwassistbits(ifp, CSUM_TSO, 0);
			else
				if_sethwassistbits(ifp, 0, CSUM_TSO);
		}
		if (mask & IFCAP_VLAN_MTU) {
			/* MTU register depends on this flag; full re-init needed. */
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			bge_init(sc);
		}
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
			/* HWTSO over VLAN requires hardware tagging. */
			if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
				if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO);
			BGE_LOCK(sc);
			bge_setvlan(sc);
			BGE_UNLOCK(sc);
		}
#ifdef VLAN_CAPABILITIES
		if_vlancap(ifp);
#endif
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*
 * Transmit watchdog, run from bge_tick().  When the TX timer (armed in
 * bge_start_tx()) expires without completions, reset the interface —
 * unless active flow control explains the stall, in which case just
 * rearm the timer and wait.
 */
static void
bge_watchdog(struct bge_softc *sc)
{
	if_t ifp;
	uint32_t status;

	BGE_LOCK_ASSERT(sc);

	/* Timer not armed, or armed and not yet expired. */
	if (sc->bge_timer == 0 || --sc->bge_timer)
		return;

	/* If pause frames are active then don't reset the hardware. */
	if ((CSR_READ_4(sc, BGE_RX_MODE) & BGE_RXMODE_FLOWCTL_ENABLE) != 0) {
		status = CSR_READ_4(sc, BGE_RX_STS);
		if ((status & BGE_RXSTAT_REMOTE_XOFFED) != 0) {
			/*
			 * If link partner has us in XOFF state then wait for
			 * the condition to clear.
			 */
			CSR_WRITE_4(sc, BGE_RX_STS, status);
			sc->bge_timer = BGE_TX_TIMEOUT;
			return;
		} else if ((status & BGE_RXSTAT_RCVD_XOFF) != 0 &&
		    (status & BGE_RXSTAT_RCVD_XON) != 0) {
			/*
			 * Both XOFF and XON pause frames were seen since the
			 * last check, so flow control was recently active;
			 * give the transmitter more time instead of resetting.
			 * NOTE(review): original comment here was a duplicate
			 * of the XOFFED branch above — confirm intent.
			 */
			CSR_WRITE_4(sc, BGE_RX_STS, status);
			sc->bge_timer = BGE_TX_TIMEOUT;
			return;
		}
		/*
		 * Any other condition is unexpected and the controller
		 * should be reset.
		 */
	}

	ifp = sc->bge_ifp;
	if_printf(ifp, "watchdog timeout -- resetting\n");

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	bge_init_locked(sc);

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
/*
 * Disable one chip state-machine block: clear its enable bit in `reg`
 * and poll until the hardware reports the bit clear, giving up after
 * BGE_TIMEOUT polls of 100us each.  A timeout is not treated as fatal.
 */
static void
bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
{
	int tries;

	BGE_CLRBIT(sc, reg, bit);

	for (tries = BGE_TIMEOUT; tries > 0; tries--) {
		if ((CSR_READ_4(sc, reg) & bit) == 0)
			return;
		DELAY(100);
	}
	/* Timed out; the block may still be busy, but continue shutdown. */
}
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.  Disables interrupts, shuts down every receive,
 * transmit and memory-manager block in dependency order, resets the
 * chip, and releases all ring buffers.  Caller must hold the lock.
 */
static void
bge_stop(struct bge_softc *sc)
{
	if_t ifp;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	callout_stop(&sc->bge_stat_ch);

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);

	/*
	 * Disable all of the receiver blocks.
	 */
	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	if (BGE_IS_5700_FAMILY(sc))
		bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks.
	 */
	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if (BGE_IS_5700_FAMILY(sc))
		bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if (BGE_IS_5700_FAMILY(sc))
		bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Reset the flow-through queues. */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
	if (!(BGE_IS_5705_PLUS(sc))) {
		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}
	/* Update MAC statistics. */
	if (BGE_IS_5705_PLUS(sc))
		bge_stats_update_regs(sc);

	bge_reset(sc);
	bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
	bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);

	/*
	 * Keep the ASF firmware running if up.
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
	else
		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	if (BGE_IS_JUMBO_CAPABLE(sc))
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	/* Clear MAC's link state (PHY may still have link UP). */
	if (bootverbose && sc->bge_link)
		if_printf(sc->bge_ifp, "link DOWN\n");
	sc->bge_link = 0;

	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
}
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
bge_shutdown(device_t dev)
{
	struct bge_softc *sc = device_get_softc(dev);

	BGE_LOCK(sc);
	bge_stop(sc);
	BGE_UNLOCK(sc);

	return (0);
}
/*
 * Suspend method: bring the controller to a stopped state under the
 * softc lock; bge_resume() restarts it if the interface was up.
 */
static int
bge_suspend(device_t dev)
{
    struct bge_softc *sc = device_get_softc(dev);

    BGE_LOCK(sc);
    bge_stop(sc);
    BGE_UNLOCK(sc);

    return (0);
}
/*
 * Resume method: re-initialize the hardware if the interface was
 * administratively up, and kick the transmit path if it was running.
 */
static int
bge_resume(device_t dev)
{
    struct bge_softc *sc = device_get_softc(dev);
    if_t ifp;

    BGE_LOCK(sc);
    ifp = sc->bge_ifp;
    if ((if_getflags(ifp) & IFF_UP) != 0) {
        bge_init_locked(sc);
        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
            bge_start_locked(ifp);
    }
    BGE_UNLOCK(sc);

    return (0);
}
/*
 * Process a link state change event.  Called with the softc lock held
 * (asserted below).  Three detection strategies are used depending on
 * the hardware:
 *   1. BCM5700 (non-B2): MII interrupt driven, polled via BGE_MAC_STS.
 *   2. TBI (fiber): PCS sync state read directly from BGE_MAC_STS.
 *   3. Autopoll or mii_tick-driven copper: state taken from the PHY
 *      via mii_pollstat().
 */
static void
bge_link_upd(struct bge_softc *sc)
{
    struct mii_data *mii;
    uint32_t link, status;

    BGE_LOCK_ASSERT(sc);

    /* Clear 'pending link event' flag. */
    sc->bge_link_evt = 0;

    /*
     * Process link state changes.
     * Grrr. The link status word in the status block does
     * not work correctly on the BCM5700 rev AX and BX chips,
     * according to all available information. Hence, we have
     * to enable MII interrupts in order to properly obtain
     * async link changes. Unfortunately, this also means that
     * we have to read the MAC status register to detect link
     * changes, thereby adding an additional register access to
     * the interrupt handler.
     *
     * XXX: perhaps link state detection procedure used for
     * BGE_CHIPID_BCM5700_B2 can be used for others BCM5700 revisions.
     */
    if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
        sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
        status = CSR_READ_4(sc, BGE_MAC_STS);
        if (status & BGE_MACSTAT_MI_INTERRUPT) {
            mii = device_get_softc(sc->bge_miibus);
            mii_pollstat(mii);
            if (!sc->bge_link &&
                mii->mii_media_status & IFM_ACTIVE &&
                IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
                sc->bge_link++;
                if (bootverbose)
                    if_printf(sc->bge_ifp, "link UP\n");
            } else if (sc->bge_link &&
                (!(mii->mii_media_status & IFM_ACTIVE) ||
                IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
                sc->bge_link = 0;
                if (bootverbose)
                    if_printf(sc->bge_ifp, "link DOWN\n");
            }

            /* Clear the interrupt. */
            CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
                BGE_EVTENB_MI_INTERRUPT);
            /* Reading the PHY ISR acknowledges; then re-arm the IMR. */
            bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr,
                BRGPHY_MII_ISR);
            bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr,
                BRGPHY_MII_IMR, BRGPHY_INTRS);
        }
        return;
    }

    if (sc->bge_flags & BGE_FLAG_TBI) {
        /* Fiber (TBI): link follows PCS sync state in the MAC. */
        status = CSR_READ_4(sc, BGE_MAC_STS);
        if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
            if (!sc->bge_link) {
                sc->bge_link++;
                if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
                    BGE_CLRBIT(sc, BGE_MAC_MODE,
                        BGE_MACMODE_TBI_SEND_CFGS);
                    DELAY(40);
                }
                CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
                if (bootverbose)
                    if_printf(sc->bge_ifp, "link UP\n");
                if_link_state_change(sc->bge_ifp,
                    LINK_STATE_UP);
            }
        } else if (sc->bge_link) {
            sc->bge_link = 0;
            if (bootverbose)
                if_printf(sc->bge_ifp, "link DOWN\n");
            if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
        }
    } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
        /*
         * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit
         * in status word always set. Workaround this bug by reading
         * PHY link status directly.
         */
        link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
        if (link != sc->bge_link ||
            sc->bge_asicrev == BGE_ASICREV_BCM5700) {
            mii = device_get_softc(sc->bge_miibus);
            mii_pollstat(mii);
            if (!sc->bge_link &&
                mii->mii_media_status & IFM_ACTIVE &&
                IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
                sc->bge_link++;
                if (bootverbose)
                    if_printf(sc->bge_ifp, "link UP\n");
            } else if (sc->bge_link &&
                (!(mii->mii_media_status & IFM_ACTIVE) ||
                IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
                sc->bge_link = 0;
                if (bootverbose)
                    if_printf(sc->bge_ifp, "link DOWN\n");
            }
        }
    } else {
        /*
         * For controllers that call mii_tick, we have to poll
         * link status.
         */
        mii = device_get_softc(sc->bge_miibus);
        mii_pollstat(mii);
        bge_miibus_statchg(sc->bge_dev);
    }

    /* Disable MAC attention when link is up. */
    CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
        BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
        BGE_MACSTAT_LINK_CHANGED);
}
/*
 * Register the driver's per-device sysctl tree: optional register-debug
 * handlers, the tunable knobs (forced_collapse, msi, forced_udpcsum),
 * and the statistics subtree appropriate for the chip generation.
 */
static void
bge_add_sysctls(struct bge_softc *sc)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children;

    ctx = device_get_sysctl_ctx(sc->bge_dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));

#ifdef BGE_REGISTER_DEBUG
    /* Debug-only handlers; compiled in only with BGE_REGISTER_DEBUG. */
    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
        bge_sysctl_debug_info, "I", "Debug Information");

    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
        bge_sysctl_reg_read, "I", "MAC Register Read");

    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ape_read",
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
        bge_sysctl_ape_read, "I", "APE Register Read");

    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
        bge_sysctl_mem_read, "I", "Memory Read");
#endif

    /*
     * A common design characteristic for many Broadcom client controllers
     * is that they only support a single outstanding DMA read operation
     * on the PCIe bus. This means that it will take twice as long to fetch
     * a TX frame that is split into header and payload buffers as it does
     * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
     * these controllers, coalescing buffers to reduce the number of memory
     * reads is effective way to get maximum performance(about 940Mbps).
     * Without collapsing TX buffers the maximum TCP bulk transfer
     * performance is about 850Mbps. However forcing coalescing mbufs
     * consumes a lot of CPU cycles, so leave it off by default.
     */
    sc->bge_forced_collapse = 0;
    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
        CTLFLAG_RWTUN, &sc->bge_forced_collapse, 0,
        "Number of fragmented TX buffers of a frame allowed before "
        "forced collapsing");

    sc->bge_msi = 1;
    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "msi",
        CTLFLAG_RDTUN, &sc->bge_msi, 0, "Enable MSI");

    /*
     * It seems all Broadcom controllers have a bug that can generate UDP
     * datagrams with checksum value 0 when TX UDP checksum offloading is
     * enabled. Generating UDP checksum value 0 is RFC 768 violation.
     * Even though the probability of generating such UDP datagrams is
     * low, I don't want to see FreeBSD boxes to inject such datagrams
     * into network so disable UDP checksum offloading by default. Users
     * still override this behavior by setting a sysctl variable,
     * dev.bge.0.forced_udpcsum.
     */
    sc->bge_forced_udpcsum = 0;
    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
        CTLFLAG_RWTUN, &sc->bge_forced_udpcsum, 0,
        "Enable UDP checksum offloading even if controller can "
        "generate UDP checksum value 0");

    /* 5705+ chips expose stats via registers; older chips via SRAM. */
    if (BGE_IS_5705_PLUS(sc))
        bge_add_sysctl_stats_regs(sc, ctx, children);
    else
        bge_add_sysctl_stats(sc, ctx, children);
}
/*
 * Helper macro: register one read-only stat OID whose handler
 * (bge_sysctl_stats) fetches the counter from the NIC statistics block
 * using the offset of 'node' within struct bge_stats as arg2.
 */
#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
    SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, \
        CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, \
        offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", desc)

/*
 * Build the sysctl statistics tree for pre-5705 chips, which keep their
 * counters in the NIC-local statistics block: a top-level "stats" node
 * plus "rx" and "tx" subtrees.
 */
static void
bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *parent)
{
    struct sysctl_oid *tree;
    struct sysctl_oid_list *children, *schildren;

    tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE Statistics");
    schildren = children = SYSCTL_CHILDREN(tree);
    BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
        children, COSFramesDroppedDueToFilters,
        "FramesDroppedDueToFilters");
    BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
        children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
    BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
        children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
    BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
        children, nicNoMoreRxBDs, "NoMoreRxBDs");
    BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
        children, ifInDiscards, "InputDiscards");
    BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
        children, ifInErrors, "InputErrors");
    BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
        children, nicRecvThresholdHit, "RecvThresholdHit");
    BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
        children, nicDmaReadQueueFull, "DmaReadQueueFull");
    BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
        children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
    BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
        children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
    BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
        children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
    BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
        children, nicRingStatusUpdate, "RingStatusUpdate");
    BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
        children, nicInterrupts, "Interrupts");
    BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
        children, nicAvoidedInterrupts, "AvoidedInterrupts");
    BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
        children, nicSendThresholdHit, "SendThresholdHit");

    /* RX subtree. */
    tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx",
        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE RX Statistics");
    children = SYSCTL_CHILDREN(tree);
    BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
        children, rxstats.ifHCInOctets, "ifHCInOctets");
    BGE_SYSCTL_STAT(sc, ctx, "Fragments",
        children, rxstats.etherStatsFragments, "Fragments");
    BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
        children, rxstats.ifHCInUcastPkts, "UnicastPkts");
    BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
        children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
    BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
        children, rxstats.dot3StatsFCSErrors, "FCSErrors");
    BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
        children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
    BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
        children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
    BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
        children, rxstats.xoffPauseFramesReceived,
        "xoffPauseFramesReceived");
    BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
        children, rxstats.macControlFramesReceived,
        "ControlFramesReceived");
    BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
        children, rxstats.xoffStateEntered, "xoffStateEntered");
    BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
        children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
    BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
        children, rxstats.etherStatsJabbers, "Jabbers");
    BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
        children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
    BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
        children, rxstats.inRangeLengthError, "inRangeLengthError");
    BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
        children, rxstats.outRangeLengthError, "outRangeLengthError");

    /* TX subtree. */
    tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx",
        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE TX Statistics");
    children = SYSCTL_CHILDREN(tree);
    BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
        children, txstats.ifHCOutOctets, "ifHCOutOctets");
    BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
        children, txstats.etherStatsCollisions, "Collisions");
    BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
        children, txstats.outXonSent, "XonSent");
    BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
        children, txstats.outXoffSent, "XoffSent");
    BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
        children, txstats.flowControlDone, "flowControlDone");
    BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
        children, txstats.dot3StatsInternalMacTransmitErrors,
        "InternalMacTransmitErrors");
    BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
        children, txstats.dot3StatsSingleCollisionFrames,
        "SingleCollisionFrames");
    BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
        children, txstats.dot3StatsMultipleCollisionFrames,
        "MultipleCollisionFrames");
    BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
        children, txstats.dot3StatsDeferredTransmissions,
        "DeferredTransmissions");
    BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
        children, txstats.dot3StatsExcessiveCollisions,
        "ExcessiveCollisions");
    BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
        children, txstats.dot3StatsLateCollisions,
        "LateCollisions");
    BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
        children, txstats.ifHCOutUcastPkts, "UnicastPkts");
    BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
        children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
    BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
        children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
    BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
        children, txstats.dot3StatsCarrierSenseErrors,
        "CarrierSenseErrors");
    BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
        children, txstats.ifOutDiscards, "Discards");
    BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
        children, txstats.ifOutErrors, "Errors");
}

#undef BGE_SYSCTL_STAT
/*
 * Helper macro: register one read-only 64-bit stat OID that exports a
 * counter kept in the softc's bge_mac_stats (updated by the driver).
 */
#define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
    SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)

/*
 * Build the sysctl statistics tree for 5705+ chips, whose counters are
 * mirrored into sc->bge_mac_stats: a top-level "stats" node plus "rx"
 * and "tx" subtrees.
 */
static void
bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *parent)
{
    struct sysctl_oid *tree;
    struct sysctl_oid_list *child, *schild;
    struct bge_mac_stats *stats;

    stats = &sc->bge_mac_stats;
    tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE Statistics");
    schild = child = SYSCTL_CHILDREN(tree);
    BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
        &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
        &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
        &stats->DmaWriteHighPriQueueFull,
        "NIC DMA Write High Priority Queue Full");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
        &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
        &stats->InputDiscards, "Discarded Input Frames");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
        &stats->InputErrors, "Input Errors");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
        &stats->RecvThresholdHit, "NIC Recv Threshold Hit");

    /* RX subtree. */
    tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx",
        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE RX Statistics");
    child = SYSCTL_CHILDREN(tree);
    BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
        &stats->ifHCInOctets, "Inbound Octets");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
        &stats->etherStatsFragments, "Fragments");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
        &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
        &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
        &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
        &stats->dot3StatsFCSErrors, "FCS Errors");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
        &stats->dot3StatsAlignmentErrors, "Alignment Errors");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
        &stats->xonPauseFramesReceived, "XON Pause Frames Received");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
        &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
        &stats->macControlFramesReceived, "MAC Control Frames Received");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
        &stats->xoffStateEntered, "XOFF State Entered");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
        &stats->dot3StatsFramesTooLong, "Frames Too Long");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
        &stats->etherStatsJabbers, "Jabbers");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
        &stats->etherStatsUndersizePkts, "Undersized Packets");

    /* TX subtree. */
    tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx",
        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE TX Statistics");
    child = SYSCTL_CHILDREN(tree);
    BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
        &stats->ifHCOutOctets, "Outbound Octets");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
        &stats->etherStatsCollisions, "TX Collisions");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
        &stats->outXonSent, "XON Sent");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
        &stats->outXoffSent, "XOFF Sent");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
        &stats->dot3StatsInternalMacTransmitErrors,
        "Internal MAC TX Errors");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
        &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
        &stats->dot3StatsMultipleCollisionFrames,
        "Multiple Collision Frames");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
        &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
        &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
        &stats->dot3StatsLateCollisions, "Late Collisions");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
        &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
        &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
    BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
        &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
}

#undef BGE_SYSCTL_STAT_ADD64
/*
 * Sysctl handler for the legacy (pre-5705) statistics tree: read one
 * 32-bit counter out of the NIC-local statistics block through the
 * register window.  arg2 carries the counter's offset within
 * struct bge_stats (set up by BGE_SYSCTL_STAT).
 */
static int
bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
    struct bge_softc *sc = (struct bge_softc *)arg1;
    int off = arg2;
    uint32_t value;

    value = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + off +
        offsetof(bge_hostaddr, bge_addr_lo));
    return (sysctl_handle_int(oidp, &value, 0, req));
}
#ifdef BGE_REGISTER_DEBUG
/*
 * Debug sysctl: writing 1 dumps the status block, a range of MAC
 * registers (0x800-0x9FC) and the detected hardware capability flags to
 * the console.
 */
static int
bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
{
    struct bge_softc *sc;
    uint16_t *sbdata;
    int error, result, sbsz;
    int i, j;

    result = -1;
    error = sysctl_handle_int(oidp, &result, 0, req);
    if (error || (req->newptr == NULL))
        return (error);

    if (result == 1) {
        sc = (struct bge_softc *)arg1;

        /* The BCM5700 (except C0) uses the full-size status block. */
        if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
            sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
            sbsz = BGE_STATUS_BLK_SZ;
        else
            sbsz = 32;
        sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
        printf("Status Block:\n");
        BGE_LOCK(sc);
        bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
            sc->bge_cdata.bge_status_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        for (i = 0x0; i < sbsz / sizeof(uint16_t); ) {
            printf("%06x:", i);
            for (j = 0; j < 8; j++)
                printf(" %04x", sbdata[i++]);
            printf("\n");
        }

        printf("Registers:\n");
        for (i = 0x800; i < 0xA00; ) {
            printf("%06x:", i);
            for (j = 0; j < 8; j++) {
                printf(" %08x", CSR_READ_4(sc, i));
                i += 4;
            }
            printf("\n");
        }
        BGE_UNLOCK(sc);

        printf("Hardware Flags:\n");
        if (BGE_IS_5717_PLUS(sc))
            printf(" - 5717 Plus\n");
        if (BGE_IS_5755_PLUS(sc))
            printf(" - 5755 Plus\n");
        if (BGE_IS_575X_PLUS(sc))
            printf(" - 575X Plus\n");
        if (BGE_IS_5705_PLUS(sc))
            printf(" - 5705 Plus\n");
        if (BGE_IS_5714_FAMILY(sc))
            printf(" - 5714 Family\n");
        if (BGE_IS_5700_FAMILY(sc))
            printf(" - 5700 Family\n");
        if (sc->bge_flags & BGE_FLAG_JUMBO)
            printf(" - Supports Jumbo Frames\n");
        if (sc->bge_flags & BGE_FLAG_PCIX)
            printf(" - PCI-X Bus\n");
        if (sc->bge_flags & BGE_FLAG_PCIE)
            printf(" - PCI Express Bus\n");
        if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
            printf(" - No 3 LEDs\n");
        if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
            printf(" - RX Alignment Bug\n");
    }

    return (error);
}
/*
 * Debug sysctl: write a MAC register offset (< 0x8000) to read that
 * register and print its value on the console.
 */
static int
bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
{
    struct bge_softc *sc;
    int error, result;
    uint32_t val;

    /*
     * sysctl_handle_int() copies sizeof(int) bytes through its arg1
     * pointer, so the backing variable must be a full int; the old
     * uint16_t variable let the handler read/write past the object.
     */
    result = -1;
    error = sysctl_handle_int(oidp, &result, 0, req);
    if (error || (req->newptr == NULL))
        return (error);

    if (result >= 0 && result < 0x8000) {
        sc = (struct bge_softc *)arg1;
        val = CSR_READ_4(sc, result);
        printf("reg 0x%06X = 0x%08X\n", result, val);
    }

    return (error);
}
/*
 * Debug sysctl: write an APE register offset (< 0x8000) to read that
 * register and print its value on the console.
 */
static int
bge_sysctl_ape_read(SYSCTL_HANDLER_ARGS)
{
    struct bge_softc *sc;
    int error, result;
    uint32_t val;

    /*
     * sysctl_handle_int() copies sizeof(int) bytes through its arg1
     * pointer, so the backing variable must be a full int; the old
     * uint16_t variable let the handler read/write past the object.
     */
    result = -1;
    error = sysctl_handle_int(oidp, &result, 0, req);
    if (error || (req->newptr == NULL))
        return (error);

    if (result >= 0 && result < 0x8000) {
        sc = (struct bge_softc *)arg1;
        val = APE_READ_4(sc, result);
        printf("reg 0x%06X = 0x%08X\n", result, val);
    }

    return (error);
}
/*
 * Debug sysctl: write a device memory offset (< 0x8000) to read that
 * location via indirect access and print its value on the console.
 */
static int
bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
{
    struct bge_softc *sc;
    int error, result;
    uint32_t val;

    /*
     * sysctl_handle_int() copies sizeof(int) bytes through its arg1
     * pointer, so the backing variable must be a full int; the old
     * uint16_t variable let the handler read/write past the object.
     */
    result = -1;
    error = sysctl_handle_int(oidp, &result, 0, req);
    if (error || (req->newptr == NULL))
        return (error);

    if (result >= 0 && result < 0x8000) {
        sc = (struct bge_softc *)arg1;
        val = bge_readmem_ind(sc, result);
        printf("mem 0x%06X = 0x%08X\n", result, val);
    }

    return (error);
}
#endif
/*
 * Station-address lookup via firmware: not implemented, so always
 * report failure (non-zero) and let bge_get_eaddr() try the next
 * method in its table.
 */
static int
bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
{
    return (1);
}
/*
 * Fetch the station address from device shared memory.  The bootcode
 * marks a valid address by storing 0x484b ("HK") in the top half of
 * the high word.  Returns 0 on success, non-zero if no valid address
 * is present.
 */
static int
bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
{
    uint32_t hi, lo;

    hi = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
    if ((hi >> 16) != 0x484b)
        return (1);

    lo = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
    ether_addr[0] = (uint8_t)(hi >> 8);
    ether_addr[1] = (uint8_t)hi;
    ether_addr[2] = (uint8_t)(lo >> 24);
    ether_addr[3] = (uint8_t)(lo >> 16);
    ether_addr[4] = (uint8_t)(lo >> 8);
    ether_addr[5] = (uint8_t)lo;
    return (0);
}
/*
 * Fetch the station address from NVRAM.  The BCM5906 stores it at a
 * chip-specific offset; all other chips use the common location.
 */
static int
bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
{
    int off;

    off = (sc->bge_asicrev == BGE_ASICREV_BCM5906) ?
        BGE_EE_MAC_OFFSET_5906 : BGE_EE_MAC_OFFSET;
    return (bge_read_nvram(sc, ether_addr, off + 2, ETHER_ADDR_LEN));
}
/*
 * Fetch the station address from the EEPROM.  The BCM5906 has no
 * usable EEPROM path for this, so fail immediately on that chip.
 */
static int
bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
{
    if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
        return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
            ETHER_ADDR_LEN));
    return (1);
}
/*
 * Obtain the station address, trying each lookup method in turn until
 * one succeeds (returns 0).  Returns ENXIO if every method fails.
 */
static int
bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
{
    static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
        /* NOTE: Order is critical */
        bge_get_eaddr_fw,
        bge_get_eaddr_mem,
        bge_get_eaddr_nvram,
        bge_get_eaddr_eeprom,
        NULL
    };
    const bge_eaddr_fcn_t *fn;

    fn = bge_eaddr_funcs;
    while (*fn != NULL && (*fn)(sc, eaddr) != 0)
        fn++;
    return (*fn != NULL ? 0 : ENXIO);
}
/*
 * if_get_counter method: serve error and collision counters from the
 * driver-maintained MAC statistics on 5705+ chips; everything else
 * (and all counters on older chips) falls back to the stack default.
 */
static uint64_t
bge_get_counter(if_t ifp, ift_counter cnt)
{
    struct bge_softc *sc = if_getsoftc(ifp);
    struct bge_mac_stats *stats;

    if (!BGE_IS_5705_PLUS(sc))
        return (if_get_counter_default(ifp, cnt));

    stats = &sc->bge_mac_stats;
    if (cnt == IFCOUNTER_IERRORS)
        return (stats->NoMoreRxBDs + stats->InputDiscards +
            stats->InputErrors);
    if (cnt == IFCOUNTER_COLLISIONS)
        return (stats->etherStatsCollisions);
    return (if_get_counter_default(ifp, cnt));
}
#ifdef DEBUGNET
/*
 * Debugnet init hook: report receive ring count, in-flight cluster
 * budget and the mbuf cluster size debugnet should preallocate.
 */
static void
bge_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
{
    struct bge_softc *sc;

    sc = if_getsoftc(ifp);
    BGE_LOCK(sc);
    /*
     * There is only one logical receive ring, but it is backed
     * by two actual rings, for cluster- and jumbo-sized mbufs.
     * Debugnet expects only one size, so if jumbo is in use,
     * this says we have two rings of jumbo mbufs, but that's
     * only a little wasteful.
     */
    *nrxr = 2;
    *ncl = DEBUGNET_MAX_IN_FLIGHT;
    /* Use jumbo clusters when the configured MTU needs them. */
    if ((sc->bge_flags & BGE_FLAG_JUMBO_STD) != 0 &&
        (if_getmtu(sc->bge_ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN +
        ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN)))
        *clsize = MJUM9BYTES;
    else
        *clsize = MCLBYTES;
    BGE_UNLOCK(sc);
}
/*
 * Debugnet event hook: the bge driver needs no special handling for
 * debugnet start/finish events, so this is intentionally empty.
 */
static void
bge_debugnet_event(if_t ifp __unused, enum debugnet_ev event __unused)
{
}
/*
 * Debugnet transmit hook: encapsulate one mbuf chain onto the TX ring
 * and start transmission.  Returns non-zero when the interface is not
 * running or encapsulation fails.
 */
static int
bge_debugnet_transmit(if_t ifp, struct mbuf *m)
{
    struct bge_softc *sc = if_getsoftc(ifp);
    uint32_t prodidx;
    int error;

    if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING)
        return (1);

    prodidx = sc->bge_tx_prodidx;
    error = bge_encap(sc, &m, &prodidx);
    if (error != 0)
        return (error);

    bge_start_tx(sc, prodidx);
    return (0);
}
/*
 * Debugnet poll hook: snapshot the producer/consumer indices from the
 * status block, then reap completed RX and TX descriptors.
 */
static int
bge_debugnet_poll(if_t ifp, int count)
{
    struct bge_softc *sc;
    uint32_t rx_prod, tx_cons;

    sc = if_getsoftc(ifp);
    if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING)
        return (1);

    /* Sync the status block before and after reading the indices. */
    bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
        sc->bge_cdata.bge_status_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

    rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
    tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;

    bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
        sc->bge_cdata.bge_status_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    (void)bge_rxeof(sc, rx_prod, 0);
    bge_txeof(sc, tx_cons);
    return (0);
}
#endif /* DEBUGNET */
diff --git a/sys/dev/bxe/bxe.c b/sys/dev/bxe/bxe.c
index 38a45de95083..26a87354e5b1 100644
--- a/sys/dev/bxe/bxe.c
+++ b/sys/dev/bxe/bxe.c
@@ -1,19463 +1,19453 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#define BXE_DRIVER_VERSION "1.78.91"
#include "bxe.h"
#include "ecore_sp.h"
#include "ecore_init.h"
#include "ecore_init_ops.h"
#include "57710_int_offsets.h"
#include "57711_int_offsets.h"
#include "57712_int_offsets.h"
/*
* CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
* explicitly here for older kernels that don't include this changeset.
*/
#ifndef CTLTYPE_U64
#define CTLTYPE_U64 CTLTYPE_QUAD
#define sysctl_handle_64 sysctl_handle_quad
#endif
/*
* CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
* here as zero(0) for older kernels that don't include this changeset
* thereby masking the functionality.
*/
#ifndef CSUM_TCP_IPV6
#define CSUM_TCP_IPV6 0
#define CSUM_UDP_IPV6 0
#endif
#define BXE_DEF_SB_ATT_IDX 0x0001
#define BXE_DEF_SB_IDX 0x0002
/*
* FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
* function HW initialization.
*/
#define FLR_WAIT_USEC 10000 /* 10 msecs */
#define FLR_WAIT_INTERVAL 50 /* usecs */
#define FLR_POLL_CNT (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
/*
 * Register tuples used while draining the PBF block during FLR cleanup
 * (see the bxe_pf_flr_clnup() note above).  NOTE(review): the uint32_t
 * fields appear to hold register offsets to be polled per pN client —
 * confirm against the users of these structs.
 */
struct pbf_pN_buf_regs {
    int pN;                 /* PBF client/index number */
    uint32_t init_crd;      /* initial credit register */
    uint32_t crd;           /* current credit register */
    uint32_t crd_freed;     /* freed-credit counter register */
};

struct pbf_pN_cmd_regs {
    int pN;                 /* PBF client/index number */
    uint32_t lines_occup;   /* occupied command lines register */
    uint32_t lines_freed;   /* freed command lines counter register */
};
/*
* PCI Device ID Table used by bxe_probe().
*/
#define BXE_DEVDESC_MAX 64
static struct bxe_device_type bxe_devs[] = {
    /* { vendor id, device/chip id, subvendor, subdevice, description } */
    {
        BRCM_VENDORID,
        CHIP_NUM_57710,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57710 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57711,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57711E,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711E 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57712,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57712_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 MF 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57800,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57800_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 MF 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57810,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57810_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 MF 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57811,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57811_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 MF 10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_4_10,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 4x10GbE"
    },
    {
        /* The 57840 4x10 also ships with the QLogic vendor ID. */
        QLOGIC_VENDORID,
        CHIP_NUM_57840_4_10,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 4x10GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_2_20,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 2x20GbE"
    },
    {
        BRCM_VENDORID,
        CHIP_NUM_57840_MF,
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 MF 10GbE"
    },
    {
        /* End-of-table sentinel. */
        0, 0, 0, 0, NULL
    }
};
/* Dedicated malloc type for the chip's ILT (internal lookup table) memory. */
MALLOC_DECLARE(M_BXE_ILT);
MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");

/*
 * FreeBSD device entry points.
 */
static int bxe_probe(device_t);
static int bxe_attach(device_t);
static int bxe_detach(device_t);
static int bxe_shutdown(device_t);

/*
 * FreeBSD KLD module/device interface event handler method.
 */
static device_method_t bxe_methods[] = {
    /* Device interface (device_if.h) */
    DEVMETHOD(device_probe, bxe_probe),
    DEVMETHOD(device_attach, bxe_attach),
    DEVMETHOD(device_detach, bxe_detach),
    DEVMETHOD(device_shutdown, bxe_shutdown),
    /* Bus interface (bus_if.h) */
    DEVMETHOD(bus_print_child, bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
    KOBJMETHOD_END
};

/*
 * FreeBSD KLD Module data declaration
 */
static driver_t bxe_driver = {
    "bxe", /* module name */
    bxe_methods, /* event handler */
    sizeof(struct bxe_softc) /* extra data */
};

MODULE_DEPEND(bxe, pci, 1, 1, 1);
MODULE_DEPEND(bxe, ether, 1, 1, 1);
DRIVER_MODULE(bxe, pci, bxe_driver, 0, 0);

DEBUGNET_DEFINE(bxe);

/* resources needed for unloading a previously loaded device */

#define BXE_PREV_WAIT_NEEDED 1
struct mtx bxe_prev_mtx;
MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);

/*
 * Bookkeeping for devices seen by a previous driver instance, so a
 * reloaded driver can recover/clean them up before reuse.
 */
struct bxe_prev_list_node {
    LIST_ENTRY(bxe_prev_list_node) node;
    uint8_t bus;
    uint8_t slot;
    uint8_t path;
    uint8_t aer; /* XXX automatic error recovery */
    uint8_t undi;
};
static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);

static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
/* Tunable device values... */

SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "bxe driver parameters");

/* Debug */
unsigned long bxe_debug = 0;
SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
    &bxe_debug, 0, "Debug logging mode");

/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
static int bxe_interrupt_mode = INTR_MODE_MSIX;
SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
    &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");

/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
static int bxe_queue_count = 4;
SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
    &bxe_queue_count, 0, "Multi-Queue queue count");

/* max number of buffers per queue (default RX_BD_USABLE) */
static int bxe_max_rx_bufs = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
    &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");

/* Host interrupt coalescing RX tick timer (usecs) */
static int bxe_hc_rx_ticks = 25;
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
    &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");

/* Host interrupt coalescing TX tick timer (usecs) */
static int bxe_hc_tx_ticks = 50;
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
    &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");

/* Maximum number of Rx packets to process at a time */
static int bxe_rx_budget = 0xffffffff;
SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_RDTUN,
    &bxe_rx_budget, 0, "Rx processing budget");

/* Maximum LRO aggregation size */
static int bxe_max_aggregation_size = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_RDTUN,
    &bxe_max_aggregation_size, 0, "max aggregation size");

/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
static int bxe_mrrs = -1;
SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
    &bxe_mrrs, 0, "PCIe maximum read request size");

/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
static int bxe_autogreeen = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
    &bxe_autogreeen, 0, "AutoGrEEEn support");

/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
static int bxe_udp_rss = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
    &bxe_udp_rss, 0, "UDP RSS support");
#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */
/* offsets are expressed in 32-bit words into the stats structures */
#define STATS_OFFSET32(stat_name) \
    (offsetof(struct bxe_eth_stats, stat_name) / 4)
#define Q_STATS_OFFSET32(stat_name) \
    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
/*
 * Per-device statistics table.  Each entry holds the 32-bit-word offset of
 * the counter within struct bxe_eth_stats, its size in bytes (8 for the
 * paired hi/lo counters, 4 for plain 32-bit counters), flags selecting
 * port- vs function-level relevance, and its display name.
 */
static const struct {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
#define STATS_FLAGS_PORT 1
#define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */
#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
    char string[STAT_NAME_LEN];
} bxe_eth_stats_arr[] = {
    { STATS_OFFSET32(total_bytes_received_hi),
      8, STATS_FLAGS_BOTH, "rx_bytes" },
    { STATS_OFFSET32(error_bytes_received_hi),
      8, STATS_FLAGS_BOTH, "rx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
      8, STATS_FLAGS_PORT, "rx_crc_errors" },
    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
      8, STATS_FLAGS_PORT, "rx_align_errors" },
    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
      8, STATS_FLAGS_PORT, "rx_undersize_packets" },
    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
      8, STATS_FLAGS_PORT, "rx_oversize_packets" },
    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
      8, STATS_FLAGS_PORT, "rx_fragments" },
    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
      8, STATS_FLAGS_PORT, "rx_jabbers" },
    { STATS_OFFSET32(no_buff_discard_hi),
      8, STATS_FLAGS_BOTH, "rx_discards" },
    { STATS_OFFSET32(mac_filter_discard),
      4, STATS_FLAGS_PORT, "rx_filtered_packets" },
    { STATS_OFFSET32(mf_tag_discard),
      4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
    { STATS_OFFSET32(pfc_frames_received_hi),
      8, STATS_FLAGS_PORT, "pfc_frames_received" },
    { STATS_OFFSET32(pfc_frames_sent_hi),
      8, STATS_FLAGS_PORT, "pfc_frames_sent" },
    { STATS_OFFSET32(brb_drop_hi),
      8, STATS_FLAGS_PORT, "rx_brb_discard" },
    { STATS_OFFSET32(brb_truncate_hi),
      8, STATS_FLAGS_PORT, "rx_brb_truncate" },
    { STATS_OFFSET32(pause_frames_received_hi),
      8, STATS_FLAGS_PORT, "rx_pause_frames" },
    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
      8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
    { STATS_OFFSET32(nig_timer_max),
      4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
    { STATS_OFFSET32(total_bytes_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_bytes" },
    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
      8, STATS_FLAGS_PORT, "tx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
      8, STATS_FLAGS_PORT, "tx_mac_errors" },
    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
      8, STATS_FLAGS_PORT, "tx_carrier_errors" },
    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
      8, STATS_FLAGS_PORT, "tx_single_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
      8, STATS_FLAGS_PORT, "tx_multi_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
      8, STATS_FLAGS_PORT, "tx_deferred" },
    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
      8, STATS_FLAGS_PORT, "tx_excess_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
      8, STATS_FLAGS_PORT, "tx_late_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
      8, STATS_FLAGS_PORT, "tx_total_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
      8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
      8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
      8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
      8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
      8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
      8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
      8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
    { STATS_OFFSET32(pause_frames_sent_hi),
      8, STATS_FLAGS_PORT, "tx_pause_frames" },
    { STATS_OFFSET32(total_tpa_aggregations_hi),
      8, STATS_FLAGS_FUNC, "tpa_aggregations" },
    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
      8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
    { STATS_OFFSET32(total_tpa_bytes_hi),
      8, STATS_FLAGS_FUNC, "tpa_bytes"},
    { STATS_OFFSET32(eee_tx_lpi),
      4, STATS_FLAGS_PORT, "eee_tx_lpi"},
    { STATS_OFFSET32(rx_calls),
      4, STATS_FLAGS_FUNC, "rx_calls"},
    { STATS_OFFSET32(rx_pkts),
      4, STATS_FLAGS_FUNC, "rx_pkts"},
    { STATS_OFFSET32(rx_tpa_pkts),
      4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
    { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
      4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
    { STATS_OFFSET32(rx_bxe_service_rxsgl),
      4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
    { STATS_OFFSET32(rx_jumbo_sge_pkts),
      4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
    { STATS_OFFSET32(rx_soft_errors),
      4, STATS_FLAGS_FUNC, "rx_soft_errors"},
    { STATS_OFFSET32(rx_hw_csum_errors),
      4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
      4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
      4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
    { STATS_OFFSET32(rx_budget_reached),
      4, STATS_FLAGS_FUNC, "rx_budget_reached"},
    { STATS_OFFSET32(tx_pkts),
      4, STATS_FLAGS_FUNC, "tx_pkts"},
    { STATS_OFFSET32(tx_soft_errors),
      4, STATS_FLAGS_FUNC, "tx_soft_errors"},
    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
    { STATS_OFFSET32(tx_ofld_frames_lso),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
    { STATS_OFFSET32(tx_encap_failures),
      4, STATS_FLAGS_FUNC, "tx_encap_failures"},
    { STATS_OFFSET32(tx_hw_queue_full),
      4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
    { STATS_OFFSET32(tx_hw_max_queue_depth),
      4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
    { STATS_OFFSET32(tx_dma_mapping_failure),
      4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
    { STATS_OFFSET32(tx_max_drbr_queue_depth),
      4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
    { STATS_OFFSET32(tx_window_violation_std),
      4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
    { STATS_OFFSET32(tx_window_violation_tso),
      4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
    { STATS_OFFSET32(tx_chain_lost_mbuf),
      4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
    { STATS_OFFSET32(tx_frames_deferred),
      4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
    { STATS_OFFSET32(tx_queue_xoff),
      4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
    { STATS_OFFSET32(mbuf_defrag_attempts),
      4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
    { STATS_OFFSET32(mbuf_defrag_failures),
      4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
    { STATS_OFFSET32(mbuf_alloc_tx),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
    { STATS_OFFSET32(mbuf_alloc_rx),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
    { STATS_OFFSET32(mbuf_alloc_sge),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
    { STATS_OFFSET32(mbuf_alloc_tpa),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
    { STATS_OFFSET32(tx_queue_full_return),
      4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
    { STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
      4, STATS_FLAGS_FUNC, "bxe_tx_mq_sc_state_failures"},
    { STATS_OFFSET32(tx_request_link_down_failures),
      4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
    { STATS_OFFSET32(bd_avail_too_less_failures),
      4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
    { STATS_OFFSET32(tx_mq_not_empty),
      4, STATS_FLAGS_FUNC, "tx_mq_not_empty"},
    { STATS_OFFSET32(nsegs_path1_errors),
      4, STATS_FLAGS_FUNC, "nsegs_path1_errors"},
    { STATS_OFFSET32(nsegs_path2_errors),
      4, STATS_FLAGS_FUNC, "nsegs_path2_errors"}
};
/*
 * Per-queue statistics table: same layout as bxe_eth_stats_arr but without
 * the port/function flags; offsets index into struct bxe_eth_q_stats.
 */
static const struct {
    uint32_t offset;
    uint32_t size;
    char string[STAT_NAME_LEN];
} bxe_eth_q_stats_arr[] = {
    { Q_STATS_OFFSET32(total_bytes_received_hi),
      8, "rx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
      8, "rx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
      8, "rx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
      8, "rx_bcast_packets" },
    { Q_STATS_OFFSET32(no_buff_discard_hi),
      8, "rx_discards" },
    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
      8, "tx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
      8, "tx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
      8, "tx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
      8, "tx_bcast_packets" },
    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
      8, "tpa_aggregations" },
    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
      8, "tpa_aggregated_frames"},
    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
      8, "tpa_bytes"},
    { Q_STATS_OFFSET32(rx_calls),
      4, "rx_calls"},
    { Q_STATS_OFFSET32(rx_pkts),
      4, "rx_pkts"},
    { Q_STATS_OFFSET32(rx_tpa_pkts),
      4, "rx_tpa_pkts"},
    { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
      4, "rx_erroneous_jumbo_sge_pkts"},
    { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
      4, "rx_bxe_service_rxsgl"},
    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
      4, "rx_jumbo_sge_pkts"},
    { Q_STATS_OFFSET32(rx_soft_errors),
      4, "rx_soft_errors"},
    { Q_STATS_OFFSET32(rx_hw_csum_errors),
      4, "rx_hw_csum_errors"},
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
      4, "rx_ofld_frames_csum_ip"},
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
      4, "rx_ofld_frames_csum_tcp_udp"},
    { Q_STATS_OFFSET32(rx_budget_reached),
      4, "rx_budget_reached"},
    { Q_STATS_OFFSET32(tx_pkts),
      4, "tx_pkts"},
    { Q_STATS_OFFSET32(tx_soft_errors),
      4, "tx_soft_errors"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
      4, "tx_ofld_frames_csum_ip"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
      4, "tx_ofld_frames_csum_tcp"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
      4, "tx_ofld_frames_csum_udp"},
    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
      4, "tx_ofld_frames_lso"},
    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
      4, "tx_ofld_frames_lso_hdr_splits"},
    { Q_STATS_OFFSET32(tx_encap_failures),
      4, "tx_encap_failures"},
    { Q_STATS_OFFSET32(tx_hw_queue_full),
      4, "tx_hw_queue_full"},
    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
      4, "tx_hw_max_queue_depth"},
    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
      4, "tx_dma_mapping_failure"},
    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
      4, "tx_max_drbr_queue_depth"},
    { Q_STATS_OFFSET32(tx_window_violation_std),
      4, "tx_window_violation_std"},
    { Q_STATS_OFFSET32(tx_window_violation_tso),
      4, "tx_window_violation_tso"},
    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
      4, "tx_chain_lost_mbuf"},
    { Q_STATS_OFFSET32(tx_frames_deferred),
      4, "tx_frames_deferred"},
    { Q_STATS_OFFSET32(tx_queue_xoff),
      4, "tx_queue_xoff"},
    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
      4, "mbuf_defrag_attempts"},
    { Q_STATS_OFFSET32(mbuf_defrag_failures),
      4, "mbuf_defrag_failures"},
    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
      4, "mbuf_rx_bd_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
      4, "mbuf_rx_bd_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
      4, "mbuf_rx_tpa_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
      4, "mbuf_rx_tpa_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
      4, "mbuf_rx_sge_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
      4, "mbuf_rx_sge_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_alloc_tx),
      4, "mbuf_alloc_tx"},
    { Q_STATS_OFFSET32(mbuf_alloc_rx),
      4, "mbuf_alloc_rx"},
    { Q_STATS_OFFSET32(mbuf_alloc_sge),
      4, "mbuf_alloc_sge"},
    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
      4, "mbuf_alloc_tpa"},
    { Q_STATS_OFFSET32(tx_queue_full_return),
      4, "tx_queue_full_return"},
    { Q_STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
      4, "bxe_tx_mq_sc_state_failures"},
    { Q_STATS_OFFSET32(tx_request_link_down_failures),
      4, "tx_request_link_down_failures"},
    { Q_STATS_OFFSET32(bd_avail_too_less_failures),
      4, "bd_avail_too_less_failures"},
    { Q_STATS_OFFSET32(tx_mq_not_empty),
      4, "tx_mq_not_empty"},
    { Q_STATS_OFFSET32(nsegs_path1_errors),
      4, "nsegs_path1_errors"},
    { Q_STATS_OFFSET32(nsegs_path2_errors),
      4, "nsegs_path2_errors"}
};
/* entry counts of the two statistics tables above */
#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
/* forward declarations of helpers defined later in this file */
static void bxe_cmng_fns_init(struct bxe_softc *sc,
                              uint8_t read_cfg,
                              uint8_t cmng_type);
static int bxe_get_cmng_fns_mode(struct bxe_softc *sc);
static void storm_memset_cmng(struct bxe_softc *sc,
                              struct cmng_init *cmng,
                              uint8_t port);
static void bxe_set_reset_global(struct bxe_softc *sc);
static void bxe_set_reset_in_progress(struct bxe_softc *sc);
static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
                                 int engine);
static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
                                   uint8_t *global,
                                   uint8_t print);
static void bxe_int_disable(struct bxe_softc *sc);
static int bxe_release_leader_lock(struct bxe_softc *sc);
static void bxe_pf_disable(struct bxe_softc *sc);
static void bxe_free_fp_buffers(struct bxe_softc *sc);
static inline void bxe_update_rx_prod(struct bxe_softc *sc,
                                      struct bxe_fastpath *fp,
                                      uint16_t rx_bd_prod,
                                      uint16_t rx_cq_prod,
                                      uint16_t rx_sge_prod);
static void bxe_link_report_locked(struct bxe_softc *sc);
static void bxe_link_report(struct bxe_softc *sc);
static void bxe_link_status_update(struct bxe_softc *sc);
static void bxe_periodic_callout_func(void *xsc);
static void bxe_periodic_start(struct bxe_softc *sc);
static void bxe_periodic_stop(struct bxe_softc *sc);
static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
                                uint16_t prev_index,
                                uint16_t index);
static int bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
                                 int queue);
static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
                                 uint16_t index);
static uint8_t bxe_txeof(struct bxe_softc *sc,
                         struct bxe_fastpath *fp);
static void bxe_task_fp(struct bxe_fastpath *fp);
static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
                                     struct mbuf *m,
                                     uint8_t contents);
static int bxe_alloc_mem(struct bxe_softc *sc);
static void bxe_free_mem(struct bxe_softc *sc);
static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
static void bxe_free_fw_stats_mem(struct bxe_softc *sc);
static int bxe_interrupt_attach(struct bxe_softc *sc);
static void bxe_interrupt_detach(struct bxe_softc *sc);
static void bxe_set_rx_mode(struct bxe_softc *sc);
static int bxe_init_locked(struct bxe_softc *sc);
static int bxe_stop_locked(struct bxe_softc *sc);
static void bxe_sp_err_timeout_task(void *arg, int pending);
void bxe_parity_recover(struct bxe_softc *sc);
void bxe_handle_error(struct bxe_softc *sc);
static __noinline int bxe_nic_load(struct bxe_softc *sc,
                                   int load_mode);
static __noinline int bxe_nic_unload(struct bxe_softc *sc,
                                     uint32_t unload_mode,
                                     uint8_t keep_link);
static void bxe_handle_sp_tq(void *context, int pending);
static void bxe_handle_fp_tq(void *context, int pending);
static int bxe_add_cdev(struct bxe_softc *sc);
static void bxe_del_cdev(struct bxe_softc *sc);
int bxe_grc_dump(struct bxe_softc *sc);
static int bxe_alloc_buf_rings(struct bxe_softc *sc);
static void bxe_free_buf_rings(struct bxe_softc *sc);
/*
 * Calculate a CRC32 over a buffer using the CRC-32C (Castagnoli)
 * polynomial, bit-serially, then bit-mirror, byte-swap, and optionally
 * complement the result.
 *
 * NOTE: crc32_length MUST be a multiple of 8; on invalid input the seed
 * is returned unchanged.
 */
uint32_t
calc_crc32(uint8_t *crc32_packet,
           uint32_t crc32_length,
           uint32_t crc32_seed,
           uint8_t complement)
{
    const uint32_t poly = 0x1edc6f41; /* CRC-32C polynomial */
    uint32_t crc = crc32_seed;
    uint32_t idx;
    uint32_t bitpos;
    uint32_t mirrored;
    uint32_t remaining_shifts;

    /* invalid input: hand the seed back untouched */
    if ((crc32_packet == NULL) ||
        (crc32_length == 0) ||
        ((crc32_length % 8) != 0)) {
        return (crc);
    }

    /* bit-serial CRC, consuming the LSB of each byte first */
    for (idx = 0; idx < crc32_length; idx++) {
        uint8_t cur = crc32_packet[idx];
        for (bitpos = 0; bitpos < 8; bitpos++) {
            uint8_t msb = (uint8_t)(crc >> 31);
            crc <<= 1;
            if (msb != ((cur >> bitpos) & 0x1)) {
                crc ^= poly;
                crc |= 1; /* force bit 0, matching the hardware algorithm */
            }
        }
    }

    /* Last step is to:
     * 1. "mirror" every bit
     * 2. swap the 4 bytes
     * 3. complement each bit
     */

    /* mirror: reverse the bit order of crc into 'mirrored' */
    mirrored = crc;
    remaining_shifts = sizeof(crc) * 8 - 1;
    for (crc >>= 1; crc; crc >>= 1) {
        mirrored <<= 1;
        mirrored |= crc & 1;
        remaining_shifts--;
    }
    /* account for leading zero bits that the loop never visited */
    mirrored <<= remaining_shifts;

    /* byte swap: {b0,b1,b2,b3} -> {b3,b2,b1,b0} */
    crc = ((mirrored >> 24) & 0x000000ff) |
          ((mirrored >>  8) & 0x0000ff00) |
          ((mirrored <<  8) & 0x00ff0000) |
          ((mirrored << 24) & 0xff000000);

    if (complement) {
        crc = ~crc;
    }

    return (crc);
}
/*
 * Test bit 'nr' of *addr with acquire semantics.
 * Returns non-zero if the bit is set, 0 otherwise.
 */
int
bxe_test_bit(int nr,
             volatile unsigned long *addr)
{
    /* 1UL: an int-width (1 << nr) is undefined for nr >= 31 */
    return ((atomic_load_acq_long(addr) & (1UL << nr)) != 0);
}
/*
 * Atomically set bit 'nr' of *addr with acquire semantics.
 */
void
bxe_set_bit(unsigned int nr,
            volatile unsigned long *addr)
{
    /* 1UL: an int-width (1 << nr) is undefined for nr >= 31 */
    atomic_set_acq_long(addr, (1UL << nr));
}
/*
 * Atomically clear bit 'nr' of *addr with acquire semantics.
 */
void
bxe_clear_bit(int nr,
              volatile unsigned long *addr)
{
    /* 1UL: an int-width (1 << nr) is undefined for nr >= 31 */
    atomic_clear_acq_long(addr, (1UL << nr));
}
/*
 * Atomically set bit 'nr' of *addr using a cmpset retry loop with
 * acquire semantics.
 * Returns non-zero if the bit was already set before the call, 0 if not.
 */
int
bxe_test_and_set_bit(int nr,
                     volatile unsigned long *addr)
{
    unsigned long x;
    /* 1UL mask in a long: (1 << nr) is an int shift, undefined for nr >= 31 */
    unsigned long mask = (1UL << nr);

    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x | mask) == 0);

    /* normalize so bits >= 31 are not truncated by the int return type */
    return ((x & mask) != 0);
}
/*
 * Atomically clear bit 'nr' of *addr using a cmpset retry loop with
 * acquire semantics.
 * Returns non-zero if the bit was set before the call, 0 if not.
 */
int
bxe_test_and_clear_bit(int nr,
                       volatile unsigned long *addr)
{
    unsigned long x;
    /* 1UL mask in a long: (1 << nr) is an int shift, undefined for nr >= 31 */
    unsigned long mask = (1UL << nr);

    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x & ~mask) == 0);

    /* normalize so bits >= 31 are not truncated by the int return type */
    return ((x & mask) != 0);
}
/*
 * Compare-and-exchange on *addr: spins until *addr == 'old' and the swap
 * to 'new' succeeds, then returns a snapshot of *addr.
 *
 * NOTE(review): the returned 'x' is read separately from the cmpset, so it
 * is not guaranteed to be the exact pre-exchange value -- confirm callers
 * rely only on the completion of the exchange, not the return value.
 */
int
bxe_cmpxchg(volatile int *addr,
            int old,
            int new)
{
    int x;
    do {
        x = *addr;
    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
    return (x);
}
/*
 * Get DMA memory from the OS.
 *
 * bus_dmamap_load() callback: records the physical address of the single
 * segment the OS mapped for us, or zeroes the descriptor on error so the
 * caller can detect the failure.
 *
 * Returns:
 *   Nothing.
 */
static void
bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct bxe_dma *dma = arg;

    if (error != 0) {
        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
        dma->paddr = 0;
        dma->nseg  = 0;
        return;
    }

    dma->paddr = segs->ds_addr;
    dma->nseg  = nseg;
}
/*
 * Allocate a block of memory and map it for DMA.  No partial completions
 * allowed; every resource acquired so far is released if any later step
 * fails (goto-cleanup chain).
 *
 * Returns:
 *   0 = Success, !0 = Failure
 */
int
bxe_dma_alloc(struct bxe_softc *sc,
              bus_size_t size,
              struct bxe_dma *dma,
              const char *msg)
{
    int rc;

    if (dma->size > 0) {
        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
              (unsigned long)dma->size);
        return (1);
    }

    memset(dma, 0, sizeof(*dma)); /* sanity */
    dma->sc   = sc;
    dma->size = size;
    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);

    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                            BCM_PAGE_SIZE,      /* alignment */
                            0,                  /* boundary limit */
                            BUS_SPACE_MAXADDR,  /* restricted low */
                            BUS_SPACE_MAXADDR,  /* restricted hi */
                            NULL,               /* addr filter() */
                            NULL,               /* addr filter() arg */
                            size,               /* max map size */
                            1,                  /* num discontinuous */
                            size,               /* max seg size */
                            BUS_DMA_ALLOCNOW,   /* flags */
                            NULL,               /* lock() */
                            NULL,               /* lock() arg */
                            &dma->tag);         /* returned dma tag */
    if (rc != 0) {
        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
        goto fail_clear;
    }

    rc = bus_dmamem_alloc(dma->tag,
                          (void **)&dma->vaddr,
                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
                          &dma->map);
    if (rc != 0) {
        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
        goto fail_tag;
    }

    rc = bus_dmamap_load(dma->tag,
                         dma->map,
                         dma->vaddr,
                         size,
                         bxe_dma_map_addr, /* BLOGD in here */
                         dma,
                         BUS_DMA_NOWAIT);
    if (rc != 0) {
        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
        goto fail_mem;
    }

    return (0);

fail_mem:
    bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
fail_tag:
    bus_dma_tag_destroy(dma->tag);
fail_clear:
    memset(dma, 0, sizeof(*dma));
    return (1);
}
/*
 * Tear down a DMA block created by bxe_dma_alloc(): sync, unload, free,
 * and destroy the tag, then scrub the descriptor so it can be reused.
 */
void
bxe_dma_free(struct bxe_softc *sc,
             struct bxe_dma *dma)
{
    /* a zero size means the block was never successfully allocated */
    if (dma->size != 0) {
        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));

        bus_dmamap_sync(dma->tag, dma->map,
                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
        bus_dmamap_unload(dma->tag, dma->map);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
    }

    /* clear the descriptor so a future bxe_dma_alloc() sees it as free */
    memset(dma, 0, sizeof(*dma));
}
/*
 * These indirect read and write routines are only during init.
 * The locking is handled by the MCP.
 */
/*
 * Indirectly write 'val' to GRC address 'addr' through the PCI config
 * space window (GRC_ADDRESS selects the target, GRC_DATA carries the
 * value); the window address is reset to 0 afterwards.
 */
void
bxe_reg_wr_ind(struct bxe_softc *sc,
               uint32_t addr,
               uint32_t val)
{
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
    /* clear the window address */
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
}
/*
 * Indirectly read the GRC register at 'addr' through the PCI config
 * space window; the window address is reset to 0 afterwards.
 */
uint32_t
bxe_reg_rd_ind(struct bxe_softc *sc,
               uint32_t addr)
{
    uint32_t val;
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
    /* clear the window address */
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
    return (val);
}
/*
 * Acquire the hardware lock for 'resource' through this function's
 * MISC_REG_DRIVER_CONTROL register (functions 0-5 use the CONTROL_1 bank,
 * 6-7 the CONTROL_7 bank).  Polls every 5ms for up to 5 seconds.
 *
 * Returns 0 on success, -1 if the resource id is out of range, the lock
 * is already held, or the acquisition timed out.
 */
static int
bxe_acquire_hw_lock(struct bxe_softc *sc,
                    uint32_t resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;
    int cnt;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
            " resource_bit 0x%x\n", resource, resource_bit);
        return (-1);
    }

    /* select the per-function control register bank */
    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is not already taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
              resource, lock_status, resource_bit);
        return (-1);
    }

    /* try every 5ms for 5 seconds */
    for (cnt = 0; cnt < 1000; cnt++) {
        /* request the lock, then read back to see if it was granted */
        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
        lock_status = REG_RD(sc, hw_lock_control_reg);
        if (lock_status & resource_bit) {
            return (0);
        }
        DELAY(5000);
    }

    BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
        resource, resource_bit);
    return (-1);
}
/*
 * Release the hardware lock for 'resource' previously taken with
 * bxe_acquire_hw_lock().
 *
 * Returns 0 on success, -1 if the resource id is out of range or the
 * lock was not actually held.
 */
static int
bxe_release_hw_lock(struct bxe_softc *sc,
                    uint32_t resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
            " resource_bit 0x%x\n", resource, resource_bit);
        return (-1);
    }

    /* select the per-function control register bank (same as acquire) */
    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is currently taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (!(lock_status & resource_bit)) {
        BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
              resource, lock_status, resource_bit);
        return (-1);
    }

    /* writing the bit to the base register releases the lock */
    REG_WR(sc, hw_lock_control_reg, resource_bit);
    return (0);
}
/* take the softc PHY mutex first, then the shared MDIO hardware lock */
static void bxe_acquire_phy_lock(struct bxe_softc *sc)
{
    BXE_PHY_LOCK(sc);
    bxe_acquire_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
}
/* release in reverse order: MDIO hardware lock first, then the PHY mutex */
static void bxe_release_phy_lock(struct bxe_softc *sc)
{
    bxe_release_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
    BXE_PHY_UNLOCK(sc);
}
/*
 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
 * had we done things the other way around, if two pfs from the same port
 * would attempt to access nvram at the same time, we could run into a
 * scenario such as:
 * pf A takes the port lock.
 * pf B succeeds in taking the same lock since they are from the same port.
 * pf A takes the per pf misc lock. Performs eeprom access.
 * pf A finishes. Unlocks the per pf misc lock.
 * Pf B takes the lock and proceeds to perform it's own access.
 * pf A unlocks the per port lock, while pf B is still working (!).
 * mcp takes the per port lock and corrupts pf B's access (and/or has it's own
 * access corrupted by pf B).*
 */
/*
 * Take the NVRAM HW lock, then request the per-port software arbitration
 * bit and poll until it is granted.  Returns 0 on success, -1 on timeout.
 */
static int
bxe_acquire_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* request access to nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

    /* poll for the arbitration grant bit */
    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
            break;
        }
        DELAY(5);
    }

    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
        BLOGE(sc, "Cannot get access to nvram interface "
            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
            port, val);
        return (-1);
    }

    return (0);
}
/*
 * Relinquish the per-port NVRAM software arbitration bit, poll until the
 * hardware confirms it cleared, then drop the NVRAM HW lock.
 * Returns 0 on success, -1 on timeout.
 */
static int
bxe_release_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* relinquish nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

    /* poll until the arbitration bit actually drops */
    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
            break;
        }
        DELAY(5);
    }

    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
        BLOGE(sc, "Cannot free access to nvram interface "
            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
            port, val);
        return (-1);
    }

    /* release HW lock: protect against other PFs in PF Direct Assignment */
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    return (0);
}
/* set the NVRAM access-enable and write-enable bits (read-modify-write) */
static void
bxe_enable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* enable both bits, even on read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
}
/* clear the NVRAM access-enable and write-enable bits (read-modify-write) */
static void
bxe_disable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* disable both bits, even after read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
/*
 * Read one 32-bit word of NVRAM at 'offset'.  The result is stored
 * big-endian in *ret_val (see comment in the body); 'cmd_flags' carries
 * the FIRST/LAST sequencing bits.
 * Returns 0 on success, -1 if the command did not complete in time.
 */
static int
bxe_nvram_read_dword(struct bxe_softc *sc,
                     uint32_t offset,
                     uint32_t *ret_val,
                     uint32_t cmd_flags)
{
    int count, i, rc;
    uint32_t val;

    /* build the command word */
    cmd_flags |= MCPR_NVM_COMMAND_DOIT;

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* address of the NVRAM to read from */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue a read command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    *ret_val = 0;
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
        if (val & MCPR_NVM_COMMAND_DONE) {
            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
            /* we read nvram data in cpu order
             * but ethtool sees it as an array of bytes
             * converting to big-endian will do the work
             */
            *ret_val = htobe32(val);
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram read timeout expired "
            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
            offset, cmd_flags, val);
    }

    return (rc);
}
/*
 * Read 'buf_size' bytes of NVRAM starting at 'offset' into ret_buf.
 * Offset and size must be dword-aligned and within the flash; takes and
 * releases the NVRAM lock around the transfer.
 * Returns 0 on success, non-zero on parameter/lock/timeout failure.
 */
static int
bxe_nvram_read(struct bxe_softc *sc,
               uint32_t offset,
               uint8_t *ret_buf,
               int buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    int rc;

    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
            "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
            offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    /* read the first word(s); FIRST flag only on the initial dword */
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);

        /* advance to the next dword */
        offset += sizeof(uint32_t);
        ret_buf += sizeof(uint32_t);
        buf_size -= sizeof(uint32_t);
        cmd_flags = 0;
    }

    /* final dword carries the LAST flag to close the sequence */
    if (rc == 0) {
        cmd_flags |= MCPR_NVM_COMMAND_LAST;
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}
/*
 * Write one 32-bit word 'val' to NVRAM at 'offset'; 'cmd_flags' carries
 * the FIRST/LAST sequencing bits.
 * Returns 0 on success, -1 if the command did not complete in time.
 */
static int
bxe_nvram_write_dword(struct bxe_softc *sc,
                      uint32_t offset,
                      uint32_t val,
                      uint32_t cmd_flags)
{
    int count, i, rc;

    /* build the command word */
    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* write the data */
    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);

    /* address of the NVRAM to write to */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue the write command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
        if (val & MCPR_NVM_COMMAND_DONE) {
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram write timeout expired "
            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
            offset, cmd_flags, val);
    }

    return (rc);
}
/* bit position of a byte within its containing (aligned) dword */
#define BYTE_OFFSET(offset) (8 * (offset & 0x03))

/*
 * Write a single byte to NVRAM: read the enclosing aligned dword,
 * splice the new byte in, and write the dword back.
 * Returns 0 on success, non-zero on parameter/lock/timeout failure.
 */
static int
bxe_nvram_write1(struct bxe_softc *sc,
                 uint32_t offset,
                 uint8_t *data_buf,
                 int buf_size)
{
    uint32_t cmd_flags;
    uint32_t align_offset;
    uint32_t val;
    int rc;

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
            "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
            offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    /* single-dword transaction: FIRST and LAST at once */
    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
    align_offset = (offset & ~0x03);
    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);

    if (rc == 0) {
        /* replace only the target byte within the dword */
        val &= ~(0xff << BYTE_OFFSET(offset));
        val |= (*data_buf << BYTE_OFFSET(offset));

        /* nvram data is returned as an array of bytes
         * convert it back to cpu order
         */
        val = be32toh(val);

        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}
/*
 * Write 'buf_size' bytes from data_buf to NVRAM at 'offset', dword by
 * dword.  A single byte is delegated to bxe_nvram_write1(); otherwise
 * offset and size must be dword-aligned and within the flash.  FIRST/LAST
 * command flags are raised at page (NVRAM_PAGE_SIZE) and buffer
 * boundaries.  Returns 0 on success, non-zero on failure.
 */
static int
bxe_nvram_write(struct bxe_softc *sc,
                uint32_t offset,
                uint8_t *data_buf,
                int buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    uint32_t written_so_far;
    int rc;

    /* single-byte writes use the read-modify-write path */
    if (buf_size == 1) {
        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
    }

    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if (buf_size == 0) {
        return (0); /* nothing to do */
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
            "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
            offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    written_so_far = 0;
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((written_so_far < buf_size) && (rc == 0)) {
        /* LAST on the final dword or at the end of a flash page;
         * FIRST again at the start of each new page */
        if (written_so_far == (buf_size - sizeof(uint32_t))) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
        }

        memcpy(&val, data_buf, 4);

        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);

        /* advance to the next dword */
        offset += sizeof(uint32_t);
        data_buf += sizeof(uint32_t);
        written_so_far += sizeof(uint32_t);
        cmd_flags = 0;
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}
/* copy command into DMAE command memory and set DMAE command Go */
void
bxe_post_dmae(struct bxe_softc *sc,
struct dmae_cmd *dmae,
int idx)
{
uint32_t cmd_offset;
int i;
cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
}
REG_WR(sc, dmae_reg_go_c[idx], 1);
}
/* add a PCI/GRC completion destination and enable bit to a DMAE opcode */
uint32_t
bxe_dmae_opcode_add_comp(uint32_t opcode,
                         uint8_t  comp_type)
{
    uint32_t comp_bits;

    comp_bits  = ((uint32_t)comp_type << DMAE_CMD_C_DST_SHIFT);
    comp_bits |= DMAE_CMD_C_TYPE_ENABLE;

    return (opcode | comp_bits);
}
/* strip the source-reset request from a DMAE opcode, keep everything else */
uint32_t
bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
{
    uint32_t keep_mask = ~((uint32_t)DMAE_CMD_SRC_RESET);

    return (opcode & keep_mask);
}
/*
 * Build a DMAE command opcode for the given source/destination types,
 * optionally requesting completion reporting of 'comp_type'.
 */
uint32_t
bxe_dmae_opcode(struct bxe_softc *sc,
                uint8_t          src_type,
                uint8_t          dst_type,
                uint8_t          with_comp,
                uint8_t          comp_type)
{
    uint32_t op;

    /* transfer endpoints plus the mandatory reset requests */
    op = ((src_type << DMAE_CMD_SRC_SHIFT) |
          (dst_type << DMAE_CMD_DST_SHIFT) |
          DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

    /* issuing port and VN of this function */
    op |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
    op |= (SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT);
    op |= (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT);

    /* error handling policy */
    op |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
    op |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
    op |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif

    if (with_comp) {
        op = bxe_dmae_opcode_add_comp(op, comp_type);
    }

    return (op);
}
/*
 * Initialize a DMAE command for a src_type -> dst_type transfer that
 * reports completion by writing DMAE_COMP_VAL to the wb_comp scratch area.
 */
static void
bxe_prep_dmae_with_comp(struct bxe_softc *sc,
                        struct dmae_cmd  *dmae,
                        uint8_t          src_type,
                        uint8_t          dst_type)
{
    memset(dmae, 0, sizeof(*dmae));

    /* opcode: requested transfer with PCI completion enabled */
    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
                                   TRUE, DMAE_COMP_PCI);

    /* completion target: the wb_comp dword in the slowpath area */
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_val     = DMAE_COMP_VAL;
}
/* issue a DMAE command over the init channel and wait for completion */
/*
 * Polls the wb_comp scratch dword until the hardware writes DMAE_COMP_VAL,
 * holding the DMAE lock for the whole transaction. The poll is abandoned
 * early if a recovery flow is in progress (recovery_state check below).
 * Returns 0, DMAE_TIMEOUT, or DMAE_PCI_ERROR.
 */
static int
bxe_issue_dmae_with_comp(struct bxe_softc *sc,
                         struct dmae_cmd  *dmae)
{
    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
    /* adjust the poll budget for slow emulation/FPGA platforms */
    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;

    BXE_DMAE_LOCK(sc);

    /* reset completion */
    *wb_comp = 0;

    /* post the command on the channel used for initializations */
    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));

    /* wait for completion */
    DELAY(5);

    /* the completion value may also carry the PCI error flag; mask it */
    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
        if (!timeout ||
            (sc->recovery_state != BXE_RECOVERY_DONE &&
             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
            BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
                  *wb_comp, sc->recovery_state);
            BXE_DMAE_UNLOCK(sc);
            return (DMAE_TIMEOUT);
        }

        timeout--;
        DELAY(50);
    }

    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
        BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
              *wb_comp, sc->recovery_state);
        BXE_DMAE_UNLOCK(sc);
        return (DMAE_PCI_ERROR);
    }

    BXE_DMAE_UNLOCK(sc);

    return (0);
}
/*
 * Read up to 4 dwords from GRC address 'src_addr' into the slowpath
 * wb_data scratch buffer via DMAE. Before the DMAE engine is ready
 * (early init), falls back to direct/indirect register reads.
 * Panics on DMAE failure.
 */
void
bxe_read_dmae(struct bxe_softc *sc,
              uint32_t         src_addr,
              uint32_t         len32)
{
    struct dmae_cmd dmae;
    uint32_t *data;
    int i, rc;

    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));

    if (!sc->dmae_ready) {
        /* DMAE not usable yet: read the registers one by one */
        data = BXE_SP(sc, wb_data[0]);

        for (i = 0; i < len32; i++) {
            /* E1 chips require indirect access */
            data[i] = (CHIP_IS_E1(sc)) ?
                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
                          REG_RD(sc, (src_addr + (i * 4)));
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

    /* fill in addresses and len */
    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
    dmae.src_addr_hi = 0;
    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}
/*
 * Write 'len32' dwords from host DMA memory at 'dma_addr' to GRC address
 * 'dst_addr' via DMAE. Before the DMAE engine is ready, falls back to
 * direct/indirect register writes of the wb_data scratch buffer (which the
 * caller must have filled in that case). Panics on DMAE failure.
 */
void
bxe_write_dmae(struct bxe_softc *sc,
               bus_addr_t       dma_addr,
               uint32_t         dst_addr,
               uint32_t         len32)
{
    struct dmae_cmd dmae;
    int rc;

    if (!sc->dmae_ready) {
        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));

        if (CHIP_IS_E1(sc)) {
            /* E1 chips require indirect access */
            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        } else {
            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

    /* fill in addresses and len */
    dmae.src_addr_lo = U64_LO(dma_addr);
    dmae.src_addr_hi = U64_HI(dma_addr);
    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
    dmae.dst_addr_hi = 0;
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}
/*
 * Write 'len' dwords from host memory at 'phys_addr' to GRC address
 * 'addr', splitting the transfer into maximum-sized DMAE chunks.
 */
void
bxe_write_dmae_phys_len(struct bxe_softc *sc,
                        bus_addr_t       phys_addr,
                        uint32_t         addr,
                        uint32_t         len)
{
    int chunk = DMAE_LEN32_WR_MAX(sc);
    int done = 0; /* byte offset into both source and destination */

    /* issue full-sized chunks while more than one chunk remains */
    for (; len > chunk; len -= chunk) {
        bxe_write_dmae(sc,
                       (phys_addr + done), /* src DMA address */
                       (addr + done),      /* dst GRC address */
                       chunk);
        done += (chunk * 4); /* len counts dwords, offsets count bytes */
    }

    /* final (possibly short) chunk */
    bxe_write_dmae(sc,
                   (phys_addr + done), /* src DMA address */
                   (addr + done),      /* dst GRC address */
                   len);
}
/*
 * Stamp the CDU validation fields of an ETH connection context so the
 * hardware Context Distribution Unit accepts it for connection 'cid'.
 */
void
bxe_set_ctx_validation(struct bxe_softc   *sc,
                       struct eth_context *cxt,
                       uint32_t           cid)
{
    /* ustorm cxt validation */
    cxt->ustorm_ag_context.cdu_usage =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);

    /* xcontext validation */
    cxt->xstorm_ag_context.cdu_reserved =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}
/* program the HC timeout of one status-block index in CSTORM memory */
static void
bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
                            uint8_t          port,
                            uint8_t          fw_sb_id,
                            uint8_t          sb_index,
                            uint8_t          ticks)
{
    uint32_t reg_addr = (BAR_CSTRORM_INTMEM +
        CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));

    REG_WR8(sc, reg_addr, ticks);

    BLOGD(sc, DBG_LOAD,
          "port %d fw_sb_id %d sb_index %d ticks %d\n",
          port, fw_sb_id, sb_index, ticks);
}
/* set or clear the HC_ENABLED flag of one status-block index */
static void
bxe_storm_memset_hc_disable(struct bxe_softc *sc,
                            uint8_t          port,
                            uint16_t         fw_sb_id,
                            uint8_t          sb_index,
                            uint8_t          disable)
{
    uint32_t reg_addr = (BAR_CSTRORM_INTMEM +
        CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
    uint8_t flags;

    /* read-modify-write the enable flag, leaving other flags untouched */
    flags = REG_RD8(sc, reg_addr);
    flags &= ~HC_INDEX_DATA_HC_ENABLED;
    if (!disable) {
        flags |= (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
    }
    REG_WR8(sc, reg_addr, flags);

    BLOGD(sc, DBG_LOAD,
          "port %d fw_sb_id %d sb_index %d disable %d\n",
          port, fw_sb_id, sb_index, disable);
}
/*
 * Update the interrupt coalescing timeout (in usec) of one status-block
 * index and enable/disable host coalescing for it. A zero 'usec' with
 * 'disable' clear still disables coalescing for the index.
 */
void
bxe_update_coalesce_sb_index(struct bxe_softc *sc,
                             uint8_t          fw_sb_id,
                             uint8_t          sb_index,
                             uint8_t          disable,
                             uint16_t         usec)
{
    int port = SC_PORT(sc);
    uint8_t ticks = (usec / 4); /* XXX ??? 4 usec per HC tick, presumably -- confirm against HW docs */

    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);

    /* force disable when no timeout was requested */
    disable = (disable) ? 1 : ((usec) ? 0 : 1);
    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
}
/* elink callback: busy-wait for 'usecs' microseconds */
void
elink_cb_udelay(struct bxe_softc *sc,
                uint32_t         usecs)
{
    DELAY(usecs);
}
/* elink callback: read a 32-bit device register */
uint32_t
elink_cb_reg_read(struct bxe_softc *sc,
                  uint32_t         reg_addr)
{
    return (REG_RD(sc, reg_addr));
}
/* elink callback: write a 32-bit device register */
void
elink_cb_reg_write(struct bxe_softc *sc,
                   uint32_t         reg_addr,
                   uint32_t         val)
{
    REG_WR(sc, reg_addr, val);
}
/* elink callback: write 'len' dwords to device memory via DMAE */
void
elink_cb_reg_wb_write(struct bxe_softc *sc,
                      uint32_t         offset,
                      uint32_t         *wb_write,
                      uint16_t         len)
{
    REG_WR_DMAE(sc, offset, wb_write, len);
}
/*
 * elink callback: read 'len' dwords from device memory at 'offset' into
 * the caller's buffer via DMAE.
 *
 * The parameter was previously named 'wb_write' (copy-paste from the
 * write variant); renamed for clarity -- C parameter names are local,
 * so callers are unaffected.
 */
void
elink_cb_reg_wb_read(struct bxe_softc *sc,
                     uint32_t         offset,
                     uint32_t         *wb_read,
                     uint16_t         len)
{
    REG_RD_DMAE(sc, offset, wb_read, len);
}
/* elink callback: return the PCI path (engine) of this function */
uint8_t
elink_cb_path_id(struct bxe_softc *sc)
{
    return (SC_PATH(sc));
}
/*
 * elink callback: log a PHY/link event.
 * The variadic event arguments are currently ignored; only the event id
 * is reported (see XXX below).
 */
void
elink_cb_event_log(struct bxe_softc     *sc,
                   const elink_log_id_t elink_log_id,
                   ...)
{
    /* XXX */
    BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
}
/*
 * Drive one of the configurable SPIO pins (SPIO4/SPIO5) low or high, or
 * float it as a high-impedance input, under the SPIO hardware lock.
 * Returns 0 on success, -1 for an invalid pin. Unknown modes fall through
 * and leave the pin configuration unchanged.
 */
static int
bxe_set_spio(struct bxe_softc *sc,
             int              spio,
             uint32_t         mode)
{
    uint32_t spio_reg;

    /* Only 2 SPIOs are configurable */
    if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
        BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
        return (-1);
    }

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);

    /* read SPIO and mask except the float bits */
    spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);

    switch (mode) {
    case MISC_SPIO_OUTPUT_LOW:
        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
        /* clear FLOAT and set CLR */
        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
        spio_reg |=  (spio << MISC_SPIO_CLR_POS);
        break;

    case MISC_SPIO_OUTPUT_HIGH:
        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
        /* clear FLOAT and set SET */
        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
        spio_reg |=  (spio << MISC_SPIO_SET_POS);
        break;

    case MISC_SPIO_INPUT_HI_Z:
        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
        /* set FLOAT */
        spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
        break;

    default:
        /* unrecognized mode: register written back unmodified */
        break;
    }

    REG_WR(sc, MISC_REG_SPIO, spio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);

    return (0);
}
/*
 * Read the current value of a GPIO pin.
 * Returns 1/0 for the pin state, or -1 for an invalid pin number.
 */
static int
bxe_gpio_read(struct bxe_softc *sc,
              int              gpio_num,
              uint8_t          port)
{
    /* The GPIO should be swapped if swap register is set and active */
    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
    /* port 1 pins live in the upper half of the register */
    int gpio_shift = (gpio_num +
                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);
    uint32_t gpio_reg;

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
                  " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
              gpio_mask);
        return (-1);
    }

    /* read GPIO value */
    gpio_reg = REG_RD(sc, MISC_REG_GPIO);

    /* get the requested pin value */
    return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
}
/*
 * Configure a GPIO pin as output low/high or as a floating input,
 * under the GPIO hardware lock.
 * Returns 0 on success, -1 for an invalid pin number. Unknown modes fall
 * through and leave the pin configuration unchanged.
 */
static int
bxe_gpio_write(struct bxe_softc *sc,
               int              gpio_num,
               uint32_t         mode,
               uint8_t          port)
{
    /* The GPIO should be swapped if swap register is set and active */
    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
    /* port 1 pins live in the upper half of the register */
    int gpio_shift = (gpio_num +
                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);
    uint32_t gpio_reg;

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
                  " gpio_shift %d gpio_mask 0x%x\n",
              gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
        return (-1);
    }

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    /* read GPIO and mask except the float bits */
    gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

    switch (mode) {
    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
        BLOGD(sc, DBG_PHY,
              "Set GPIO %d (shift %d) -> output low\n",
              gpio_num, gpio_shift);
        /* clear FLOAT and set CLR */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
        break;

    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
        BLOGD(sc, DBG_PHY,
              "Set GPIO %d (shift %d) -> output high\n",
              gpio_num, gpio_shift);
        /* clear FLOAT and set SET */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
        break;

    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
        BLOGD(sc, DBG_PHY,
              "Set GPIO %d (shift %d) -> input\n",
              gpio_num, gpio_shift);
        /* set FLOAT */
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
        break;

    default:
        /* unrecognized mode: register written back unmodified */
        break;
    }

    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    return (0);
}
/*
 * Configure several GPIO pins at once; 'pins' is a bitmask already
 * adjusted for port swapping by the caller.
 * Returns 0 on success, -1 for an unknown mode.
 */
static int
bxe_gpio_mult_write(struct bxe_softc *sc,
                    uint8_t          pins,
                    uint32_t         mode)
{
    uint32_t gpio_reg;

    /* any port swapping should be handled by caller */

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    /* read GPIO and mask except the float bits */
    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);

    switch (mode) {
    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
        /* set CLR */
        gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
        break;

    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
        /* set SET */
        gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
        break;

    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
        /* set FLOAT */
        gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
        break;

    default:
        /* unlike the single-pin variant, an unknown mode is an error here */
        BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
                  " gpio_reg 0x%x\n", pins, mode, gpio_reg);
        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
        return (-1);
    }

    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    return (0);
}
/*
 * Set or clear the interrupt-trigger state of a GPIO pin, under the GPIO
 * hardware lock.
 * Returns 0 on success, -1 for an invalid pin number. Unknown modes fall
 * through and leave the register unchanged.
 */
static int
bxe_gpio_int_write(struct bxe_softc *sc,
                   int              gpio_num,
                   uint32_t         mode,
                   uint8_t          port)
{
    /* The GPIO should be swapped if swap register is set and active */
    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
    /* port 1 pins live in the upper half of the register */
    int gpio_shift = (gpio_num +
                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);
    uint32_t gpio_reg;

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
                  " gpio_shift %d gpio_mask 0x%x\n",
              gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
        return (-1);
    }

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    /* read GPIO int */
    gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);

    switch (mode) {
    case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
        BLOGD(sc, DBG_PHY,
              "Clear GPIO INT %d (shift %d) -> output low\n",
              gpio_num, gpio_shift);
        /* clear SET and set CLR */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
        break;

    case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
        BLOGD(sc, DBG_PHY,
              "Set GPIO INT %d (shift %d) -> output high\n",
              gpio_num, gpio_shift);
        /* clear CLR and set SET */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
        break;

    default:
        /* unrecognized mode: register written back unmodified */
        break;
    }

    REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    return (0);
}
/* elink callback: read a GPIO pin (1/0, or -1 cast to uint32_t on error) */
uint32_t
elink_cb_gpio_read(struct bxe_softc *sc,
                   uint16_t         gpio_num,
                   uint8_t          port)
{
    return (bxe_gpio_read(sc, gpio_num, port));
}
/* elink callback: set a GPIO pin mode */
uint8_t
elink_cb_gpio_write(struct bxe_softc *sc,
                    uint16_t         gpio_num,
                    uint8_t          mode, /* 0=low 1=high */
                    uint8_t          port)
{
    return (bxe_gpio_write(sc, gpio_num, mode, port));
}
/* elink callback: set the mode of multiple GPIO pins at once */
uint8_t
elink_cb_gpio_mult_write(struct bxe_softc *sc,
                         uint8_t          pins,
                         uint8_t          mode) /* 0=low 1=high */
{
    return (bxe_gpio_mult_write(sc, pins, mode));
}
/* elink callback: set/clear the interrupt state of a GPIO pin */
uint8_t
elink_cb_gpio_int_write(struct bxe_softc *sc,
                        uint16_t         gpio_num,
                        uint8_t          mode, /* 0=low 1=high */
                        uint8_t          port)
{
    return (bxe_gpio_int_write(sc, gpio_num, mode, port));
}
/*
 * elink callback: raise the per-function "link changed" general attention
 * bit so other driver instances/firmware notice the change.
 */
void
elink_cb_notify_link_changed(struct bxe_softc *sc)
{
    REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
                (SC_FUNC(sc) * sizeof(uint32_t))), 1);
}
/* send the MCP a request, block until there is a reply */
/*
 * Writes 'command' (tagged with an incrementing sequence number) and
 * 'param' to this function's shared-memory mailbox, then polls the
 * firmware response header until the sequence numbers match or ~5 seconds
 * elapse. Returns the firmware response code, or 0 if the firmware never
 * answered. Serialized by the FWMB lock.
 */
uint32_t
elink_cb_fw_command(struct bxe_softc *sc,
                    uint32_t         command,
                    uint32_t         param)
{
    int mb_idx = SC_FW_MB_IDX(sc);
    uint32_t seq;
    uint32_t rc = 0;
    uint32_t cnt = 1;
    /* poll more patiently on slow emulation/FPGA platforms */
    uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;

    BXE_FWMB_LOCK(sc);

    seq = ++sc->fw_seq;
    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));

    BLOGD(sc, DBG_PHY,
          "wrote command 0x%08x to FW MB param 0x%08x\n",
          (command | seq), param);

    /* Let the FW do its magic. Give it up to 5 seconds... */
    do {
        DELAY(delay * 1000);
        rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
    } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

    BLOGD(sc, DBG_PHY,
          "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
          cnt*delay, rc, seq);

    /* is this a reply to our command? */
    if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
        rc &= FW_MSG_CODE_MASK;
    } else {
        /* Ruh-roh! */
        BLOGE(sc, "FW failed to respond!\n");
        // XXX bxe_fw_dump(sc);
        rc = 0;
    }

    BXE_FWMB_UNLOCK(sc);
    return (rc);
}
/* driver-local alias for the elink firmware-command callback */
static uint32_t
bxe_fw_command(struct bxe_softc *sc,
               uint32_t         command,
               uint32_t         param)
{
    return (elink_cb_fw_command(sc, command, param));
}
/*
 * Write a 64-bit DMA address into two consecutive 32-bit locations:
 * low dword at 'addr', high dword at 'addr' + 4.
 * NOTE(review): the leading double underscore makes this a reserved
 * identifier in standard C; kept as-is to preserve the existing names.
 */
static void
__storm_memset_dma_mapping(struct bxe_softc *sc,
                           uint32_t         addr,
                           bus_addr_t       mapping)
{
    REG_WR(sc, addr, U64_LO(mapping));
    REG_WR(sc, (addr + 4), U64_HI(mapping));
}
/* program the SPQ page base address for function 'abs_fid' in XSTORM */
static void
storm_memset_spq_addr(struct bxe_softc *sc,
                      bus_addr_t       mapping,
                      uint16_t         abs_fid)
{
    uint32_t addr = (XSEM_REG_FAST_MEMORY +
                     XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));

    __storm_memset_dma_mapping(sc, addr, mapping);
}
/* record the VF-to-PF mapping for 'abs_fid' in all four storm memories */
static void
storm_memset_vf_to_pf(struct bxe_softc *sc,
                      uint16_t         abs_fid,
                      uint16_t         pf_id)
{
    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
}
/* set the function-enable flag for 'abs_fid' in all four storm memories */
static void
storm_memset_func_en(struct bxe_softc *sc,
                     uint16_t         abs_fid,
                     uint8_t          enable)
{
    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
}
/* copy the event-ring configuration for PF 'pfid' into CSTORM memory */
static void
storm_memset_eq_data(struct bxe_softc       *sc,
                     struct event_ring_data *eq_data,
                     uint16_t               pfid)
{
    uint32_t addr;
    size_t size;

    addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
    size = sizeof(struct event_ring_data);

    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
}
/* publish the event-queue producer index for PF 'pfid' to CSTORM memory */
static void
storm_memset_eq_prod(struct bxe_softc *sc,
                     uint16_t         eq_prod,
                     uint16_t         pfid)
{
    uint32_t addr = (BAR_CSTRORM_INTMEM +
                     CSTORM_EVENT_RING_PROD_OFFSET(pfid));

    REG_WR16(sc, addr, eq_prod);
}
/*
* Post a slowpath command.
*
* A slowpath command is used to propagate a configuration change through
* the controller in a controlled manner, allowing each STORM processor and
* other H/W blocks to phase in the change. The commands sent on the
* slowpath are referred to as ramrods. Depending on the ramrod used the
* completion of the ramrod will occur in different ways. Here's a
* breakdown of ramrods and how they complete:
*
* RAMROD_CMD_ID_ETH_PORT_SETUP
* Used to setup the leading connection on a port. Completes on the
* Receive Completion Queue (RCQ) of that port (typically fp[0]).
*
* RAMROD_CMD_ID_ETH_CLIENT_SETUP
* Used to setup an additional connection on a port. Completes on the
* RCQ of the multi-queue/RSS connection being initialized.
*
* RAMROD_CMD_ID_ETH_STAT_QUERY
* Used to force the storm processors to update the statistics database
* in host memory. This ramrod is sent on the leading connection CID and
* completes as an index increment of the CSTORM on the default status
* block.
*
* RAMROD_CMD_ID_ETH_UPDATE
* Used to update the state of the leading connection, usually to update
* the RSS indirection table. Completes on the RCQ of the leading
* connection. (Not currently used under FreeBSD until OS support becomes
* available.)
*
* RAMROD_CMD_ID_ETH_HALT
* Used when tearing down a connection prior to driver unload. Completes
* on the RCQ of the multi-queue/RSS connection being torn down. Don't
* use this on the leading connection.
*
* RAMROD_CMD_ID_ETH_SET_MAC
* Sets the Unicast/Broadcast/Multicast used by the port. Completes on
* the RCQ of the leading connection.
*
* RAMROD_CMD_ID_ETH_CFC_DEL
* Used when tearing down a connection prior to driver unload. Completes
* on the RCQ of the leading connection (since the current connection
* has been completely removed from controller memory).
*
* RAMROD_CMD_ID_ETH_PORT_DEL
* Used to tear down the leading connection prior to driver unload,
* typically fp[0]. Completes as an index increment of the CSTORM on the
* default status block.
*
* RAMROD_CMD_ID_ETH_FORWARD_SETUP
* Used for connection offload. Completes on the RCQ of the multi-queue
* RSS connection that is being offloaded. (Not currently used under
* FreeBSD.)
*
* There can only be one command pending per function.
*
* Returns:
* 0 = Success, !0 = Failure.
*/
/* must be called under the spq lock */
/*
 * Return the current SPQ producer entry and advance the producer,
 * wrapping back to the start of the ring past the last BD.
 */
static inline
struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
{
    struct eth_spe *spe = sc->spq_prod_bd;

    if (spe == sc->spq_last_bd) {
        /* wrap back to the first eth_spq */
        sc->spq_prod_bd = sc->spq;
        sc->spq_prod_idx = 0;
    } else {
        sc->spq_prod_bd++;
        sc->spq_prod_idx++;
    }

    return (spe);
}
/* must be called under the spq lock */
/* publish the new SPQ producer index to XSTORM and flush the write */
static inline
void bxe_sp_prod_update(struct bxe_softc *sc)
{
    int func = SC_FUNC(sc);

    /*
     * Make sure that BD data is updated before writing the producer.
     * BD data is written to the memory, the producer is read from the
     * memory, thus we need a full memory barrier to ensure the ordering.
     */
    mb();

    REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
             sc->spq_prod_idx);

    /* drain the posted write before returning */
    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
                      BUS_SPACE_BARRIER_WRITE);
}
/**
 * bxe_is_contextless_ramrod - check if the current command ends on EQ
 *
 * @cmd:      command to check
 * @cmd_type: command type
 */
static inline
int bxe_is_contextless_ramrod(int cmd,
                              int cmd_type)
{
    /*
     * NONE-connection ramrods, and the ETH ramrods listed here, complete
     * on the event queue rather than on a connection context.
     */
    int contextless = ((cmd_type == NONE_CONNECTION_TYPE) ||
                       (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
                       (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
                       (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
                       (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
                       (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
                       (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE));

    return (contextless ? TRUE : FALSE);
}
/**
 * bxe_sp_post - place a single command on an SP ring
 *
 * @sc:       driver handle
 * @command:  command to place (e.g. SETUP, FILTER_RULES, etc.)
 * @cid:      SW CID the command is related to
 * @data_hi:  command private data address (high 32 bits)
 * @data_lo:  command private data address (low 32 bits)
 * @cmd_type: command type (e.g. NONE, ETH)
 *
 * SP data is handled as if it's always an address pair, thus data fields are
 * not swapped to little endian in upper functions. Instead this function swaps
 * data as if it's two uint32 fields.
 *
 * Returns 0 on success, -1 when the relevant ring (EQ or SPQ) is full.
 */
int
bxe_sp_post(struct bxe_softc *sc,
            int              command,
            int              cid,
            uint32_t         data_hi,
            uint32_t         data_lo,
            int              cmd_type)
{
    struct eth_spe *spe;
    uint16_t type;
    int common;

    common = bxe_is_contextless_ramrod(command, cmd_type);

    BXE_SP_LOCK(sc);

    /* check there is credit left on the ring this command completes on */
    if (common) {
        if (!atomic_load_acq_long(&sc->eq_spq_left)) {
            BLOGE(sc, "EQ ring is full!\n");
            BXE_SP_UNLOCK(sc);
            return (-1);
        }
    } else {
        if (!atomic_load_acq_long(&sc->cq_spq_left)) {
            BLOGE(sc, "SPQ ring is full!\n");
            BXE_SP_UNLOCK(sc);
            return (-1);
        }
    }

    spe = bxe_sp_get_next(sc);

    /* CID needs port number to be encoded in it */
    spe->hdr.conn_and_cmd_data =
        htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid));

    type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE;

    /* TBD: Check if it works for VFs */
    type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) &
             SPE_HDR_T_FUNCTION_ID);

    spe->hdr.type = htole16(type);

    spe->data.update_data_addr.hi = htole32(data_hi);
    spe->data.update_data_addr.lo = htole32(data_lo);

    /*
     * It's ok if the actual decrement is issued towards the memory
     * somewhere between the lock and unlock. Thus no more explicit
     * memory barrier is needed.
     */
    if (common) {
        atomic_subtract_acq_long(&sc->eq_spq_left, 1);
    } else {
        atomic_subtract_acq_long(&sc->cq_spq_left, 1);
    }

    BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
    BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
          BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
    BLOGD(sc, DBG_SP,
          "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
          sc->spq_prod_idx,
          (uint32_t)U64_HI(sc->spq_dma.paddr),
          (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
          command,
          common,
          HW_CID(sc, cid),
          data_hi,
          data_lo,
          type,
          atomic_load_acq_long(&sc->cq_spq_left),
          atomic_load_acq_long(&sc->eq_spq_left));

    /* tell the hardware about the new producer */
    bxe_sp_prod_update(sc);

    BXE_SP_UNLOCK(sc);
    return (0);
}
/**
 * bxe_debug_print_ind_table - prints the indirection table configuration.
 *
 * @sc: driver handle
 * @p: pointer to rss configuration
 *
 * NOTE(review): no function of this name follows this comment; the
 * corresponding debug helper appears to have been removed.
 */
/*
 * FreeBSD Device probe function.
 *
 * Compares the device found to the driver's list of supported devices and
 * reports back to the bsd loader whether this is the right driver for the device.
 * This is the driver entry function called from the "kldload" command.
 *
 * Returns:
 * BUS_PROBE_DEFAULT on success, positive value on failure.
 */
static int
bxe_probe(device_t dev)
{
    struct bxe_device_type *t;
    uint16_t did, sdid, svid, vid;

    /* Get the data for the device to be probed. */
    vid  = pci_get_vendor(dev);
    did  = pci_get_device(dev);
    svid = pci_get_subvendor(dev);
    sdid = pci_get_subdevice(dev);

    /* Look through the list of known devices for a match. */
    for (t = bxe_devs; t->bxe_name != NULL; t++) {
        if ((vid != t->bxe_vid) || (did != t->bxe_did)) {
            continue;
        }
        if ((t->bxe_svid != PCI_ANY_ID) && (svid != t->bxe_svid)) {
            continue;
        }
        if ((t->bxe_sdid != PCI_ANY_ID) && (sdid != t->bxe_sdid)) {
            continue;
        }

        /* found it: describe the device, including its silicon rev */
        device_set_descf(dev,
                         "%s (%c%d) BXE v:%s", t->bxe_name,
                         (((pci_read_config(dev, PCIR_REVID, 4) &
                            0xf0) >> 4) + 'A'),
                         (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
                         BXE_DRIVER_VERSION);

        return (BUS_PROBE_DEFAULT);
    }

    return (ENXIO);
}
/*
 * Create all driver locks: the core lock (sx or mutex depending on
 * BXE_CORE_LOCK_SX), plus the slowpath, DMAE, PHY, firmware-mailbox,
 * print, statistics, and multicast mutexes. Names embed the unit number.
 */
static void
bxe_init_mutexes(struct bxe_softc *sc)
{
#ifdef BXE_CORE_LOCK_SX
    snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
             "bxe%d_core_lock", sc->unit);
    sx_init(&sc->core_sx, sc->core_sx_name);
#else
    snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
             "bxe%d_core_lock", sc->unit);
    mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
#endif

    snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
             "bxe%d_sp_lock", sc->unit);
    mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);

    snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
             "bxe%d_dmae_lock", sc->unit);
    mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);

    snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
             "bxe%d_phy_lock", sc->unit);
    mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);

    snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
             "bxe%d_fwmb_lock", sc->unit);
    mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);

    snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
             "bxe%d_print_lock", sc->unit);
    mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);

    snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
             "bxe%d_stats_lock", sc->unit);
    mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);

    snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
             "bxe%d_mcast_lock", sc->unit);
    mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
}
/*
 * Destroy every lock created by bxe_init_mutexes(). Each mutex is checked
 * for initialization first so this is safe on partially-attached devices.
 */
static void
bxe_release_mutexes(struct bxe_softc *sc)
{
#ifdef BXE_CORE_LOCK_SX
    sx_destroy(&sc->core_sx);
#else
    if (mtx_initialized(&sc->core_mtx)) {
        mtx_destroy(&sc->core_mtx);
    }
#endif

    if (mtx_initialized(&sc->sp_mtx)) {
        mtx_destroy(&sc->sp_mtx);
    }

    if (mtx_initialized(&sc->dmae_mtx)) {
        mtx_destroy(&sc->dmae_mtx);
    }

    if (mtx_initialized(&sc->port.phy_mtx)) {
        mtx_destroy(&sc->port.phy_mtx);
    }

    if (mtx_initialized(&sc->fwmb_mtx)) {
        mtx_destroy(&sc->fwmb_mtx);
    }

    if (mtx_initialized(&sc->print_mtx)) {
        mtx_destroy(&sc->print_mtx);
    }

    if (mtx_initialized(&sc->stats_mtx)) {
        mtx_destroy(&sc->stats_mtx);
    }

    if (mtx_initialized(&sc->mcast_mtx)) {
        mtx_destroy(&sc->mcast_mtx);
    }
}
/* clear the interface driver flags so the stack stops handing us packets */
static void
bxe_tx_disable(struct bxe_softc* sc)
{
    if_t ifp = sc->ifp;

    /* tell the stack the driver is stopped and TX queue is full */
    if (ifp !=  NULL) {
        if_setdrvflags(ifp, 0);
    }
}
/* write the driver heartbeat sequence to the firmware pulse mailbox */
static void
bxe_drv_pulse(struct bxe_softc *sc)
{
    SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
             sc->fw_drv_pulse_wr_seq);
}
/*
 * Number of free TX buffer descriptors on the fastpath ring.
 * 'used' is computed with wrap-safe 16-bit subtraction (SUB_S16).
 * NOTE(review): the int-typed expression is narrowed to the uint16_t
 * return type -- assumes tx_ring_size fits in 16 bits.
 */
static inline uint16_t
bxe_tx_avail(struct bxe_softc *sc,
             struct bxe_fastpath *fp)
{
    int16_t  used;
    uint16_t prod;
    uint16_t cons;

    prod = fp->tx_bd_prod;
    cons = fp->tx_bd_cons;

    used = SUB_S16(prod, cons);

    return (int16_t)(sc->tx_ring_size) - used;
}
/*
 * Non-zero when the hardware TX consumer in the status block has moved
 * past the driver's packet consumer, i.e. completed transmits are
 * waiting to be reclaimed.
 */
static inline int
bxe_tx_queue_has_work(struct bxe_fastpath *fp)
{
    mb(); /* status block fields can change */

    return (le16toh(*fp->tx_cons_sb) != fp->tx_pkt_cons);
}
/* TRUE when any TX completion work is pending on this fastpath */
static inline uint8_t
bxe_has_tx_work(struct bxe_fastpath *fp)
{
    /* expand this for multi-cos if ever supported */
    return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
}
/*
 * Non-zero when new completions are pending on the RX completion queue.
 * When the hardware index lands on the RCQ_MAX boundary value it is
 * advanced by one -- presumably that slot is never a valid entry
 * (page-boundary skip); kept as-is.
 */
static inline int
bxe_has_rx_work(struct bxe_fastpath *fp)
{
    uint16_t rx_cq_cons_sb;

    mb(); /* status block fields can change */

    rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
    if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
        rx_cq_cons_sb++;

    return (fp->rx_cq_cons != rx_cq_cons_sb);
}
/*
 * Handle a ramrod (slowpath) completion delivered on a fastpath RCQ.
 * Maps the completed firmware command to the matching ecore queue command,
 * notifies the queue state object, and, for expected completions, returns
 * one credit to the common slowpath queue (cq_spq_left).
 *
 * Fix: corrected the "teminate" typo in the TERMINATE debug message.
 */
static void
bxe_sp_event(struct bxe_softc    *sc,
             struct bxe_fastpath *fp,
             union eth_rx_cqe    *rr_cqe)
{
    int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
    int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
    enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
    struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;

    BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
          fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);

    /* translate the firmware command into an ecore queue command */
    switch (command) {
    case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
        BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
        drv_cmd = ECORE_Q_CMD_UPDATE;
        break;

    case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
        BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_SETUP;
        break;

    case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
        BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
        break;

    case (RAMROD_CMD_ID_ETH_HALT):
        BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_HALT;
        break;

    case (RAMROD_CMD_ID_ETH_TERMINATE):
        BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_TERMINATE;
        break;

    case (RAMROD_CMD_ID_ETH_EMPTY):
        BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_EMPTY;
        break;

    default:
        BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
              command, fp->index);
        return;
    }

    if ((drv_cmd != ECORE_Q_CMD_MAX) &&
        q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
        /*
         * q_obj->complete_cmd() failure means that this was
         * an unexpected completion.
         *
         * In this case we don't want to increase the sc->spq_left
         * because apparently we haven't sent this command the first
         * place.
         */
        // bxe_panic(sc, ("Unexpected SP completion\n"));
        return;
    }

    atomic_add_acq_long(&sc->cq_spq_left, 1);

    BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
          atomic_load_acq_long(&sc->cq_spq_left));
}
/*
* The current mbuf is part of an aggregation. Move the mbuf into the TPA
* aggregation queue, put an empty mbuf back onto the receive chain, and mark
* the current aggregation queue as in-progress.
*/
static void
bxe_tpa_start(struct bxe_softc *sc,
              struct bxe_fastpath *fp,
              uint16_t queue,
              uint16_t cons,
              uint16_t prod,
              struct eth_fast_path_rx_cqe *cqe)
{
    struct bxe_sw_rx_bd tmp_bd;
    struct bxe_sw_rx_bd *rx_buf;
    struct eth_rx_bd *rx_bd;
    int max_agg_queues __diagused;
    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
    uint16_t index;

    BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
          "cons=%d prod=%d\n",
          fp->index, queue, cons, prod);

    max_agg_queues = MAX_AGG_QS(sc);

    /* sanity: queue index must be in range and that TPA queue idle */
    KASSERT((queue < max_agg_queues),
            ("fp[%02d] invalid aggr queue (%d >= %d)!",
             fp->index, queue, max_agg_queues));
    KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
            ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
             fp->index, queue));

    /* copy the existing mbuf and mapping from the TPA pool */
    tmp_bd = tpa_info->bd;

    if (tmp_bd.m == NULL) {
        uint32_t *tmp;

        tmp = (uint32_t *)cqe;

        /* the TPA pool slot has no mbuf; dump the CQE for diagnosis */
        BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d]mbuf not allocated!\n",
              fp->index, queue, cons, prod);
        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
              *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));

        /* XXX Error handling? */
        return;
    }

    /* change the TPA queue to the start state */
    tpa_info->state = BXE_TPA_STATE_START;
    /* record per-aggregation metadata from the starting CQE */
    tpa_info->placement_offset = cqe->placement_offset;
    tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags);
    tpa_info->vlan_tag = le16toh(cqe->vlan_tag);
    tpa_info->len_on_bd = le16toh(cqe->len_on_bd);

    fp->rx_tpa_queue_used |= (1 << queue);

    /*
     * If all the buffer descriptors are filled with mbufs then fill in
     * the current consumer index with a new BD. Else if a maximum Rx
     * buffer limit is imposed then fill in the next producer index.
     */
    index = (sc->max_rx_bufs != RX_BD_USABLE) ?
                prod : cons;

    /* move the received mbuf and mapping to TPA pool */
    tpa_info->bd = fp->rx_mbuf_chain[cons];

    /* release any existing RX BD mbuf mappings */
    if (cons != index) {
        rx_buf = &fp->rx_mbuf_chain[cons];

        if (rx_buf->m_map != NULL) {
            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
                            BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
        }

        /*
         * We get here when the maximum number of rx buffers is less than
         * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
         * it out here without concern of a memory leak.
         */
        fp->rx_mbuf_chain[cons].m = NULL;
    }

    /* update the Rx SW BD with the mbuf info from the TPA pool */
    fp->rx_mbuf_chain[index] = tmp_bd;

    /* update the Rx BD with the empty mbuf phys address from the TPA pool */
    rx_bd = &fp->rx_chain[index];
    rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
    rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
}
/*
* When a TPA aggregation is completed, loop through the individual mbufs
* of the aggregation, combining them into a single mbuf which will be sent
* up the stack. Refill all freed SGEs with mbufs as we go along.
*/
static int
bxe_fill_frag_mbuf(struct bxe_softc *sc,
                   struct bxe_fastpath *fp,
                   struct bxe_sw_tpa_info *tpa_info,
                   uint16_t queue,
                   uint16_t pages,
                   struct mbuf *m,
                   struct eth_end_agg_rx_cqe *cqe,
                   uint16_t cqe_idx)
{
    struct mbuf *m_frag;
    uint32_t frag_len, frag_size, i;
    uint16_t sge_idx;
    int rc = 0;
    int j;

    /* bytes still to be gathered beyond what sits on the first BD */
    frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;

    BLOGD(sc, DBG_LRO,
          "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
          fp->index, queue, tpa_info->len_on_bd, frag_size, pages);

    /* make sure the aggregated frame is not too big to handle */
    if (pages > 8 * PAGES_PER_SGE) {
        uint32_t *tmp = (uint32_t *)cqe;

        BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
                  "pkt_len=%d len_on_bd=%d frag_size=%d\n",
              fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
              tpa_info->len_on_bd, frag_size);
        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
              *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));

        bxe_panic(sc, ("sge page count error\n"));
        return (EINVAL);
    }

    /*
     * Scan through the scatter gather list pulling individual mbufs into a
     * single mbuf for the host stack.
     */
    for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
        sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));

        /*
         * Firmware gives the indices of the SGE as if the ring is an array
         * (meaning that the "next" element will consume 2 indices).
         */
        frag_len = min(frag_size, (uint32_t)(SGE_PAGES));

        BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
                           "sge_idx=%d frag_size=%d frag_len=%d\n",
              fp->index, queue, i, j, sge_idx, frag_size, frag_len);

        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;

        /* allocate a new mbuf for the SGE */
        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
        if (rc) {
            /* Leave all remaining SGEs in the ring! */
            return (rc);
        }

        /* update the fragment length */
        m_frag->m_len = frag_len;

        /* concatenate the fragment to the head mbuf */
        m_cat(m, m_frag);
        fp->eth_q_stats.mbuf_alloc_sge--;

        /* update the TPA mbuf size and remaining fragment size */
        m->m_pkthdr.len += frag_len;
        frag_size -= frag_len;
    }

    BLOGD(sc, DBG_LRO,
          "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
          fp->index, queue, frag_size);

    return (rc);
}
static inline void
bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
{
    int page;

    /*
     * For every SGE ring page, clear the mask bits of the last two
     * entries: those slots hold the "next page" element and never
     * carry a completion indication.
     */
    for (page = 1; page <= RX_SGE_NUM_PAGES; page++) {
        int last = (RX_SGE_TOTAL_PER_PAGE * page) - 1;

        BIT_VEC64_CLEAR_BIT(fp->sge_mask, last);
        BIT_VEC64_CLEAR_BIT(fp->sge_mask, last - 1);
    }
}
static inline void
bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
{
    /*
     * Start with every mask bit set; an all-ones element is cheaper to
     * test (compare against 0) than a partially filled one.
     */
    memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));

    /*
     * Then knock out the two trailing "next element" slots on each
     * page: they never get a completion and must not take part in the
     * producer calculations.
     */
    bxe_clear_sge_mask_next_elems(fp);
}
static inline void
bxe_update_last_max_sge(struct bxe_fastpath *fp,
                        uint16_t idx)
{
    /* advance the high-water mark only if idx is newer (wrap-safe compare) */
    if (SUB_S16(idx, fp->last_max_sge) > 0) {
        fp->last_max_sge = idx;
    }
}
/*
 * Advance the SGE producer index past the mask elements whose pages have
 * all been consumed by the firmware (tracked in fp->sge_mask), refilling
 * the mask as we go.
 */
static inline void
bxe_update_sge_prod(struct bxe_softc *sc,
                    struct bxe_fastpath *fp,
                    uint16_t sge_len,
                    union eth_sgl_or_raw_data *cqe)
{
    uint16_t last_max, last_elem, first_elem;
    uint16_t delta = 0;
    uint16_t i;

    if (!sge_len) {
        return;
    }

    /* first mark all used pages */
    for (i = 0; i < sge_len; i++) {
        BIT_VEC64_CLEAR_BIT(fp->sge_mask,
                            RX_SGE(le16toh(cqe->sgl[i])));
    }

    BLOGD(sc, DBG_LRO,
          "fp[%02d] fp_cqe->sgl[%d] = %d\n",
          fp->index, sge_len - 1,
          le16toh(cqe->sgl[sge_len - 1]));

    /* assume that the last SGE index is the biggest */
    bxe_update_last_max_sge(fp,
                            le16toh(cqe->sgl[sge_len - 1]));

    last_max = RX_SGE(fp->last_max_sge);
    /* translate SGE indices into 64-bit mask-element indices */
    last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
    first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

    /* if ring is not full */
    if (last_elem + 1 != first_elem) {
        last_elem++;
    }

    /* now update the prod */
    for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
        /* stop at the first element that still has unconsumed pages */
        if (__predict_true(fp->sge_mask[i])) {
            break;
        }

        /* element fully consumed: refill its mask and count its pages */
        fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
        delta += BIT_VEC64_ELEM_SZ;
    }

    if (delta > 0) {
        fp->rx_sge_prod += delta;
        /* clear page-end entries */
        bxe_clear_sge_mask_next_elems(fp);
    }

    BLOGD(sc, DBG_LRO,
          "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
          fp->index, fp->last_max_sge, fp->rx_sge_prod);
}
/*
 * The aggregation on the current TPA queue has completed. Pull the individual
 * mbuf fragments together into a single mbuf, perform all necessary checksum
 * calculations, and send the resulting mbuf to the stack.
 */
static void
bxe_tpa_stop(struct bxe_softc *sc,
             struct bxe_fastpath *fp,
             struct bxe_sw_tpa_info *tpa_info,
             uint16_t queue,
             uint16_t pages,
             struct eth_end_agg_rx_cqe *cqe,
             uint16_t cqe_idx)
{
    if_t ifp = sc->ifp;
    struct mbuf *m;
    int rc = 0;

    BLOGD(sc, DBG_LRO,
          "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
          fp->index, queue, tpa_info->placement_offset,
          le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);

    m = tpa_info->bd.m;

    /* allocate a replacement before modifying existing mbuf */
    rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
    if (rc) {
        /* drop the frame and log an error */
        fp->eth_q_stats.rx_soft_errors++;
        goto bxe_tpa_stop_exit;
    }

    /* we have a replacement, fixup the current mbuf */
    m_adj(m, tpa_info->placement_offset);
    m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;

    /* mark the checksums valid (taken care of by the firmware) */
    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
    m->m_pkthdr.csum_data = 0xffff;
    m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
                               CSUM_IP_VALID |
                               CSUM_DATA_VALID |
                               CSUM_PSEUDO_HDR);

    /* aggregate all of the SGEs into a single mbuf */
    rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
    if (rc) {
        /* drop the packet and log an error */
        fp->eth_q_stats.rx_soft_errors++;
        m_freem(m);
    } else {
        if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
            m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
            m->m_flags |= M_VLANTAG;
        }

        /* assign packet to this interface */
        if_setrcvif(m, ifp);

        /* specify what RSS queue was used for this flow */
        m->m_pkthdr.flowid = fp->index;
        BXE_SET_FLOWID(m);

        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
        fp->eth_q_stats.rx_tpa_pkts++;

        /* pass the frame to the stack */
        if_input(ifp, m);
    }

    /* we passed an mbuf up the stack or dropped the frame */
    fp->eth_q_stats.mbuf_alloc_tpa--;

bxe_tpa_stop_exit:
    /* return the TPA queue to the pool so a new aggregation may start */
    fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
    fp->rx_tpa_queue_used &= ~(1 << queue);
}
/*
 * Gather the SGE fragments of a non-TPA jumbo frame (len > len_on_bd)
 * onto the head mbuf 'm', replenishing each consumed SGE slot.
 * Returns 0 on success or the bxe_alloc_rx_sge_mbuf() error code.
 */
static uint8_t
bxe_service_rxsgl(
                 struct bxe_fastpath *fp,
                 uint16_t len,
                 uint16_t lenonbd,
                 struct mbuf *m,
                 struct eth_fast_path_rx_cqe *cqe_fp)
{
    struct mbuf *m_frag;
    uint16_t frags, frag_len;
    uint16_t sge_idx = 0;
    uint16_t j;
    uint8_t i, rc = 0;
    uint32_t frag_size;

    /* adjust the mbuf */
    m->m_len = lenonbd;

    frag_size = len - lenonbd;
    frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

    for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
        sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));

        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
        /*
         * NOTE(review): frag_len is capped at SGE_PAGE_SIZE here while
         * bxe_fill_frag_mbuf() caps at SGE_PAGES for the same stride —
         * verify this is intended when PAGES_PER_SGE > 1.
         */
        frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
        /*
         * NOTE(review): m_len is modified before the replacement
         * allocation below is known to succeed; on failure the mbuf
         * left in the ring keeps the new length — confirm harmless.
         */
        m_frag->m_len = frag_len;

       /* allocate a new mbuf for the SGE */
        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
        if (rc) {
            /* Leave all remaining SGEs in the ring! */
            return (rc);
        }
        fp->eth_q_stats.mbuf_alloc_sge--;

        /* concatenate the fragment to the head mbuf */
        m_cat(m, m_frag);

        frag_size -= frag_len;
    }

    /* release the consumed SGE pages back to the producer */
    bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);

    return rc;
}
/*
 * Service the RX completion queue of one fastpath: dispatch slowpath
 * CQEs, handle TPA start/stop, replenish receive BDs, validate offloaded
 * checksums and hand completed frames to the stack. Returns non-zero
 * when more completions remain (sw consumer did not catch the hw one).
 */
static uint8_t
bxe_rxeof(struct bxe_softc *sc,
          struct bxe_fastpath *fp)
{
    if_t ifp = sc->ifp;
    uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
    uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
    int rx_pkts = 0;
    int rc = 0;

    BXE_FP_RX_LOCK(fp);

    /* CQ "next element" is of the size of the regular element */
    hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
    if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
        hw_cq_cons++;
    }

    bd_cons = fp->rx_bd_cons;
    bd_prod = fp->rx_bd_prod;
    bd_prod_fw = bd_prod;
    sw_cq_cons = fp->rx_cq_cons;
    sw_cq_prod = fp->rx_cq_prod;

    /*
     * Memory barrier necessary as speculative reads of the rx
     * buffer can be ahead of the index in the status block
     */
    rmb();

    BLOGD(sc, DBG_RX,
          "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
          fp->index, hw_cq_cons, sw_cq_cons);

    while (sw_cq_cons != hw_cq_cons) {
        struct bxe_sw_rx_bd *rx_buf = NULL;
        union eth_rx_cqe *cqe;
        struct eth_fast_path_rx_cqe *cqe_fp;
        uint8_t cqe_fp_flags;
        enum eth_rx_cqe_type cqe_fp_type;
        uint16_t len, lenonbd, pad;
        struct mbuf *m = NULL;

        comp_ring_cons = RCQ(sw_cq_cons);
        bd_prod = RX_BD(bd_prod);
        bd_cons = RX_BD(bd_cons);

        cqe = &fp->rcq_chain[comp_ring_cons];
        cqe_fp = &cqe->fast_path_cqe;
        cqe_fp_flags = cqe_fp->type_error_flags;
        cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

        BLOGD(sc, DBG_RX,
              "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
              "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
              "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
              fp->index,
              hw_cq_cons,
              sw_cq_cons,
              bd_prod,
              bd_cons,
              CQE_TYPE(cqe_fp_flags),
              cqe_fp_flags,
              cqe_fp->status_flags,
              le32toh(cqe_fp->rss_hash_result),
              le16toh(cqe_fp->vlan_tag),
              le16toh(cqe_fp->pkt_len_or_gro_seg_len),
              le16toh(cqe_fp->len_on_bd));

        /* is this a slowpath msg? */
        if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
            bxe_sp_event(sc, fp, cqe);
            goto next_cqe;
        }

        rx_buf = &fp->rx_mbuf_chain[bd_cons];

        if (!CQE_TYPE_FAST(cqe_fp_type)) {
            struct bxe_sw_tpa_info *tpa_info;
            uint16_t frag_size, pages;
            uint8_t queue;

            /* TPA start: begin a new aggregation on the given queue */
            if (CQE_TYPE_START(cqe_fp_type)) {
                bxe_tpa_start(sc, fp, cqe_fp->queue_index,
                              bd_cons, bd_prod, cqe_fp);
                m = NULL; /* packet not ready yet */
                goto next_rx;
            }

            KASSERT(CQE_TYPE_STOP(cqe_fp_type),
                    ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));

            /* TPA stop: the aggregation is complete */
            queue = cqe->end_agg_cqe.queue_index;
            tpa_info = &fp->rx_tpa_info[queue];

            BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
                  fp->index, queue);

            frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
                         tpa_info->len_on_bd);
            pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

            bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
                         &cqe->end_agg_cqe, comp_ring_cons);

            bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);

            goto next_cqe;
        }

        /* non TPA */

        /* is this an error packet? */
        if (__predict_false(cqe_fp_flags &
                            ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
            BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
            fp->eth_q_stats.rx_soft_errors++;
            goto next_rx;
        }

        len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
        lenonbd = le16toh(cqe_fp->len_on_bd);
        pad = cqe_fp->placement_offset;

        m = rx_buf->m;

        if (__predict_false(m == NULL)) {
            BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
                  bd_cons, fp->index);
            goto next_rx;
        }

        /* XXX double copy if packet length under a threshold */

        /*
         * If all the buffer descriptors are filled with mbufs then fill in
         * the current consumer index with a new BD. Else if a maximum Rx
         * buffer limit is imposed then fill in the next producer index.
         */
        rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
                                  (sc->max_rx_bufs != RX_BD_USABLE) ?
                                      bd_prod : bd_cons);
        if (rc != 0) {
            /* we simply reuse the received mbuf and don't post it to the stack */
            m = NULL;

            BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
                  fp->index, rc);
            fp->eth_q_stats.rx_soft_errors++;

            if (sc->max_rx_bufs != RX_BD_USABLE) {
                /* copy this consumer index to the producer index */
                memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
                       sizeof(struct bxe_sw_rx_bd));
                memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
            }

            goto next_rx;
        }

        /* current mbuf was detached from the bd */
        fp->eth_q_stats.mbuf_alloc_rx--;

        /* we allocated a replacement mbuf, fixup the current one */
        m_adj(m, pad);
        m->m_pkthdr.len = m->m_len = len;

        if ((len > 60) && (len > lenonbd)) {
            /* frame spills past the first BD; gather SGE fragments */
            fp->eth_q_stats.rx_bxe_service_rxsgl++;
            rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
            if (rc)
                break;
            fp->eth_q_stats.rx_jumbo_sge_pkts++;
        } else if (lenonbd < len) {
            fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
        }

        /* assign packet to this interface */
        if_setrcvif(m, ifp);

        /* assume no hardware checksum has completed */
        m->m_pkthdr.csum_flags = 0;

        /* validate checksum if offload enabled */
        if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
            /* check for a valid IP frame */
            if (!(cqe->fast_path_cqe.status_flags &
                  ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
                if (__predict_false(cqe_fp_flags &
                                    ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
                    fp->eth_q_stats.rx_hw_csum_errors++;
                } else {
                    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
                    m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
                }
            }

            /* check for a valid TCP/UDP frame */
            if (!(cqe->fast_path_cqe.status_flags &
                  ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
                if (__predict_false(cqe_fp_flags &
                                    ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
                    fp->eth_q_stats.rx_hw_csum_errors++;
                } else {
                    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
                    m->m_pkthdr.csum_data = 0xFFFF;
                    m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
                                               CSUM_PSEUDO_HDR);
                }
            }
        }

        /* if there is a VLAN tag then flag that info */
        if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
            m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
            m->m_flags |= M_VLANTAG;
        }

        /* specify what RSS queue was used for this flow */
        m->m_pkthdr.flowid = fp->index;
        BXE_SET_FLOWID(m);

next_rx:

        bd_cons = RX_BD_NEXT(bd_cons);
        bd_prod = RX_BD_NEXT(bd_prod);
        bd_prod_fw = RX_BD_NEXT(bd_prod_fw);

        /* pass the frame to the stack */
        if (__predict_true(m != NULL)) {
            if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
            rx_pkts++;
            if_input(ifp, m);
        }

next_cqe:

        sw_cq_prod = RCQ_NEXT(sw_cq_prod);
        sw_cq_cons = RCQ_NEXT(sw_cq_cons);

        /* limit spinning on the queue */
        if (rc != 0)
            break;

        if (rx_pkts == sc->rx_budget) {
            fp->eth_q_stats.rx_budget_reached++;
            break;
        }
    } /* while work to do */

    fp->rx_bd_cons = bd_cons;
    fp->rx_bd_prod = bd_prod_fw;
    fp->rx_cq_cons = sw_cq_cons;
    fp->rx_cq_prod = sw_cq_prod;

    /* Update producers */
    bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);

    fp->eth_q_stats.rx_pkts += rx_pkts;
    fp->eth_q_stats.rx_calls++;

    BXE_FP_RX_UNLOCK(fp);

    return (sw_cq_cons != hw_cq_cons);
}
/*
 * Release the mbuf and DMA mapping of one completed TX packet and return
 * the new TX BD consumer index (the packet's first BD plus the number of
 * BDs it consumed, as reported by the start BD's 'nbd' field minus one).
 */
static uint16_t
bxe_free_tx_pkt(struct bxe_softc *sc,
                struct bxe_fastpath *fp,
                uint16_t idx)
{
    struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
    struct eth_tx_start_bd *tx_start_bd;
    uint16_t bd_idx = TX_BD(tx_buf->first_bd);
    uint16_t new_cons;
    int nbd;

    /* unmap the mbuf from non-paged memory */
    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);

    tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
    nbd = le16toh(tx_start_bd->nbd) - 1;

    new_cons = (tx_buf->first_bd + nbd);

    /* free the mbuf */
    if (__predict_true(tx_buf->m != NULL)) {
        m_freem(tx_buf->m);
        fp->eth_q_stats.mbuf_alloc_tx--;
    } else {
        /* completed slot unexpectedly had no mbuf attached */
        fp->eth_q_stats.tx_chain_lost_mbuf++;
    }

    tx_buf->m = NULL;
    tx_buf->first_bd = 0;

    return (new_cons);
}
/* transmit timeout watchdog */
static int
bxe_watchdog(struct bxe_softc *sc,
             struct bxe_fastpath *fp)
{
    int rc = 0;

    BXE_FP_TX_LOCK(fp);

    /*
     * A zero timer means the watchdog is disarmed; otherwise tick it
     * down and only declare a timeout when it just reached zero.
     */
    if (fp->watchdog_timer != 0 && --fp->watchdog_timer == 0) {
        BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
        rc = -1;
    }

    BXE_FP_TX_UNLOCK(fp);

    if (rc != 0) {
        /* hand the stuck queue to the error-recovery task */
        BXE_SET_ERROR_BIT(sc, BXE_ERR_TXQ_STUCK);
        taskqueue_enqueue_timeout(taskqueue_thread,
            &sc->sp_err_timeout_task, hz/10);
    }

    return (rc);
}
/* processes transmit completions */
/*
 * Reclaim all TX packets the hardware has completed on this fastpath,
 * manage the OACTIVE flow-control flag and the TX watchdog.
 * Returns TRUE while transmits are still pending, FALSE otherwise.
 * Caller must hold the fastpath TX lock.
 */
static uint8_t
bxe_txeof(struct bxe_softc *sc,
          struct bxe_fastpath *fp)
{
    if_t ifp = sc->ifp;
    uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
    uint16_t tx_bd_avail;

    BXE_FP_TX_LOCK_ASSERT(fp);

    bd_cons = fp->tx_bd_cons;
    hw_cons = le16toh(*fp->tx_cons_sb);
    sw_cons = fp->tx_pkt_cons;

    while (sw_cons != hw_cons) {
        pkt_cons = TX_BD(sw_cons);

        BLOGD(sc, DBG_TX,
              "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
              fp->index, hw_cons, sw_cons, pkt_cons);

        bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);

        sw_cons++;
    }

    fp->tx_pkt_cons = sw_cons;
    fp->tx_bd_cons = bd_cons;

    BLOGD(sc, DBG_TX,
          "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
          fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);

    mb();

    tx_bd_avail = bxe_tx_avail(sc, fp);

    /* throttle the stack while the ring is close to full */
    if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
        if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
    } else {
        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
    }

    if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
        /* reset the watchdog timer if there are pending transmits */
        fp->watchdog_timer = BXE_TX_TIMEOUT;
        return (TRUE);
    } else {
        /* clear watchdog when there are no pending transmits */
        fp->watchdog_timer = 0;
        return (FALSE);
    }
}
static void
bxe_drain_tx_queues(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int i, tries;

    /* reap completions on every TX queue until no work remains */
    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];

        for (tries = 1000; bxe_has_tx_work(fp); tries--) {
            BXE_FP_TX_LOCK(fp);
            bxe_txeof(sc, fp);
            BXE_FP_TX_UNLOCK(fp);

            if (tries == 0) {
                BLOGE(sc, "Timeout waiting for fp[%d] "
                          "transmits to complete!\n", i);
                bxe_panic(sc, ("tx drain failure\n"));
                return;
            }

            /* give the hardware roughly a millisecond per retry */
            DELAY(1000);
            rmb();
        }
    }
}
static int
bxe_del_all_macs(struct bxe_softc *sc,
                 struct ecore_vlan_mac_obj *mac_obj,
                 int mac_type,
                 uint8_t wait_for_comp)
{
    unsigned long ramrod_flags = 0;
    unsigned long vlan_mac_flags = 0;
    int rc;

    /* optionally block until the delete ramrod completes */
    if (wait_for_comp) {
        bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
    }

    /* restrict the delete to addresses of the requested type */
    bxe_set_bit(mac_type, &vlan_mac_flags);

    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
    if (rc < 0) {
        BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
              rc, mac_type, wait_for_comp);
    }

    return (rc);
}
static int
bxe_fill_accept_flags(struct bxe_softc *sc,
                      uint32_t rx_mode,
                      unsigned long *rx_accept_flags,
                      unsigned long *tx_accept_flags)
{
    /* start from an empty accept mask on both directions */
    *rx_accept_flags = 0;
    *tx_accept_flags = 0;

    if (rx_mode == BXE_RX_MODE_NONE) {
        /*
         * 'drop all' supersedes any accept flags that may have been
         * requested; leave both masks empty (no VLAN catch-all either).
         */
        return (0);
    }

    if (rx_mode == BXE_RX_MODE_NORMAL) {
        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);

        /* internal switching mode */
        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
    } else if (rx_mode == BXE_RX_MODE_ALLMULTI) {
        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);

        /* internal switching mode */
        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
    } else if (rx_mode == BXE_RX_MODE_PROMISC) {
        /*
         * By definition of SI mode, an interface in promisc mode
         * should receive matched and unmatched (in resolution of port)
         * unicast packets.
         */
        bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);

        /* internal switching mode */
        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);

        if (IS_MF_SI(sc)) {
            bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
        } else {
            bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
        }
    } else {
        BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
        return (-1);
    }

    /* no filtering by VLAN is done, so accept any VLAN on both paths */
    bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
    bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);

    return (0);
}
static int
bxe_set_q_rx_mode(struct bxe_softc *sc,
                  uint8_t cl_id,
                  unsigned long rx_mode_flags,
                  unsigned long rx_accept_flags,
                  unsigned long tx_accept_flags,
                  unsigned long ramrod_flags)
{
    struct ecore_rx_mode_ramrod_params ramrod_param;
    int rc;

    memset(&ramrod_param, 0, sizeof(ramrod_param));

    /* identify the client/function this filter change applies to */
    ramrod_param.cid = 0;
    ramrod_param.cl_id = cl_id;
    ramrod_param.func_id = SC_FUNC(sc);

    /* completion tracking: ecore clears the PENDING state bit */
    ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
    ramrod_param.pstate = &sc->sp_state;
    ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;

    /* ramrod data buffer and its bus address */
    ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
    ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);

    /* mark the operation pending before firing the ramrod */
    bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);

    ramrod_param.ramrod_flags = ramrod_flags;
    ramrod_param.rx_mode_flags = rx_mode_flags;
    ramrod_param.rx_accept_flags = rx_accept_flags;
    ramrod_param.tx_accept_flags = tx_accept_flags;

    rc = ecore_config_rx_mode(sc, &ramrod_param);
    if (rc < 0) {
        BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
                  "rx_accept_flags 0x%x tx_accept_flags 0x%x "
                  "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
              (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
              (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
        return (rc);
    }

    return (0);
}
static int
bxe_set_storm_rx_mode(struct bxe_softc *sc)
{
    unsigned long rx_mode_flags = 0;
    unsigned long ramrod_flags = 0;
    unsigned long rx_accept_flags = 0;
    unsigned long tx_accept_flags = 0;
    int rc;

    /* translate the driver's rx_mode into ecore accept-flag sets */
    rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
                               &tx_accept_flags);
    if (rc != 0) {
        return (rc);
    }

    /* apply the filter change on both the RX and TX sides */
    bxe_set_bit(RAMROD_RX, &ramrod_flags);
    bxe_set_bit(RAMROD_TX, &ramrod_flags);

    /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */
    return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
                              rx_accept_flags, tx_accept_flags,
                              ramrod_flags));
}
/* returns the "mcp load_code" according to global load_count array */
static int
bxe_nic_load_no_mcp(struct bxe_softc *sc)
{
    int path = SC_PATH(sc);
    int port = SC_PORT(sc);

    BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n",
          path, load_count[path][0], load_count[path][1],
          load_count[path][2]);

    /* bump the per-path and per-port reference counts */
    load_count[path][0]++;
    load_count[path][1 + port]++;

    BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n",
          path, load_count[path][0], load_count[path][1],
          load_count[path][2]);

    /*
     * First function on the path does COMMON init, first on the port
     * does PORT init, everybody else does FUNCTION init.
     */
    if (load_count[path][0] == 1) {
        return (FW_MSG_CODE_DRV_LOAD_COMMON);
    }
    if (load_count[path][1 + port] == 1) {
        return (FW_MSG_CODE_DRV_LOAD_PORT);
    }
    return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
}
/* returns the "mcp load_code" according to global load_count array */
static int
bxe_nic_unload_no_mcp(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int path = SC_PATH(sc);

    BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n",
          path, load_count[path][0], load_count[path][1],
          load_count[path][2]);

    /* drop the per-path and per-port reference counts */
    load_count[path][0]--;
    load_count[path][1 + port]--;

    BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n",
          path, load_count[path][0], load_count[path][1],
          load_count[path][2]);

    /*
     * Last function on the path does COMMON teardown, last on the port
     * does PORT teardown, everybody else does FUNCTION teardown.
     */
    if (load_count[path][0] == 0) {
        return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
    }
    if (load_count[path][1 + port] == 0) {
        return (FW_MSG_CODE_DRV_UNLOAD_PORT);
    }
    return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
}
/* request unload mode from the MCP: COMMON, PORT or FUNCTION */
/*
 * Request the UNLOAD mode (COMMON, PORT or FUNCTION) from the MCP, or
 * derive it locally from the load counters when no MCP is present.
 * Returns the firmware response/reset code.
 */
static uint32_t
bxe_send_unload_req(struct bxe_softc *sc,
                    int unload_mode)
{
    uint32_t reset_code;

    /*
     * WOL is not supported by this driver, so every unload mode
     * (UNLOAD_NORMAL included) maps to the WOL-disabled request. The
     * previous if/else on 'unload_mode' selected the identical code in
     * both branches; the parameter is kept for interface compatibility.
     */
    (void)unload_mode;
    reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

    /* Send the request to the MCP, or emulate it when MCP is absent */
    if (!BXE_NOMCP(sc)) {
        reset_code = bxe_fw_command(sc, reset_code, 0);
    } else {
        reset_code = bxe_nic_unload_no_mcp(sc);
    }

    return (reset_code);
}
/* send UNLOAD_DONE command to the MCP */
static void
bxe_send_unload_done(struct bxe_softc *sc,
                     uint8_t keep_link)
{
    uint32_t reset_param;

    /* optionally ask the MCP to keep the link up across the unload */
    reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;

    /* nothing to report when running without management firmware */
    if (BXE_NOMCP(sc)) {
        return;
    }

    bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
}
/*
 * On a PMF, wait (up to ~1 second) for the function to return to the
 * STARTED state; if it does not, force a STARTED-->TX_STOPPED-->STARTED
 * round trip with driver-only (CLR) transactions. Returns 0 on success
 * or the last ecore_func_state_change() error.
 */
static int
bxe_func_wait_started(struct bxe_softc *sc)
{
    int tout = 50;

    /* only the PMF drives the TX disable/enable transaction */
    if (!sc->port.pmf) {
        return (0);
    }

    /*
     * (assumption: No Attention from MCP at this stage)
     * PMF probably in the middle of TX disable/enable transaction
     * 1. Sync IRS for default SB
     * 2. Sync SP queue - this guarantees us that attention handling started
     * 3. Wait, that TX disable/enable transaction completes
     *
     * 1+2 guarantee that if DCBX attention was scheduled it already changed
     * pending bit of transaction from STARTED-->TX_STOPPED, if we already
     * received completion for the transaction the state is TX_STOPPED.
     * State will return to STARTED after completion of TX_STOPPED-->STARTED
     * transaction.
     */

    /* XXX make sure default SB ISR is done */
    /* need a way to synchronize an irq (intr_mtx?) */

    /* XXX flush any work queues */

    /* poll the function state, 20 ms per attempt, 50 attempts */
    while (ecore_func_get_state(sc, &sc->func_obj) !=
           ECORE_F_STATE_STARTED && tout--) {
        DELAY(20000);
    }

    if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
        /*
         * Failed to complete the transaction in a "good way"
         * Force both transactions with CLR bit.
         */
        struct ecore_func_state_params func_params = { NULL };

        BLOGE(sc, "Unexpected function state! "
                  "Forcing STARTED-->TX_STOPPED-->STARTED\n");

        func_params.f_obj = &sc->func_obj;
        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);

        /* STARTED-->TX_STOPPED */
        func_params.cmd = ECORE_F_CMD_TX_STOP;
        ecore_func_state_change(sc, &func_params);

        /* TX_STOPPED-->STARTED */
        func_params.cmd = ECORE_F_CMD_TX_START;
        return (ecore_func_state_change(sc, &func_params));
    }

    return (0);
}
/*
 * Tear down one RX/TX queue with the mandatory HALT --> TERMINATE -->
 * CFC_DEL ramrod sequence, waiting synchronously for each completion.
 * Returns 0 on success or the first failing transition's error code.
 */
static int
bxe_stop_queue(struct bxe_softc *sc,
               int index)
{
    struct bxe_fastpath *fp = &sc->fp[index];
    struct ecore_queue_state_params q_params = { NULL };
    int rc;

    BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);

    q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
    /* We want to wait for completion in this context */
    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

    /* Stop the primary connection: */

    /* ...halt the connection */
    q_params.cmd = ECORE_Q_CMD_HALT;
    rc = ecore_queue_state_change(sc, &q_params);
    if (rc) {
        return (rc);
    }

    /* ...terminate the connection */
    q_params.cmd = ECORE_Q_CMD_TERMINATE;
    memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
    q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
    rc = ecore_queue_state_change(sc, &q_params);
    if (rc) {
        return (rc);
    }

    /* ...delete cfc entry */
    q_params.cmd = ECORE_Q_CMD_CFC_DEL;
    memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
    q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
    return (ecore_queue_state_change(sc, &q_params));
}
/* wait for the outstanding SP commands */
/*
 * Wait (up to 5 seconds) for the slowpath state bits in 'mask' to clear.
 * Returns TRUE when the bits cleared, FALSE on timeout.
 */
static inline uint8_t
bxe_wait_sp_comp(struct bxe_softc *sc,
                 unsigned long mask)
{
    unsigned long tmp;
    int tout = 5000; /* wait for 5 secs tops */

    /* poll once per millisecond */
    while (tout--) {
        mb();
        if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
            return (TRUE);
        }

        DELAY(1000);
    }

    mb();

    /* final re-check in case completion raced with the timeout */
    tmp = atomic_load_acq_long(&sc->sp_state);
    if (tmp & mask) {
        BLOGE(sc, "Filtering completion timed out: "
                  "sp_state 0x%lx, mask 0x%lx\n",
              tmp, mask);
        return (FALSE);
    }

    /*
     * Bug fix: the bits are clear, so the operation completed; the
     * previous code returned FALSE here even after a successful
     * re-check (cf. Linux bnx2x_wait_sp_comp, which returns true).
     */
    return (TRUE);
}
static int
bxe_func_stop(struct bxe_softc *sc)
{
    struct ecore_func_state_params func_params = { NULL };
    int rc;

    /* request a synchronous FUNC_STOP state transition */
    func_params.f_obj = &sc->func_obj;
    func_params.cmd = ECORE_F_CMD_STOP;
    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

    rc = ecore_func_state_change(sc, &func_params);
    if (rc == 0) {
        return (0);
    }

    /*
     * The 'good way' failed (e.g. due to a parity error during
     * bxe_chip_cleanup()). Retry as a driver-only (dry) transaction so
     * that a subsequent HW_RESET transition remains possible.
     */
    BLOGE(sc, "FUNC_STOP ramrod failed. "
              "Running a dry transaction (%d)\n", rc);
    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
    return (ecore_func_state_change(sc, &func_params));
}
static int
bxe_reset_hw(struct bxe_softc *sc,
             uint32_t load_code)
{
    struct ecore_func_state_params func_params = { NULL };

    /*
     * Issue a synchronous HW_RESET function transition; 'load_code'
     * tells the reset flow which load phase (COMMON/PORT/FUNCTION)
     * this function performed.
     */
    func_params.f_obj = &sc->func_obj;
    func_params.cmd = ECORE_F_CMD_HW_RESET;
    func_params.params.hw_init.load_phase = load_code;
    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

    return (ecore_func_state_change(sc, &func_params));
}
/*
 * Disable interrupts ahead of teardown. NOTE: the "sync" half is a
 * stub — there is currently no mechanism here to wait for in-flight
 * ISRs or to flush work queues (see the XXX notes below).
 */
static void
bxe_int_disable_sync(struct bxe_softc *sc,
                     int disable_hw)
{
    if (disable_hw) {
        /* prevent the HW from sending interrupts */
        bxe_int_disable(sc);
    }

    /* XXX need a way to synchronize ALL irqs (intr_mtx?) */

    /* make sure all ISRs are done */

    /* XXX make sure sp_task is not running */

    /* cancel and flush work queues */
}
/*
 * Orderly chip shutdown: drain TX, delete MAC filters, stop RX, clean
 * multicast config, negotiate the unload mode with the MCP, close all
 * queues, stop the function, reset the HW and report UNLOAD_DONE.
 * Errors along the way are logged but the teardown presses on.
 */
static void
bxe_chip_cleanup(struct bxe_softc *sc,
                 uint32_t unload_mode,
                 uint8_t keep_link)
{
    int port = SC_PORT(sc);
    struct ecore_mcast_ramrod_params rparam = { NULL };
    uint32_t reset_code;
    int i, rc = 0;

    bxe_drain_tx_queues(sc);

    /* give HW time to discard old tx messages */
    DELAY(1000);

    /* Clean all ETH MACs */
    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
    if (rc < 0) {
        BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
    }

    /* Clean up UC list */
    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
    if (rc < 0) {
        BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
    }

    /* Disable LLH */
    if (!CHIP_IS_E1(sc)) {
        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
    }

    /* Set "drop all" to stop Rx */

    /*
     * We need to take the BXE_MCAST_LOCK() here in order to prevent
     * a race between the completion code and this code.
     */
    BXE_MCAST_LOCK(sc);

    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
        /* a filter change is in flight; defer ours until it completes */
        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
    } else {
        bxe_set_storm_rx_mode(sc);
    }

    /* Clean up multicast configuration */
    rparam.mcast_obj = &sc->mcast_obj;
    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
    if (rc < 0) {
        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
    }

    BXE_MCAST_UNLOCK(sc);

    // XXX bxe_iov_chip_cleanup(sc);

    /*
     * Send the UNLOAD_REQUEST to the MCP. This will return if
     * this function should perform FUNCTION, PORT, or COMMON HW
     * reset.
     */
    reset_code = bxe_send_unload_req(sc, unload_mode);

    /*
     * (assumption: No Attention from MCP at this stage)
     * PMF probably in the middle of TX disable/enable transaction
     */
    rc = bxe_func_wait_started(sc);
    if (rc) {
        BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
    }

    /*
     * Close multi and leading connections
     * Completions for ramrods are collected in a synchronous way
     */
    for (i = 0; i < sc->num_queues; i++) {
        if (bxe_stop_queue(sc, i)) {
            /* skip the SP-completion wait but still stop the function */
            goto unload_error;
        }
    }

    /*
     * If SP settings didn't get completed so far - something
     * very wrong has happen.
     */
    if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
        BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
    }

unload_error:
    /* fallthrough from the loop above is intentional */

    rc = bxe_func_stop(sc);
    if (rc) {
        BLOGE(sc, "Function stop failed!(%d)\n", rc);
    }

    /* disable HW interrupts */
    bxe_int_disable_sync(sc, TRUE);

    /* detach interrupts */
    bxe_interrupt_detach(sc);

    /* Reset the chip */
    rc = bxe_reset_hw(sc, reset_code);
    if (rc) {
        BLOGE(sc, "Hardware reset failed(%d)\n", rc);
    }

    /* Report UNLOAD_DONE to MCP */
    bxe_send_unload_done(sc, keep_link);
}
/*
 * Clear the "close the gates" attention-mask bits so that PXP/NIG traffic
 * is no longer gated.  The register and mask differ between E1 and later
 * chips, but the operation is the same read/modify/write.
 */
static void
bxe_disable_close_the_gate(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t reg, mask, val;

    BLOGD(sc, DBG_LOAD,
          "Disabling 'close the gates'\n");

    if (CHIP_IS_E1(sc)) {
        /* per-function AEU attention mask on E1 */
        reg = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                     MISC_REG_AEU_MASK_ATTN_FUNC_0;
        mask = 0x300;
    } else {
        /* shared AEU general mask on E1H/E2+ */
        reg = MISC_REG_AEU_GENERAL_MASK;
        mask = (MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
                MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
    }

    val = REG_RD(sc, reg);
    val &= ~mask;
    REG_WR(sc, reg, val);
}
/*
 * Clean the objects that keep internal lists (MAC, UC-list, multicast)
 * without sending ramrods (RAMROD_DRV_CLR_ONLY = dry cleanup).  Should be
 * run when interrupts are disabled, i.e. after the chip can no longer
 * complete the commands itself.  Errors are logged but not fatal.
 */
static void
bxe_squeeze_objects(struct bxe_softc *sc)
{
    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
    struct ecore_mcast_ramrod_params rparam = { NULL };
    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
    int rc;

    /* Cleanup MACs' object first... */

    /* Wait for completion of requested */
    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
    /* Perform a dry cleanup */
    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);

    /* Clean ETH primary MAC */
    bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
    /* use the cached mac_obj pointer consistently (same as sp_objs->mac_obj) */
    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
                             &ramrod_flags);
    if (rc != 0) {
        BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
    }

    /* Cleanup UC list */
    vlan_mac_flags = 0;
    bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
                             &ramrod_flags);
    if (rc != 0) {
        BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
    }

    /* Now clean mcast object... */

    rparam.mcast_obj = &sc->mcast_obj;
    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);

    /* Add a DEL command... */
    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
    if (rc < 0) {
        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
    }

    /* now wait until all pending commands are cleared */

    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
    while (rc != 0) {
        if (rc < 0) {
            BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
            return;
        }

        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
    }
}
/*
 * Stop the controller.
 *
 * Tears the driver down to BXE_STATE_CLOSED: stops TX, drains queues,
 * cleans up the chip (or, in recovery mode, only sends UNLOAD_REQUEST and
 * masks interrupts), squeezes the filtering objects, frees fastpath and
 * firmware-stats memory, and performs parity-attention / "close the gate"
 * bookkeeping for the last loaded function.
 *
 * Returns 0 on success, -1 when unload is refused because parity-error
 * recovery is still in progress.  Must be called with the core lock held.
 */
static __noinline int
bxe_nic_unload(struct bxe_softc *sc,
               uint32_t         unload_mode,
               uint8_t          keep_link)
{
    uint8_t global = FALSE;
    uint32_t val;
    int i;

    BXE_CORE_LOCK_ASSERT(sc);

    if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);

    /* stop all watchdogs; the lock/unlock pair syncs with in-flight TX */
    for (i = 0; i < sc->num_queues; i++) {
        struct bxe_fastpath *fp;

        fp = &sc->fp[i];
        fp->watchdog_timer = 0;
        BXE_FP_TX_LOCK(fp);
        BXE_FP_TX_UNLOCK(fp);
    }

    BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");

    /* mark driver as unloaded in shmem2 */
    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
                  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
    }

    if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
        (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {

        if(CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
            /*
             * We can get here if the driver has been unloaded
             * during parity error recovery and is either waiting for a
             * leader to complete or for other functions to unload and
             * then ifconfig down has been issued. In this case we want to
             * unload and let other functions to complete a recovery
             * process.
             */
            sc->recovery_state = BXE_RECOVERY_DONE;
            sc->is_leader = 0;
            bxe_release_leader_lock(sc);
            mb();
            BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
        }
        BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x"
            " state = 0x%x\n", sc->recovery_state, sc->state);
        return (-1);
    }

    /*
     * Nothing to do during unload if previous bxe_nic_load()
     * did not completed successfully - all resourses are released.
     */
    if ((sc->state == BXE_STATE_CLOSED) ||
        (sc->state == BXE_STATE_ERROR)) {
        return (0);
    }

    sc->state = BXE_STATE_CLOSING_WAITING_HALT;
    mb();

    /* stop tx */
    bxe_tx_disable(sc);

    sc->rx_mode = BXE_RX_MODE_NONE;
    /* XXX set rx mode ??? */

    if (IS_PF(sc) && !sc->grcdump_done) {
        /* set ALWAYS_ALIVE bit in shmem */
        sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;

        bxe_drv_pulse(sc);

        /* flush the final statistics before the chip goes away */
        bxe_stats_handle(sc, STATS_EVENT_STOP);
        bxe_save_statistics(sc);
    }

    /* wait till consumers catch up with producers in all queues */
    bxe_drain_tx_queues(sc);

    /* if VF indicate to PF this function is going down (PF will delete sp
     * elements and clear initializations
     */
    if (IS_VF(sc)) {
        ; /* bxe_vfpf_close_vf(sc); */
    } else if (unload_mode != UNLOAD_RECOVERY) {
        /* if this is a normal/close unload need to clean up chip */
        if (!sc->grcdump_done)
            bxe_chip_cleanup(sc, unload_mode, keep_link);
    } else {
        /* Send the UNLOAD_REQUEST to the MCP */
        bxe_send_unload_req(sc, unload_mode);

        /*
         * Prevent transactions to host from the functions on the
         * engine that doesn't reset global blocks in case of global
         * attention once gloabl blocks are reset and gates are opened
         * (the engine which leader will perform the recovery
         * last).
         */
        if (!CHIP_IS_E1x(sc)) {
            bxe_pf_disable(sc);
        }

        /* disable HW interrupts */
        bxe_int_disable_sync(sc, TRUE);

        /* detach interrupts */
        bxe_interrupt_detach(sc);

        /* Report UNLOAD_DONE to MCP */
        bxe_send_unload_done(sc, FALSE);
    }

    /*
     * At this stage no more interrupts will arrive so we may safely clean
     * the queue'able objects here in case they failed to get cleaned so far.
     */
    if (IS_PF(sc)) {
        bxe_squeeze_objects(sc);
    }

    /* There should be no more pending SP commands at this stage */
    sc->sp_state = 0;

    sc->port.pmf = 0;

    bxe_free_fp_buffers(sc);

    if (IS_PF(sc)) {
        bxe_free_mem(sc);
    }

    bxe_free_fw_stats_mem(sc);

    sc->state = BXE_STATE_CLOSED;

    /*
     * Check if there are pending parity attentions. If there are - set
     * RECOVERY_IN_PROGRESS.
     */
    if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
        bxe_set_reset_in_progress(sc);

        /* Set RESET_IS_GLOBAL if needed */
        if (global) {
            bxe_set_reset_global(sc);
        }
    }

    /*
     * The last driver must disable a "close the gate" if there is no
     * parity attention or "process kill" pending.
     */
    if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
        bxe_reset_is_done(sc, SC_PATH(sc))) {
        bxe_disable_close_the_gate(sc);
    }

    BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");

    bxe_link_report(sc);

    return (0);
}
/*
 * Called by the OS to set various media options (i.e. link, speed, etc.) when
 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
 * Only IFM_ETHER/IFM_AUTO is accepted; any attempt to change the media
 * type is rejected with EINVAL.
 */
static int
bxe_ifmedia_update(if_t ifp)
{
    struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp);
    struct ifmedia *ifm = &sc->ifmedia;

    /* We only support Ethernet media type. */
    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
        return (EINVAL);
    }

    /* We don't support changing the media type. */
    if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
        BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
              IFM_SUBTYPE(ifm->ifm_media));
        return (EINVAL);
    }

    return (0);
}
/*
 * Called by the OS to get the current media status (i.e. link, speed, etc.).
 */
static void
bxe_ifmedia_status(if_t ifp, struct ifmediareq *ifmr)
{
    struct bxe_softc *sc = if_getsoftc(ifp);

    /* Bug 165447: 'ifconfig' skips the "status: ..." line when IFM_AVALID
     * is not set, so set it unconditionally (regardless of the admin
     * up/down state) to keep the line visible.
     */
    ifmr->ifm_status = IFM_AVALID;

    /* Setup the default interface info. */
    ifmr->ifm_active = IFM_ETHER;

    /* Report link down if the driver isn't running. */
    if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
        ifmr->ifm_active |= IFM_NONE;
        BLOGD(sc, DBG_PHY, "in %s : nic still not loaded fully\n", __func__);
        BLOGD(sc, DBG_PHY, "in %s : link_up (1) : %d\n",
              __func__, sc->link_vars.link_up);
        return;
    }

    /* no link: report IFM_NONE and stop here */
    if (!sc->link_vars.link_up) {
        ifmr->ifm_active |= IFM_NONE;
        BLOGD(sc, DBG_PHY, "in %s : setting IFM_NONE\n",
              __func__);
        return;
    }

    /* link is up: active, full duplex, plus the detected media */
    ifmr->ifm_status |= IFM_ACTIVE;
    ifmr->ifm_active |= (IFM_FDX | sc->media);
}
/*
 * Taskqueue handler for deferred chip-level work.  Currently the only
 * recognized request is CHIP_TQ_REINIT, which restarts the interface if
 * it is running; anything else is ignored.
 */
static void
bxe_handle_chip_tq(void *context,
                   int  pending)
{
    struct bxe_softc *sc = (struct bxe_softc *)context;
    long work = atomic_load_acq_long(&sc->chip_tq_flags);

    if (work != CHIP_TQ_REINIT) {
        return;
    }

    if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
        /* restart the interface */
        BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
        bxe_periodic_stop(sc);
        BXE_CORE_LOCK(sc);
        bxe_stop_locked(sc);
        bxe_init_locked(sc);
        BXE_CORE_UNLOCK(sc);
    }
}
/*
 * Handles any IOCTL calls from the operating system.
 *
 * Supported commands: SIOCSIFMTU (MTU change, triggers reinit),
 * SIOCSIFFLAGS (bring interface up/down), SIOCADD/DELMULTI (refresh the
 * receive filter), SIOCSIFCAP (toggle offload capabilities; LRO toggles
 * trigger reinit), SIOCS/GIFMEDIA (delegated to ifmedia), everything else
 * to ether_ioctl().
 *
 * Returns:
 *   0 = Success, >0 Failure
 */
static int
bxe_ioctl(if_t ifp,
          u_long command,
          caddr_t data)
{
    struct bxe_softc *sc = if_getsoftc(ifp);
    struct ifreq *ifr = (struct ifreq *)data;
    int mask = 0;      /* capabilities that changed (SIOCSIFCAP) */
    int reinit = 0;    /* set when the change requires a HW re-init */
    int error = 0;

    /* valid MTU bounds for this hardware */
    int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
    int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);

    switch (command)
    {
    case SIOCSIFMTU:
        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
              ifr->ifr_mtu);

        if (sc->mtu == ifr->ifr_mtu) {
            /* nothing to change */
            break;
        }

        if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
            BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
                  ifr->ifr_mtu, mtu_min, mtu_max);
            error = EINVAL;
            break;
        }

        atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
                             (unsigned long)ifr->ifr_mtu);
        /*
        atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp),
                              (unsigned long)ifr->ifr_mtu);
        XXX - Not sure why it needs to be atomic
        */
        if_setmtu(ifp, ifr->ifr_mtu);
        reinit = 1;
        break;

    case SIOCSIFFLAGS:
        /* toggle the interface state up or down */
        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");

        BXE_CORE_LOCK(sc);
        /* check if the interface is up */
        if (if_getflags(ifp) & IFF_UP) {
            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
                /* already running: just refresh the receive mode flags */
                bxe_set_rx_mode(sc);
            } else if(sc->state != BXE_STATE_DISABLED) {
                bxe_init_locked(sc);
            }
        } else {
            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
                bxe_periodic_stop(sc);
                bxe_stop_locked(sc);
            }
        }
        BXE_CORE_UNLOCK(sc);

        break;

    case SIOCADDMULTI:
    case SIOCDELMULTI:
        /* add/delete multicast addresses */
        BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");

        /* check if the interface is up */
        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
            /* set the receive mode flags */
            BXE_CORE_LOCK(sc);
            bxe_set_rx_mode(sc);
            BXE_CORE_UNLOCK(sc); 
        }

        break;

    case SIOCSIFCAP:
        /* find out which capabilities have changed */
        mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));

        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
              mask);

        /* toggle the LRO capabilites enable flag (requires re-init) */
        if (mask & IFCAP_LRO) {
            if_togglecapenable(ifp, IFCAP_LRO);
            BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
                  (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF");
            reinit = 1;
        }

        /* toggle the TXCSUM checksum capabilites enable flag */
        if (mask & IFCAP_TXCSUM) {
            if_togglecapenable(ifp, IFCAP_TXCSUM);
            BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
                  (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF");
            if (if_getcapenable(ifp) & IFCAP_TXCSUM) {
                if_sethwassistbits(ifp, (CSUM_IP      |
                                         CSUM_TCP      |
                                         CSUM_UDP      |
                                         CSUM_TSO      |
                                         CSUM_TCP_IPV6 |
                                         CSUM_UDP_IPV6), 0);
            } else {
                if_clearhwassist(ifp); /* XXX */
            }
        }

        /* toggle the RXCSUM checksum capabilities enable flag */
        if (mask & IFCAP_RXCSUM) {
            if_togglecapenable(ifp, IFCAP_RXCSUM);
            BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
                  (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF");
            if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
                if_sethwassistbits(ifp, (CSUM_IP      |
                                         CSUM_TCP      |
                                         CSUM_UDP      |
                                         CSUM_TSO      |
                                         CSUM_TCP_IPV6 |
                                         CSUM_UDP_IPV6), 0);
            } else {
                if_clearhwassist(ifp); /* XXX */
            }
        }

        /* toggle TSO4 capabilities enabled flag */
        if (mask & IFCAP_TSO4) {
            if_togglecapenable(ifp, IFCAP_TSO4);
            BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
                  (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF");
        }

        /* toggle TSO6 capabilities enabled flag */
        if (mask & IFCAP_TSO6) {
            if_togglecapenable(ifp, IFCAP_TSO6);
            BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
                  (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF");
        }

        /* toggle VLAN_HWTSO capabilities enabled flag */
        if (mask & IFCAP_VLAN_HWTSO) {
            if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
            BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
                  (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
        }

        /* toggle VLAN_HWCSUM capabilities enabled flag */
        if (mask & IFCAP_VLAN_HWCSUM) {
            /* XXX investigate this... */
            BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
            error = EINVAL;
        }

        /* toggle VLAN_MTU capabilities enable flag */
        if (mask & IFCAP_VLAN_MTU) {
            /* XXX investigate this... */
            BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
            error = EINVAL;
        }

        /* toggle VLAN_HWTAGGING capabilities enabled flag */
        if (mask & IFCAP_VLAN_HWTAGGING) {
            /* XXX investigate this... */
            BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
            error = EINVAL;
        }

        /* toggle VLAN_HWFILTER capabilities enabled flag */
        if (mask & IFCAP_VLAN_HWFILTER) {
            /* XXX investigate this... */
            BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
            error = EINVAL;
        }

        /* XXX not yet...
         * IFCAP_WOL_MAGIC
         */

        break;

    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        /* set/get interface media */
        BLOGD(sc, DBG_IOCTL,
              "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
              (command & 0xff));
        error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
        break;

    default:
        BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
              (command & 0xff));
        error = ether_ioctl(ifp, command, data);
        break;
    }

    /* restart the hardware if the change requires it and we are running */
    if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
        BLOGD(sc, DBG_LOAD | DBG_IOCTL,
              "Re-initializing hardware from IOCTL change\n");
        bxe_periodic_stop(sc);
        BXE_CORE_LOCK(sc);
        bxe_stop_locked(sc);
        bxe_init_locked(sc);
        BXE_CORE_UNLOCK(sc);
    }

    return (error);
}
/*
 * Dump an mbuf chain to the debug log (only when DBG_MBUF is enabled in
 * sc->debug).  For each mbuf print length/flags/data pointers, the packet
 * header (if M_PKTHDR), and external-storage info (if M_EXT); when
 * 'contents' is non-zero also dump the raw data via bxe_dump_mbuf_data().
 */
static __noinline void
bxe_dump_mbuf(struct bxe_softc *sc,
              struct mbuf      *m,
              uint8_t          contents)
{
    char * type;
    int i = 0;

    if (!(sc->debug & DBG_MBUF)) {
        return;
    }

    if (m == NULL) {
        BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
        return;
    }

    /* walk the chain, one log line (or more) per mbuf */
    while (m) {
        BLOGD(sc, DBG_MBUF,
              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
              i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);

        if (m->m_flags & M_PKTHDR) {
             BLOGD(sc, DBG_MBUF,
                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
                   i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
                   (int)m->m_pkthdr.csum_flags, CSUM_BITS);
        }

        if (m->m_flags & M_EXT) {
            /* translate the external-buffer type to a printable name */
            switch (m->m_ext.ext_type) {
            case EXT_CLUSTER:    type = "EXT_CLUSTER";    break;
            case EXT_SFBUF:      type = "EXT_SFBUF";      break;
            case EXT_JUMBOP:     type = "EXT_JUMBOP";     break;
            case EXT_JUMBO9:     type = "EXT_JUMBO9";     break;
            case EXT_JUMBO16:    type = "EXT_JUMBO16";    break;
            case EXT_PACKET:     type = "EXT_PACKET";     break;
            case EXT_MBUF:       type = "EXT_MBUF";       break;
            case EXT_NET_DRV:    type = "EXT_NET_DRV";    break;
            case EXT_MOD_TYPE:   type = "EXT_MOD_TYPE";   break;
            case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
            case EXT_EXTREF:     type = "EXT_EXTREF";     break;
            default:             type = "UNKNOWN";        break;
            }

            BLOGD(sc, DBG_MBUF,
                  "%02d: - m_ext: %p ext_size=%d type=%s\n",
                  i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
        }

        if (contents) {
            bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
        }

        m = m->m_next;
        i++;
    }
}
/*
 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
 * The window: 3 bds are = 1 for headers BD + 2 for parse BD and last BD
 * The headers comes in a separate bd in FreeBSD so 13-3=10.
 *
 * 'segs' holds 'nsegs' valid DMA segments; segment 0 is assumed to be
 * the Eth+IP+TCP header.  NOTE(review): the htole16() on ds_len/tso_segsz
 * makes both sides of each comparison consistently byte-swapped; verify
 * this is intentional on big-endian targets.
 *
 * Returns: 0 if OK to send, 1 if packet needs further defragmentation
 */
static int
bxe_chktso_window(struct bxe_softc  *sc,
                  int               nsegs,
                  bus_dma_segment_t *segs,
                  struct mbuf       *m)
{
    uint32_t num_wnds, wnd_size, wnd_sum;
    int32_t frag_idx, wnd_idx;
    unsigned short lso_mss;

    wnd_sum = 0;
    wnd_size = 10;
    num_wnds = nsegs - wnd_size;
    lso_mss = htole16(m->m_pkthdr.tso_segsz);

    /*
     * Total header lengths Eth+IP+TCP in first FreeBSD mbuf so calculate the
     * first window sum of data while skipping the first assuming it is the
     * header in FreeBSD.
     */
    for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
        wnd_sum += htole16(segs[frag_idx].ds_len);
    }

    /* check the first 10 bd window size */
    if (wnd_sum < lso_mss) {
        return (1);
    }

    /*
     * Slide the window across the remaining segments.  The extra
     * 'frag_idx < nsegs' bound fixes an off-by-one in the original loop,
     * which let frag_idx reach nsegs on the last iteration and read one
     * element past the end of segs[].
     */
    for (wnd_idx = 0; (wnd_idx < num_wnds) && (frag_idx < nsegs);
         wnd_idx++, frag_idx++) {
        /* subtract the first mbuf->m_len of the last wndw(-header) */
        wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
        /* add the next mbuf len to the len of our new window */
        wnd_sum += htole16(segs[frag_idx].ds_len);
        if (wnd_sum < lso_mss) {
            return (1);
        }
    }

    return (0);
}
/*
 * Fill in the E2 parsing-BD data word for checksum offload: encodes the
 * L4 header start offset (in 16-bit words) and, for TCP, the TCP header
 * length (in 32-bit words).
 *
 * Returns the total Eth+IP+L4 header length in bytes, or 0 when no L4
 * offload is needed/possible (IP-only csum, unknown ethertype, or no
 * recognized L4 csum flag).
 */
static uint8_t
bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
                    struct mbuf         *m,
                    uint32_t            *parsing_data)
{
    struct ether_vlan_header *eh = NULL;
    struct ip *ip4 = NULL;
    struct ip6_hdr *ip6 = NULL;
    caddr_t ip = NULL;
    struct tcphdr *th = NULL;
    int e_hlen, ip_hlen, l4_off;
    uint16_t proto;

    /* equality (not bit-test): csum_flags contains ONLY CSUM_IP */
    if (m->m_pkthdr.csum_flags == CSUM_IP) {
        /* no L4 checksum offload needed */
        return (0);
    }

    /* get the Ethernet header */
    eh = mtod(m, struct ether_vlan_header *);

    /* handle VLAN encapsulation if present */
    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
        proto = ntohs(eh->evl_proto);
    } else {
        e_hlen = ETHER_HDR_LEN;
        proto = ntohs(eh->evl_encap_proto);
    }

    switch (proto) {
    case ETHERTYPE_IP:
        /* get the IP header, if mbuf len < 20 then header in next mbuf */
        ip4 = (m->m_len < sizeof(struct ip)) ?
                  (struct ip *)m->m_next->m_data :
                  (struct ip *)(m->m_data + e_hlen);
        /* ip_hl is number of 32-bit words */
        ip_hlen = (ip4->ip_hl << 2);
        ip = (caddr_t)ip4;
        break;
    case ETHERTYPE_IPV6:
        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
                  (struct ip6_hdr *)m->m_next->m_data :
                  (struct ip6_hdr *)(m->m_data + e_hlen);
        /* XXX cannot support offload with IPv6 extensions */
        ip_hlen = sizeof(struct ip6_hdr);
        ip = (caddr_t)ip6;
        break;
    default:
        /* We can't offload in this case... */
        /* XXX error stat ??? */
        return (0);
    }

    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
    l4_off = (e_hlen + ip_hlen);

    /* L4 start offset is encoded in units of 16-bit words */
    *parsing_data |=
        (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
         ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);

    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
                                  CSUM_TSO |
                                  CSUM_TCP_IPV6)) {
        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
        th = (struct tcphdr *)(ip + ip_hlen);
        /* th_off is number of 32-bit words */
        *parsing_data |= ((th->th_off <<
                           ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
                          ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
        return (l4_off + (th->th_off << 2)); /* entire header length */
    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
                                         CSUM_UDP_IPV6)) {
        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
        return (l4_off + sizeof(struct udphdr)); /* entire header length */
    } else {
        /* XXX error stat ??? */
        return (0);
    }
}
/*
 * Fill in the E1x parsing BD for checksum offload.  Header lengths in the
 * parsing BD are expressed in 16-bit words ('_w' fields), hence the >> 1 /
 * << 1 conversions below.
 *
 * Returns the total Eth+IP+L4 header length in bytes, or 0 when only
 * CSUM_IP was requested or the ethertype is not offloadable.
 */
static uint8_t
bxe_set_pbd_csum(struct bxe_fastpath        *fp,
                 struct mbuf                *m,
                 struct eth_tx_parse_bd_e1x *pbd)
{
    struct ether_vlan_header *eh = NULL;
    struct ip *ip4 = NULL;
    struct ip6_hdr *ip6 = NULL;
    caddr_t ip = NULL;
    struct tcphdr *th = NULL;
    struct udphdr *uh = NULL;
    int e_hlen, ip_hlen;
    uint16_t proto;
    uint8_t hlen;
    uint16_t tmp_csum;
    uint32_t *tmp_uh;

    /* get the Ethernet header */
    eh = mtod(m, struct ether_vlan_header *);

    /* handle VLAN encapsulation if present */
    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
        proto = ntohs(eh->evl_proto);
    } else {
        e_hlen = ETHER_HDR_LEN;
        proto = ntohs(eh->evl_encap_proto);
    }

    switch (proto) {
    case ETHERTYPE_IP:
        /* get the IP header, if mbuf len < 20 then header in next mbuf */
        ip4 = (m->m_len < sizeof(struct ip)) ?
                  (struct ip *)m->m_next->m_data :
                  (struct ip *)(m->m_data + e_hlen);
        /* ip_hl is number of 32-bit words; << 1 yields 16-bit words */
        ip_hlen = (ip4->ip_hl << 1);
        ip = (caddr_t)ip4;
        break;
    case ETHERTYPE_IPV6:
        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
                  (struct ip6_hdr *)m->m_next->m_data :
                  (struct ip6_hdr *)(m->m_data + e_hlen);
        /* XXX cannot support offload with IPv6 extensions */
        ip_hlen = (sizeof(struct ip6_hdr) >> 1);  /* in 16-bit words */
        ip = (caddr_t)ip6;
        break;
    default:
        /* We can't offload in this case... */
        /* XXX error stat ??? */
        return (0);
    }

    /* Ethernet header length in 16-bit words */
    hlen = (e_hlen >> 1);

    /* note that rest of global_data is indirectly zeroed here */
    if (m->m_flags & M_VLANTAG) {
        pbd->global_data =
            htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
    } else {
        pbd->global_data = htole16(hlen);
    }

    /* NOTE(review): no htole16() here, unlike total_hlen_w below - verify */
    pbd->ip_hlen_w = ip_hlen;

    hlen += pbd->ip_hlen_w;

    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */

    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
                                  CSUM_TSO |
                                  CSUM_TCP_IPV6)) {
        /* (ip_hlen << 1) converts 16-bit words back to bytes */
        th = (struct tcphdr *)(ip + (ip_hlen << 1));
        /* th_off is number of 32-bit words; << 1 yields 16-bit words */
        hlen += (uint16_t)(th->th_off << 1);
    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
                                         CSUM_UDP_IPV6)) {
        uh = (struct udphdr *)(ip + (ip_hlen << 1));
        hlen += (sizeof(struct udphdr) / 2);
    } else {
        /* valid case as only CSUM_IP was set */
        return (0);
    }

    pbd->total_hlen_w = htole16(hlen);

    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
                                  CSUM_TSO |
                                  CSUM_TCP_IPV6)) {
        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
        pbd->tcp_pseudo_csum = ntohs(th->th_sum);
    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
                                         CSUM_UDP_IPV6)) {
        fp->eth_q_stats.tx_ofld_frames_csum_udp++;

        /*
         * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
         * checksums and does not know anything about the UDP header and where
         * the checksum field is located. It only knows about TCP. Therefore
         * we "lie" to the hardware for outgoing UDP packets w/ checksum
         * offload. Since the checksum field offset for TCP is 16 bytes and
         * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
         * bytes less than the start of the UDP header. This allows the
         * hardware to write the checksum in the correct spot. But the
         * hardware will compute a checksum which includes the last 10 bytes
         * of the IP header. To correct this we tweak the stack computed
         * pseudo checksum by folding in the calculation of the inverse
         * checksum for those final 10 bytes of the IP header. This allows
         * the correct checksum to be computed by the hardware.
         */

        /* set pointer 10 bytes before UDP header */
        tmp_uh = (uint32_t *)((uint8_t *)uh - 10);

        /* calculate a pseudo header checksum over the first 10 bytes */
        tmp_csum = in_pseudo(*tmp_uh,
                             *(tmp_uh + 1),
                             *(uint16_t *)(tmp_uh + 2));

        pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
    }

    return (hlen * 2); /* entire header length, number of bytes */
}
/*
 * Encode the TSO MSS into the E2 parsing-BD data word.
 */
static void
bxe_set_pbd_lso_e2(struct mbuf *m,
                   uint32_t    *parsing_data)
{
    uint32_t mss_field;

    /* place the MSS in its field and mask off any out-of-field bits */
    mss_field = (m->m_pkthdr.tso_segsz << ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT);
    mss_field &= ETH_TX_PARSE_BD_E2_LSO_MSS;

    *parsing_data |= mss_field;

    /* XXX test for IPv6 with extension header... */
}
/*
 * Fill in the E1x parsing BD with TSO-specific fields: MSS, TCP sequence
 * number, TCP flags, IP id and the pseudo-header checksum.  Assumes an
 * IPv4 packet whose entire Eth+IP+TCP header lies in the first mbuf
 * (IPv6 path is present but compiled out via #if 1).
 */
static void
bxe_set_pbd_lso(struct mbuf                *m,
                struct eth_tx_parse_bd_e1x *pbd)
{
    struct ether_vlan_header *eh = NULL;
    struct ip *ip = NULL;
    struct tcphdr *th = NULL;
    int e_hlen;

    /* get the Ethernet header */
    eh = mtod(m, struct ether_vlan_header *);

    /* handle VLAN encapsulation if present */
    e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
                 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;

    /* get the IP and TCP header, with LSO entire header in first mbuf */
    /* XXX assuming IPv4 */
    ip = (struct ip *)(m->m_data + e_hlen);
    th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));

    pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
    pbd->tcp_send_seq = ntohl(th->th_seq);
    /* TCP flags live in byte 13 of the header (word 3, bits 16-23) */
    pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);

#if 1
        /* XXX IPv4 */
        pbd->ip_id = ntohs(ip->ip_id);
        pbd->tcp_pseudo_csum =
            ntohs(in_pseudo(ip->ip_src.s_addr,
                            ip->ip_dst.s_addr,
                            htons(IPPROTO_TCP)));
#else
        /* XXX IPv6 */
        pbd->tcp_pseudo_csum =
            ntohs(in_pseudo(&ip6->ip6_src,
                            &ip6->ip6_dst,
                            htons(IPPROTO_TCP)));
#endif

    pbd->global_data |=
        htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
}
/*
* Encapsulte an mbuf cluster into the tx bd chain and makes the memory
* visible to the controller.
*
* If an mbuf is submitted to this routine and cannot be given to the
* controller (e.g. it has too many fragments) then the function may free
* the mbuf and return to the caller.
*
* Returns:
* 0 = Success, !0 = Failure
* Note the side effect that an mbuf may be freed if it causes a problem.
*/
static int
bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
{
bus_dma_segment_t segs[32];
struct mbuf *m0;
struct bxe_sw_tx_bd *tx_buf;
struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
/* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
struct eth_tx_bd *tx_data_bd;
struct eth_tx_bd *tx_total_pkt_size_bd;
struct eth_tx_start_bd *tx_start_bd;
uint16_t bd_prod, pkt_prod, total_pkt_size;
uint8_t mac_type;
int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
struct bxe_softc *sc;
uint16_t tx_bd_avail;
struct ether_vlan_header *eh;
uint32_t pbd_e2_parsing_data = 0;
uint8_t hlen = 0;
int tmp_bd;
int i;
sc = fp->sc;
M_ASSERTPKTHDR(*m_head);
m0 = *m_head;
rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
tx_start_bd = NULL;
tx_data_bd = NULL;
tx_total_pkt_size_bd = NULL;
/* get the H/W pointer for packets and BDs */
pkt_prod = fp->tx_pkt_prod;
bd_prod = fp->tx_bd_prod;
mac_type = UNICAST_ADDRESS;
/* map the mbuf into the next open DMAable memory */
tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
tx_buf->m_map, m0,
segs, &nsegs, BUS_DMA_NOWAIT);
/* mapping errors */
if(__predict_false(error != 0)) {
fp->eth_q_stats.tx_dma_mapping_failure++;
if (error == ENOMEM) {
/* resource issue, try again later */
rc = ENOMEM;
} else if (error == EFBIG) {
/* possibly recoverable with defragmentation */
fp->eth_q_stats.mbuf_defrag_attempts++;
m0 = m_defrag(*m_head, M_NOWAIT);
if (m0 == NULL) {
fp->eth_q_stats.mbuf_defrag_failures++;
rc = ENOBUFS;
} else {
/* defrag successful, try mapping again */
*m_head = m0;
error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
tx_buf->m_map, m0,
segs, &nsegs, BUS_DMA_NOWAIT);
if (error) {
fp->eth_q_stats.tx_dma_mapping_failure++;
rc = error;
}
}
} else {
/* unknown, unrecoverable mapping error */
BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
bxe_dump_mbuf(sc, m0, FALSE);
rc = error;
}
goto bxe_tx_encap_continue;
}
tx_bd_avail = bxe_tx_avail(sc, fp);
/* make sure there is enough room in the send queue */
if (__predict_false(tx_bd_avail < (nsegs + 2))) {
/* Recoverable, try again later. */
fp->eth_q_stats.tx_hw_queue_full++;
bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
rc = ENOMEM;
goto bxe_tx_encap_continue;
}
/* capture the current H/W TX chain high watermark */
if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
(TX_BD_USABLE - tx_bd_avail))) {
fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
}
/* make sure it fits in the packet window */
if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
/*
* The mbuf may be to big for the controller to handle. If the frame
* is a TSO frame we'll need to do an additional check.
*/
if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
goto bxe_tx_encap_continue; /* OK to send */
} else {
fp->eth_q_stats.tx_window_violation_tso++;
}
} else {
fp->eth_q_stats.tx_window_violation_std++;
}
/* lets try to defragment this mbuf and remap it */
fp->eth_q_stats.mbuf_defrag_attempts++;
bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
m0 = m_defrag(*m_head, M_NOWAIT);
if (m0 == NULL) {
fp->eth_q_stats.mbuf_defrag_failures++;
/* Ugh, just drop the frame... :( */
rc = ENOBUFS;
} else {
/* defrag successful, try mapping again */
*m_head = m0;
error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
tx_buf->m_map, m0,
segs, &nsegs, BUS_DMA_NOWAIT);
if (error) {
fp->eth_q_stats.tx_dma_mapping_failure++;
/* No sense in trying to defrag/copy chain, drop it. :( */
rc = error;
} else {
/* if the chain is still too long then drop it */
if(m0->m_pkthdr.csum_flags & CSUM_TSO) {
/*
* in case TSO is enabled nsegs should be checked against
* BXE_TSO_MAX_SEGMENTS
*/
if (__predict_false(nsegs > BXE_TSO_MAX_SEGMENTS)) {
bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
fp->eth_q_stats.nsegs_path1_errors++;
rc = ENODEV;
}
} else {
if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
fp->eth_q_stats.nsegs_path2_errors++;
rc = ENODEV;
}
}
}
}
}
bxe_tx_encap_continue:
/* Check for errors */
if (rc) {
if (rc == ENOMEM) {
/* recoverable try again later */
} else {
fp->eth_q_stats.tx_soft_errors++;
fp->eth_q_stats.mbuf_alloc_tx--;
m_freem(*m_head);
*m_head = NULL;
}
return (rc);
}
/* set flag according to packet type (UNICAST_ADDRESS is default) */
if (m0->m_flags & M_BCAST) {
mac_type = BROADCAST_ADDRESS;
} else if (m0->m_flags & M_MCAST) {
mac_type = MULTICAST_ADDRESS;
}
/* store the mbuf into the mbuf ring */
tx_buf->m = m0;
tx_buf->first_bd = fp->tx_bd_prod;
tx_buf->flags = 0;
/* prepare the first transmit (start) BD for the mbuf */
tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
BLOGD(sc, DBG_TX,
"sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
tx_start_bd->nbytes = htole16(segs[0].ds_len);
total_pkt_size += tx_start_bd->nbytes;
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
/* all frames have at least Start BD + Parsing BD */
nbds = nsegs + 1;
tx_start_bd->nbd = htole16(nbds);
if (m0->m_flags & M_VLANTAG) {
tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
tx_start_bd->bd_flags.as_bitfield |=
(X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
} else {
/* vf tx, start bd must hold the ethertype for fw to enforce it */
if (IS_VF(sc)) {
/* map ethernet header to find type and header length */
eh = mtod(m0, struct ether_vlan_header *);
tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
} else {
/* used by FW for packet accounting */
tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
}
}
/*
* add a parsing BD from the chain. The parsing BD is always added
* though it is only used for TSO and chksum
*/
bd_prod = TX_BD_NEXT(bd_prod);
if (m0->m_pkthdr.csum_flags) {
if (m0->m_pkthdr.csum_flags & CSUM_IP) {
fp->eth_q_stats.tx_ofld_frames_csum_ip++;
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
}
if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
ETH_TX_BD_FLAGS_L4_CSUM);
} else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
ETH_TX_BD_FLAGS_IS_UDP |
ETH_TX_BD_FLAGS_L4_CSUM);
} else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
(m0->m_pkthdr.csum_flags & CSUM_TSO)) {
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
} else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
ETH_TX_BD_FLAGS_IS_UDP);
}
}
if (!CHIP_IS_E1x(sc)) {
pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
if (m0->m_pkthdr.csum_flags) {
hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
}
SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
mac_type);
} else {
uint16_t global_data = 0;
pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
if (m0->m_pkthdr.csum_flags) {
hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
}
SET_FLAG(global_data,
ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
pbd_e1x->global_data |= htole16(global_data);
}
/* setup the parsing BD with TSO specific info */
if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
fp->eth_q_stats.tx_ofld_frames_lso++;
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
if (__predict_false(tx_start_bd->nbytes > hlen)) {
fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
/* split the first BD into header/data making the fw job easy */
nbds++;
tx_start_bd->nbd = htole16(nbds);
tx_start_bd->nbytes = htole16(hlen);
bd_prod = TX_BD_NEXT(bd_prod);
/* new transmit BD after the tx_parse_bd */
tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen);
if (tx_total_pkt_size_bd == NULL) {
tx_total_pkt_size_bd = tx_data_bd;
}
BLOGD(sc, DBG_TX,
"TSO split header size is %d (%x:%x) nbds %d\n",
le16toh(tx_start_bd->nbytes),
le32toh(tx_start_bd->addr_hi),
le32toh(tx_start_bd->addr_lo),
nbds);
}
if (!CHIP_IS_E1x(sc)) {
bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
} else {
bxe_set_pbd_lso(m0, pbd_e1x);
}
}
if (pbd_e2_parsing_data) {
pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
}
/* prepare remaining BDs, start tx bd contains first seg/frag */
for (i = 1; i < nsegs ; i++) {
bd_prod = TX_BD_NEXT(bd_prod);
tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
tx_data_bd->nbytes = htole16(segs[i].ds_len);
if (tx_total_pkt_size_bd == NULL) {
tx_total_pkt_size_bd = tx_data_bd;
}
total_pkt_size += tx_data_bd->nbytes;
}
BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
if (tx_total_pkt_size_bd != NULL) {
tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
}
if (__predict_false(sc->debug & DBG_TX)) {
tmp_bd = tx_buf->first_bd;
for (i = 0; i < nbds; i++)
{
if (i == 0) {
BLOGD(sc, DBG_TX,
"TX Strt: %p bd=%d nbd=%d vlan=0x%x "
"bd_flags=0x%x hdr_nbds=%d\n",
tx_start_bd,
tmp_bd,
le16toh(tx_start_bd->nbd),
le16toh(tx_start_bd->vlan_or_ethertype),
tx_start_bd->bd_flags.as_bitfield,
(tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
} else if (i == 1) {
if (pbd_e1x) {
BLOGD(sc, DBG_TX,
"-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
"ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
"tcp_seq=%u total_hlen_w=%u\n",
pbd_e1x,
tmp_bd,
pbd_e1x->global_data,
pbd_e1x->ip_hlen_w,
pbd_e1x->ip_id,
pbd_e1x->lso_mss,
pbd_e1x->tcp_flags,
pbd_e1x->tcp_pseudo_csum,
pbd_e1x->tcp_send_seq,
le16toh(pbd_e1x->total_hlen_w));
} else { /* if (pbd_e2) */
BLOGD(sc, DBG_TX,
"-> Parse: %p bd=%d dst=%02x:%02x:%02x "
"src=%02x:%02x:%02x parsing_data=0x%x\n",
pbd_e2,
tmp_bd,
pbd_e2->data.mac_addr.dst_hi,
pbd_e2->data.mac_addr.dst_mid,
pbd_e2->data.mac_addr.dst_lo,
pbd_e2->data.mac_addr.src_hi,
pbd_e2->data.mac_addr.src_mid,
pbd_e2->data.mac_addr.src_lo,
pbd_e2->parsing_data);
}
}
if (i != 1) { /* skip parse db as it doesn't hold data */
tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
BLOGD(sc, DBG_TX,
"-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
tx_data_bd,
tmp_bd,
le16toh(tx_data_bd->nbytes),
le32toh(tx_data_bd->addr_hi),
le32toh(tx_data_bd->addr_lo));
}
tmp_bd = TX_BD_NEXT(tmp_bd);
}
}
BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
/* update TX BD producer index value for next TX */
bd_prod = TX_BD_NEXT(bd_prod);
/*
* If the chain of tx_bd's describing this frame is adjacent to or spans
* an eth_tx_next_bd element then we need to increment the nbds value.
*/
if (TX_BD_IDX(bd_prod) < nbds) {
nbds++;
}
/* don't allow reordering of writes for nbd and packets */
mb();
fp->tx_db.data.prod += nbds;
/* producer points to the next free tx_bd at this point */
fp->tx_pkt_prod++;
fp->tx_bd_prod = bd_prod;
DOORBELL(sc, fp->index, fp->tx_db.raw);
fp->eth_q_stats.tx_pkts++;
/* Prevent speculative reads from getting ahead of the status block. */
bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
0, 0, BUS_SPACE_BARRIER_READ);
/* Prevent speculative reads from getting ahead of the doorbell. */
bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
0, 0, BUS_SPACE_BARRIER_READ);
return (0);
}
/*
 * Legacy (non-multiqueue) transmit path, locked portion. Drains the
 * interface send queue into the fastpath TX chain until the queue is empty
 * or the chain runs out of BDs. The fastpath TX lock must be held on entry.
 */
static void
bxe_tx_start_locked(struct bxe_softc *sc,
if_t ifp,
struct bxe_fastpath *fp)
{
struct mbuf *m = NULL;
int tx_count = 0;
uint16_t tx_bd_avail;
BXE_FP_TX_LOCK_ASSERT(fp);
/* keep adding entries while there are frames to send */
while (!if_sendq_empty(ifp)) {
/*
* check for any frames to send
* dequeue can still be NULL even if queue is not empty
*/
m = if_dequeue(ifp);
if (__predict_false(m == NULL)) {
break;
}
/* the mbuf now belongs to us */
fp->eth_q_stats.mbuf_alloc_tx++;
/*
* Put the frame into the transmit ring. If we don't have room,
* place the mbuf back at the head of the TX queue, set the
* OACTIVE flag, and wait for the NIC to drain the chain.
*/
if (__predict_false(bxe_tx_encap(fp, &m))) {
fp->eth_q_stats.tx_encap_failures++;
/* m == NULL here means bxe_tx_encap() consumed/freed the frame */
if (m != NULL) {
/* mark the TX queue as full and return the frame */
if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
if_sendq_prepend(ifp, m);
fp->eth_q_stats.mbuf_alloc_tx--;
fp->eth_q_stats.tx_queue_xoff++;
}
/* stop looking for more work */
break;
}
/* the frame was enqueued successfully */
tx_count++;
/* send a copy of the frame to any BPF listeners. */
ether_bpf_mtap_if(ifp, m);
tx_bd_avail = bxe_tx_avail(sc, fp);
/* handle any completions if we're running low */
if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
/* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
bxe_txeof(sc, fp);
if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
break;
}
}
}
/* all TX packets were dequeued and/or the tx ring is full */
if (tx_count > 0) {
/* reset the TX watchdog timeout timer */
fp->watchdog_timer = BXE_TX_TIMEOUT;
}
}
/*
 * Legacy (non-RSS) dispatch routine. Entry point for the non-multiqueue
 * transmit path; always uses fastpath 0. Silently drops the request when
 * the interface is not running, the link is down, or the queue is marked
 * full (OACTIVE), bumping the relevant statistic in the latter case.
 */
static void
bxe_tx_start(if_t ifp)
{
struct bxe_softc *sc;
struct bxe_fastpath *fp;
sc = if_getsoftc(ifp);
if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
BLOGW(sc, "Interface not running, ignoring transmit request\n");
return;
}
if (!sc->link_vars.link_up) {
BLOGW(sc, "Interface link is down, ignoring transmit request\n");
return;
}
/* the legacy path only ever uses the first fastpath queue */
fp = &sc->fp[0];
if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
fp->eth_q_stats.tx_queue_full_return++;
return;
}
BXE_FP_TX_LOCK(fp);
bxe_tx_start_locked(sc, ifp, fp);
BXE_FP_TX_UNLOCK(fp);
}
/*
 * Multiqueue transmit, locked portion. Optionally enqueues 'm' (may be
 * NULL when called just to drain deferred work) onto the fastpath buf_ring
 * and then pushes as many queued frames as possible into the TX chain.
 * The fastpath TX lock must be held. Returns 0 or an errno; on a non-empty
 * ring at exit a deferred task is scheduled to continue draining.
 */
static int
bxe_tx_mq_start_locked(struct bxe_softc *sc,
if_t ifp,
struct bxe_fastpath *fp,
struct mbuf *m)
{
struct buf_ring *tx_br = fp->tx_br;
struct mbuf *next;
int depth, rc, tx_count;
uint16_t tx_bd_avail;
rc = tx_count = 0;
BXE_FP_TX_LOCK_ASSERT(fp);
if (sc->state != BXE_STATE_OPEN) {
fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
return ENETDOWN;
}
if (!tx_br) {
BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
return (EINVAL);
}
if (m != NULL) {
/* queue the frame; it is sent below in arrival order */
rc = drbr_enqueue(ifp, tx_br, m);
if (rc != 0) {
fp->eth_q_stats.tx_soft_errors++;
goto bxe_tx_mq_start_locked_exit;
}
}
if (!sc->link_vars.link_up || !(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
fp->eth_q_stats.tx_request_link_down_failures++;
goto bxe_tx_mq_start_locked_exit;
}
/* fetch the depth of the driver queue */
depth = drbr_inuse(ifp, tx_br);
if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
}
/* keep adding entries while there are frames to send */
while ((next = drbr_peek(ifp, tx_br)) != NULL) {
/* handle any completions if we're running low */
tx_bd_avail = bxe_tx_avail(sc, fp);
if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
/* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
bxe_txeof(sc, fp);
tx_bd_avail = bxe_tx_avail(sc, fp);
/* worst case a TSO frame needs BXE_TSO_MAX_SEGMENTS + 1 BDs */
if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) {
fp->eth_q_stats.bd_avail_too_less_failures++;
m_freem(next);
drbr_advance(ifp, tx_br);
rc = ENOBUFS;
break;
}
}
/* the mbuf now belongs to us */
fp->eth_q_stats.mbuf_alloc_tx++;
/*
* Put the frame into the transmit ring. If we don't have room,
* place the mbuf back at the head of the TX queue, set the
* OACTIVE flag, and wait for the NIC to drain the chain.
*/
rc = bxe_tx_encap(fp, &next);
if (__predict_false(rc != 0)) {
fp->eth_q_stats.tx_encap_failures++;
/* next == NULL here means bxe_tx_encap() consumed the frame */
if (next != NULL) {
/* mark the TX queue as full and save the frame */
if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
drbr_putback(ifp, tx_br, next);
fp->eth_q_stats.mbuf_alloc_tx--;
fp->eth_q_stats.tx_frames_deferred++;
} else
drbr_advance(ifp, tx_br);
/* stop looking for more work */
break;
}
/* the transmit frame was enqueued successfully */
tx_count++;
/* send a copy of the frame to any BPF listeners */
ether_bpf_mtap_if(ifp, next);
drbr_advance(ifp, tx_br);
}
/* all TX packets were dequeued and/or the tx ring is full */
if (tx_count > 0) {
/* reset the TX watchdog timeout timer */
fp->watchdog_timer = BXE_TX_TIMEOUT;
}
bxe_tx_mq_start_locked_exit:
/* If we didn't drain the drbr, enqueue a task in the future to do it. */
if (!drbr_empty(ifp, tx_br)) {
fp->eth_q_stats.tx_mq_not_empty++;
taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1);
}
return (rc);
}
/*
 * Taskqueue handler used to resume draining a fastpath buf_ring after a
 * previous bxe_tx_mq_start_locked() pass left frames queued.
 */
static void
bxe_tx_mq_start_deferred(void *arg,
                         int  pending)
{
    struct bxe_fastpath *fp;
    struct bxe_softc *sc;

    fp = (struct bxe_fastpath *)arg;
    sc = fp->sc;

    BXE_FP_TX_LOCK(fp);
    /* NULL mbuf: drain whatever is already on the ring */
    bxe_tx_mq_start_locked(sc, sc->ifp, fp, NULL);
    BXE_FP_TX_UNLOCK(fp);
}
/*
 * Multiqueue (TSS) dispatch routine. Selects a fastpath queue from the
 * mbuf's flowid (if valid) and transmits under the fastpath TX lock. If
 * the lock is contended the frame is enqueued on the buf_ring and a task
 * is scheduled to drain it, so the frame is never dropped here.
 */
static int
bxe_tx_mq_start(if_t ifp,
struct mbuf *m)
{
struct bxe_softc *sc = if_getsoftc(ifp);
struct bxe_fastpath *fp;
int fp_index, rc;
fp_index = 0; /* default is the first queue */
/* check if flowid is set */
if (BXE_VALID_FLOWID(m))
fp_index = (m->m_pkthdr.flowid % sc->num_queues);
fp = &sc->fp[fp_index];
if (sc->state != BXE_STATE_OPEN) {
fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
return ENETDOWN;
}
if (BXE_FP_TX_TRYLOCK(fp)) {
rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
BXE_FP_TX_UNLOCK(fp);
} else {
/* lock held by someone else; defer the frame to the taskqueue */
rc = drbr_enqueue(ifp, fp->tx_br, m);
taskqueue_enqueue(fp->tq, &fp->tx_task);
}
return (rc);
}
/*
 * if_qflush handler for the multiqueue path. Frees every mbuf still queued
 * on each fastpath buf_ring (only for queues whose IRQ is set up) and then
 * flushes the generic interface queue.
 */
static void
bxe_mq_flush(if_t ifp)
{
struct bxe_softc *sc = if_getsoftc(ifp);
struct bxe_fastpath *fp;
struct mbuf *m;
int i;
for (i = 0; i < sc->num_queues; i++) {
fp = &sc->fp[i];
/* skip queues that never reached the IRQ-ready state */
if (fp->state != BXE_FP_STATE_IRQ) {
BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
fp->index, fp->state);
continue;
}
if (fp->tx_br != NULL) {
BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
BXE_FP_TX_LOCK(fp);
while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
m_freem(m);
}
BXE_FP_TX_UNLOCK(fp);
}
}
if_qflush(ifp);
}
/*
 * Number of ILT lines needed for connection contexts. With SR-IOV enabled
 * the VF CID range must be covered as well; otherwise only the L2 lines.
 */
static uint16_t
bxe_cid_ilt_lines(struct bxe_softc *sc)
{
    return (IS_SRIOV(sc) ?
            ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS) :
            L2_ILT_LINES(sc));
}
/*
 * Populate the ILT (Internal Lookup Table) client layout for this
 * function: CDU always, QM when enabled, and SRC/TM when CNIC support is
 * compiled in. Each client is assigned a contiguous range of ILT lines
 * starting at this function's base line.
 */
static void
bxe_ilt_set_info(struct bxe_softc *sc)
{
struct ilt_client_info *ilt_client;
struct ecore_ilt *ilt = sc->ilt;
uint16_t line = 0;
ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
/* CDU */
ilt_client = &ilt->clients[ILT_CLIENT_CDU];
ilt_client->client_num = ILT_CLIENT_CDU;
ilt_client->page_size = CDU_ILT_PAGE_SZ;
/* CDU pages are allocated separately in bxe_alloc_mem() */
ilt_client->flags = ILT_CLIENT_SKIP_MEM;
ilt_client->start = line;
line += bxe_cid_ilt_lines(sc);
if (CNIC_SUPPORT(sc)) {
line += CNIC_ILT_LINES;
}
ilt_client->end = (line - 1);
BLOGD(sc, DBG_LOAD,
"ilt client[CDU]: start %d, end %d, "
"psz 0x%x, flags 0x%x, hw psz %d\n",
ilt_client->start, ilt_client->end,
ilt_client->page_size,
ilt_client->flags,
ilog2(ilt_client->page_size >> 12));
/* QM */
if (QM_INIT(sc->qm_cid_count)) {
ilt_client = &ilt->clients[ILT_CLIENT_QM];
ilt_client->client_num = ILT_CLIENT_QM;
ilt_client->page_size = QM_ILT_PAGE_SZ;
ilt_client->flags = 0;
ilt_client->start = line;
/* 4 bytes for each cid */
line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
QM_ILT_PAGE_SZ);
ilt_client->end = (line - 1);
BLOGD(sc, DBG_LOAD,
"ilt client[QM]: start %d, end %d, "
"psz 0x%x, flags 0x%x, hw psz %d\n",
ilt_client->start, ilt_client->end,
ilt_client->page_size, ilt_client->flags,
ilog2(ilt_client->page_size >> 12));
}
if (CNIC_SUPPORT(sc)) {
/* SRC */
ilt_client = &ilt->clients[ILT_CLIENT_SRC];
ilt_client->client_num = ILT_CLIENT_SRC;
ilt_client->page_size = SRC_ILT_PAGE_SZ;
ilt_client->flags = 0;
ilt_client->start = line;
line += SRC_ILT_LINES;
ilt_client->end = (line - 1);
BLOGD(sc, DBG_LOAD,
"ilt client[SRC]: start %d, end %d, "
"psz 0x%x, flags 0x%x, hw psz %d\n",
ilt_client->start, ilt_client->end,
ilt_client->page_size, ilt_client->flags,
ilog2(ilt_client->page_size >> 12));
/* TM */
ilt_client = &ilt->clients[ILT_CLIENT_TM];
ilt_client->client_num = ILT_CLIENT_TM;
ilt_client->page_size = TM_ILT_PAGE_SZ;
ilt_client->flags = 0;
ilt_client->start = line;
line += TM_ILT_LINES;
ilt_client->end = (line - 1);
BLOGD(sc, DBG_LOAD,
"ilt client[TM]: start %d, end %d, "
"psz 0x%x, flags 0x%x, hw psz %d\n",
ilt_client->start, ilt_client->end,
ilt_client->page_size, ilt_client->flags,
ilog2(ilt_client->page_size >> 12));
}
KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
}
static void
bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
{
int i;
uint32_t rx_buf_size;
rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
for (i = 0; i < sc->num_queues; i++) {
if(rx_buf_size <= MCLBYTES){
sc->fp[i].rx_buf_size = rx_buf_size;
sc->fp[i].mbuf_alloc_size = MCLBYTES;
}else if (rx_buf_size <= MJUMPAGESIZE){
sc->fp[i].rx_buf_size = rx_buf_size;
sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
}else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)){
sc->fp[i].rx_buf_size = MCLBYTES;
sc->fp[i].mbuf_alloc_size = MCLBYTES;
}else if (rx_buf_size <= (2 * MJUMPAGESIZE)){
sc->fp[i].rx_buf_size = MJUMPAGESIZE;
sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
}else {
sc->fp[i].rx_buf_size = MCLBYTES;
sc->fp[i].mbuf_alloc_size = MCLBYTES;
}
}
}
/*
 * Allocate the zeroed ecore_ilt bookkeeping structure.
 * Returns 0 on success, 1 if the allocation failed.
 */
static int
bxe_alloc_ilt_mem(struct bxe_softc *sc)
{
    sc->ilt = (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
                                         M_BXE_ILT, (M_NOWAIT | M_ZERO));
    return ((sc->ilt == NULL) ? 1 : 0);
}
/*
 * Allocate the zeroed array of ILT line descriptors (ILT_MAX_LINES
 * entries). Requires sc->ilt to exist. Returns 0 on success, 1 on failure.
 */
static int
bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
{
    sc->ilt->lines =
        (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
                                  M_BXE_ILT, (M_NOWAIT | M_ZERO));
    return ((sc->ilt->lines == NULL) ? 1 : 0);
}
/*
 * Free the ecore_ilt bookkeeping structure, if allocated, and clear the
 * softc pointer so a double free is harmless.
 */
static void
bxe_free_ilt_mem(struct bxe_softc *sc)
{
    if (sc->ilt == NULL) {
        return;
    }

    free(sc->ilt, M_BXE_ILT);
    sc->ilt = NULL;
}
/*
 * Free the ILT line descriptor array, if allocated, and clear the pointer
 * so a double free is harmless. Also tolerates sc->ilt itself being NULL,
 * which can happen on error-unwind paths before the ILT structure exists.
 */
static void
bxe_free_ilt_lines_mem(struct bxe_softc *sc)
{
    /* guard sc->ilt too; the original unconditionally dereferenced it */
    if ((sc->ilt != NULL) && (sc->ilt->lines != NULL)) {
        free(sc->ilt->lines, M_BXE_ILT);
        sc->ilt->lines = NULL;
    }
}
/*
 * Release the CDU context DMA buffers, the ILT client pages allocated by
 * ecore_ilt_mem_op(), and the ILT line descriptor array. Safe to call on
 * partially completed bxe_alloc_mem() error paths.
 */
static void
bxe_free_mem(struct bxe_softc *sc)
{
int i;
for (i = 0; i < L2_ILT_LINES(sc); i++) {
bxe_dma_free(sc, &sc->context[i].vcxt_dma);
sc->context[i].vcxt = NULL;
sc->context[i].size = 0;
}
ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
bxe_free_ilt_lines_mem(sc);
}
/*
 * Allocate the CDU context DMA buffers, the ILT line descriptor array,
 * and the ILT client pages. Returns 0 on success or -1 on failure, in
 * which case everything partially allocated is freed before returning.
 */
static int
bxe_alloc_mem(struct bxe_softc *sc)
{
    int context_size;
    int allocated;
    int i;

    /*
     * Allocate memory for CDU context:
     * This memory is allocated separately and not in the generic ILT
     * functions because CDU differs in few aspects:
     * 1. There can be multiple entities allocating memory for context -
     *    regular L2, CNIC, and SRIOV drivers. Each separately controls
     *    its own ILT lines.
     * 2. Since CDU page-size is not a single 4KB page (which is the case
     *    for the other ILT clients), to be efficient we want to support
     *    allocation of sub-page-size in the last entry.
     * 3. Context pointers are used by the driver to pass to FW / update
     *    the context (for the other ILT clients the pointers are used just
     *    to free the memory during unload).
     */
    context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));

    for (i = 0, allocated = 0; allocated < context_size; i++) {
        /* the final chunk may be smaller than a full CDU ILT page */
        sc->context[i].size = min(CDU_ILT_PAGE_SZ,
                                  (context_size - allocated));

        if (bxe_dma_alloc(sc, sc->context[i].size,
                          &sc->context[i].vcxt_dma,
                          "cdu context") != 0) {
            bxe_free_mem(sc);
            return (-1);
        }

        sc->context[i].vcxt =
            (union cdu_context *)sc->context[i].vcxt_dma.vaddr;

        allocated += sc->context[i].size;
    }

    /*
     * M_NOWAIT allocation can fail; proceeding without the line table
     * would dereference NULL in ecore_ilt_mem_op() below.
     */
    if (bxe_alloc_ilt_lines_mem(sc) != 0) {
        BLOGE(sc, "bxe_alloc_ilt_lines_mem failed\n");
        bxe_free_mem(sc);
        return (-1);
    }

    BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
          sc->ilt, sc->ilt->start_line, sc->ilt->lines);

    {
        for (i = 0; i < 4; i++) {
            BLOGD(sc, DBG_LOAD,
                  "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
                  i,
                  sc->ilt->clients[i].page_size,
                  sc->ilt->clients[i].start,
                  sc->ilt->clients[i].end,
                  sc->ilt->clients[i].client_num,
                  sc->ilt->clients[i].flags);
        }
    }

    if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
        BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
        bxe_free_mem(sc);
        return (-1);
    }

    return (0);
}
/*
 * Free every mbuf in the RX BD chain and unload its DMA map. A NULL
 * rx_mbuf_tag means the chain was never set up, so there is nothing to do.
 */
static void
bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
{
int i;
if (fp->rx_mbuf_tag == NULL) {
return;
}
/* free all mbufs and unload all maps */
for (i = 0; i < RX_BD_TOTAL; i++) {
if (fp->rx_mbuf_chain[i].m_map != NULL) {
bus_dmamap_sync(fp->rx_mbuf_tag,
fp->rx_mbuf_chain[i].m_map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(fp->rx_mbuf_tag,
fp->rx_mbuf_chain[i].m_map);
}
if (fp->rx_mbuf_chain[i].m != NULL) {
m_freem(fp->rx_mbuf_chain[i].m);
fp->rx_mbuf_chain[i].m = NULL;
fp->eth_q_stats.mbuf_alloc_rx--;
}
}
}
/*
 * Free every mbuf held in the TPA (LRO aggregation) pool and unload its
 * DMA map. The pool shares the RX mbuf tag; a NULL tag means nothing was
 * ever allocated.
 */
static void
bxe_free_tpa_pool(struct bxe_fastpath *fp)
{
struct bxe_softc *sc;
int i, max_agg_queues;
sc = fp->sc;
if (fp->rx_mbuf_tag == NULL) {
return;
}
max_agg_queues = MAX_AGG_QS(sc);
/* release all mbufs and unload all DMA maps in the TPA pool */
for (i = 0; i < max_agg_queues; i++) {
if (fp->rx_tpa_info[i].bd.m_map != NULL) {
bus_dmamap_sync(fp->rx_mbuf_tag,
fp->rx_tpa_info[i].bd.m_map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(fp->rx_mbuf_tag,
fp->rx_tpa_info[i].bd.m_map);
}
if (fp->rx_tpa_info[i].bd.m != NULL) {
m_freem(fp->rx_tpa_info[i].bd.m);
fp->rx_tpa_info[i].bd.m = NULL;
fp->eth_q_stats.mbuf_alloc_tpa--;
}
}
}
/*
 * Free every mbuf in the RX scatter-gather element chain and unload its
 * DMA map. A NULL rx_sge_mbuf_tag means the chain was never set up.
 */
static void
bxe_free_sge_chain(struct bxe_fastpath *fp)
{
int i;
if (fp->rx_sge_mbuf_tag == NULL) {
return;
}
/* free all mbufs and unload all maps */
for (i = 0; i < RX_SGE_TOTAL; i++) {
if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
bus_dmamap_sync(fp->rx_sge_mbuf_tag,
fp->rx_sge_mbuf_chain[i].m_map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(fp->rx_sge_mbuf_tag,
fp->rx_sge_mbuf_chain[i].m_map);
}
if (fp->rx_sge_mbuf_chain[i].m != NULL) {
m_freem(fp->rx_sge_mbuf_chain[i].m);
fp->rx_sge_mbuf_chain[i].m = NULL;
fp->eth_q_stats.mbuf_alloc_sge--;
}
}
}
/*
 * Release all per-fastpath buffers: any frames still sitting on the TX
 * buf_ring plus every RX, TPA, and SGE mbuf. Afterwards the per-queue
 * mbuf accounting counters are checked and any non-zero value (a leak or
 * an unreclaimed mbuf) is logged.
 */
static void
bxe_free_fp_buffers(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];

        if (fp->tx_br != NULL) {
            /* just in case bxe_mq_flush() wasn't called */
            if (mtx_initialized(&fp->tx_mtx)) {
                struct mbuf *m;

                BXE_FP_TX_LOCK(fp);
                while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
                    m_freem(m);
                BXE_FP_TX_UNLOCK(fp);
            }
        }

        /* free all RX buffers */
        bxe_free_rx_bd_chain(fp);
        bxe_free_tpa_pool(fp);
        bxe_free_sge_chain(fp);

        if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
            BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
                  fp->eth_q_stats.mbuf_alloc_rx);
        }

        if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
                  fp->eth_q_stats.mbuf_alloc_sge);
        }

        /* fixed copy/paste: this message previously said "sge mbufs" */
        if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
            BLOGE(sc, "failed to claim all tpa mbufs (%d left)\n",
                  fp->eth_q_stats.mbuf_alloc_tpa);
        }

        if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
            BLOGE(sc, "failed to release tx mbufs (%d left)\n",
                  fp->eth_q_stats.mbuf_alloc_tx);
        }

        /* XXX verify all mbufs were reclaimed */
    }
}
/*
 * Allocate and DMA-map a fresh mbuf for RX BD 'index'. The new mbuf is
 * mapped through the spare map first so that, on mapping failure, the
 * existing buffer at 'index' is left intact. When prev_index != index the
 * mapping that was parked at prev_index is recycled (see comment below).
 * Returns 0 on success or an errno on allocation/mapping failure.
 */
static int
bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
uint16_t prev_index,
uint16_t index)
{
struct bxe_sw_rx_bd *rx_buf;
struct eth_rx_bd *rx_bd;
bus_dma_segment_t segs[1];
bus_dmamap_t map;
struct mbuf *m;
int nsegs, rc;
rc = 0;
/* allocate the new RX BD mbuf */
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
if (__predict_false(m == NULL)) {
fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
return (ENOBUFS);
}
fp->eth_q_stats.mbuf_alloc_rx++;
/* initialize the mbuf buffer length */
m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
/* map the mbuf into non-paged pool */
rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
fp->rx_mbuf_spare_map,
m, segs, &nsegs, BUS_DMA_NOWAIT);
if (__predict_false(rc != 0)) {
fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
m_freem(m);
fp->eth_q_stats.mbuf_alloc_rx--;
return (rc);
}
/* all mbufs must map to a single segment */
KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
/* release any existing RX BD mbuf mappings */
if (prev_index != index) {
rx_buf = &fp->rx_mbuf_chain[prev_index];
if (rx_buf->m_map != NULL) {
bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
}
/*
* We only get here from bxe_rxeof() when the maximum number
* of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
* holds the mbuf in the prev_index so it's OK to NULL it out
* here without concern of a memory leak.
*/
fp->rx_mbuf_chain[prev_index].m = NULL;
}
rx_buf = &fp->rx_mbuf_chain[index];
if (rx_buf->m_map != NULL) {
bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
}
/* save the mbuf and mapping info for a future packet */
/* rotate maps: the just-loaded spare becomes this slot's map, and the
* newly-unloaded map (from prev_index or index) becomes the spare */
map = (prev_index != index) ?
fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
rx_buf->m_map = fp->rx_mbuf_spare_map;
fp->rx_mbuf_spare_map = map;
bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
BUS_DMASYNC_PREREAD);
rx_buf->m = m;
/* publish the new buffer address to the hardware RX BD */
rx_bd = &fp->rx_chain[index];
rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
return (rc);
}
/*
 * Allocate and DMA-map a fresh mbuf for TPA aggregation queue 'queue'.
 * The new mbuf is mapped through the spare map first so a mapping failure
 * leaves the existing TPA buffer intact; on success the spare map and the
 * queue's map are swapped. Returns 0 on success or an errno.
 */
static int
bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
                      int                 queue)
{
    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    struct mbuf *m;
    int nsegs;
    int rc = 0;

    /* allocate the new TPA mbuf */
    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
    if (__predict_false(m == NULL)) {
        fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
        return (ENOBUFS);
    }

    fp->eth_q_stats.mbuf_alloc_tpa++;

    /* initialize the mbuf buffer length */
    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;

    /* map the mbuf into non-paged pool */
    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
                                 fp->rx_tpa_info_mbuf_spare_map,
                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
    if (__predict_false(rc != 0)) {
        fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
        /* m_freem for consistency with the other RX allocators
         * (equivalent here since the mbuf has no chain yet) */
        m_freem(m);
        fp->eth_q_stats.mbuf_alloc_tpa--;
        return (rc);
    }

    /* all mbufs must map to a single segment */
    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));

    /* release any existing TPA mbuf mapping */
    if (tpa_info->bd.m_map != NULL) {
        bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
                        BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
    }

    /* save the mbuf and mapping info for the TPA mbuf */
    map = tpa_info->bd.m_map;
    tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
    fp->rx_tpa_info_mbuf_spare_map = map;

    bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
                    BUS_DMASYNC_PREREAD);

    tpa_info->bd.m = m;
    tpa_info->seg = segs[0];

    return (rc);
}
/*
 * Allocate an mbuf and assign it to the receive scatter gather chain. The
 * caller must take care to save a copy of the existing mbuf in the SG mbuf
 * chain. Returns 0 on success or an errno; on mapping failure the slot at
 * 'index' is left untouched (the new mbuf is loaded via the spare map).
 */
static int
bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
uint16_t index)
{
struct bxe_sw_rx_bd *sge_buf;
struct eth_rx_sge *sge;
bus_dma_segment_t segs[1];
bus_dmamap_t map;
struct mbuf *m;
int nsegs;
int rc = 0;
/* allocate a new SGE mbuf */
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
if (__predict_false(m == NULL)) {
fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
return (ENOMEM);
}
fp->eth_q_stats.mbuf_alloc_sge++;
/* initialize the mbuf buffer length */
m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
/* map the SGE mbuf into non-paged pool */
rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
fp->rx_sge_mbuf_spare_map,
m, segs, &nsegs, BUS_DMA_NOWAIT);
if (__predict_false(rc != 0)) {
fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
m_freem(m);
fp->eth_q_stats.mbuf_alloc_sge--;
return (rc);
}
/* all mbufs must map to a single segment */
KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
sge_buf = &fp->rx_sge_mbuf_chain[index];
/* release any existing SGE mbuf mapping */
if (sge_buf->m_map != NULL) {
bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
}
/* save the mbuf and mapping info for a future packet */
/* swap maps: the loaded spare becomes this slot's map and vice versa */
map = sge_buf->m_map;
sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
fp->rx_sge_mbuf_spare_map = map;
bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
BUS_DMASYNC_PREREAD);
sge_buf->m = m;
/* publish the new buffer address to the hardware SGE entry */
sge = &fp->rx_sge_chain[index];
sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
return (rc);
}
/*
 * Populate every fastpath's RX rings: the RX BD chain, the TPA pool, and
 * (when TPA came up) the SGE chain. On any allocation failure the buffers
 * of the queue being filled are released and ENOBUFS is returned; TPA is
 * disabled for that queue first so the caller can still operate without it.
 * NOTE(review): only the failing queue is unwound here — earlier queues
 * are presumably freed by the caller's error path; confirm against callers.
 */
static __noinline int
bxe_alloc_fp_buffers(struct bxe_softc *sc)
{
struct bxe_fastpath *fp;
int i, j, rc = 0;
int ring_prod, cqe_ring_prod;
int max_agg_queues;
for (i = 0; i < sc->num_queues; i++) {
fp = &sc->fp[i];
ring_prod = cqe_ring_prod = 0;
fp->rx_bd_cons = 0;
fp->rx_cq_cons = 0;
/* allocate buffers for the RX BDs in RX BD chain */
for (j = 0; j < sc->max_rx_bufs; j++) {
rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
if (rc != 0) {
BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
i, rc);
goto bxe_alloc_fp_buffers_error;
}
ring_prod = RX_BD_NEXT(ring_prod);
cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
}
fp->rx_bd_prod = ring_prod;
fp->rx_cq_prod = cqe_ring_prod;
fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
max_agg_queues = MAX_AGG_QS(sc);
fp->tpa_enable = TRUE;
/* fill the TPA pool */
for (j = 0; j < max_agg_queues; j++) {
rc = bxe_alloc_rx_tpa_mbuf(fp, j);
if (rc != 0) {
BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
i, j);
fp->tpa_enable = FALSE;
goto bxe_alloc_fp_buffers_error;
}
fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
}
if (fp->tpa_enable) {
/* fill the RX SGE chain (only needed when TPA is on) */
ring_prod = 0;
for (j = 0; j < RX_SGE_USABLE; j++) {
rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
if (rc != 0) {
BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
i, ring_prod);
fp->tpa_enable = FALSE;
ring_prod = 0;
goto bxe_alloc_fp_buffers_error;
}
ring_prod = RX_SGE_NEXT(ring_prod);
}
fp->rx_sge_prod = ring_prod;
}
}
return (0);
bxe_alloc_fp_buffers_error:
/* unwind what was already allocated */
bxe_free_rx_bd_chain(fp);
bxe_free_tpa_pool(fp);
bxe_free_sge_chain(fp);
return (ENOBUFS);
}
/*
 * Tear down the firmware statistics DMA region and reset every associated
 * softc shortcut so a later bxe_alloc_fw_stats_mem() starts from a clean
 * slate.
 */
static void
bxe_free_fw_stats_mem(struct bxe_softc *sc)
{
    bxe_dma_free(sc, &sc->fw_stats_dma);

    /* clear the data-area shortcuts */
    sc->fw_stats_data_size = 0;
    sc->fw_stats_data = NULL;
    sc->fw_stats_data_mapping = 0;

    /* clear the request-area shortcuts */
    sc->fw_stats_req_size = 0;
    sc->fw_stats_req = NULL;
    sc->fw_stats_req_mapping = 0;

    sc->fw_stats_num = 0;
}
/*
 * Allocate one DMA region holding both the firmware statistics request
 * (header + command groups) and the statistics data area, then set up the
 * softc shortcut pointers/mappings into it. Returns 0 on success or -1 on
 * DMA allocation failure (everything is cleaned up via
 * bxe_free_fw_stats_mem in that case).
 */
static int
bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
{
uint8_t num_queue_stats;
int num_groups;
/* number of queues for statistics is number of eth queues */
num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
/*
* Total number of FW statistics requests =
* 1 for port stats + 1 for PF stats + num of queues
*/
sc->fw_stats_num = (2 + num_queue_stats);
/*
* Request is built from stats_query_header and an array of
* stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
* rules. The real number or requests is configured in the
* stats_query_header.
*/
num_groups =
((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
sc->fw_stats_num, num_groups);
sc->fw_stats_req_size =
(sizeof(struct stats_query_header) +
(num_groups * sizeof(struct stats_query_cmd_group)));
/*
* Data for statistics requests + stats_counter.
* stats_counter holds per-STORM counters that are incremented when
* STORM has finished with the current request. Memory for FCoE
* offloaded statistics are counted anyway, even if they will not be sent.
* VF stats are not accounted for here as the data of VF stats is stored
* in memory allocated by the VF, not here.
*/
sc->fw_stats_data_size =
(sizeof(struct stats_counter) +
sizeof(struct per_port_stats) +
sizeof(struct per_pf_stats) +
/* sizeof(struct fcoe_statistics_params) + */
(sizeof(struct per_queue_stats) * num_queue_stats));
/* request and data live in the same DMA allocation, request first */
if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
&sc->fw_stats_dma, "fw stats") != 0) {
bxe_free_fw_stats_mem(sc);
return (-1);
}
/* set up the shortcuts */
sc->fw_stats_req =
(struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
sc->fw_stats_data =
(struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
sc->fw_stats_req_size);
sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
sc->fw_stats_req_size);
BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
(uintmax_t)sc->fw_stats_req_mapping);
BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
(uintmax_t)sc->fw_stats_data_mapping);
return (0);
}
/*
* Bits map:
* 0-7 - Engine0 load counter.
* 8-15 - Engine1 load counter.
* 16 - Engine0 RESET_IN_PROGRESS bit.
* 17 - Engine1 RESET_IN_PROGRESS bit.
* 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active
* function on the engine
* 19 - Engine1 ONE_IS_LOADED.
* 20 - Chip reset flow bit. When set, the non-leader must wait for both
* engines' leaders to complete (check both RESET_IN_PROGRESS bits, not
* just the one belonging to its engine).
*/
#define BXE_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
#define BXE_PATH0_LOAD_CNT_MASK 0x000000ff
#define BXE_PATH0_LOAD_CNT_SHIFT 0
#define BXE_PATH1_LOAD_CNT_MASK 0x0000ff00
#define BXE_PATH1_LOAD_CNT_SHIFT 8
#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
#define BXE_GLOBAL_RESET_BIT 0x00040000
/* set the GLOBAL_RESET bit, should be run under rtnl lock */
static void
bxe_set_reset_global(struct bxe_softc *sc)
{
    uint32_t glob;

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
    glob = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    glob |= BXE_GLOBAL_RESET_BIT;
    REG_WR(sc, BXE_RECOVERY_GLOB_REG, glob);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}
/* clear the GLOBAL_RESET bit, should be run under rtnl lock */
static void
bxe_clear_reset_global(struct bxe_softc *sc)
{
    uint32_t glob;

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
    glob = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    glob &= ~BXE_GLOBAL_RESET_BIT;
    REG_WR(sc, BXE_RECOVERY_GLOB_REG, glob);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}
/* checks the GLOBAL_RESET bit, should be run under rtnl lock */
static uint8_t
bxe_reset_is_global(struct bxe_softc *sc)
{
    uint32_t glob;

    glob = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", glob);

    return (((glob & BXE_GLOBAL_RESET_BIT) != 0) ? TRUE : FALSE);
}
/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
static void
bxe_set_reset_done(struct bxe_softc *sc)
{
    uint32_t flags;
    uint32_t rst_bit;

    rst_bit = (SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
                             BXE_PATH0_RST_IN_PROG_BIT);

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
    flags = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    /* drop this engine's RESET_IN_PROGRESS flag */
    REG_WR(sc, BXE_RECOVERY_GLOB_REG, (flags & ~rst_bit));
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}
/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
static void
bxe_set_reset_in_progress(struct bxe_softc *sc)
{
    uint32_t flags;
    uint32_t rst_bit;

    rst_bit = (SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
                             BXE_PATH0_RST_IN_PROG_BIT);

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
    flags = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    /* raise this engine's RESET_IN_PROGRESS flag */
    REG_WR(sc, BXE_RECOVERY_GLOB_REG, (flags | rst_bit));
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}
/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
static uint8_t
bxe_reset_is_done(struct bxe_softc *sc,
                  int              engine)
{
    uint32_t rst_bit;

    rst_bit = (engine ? BXE_PATH1_RST_IN_PROG_BIT :
                        BXE_PATH0_RST_IN_PROG_BIT);

    /* the reset is done when the engine's RESET_IN_PROGRESS bit is clear */
    return (((REG_RD(sc, BXE_RECOVERY_GLOB_REG) & rst_bit) == 0) ?
            TRUE : FALSE);
}
/* get the load status for an engine, should be run under rtnl lock */
static uint8_t
bxe_get_load_status(struct bxe_softc *sc,
                    int              engine)
{
    uint32_t cnt_mask, cnt_shift, glob;

    cnt_mask  = engine ? BXE_PATH1_LOAD_CNT_MASK : BXE_PATH0_LOAD_CNT_MASK;
    cnt_shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT : BXE_PATH0_LOAD_CNT_SHIFT;

    glob = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", glob);

    /* extract the engine's per-PF load bitmap */
    glob = ((glob & cnt_mask) >> cnt_shift);
    BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, glob);

    /* non-zero bitmap => at least one PF is loaded on the engine */
    return (glob != 0);
}
/* set pf load mark */
/* XXX needs to be under rtnl lock */
static void
bxe_set_pf_load(struct bxe_softc *sc)
{
    uint32_t reg, bitmap;
    uint32_t cnt_mask  = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
                                       BXE_PATH0_LOAD_CNT_MASK;
    uint32_t cnt_shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
                                       BXE_PATH0_LOAD_CNT_SHIFT;

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);

    reg = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", reg);

    /* extract the per-PF load bitmap, mark this PF, and merge it back */
    bitmap = ((reg & cnt_mask) >> cnt_shift);
    bitmap |= (1 << SC_ABS_FUNC(sc));
    reg = ((reg & ~cnt_mask) | ((bitmap << cnt_shift) & cnt_mask));

    REG_WR(sc, BXE_RECOVERY_GLOB_REG, reg);

    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}
/* clear pf load mark */
/* XXX needs to be under rtnl lock */
static uint8_t
bxe_clear_pf_load(struct bxe_softc *sc)
{
    uint32_t reg, bitmap;
    uint32_t cnt_mask  = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
                                       BXE_PATH0_LOAD_CNT_MASK;
    uint32_t cnt_shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
                                       BXE_PATH0_LOAD_CNT_SHIFT;

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);

    reg = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", reg);

    /* extract the per-PF load bitmap, clear this PF's bit, merge it back */
    bitmap = ((reg & cnt_mask) >> cnt_shift);
    bitmap &= ~(1 << SC_ABS_FUNC(sc));
    reg = ((reg & ~cnt_mask) | ((bitmap << cnt_shift) & cnt_mask));

    REG_WR(sc, BXE_RECOVERY_GLOB_REG, reg);

    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);

    /* TRUE while other PFs remain loaded on this path */
    return (bitmap != 0);
}
/* send a load request to the MCP and analyze the response */
static int
bxe_nic_load_request(struct bxe_softc *sc,
                     uint32_t         *load_code)
{
    /* init fw_seq */
    sc->fw_seq =
        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
         DRV_MSG_SEQ_NUMBER_MASK);
    BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);

    /* get the current FW pulse sequence */
    sc->fw_drv_pulse_wr_seq =
        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
         DRV_PULSE_SEQ_MASK);
    BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
          sc->fw_drv_pulse_wr_seq);

    /* issue the load request to the management firmware */
    *load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
                                DRV_MSG_CODE_LOAD_REQ_WITH_LFA);

    if (*load_code == 0) {
        /* the MCP never answered - must abort */
        BLOGE(sc, "MCP response failure!\n");
        return (-1);
    }

    if (*load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
        /* the MCP answered but rejected the load - must abort */
        BLOGE(sc, "MCP refused load request\n");
        return (-1);
    }

    return (0);
}
/*
 * Check whether another PF has already loaded FW to the chip. In virtualized
 * environments a PF from another VM may have already initialized the device,
 * including loading FW.
 */
/*
 * Validate the MCP's answer to our load request. If another PF already
 * loaded firmware into the chip, our firmware version must match the one
 * resident in XSEM program RAM; otherwise the load is aborted.
 *
 * Returns 0 on success, -1 on version mismatch.
 */
static int
bxe_nic_load_analyze_req(struct bxe_softc *sc,
                         uint32_t         load_code)
{
    uint32_t my_fw, loaded_fw;

    /* is another pf loaded on this engine? */
    if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
        (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
        /* build my FW version dword (major | minor<<8 | rev<<16 | eng<<24) */
        my_fw = (BCM_5710_FW_MAJOR_VERSION +
                 (BCM_5710_FW_MINOR_VERSION << 8 ) +
                 (BCM_5710_FW_REVISION_VERSION << 16) +
                 (BCM_5710_FW_ENGINEERING_VERSION << 24));

        /* read loaded FW from chip */
        loaded_fw = REG_RD(sc, XSEM_REG_PRAM);

        BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
              loaded_fw, my_fw);

        /* abort nic load if version mismatch */
        if (my_fw != loaded_fw) {
            /* fix: terminate the message with '\n' like every other BLOGE */
            BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)\n",
                  loaded_fw, my_fw);
            return (-1);
        }
    }

    return (0);
}
/* mark PMF if applicable */
static void
bxe_nic_load_pmf(struct bxe_softc *sc,
                 uint32_t         load_code)
{
    uint32_t ncsi_oem_data_addr;

    /* the PF that performs COMMON/PORT init becomes the port master (PMF) */
    switch (load_code) {
    case FW_MSG_CODE_DRV_LOAD_COMMON:
    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
    case FW_MSG_CODE_DRV_LOAD_PORT:
        sc->port.pmf = 1;
        /*
         * Barrier here for ordering between the writing to sc->port.pmf here
         * and reading it from the periodic task.
         */
        mb();
        break;
    default:
        sc->port.pmf = 0;
        break;
    }

    BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);

    /* XXX needed? */
    if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
        SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
        ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
        if (ncsi_oem_data_addr != 0) {
            /* zero the NC-SI OEM driver-version field */
            REG_WR(sc,
                   (ncsi_oem_data_addr +
                    offsetof(struct glob_ncsi_oem_data, driver_version)),
                   0);
        }
    }
}
static void
bxe_read_mf_cfg(struct bxe_softc *sc)
{
    int stride = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
    int abs_func;
    int vn;

    if (BXE_NOMCP(sc)) {
        return; /* what should be the default value in this case */
    }

    /*
     * The formula for computing the absolute function number is...
     * For 2 port configuration (4 functions per port):
     *   abs_func = 2 * vn + SC_PORT + SC_PATH
     * For 4 port configuration (2 functions per port):
     *   abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
     */
    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
        abs_func = (stride * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
        if (abs_func >= E1H_FUNC_MAX) {
            break;
        }
        sc->devinfo.mf_info.mf_config[vn] =
            MFCFG_RD(sc, func_mf_config[abs_func].config);
    }

    /* reflect the disabled/enabled state of this function in sc->flags */
    if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
        FUNC_MF_CFG_FUNC_DISABLED) {
        BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
        sc->flags |= BXE_MF_FUNC_DIS;
    } else {
        BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
        sc->flags &= ~BXE_MF_FUNC_DIS;
    }
}
/* acquire split MCP access lock register */
/*
 * Acquire the split MCP access lock register. Polls up to 1000 times
 * (5ms apart) for the hardware to reflect the lock grant.
 *
 * Returns 0 on success, -1 if the lock could not be acquired.
 *
 * Fix: use an unsigned shift everywhere. The original mixed (1UL << 31)
 * with (1L << 31); the latter is undefined behavior on ILP32 targets
 * where long is 32 bits (left-shifting into the sign bit).
 */
static int bxe_acquire_alr(struct bxe_softc *sc)
{
    uint32_t j, val;

    for (j = 0; j < 1000; j++) {
        /* request the lock by writing the lock bit */
        val = (1UL << 31);
        REG_WR(sc, GRCBASE_MCP + 0x9c, val);
        val = REG_RD(sc, GRCBASE_MCP + 0x9c);
        if (val & (1UL << 31))
            break;

        DELAY(5000);
    }

    if (!(val & (1UL << 31))) {
        BLOGE(sc, "Cannot acquire MCP access lock register\n");
        return (-1);
    }

    return (0);
}
/* release split MCP access lock register */
static void bxe_release_alr(struct bxe_softc *sc)
{
    /* writing zero releases the split MCP lock taken in bxe_acquire_alr() */
    REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
}
/*
 * Handle a fan-failure attention: record the failure in shared memory so
 * the MCP/other drivers see it, log a warning, and (currently) panic.
 */
static void
bxe_fan_failure(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t ext_phy_config;

    /* mark the failure: report a FAILURE external PHY type via shmem */
    ext_phy_config =
        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);

    ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
    ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
    SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
             ext_phy_config);

    /* log the failure */
    BLOGW(sc, "Fan Failure has caused the driver to shutdown "
              "the card to prevent permanent damage. "
              "Please contact OEM Support for assistance\n");

    /* XXX panic for now; the #else path is the intended graceful unload */
#if 1
    bxe_panic(sc, ("Schedule task to handle fan failure\n"));
#else
    /*
     * Schedule device reset (unload)
     * This is due to some boards consuming sufficient power when driver is
     * up to overheat if fan fails.
     */
    bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
    schedule_delayed_work(&sc->sp_rtnl_task, 0);
#endif
}
/* this function is called upon a link interrupt */
/*
 * Process a link-state attention: resync statistics, let the PHY layer
 * (elink) re-evaluate the link, reprogram pause/cmng state, and report
 * the new link status.
 */
static void
bxe_link_attn(struct bxe_softc *sc)
{
    uint32_t pause_enabled = 0;
    struct host_port_stats *pstats;
    int cmng_fns;
    struct bxe_fastpath *fp;
    int i;

    /* Make sure that we are synced with the current statistics */
    bxe_stats_handle(sc, STATS_EVENT_STOP);

    BLOGD(sc, DBG_LOAD, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags);

    /* have the PHY layer refresh sc->link_vars from the hardware */
    elink_link_update(&sc->link_params, &sc->link_vars);

    if (sc->link_vars.link_up) {
        /* dropless flow control: tell USTORM whether TX pause is active */
        if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
            pause_enabled = 0;

            if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
                pause_enabled = 1;
            }

            REG_WR(sc,
                   (BAR_USTRORM_INTMEM +
                    USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
                   pause_enabled);
        }

        if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
            pstats = BXE_SP(sc, port_stats);
            /* reset old mac stats */
            memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
        }

        if (sc->state == BXE_STATE_OPEN) {
            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
            /* Restart tx when the link comes back. */
            FOR_EACH_ETH_QUEUE(sc, i) {
                fp = &sc->fp[i];
                taskqueue_enqueue(fp->tq, &fp->tx_task);
            }
        }
    }

    if (sc->link_vars.link_up && sc->link_vars.line_speed) {
        /* reconfigure rate shaping / fairness for the new line speed */
        cmng_fns = bxe_get_cmng_fns_mode(sc);

        if (cmng_fns != CMNG_FNS_NONE) {
            bxe_cmng_fns_init(sc, FALSE, cmng_fns);
            storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
        } else {
            /* rate shaping and fairness are disabled */
            BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
        }
    }

    bxe_link_report_locked(sc);

    if (IS_MF(sc)) {
        ; // XXX bxe_link_sync_notify(sc);
    }
}
/*
 * Handle newly-asserted attention bits: mask them in the AEU, record them
 * in sc->attn_state, service the hard-wired attentions (NIG/link, GPIOs,
 * general attentions), ack them toward HC/IGU, and finally restore the
 * NIG mask. The register-access ordering here is significant; do not
 * reorder without consulting the hardware documentation.
 */
static void
bxe_attn_int_asserted(struct bxe_softc *sc,
                      uint32_t         asserted)
{
    int port = SC_PORT(sc);
    uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
    uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
                                        NIG_REG_MASK_INTERRUPT_PORT0;
    uint32_t aeu_mask;
    uint32_t nig_mask = 0;
    uint32_t reg_addr;
    uint32_t igu_acked;
    uint32_t cnt;

    /* a bit asserting twice without deasserting indicates an IGU problem */
    if (sc->attn_state & asserted) {
        BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
    }

    /* mask the newly asserted bits in the AEU (under the per-port HW lock) */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

    aeu_mask = REG_RD(sc, aeu_addr);

    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
          aeu_mask, asserted);

    aeu_mask &= ~(asserted & 0x3ff);

    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);

    REG_WR(sc, aeu_addr, aeu_mask);

    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);

    sc->attn_state |= asserted;

    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);

    if (asserted & ATTN_HARD_WIRED_MASK) {
        if (asserted & ATTN_NIG_FOR_FUNC) {
            bxe_acquire_phy_lock(sc);
            /* save nig interrupt mask */
            nig_mask = REG_RD(sc, nig_int_mask_addr);

            /* If nig_mask is not set, no need to call the update function */
            if (nig_mask) {
                REG_WR(sc, nig_int_mask_addr, 0);

                bxe_link_attn(sc);
            }

            /* handle unicore attn? */
        }

        if (asserted & ATTN_SW_TIMER_4_FUNC) {
            BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
        }

        if (asserted & GPIO_2_FUNC) {
            BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
        }

        if (asserted & GPIO_3_FUNC) {
            BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
        }

        if (asserted & GPIO_4_FUNC) {
            BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
        }

        /* general attentions 1-3 belong to port 0, 4-6 to port 1 */
        if (port == 0) {
            if (asserted & ATTN_GENERAL_ATTN_1) {
                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
            }

            if (asserted & ATTN_GENERAL_ATTN_2) {
                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
            }

            if (asserted & ATTN_GENERAL_ATTN_3) {
                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
            }
        } else {
            if (asserted & ATTN_GENERAL_ATTN_4) {
                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
            }

            if (asserted & ATTN_GENERAL_ATTN_5) {
                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
            }

            if (asserted & ATTN_GENERAL_ATTN_6) {
                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
            }
        }
    } /* hardwired */

    /* ack the attention bits via the HC or IGU, whichever is in use */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
    } else {
        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
    }

    BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
          asserted,
          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
    REG_WR(sc, reg_addr, asserted);

    /* now set back the mask */
    if (asserted & ATTN_NIG_FOR_FUNC) {
        /*
         * Verify that IGU ack through BAR was written before restoring
         * NIG mask. This loop should exit after 2-3 iterations max.
         */
        if (sc->devinfo.int_block != INT_BLOCK_HC) {
            cnt = 0;

            do {
                igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
            } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
                     (++cnt < MAX_IGU_ATTN_ACK_TO));

            if (!igu_acked) {
                BLOGE(sc, "Failed to verify IGU ack on time\n");
            }

            mb();
        }

        REG_WR(sc, nig_int_mask_addr, nig_mask);

        bxe_release_phy_lock(sc);
    }
}
static void
bxe_print_next_block(struct bxe_softc *sc,
                     int              idx,
                     const char       *blk)
{
    /* prepend a comma separator for every block after the first */
    BLOGI(sc, "%s%s", (idx == 0) ? "" : ", ", blk);
}
/*
 * Decode parity-attention register 0: name each asserted block (when
 * 'print' is set) and return the updated running count of printed blocks.
 */
static int
bxe_check_blocks_with_parity0(struct bxe_softc *sc,
                              uint32_t         sig,
                              int              par_num,
                              uint8_t          print)
{
    uint32_t bit;

    /* consume asserted bits one at a time, lowest first */
    while (sig != 0) {
        bit = (sig & (~sig + 1)); /* isolate lowest set bit */

        switch (bit) {
        case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "BRB");
            break;
        case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "PARSER");
            break;
        case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "TSDM");
            break;
        case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "SEARCHER");
            break;
        case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "TCM");
            break;
        case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "TSEMI");
            break;
        case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "XPB");
            break;
        }

        sig &= ~bit;
    }

    return (par_num);
}
/*
 * Decode parity-attention register 1. A VAUX PCI core parity error is a
 * global (all-engine) condition and additionally raises *global.
 */
static int
bxe_check_blocks_with_parity1(struct bxe_softc *sc,
                              uint32_t         sig,
                              int              par_num,
                              uint8_t          *global,
                              uint8_t          print)
{
    uint32_t bit;

    /* consume asserted bits one at a time, lowest first */
    while (sig != 0) {
        bit = (sig & (~sig + 1)); /* isolate lowest set bit */

        switch (bit) {
        case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "PBF");
            break;
        case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "QM");
            break;
        case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "TM");
            break;
        case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "XSDM");
            break;
        case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "XCM");
            break;
        case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "XSEMI");
            break;
        case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "DOORBELLQ");
            break;
        case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "NIG");
            break;
        case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
            *global = TRUE;
            break;
        case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "DEBUG");
            break;
        case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "USDM");
            break;
        case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "UCM");
            break;
        case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "USEMI");
            break;
        case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "UPB");
            break;
        case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "CSDM");
            break;
        case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "CCM");
            break;
        }

        sig &= ~bit;
    }

    return (par_num);
}
/*
 * Decode parity-attention register 2: name each asserted block (when
 * 'print' is set) and return the updated running count of printed blocks.
 */
static int
bxe_check_blocks_with_parity2(struct bxe_softc *sc,
                              uint32_t         sig,
                              int              par_num,
                              uint8_t          print)
{
    uint32_t bit;

    /* consume asserted bits one at a time, lowest first */
    while (sig != 0) {
        bit = (sig & (~sig + 1)); /* isolate lowest set bit */

        switch (bit) {
        case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "CSEMI");
            break;
        case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "PXP");
            break;
        case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
            break;
        case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "CFC");
            break;
        case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "CDU");
            break;
        case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "DMAE");
            break;
        case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "IGU");
            break;
        case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "MISC");
            break;
        }

        sig &= ~bit;
    }

    return (par_num);
}
/*
 * Decode parity-attention register 3 (MCP latched parity errors). Every
 * error decoded here is a global condition and raises *global.
 */
static int
bxe_check_blocks_with_parity3(struct bxe_softc *sc,
                              uint32_t         sig,
                              int              par_num,
                              uint8_t          *global,
                              uint8_t          print)
{
    uint32_t bit;

    /* consume asserted bits one at a time, lowest first */
    while (sig != 0) {
        bit = (sig & (~sig + 1)); /* isolate lowest set bit */

        switch (bit) {
        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
            if (print)
                bxe_print_next_block(sc, par_num++, "MCP ROM");
            *global = TRUE;
            break;
        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
            if (print)
                bxe_print_next_block(sc, par_num++,
                                     "MCP UMP RX");
            *global = TRUE;
            break;
        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
            if (print)
                bxe_print_next_block(sc, par_num++,
                                     "MCP UMP TX");
            *global = TRUE;
            break;
        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
            if (print)
                bxe_print_next_block(sc, par_num++,
                                     "MCP SCPAD");
            *global = TRUE;
            break;
        }

        sig &= ~bit;
    }

    return (par_num);
}
/*
 * Decode parity-attention register 4 (E2+ only blocks: PGLUE_B and ATC).
 */
static int
bxe_check_blocks_with_parity4(struct bxe_softc *sc,
                              uint32_t         sig,
                              int              par_num,
                              uint8_t          print)
{
    uint32_t bit;

    /* consume asserted bits one at a time, lowest first */
    while (sig != 0) {
        bit = (sig & (~sig + 1)); /* isolate lowest set bit */

        switch (bit) {
        case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "PGLUE_B");
            break;
        case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
            if (print)
                bxe_print_next_block(sc, par_num++, "ATC");
            break;
        }

        sig &= ~bit;
    }

    return (par_num);
}
/*
 * Check the five parity-attention signature words against the known
 * parity-assert masks. When any hardware block reports a parity error,
 * optionally print the offending block names, raise *global for
 * chip-wide errors, latch BXE_ERR_GLOBAL, and return TRUE.
 */
static uint8_t
bxe_parity_attn(struct bxe_softc *sc,
                uint8_t          *global,
                uint8_t          print,
                uint32_t         *sig)
{
    int par_num = 0;

    if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
        (sig[1] & HW_PRTY_ASSERT_SET_1) ||
        (sig[2] & HW_PRTY_ASSERT_SET_2) ||
        (sig[3] & HW_PRTY_ASSERT_SET_3) ||
        (sig[4] & HW_PRTY_ASSERT_SET_4)) {
        BLOGE(sc, "Parity error: HW block parity attention:\n"
                  "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
              (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
              (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
              (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
              (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
              (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));

        if (print)
            BLOGI(sc, "Parity errors detected in blocks: ");

        /* par_num threads through the decoders to join names with commas */
        par_num =
            bxe_check_blocks_with_parity0(sc, sig[0] &
                                          HW_PRTY_ASSERT_SET_0,
                                          par_num, print);
        par_num =
            bxe_check_blocks_with_parity1(sc, sig[1] &
                                          HW_PRTY_ASSERT_SET_1,
                                          par_num, global, print);
        par_num =
            bxe_check_blocks_with_parity2(sc, sig[2] &
                                          HW_PRTY_ASSERT_SET_2,
                                          par_num, print);
        par_num =
            bxe_check_blocks_with_parity3(sc, sig[3] &
                                          HW_PRTY_ASSERT_SET_3,
                                          par_num, global, print);
        par_num =
            bxe_check_blocks_with_parity4(sc, sig[4] &
                                          HW_PRTY_ASSERT_SET_4,
                                          par_num, print);

        if (print)
            BLOGI(sc, "\n");

        if( *global == TRUE ) {
            BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL);
        }

        return (TRUE);
    }

    return (FALSE);
}
/*
 * Read the after-invert AEU attention registers for this port and run
 * the parity decoder over them. Returns TRUE if a parity error was found.
 * No-op (FALSE) unless the device is in the OPEN state.
 */
static uint8_t
bxe_chk_parity_attn(struct bxe_softc *sc,
                    uint8_t          *global,
                    uint8_t          print)
{
    struct attn_route attn = { {0} };
    int port = SC_PORT(sc);

    if(sc->state != BXE_STATE_OPEN)
        return FALSE;

    /* per-port after-invert attention words (port*4 selects FUNC_1 regs) */
    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);

    /*
     * Since MCP attentions can't be disabled inside the block, we need to
     * read AEU registers to see whether they're currently disabled
     */
    attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
                                       : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
                     MISC_AEU_ENABLE_MCP_PRTY_BITS) |
                    ~MISC_AEU_ENABLE_MCP_PRTY_BITS);

    /* the fifth attention word exists only on E2 and newer chips */
    if (!CHIP_IS_E1x(sc))
        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);

    return (bxe_parity_attn(sc, global, print, attn.sig));
}
/*
 * Service deasserted-attention group 4: decode and log PGLUE_B and ATC
 * hardware interrupts and set-4 parity errors. Any error latches
 * BXE_ERR_MISC and schedules the deferred error-recovery task.
 */
static void
bxe_attn_int_deasserted4(struct bxe_softc *sc,
                         uint32_t         attn)
{
    uint32_t val;
    bool err_flg = false;

    if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
        /* reading the STS_CLR register also clears the latched status */
        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
        BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
        err_flg = true;
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
    }

    if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
        /* reading the STS_CLR register also clears the latched status */
        val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
        BLOGE(sc, "ATC hw attention 0x%08x\n", val);
        err_flg = true;
        if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
    }

    if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
        BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
              (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
                                 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
        err_flg = true;
    }

    if (err_flg) {
        /* latch the error and kick the deferred recovery handler */
        BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
        taskqueue_enqueue_timeout(taskqueue_thread,
            &sc->sp_err_timeout_task, hz/10);
    }
}
static void
bxe_e1h_disable(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);

    /* stop the TX path first, then disable this function in the NIG LLH */
    bxe_tx_disable(sc);

    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
}
static void
bxe_e1h_enable(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);

    /* re-enable this function in the NIG LLH */
    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);

    // XXX bxe_tx_enable(sc);
}
/*
* called due to MCP event (on pmf):
* reread new bandwidth configuration
* configure FW
* notify others function about the change
*/
static void
bxe_config_mf_bw(struct bxe_softc *sc)
{
    /* only reprogram min/max rates while the link is actually up */
    if (sc->link_vars.link_up) {
        bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
        // XXX bxe_link_sync_notify(sc);
    }

    /* push the new cmng configuration to the storm firmware */
    storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
}
static void
bxe_set_mf_bw(struct bxe_softc *sc)
{
    /* apply the new bandwidth configuration, then ack the MCP request */
    bxe_config_mf_bw(sc);
    bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}
static void
bxe_handle_eee_event(struct bxe_softc *sc)
{
    /* acknowledge the EEE/LLDP negotiation-results notification */
    BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
    bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
}
#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
/*
 * Populate the ethernet-statistics block (drv_info_to_mcp.ether_stat)
 * that is later DMA'd to the management firmware on a DRV_INFO request.
 */
static void
bxe_drv_info_ether_stat(struct bxe_softc *sc)
{
    struct eth_stats_info *ether_stat =
        &sc->sp->drv_info_to_mcp.ether_stat;

    strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
            ETH_STAT_INFO_VERSION_LEN);

    /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
    sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
                                          DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
                                          ether_stat->mac_local + MAC_PAD,
                                          MAC_PAD, ETH_ALEN);

    ether_stat->mtu_size = sc->mtu;

    /* checksum offload is always advertised; LSO only when TSO is enabled */
    ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
    if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
        ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
    }

    // XXX ether_stat->feature_flags |= ???;

    ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;

    ether_stat->txq_size = sc->tx_ring_size;
    ether_stat->rxq_size = sc->rx_ring_size;
}
/*
 * Service a DRV_INFO request from the management firmware: validate the
 * protocol version, fill the requested statistics block, publish its DMA
 * address via shmem2, and ACK (or NACK on any validation failure).
 */
static void
bxe_handle_drv_info_req(struct bxe_softc *sc)
{
    enum drv_info_opcode op_code;
    uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);

    /* if drv_info version supported by MFW doesn't match - send NACK */
    if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
        return;
    }

    op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
               DRV_INFO_CONTROL_OP_CODE_SHIFT);

    /* start from a clean slate before filling the requested block */
    memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));

    switch (op_code) {
    case ETH_STATS_OPCODE:
        bxe_drv_info_ether_stat(sc);
        break;
    case FCOE_STATS_OPCODE:
    case ISCSI_STATS_OPCODE:
    default:
        /* if op code isn't supported - send NACK */
        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
        return;
    }

    /*
     * If we got drv_info attn from MFW then these fields are defined in
     * shmem2 for sure
     */
    SHMEM2_WR(sc, drv_info_host_addr_lo,
              U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
    SHMEM2_WR(sc, drv_info_host_addr_hi,
              U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));

    bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
}
static void
bxe_dcc_event(struct bxe_softc *sc,
              uint32_t         dcc_event)
{
    BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);

    if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
        /*
         * This is the only place besides the function initialization
         * where the sc->flags can change so it is done without any
         * locks
         */
        if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
            BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
            sc->flags |= BXE_MF_FUNC_DIS;
            bxe_e1h_disable(sc);
        } else {
            BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
            sc->flags &= ~BXE_MF_FUNC_DIS;
            bxe_e1h_enable(sc);
        }
        dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
    }

    if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
        bxe_config_mf_bw(sc);
        dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
    }

    /* report to the MCP: any event bit left unhandled means failure */
    bxe_fw_command(sc,
                   (dcc_event != 0) ? DRV_MSG_CODE_DCC_FAILURE :
                                      DRV_MSG_CODE_DCC_OK,
                   0);
}
/*
 * Take over the PMF (port master function) role for this port: flag it,
 * enable NIG attentions for this VN, and kick a PMF statistics event.
 */
static void
bxe_pmf_update(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t val;

    sc->port.pmf = 1;
    BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);

    /*
     * We need the mb() to ensure the ordering between the writing to
     * sc->port.pmf here and reading it from the bxe_periodic_task().
     */
    mb();

    /* queue a periodic task */
    // XXX schedule task...

    // XXX bxe_dcbx_pmf_update(sc);

    /* enable nig attention: base mask plus this VN's attention bit */
    val = (0xff0f | (1 << (SC_VN(sc) + 4)));
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
    } else if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
    }

    bxe_stats_handle(sc, STATS_EVENT_PMF);
}
/*
 * Dump any pending firmware assertions from the four storm processors
 * (X/T/C/U). Each storm's assert list is scanned until an entry with the
 * invalid-opcode marker is found. Returns the number of asserts printed.
 */
static int
bxe_mc_assert(struct bxe_softc *sc)
{
    char last_idx;
    int i, rc = 0;
    uint32_t r0, r1, r2, r3;

    /* XSTORM */
    last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
    if (last_idx) {
        BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
    }

    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
        r0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
        r1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
        r2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
        r3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
        if (r0 == COMMON_ASM_INVALID_ASSERT_OPCODE) {
            break; /* end of the assert list */
        }
        BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
              i, r3, r2, r1, r0);
        rc++;
    }

    /* TSTORM */
    last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
    if (last_idx) {
        BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
    }

    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
        r0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
        r1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
        r2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
        r3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
        if (r0 == COMMON_ASM_INVALID_ASSERT_OPCODE) {
            break; /* end of the assert list */
        }
        BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
              i, r3, r2, r1, r0);
        rc++;
    }

    /* CSTORM */
    last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
    if (last_idx) {
        BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
    }

    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
        r0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
        r1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
        r2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
        r3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
        if (r0 == COMMON_ASM_INVALID_ASSERT_OPCODE) {
            break; /* end of the assert list */
        }
        BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
              i, r3, r2, r1, r0);
        rc++;
    }

    /* USTORM */
    last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
    if (last_idx) {
        BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
    }

    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
        r0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
        r1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
        r2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
        r3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
        if (r0 == COMMON_ASM_INVALID_ASSERT_OPCODE) {
            break; /* end of the assert list */
        }
        BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
              i, r3, r2, r1, r0);
        rc++;
    }

    return (rc);
}
/*
 * Service deasserted-attention group 3: general attentions (PMF link
 * events and MCP-driven notifications such as DCC, MF bandwidth, drv_info
 * and EEE), microcode/MCP asserts, and latched GRC attentions.
 */
static void
bxe_attn_int_deasserted3(struct bxe_softc *sc,
                         uint32_t         attn)
{
    int func = SC_FUNC(sc);
    uint32_t val;

    if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
        if (attn & BXE_PMF_LINK_ASSERT(sc)) {
            /* clear the general attention and refresh the MF config */
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
            bxe_read_mf_cfg(sc);
            sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
                MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
            val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);

            /* dispatch each MCP notification flagged in drv_status */
            if (val & DRV_STATUS_DCC_EVENT_MASK)
                bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));

            if (val & DRV_STATUS_SET_MF_BW)
                bxe_set_mf_bw(sc);

            if (val & DRV_STATUS_DRV_INFO_REQ)
                bxe_handle_drv_info_req(sc);

            if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
                bxe_pmf_update(sc);

            if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
                bxe_handle_eee_event(sc);

            if (sc->link_vars.periodic_flags &
                ELINK_PERIODIC_FLAGS_LINK_EVENT) {
                /* sync with link */
                bxe_acquire_phy_lock(sc);
                sc->link_vars.periodic_flags &=
                    ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
                bxe_release_phy_lock(sc);
                if (IS_MF(sc))
                    ; // XXX bxe_link_sync_notify(sc);
                bxe_link_report(sc);
            }

            /*
             * Always call it here: bxe_link_report() will
             * prevent the link indication duplication.
             */
            bxe_link_status_update(sc);
        } else if (attn & BXE_MC_ASSERT_BITS) {
            /* storm microcode assert: dump, clear, and stop interrupts */
            BLOGE(sc, "MC assert!\n");
            bxe_mc_assert(sc);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
            bxe_int_disable(sc);
            BXE_SET_ERROR_BIT(sc, BXE_ERR_MC_ASSERT);
            taskqueue_enqueue_timeout(taskqueue_thread,
                &sc->sp_err_timeout_task, hz/10);

        } else if (attn & BXE_MCP_ASSERT) {
            BLOGE(sc, "MCP assert!\n");
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
            BXE_SET_ERROR_BIT(sc, BXE_ERR_MCP_ASSERT);
            taskqueue_enqueue_timeout(taskqueue_thread,
                &sc->sp_err_timeout_task, hz/10);
            bxe_int_disable(sc); /* avoid repetitive assert alert */
        } else {
            BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
        }
    }

    if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
        BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
        if (attn & BXE_GRC_TIMEOUT) {
            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
            BLOGE(sc, "GRC time-out 0x%08x\n", val);
        }
        if (attn & BXE_GRC_RSV) {
            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
            BLOGE(sc, "GRC reserved 0x%08x\n", val);
        }
        /* clear the latched attentions */
        REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
    }
}
/*
 * Service group-2 deasserted attention bits: CFC, PXP and PXP2 HW
 * interrupts plus the remaining HW_INTERRUT_ASSERT_SET_2 blocks.
 * Status registers are read-to-clear; on a fatal condition the
 * slowpath error task is scheduled.
 */
static void
bxe_attn_int_deasserted2(struct bxe_softc *sc,
                         uint32_t         attn)
{
    int port = SC_PORT(sc);
    int reg_offset;
    uint32_t val0, mask0, val1, mask1;
    uint32_t val;
    bool err_flg = false;

    if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
        /* read-to-clear the CFC interrupt status */
        val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
        BLOGE(sc, "CFC hw attention 0x%08x\n", val);
        /* CFC error attention */
        if (val & 0x2) {
            BLOGE(sc, "FATAL error from CFC\n");
            err_flg = true;
        }
    }

    if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
        val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
        BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
        /* RQ_USDMDP_FIFO_OVERFLOW */
        if (val & 0x18000) {
            BLOGE(sc, "FATAL error from PXP\n");
            err_flg = true;
        }

        if (!CHIP_IS_E1x(sc)) {
            /* E2 and newer chips have a second PXP status register */
            val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
            BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
            err_flg = true;
        }
    }

#define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT

    if (attn & AEU_PXP2_HW_INT_BIT) {
        /* CQ47854 workaround do not panic on
         * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
         */
        if (!CHIP_IS_E1x(sc)) {
            /* read the masks first, then the sticky status registers */
            mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
            val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
            mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
            val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);

            /*
             * If the only PXP2_EOP_ERROR_BIT is set in
             * STS0 and STS1 - clear it
             *
             * probably we lose additional attentions between
             * STS0 and STS_CLR0, in this case user will not
             * be notified about them
             */
            if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
                !(val1 & mask1))
                val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);

            /* print the register, since no one can restore it */
            BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);

            /*
             * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
             * then notify
             */
            if (val0 & PXP2_EOP_ERROR_BIT) {
                BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
                err_flg = true;

                /*
                 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
                 * set then clear attention from PXP2 block without panic
                 */
                if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
                    ((val1 & mask1) == 0))
                    attn &= ~AEU_PXP2_HW_INT_BIT;
            }
        }
    }

    if (attn & HW_INTERRUT_ASSERT_SET_2) {
        /* mask the offending bits in the per-port AEU enable register */
        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
        val = REG_RD(sc, reg_offset);
        val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
        REG_WR(sc, reg_offset, val);

        BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
        err_flg = true;
        bxe_panic(sc, ("HW block attention set2\n"));
    }
    if (err_flg) {
        /* defer full error handling to the slowpath error timeout task */
        BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL);
        taskqueue_enqueue_timeout(taskqueue_thread,
            &sc->sp_err_timeout_task, hz/10);
    }
}
/*
 * Service group-1 deasserted attention bits: the doorbell queue (DORQ)
 * HW interrupt and the remaining HW_INTERRUT_ASSERT_SET_1 blocks.
 */
static void
bxe_attn_int_deasserted1(struct bxe_softc *sc,
                         uint32_t         attn)
{
    int port = SC_PORT(sc);
    int reg_offset;
    uint32_t val;
    bool err_flg = false;

    if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
        /* read-to-clear the DORQ interrupt status */
        val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
        BLOGE(sc, "DB hw attention 0x%08x\n", val);
        /* DORQ discard attention */
        if (val & 0x2) {
            BLOGE(sc, "FATAL error from DORQ\n");
            err_flg = true;
        }
    }

    if (attn & HW_INTERRUT_ASSERT_SET_1) {
        /* mask the offending bits in the per-port AEU enable register */
        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
        val = REG_RD(sc, reg_offset);
        val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
        REG_WR(sc, reg_offset, val);

        BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
        err_flg = true;
        bxe_panic(sc, ("HW block attention set1\n"));
    }
    if (err_flg) {
        /* defer full error handling to the slowpath error timeout task */
        BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
        taskqueue_enqueue_timeout(taskqueue_thread,
            &sc->sp_err_timeout_task, hz/10);
    }
}
/*
 * Service group-0 deasserted attention bits: SPIO5 (fan failure), PHY
 * module-detect interrupts and the HW_INTERRUT_ASSERT_SET_0 blocks.
 */
static void
bxe_attn_int_deasserted0(struct bxe_softc *sc,
                         uint32_t         attn)
{
    int port = SC_PORT(sc);
    int reg_offset;
    uint32_t val;

    reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                          MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;

    if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
        /* mask SPIO5 in the AEU enable register so it does not re-fire */
        val = REG_RD(sc, reg_offset);
        val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
        REG_WR(sc, reg_offset, val);

        BLOGW(sc, "SPIO5 hw attention\n");

        /* Fan failure attention */
        elink_hw_reset_phy(&sc->link_params);
        bxe_fan_failure(sc);
    }

    if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
        /* PHY module detect interrupt; handled only by the PMF */
        bxe_acquire_phy_lock(sc);
        elink_handle_module_detect_int(&sc->link_params);
        bxe_release_phy_lock(sc);
    }

    if (attn & HW_INTERRUT_ASSERT_SET_0) {
        /* mask the offending bits, schedule the error task, then panic */
        val = REG_RD(sc, reg_offset);
        val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
        REG_WR(sc, reg_offset, val);

        BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
        taskqueue_enqueue_timeout(taskqueue_thread,
            &sc->sp_err_timeout_task, hz/10);
        bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n",
                       (attn & HW_INTERRUT_ASSERT_SET_0)));
    }
}
/*
 * Handle newly deasserted attention lines: latch the post-inversion
 * AEU signals, dispatch each deasserted dynamic attention group to the
 * per-group handlers, then clear the attention bits in the HC/IGU and
 * re-enable the lines in the AEU mask.  Parity errors short-circuit
 * into the recovery path instead.
 */
static void
bxe_attn_int_deasserted(struct bxe_softc *sc,
                        uint32_t         deasserted)
{
    struct attn_route attn;
    struct attn_route *group_mask;
    int port = SC_PORT(sc);
    int index;
    uint32_t reg_addr;
    uint32_t val;
    uint32_t aeu_mask;
    uint8_t global = FALSE;

    /*
     * Need to take HW lock because MCP or other port might also
     * try to handle this event.
     */
    bxe_acquire_alr(sc);

    if (bxe_chk_parity_attn(sc, &global, TRUE)) {
        /* XXX
         * In case of parity errors don't handle attentions so that
         * other function would "see" parity errors.
         */
        // XXX schedule a recovery task...
        /* disable HW interrupts */
        bxe_int_disable(sc);
        BXE_SET_ERROR_BIT(sc, BXE_ERR_PARITY);
        taskqueue_enqueue_timeout(taskqueue_thread,
            &sc->sp_err_timeout_task, hz/10);
        bxe_release_alr(sc);
        return;
    }

    /* latch the post-inversion attention signals for this port */
    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
    if (!CHIP_IS_E1x(sc)) {
        /* the fifth signal group exists on E2 and newer only */
        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
    } else {
        attn.sig[4] = 0;
    }

    BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
          attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);

    /* dispatch each deasserted dynamic attention group */
    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
        if (deasserted & (1 << index)) {
            group_mask = &sc->attn_group[index];

            BLOGD(sc, DBG_INTR,
                  "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
                  group_mask->sig[0], group_mask->sig[1],
                  group_mask->sig[2], group_mask->sig[3],
                  group_mask->sig[4]);

            bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
            bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
            bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
            bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
            bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
        }
    }

    bxe_release_alr(sc);

    /* clear the handled attention bits in the HC or IGU */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        reg_addr = (HC_REG_COMMAND_REG + port*32 +
                    COMMAND_REG_ATTN_BITS_CLR);
    } else {
        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
    }

    val = ~deasserted;
    BLOGD(sc, DBG_INTR,
          "about to mask 0x%08x at %s addr 0x%08x\n", val,
          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
    REG_WR(sc, reg_addr, val);

    /* a deasserted bit that was never tracked indicates an IGU problem */
    if (~sc->attn_state & deasserted) {
        BLOGE(sc, "IGU error\n");
    }

    reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                      MISC_REG_AEU_MASK_ATTN_FUNC_0;

    /* re-enable the deasserted lines in the AEU mask under the HW lock */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

    aeu_mask = REG_RD(sc, reg_addr);

    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
          aeu_mask, deasserted);
    aeu_mask |= (deasserted & 0x3ff);
    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);

    REG_WR(sc, reg_addr, aeu_mask);

    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

    /* finally drop the deasserted lines from the cached attention state */
    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
    sc->attn_state &= ~deasserted;
    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
}
/*
 * Dispatch attention interrupts: compare the attention bits reported
 * by the chip in the default status block against the driver's cached
 * attention state and route newly raised / newly cleared bits to the
 * assert / deassert handlers respectively.
 */
static void
bxe_attn_int(struct bxe_softc *sc)
{
    /* snapshot the chip-written (little-endian) attention words */
    uint32_t bits  = le32toh(sc->def_sb->atten_status_block.attn_bits);
    uint32_t ack   = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
    uint32_t state = sc->attn_state;

    /* set in hw, not yet acked, not yet tracked -> newly asserted */
    uint32_t asserted   = bits & ~ack & ~state;
    /* clear in hw, still acked and tracked -> newly deasserted */
    uint32_t deasserted = ~bits & ack & state;

    BLOGD(sc, DBG_INTR,
          "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
          bits, ack, asserted, deasserted);

    /* a bit may not differ from both the ack and the cached state */
    if (~(bits ^ ack) & (bits ^ state)) {
        BLOGE(sc, "BAD attention state\n");
    }

    /* handle bits that were raised */
    if (asserted) {
        bxe_attn_int_asserted(sc, asserted);
    }

    /* handle bits that were cleared */
    if (deasserted) {
        bxe_attn_int_deasserted(sc, deasserted);
    }
}
/*
 * Refresh the driver's cached default status block indices and report
 * which of them advanced since the last call.
 *
 * Returns a bitmask of BXE_DEF_SB_ATT_IDX (attention index moved)
 * and/or BXE_DEF_SB_IDX (slowpath running index moved).
 */
static uint16_t
bxe_update_dsb_idx(struct bxe_softc *sc)
{
    struct host_sp_status_block *dsb = sc->def_sb;
    uint16_t flags = 0;

    mb(); /* status block is written to by the chip */

    if (sc->def_att_idx != dsb->atten_status_block.attn_bits_index) {
        sc->def_att_idx = dsb->atten_status_block.attn_bits_index;
        flags |= BXE_DEF_SB_ATT_IDX;
    }

    if (sc->def_idx != dsb->sp_sb.running_index) {
        sc->def_idx = dsb->sp_sb.running_index;
        flags |= BXE_DEF_SB_IDX;
    }

    mb();
    return (flags);
}
/*
 * Map a connection ID to the queue state object of the fastpath
 * instance that owns it.
 */
static inline struct ecore_queue_sp_obj *
bxe_cid_to_q_obj(struct bxe_softc *sc,
                 uint32_t         cid)
{
    struct ecore_queue_sp_obj *q;

    BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
    q = &sc->sp_objs[CID_TO_FP(cid, sc)].q_obj;
    return (q);
}
static void
bxe_handle_mcast_eqe(struct bxe_softc *sc)
{
struct ecore_mcast_ramrod_params rparam;
int rc;
memset(&rparam, 0, sizeof(rparam));
rparam.mcast_obj = &sc->mcast_obj;
BXE_MCAST_LOCK(sc);
/* clear pending state for the last command */
sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
/* if there are pending mcast commands - send them */
if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
if (rc < 0) {
BLOGD(sc, DBG_SP,
"ERROR: Failed to send pending mcast commands (%d)\n", rc);
}
}
BXE_MCAST_UNLOCK(sc);
}
/*
 * Completion handler for classification ramrods (MAC filter and, on
 * 57710, multicast-as-unicast).  Decodes the FW echo field to find the
 * command type and owning CID, then lets the matching vlan_mac object
 * complete the command and kick any queued follow-ups.
 */
static void
bxe_handle_classification_eqe(struct bxe_softc      *sc,
                              union event_ring_elem *elem)
{
    unsigned long ramrod_flags = 0;
    int rc = 0;
    /*
     * Fix: convert the FW-provided little-endian echo word once, up
     * front.  The original masked the raw value for the CID while
     * applying le32toh() only to the command-type shift below — an
     * inconsistency that yields a wrong CID on big-endian hosts.
     */
    uint32_t echo_val = le32toh(elem->message.data.eth_event.echo);
    uint32_t cid = echo_val & BXE_SWCID_MASK;
    struct ecore_vlan_mac_obj *vlan_mac_obj;

    /* always push next commands out, don't wait here */
    bit_set(&ramrod_flags, RAMROD_CONT);

    switch (echo_val >> BXE_SWCID_SHIFT) {
    case ECORE_FILTER_MAC_PENDING:
        BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
        vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
        break;

    case ECORE_FILTER_MCAST_PENDING:
        BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
        /*
         * This is only relevant for 57710 where multicast MACs are
         * configured as unicast MACs using the same ramrod.
         */
        bxe_handle_mcast_eqe(sc);
        return;

    default:
        BLOGE(sc, "Unsupported classification command: %d\n",
              elem->message.data.eth_event.echo);
        return;
    }

    rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);

    if (rc < 0) {
        BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
    } else if (rc > 0) {
        BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
    }
}
/*
 * Completion handler for an rx_mode (filter rules) ramrod: drop the
 * pending flag and, if another rx_mode change was scheduled while this
 * one was in flight, re-issue the command.  @elem is unused but kept
 * for a uniform EQ-handler signature.
 */
static void
bxe_handle_rx_mode_eqe(struct bxe_softc      *sc,
                       union event_ring_elem *elem)
{
    bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);

    /* send rx_mode command again if was requested */
    if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state)) {
        bxe_set_storm_rx_mode(sc);
    }
}
/*
 * Publish a new event-queue producer value for this function via storm
 * memory so the chip sees the updated ring position.
 */
static void
bxe_update_eq_prod(struct bxe_softc *sc,
                   uint16_t         prod)
{
    storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
    wmb(); /* keep prod updates ordered */
}
/*
 * Process the slowpath event queue (EQ): walk the event ring elements
 * completed by the chip, dispatch each ramrod completion to its
 * handler, then return the consumed credits and republish the ring
 * producer to the chip.
 */
static void
bxe_eq_int(struct bxe_softc *sc)
{
    uint16_t hw_cons, sw_cons, sw_prod;
    union event_ring_elem *elem;
    uint8_t echo;
    uint32_t cid;
    uint8_t opcode;
    int spqe_cnt = 0; /* number of elements consumed in this pass */
    struct ecore_queue_sp_obj *q_obj;
    struct ecore_func_sp_obj *f_obj = &sc->func_obj;
    struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;

    hw_cons = le16toh(*sc->eq_cons_sb);

    /*
     * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
     * when we get to the next-page we need to adjust so the loop
     * condition below will be met. The next element is the size of a
     * regular element and hence incrementing by 1
     */
    if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
        hw_cons++;
    }

    /*
     * This function may never run in parallel with itself for a
     * specific sc and no need for a read memory barrier here.
     */
    sw_cons = sc->eq_cons;
    sw_prod = sc->eq_prod;

    BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
          hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));

    for (;
         sw_cons != hw_cons;
         sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {

        elem = &sc->eq[EQ_DESC(sw_cons)];

        /* elem CID originates from FW, actually LE */
        cid = SW_CID(elem->message.data.cfc_del_event.cid);
        opcode = elem->message.opcode;

        /* handle eq element */
        switch (opcode) {
        case EVENT_RING_OPCODE_STAT_QUERY:
            BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
                  sc->stats_comp++);
            /* nothing to do with stats comp */
            goto next_spqe;

        case EVENT_RING_OPCODE_CFC_DEL:
            /* handle according to cid range */
            /* we may want to verify here that the sc state is HALTING */
            BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
            q_obj = bxe_cid_to_q_obj(sc, cid);
            if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
                break;
            }
            goto next_spqe;

        case EVENT_RING_OPCODE_STOP_TRAFFIC:
            BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
                break;
            }
            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
            goto next_spqe;

        case EVENT_RING_OPCODE_START_TRAFFIC:
            BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
                break;
            }
            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
            goto next_spqe;

        case EVENT_RING_OPCODE_FUNCTION_UPDATE:
            echo = elem->message.data.function_update_event.echo;
            if (echo == SWITCH_UPDATE) {
                BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
                if (f_obj->complete_cmd(sc, f_obj,
                                        ECORE_F_CMD_SWITCH_UPDATE)) {
                    break;
                }
            }
            else {
                BLOGD(sc, DBG_SP,
                      "AFEX: ramrod completed FUNCTION_UPDATE\n");
            }
            goto next_spqe;

        case EVENT_RING_OPCODE_FORWARD_SETUP:
            q_obj = &bxe_fwd_sp_obj(sc, q_obj);
            if (q_obj->complete_cmd(sc, q_obj,
                                    ECORE_Q_CMD_SETUP_TX_ONLY)) {
                break;
            }
            goto next_spqe;

        case EVENT_RING_OPCODE_FUNCTION_START:
            BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
                break;
            }
            goto next_spqe;

        case EVENT_RING_OPCODE_FUNCTION_STOP:
            BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
                break;
            }
            goto next_spqe;
        }

        /*
         * Remaining opcodes are qualified by the current driver state;
         * they are only valid while open / diag / halting.
         */
        switch (opcode | sc->state) {
        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
            cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
            BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
            rss_raw->clear_pending(rss_raw);
            break;

        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
            BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
            bxe_handle_classification_eqe(sc, elem);
            break;

        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
            BLOGD(sc, DBG_SP, "got mcast ramrod\n");
            bxe_handle_mcast_eqe(sc);
            break;

        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
            BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
            bxe_handle_rx_mode_eqe(sc, elem);
            break;

        default:
            /* unknown event log error and continue */
            BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
                  elem->message.opcode, sc->state);
        }

next_spqe:
        spqe_cnt++;
    } /* for */

    mb();
    /* return the consumed elements to the slowpath queue budget */
    atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);

    sc->eq_cons = sw_cons;
    sc->eq_prod = sw_prod;

    /* make sure that above mem writes were issued towards the memory */
    wmb();

    /* update producer */
    bxe_update_eq_prod(sc, sc->eq_prod);
}
/*
 * Slowpath taskqueue handler: determine which default status block
 * indices advanced (attentions and/or slowpath events), service them,
 * and re-enable the default status block interrupt.
 */
static void
bxe_handle_sp_tq(void *context,
                 int  pending)
{
    struct bxe_softc *sc = (struct bxe_softc *)context;
    uint16_t status;

    BLOGD(sc, DBG_SP, "---> SP TASK <---\n");

    /* what work needs to be performed? */
    status = bxe_update_dsb_idx(sc);

    BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);

    /* HW attentions */
    if (status & BXE_DEF_SB_ATT_IDX) {
        BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
        bxe_attn_int(sc);
        status &= ~BXE_DEF_SB_ATT_IDX;
    }

    /* SP events: STAT_QUERY and others */
    if (status & BXE_DEF_SB_IDX) {
        /* handle EQ completions */
        BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
        bxe_eq_int(sc);
        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
                   le16toh(sc->def_idx), IGU_INT_NOP, 1);
        status &= ~BXE_DEF_SB_IDX;
    }

    /* if status is non zero then something went wrong */
    if (__predict_false(status)) {
        BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
    }

    /* ack status block only if something was actually handled */
    bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
               le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);

    /*
     * Must be called after the EQ processing (since eq leads to sriov
     * ramrod completion flows).
     * This flow may have been scheduled by the arrival of a ramrod
     * completion, or by the sriov code rescheduling itself.
     */
    // XXX bxe_iov_sp_task(sc);
}
/*
 * Fastpath taskqueue handler: service tx and rx completions for one
 * queue.  If rx work is still outstanding the task reschedules itself;
 * otherwise the status block interrupt is re-enabled.
 */
static void
bxe_handle_fp_tq(void *context,
                 int  pending)
{
    struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
    struct bxe_softc *sc = fp->sc;
    /* uint8_t more_tx = FALSE; */
    uint8_t rx_pending = FALSE;

    BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);

    /* XXX
     * IFF_DRV_RUNNING state can't be checked here since we process
     * slowpath events on a client queue during setup. Instead
     * we need to add a "process/continue" flag here that the driver
     * can use to tell the task here not to do anything.
     */

    /* update the fastpath index */
    bxe_update_fp_sb_idx(fp);

    /* XXX add loop here if ever support multiple tx CoS */
    /* fp->txdata[cos] */
    if (bxe_has_tx_work(fp)) {
        BXE_FP_TX_LOCK(fp);
        /* more_tx = */ bxe_txeof(sc, fp);
        BXE_FP_TX_UNLOCK(fp);
    }

    if (bxe_has_rx_work(fp)) {
        rx_pending = bxe_rxeof(sc, fp);
    }

    if (rx_pending /*|| more_tx*/) {
        /* still more work to do; run ourselves again */
        taskqueue_enqueue(fp->tq, &fp->tq_task);
        return;
    }

    /* all caught up: re-enable interrupts on this status block */
    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
}
/*
 * Fastpath work performed directly from interrupt context: service tx
 * and rx completions; if rx work remains, defer the remainder to the
 * fastpath taskqueue, otherwise re-enable the status block interrupt.
 */
static void
bxe_task_fp(struct bxe_fastpath *fp)
{
    struct bxe_softc *sc = fp->sc;
    /* uint8_t more_tx = FALSE; */
    uint8_t rx_pending = FALSE;

    BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);

    /* update the fastpath index */
    bxe_update_fp_sb_idx(fp);

    /* XXX add loop here if ever support multiple tx CoS */
    /* fp->txdata[cos] */
    if (bxe_has_tx_work(fp)) {
        BXE_FP_TX_LOCK(fp);
        /* more_tx = */ bxe_txeof(sc, fp);
        BXE_FP_TX_UNLOCK(fp);
    }

    if (bxe_has_rx_work(fp)) {
        rx_pending = bxe_rxeof(sc, fp);
    }

    if (rx_pending /*|| more_tx*/) {
        /* still more work to do, bail out if this ISR and process later */
        taskqueue_enqueue(fp->tq, &fp->tq_task);
        return;
    }

    /*
     * Here we write the fastpath index taken before doing any tx or rx work.
     * It is very well possible other hw events occurred up to this point and
     * they were actually processed accordingly above. Since we're going to
     * write an older fastpath index, an interrupt is coming which we might
     * not do any work in.
     */
    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
}
/*
* Legacy interrupt entry point.
*
* Verifies that the controller generated the interrupt and
* then calls a separate routine to handle the various
* interrupt causes: link, RX, and TX.
*/
static void
bxe_intr_legacy(void *xsc)
{
    struct bxe_softc *sc = (struct bxe_softc *)xsc;
    struct bxe_fastpath *fp;
    uint16_t status, mask;
    int i;

    BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");

    /*
     * 0 for ustorm, 1 for cstorm
     * the bits returned from ack_int() are 0-15
     * bit 0 = attention status block
     * bit 1 = fast path status block
     * a mask of 0x2 or more = tx/rx event
     * a mask of 1 = slow path event
     */

    status = bxe_ack_int(sc);

    /* the interrupt is not for us */
    if (__predict_false(status == 0)) {
        BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
        return;
    }

    BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);

    /* service every queue whose status bit is set */
    FOR_EACH_ETH_QUEUE(sc, i) {
        fp = &sc->fp[i];
        mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
        if (status & mask) {
            /* acknowledge and disable further fastpath interrupts */
            bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
            bxe_task_fp(fp);
            status &= ~mask;
        }
    }

    if (__predict_false(status & 0x1)) {
        /* acknowledge and disable further slowpath interrupts */
        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

        /* schedule slowpath handler */
        taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);

        status &= ~0x1;
    }

    /* any bits still set were not claimed by a handler above */
    if (__predict_false(status)) {
        BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
    }
}
/* slowpath interrupt entry point (MSI-X vector 0); the real work is
 * deferred to the slowpath taskqueue */
static void
bxe_intr_sp(void *xsc)
{
    struct bxe_softc *sc = (struct bxe_softc *)xsc;

    BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");

    /* acknowledge and disable further slowpath interrupts */
    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

    /* schedule slowpath handler */
    taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
}
/* fastpath interrupt entry point (per-queue MSI-X vector) */
static void
bxe_intr_fp(void *xfp)
{
    struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
    struct bxe_softc *sc = fp->sc;

    BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);

    BLOGD(sc, DBG_INTR,
          "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
          curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);

    /* acknowledge and disable further fastpath interrupts */
    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

    /* service tx/rx completions inline (may defer to the taskqueue) */
    bxe_task_fp(fp);
}
/*
 * Release all interrupt resources allocated by the driver: the IRQ
 * resource(s) for the active interrupt mode and, for message-signaled
 * modes, the MSI/MSI-X messages themselves.
 */
static void
bxe_interrupt_free(struct bxe_softc *sc)
{
    int i;

    switch (sc->interrupt_mode) {
    case INTR_MODE_INTX:
        BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
        if (sc->intr[0].resource != NULL) {
            bus_release_resource(sc->dev,
                                 SYS_RES_IRQ,
                                 sc->intr[0].rid,
                                 sc->intr[0].resource);
        }
        break;
    /*
     * MSI and MSI-X teardown are identical (release each vector's IRQ
     * resource, then the messages); the two cases were previously
     * duplicated verbatim, so they now share one body.
     */
    case INTR_MODE_MSI:
    case INTR_MODE_MSIX: {
        const char *label =
            (sc->interrupt_mode == INTR_MODE_MSI) ? "MSI" : "MSI-X";

        for (i = 0; i < sc->intr_count; i++) {
            BLOGD(sc, DBG_LOAD, "Releasing %s vector %d\n", label, i);
            if (sc->intr[i].resource && sc->intr[i].rid) {
                bus_release_resource(sc->dev,
                                     SYS_RES_IRQ,
                                     sc->intr[i].rid,
                                     sc->intr[i].resource);
            }
        }
        pci_release_msi(sc->dev);
        break;
    }
    default:
        /* nothing to do as initial allocation failed */
        break;
    }
}
/*
* This function determines and allocates the appropriate
* interrupt based on system capabilites and user request.
*
* The user may force a particular interrupt mode, specify
* the number of receive queues, specify the method for
* distribuitng received frames to receive queues, or use
* the default settings which will automatically select the
* best supported combination. In addition, the OS may or
* may not support certain combinations of these settings.
* This routine attempts to reconcile the settings requested
* by the user with the capabilites available from the system
* to select the optimal combination of features.
*
* Returns:
* 0 = Success, !0 = Failure.
*/
static int
bxe_interrupt_alloc(struct bxe_softc *sc)
{
    int msix_count = 0;
    int msi_count = 0;
    int num_requested = 0;
    int num_allocated = 0;
    int rid, i, j;
    int rc;

    /* get the number of available MSI/MSI-X interrupts from the OS */
    if (sc->interrupt_mode > 0) {
        if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
            msix_count = pci_msix_count(sc->dev);
        }

        if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
            msi_count = pci_msi_count(sc->dev);
        }

        BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
              msi_count, msix_count);
    }

    do { /* try allocating MSI-X interrupt resources (at least 2) */
        if (sc->interrupt_mode != INTR_MODE_MSIX) {
            break;
        }

        if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
            (msix_count < 2)) {
            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
            break;
        }

        /* ask for the necessary number of MSI-X vectors */
        num_requested = min((sc->num_queues + 1), msix_count);

        BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);

        num_allocated = num_requested;
        if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
            BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
            break;
        }

        if (num_allocated < 2) { /* possible? */
            BLOGE(sc, "MSI-X allocation less than 2!\n");
            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
            pci_release_msi(sc->dev);
            break;
        }

        BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
              num_requested, num_allocated);

        /* best effort so use the number of vectors allocated to us */
        sc->intr_count = num_allocated;
        sc->num_queues = num_allocated - 1; /* vector 0 is the slowpath */

        rid = 1; /* initial resource identifier */

        /* allocate the MSI-X vectors */
        for (i = 0; i < num_allocated; i++) {
            sc->intr[i].rid = (rid + i);

            if ((sc->intr[i].resource =
                 bus_alloc_resource_any(sc->dev,
                                        SYS_RES_IRQ,
                                        &sc->intr[i].rid,
                                        RF_ACTIVE)) == NULL) {
                BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
                      i, (rid + i));

                /* unwind the vectors mapped so far */
                for (j = (i - 1); j >= 0; j--) {
                    bus_release_resource(sc->dev,
                                         SYS_RES_IRQ,
                                         sc->intr[j].rid,
                                         sc->intr[j].resource);
                }

                sc->intr_count = 0;
                sc->num_queues = 0;
                sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
                pci_release_msi(sc->dev);
                break;
            }

            BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
        }
    } while (0);

    do { /* try allocating a single MSI vector (only one is ever
          * requested below; an earlier comment incorrectly said
          * "at least 2") */
        if (sc->interrupt_mode != INTR_MODE_MSI) {
            break;
        }

        if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
            (msi_count < 1)) {
            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
            break;
        }

        /* ask for a single MSI vector */
        num_requested = 1;

        BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);

        num_allocated = num_requested;
        if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
            BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
            break;
        }

        if (num_allocated != 1) { /* possible? */
            BLOGE(sc, "MSI allocation is not 1!\n");
            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
            pci_release_msi(sc->dev);
            break;
        }

        BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
              num_requested, num_allocated);

        /* best effort so use the number of vectors allocated to us */
        sc->intr_count = num_allocated;
        sc->num_queues = num_allocated;

        rid = 1; /* initial resource identifier */

        sc->intr[0].rid = rid;

        if ((sc->intr[0].resource =
             bus_alloc_resource_any(sc->dev,
                                    SYS_RES_IRQ,
                                    &sc->intr[0].rid,
                                    RF_ACTIVE)) == NULL) {
            BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
            sc->intr_count = 0;
            sc->num_queues = 0;
            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
            pci_release_msi(sc->dev);
            break;
        }

        BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
    } while (0);

    do { /* try allocating INTx vector resources */
        if (sc->interrupt_mode != INTR_MODE_INTX) {
            break;
        }

        BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");

        /* only one vector for INTx */
        sc->intr_count = 1;
        sc->num_queues = 1;

        rid = 0; /* initial resource identifier */

        sc->intr[0].rid = rid;

        if ((sc->intr[0].resource =
             bus_alloc_resource_any(sc->dev,
                                    SYS_RES_IRQ,
                                    &sc->intr[0].rid,
                                    (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
            BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
            sc->intr_count = 0;
            sc->num_queues = 0;
            sc->interrupt_mode = -1; /* Failed! */
            break;
        }

        BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
    } while (0);

    if (sc->interrupt_mode == -1) {
        BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
        rc = 1;
    } else {
        BLOGD(sc, DBG_LOAD,
              "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
              sc->interrupt_mode, sc->num_queues);
        rc = 0;
    }

    return (rc);
}
/*
 * Tear down interrupt handlers and the per-queue / slowpath
 * taskqueues: detach the IRQ handlers, drain all outstanding fastpath
 * tasks, then free the taskqueues.
 */
static void
bxe_interrupt_detach(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int i;

    /* release interrupt resources */
    for (i = 0; i < sc->intr_count; i++) {
        if (sc->intr[i].resource && sc->intr[i].tag) {
            BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
            bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
        }
    }

    /*
     * Fix: drain every queue's tasks first, then free all taskqueues
     * in a second pass.  The previous code nested the free loop inside
     * the drain loop and reused the index variable 'i', so the first
     * outer iteration freed every taskqueue and terminated the outer
     * loop — queues other than 0 never had their tx_timeout_task
     * cancelled/drained before the free.
     */
    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        if (fp->tq) {
            taskqueue_drain(fp->tq, &fp->tq_task);
            taskqueue_drain(fp->tq, &fp->tx_task);
            while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
                NULL))
                taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
        }
    }

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        if (fp->tq != NULL) {
            taskqueue_free(fp->tq);
            fp->tq = NULL;
        }
    }

    if (sc->sp_tq) {
        taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
        taskqueue_free(sc->sp_tq);
        sc->sp_tq = NULL;
    }
}
/*
* Enables interrupts and attach to the ISR.
*
* When using multiple MSI/MSI-X vectors the first vector
* is used for slowpath operations while all remaining
* vectors are used for fastpath operations. If only a
* single MSI/MSI-X vector is used (SINGLE_ISR) then the
* ISR must look for both slowpath and fastpath completions.
*/
static int
bxe_interrupt_attach(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int rc = 0;
    int i;

    /* create and start the slowpath taskqueue */
    snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
             "bxe%d_sp_tq", sc->unit);
    TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
    /* NOTE(review): taskqueue_create(..., M_NOWAIT, ...) may return
     * NULL and the result is used unchecked here and below — confirm
     * whether allocation failure needs handling at this point. */
    sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT,
                                 taskqueue_thread_enqueue,
                                 &sc->sp_tq);
    taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
                            "%s", sc->sp_tq_name);

    /* create and start one taskqueue per fastpath queue */
    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        snprintf(fp->tq_name, sizeof(fp->tq_name),
                 "bxe%d_fp%d_tq", sc->unit, i);
        NET_TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
        TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
        fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
                                  taskqueue_thread_enqueue,
                                  &fp->tq);
        TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0,
                          bxe_tx_mq_start_deferred, fp);
        taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
                                "%s", fp->tq_name);
    }

    /* setup interrupt handlers */
    if (sc->interrupt_mode == INTR_MODE_MSIX) {
        BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");

        /*
         * Setup the interrupt handler. Note that we pass the driver instance
         * to the interrupt handler for the slowpath.
         */
        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
                                 (INTR_TYPE_NET | INTR_MPSAFE),
                                 NULL, bxe_intr_sp, sc,
                                 &sc->intr[0].tag)) != 0) {
            BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
            goto bxe_interrupt_attach_exit;
        }

        bus_describe_intr(sc->dev, sc->intr[0].resource,
                          sc->intr[0].tag, "sp");

        /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */

        /* initialize the fastpath vectors (note the first was used for sp) */
        for (i = 0; i < sc->num_queues; i++) {
            fp = &sc->fp[i];
            BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));

            /*
             * Setup the interrupt handler. Note that we pass the
             * fastpath context to the interrupt handler in this
             * case.
             */
            if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
                                     (INTR_TYPE_NET | INTR_MPSAFE),
                                     NULL, bxe_intr_fp, fp,
                                     &sc->intr[i + 1].tag)) != 0) {
                BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
                      (i + 1), rc);
                goto bxe_interrupt_attach_exit;
            }

            bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
                              sc->intr[i + 1].tag, "fp%02d", i);

            /* bind the fastpath instance to a cpu */
            if (sc->num_queues > 1) {
                bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
            }

            fp->state = BXE_FP_STATE_IRQ;
        }
    } else if (sc->interrupt_mode == INTR_MODE_MSI) {
        BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");

        /*
         * Setup the interrupt handler. Note that we pass the
         * driver instance to the interrupt handler which
         * will handle both the slowpath and fastpath.
         */
        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
                                 (INTR_TYPE_NET | INTR_MPSAFE),
                                 NULL, bxe_intr_legacy, sc,
                                 &sc->intr[0].tag)) != 0) {
            BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
            goto bxe_interrupt_attach_exit;
        }
    } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
        BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");

        /*
         * Setup the interrupt handler. Note that we pass the
         * driver instance to the interrupt handler which
         * will handle both the slowpath and fastpath.
         */
        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
                                 (INTR_TYPE_NET | INTR_MPSAFE),
                                 NULL, bxe_intr_legacy, sc,
                                 &sc->intr[0].tag)) != 0) {
            BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
            goto bxe_interrupt_attach_exit;
        }
    }

bxe_interrupt_attach_exit:
    return (rc);
}
static int bxe_init_hw_common_chip(struct bxe_softc *sc);
static int bxe_init_hw_common(struct bxe_softc *sc);
static int bxe_init_hw_port(struct bxe_softc *sc);
static int bxe_init_hw_func(struct bxe_softc *sc);
static void bxe_reset_common(struct bxe_softc *sc);
static void bxe_reset_port(struct bxe_softc *sc);
static void bxe_reset_func(struct bxe_softc *sc);
static int bxe_gunzip_init(struct bxe_softc *sc);
static void bxe_gunzip_end(struct bxe_softc *sc);
static int bxe_init_firmware(struct bxe_softc *sc);
static void bxe_release_firmware(struct bxe_softc *sc);
/*
 * Driver callback table plugged into the ecore function state machine.
 * ecore_func_state_change() dispatches hardware init/reset phases, gunzip
 * buffer management and firmware load/release through these hooks.
 */
static struct
ecore_func_sp_drv_ops bxe_func_sp_drv = {
    .init_hw_cmn_chip = bxe_init_hw_common_chip,
    .init_hw_cmn      = bxe_init_hw_common,
    .init_hw_port     = bxe_init_hw_port,
    .init_hw_func     = bxe_init_hw_func,
    .reset_hw_cmn     = bxe_reset_common,
    .reset_hw_port    = bxe_reset_port,
    .reset_hw_func    = bxe_reset_func,
    .gunzip_init      = bxe_gunzip_init,
    .gunzip_end       = bxe_gunzip_end,
    .init_fw          = bxe_init_firmware,
    .release_fw       = bxe_release_firmware,
};
/*
 * Initialize the ecore function object for this PF, wiring in the slowpath
 * ramrod data buffers and the driver callback table (bxe_func_sp_drv).
 * Clears dmae_ready first since DMAE cannot be used until HW init completes.
 */
static void
bxe_init_func_obj(struct bxe_softc *sc)
{
    sc->dmae_ready = 0;

    ecore_init_func_obj(sc,
                        &sc->func_obj,
                        BXE_SP(sc, func_rdata),
                        BXE_SP_MAPPING(sc, func_rdata),
                        BXE_SP(sc, func_afex_rdata),
                        BXE_SP_MAPPING(sc, func_afex_rdata),
                        &bxe_func_sp_drv);
}
/*
 * Kick off hardware initialization for the given load phase by driving the
 * ecore function state machine with an HW_INIT command.  Depending on the
 * load_code this eventually lands in bxe_init_hw_common(),
 * bxe_init_hw_port() or bxe_init_hw_func() via bxe_func_sp_drv.
 * Returns the ecore state-change result (0 on success).
 */
static int
bxe_init_hw(struct bxe_softc *sc,
            uint32_t load_code)
{
    struct ecore_func_state_params params = { NULL };

    /* prepare the parameters for function state transitions */
    params.f_obj = &sc->func_obj;
    params.cmd = ECORE_F_CMD_HW_INIT;
    params.params.hw_init.load_phase = load_code;
    bit_set(&params.ramrod_flags, RAMROD_COMP_WAIT);

    return (ecore_func_state_change(sc, &params));
}
/*
 * Fill 'len' bytes of device memory starting at 'addr' with the byte/dword
 * pattern 'fill'.  Uses dword writes when both address and length are
 * 4-byte aligned, otherwise falls back to byte-wide writes.
 */
static void
bxe_fill(struct bxe_softc *sc,
         uint32_t addr,
         int fill,
         uint32_t len)
{
    uint32_t off;

    if ((len % 4) || (addr % 4)) {
        /* unaligned region - write one byte at a time */
        for (off = 0; off < len; off++) {
            REG_WR8(sc, (addr + off), fill);
        }
        return;
    }

    /* aligned region - write one dword at a time */
    for (off = 0; off < len; off += 4) {
        REG_WR(sc, (addr + off), fill);
    }
}
/*
 * Write a fastpath status block data structure into CSTORM internal memory
 * for the given FW status block id.  data_size is in dwords.
 */
static void
bxe_wr_fp_sb_data(struct bxe_softc *sc,
                  int fw_sb_id,
                  uint32_t *sb_data_p,
                  uint32_t data_size)
{
    uint32_t base;
    int i;

    base = (BAR_CSTRORM_INTMEM +
            CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id));

    for (i = 0; i < data_size; i++) {
        REG_WR(sc, (base + (sizeof(uint32_t) * i)), sb_data_p[i]);
    }
}
/*
 * Disable and clear a fastpath status block in firmware memory: mark the
 * SB data structure as SB_DISABLED (chip-revision specific layout), write
 * it to CSTORM, then zero the status block and sync block areas.
 */
static void
bxe_zero_fp_sb(struct bxe_softc *sc,
               int fw_sb_id)
{
    struct hc_status_block_data_e2 sb_data_e2;
    struct hc_status_block_data_e1x sb_data_e1x;
    uint32_t *sb_data_p;
    uint32_t data_size = 0;

    if (!CHIP_IS_E1x(sc)) {
        /* E2/E3 layout */
        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
        sb_data_e2.common.state = SB_DISABLED;
        sb_data_e2.common.p_func.vf_valid = FALSE;
        sb_data_p = (uint32_t *)&sb_data_e2;
        data_size = (sizeof(struct hc_status_block_data_e2) /
                     sizeof(uint32_t));
    } else {
        /* E1/E1H layout */
        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
        sb_data_e1x.common.state = SB_DISABLED;
        sb_data_e1x.common.p_func.vf_valid = FALSE;
        sb_data_p = (uint32_t *)&sb_data_e1x;
        data_size = (sizeof(struct hc_status_block_data_e1x) /
                     sizeof(uint32_t));
    }

    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);

    /* zero the actual status block and its sync line */
    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
             0, CSTORM_STATUS_BLOCK_SIZE);
    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
             0, CSTORM_SYNC_BLOCK_SIZE);
}
static void
bxe_wr_sp_sb_data(struct bxe_softc *sc,
struct hc_sp_status_block_data *sp_sb_data)
{
int i;
for (i = 0;
i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
i++) {
REG_WR(sc,
(BAR_CSTRORM_INTMEM +
CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
(i * sizeof(uint32_t))),
*((uint32_t *)sp_sb_data + i));
}
}
/*
 * Disable and clear this function's slowpath status block: write an
 * SB_DISABLED data structure, then zero the SP status block and sync
 * block regions in CSTORM internal memory.
 */
static void
bxe_zero_sp_sb(struct bxe_softc *sc)
{
    struct hc_sp_status_block_data sp_sb_data;

    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

    sp_sb_data.state           = SB_DISABLED;
    sp_sb_data.p_func.vf_valid = FALSE;

    bxe_wr_sp_sb_data(sc, &sp_sb_data);

    bxe_fill(sc,
             (BAR_CSTRORM_INTMEM +
              CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
              0, CSTORM_SP_STATUS_BLOCK_SIZE);
    bxe_fill(sc,
             (BAR_CSTRORM_INTMEM +
              CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
              0, CSTORM_SP_SYNC_BLOCK_SIZE);
}
/*
 * Initialize one host-coalescing state machine entry of a non-default
 * status block: bind it to the given IGU SB/segment and disable timer
 * based interrupt generation (timer value and expiry set to all-ones).
 */
static void
bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
                             int igu_sb_id,
                             int igu_seg_id)
{
    hc_sm->time_to_expire = 0xFFFFFFFF;
    hc_sm->timer_value    = 0xFF;
    hc_sm->igu_seg_id     = igu_seg_id;
    hc_sm->igu_sb_id      = igu_sb_id;
}
static void
bxe_map_sb_state_machines(struct hc_index_data *index_data)
{
/* zero out state machine indices */
/* rx indices */
index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
/* tx indices */
index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
/* map indices */
/* rx indices */
index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
(SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
/* tx indices */
index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
(SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
(SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
(SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
(SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
}
/*
 * Enable and program a fastpath (non-default) status block in firmware
 * memory.  The SB is first disabled/zeroed, then an SB_ENABLED data
 * structure (E2 or E1x layout) describing the host DMA address, owning
 * PF/VF and state machine bindings is written to CSTORM.
 *
 * busaddr  - host physical address of the status block
 * vfid     - VF id to program (BXE_VF_ID_INVALID when vf_valid is FALSE)
 * vf_valid - whether vfid is meaningful
 * fw_sb_id - firmware status block id
 * igu_sb_id - IGU status block id bound to the SB state machines
 */
static void
bxe_init_sb(struct bxe_softc *sc,
            bus_addr_t       busaddr,
            int              vfid,
            uint8_t          vf_valid,
            int              fw_sb_id,
            int              igu_sb_id)
{
    struct hc_status_block_data_e2 sb_data_e2;
    struct hc_status_block_data_e1x sb_data_e1x;
    struct hc_status_block_sm *hc_sm_p;
    uint32_t *sb_data_p;
    int igu_seg_id;
    int data_size;

    if (CHIP_INT_MODE_IS_BC(sc)) {
        igu_seg_id = HC_SEG_ACCESS_NORM;
    } else {
        igu_seg_id = IGU_SEG_ACCESS_NORM;
    }

    /* disable the SB before re-programming it */
    bxe_zero_fp_sb(sc, fw_sb_id);

    if (!CHIP_IS_E1x(sc)) {
        /* E2/E3 layout */
        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
        sb_data_e2.common.state = SB_ENABLED;
        sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
        sb_data_e2.common.p_func.vf_id = vfid;
        sb_data_e2.common.p_func.vf_valid = vf_valid;
        sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
        sb_data_e2.common.same_igu_sb_1b = TRUE;
        sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
        sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
        hc_sm_p = sb_data_e2.common.state_machine;
        sb_data_p = (uint32_t *)&sb_data_e2;
        data_size = (sizeof(struct hc_status_block_data_e2) /
                     sizeof(uint32_t));
        bxe_map_sb_state_machines(sb_data_e2.index_data);
    } else {
        /* E1/E1H layout - no VFs */
        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
        sb_data_e1x.common.state = SB_ENABLED;
        sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
        sb_data_e1x.common.p_func.vf_id = 0xff;
        sb_data_e1x.common.p_func.vf_valid = FALSE;
        sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
        sb_data_e1x.common.same_igu_sb_1b = TRUE;
        sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
        sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
        hc_sm_p = sb_data_e1x.common.state_machine;
        sb_data_p = (uint32_t *)&sb_data_e1x;
        data_size = (sizeof(struct hc_status_block_data_e1x) /
                     sizeof(uint32_t));
        bxe_map_sb_state_machines(sb_data_e1x.index_data);
    }

    /* bind both RX and TX state machines to the same IGU SB */
    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);

    BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);

    /* write indices to HW - PCI guarantees endianness of regpairs */
    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
}
/*
 * Return the queue-zone id for a fastpath: on E1x chips the client id is
 * offset by the per-port client range, on newer chips it is used directly.
 */
static inline uint8_t
bxe_fp_qzone_id(struct bxe_fastpath *fp)
{
    return (CHIP_IS_E1x(fp->sc) ?
            (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H) :
            fp->cl_id);
}
/*
 * Compute the USTORM internal-memory offset where this fastpath's RX
 * producers are written (layout differs between E1x and E2/E3 chips).
 */
static inline uint32_t
bxe_rx_ustorm_prods_offset(struct bxe_softc    *sc,
                           struct bxe_fastpath *fp)
{
    if (CHIP_IS_E1x(sc)) {
        return (BAR_USTRORM_INTMEM +
                USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id));
    }

    return (BAR_USTRORM_INTMEM +
            USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id));
}
/*
 * Initialize fastpath 'idx': assign IGU/FW status block ids and client id,
 * set up status block index shortcuts, program the SB in firmware, and
 * create the ecore queue and MAC classification objects for this queue.
 * For VFs only the software state is set up (no SB/object programming).
 */
static void
bxe_init_eth_fp(struct bxe_softc *sc,
                int              idx)
{
    struct bxe_fastpath *fp = &sc->fp[idx];
    uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
    unsigned long q_type = 0;
    int cos;

    fp->sc    = sc;
    fp->index = idx;

    /* CNIC (if supported) reserves the first SB after the base */
    fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
    fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));

    fp->cl_id = (CHIP_IS_E1x(sc)) ?
                    (SC_L_ID(sc) + idx) :
                    /* want client ID same as IGU SB ID for non-E1 */
                    fp->igu_sb_id;
    fp->cl_qzone_id = bxe_fp_qzone_id(fp);

    /* setup sb indices */
    if (!CHIP_IS_E1x(sc)) {
        fp->sb_index_values  = fp->status_block.e2_sb->sb.index_values;
        fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
    } else {
        fp->sb_index_values  = fp->status_block.e1x_sb->sb.index_values;
        fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
    }

    /* init shortcut */
    fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);

    fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];

    /*
     * XXX If multiple CoS is ever supported then each fastpath structure
     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
     */
    for (cos = 0; cos < sc->max_cos; cos++) {
        cids[cos] = idx;
    }
    fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];

    /* nothing more for a VF to do */
    if (IS_VF(sc)) {
        return;
    }

    bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
                fp->fw_sb_id, fp->igu_sb_id);

    bxe_update_fp_sb_idx(fp);

    /* Configure Queue State object */
    bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
    bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);

    ecore_init_queue_obj(sc,
                         &sc->sp_objs[idx].q_obj,
                         fp->cl_id,
                         cids,
                         sc->max_cos,
                         SC_FUNC(sc),
                         BXE_SP(sc, q_rdata),
                         BXE_SP_MAPPING(sc, q_rdata),
                         q_type);

    /* configure classification DBs */
    ecore_init_mac_obj(sc,
                       &sc->sp_objs[idx].mac_obj,
                       fp->cl_id,
                       idx,
                       SC_FUNC(sc),
                       BXE_SP(sc, mac_rdata),
                       BXE_SP_MAPPING(sc, mac_rdata),
                       ECORE_FILTER_MAC_PENDING,
                       &sc->sp_state,
                       ECORE_OBJ_TYPE_RX_TX,
                       &sc->macs_pool);

    BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
          idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
}
/*
 * Publish new RX BD/CQE/SGE producer values to the firmware by writing the
 * ustorm producer structure for this fastpath.  The write barriers around
 * the register writes are required for correctness (see inline comments);
 * do not reorder these statements.
 */
static inline void
bxe_update_rx_prod(struct bxe_softc    *sc,
                   struct bxe_fastpath *fp,
                   uint16_t            rx_bd_prod,
                   uint16_t            rx_cq_prod,
                   uint16_t            rx_sge_prod)
{
    struct ustorm_eth_rx_producers rx_prods = { 0 };
    uint32_t i;

    /* update producers */
    rx_prods.bd_prod  = rx_bd_prod;
    rx_prods.cqe_prod = rx_cq_prod;
    rx_prods.sge_prod = rx_sge_prod;

    /*
     * Make sure that the BD and SGE data is updated before updating the
     * producers since FW might read the BD/SGE right after the producer
     * is updated.
     * This is only applicable for weak-ordered memory model archs such
     * as IA-64. The following barrier is also mandatory since the FW
     * assumes BDs must have buffers.
     */
    wmb();

    for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
        REG_WR(sc,
               (fp->ustorm_rx_prods_offset + (i * 4)),
               ((uint32_t *)&rx_prods)[i]);
    }

    wmb(); /* keep prod updates ordered */

    BLOGD(sc, DBG_RX,
          "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
          fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
}
/*
 * Activate the RX rings of all queues by publishing their initial BD/CQE/
 * SGE producers to the firmware.  For E1 chips the first queue's RCQ
 * address is additionally written to a USTORM workaround location (done
 * once, for queue 0 only).
 */
static void
bxe_init_rx_rings(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];

        fp->rx_bd_cons = 0;

        /*
         * Activate the BD ring...
         * Warning, this will generate an interrupt (to the TSTORM)
         * so this can only be done after the chip is initialized
         */
        bxe_update_rx_prod(sc, fp,
                           fp->rx_bd_prod,
                           fp->rx_cq_prod,
                           fp->rx_sge_prod);

        /* the workaround below applies to queue 0 only */
        if (i != 0) {
            continue;
        }

        if (CHIP_IS_E1(sc)) {
            /* program the low/high halves of fp[0]'s RCQ address */
            REG_WR(sc,
                   (BAR_USTRORM_INTMEM +
                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
                   U64_LO(fp->rcq_dma.paddr));
            REG_WR(sc,
                   (BAR_USTRORM_INTMEM +
                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
                   U64_HI(fp->rcq_dma.paddr));
        }
    }
}
/*
 * Reset one fastpath's TX ring state: mark the doorbell as a TX doorbell
 * and zero all producer/consumer indices and the TX packet counter.
 */
static void
bxe_init_tx_ring_one(struct bxe_fastpath *fp)
{
    SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
    fp->tx_db.data.zero_fill1 = 0;
    fp->tx_db.data.prod = 0;

    fp->tx_pkt_prod = 0;
    fp->tx_pkt_cons = 0;
    fp->tx_bd_prod = 0;
    fp->tx_bd_cons = 0;
    fp->eth_q_stats.tx_pkts = 0;
}
/*
 * Reset the TX ring state of every active queue.
 */
static inline void
bxe_init_tx_rings(struct bxe_softc *sc)
{
    int q;

    for (q = 0; q < sc->num_queues; q++) {
        bxe_init_tx_ring_one(&sc->fp[q]);
    }
}
/*
 * Initialize the default (slowpath) status block: read the attention group
 * masks from the AEU enable registers, program the attention message
 * address into the HC or IGU block, then write an SB_ENABLED slowpath SB
 * data structure and acknowledge/enable the default SB interrupt.
 */
static void
bxe_init_def_sb(struct bxe_softc *sc)
{
    struct host_sp_status_block *def_sb = sc->def_sb;
    bus_addr_t mapping = sc->def_sb_dma.paddr;
    int igu_sp_sb_index;
    int igu_seg_id;
    int port = SC_PORT(sc);
    int func = SC_FUNC(sc);
    int reg_offset, reg_offset_en5;
    uint64_t section;
    int index, sindex;
    struct hc_sp_status_block_data sp_sb_data;

    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

    if (CHIP_INT_MODE_IS_BC(sc)) {
        igu_sp_sb_index = DEF_SB_IGU_ID;
        igu_seg_id = HC_SEG_ACCESS_DEF;
    } else {
        igu_sp_sb_index = sc->igu_dsb_id;
        igu_seg_id = IGU_SEG_ACCESS_DEF;
    }

    /* attentions */
    section = ((uint64_t)mapping +
               offsetof(struct host_sp_status_block, atten_status_block));
    def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
    sc->attn_state = 0;

    reg_offset = (port) ?
                     MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
    reg_offset_en5 = (port) ?
                         MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
                         MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;

    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
        /* take care of sig[0]..sig[4] */
        for (sindex = 0; sindex < 4; sindex++) {
            sc->attn_group[index].sig[sindex] =
                REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
        }

        if (!CHIP_IS_E1x(sc)) {
            /*
             * enable5 is separate from the rest of the registers,
             * and the address skip is 4 and not 16 between the
             * different groups
             */
            sc->attn_group[index].sig[4] =
                REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
        } else {
            sc->attn_group[index].sig[4] = 0;
        }
    }

    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        /* HC-based chips: attention message address lives in the HC */
        reg_offset = (port) ?
                         HC_REG_ATTN_MSG1_ADDR_L :
                         HC_REG_ATTN_MSG0_ADDR_L;
        REG_WR(sc, reg_offset, U64_LO(section));
        REG_WR(sc, (reg_offset + 4), U64_HI(section));
    } else if (!CHIP_IS_E1x(sc)) {
        /* IGU-based chips */
        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
    }

    section = ((uint64_t)mapping +
               offsetof(struct host_sp_status_block, sp_sb));

    /* disable the SP SB before re-enabling it below */
    bxe_zero_sp_sb(sc);

    /* PCI guarantees endianness of regpair */
    sp_sb_data.state           = SB_ENABLED;
    sp_sb_data.host_sb_addr.lo = U64_LO(section);
    sp_sb_data.host_sb_addr.hi = U64_HI(section);
    sp_sb_data.igu_sb_id       = igu_sp_sb_index;
    sp_sb_data.igu_seg_id      = igu_seg_id;
    sp_sb_data.p_func.pf_id    = func;
    sp_sb_data.p_func.vnic_id  = SC_VN(sc);
    sp_sb_data.p_func.vf_id    = 0xff;

    bxe_wr_sp_sb_data(sc, &sp_sb_data);

    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}
/*
 * Initialize the slowpath (SPQ) ring software state: reset the credit
 * counter and producer index and set up the producer/last BD shortcuts.
 */
static void
bxe_init_sp_ring(struct bxe_softc *sc)
{
    atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
    sc->spq_prod_idx = 0;
    /* slowpath consumer is tracked via the default SB index */
    sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
    sc->spq_prod_bd = sc->spq;
    sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
}
/*
 * Initialize the event queue (EQ) ring: chain the pages together by
 * pointing each page's last element at the next page (wrapping to page 0
 * from the last page), then reset producer/consumer state and the EQ
 * credit counter.
 */
static void
bxe_init_eq_ring(struct bxe_softc *sc)
{
    union event_ring_elem *elem;
    int i;

    for (i = 1; i <= NUM_EQ_PAGES; i++) {
        /* last element of page i-1 points at page (i % NUM_EQ_PAGES) */
        elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];

        elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
                                                 BCM_PAGE_SIZE *
                                                 (i % NUM_EQ_PAGES)));
        elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
                                                 BCM_PAGE_SIZE *
                                                 (i % NUM_EQ_PAGES)));
    }

    sc->eq_cons    = 0;
    sc->eq_prod    = NUM_EQ_DESC;
    sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];

    atomic_store_rel_long(&sc->eq_spq_left,
                          (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
                               NUM_EQ_DESC) - 1));
}
static void
bxe_init_internal_common(struct bxe_softc *sc)
{
int i;
/*
* Zero this manually as its initialization is currently missing
* in the initTool.
*/
for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
REG_WR(sc,
(BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
0);
}
if (!CHIP_IS_E1x(sc)) {
REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
}
}
/*
 * Phase-dependent internal memory initialization.  The switch deliberately
 * falls through: a COMMON load also performs the PORT and FUNCTION phases,
 * and a PORT load also performs the FUNCTION phase (both of which are
 * currently no-ops here; per-function memory is set up in bxe_pf_init()).
 */
static void
bxe_init_internal(struct bxe_softc *sc,
                  uint32_t         load_code)
{
    switch (load_code) {
    case FW_MSG_CODE_DRV_LOAD_COMMON:
    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
        bxe_init_internal_common(sc);
        /* no break */

    case FW_MSG_CODE_DRV_LOAD_PORT:
        /* nothing to do */
        /* no break */

    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
        /* internal memory per function is initialized inside bxe_pf_init */
        break;

    default:
        BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
        break;
    }
}
/*
 * Write a function common-config structure into TSTORM internal memory
 * for the function identified by abs_fid.
 */
static void
storm_memset_func_cfg(struct bxe_softc                         *sc,
                      struct tstorm_eth_function_common_config *tcfg,
                      uint16_t                                  abs_fid)
{
    ecore_storm_memset_struct(sc,
                              (BAR_TSTRORM_INTMEM +
                               TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid)),
                              sizeof(struct tstorm_eth_function_common_config),
                              (uint32_t *)tcfg);
}
/*
 * Enable a function in the firmware: write the (zeroed) common config on
 * E1x, set up the VF-to-PF mapping and enable flag, and program the SPQ
 * address/producer if the FUNC_FLG_SPQ flag is set in the init params.
 */
static void
bxe_func_init(struct bxe_softc            *sc,
              struct bxe_func_init_params *p)
{
    struct tstorm_eth_function_common_config tcfg = { 0 };

    if (CHIP_IS_E1x(sc)) {
        storm_memset_func_cfg(sc, &tcfg, p->func_id);
    }

    /* Enable the function in the FW */
    storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
    storm_memset_func_en(sc, p->func_id, 1);

    /* spq */
    if (p->func_flgs & FUNC_FLG_SPQ) {
        storm_memset_spq_addr(sc, p->spq_map, p->func_id);
        REG_WR(sc,
               (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
               p->spq_prod);
    }
}
/*
* Calculates the sum of vn_min_rates.
* It's needed for further normalizing of the min_rates.
* Returns:
* sum of vn_min_rates.
* or
* 0 - if all the min_rates are 0.
* In the later case fainess algorithm should be deactivated.
* If all min rates are not zero then those that are zeroes will be set to 1.
*/
/*
 * Compute the per-VN minimum rates for congestion management.  Hidden VNs
 * get a min rate of 0; a configured rate of 0 is replaced by DEF_MIN_RATE.
 * Fairness is disabled in the cmng flags when ETS is enabled or when every
 * configured min rate is zero.
 *
 * Fix: the "all MIN values are zeroes" debug message was misspelled
 * ("Fariness"); corrected to "Fairness" to match the ETS message above it.
 */
static void
bxe_calc_vn_min(struct bxe_softc       *sc,
                struct cmng_init_input *input)
{
    uint32_t vn_cfg;
    uint32_t vn_min_rate;
    int all_zero = 1;
    int vn;

    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
        vn_cfg = sc->devinfo.mf_info.mf_config[vn];

        /* min BW is configured in units of 100Mbps */
        vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
                        FUNC_MF_CFG_MIN_BW_SHIFT) * 100);

        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
            /* skip hidden VNs */
            vn_min_rate = 0;
        } else if (!vn_min_rate) {
            /* If min rate is zero - set it to default */
            vn_min_rate = DEF_MIN_RATE;
        } else {
            all_zero = 0;
        }

        input->vnic_min_rate[vn] = vn_min_rate;
    }

    /* if ETS or all min rates are zeros - disable fairness */
    if (BXE_IS_ETS_ENABLED(sc)) {
        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
        BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
    } else if (all_zero) {
        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
        BLOGD(sc, DBG_LOAD,
              "Fairness disabled (all MIN values are zeroes)\n");
    } else {
        input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
    }
}
/*
 * Extract the max-BW field from a multi-function config word.  A value of
 * zero is treated as "unconfigured" and replaced with 100 so callers never
 * scale by zero.
 */
static inline uint16_t
bxe_extract_max_cfg(struct bxe_softc *sc,
                    uint32_t         mf_cfg)
{
    uint16_t cfg;

    cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
    if (cfg == 0) {
        BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
        cfg = 100;
    }

    return (cfg);
}
/*
 * Compute the max rate for one VN.  Hidden VNs get 0; in switch-independent
 * MF mode the configured value is a percentage of link speed, while in
 * switch-dependent modes it is an absolute value in 100Mb units.
 */
static void
bxe_calc_vn_max(struct bxe_softc       *sc,
                int                    vn,
                struct cmng_init_input *input)
{
    uint16_t vn_max_rate;
    uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
    uint32_t max_cfg;

    if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
        vn_max_rate = 0;
    } else {
        max_cfg = bxe_extract_max_cfg(sc, vn_cfg);

        if (IS_MF_SI(sc)) {
            /* max_cfg in percents of linkspeed */
            vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
        } else { /* SD modes */
            /* max_cfg is absolute in 100Mb units */
            vn_max_rate = (max_cfg * 100);
        }
    }

    BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);

    input->vnic_max_rate[vn] = vn_max_rate;
}
/*
 * Initialize the congestion management parameters from the current link
 * rate and (optionally re-read) MF configuration.  Only CMNG_FNS_MINMAX is
 * currently implemented; any other cmng_type leaves rate shaping and
 * fairness disabled.
 *
 * read_cfg  - when true, refresh the MF config from shmem first
 * cmng_type - result of bxe_get_cmng_fns_mode()
 */
static void
bxe_cmng_fns_init(struct bxe_softc *sc,
                  uint8_t          read_cfg,
                  uint8_t          cmng_type)
{
    struct cmng_init_input input;
    int vn;

    memset(&input, 0, sizeof(struct cmng_init_input));

    input.port_rate = sc->link_vars.line_speed;

    if (cmng_type == CMNG_FNS_MINMAX) {
        /* read mf conf from shmem */
        if (read_cfg) {
            bxe_read_mf_cfg(sc);
        }

        /* get VN min rate and enable fairness if not 0 */
        bxe_calc_vn_min(sc, &input);

        /* get VN max rate - only the PMF programs per-VN maximums */
        if (sc->port.pmf) {
            for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
                bxe_calc_vn_max(sc, vn, &input);
            }
        }

        /* always enable rate shaping and fairness */
        input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

        ecore_init_cmng(&input, &sc->cmng);
        return;
    }

    /* rate shaping and fairness are disabled */
    BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
}
/*
 * Select the congestion management mode: min/max rate shaping applies only
 * to multi-function configurations on real (non-emulation/FPGA) silicon.
 */
static int
bxe_get_cmng_fns_mode(struct bxe_softc *sc)
{
    if (!CHIP_REV_IS_SLOW(sc) && IS_MF(sc)) {
        return (CMNG_FNS_MINMAX);
    }

    return (CMNG_FNS_NONE);
}
/*
 * Write the congestion management configuration into XSTORM internal
 * memory: the per-port struct first, then per-VN rate shaping and
 * fairness variables for every VN on this port.
 */
static void
storm_memset_cmng(struct bxe_softc *sc,
                  struct cmng_init *cmng,
                  uint8_t          port)
{
    int vn;
    int func;
    uint32_t addr;
    size_t size;

    addr = (BAR_XSTRORM_INTMEM +
            XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
    size = sizeof(struct cmng_struct_per_port);
    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);

    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
        func = func_by_vn(sc, vn);

        addr = (BAR_XSTRORM_INTMEM +
                XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
        size = sizeof(struct rate_shaping_vars_per_vn);
        ecore_storm_memset_struct(sc, addr, size,
                                  (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);

        addr = (BAR_XSTRORM_INTMEM +
                XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
        size = sizeof(struct fairness_vars_per_vn);
        ecore_storm_memset_struct(sc, addr, size,
                                  (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
    }
}
/*
 * Per-function firmware initialization: clear the IGU PF statistics
 * counters (non-E1x), enable the function in firmware with SPQ/stats/
 * leading flags, initialize congestion management with a provisional
 * 10Gbps link rate, and program the event queue data.
 */
static void
bxe_pf_init(struct bxe_softc *sc)
{
    struct bxe_func_init_params func_init = { 0 };
    struct event_ring_data eq_data = { { 0 } };
    uint16_t flags;

    if (!CHIP_IS_E1x(sc)) {
        /* reset IGU PF statistics: MSIX + ATTN */
        /* PF */
        REG_WR(sc,
               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
               0);
        /* ATTN */
        REG_WR(sc,
               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
                (BXE_IGU_STAS_MSG_PF_CNT * 4) +
                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
               0);
    }

    /* function setup flags */
    flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

    /*
     * This flag is relevant for E1x only.
     * E2 doesn't have a TPA configuration in a function level.
     */
    flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;

    func_init.func_flgs = flags;
    func_init.pf_id     = SC_FUNC(sc);
    func_init.func_id   = SC_FUNC(sc);
    func_init.spq_map   = sc->spq_dma.paddr;
    func_init.spq_prod  = sc->spq_prod_idx;

    bxe_func_init(sc, &func_init);

    memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));

    /*
     * Congestion management values depend on the link rate.
     * There is no active link so initial link rate is set to 10Gbps.
     * When the link comes up the congestion management values are
     * re-calculated according to the actual link rate.
     */
    sc->link_vars.line_speed = SPEED_10000;
    bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));

    /* Only the PMF sets the HW */
    if (sc->port.pmf) {
        storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
    }

    /* init Event Queue - PCI bus guarantees correct endianness */
    eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
    eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
    eq_data.producer     = sc->eq_prod;
    eq_data.index_id     = HC_SP_INDEX_EQ_CONS;
    eq_data.sb_id        = DEF_SB_ID;
    storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
}
/*
 * Enable interrupts through the HC (host coalescing) block for the
 * configured mode (MSI-X, MSI or INTx), then program the leading/trailing
 * edge registers on non-E1 chips.  The write/barrier ordering follows the
 * hardware programming sequence and must not be reordered.
 */
static void
bxe_hc_int_enable(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
    uint32_t val = REG_RD(sc, addr);
    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
                           (sc->intr_count == 1)) ? TRUE : FALSE;
    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;

    if (msix) {
        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0);
        val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        if (single_msix) {
            val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
        }
    } else if (msi) {
        val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
    } else {
        /* INTx */
        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                HC_CONFIG_0_REG_INT_LINE_EN_0 |
                HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        if (!CHIP_IS_E1(sc)) {
            /* write once with MSI/MSI-X enabled, then drop that bit for
             * the final write below */
            BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
                  val, port, addr);

            REG_WR(sc, addr, val);

            val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }
    }

    if (CHIP_IS_E1(sc)) {
        /* unmask all HC interrupt sources */
        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
    }

    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
          val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));

    REG_WR(sc, addr, val);

    /* ensure that HC_CONFIG is written before leading/trailing edge config */
    mb();

    if (!CHIP_IS_E1(sc)) {
        /* init leading/trailing edge */
        if (IS_MF(sc)) {
            val = (0xee0f | (1 << (SC_VN(sc) + 4)));
            if (sc->port.pmf) {
                /* enable nig and gpio3 attention */
                val |= 0x1100;
            }
        } else {
            val = 0xffff;
        }

        REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
        REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
    }

    /* make sure that interrupts are indeed enabled from here on */
    mb();
}
/*
 * Enable interrupts through the IGU block for the configured mode (MSI-X,
 * MSI or INTx): program the PF configuration register, acknowledge any
 * stale interrupt state when a single ISR is used, enable the function,
 * and set the leading/trailing edge latches.  Ordering matters.
 */
static void
bxe_igu_int_enable(struct bxe_softc *sc)
{
    uint32_t val;
    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
                           (sc->intr_count == 1)) ? TRUE : FALSE;
    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;

    val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);

    if (msix) {
        val &= ~(IGU_PF_CONF_INT_LINE_EN |
                 IGU_PF_CONF_SINGLE_ISR_EN);
        val |= (IGU_PF_CONF_MSI_MSIX_EN |
                IGU_PF_CONF_ATTN_BIT_EN);
        if (single_msix) {
            val |= IGU_PF_CONF_SINGLE_ISR_EN;
        }
    } else if (msi) {
        val &= ~IGU_PF_CONF_INT_LINE_EN;
        val |= (IGU_PF_CONF_MSI_MSIX_EN |
                IGU_PF_CONF_ATTN_BIT_EN |
                IGU_PF_CONF_SINGLE_ISR_EN);
    } else {
        /* INTx */
        val &= ~IGU_PF_CONF_MSI_MSIX_EN;
        val |= (IGU_PF_CONF_INT_LINE_EN |
                IGU_PF_CONF_ATTN_BIT_EN |
                IGU_PF_CONF_SINGLE_ISR_EN);
    }

    /* clean previous status - need to configure igu prior to ack */
    if ((!msix) || single_msix) {
        REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
        bxe_ack_int(sc);
    }

    val |= IGU_PF_CONF_FUNC_EN;

    BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
          val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));

    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);

    mb();

    /* init leading/trailing edge */
    if (IS_MF(sc)) {
        val = (0xee0f | (1 << (SC_VN(sc) + 4)));
        if (sc->port.pmf) {
            /* enable nig and gpio3 attention */
            val |= 0x1100;
        }
    } else {
        val = 0xffff;
    }

    REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
    REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);

    /* make sure that interrupts are indeed enabled from here on */
    mb();
}
static void
bxe_int_enable(struct bxe_softc *sc)
{
if (sc->devinfo.int_block == INT_BLOCK_HC) {
bxe_hc_int_enable(sc);
} else {
bxe_igu_int_enable(sc);
}
}
/*
 * Disable interrupts through the HC block.  On E1 the MSI/MSI-X enable
 * must stay set (only PCI config space may disable it), so the HC mask
 * register is cleared instead.  The final read-back verifies the write
 * reached the device.
 */
static void
bxe_hc_int_disable(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
    uint32_t val = REG_RD(sc, addr);

    /*
     * In E1 we must use only PCI configuration space to disable MSI/MSIX
     * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC
     * block
     */
    if (CHIP_IS_E1(sc)) {
        /*
         * Since IGU_PF_CONF_MSI_MSIX_EN still always on use mask register
         * to prevent from HC sending interrupts after we exit the function
         */
        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
    } else {
        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
    }

    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);

    /* flush all outstanding writes */
    mb();

    REG_WR(sc, addr, val);
    if (REG_RD(sc, addr) != val) {
        BLOGE(sc, "proper val not read from HC IGU!\n");
    }
}
/*
 * Disable interrupts through the IGU block by clearing the MSI/MSI-X,
 * INTx and attention enable bits in the PF configuration register.  The
 * read-back verifies the write reached the device.
 */
static void
bxe_igu_int_disable(struct bxe_softc *sc)
{
    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);

    val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
             IGU_PF_CONF_INT_LINE_EN |
             IGU_PF_CONF_ATTN_BIT_EN);

    BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);

    /* flush all outstanding writes */
    mb();

    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
    if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
        BLOGE(sc, "proper val not read from IGU!\n");
    }
}
static void
bxe_int_disable(struct bxe_softc *sc)
{
if (sc->devinfo.int_block == INT_BLOCK_HC) {
bxe_hc_int_disable(sc);
} else {
bxe_igu_int_disable(sc);
}
}
/*
 * Top-level NIC initialization for a load: set up all fastpaths and their
 * RX/TX rings, then (for a PF) initialize MOD_ABS interrupts, the default
 * SB, SP/EQ rings, internal memory, the PF itself and statistics, enable
 * interrupts and finally check for an SPIO5 (fan failure) attention.
 * The ordering of these steps is required by the hardware/firmware.
 */
static void
bxe_nic_init(struct bxe_softc *sc,
             int              load_code)
{
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        bxe_init_eth_fp(sc, i);
    }

    rmb(); /* ensure status block indices were read */

    bxe_init_rx_rings(sc);
    bxe_init_tx_rings(sc);

    if (IS_VF(sc)) {
        /* a VF is done after ring init */
        return;
    }

    /* initialize MOD_ABS interrupts */
    elink_init_mod_abs_int(sc, &sc->link_vars,
                           sc->devinfo.chip_id,
                           sc->devinfo.shmem_base,
                           sc->devinfo.shmem2_base,
                           SC_PORT(sc));

    bxe_init_def_sb(sc);
    bxe_update_dsb_idx(sc);
    bxe_init_sp_ring(sc);
    bxe_init_eq_ring(sc);
    bxe_init_internal(sc, load_code);
    bxe_pf_init(sc);
    bxe_stats_init(sc);

    /* flush all before enabling interrupts */
    mb();

    bxe_int_enable(sc);

    /* check for SPIO5 */
    bxe_attn_int_deasserted0(sc,
                             REG_RD(sc,
                                    (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
                                     SC_PORT(sc)*4)) &
                             AEU_INPUTS_ATTN_BITS_SPIO5);
}
/*
 * Create the ecore configuration objects used by the slowpath: RX mode,
 * multicast, MAC/VLAN credit pools and RSS.  Multicast rules are added to
 * the TX path as well when TX switching is enabled.
 */
static inline void
bxe_init_objs(struct bxe_softc *sc)
{
    /* mcast rules must be added to tx if tx switching is enabled */
    ecore_obj_type o_type =
        (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
                                         ECORE_OBJ_TYPE_RX;

    /* RX_MODE controlling object */
    ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);

    /* multicast configuration controlling object */
    ecore_init_mcast_obj(sc,
                         &sc->mcast_obj,
                         sc->fp[0].cl_id,
                         sc->fp[0].index,
                         SC_FUNC(sc),
                         SC_FUNC(sc),
                         BXE_SP(sc, mcast_rdata),
                         BXE_SP_MAPPING(sc, mcast_rdata),
                         ECORE_FILTER_MCAST_PENDING,
                         &sc->sp_state,
                         o_type);

    /* Setup CAM credit pools */
    ecore_init_mac_credit_pool(sc,
                               &sc->macs_pool,
                               SC_FUNC(sc),
                               CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
                                                 VNICS_PER_PATH(sc));

    ecore_init_vlan_credit_pool(sc,
                                &sc->vlans_pool,
                                SC_ABS_FUNC(sc) >> 1,
                                CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
                                                  VNICS_PER_PATH(sc));

    /* RSS configuration object */
    ecore_init_rss_config_obj(sc,
                              &sc->rss_conf_obj,
                              sc->fp[0].cl_id,
                              sc->fp[0].index,
                              SC_FUNC(sc),
                              SC_FUNC(sc),
                              BXE_SP(sc, rss_rdata),
                              BXE_SP_MAPPING(sc, rss_rdata),
                              ECORE_FILTER_RSS_CONF_PENDING,
                              &sc->sp_state, ECORE_OBJ_TYPE_RX);
}
/*
* Initialize the function. This must be called before sending CLIENT_SETUP
* for the first client.
*/
/*
 * Send the function START ramrod via the ecore state machine, passing the
 * MF mode, outer VLAN tag and network CoS mode.  Must complete before the
 * first CLIENT_SETUP.  Returns the ecore state-change result.
 */
static inline int
bxe_func_start(struct bxe_softc *sc)
{
    struct ecore_func_state_params func_params = { NULL };
    struct ecore_func_start_params *start_params = &func_params.params.start;

    /* Prepare parameters for function state transitions */
    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);

    func_params.f_obj = &sc->func_obj;
    func_params.cmd = ECORE_F_CMD_START;

    /* Function parameters */
    start_params->mf_mode     = sc->devinfo.mf_info.mf_mode;
    start_params->sd_vlan_tag = OVLAN(sc);

    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
        start_params->network_cos_mode = STATIC_COS;
    } else { /* CHIP_IS_E1X */
        start_params->network_cos_mode = FW_WRR;
    }

    //start_params->gre_tunnel_mode = 0;
    //start_params->gre_tunnel_rss  = 0;

    return (ecore_func_state_change(sc, &func_params));
}
/*
 * Transition the device between PCI power states D0 and D3hot via the
 * PMCSR register in the PM capability.  Returns 0 on success (including
 * when the device has no PM capability), -1 on an unsupported state.
 *
 * Fix: the D3hot path wrote PMCSR with a 4-byte pci_write_config() even
 * though PMCSR is a 16-bit register (and pmcsr was read with width 2, as
 * in the D0 path); a 4-byte write would also clobber the adjacent
 * PMCSR_BSE/Data bytes.  Use width 2 consistently.
 */
static int
bxe_set_power_state(struct bxe_softc *sc,
                    uint8_t          state)
{
    uint16_t pmcsr;

    /* If there is no power capability, silently succeed */
    if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
        BLOGW(sc, "No power capability\n");
        return (0);
    }

    pmcsr = pci_read_config(sc->dev,
                            (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
                            2);

    switch (state) {
    case PCI_PM_D0:
        /* set D0 and clear the PME status bit (write-1-to-clear) */
        pci_write_config(sc->dev,
                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
                         ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);

        if (pmcsr & PCIM_PSTAT_DMASK) {
            /* delay required during transition out of D3hot */
            DELAY(20000);
        }

        break;

    case PCI_PM_D3hot:
        /* XXX if there are other clients above don't shut down the power */

        /* don't shut down the power for emulation and FPGA */
        if (CHIP_REV_IS_SLOW(sc)) {
            return (0);
        }

        pmcsr &= ~PCIM_PSTAT_DMASK;
        pmcsr |= PCIM_PSTAT_D3;

        if (sc->wol) {
            pmcsr |= PCIM_PSTAT_PMEENABLE;
        }

        /* PMCSR is a 16-bit register; write width 2, not 4 */
        pci_write_config(sc->dev,
                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
                         pmcsr, 2);

        /*
         * No more memory access after this point until device is brought back
         * to D0 state.
         */

        break;

    default:
        BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
              state, pmcsr);
        return (-1);
    }

    return (0);
}
/*
 * Try once (non-blocking) to acquire the hardware resource lock for
 * 'resource' via this function's MISC driver-control register.
 * Returns TRUE if the lock was acquired, FALSE otherwise.
 */
static uint8_t
bxe_trylock_hw_lock(struct bxe_softc *sc,
                    uint32_t         resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);

    /* Validating that the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGD(sc, DBG_LOAD,
              "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
              resource, HW_LOCK_MAX_RESOURCE_VALUE);
        return (FALSE);
    }

    /* functions 0-5 and 6-7 use different control register banks */
    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
    } else {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
    }

    /* try to acquire the lock; read back to see if we got it */
    REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        return (TRUE);
    }

    BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
          "lock_status 0x%x resource_bit 0x%x\n", resource, func,
          lock_status, resource_bit);

    return (FALSE);
}
/*
 * Get the recovery leader resource id according to the engine this function
 * belongs to. Currently only 2 engines are supported.
 */
/*
 * Return the HW lock resource id used for recovery-leader election on
 * this function's engine (path 0 or path 1).
 */
static int
bxe_get_leader_lock_resource(struct bxe_softc *sc)
{
    return (SC_PATH(sc) ? HW_LOCK_RESOURCE_RECOVERY_LEADER_1 :
                          HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
}
/* try (non-blocking) to acquire the recovery-leader lock for this engine */
static uint8_t
bxe_trylock_leader_lock(struct bxe_softc *sc)
{
    int resource = bxe_get_leader_lock_resource(sc);

    return (bxe_trylock_hw_lock(sc, resource));
}
/* release the recovery-leader lock held by this engine */
static int
bxe_release_leader_lock(struct bxe_softc *sc)
{
    int resource = bxe_get_leader_lock_resource(sc);

    return (bxe_release_hw_lock(sc, resource));
}
/*
 * Close (close=TRUE) or open the "gates" #2, #3 and #4 that fence off
 * host doorbells/writes and interrupts while a recovery chip reset runs.
 */
static void
bxe_set_234_gates(struct bxe_softc *sc,
                  uint8_t          close)
{
    uint32_t val;

    /* gates #2 and #4a are closed/opened for "not E1" only */
    if (!CHIP_IS_E1(sc)) {
        /* #4: discard doorbells targeting the host */
        REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
        /* #2: discard internal writes targeting the host */
        REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
    }

    /* #3 */
    if (CHIP_IS_E1x(sc)) {
        /* prevent interrupts from HC on both ports */
        /*
         * NOTE(review): BLOCK_DISABLE is set on open and cleared on
         * close; this looks inverted at first glance but mirrors the
         * reference driver sequence -- confirm against the HC register
         * definition before changing.
         */
        val = REG_RD(sc, HC_REG_CONFIG_1);
        REG_WR(sc, HC_REG_CONFIG_1,
               (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
               (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));

        val = REG_RD(sc, HC_REG_CONFIG_0);
        REG_WR(sc, HC_REG_CONFIG_0,
               (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
               (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
    } else {
        /* Prevent incoming interrupts in IGU */
        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
        REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
               (!close) ?
               (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
               (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
    }

    BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
          close ? "closing" : "opening");

    /* ensure the gate writes are posted before the caller proceeds */
    wmb();
}
/*
 * Poll the IGU pending-bits status until it clears, giving up after
 * roughly one second (1000 polls x 1ms).  Returns 0 when clear, -1 on
 * timeout.
 */
static int
bxe_er_poll_igu_vq(struct bxe_softc *sc)
{
    uint32_t remaining;
    uint32_t pending = 0;

    for (remaining = 1000; remaining > 0; remaining--) {
        pending = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
        if (pending == 0) {
            break;
        }
        DELAY(1000);
    }

    if (remaining == 0) {
        BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pending);
        return (-1);
    }

    return (0);
}
#define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */
/*
 * Save the current shared-memory CLP 'magic' bit into *magic_val and
 * set the bit so the MF configuration survives the upcoming MCP reset.
 */
static void
bxe_clp_reset_prep(struct bxe_softc *sc,
                   uint32_t         *magic_val)
{
    uint32_t clp_mb = MFCFG_RD(sc, shared_mf_config.clp_mb);

    *magic_val = (clp_mb & SHARED_MF_CLP_MAGIC);
    MFCFG_WR(sc, shared_mf_config.clp_mb, (clp_mb | SHARED_MF_CLP_MAGIC));
}
/* restore the saved value of the CLP 'magic' bit after the MCP reset */
static void
bxe_clp_reset_done(struct bxe_softc *sc,
                   uint32_t         magic_val)
{
    uint32_t clp_mb = MFCFG_RD(sc, shared_mf_config.clp_mb);

    clp_mb &= ~SHARED_MF_CLP_MAGIC;
    clp_mb |= magic_val;
    MFCFG_WR(sc, shared_mf_config.clp_mb, clp_mb);
}
/*
 * Prepare for an MCP reset: preserve the CLP configuration (non-E1
 * chips) and clear this port's validity-map flags in shared memory.
 */
static void
bxe_reset_mcp_prep(struct bxe_softc *sc,
                   uint32_t         *magic_val)
{
    uint32_t shmem_base;
    uint32_t validity;

    /* set `magic' bit in order to save MF config across the reset */
    if (!CHIP_IS_E1(sc)) {
        bxe_clp_reset_prep(sc, magic_val);
    }

    /* clear the validity-map flags for this port, if shmem exists */
    shmem_base = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
    validity = offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
    if (shmem_base > 0) {
        REG_WR(sc, (shmem_base + validity), 0);
    }
}
#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT 100 /* 100 ms */
static void
bxe_mcp_wait_one(struct bxe_softc *sc)
{
/* special handling for emulation and FPGA (10 times longer) */
if (CHIP_REV_IS_SLOW(sc)) {
DELAY((MCP_ONE_TIMEOUT*10) * 1000);
} else {
DELAY((MCP_ONE_TIMEOUT) * 1000);
}
}
/*
 * Re-read the shared-memory base address and wait (up to MCP_TIMEOUT)
 * for the MCP validity signature to appear.  Returns 0 once the
 * signature is valid, -1 on timeout.
 */
static int
bxe_init_shmem(struct bxe_softc *sc)
{
    int attempt = 0;
    uint32_t sig = 0;

    for (;;) {
        sc->devinfo.shmem_base =
            sc->link_params.shmem_base =
            REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);

        if (sc->devinfo.shmem_base) {
            sig = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
            if (sig & SHR_MEM_VALIDITY_MB)
                return (0);
        }

        bxe_mcp_wait_one(sc);

        if (attempt++ >= (MCP_TIMEOUT / MCP_ONE_TIMEOUT)) {
            break;
        }
    }

    BLOGE(sc, "BAD MCP validity signature\n");

    return (-1);
}
/* complete an MCP reset: re-init shared memory, restore the CLP 'magic' bit */
static int
bxe_reset_mcp_comp(struct bxe_softc *sc,
                   uint32_t         magic_val)
{
    int rc;

    rc = bxe_init_shmem(sc);

    /* Restore the `magic' bit value */
    if (!CHIP_IS_E1(sc)) {
        bxe_clp_reset_done(sc, magic_val);
    }

    return (rc);
}
/* prepare the PXP block for the chip reset (no-op on E1) */
static void
bxe_pxp_prep(struct bxe_softc *sc)
{
    if (CHIP_IS_E1(sc)) {
        return;
    }

    REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
    REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
    /* post the writes before the reset sequence continues */
    wmb();
}
/*
 * Reset the whole chip except for:
 *  - PCIE core
 *  - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
 *  - IGU
 *  - MISC (including AEU)
 *  - GRC
 *  - RBCN, RBCP
 *
 * 'global' additionally resets the per-chip (common MCP) blocks.
 * The write ordering of the two reset registers is significant; see the
 * comment above the REG_WR sequence below.
 */
static void
bxe_process_kill_chip_reset(struct bxe_softc *sc,
                            uint8_t          global)
{
    uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
    uint32_t global_bits2, stay_reset2;

    /*
     * Bits that have to be set in reset_mask2 if we want to reset 'global'
     * (per chip) blocks.
     */
    global_bits2 =
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;

    /*
     * Don't reset the following blocks.
     * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
     *            reset, as in 4 port device they might still be owned
     *            by the MCP (there is only one leader per path).
     */
    not_reset_mask1 =
        MISC_REGISTERS_RESET_REG_1_RST_HC |
        MISC_REGISTERS_RESET_REG_1_RST_PXPV |
        MISC_REGISTERS_RESET_REG_1_RST_PXP;

    not_reset_mask2 =
        MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_RBCN |
        MISC_REGISTERS_RESET_REG_2_RST_GRC |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
        MISC_REGISTERS_RESET_REG_2_RST_ATC |
        MISC_REGISTERS_RESET_REG_2_PGLC |
        MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
        MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
        MISC_REGISTERS_RESET_REG_2_UMAC0 |
        MISC_REGISTERS_RESET_REG_2_UMAC1;

    /*
     * Keep the following blocks in reset:
     *  - all xxMACs are handled by the elink code.
     */
    stay_reset2 =
        MISC_REGISTERS_RESET_REG_2_XMAC |
        MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;

    /* Full reset masks according to the chip (reg 2 width grows per family) */
    reset_mask1 = 0xffffffff;

    if (CHIP_IS_E1(sc))
        reset_mask2 = 0xffff;
    else if (CHIP_IS_E1H(sc))
        reset_mask2 = 0x1ffff;
    else if (CHIP_IS_E2(sc))
        reset_mask2 = 0xfffff;
    else /* CHIP_IS_E3 */
        reset_mask2 = 0x3ffffff;

    /* Don't reset global blocks unless we need to */
    if (!global)
        reset_mask2 &= ~global_bits2;

    /*
     * In case of attention in the QM, we need to reset PXP
     * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
     * because otherwise QM reset would release 'close the gates' shortly
     * before resetting the PXP, then the PSWRQ would send a write
     * request to PGLUE. Then when PXP is reset, PGLUE would try to
     * read the payload data from PSWWR, but PSWWR would not
     * respond. The write queue in PGLUE would stuck, dmae commands
     * would not return. Therefore it's important to reset the second
     * reset register (containing the
     * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
     * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
     * bit).
     */
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
           reset_mask2 & (~not_reset_mask2));

    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
           reset_mask1 & (~not_reset_mask1));

    mb();
    wmb();

    /* bring the blocks back out of reset (except those that must stay) */
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
           reset_mask2 & (~stay_reset2));

    mb();
    wmb();

    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
    wmb();
}
/*
 * "Process kill" error-recovery sequence: drain the chip (Tetris
 * buffer), close gates #2/#3/#4, hard-reset the chip (optionally
 * including global/common blocks), recover the MCP, and reopen the
 * gates.  Returns 0 on success, -1 on a drain/poll timeout or MCP
 * re-init failure.
 */
static int
bxe_process_kill(struct bxe_softc *sc,
                 uint8_t          global)
{
    int cnt = 1000;
    uint32_t val = 0;
    uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
    uint32_t tags_63_32 = 0;

    /* Empty the Tetris buffer, wait for 1s */
    do {
        sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT);
        blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
        port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
        port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
        pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
        if (CHIP_IS_E3(sc)) {
            tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
        }

        /* drained signature: expected counts, idle ports, all-ones tags */
        if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
            ((port_is_idle_0 & 0x1) == 0x1) &&
            ((port_is_idle_1 & 0x1) == 0x1) &&
            (pgl_exp_rom2 == 0xffffffff) &&
            (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
            break;

        DELAY(1000);
    } while (cnt-- > 0);

    if (cnt <= 0) {
        BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
                  "are still outstanding read requests after 1s! "
                  "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
                  "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
              sr_cnt, blk_cnt, port_is_idle_0,
              port_is_idle_1, pgl_exp_rom2);
        return (-1);
    }

    mb();

    /* Close gates #2, #3 and #4 */
    bxe_set_234_gates(sc, TRUE);

    /* Poll for IGU VQs for 57712 and newer chips */
    if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
        return (-1);
    }

    /* XXX indicate that "process kill" is in progress to MCP */

    /* clear "unprepared" bit */
    REG_WR(sc, MISC_REG_UNPREPARED, 0);
    mb();

    /* Make sure all is written to the chip before the reset */
    wmb();

    /*
     * Wait for 1ms to empty GLUE and PCI-E core queues,
     * PSWHST, GRC and PSWRD Tetris buffer.
     */
    DELAY(1000);

    /* Prepare to chip reset: */
    /* MCP (only when resetting global blocks) */
    if (global) {
        /* 'val' receives the saved CLP 'magic' bit for restore below */
        bxe_reset_mcp_prep(sc, &val);
    }

    /* PXP */
    bxe_pxp_prep(sc);
    mb();

    /* reset the chip */
    bxe_process_kill_chip_reset(sc, global);
    mb();

    /* clear errors in PGB */
    if (!CHIP_IS_E1(sc))
        REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);

    /* Recover after reset: */
    /* MCP */
    if (global && bxe_reset_mcp_comp(sc, val)) {
        return (-1);
    }

    /* XXX add resetting the NO_MCP mode DB here */

    /* Open the gates #2, #3 and #4 */
    bxe_set_234_gates(sc, FALSE);

    /*
     * XXX
     * IGU/AEU preparation bring back the AEU/IGU to a reset state
     * re-enable attentions
     */

    return (0);
}
/*
 * Executed by the elected recovery leader: perform the "process kill"
 * chip reset.  When the MCP itself is not being reset, a "fake" driver
 * load/unload handshake is performed with the MCP so the HW reset
 * happens while this driver is the registered owner.  On any exit path
 * the leadership is dropped (is_leader cleared, leader lock released).
 * Returns 0 on success, -1 on failure.
 */
static int
bxe_leader_reset(struct bxe_softc *sc)
{
    int rc = 0;
    uint8_t global = bxe_reset_is_global(sc);
    uint32_t load_code;

    /*
     * If not going to reset MCP, load "fake" driver to reset HW while
     * driver is owner of the HW.
     */
    if (!global && !BXE_NOMCP(sc)) {
        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
                                   DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
        if (!load_code) {
            BLOGE(sc, "MCP response failure, aborting\n");
            rc = -1;
            goto exit_leader_reset;
        }

        /* the fake load must make us the COMMON (first) function */
        if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
            (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
            BLOGE(sc, "MCP unexpected response, aborting\n");
            rc = -1;
            goto exit_leader_reset2;
        }

        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
        if (!load_code) {
            BLOGE(sc, "MCP response failure, aborting\n");
            rc = -1;
            goto exit_leader_reset2;
        }
    }

    /* try to recover after the failure */
    if (bxe_process_kill(sc, global)) {
        BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
        rc = -1;
        goto exit_leader_reset2;
    }

    /*
     * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
     * state.
     */
    bxe_set_reset_done(sc);
    if (global) {
        bxe_clear_reset_global(sc);
    }

exit_leader_reset2:

    /* unload "fake driver" if it was loaded */
    if (!global && !BXE_NOMCP(sc)) {
        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
    }

exit_leader_reset:

    sc->is_leader = 0;
    bxe_release_leader_lock(sc);

    mb();

    return (rc);
}
/*
 * prepare INIT transition, parameters configured:
 *  - HC (host coalescing) configuration
 *  - Queue's CDU context
 */
static void
bxe_pf_q_prep_init(struct bxe_softc               *sc,
                   struct bxe_fastpath            *fp,
                   struct ecore_queue_init_params *init_params)
{
    uint8_t cos;
    int cxt_index, cxt_offset;

    /* enable host coalescing on both the rx and tx rings */
    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);

    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);

    /* HC rate in events/sec derived from the configured tick periods (us) */
    init_params->rx.hc_rate =
        sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
    init_params->tx.hc_rate =
        sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;

    /* FW SB ID */
    init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;

    /* CQ index among the SB indices */
    init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
    init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;

    /* set maximum number of COSs supported by this queue */
    init_params->max_cos = sc->max_cos;

    BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
          fp->index, init_params->max_cos);

    /* set the context pointers queue object */
    for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
        /* XXX change index/cid here if ever support multiple tx CoS */
        /* fp->txdata[cos]->cid */
        /* map fp->index to the ILT page/slot holding its CDU context */
        cxt_index = fp->index / ILT_PAGE_CIDS;
        cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
        init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
    }
}
/*
 * Build the queue flags shared by normal and Tx-only connections:
 * ACTIVE state, statistics (optionally zeroed), tx-switching when
 * enabled, and per-packet checksum.
 */
static unsigned long
bxe_get_common_flags(struct bxe_softc    *sc,
                     struct bxe_fastpath *fp,
                     uint8_t             zero_stats)
{
    unsigned long qflags = 0;

    /* PF driver will always initialize the Queue to an ACTIVE state */
    bxe_set_bit(ECORE_Q_FLG_ACTIVE, &qflags);

    /*
     * tx only connections collect statistics (on the same index as the
     * parent connection). The statistics are zeroed when the parent
     * connection is initialized.
     */
    bxe_set_bit(ECORE_Q_FLG_STATS, &qflags);
    if (zero_stats) {
        bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &qflags);
    }

    /*
     * tx only connections can support tx-switching, though their
     * CoS-ness doesn't survive the loopback
     */
    if (sc->flags & BXE_TX_SWITCHING) {
        bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &qflags);
    }

    bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &qflags);

    return (qflags);
}
/*
 * Build the complete flag set for a normal connection: MF outer-VLAN
 * handling, TPA when LRO is enabled, leading-RSS/multicast for the lead
 * queue, VLAN, merged with the common flags (statistics zeroed).
 */
static unsigned long
bxe_get_q_flags(struct bxe_softc    *sc,
                struct bxe_fastpath *fp,
                uint8_t             leading)
{
    unsigned long qflags = 0;

    /* switch-dependent multi-function mode requires outer-VLAN handling */
    if (IS_MF_SD(sc)) {
        bxe_set_bit(ECORE_Q_FLG_OV, &qflags);
    }

    /* enable TPA aggregation when LRO is turned on for the interface */
    if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
        bxe_set_bit(ECORE_Q_FLG_TPA, &qflags);
        bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &qflags);
    }

    /* only the leading queue owns RSS and multicast configuration */
    if (leading) {
        bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &qflags);
        bxe_set_bit(ECORE_Q_FLG_MCAST, &qflags);
    }

    bxe_set_bit(ECORE_Q_FLG_VLAN, &qflags);

    /* merge with common flags */
    return (qflags | bxe_get_common_flags(sc, fp, TRUE));
}
/*
 * Fill the general SETUP parameters for a queue: statistics id, special
 * (client) id, MTU, and CoS.
 */
static void
bxe_pf_q_prep_general(struct bxe_softc                  *sc,
                      struct bxe_fastpath               *fp,
                      struct ecore_general_setup_params *gen_init,
                      uint8_t                           cos)
{
    gen_init->stat_id = bxe_stats_id(fp);
    gen_init->spcl_id = fp->cl_id;
    gen_init->mtu     = sc->mtu;
    gen_init->cos     = cos;
}
/*
 * Fill the Rx queue SETUP parameters for one fastpath: pause thresholds,
 * TPA/SGE sizing, ring DMA addresses, and silent-VLAN removal for AFEX
 * multi-function mode.
 */
static void
bxe_pf_rx_q_prep(struct bxe_softc              *sc,
                 struct bxe_fastpath           *fp,
                 struct rxq_pause_params       *pause,
                 struct ecore_rxq_setup_params *rxq_init)
{
    uint8_t max_sge = 0;
    uint16_t sge_sz = 0;
    uint16_t tpa_agg_size = 0;

    pause->sge_th_lo = SGE_TH_LO(sc);
    pause->sge_th_hi = SGE_TH_HI(sc);

    /* validate SGE ring has enough to cross high threshold */
    if (sc->dropless_fc &&
        (pause->sge_th_hi + FW_PREFETCH_CNT) >
        (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
        BLOGW(sc, "sge ring threshold limit\n");
    }

    /* minimum max_aggregation_size is 2*MTU (two full buffers) */
    tpa_agg_size = (2 * sc->mtu);
    if (tpa_agg_size < sc->max_aggregation_size) {
        tpa_agg_size = sc->max_aggregation_size;
    }

    /* number of SGE pages needed per packet, rounded to PAGES_PER_SGE */
    max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
    max_sge = ((max_sge + PAGES_PER_SGE - 1) &
               (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
    sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);

    /* pause - not for e1 */
    if (!CHIP_IS_E1(sc)) {
        pause->bd_th_lo = BD_TH_LO(sc);
        pause->bd_th_hi = BD_TH_HI(sc);
        pause->rcq_th_lo = RCQ_TH_LO(sc);
        pause->rcq_th_hi = RCQ_TH_HI(sc);

        /* validate rings have enough entries to cross high thresholds */
        if (sc->dropless_fc &&
            pause->bd_th_hi + FW_PREFETCH_CNT >
            sc->rx_ring_size) {
            BLOGW(sc, "rx bd ring threshold limit\n");
        }

        if (sc->dropless_fc &&
            pause->rcq_th_hi + FW_PREFETCH_CNT >
            RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
            BLOGW(sc, "rcq ring threshold limit\n");
        }

        pause->pri_map = 1;
    }

    /* rxq setup: physical addresses of the bd, sge and rcq rings */
    rxq_init->dscr_map = fp->rx_dma.paddr;
    rxq_init->sge_map = fp->rx_sge_dma.paddr;
    rxq_init->rcq_map = fp->rcq_dma.paddr;
    rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);

    /*
     * This should be a maximum number of data bytes that may be
     * placed on the BD (not including paddings).
     */
    rxq_init->buf_sz = (fp->rx_buf_size -
                        IP_HEADER_ALIGNMENT_PADDING);

    rxq_init->cl_qzone_id = fp->cl_qzone_id;
    rxq_init->tpa_agg_sz = tpa_agg_size;
    rxq_init->sge_buf_sz = sge_sz;
    rxq_init->max_sges_pkt = max_sge;
    rxq_init->rss_engine_id = SC_FUNC(sc);
    rxq_init->mcast_engine_id = SC_FUNC(sc);

    /*
     * Maximum number or simultaneous TPA aggregation for this Queue.
     * For PF Clients it should be the maximum available number.
     * VF driver(s) may want to define it to a smaller value.
     */
    rxq_init->max_tpa_queues = MAX_AGG_QS(sc);

    rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
    rxq_init->fw_sb_id = fp->fw_sb_id;

    rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;

    /*
     * configure silent vlan removal
     * if multi function mode is afex, then mask default vlan
     */
    if (IS_MF_AFEX(sc)) {
        rxq_init->silent_removal_value =
            sc->devinfo.mf_info.afex_def_vlan_tag;
        rxq_init->silent_removal_mask = EVL_VLID_MASK;
    }
}
/*
 * Fill the Tx queue SETUP parameters for a fastpath: descriptor ring
 * DMA address, status-block CQ index, traffic type, FW SB id, and the
 * TSS leading client.
 */
static void
bxe_pf_tx_q_prep(struct bxe_softc              *sc,
                 struct bxe_fastpath           *fp,
                 struct ecore_txq_setup_params *txq_init,
                 uint8_t                       cos)
{
    /*
     * XXX If multiple CoS is ever supported then each fastpath structure
     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
     * fp->txdata[cos]->tx_dma.paddr;
     */
    txq_init->dscr_map     = fp->tx_dma.paddr;
    txq_init->sb_cq_index  = (HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos);
    txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
    txq_init->fw_sb_id     = fp->fw_sb_id;

    /*
     * Set the TSS leading client id for TX classification to the
     * leading RSS client id.
     */
    txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
}
/*
 * This function performs 2 steps in a queue state machine:
 *   1) RESET->INIT
 *   2) INIT->SETUP
 * Returns the ecore status of the failing transition, or 0 on success.
 */
static int
bxe_setup_queue(struct bxe_softc    *sc,
                struct bxe_fastpath *fp,
                uint8_t             leading)
{
    struct ecore_queue_state_params q_params = { NULL };
    struct ecore_queue_setup_params *setup_params =
        &q_params.params.setup;
    int rc;

    BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);

    /* enable interrupts on this queue's status block */
    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);

    q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;

    /* we want to wait for completion in this context */
    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

    /* prepare the INIT parameters */
    bxe_pf_q_prep_init(sc, fp, &q_params.params.init);

    /* Set the command */
    q_params.cmd = ECORE_Q_CMD_INIT;

    /* Change the state to INIT */
    rc = ecore_queue_state_change(sc, &q_params);
    if (rc) {
        BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
        return (rc);
    }

    BLOGD(sc, DBG_LOAD, "init complete\n");

    /* now move the Queue to the SETUP state */
    memset(setup_params, 0, sizeof(*setup_params));

    /* set Queue flags */
    setup_params->flags = bxe_get_q_flags(sc, fp, leading);

    /* set general SETUP parameters */
    bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
                          FIRST_TX_COS_INDEX);

    bxe_pf_rx_q_prep(sc, fp,
                     &setup_params->pause_params,
                     &setup_params->rxq_params);

    bxe_pf_tx_q_prep(sc, fp,
                     &setup_params->txq_params,
                     FIRST_TX_COS_INDEX);

    /* Set the command */
    q_params.cmd = ECORE_Q_CMD_SETUP;

    /* change the state to SETUP */
    rc = ecore_queue_state_change(sc, &q_params);
    if (rc) {
        BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
        return (rc);
    }

    return (rc);
}
/* bring up the leading (RSS) queue, fp[0] */
static int
bxe_setup_leading(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp = &sc->fp[0];

    return (bxe_setup_queue(sc, fp, TRUE));
}
/*
 * Program the RSS configuration (mode, hash types, indirection table
 * and, when config_hash is set, a fresh random hash key) through the
 * given ecore RSS object.  Returns the ecore status code.
 */
static int
bxe_config_rss_pf(struct bxe_softc            *sc,
                  struct ecore_rss_config_obj *rss_obj,
                  uint8_t                     config_hash)
{
    struct ecore_config_rss_params rss_params = { NULL };
    int i;

    /*
     * Although RSS is meaningless when there is a single HW queue we
     * still need it enabled in order to have HW Rx hash generated.
     */
    rss_params.rss_obj = rss_obj;
    bxe_set_bit(RAMROD_COMP_WAIT, &rss_params.ramrod_flags);
    bxe_set_bit(ECORE_RSS_MODE_REGULAR, &rss_params.rss_flags);

    /* hash IPv4/IPv6 and their TCP flows; UDP only when enabled */
    bxe_set_bit(ECORE_RSS_IPV4, &rss_params.rss_flags);
    bxe_set_bit(ECORE_RSS_IPV4_TCP, &rss_params.rss_flags);
    bxe_set_bit(ECORE_RSS_IPV6, &rss_params.rss_flags);
    bxe_set_bit(ECORE_RSS_IPV6_TCP, &rss_params.rss_flags);
    if (rss_obj->udp_rss_v4) {
        bxe_set_bit(ECORE_RSS_IPV4_UDP, &rss_params.rss_flags);
    }
    if (rss_obj->udp_rss_v6) {
        bxe_set_bit(ECORE_RSS_IPV6_UDP, &rss_params.rss_flags);
    }

    /* Hash bits */
    rss_params.rss_result_mask = MULTI_MASK;

    memcpy(rss_params.ind_table, rss_obj->ind_table,
           sizeof(rss_params.ind_table));

    if (config_hash) {
        /* generate a fresh random RSS key */
        for (i = 0; i < (sizeof(rss_params.rss_key) / 4); i++) {
            rss_params.rss_key[i] = arc4random();
        }
        bxe_set_bit(ECORE_RSS_SET_SRCH, &rss_params.rss_flags);
    }

    return (ecore_config_rss(sc, &rss_params));
}
/* configure RSS using the default ethernet RSS configuration object */
static int
bxe_config_rss_eth(struct bxe_softc *sc,
                   uint8_t          config_hash)
{
    struct ecore_rss_config_obj *rss_obj = &sc->rss_conf_obj;

    return (bxe_config_rss_pf(sc, rss_obj, config_hash));
}
/*
 * Initialize RSS for this PF: seed the indirection table round-robin
 * over the ethernet queues, enable UDP RSS if requested, and push the
 * configuration to the chip.
 */
static int
bxe_init_rss_pf(struct bxe_softc *sc)
{
    uint8_t nqueues = BXE_NUM_ETH_QUEUES(sc);
    int idx;

    /*
     * Prepare the initial contents of the indirection table if
     * RSS is enabled
     */
    for (idx = 0; idx < sizeof(sc->rss_conf_obj.ind_table); idx++) {
        sc->rss_conf_obj.ind_table[idx] =
            (sc->fp->cl_id + (idx % nqueues));
    }

    if (sc->udp_rss) {
        sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
    }

    /*
     * For 57710 and 57711 SEARCHER configuration (rss_keys) is
     * per-port, so if explicit configuration is needed, do it only
     * for a PMF.
     *
     * For 57712 and newer it's a per-function configuration.
     */
    return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
}
/*
 * Add or delete a single MAC classification rule via an ecore vlan/mac
 * ramrod.  'set' selects ADD vs DEL; when RAMROD_CONT is present in
 * *ramrod_flags a pending operation is continued and the user request
 * is not filled.  Adding an already-existing MAC is not an error.
 */
static int
bxe_set_mac_one(struct bxe_softc          *sc,
                uint8_t                   *mac,
                struct ecore_vlan_mac_obj *obj,
                uint8_t                   set,
                int                       mac_type,
                unsigned long             *ramrod_flags)
{
    struct ecore_vlan_mac_ramrod_params rparams;
    int rc;

    memset(&rparams, 0, sizeof(rparams));

    /* fill in general parameters */
    rparams.vlan_mac_obj = obj;
    rparams.ramrod_flags = *ramrod_flags;

    /* fill a user request section if needed */
    if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
        memcpy(rparams.user_req.u.mac.mac, mac, ETH_ALEN);
        bxe_set_bit(mac_type, &rparams.user_req.vlan_mac_flags);
        /* Set the command: ADD or DEL */
        rparams.user_req.cmd = set ? ECORE_VLAN_MAC_ADD : ECORE_VLAN_MAC_DEL;
    }

    rc = ecore_config_vlan_mac(sc, &rparams);

    if (rc == ECORE_EXISTS) {
        BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
        /* do not treat adding same MAC as error */
        rc = 0;
    } else if (rc < 0) {
        BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
    }

    return (rc);
}
/* add (set=TRUE) or remove the primary ethernet MAC on the leading queue */
static int
bxe_set_eth_mac(struct bxe_softc *sc,
                uint8_t          set)
{
    unsigned long flags = 0;

    BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");

    bxe_set_bit(RAMROD_COMP_WAIT, &flags);

    /* Eth MAC is set on RSS leading client (fp[0]) */
    return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
                            &sc->sp_objs->mac_obj,
                            set, ECORE_ETH_MAC, &flags));
}
/*
 * Determine the currently-active PHY index.  Single-PHY setups always
 * use the internal PHY; otherwise the choice depends on whether the
 * link is up (SERDES check) or on the configured selection policy.
 */
static int
bxe_get_cur_phy_idx(struct bxe_softc *sc)
{
    uint32_t phy_idx = 0;

    if (sc->link_params.num_phys <= 1) {
        return (ELINK_INT_PHY);
    }

    if (sc->link_vars.link_up) {
        phy_idx = ELINK_EXT_PHY1;
        /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
        if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
            (sc->link_params.phy[ELINK_EXT_PHY2].supported &
             ELINK_SUPPORTED_FIBRE)) {
            phy_idx = ELINK_EXT_PHY2;
        }
        return (phy_idx);
    }

    /* link is down: use the configured PHY selection policy */
    switch (elink_phy_selection(&sc->link_params)) {
    case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
    case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
    case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
        phy_idx = ELINK_EXT_PHY1;
        break;
    case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
    case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
        phy_idx = ELINK_EXT_PHY2;
        break;
    }

    return (phy_idx);
}
/*
 * Return the link configuration index for the active PHY, undoing PHY
 * swapping when it is enabled.
 */
static int
bxe_get_link_cfg_idx(struct bxe_softc *sc)
{
    uint32_t phy_idx = bxe_get_cur_phy_idx(sc);

    /*
     * The selected activated PHY is always after swapping (in case PHY
     * swapping is enabled), so reverse the index when swapping is on.
     */
    if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
        if (phy_idx == ELINK_EXT_PHY1) {
            phy_idx = ELINK_EXT_PHY2;
        } else if (phy_idx == ELINK_EXT_PHY2) {
            phy_idx = ELINK_EXT_PHY1;
        }
    }

    return (ELINK_LINK_CONFIG_IDX(phy_idx));
}
/*
 * Select the requested flow-control mode.  It is recommended to turn
 * off RX flow control for jumbo frames on E1x for better performance,
 * so only TX flow control is requested in that case.
 */
static void
bxe_set_requested_fc(struct bxe_softc *sc)
{
    sc->link_params.req_fc_auto_adv =
        (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) ?
            ELINK_FLOW_CTRL_TX : ELINK_FLOW_CTRL_BOTH;
}
static void
bxe_calc_fc_adv(struct bxe_softc *sc)
{
uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
ADVERTISED_Pause);
switch (sc->link_vars.ieee_fc &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
ADVERTISED_Pause);
break;
case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
break;
default:
break;
}
}
/*
 * Return the effective line speed, capped by the multi-function
 * per-VN bandwidth configuration when MF mode is active.
 */
static uint16_t
bxe_get_mf_speed(struct bxe_softc *sc)
{
    uint16_t speed = sc->link_vars.line_speed;
    uint16_t max_cfg;

    if (!IS_MF(sc)) {
        return (speed);
    }

    max_cfg = bxe_extract_max_cfg(sc,
                                  sc->devinfo.mf_info.mf_config[SC_VN(sc)]);

    if (IS_MF_SI(sc)) {
        /* SI mode: maxCfg scales the physical line speed (percent) */
        speed = ((speed * max_cfg) / 100);
    } else {
        /* SD mode: maxCfg is in 100 Mbps units — cap the line speed */
        uint16_t vn_max = (max_cfg * 100);

        if (vn_max < speed) {
            speed = vn_max;
        }
    }

    return (speed);
}
/*
 * Snapshot the current link state (effective speed, up/down, duplex,
 * RX/TX flow control) into a bxe_link_report_data structure so it can
 * be compared against the last reported state.
 */
static void
bxe_fill_report_data(struct bxe_softc            *sc,
                     struct bxe_link_report_data *data)
{
    uint16_t speed = bxe_get_mf_speed(sc);

    memset(data, 0, sizeof(*data));

    /* effective (MF-adjusted) line speed */
    data->line_speed = speed;

    /* link down, or the function administratively disabled */
    if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
        bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
    }

    /* full duplex */
    if (sc->link_vars.duplex == DUPLEX_FULL) {
        bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
    }

    /* RX flow control enabled */
    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
        bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
    }

    /* TX flow control enabled */
    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
        bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
    }
}
/*
 * Report link status to the OS; must be called with the phy_lock held.
 * Suppresses duplicate reports: nothing is reported when the state is
 * unchanged or when the link was already reported down.
 */
static void
bxe_link_report_locked(struct bxe_softc *sc)
{
    struct bxe_link_report_data cur_data;

    /* reread mf_cfg */
    if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
        bxe_read_mf_cfg(sc);
    }

    /* Read the current link report info */
    bxe_fill_report_data(sc, &cur_data);

    /* Don't report link down or exactly the same link status twice */
    if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
        (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
                      &sc->last_reported_link.link_report_flags) &&
         bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
                      &cur_data.link_report_flags))) {
        return;
    }

    ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x\n",
                   cur_data.link_report_flags, sc->last_reported_link.link_report_flags);

    sc->link_cnt++;

    ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt);
    /* report new link params and remember the state for the next time */
    memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));

    if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
                     &cur_data.link_report_flags)) {
        if_link_state_change(sc->ifp, LINK_STATE_DOWN);
    } else {
        const char *duplex;
        const char *flow;

        /*
         * NOTE: test_and_CLEAR is used here so the flag comparison
         * below sees only the flow-control flags.
         */
        if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
                                   &cur_data.link_report_flags)) {
            duplex = "full";
            ELINK_DEBUG_P0(sc, "link set to full duplex\n");
        } else {
            duplex = "half";
            ELINK_DEBUG_P0(sc, "link set to half duplex\n");
        }

        /*
         * Handle the FC at the end so that only these flags would be
         * possibly set. This way we may easily check if there is no FC
         * enabled.
         */
        if (cur_data.link_report_flags) {
            if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
                             &cur_data.link_report_flags) &&
                bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
                             &cur_data.link_report_flags)) {
                flow = "ON - receive & transmit";
            } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
                                    &cur_data.link_report_flags) &&
                       !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
                                     &cur_data.link_report_flags)) {
                flow = "ON - receive";
            } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
                                     &cur_data.link_report_flags) &&
                       bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
                                    &cur_data.link_report_flags)) {
                flow = "ON - transmit";
            } else {
                flow = "none"; /* possible? */
            }
        } else {
            flow = "none";
        }

        if_link_state_change(sc->ifp, LINK_STATE_UP);
        BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
              cur_data.line_speed, duplex, flow);
    }
}
/* acquire the PHY lock around the locked link-report helper */
static void
bxe_link_report(struct bxe_softc *sc)
{
    bxe_acquire_phy_lock(sc);
    bxe_link_report_locked(sc);
    bxe_release_phy_lock(sc);
}
/*
 * Refresh the link state.  For a PF on real silicon this queries elink;
 * for VFs and emulation/FPGA platforms a fixed "link up" state is
 * fabricated instead.  Stats handling and OS reporting follow.
 */
static void
bxe_link_status_update(struct bxe_softc *sc)
{
    if (sc->state != BXE_STATE_OPEN) {
        return;
    }

    if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
        elink_link_status_update(&sc->link_params, &sc->link_vars);
    } else {
        /* fabricate a fully-capable, link-up state */
        sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
                                  ELINK_SUPPORTED_10baseT_Full |
                                  ELINK_SUPPORTED_100baseT_Half |
                                  ELINK_SUPPORTED_100baseT_Full |
                                  ELINK_SUPPORTED_1000baseT_Full |
                                  ELINK_SUPPORTED_2500baseX_Full |
                                  ELINK_SUPPORTED_10000baseT_Full |
                                  ELINK_SUPPORTED_TP |
                                  ELINK_SUPPORTED_FIBRE |
                                  ELINK_SUPPORTED_Autoneg |
                                  ELINK_SUPPORTED_Pause |
                                  ELINK_SUPPORTED_Asym_Pause);
        sc->port.advertising[0] = sc->port.supported[0];

        sc->link_params.sc                = sc;
        sc->link_params.port              = SC_PORT(sc);
        sc->link_params.req_duplex[0]     = DUPLEX_FULL;
        sc->link_params.req_flow_ctrl[0]  = ELINK_FLOW_CTRL_NONE;
        sc->link_params.req_line_speed[0] = SPEED_10000;
        sc->link_params.speed_cap_mask[0] = 0x7f0000;
        sc->link_params.switch_cfg        = ELINK_SWITCH_CFG_10G;

        if (CHIP_REV_IS_FPGA(sc)) {
            sc->link_vars.mac_type    = ELINK_MAC_TYPE_EMAC;
            sc->link_vars.line_speed  = ELINK_SPEED_1000;
            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
                                         LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
        } else {
            sc->link_vars.mac_type    = ELINK_MAC_TYPE_BMAC;
            sc->link_vars.line_speed  = ELINK_SPEED_10000;
            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
                                         LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
        }

        sc->link_vars.link_up = 1;

        sc->link_vars.duplex    = DUPLEX_FULL;
        sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;

        /*
         * NOTE(review): a PF on a slow (emulation) platform reports
         * here AND again below -- bxe_link_report_locked suppresses
         * the duplicate, but confirm this is intentional.
         */
        if (IS_PF(sc)) {
            REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
            bxe_link_report(sc);
        }
    }

    if (IS_PF(sc)) {
        if (sc->link_vars.link_up) {
            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
        } else {
            bxe_stats_handle(sc, STATS_EVENT_STOP);
        }
        bxe_link_report(sc);
    } else {
        bxe_link_report(sc);
        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
    }
}
/*
 * Perform the initial PHY bring-up through elink.  Handles the
 * emulation/FPGA MAC-disable feature flags, diagnostic (PHY loopback)
 * and external-loopback load modes, and starts the periodic callout on
 * real silicon.  The requested line speed is restored before returning
 * because LOAD_DIAG may have overridden it.  Returns the elink status.
 */
static int
bxe_initial_phy_init(struct bxe_softc *sc,
                     int              load_mode)
{
    int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
    uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
    struct elink_params *lp = &sc->link_params;

    bxe_set_requested_fc(sc);

    /* emulation/FPGA: the bond id encodes which MACs must be disabled */
    if (CHIP_REV_IS_SLOW(sc)) {
        uint32_t bond = CHIP_BOND_ID(sc);
        uint32_t feat = 0;

        if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
        } else if (bond & 0x4) {
            if (CHIP_IS_E3(sc)) {
                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
            } else {
                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
            }
        } else if (bond & 0x8) {
            if (CHIP_IS_E3(sc)) {
                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
            } else {
                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
            }
        }

        /* disable EMAC for E3 and above */
        if (bond & 0x2) {
            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
        }

        sc->link_params.feature_config_flags |= feat;
    }

    bxe_acquire_phy_lock(sc);

    if (load_mode == LOAD_DIAG) {
        lp->loopback_mode = ELINK_LOOPBACK_XGXS;
        /* Prefer doing PHY loopback at 10G speed, if possible */
        if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
            if (lp->speed_cap_mask[cfg_idx] &
                PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
                lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
            } else {
                lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
            }
        }
    }

    if (load_mode == LOAD_LOOPBACK_EXT) {
        lp->loopback_mode = ELINK_LOOPBACK_EXT;
    }

    rc = elink_phy_init(&sc->link_params, &sc->link_vars);

    bxe_release_phy_lock(sc);

    bxe_calc_fc_adv(sc);

    if (sc->link_vars.link_up) {
        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
        bxe_link_report(sc);
    }

    if (!CHIP_REV_IS_SLOW(sc)) {
        bxe_periodic_start(sc);
    }

    /* restore the speed possibly overridden by the LOAD_DIAG path */
    sc->link_params.req_line_speed[cfg_idx] = req_line_speed;

    return (rc);
}
/*
 * if_foreach_llmaddr() callback: record a pointer to the cnt'th
 * multicast MAC address in the caller-supplied element array.
 * Always returns 1 so every address is counted.
 */
static u_int
bxe_push_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
    struct ecore_mcast_list_elem *slots = arg;

    slots[cnt].mac = (uint8_t *)LLADDR(sdl);

    return (1);
}
/*
 * Build the ecore multicast ramrod MAC list from the interface's
 * current link-level multicast addresses.
 *
 * All list elements are carved out of one heap array; the caller must
 * release it with bxe_free_mcast_macs_list().  Returns 0 on success
 * (including the empty-list case) and -1 on allocation failure.
 */
static int
bxe_init_mcast_macs_list(struct bxe_softc *sc,
                         struct ecore_mcast_ramrod_params *p)
{
    if_t ifp = sc->ifp;
    int mc_count;
    struct ecore_mcast_list_elem *mc_mac;

    ECORE_LIST_INIT(&p->mcast_list);
    p->mcast_list_len = 0;

    /* XXXGL: multicast count may change later */
    mc_count = if_llmaddr_count(ifp);

    if (!mc_count) {
        return (0);
    }

    /*
     * M_ZERO already returns zeroed memory, so the bzero() that used
     * to follow this allocation was redundant and has been removed.
     */
    mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF,
                    (M_NOWAIT | M_ZERO));
    if (!mc_mac) {
        BLOGE(sc, "Failed to allocate temp mcast list\n");
        return (-1);
    }

    /* fill each element with a pointer to one lladdr */
    if_foreach_llmaddr(ifp, bxe_push_maddr, mc_mac);

    /* chain the elements onto the ramrod's list */
    for (int i = 0; i < mc_count; i ++) {
        ECORE_LIST_PUSH_TAIL(&mc_mac[i].link, &p->mcast_list);
        BLOGD(sc, DBG_LOAD,
              "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X and mc_count %d\n",
              mc_mac[i].mac[0], mc_mac[i].mac[1], mc_mac[i].mac[2],
              mc_mac[i].mac[3], mc_mac[i].mac[4], mc_mac[i].mac[5],
              mc_count);
    }

    p->mcast_list_len = mc_count;

    return (0);
}
static void
bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p)
{
struct ecore_mcast_list_elem *mc_mac =
ECORE_LIST_FIRST_ENTRY(&p->mcast_list,
struct ecore_mcast_list_elem,
link);
if (mc_mac) {
/* only a single free as all mc_macs are in the same heap array */
free(mc_mac, M_DEVBUF);
}
}
/*
 * Reprogram the hardware multicast filters from the interface's
 * current multicast membership: clear the old configuration, build a
 * fresh list and program it.  Returns 0 or a negative ecore error.
 */
static int
bxe_set_mc_list(struct bxe_softc *sc)
{
    struct ecore_mcast_ramrod_params rparam = { NULL };
    int rc;

    rparam.mcast_obj = &sc->mcast_obj;

    BXE_MCAST_LOCK(sc);

    /* flush any previously configured multicast MACs */
    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
    if (rc < 0) {
        BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
        goto done;
    }

    /* build the new MAC list */
    rc = bxe_init_mcast_macs_list(sc, &rparam);
    if (rc) {
        BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc);
        goto done;
    }

    /* program the new MACs */
    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
    if (rc < 0) {
        BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
    }

    bxe_free_mcast_macs_list(&rparam);
done:
    BXE_MCAST_UNLOCK(sc);
    return (rc);
}
/* Context handed to the bxe_set_addr() if_foreach_lladdr() callback. */
struct bxe_set_addr_ctx {
    struct bxe_softc *sc;       /* adapter whose MAC object is programmed */
    unsigned long ramrod_flags; /* ramrod flags accumulated across calls */
    int rc;                     /* first fatal error seen, or 0 */
};
/*
 * if_foreach_lladdr() callback: schedule an ADD of one unicast MAC.
 * A previously recorded fatal error short-circuits further work;
 * -EEXIST (address already programmed) is deliberately not fatal.
 */
static u_int
bxe_set_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
    struct bxe_set_addr_ctx *ctx = arg;
    struct ecore_vlan_mac_obj *mac_obj = &ctx->sc->sp_objs->mac_obj;
    int rc;

    if (ctx->rc < 0) {
        return (0);
    }

    rc = bxe_set_mac_one(ctx->sc, (uint8_t *)LLADDR(sdl), mac_obj, TRUE,
                         ECORE_UC_LIST_MAC, &ctx->ramrod_flags);

    if (rc == -EEXIST) {
        /* do not treat adding same MAC as an error */
        BLOGD(ctx->sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
    } else if (rc < 0) {
        BLOGE(ctx->sc, "Failed to schedule ADD operations (%d)\n", rc);
        ctx->rc = rc;
    }

    return (1);
}
static int
bxe_set_uc_list(struct bxe_softc *sc)
{
    /*
     * Reprogram the unicast MAC filter list from the interface's
     * link-level addresses: schedule deletion of everything, schedule
     * an ADD per current address, then execute the queued commands
     * with RAMROD_CONT.  Returns 0 or a negative ecore error.
     */
    if_t ifp = sc->ifp;
    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
    struct bxe_set_addr_ctx ctx = { sc, 0, 0 };
    int rc;

    /* first schedule a cleanup up of old configuration */
    rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
    if (rc < 0) {
        BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
        return (rc);
    }

    /* schedule one ADD per lladdr; first error is latched in ctx.rc */
    if_foreach_lladdr(ifp, bxe_set_addr, &ctx);
    if (ctx.rc < 0)
        return (ctx.rc);

    /* Execute the pending commands */
    bit_set(&ctx.ramrod_flags, RAMROD_CONT);
    return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
                            ECORE_UC_LIST_MAC, &ctx.ramrod_flags));
}
static void
bxe_set_rx_mode(struct bxe_softc *sc)
{
    /*
     * Compute and apply the receive filtering mode (normal, allmulti
     * or promiscuous) from the interface flags and the current
     * unicast/multicast address lists.  If a previous rx_mode ramrod
     * is still pending, the change is only scheduled and will be
     * applied later (ECORE_FILTER_RX_MODE_SCHED).
     */
    if_t ifp = sc->ifp;
    uint32_t rx_mode = BXE_RX_MODE_NORMAL;

    if (sc->state != BXE_STATE_OPEN) {
        BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
        return;
    }

    BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));

    if (if_getflags(ifp) & IFF_PROMISC) {
        rx_mode = BXE_RX_MODE_PROMISC;
    } else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
               ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
                CHIP_IS_E1(sc))) {
        /* E1 has a limited multicast filter; overflow -> allmulti */
        rx_mode = BXE_RX_MODE_ALLMULTI;
    } else {
        if (IS_PF(sc)) {
            /* some multicasts */
            if (bxe_set_mc_list(sc) < 0) {
                /* couldn't program mcast filters; accept all mcast */
                rx_mode = BXE_RX_MODE_ALLMULTI;
            }
            if (bxe_set_uc_list(sc) < 0) {
                /* couldn't program ucast filters; go promiscuous */
                rx_mode = BXE_RX_MODE_PROMISC;
            }
        }
    }

    sc->rx_mode = rx_mode;

    /* schedule the rx_mode command */
    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
        BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
        return;
    }

    if (IS_PF(sc)) {
        bxe_set_storm_rx_mode(sc);
    }
}
/*
 * Set or clear driver flag bits in the shmem2 drv_flags word, under
 * the DRV_FLAGS hardware lock.  Silently does nothing on firmware
 * that has no drv_flags field.
 */
static void
bxe_update_drv_flags(struct bxe_softc *sc,
                     uint32_t flags,
                     uint32_t set)
{
    uint32_t drv_flags;

    if (!SHMEM2_HAS(sc, drv_flags)) {
        return;
    }

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);

    drv_flags = SHMEM2_RD(sc, drv_flags);
    if (set) {
        SET_FLAGS(drv_flags, flags);
    } else {
        RESET_FLAGS(drv_flags, flags);
    }
    SHMEM2_WR(sc, drv_flags, drv_flags);

    BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);

    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
}
/* periodic timer callout routine, only runs when the interface is up */
static void
bxe_periodic_callout_func(void *xsc)
{
    /*
     * Runs once a second while the interface is OPEN: checks TX
     * watchdogs, runs the elink periodic function (PMF only), sends
     * the driver heartbeat pulse to the MCP and updates statistics.
     * Re-arms itself as long as periodic_flags == PERIODIC_GO.
     */
    struct bxe_softc *sc = (struct bxe_softc *)xsc;
    int i;

    if (!BXE_CORE_TRYLOCK(sc)) {
        /* just bail and try again next time */
        if ((sc->state == BXE_STATE_OPEN) &&
            (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
            /* schedule the next periodic callout */
            callout_reset(&sc->periodic_callout, hz,
                          bxe_periodic_callout_func, sc);
        }
        return;
    }

    /* interface went down or a stop was requested: do not re-arm */
    if ((sc->state != BXE_STATE_OPEN) ||
        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
        BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
        BXE_CORE_UNLOCK(sc);
        return;
    }

    /* Check for TX timeouts on any fastpath. */
    FOR_EACH_QUEUE(sc, i) {
        if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
            /* Ruh-Roh, chip was reset! */
            break;
        }
    }

    if (!CHIP_REV_IS_SLOW(sc)) {
        /*
         * This barrier is needed to ensure the ordering between the writing
         * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
         * the reading here.
         */
        mb();
        if (sc->port.pmf) {
            bxe_acquire_phy_lock(sc);
            elink_period_func(&sc->link_params, &sc->link_vars);
            bxe_release_phy_lock(sc);
        }
    }

    if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
        /* heartbeat exchange with the management firmware */
        int mb_idx = SC_FW_MB_IDX(sc);
        uint32_t drv_pulse;
        uint32_t mcp_pulse;

        ++sc->fw_drv_pulse_wr_seq;
        sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;

        drv_pulse = sc->fw_drv_pulse_wr_seq;
        bxe_drv_pulse(sc);

        mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
                     MCP_PULSE_SEQ_MASK);

        /*
         * The delta between driver pulse and mcp response should
         * be 1 (before mcp response) or 0 (after mcp response).
         */
        if ((drv_pulse != mcp_pulse) &&
            (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
            /* someone lost a heartbeat... */
            BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
                  drv_pulse, mcp_pulse);
        }
    }

    /* state is BXE_STATE_OPEN */
    bxe_stats_handle(sc, STATS_EVENT_UPDATE);

    BXE_CORE_UNLOCK(sc);

    /* re-arm for the next tick unless a stop has been requested */
    if ((sc->state == BXE_STATE_OPEN) &&
        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
        /* schedule the next periodic callout */
        callout_reset(&sc->periodic_callout, hz,
                      bxe_periodic_callout_func, sc);
    }
}
static void
bxe_periodic_start(struct bxe_softc *sc)
{
    /*
     * Arm the 1-second periodic callout.  PERIODIC_GO is published
     * with release semantics *before* the callout is armed so the
     * callout function observes it when it runs.
     */
    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
    callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
}
static void
bxe_periodic_stop(struct bxe_softc *sc)
{
    /*
     * Request the periodic callout to stop, then block in
     * callout_drain() until any in-flight invocation has finished.
     */
    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
    callout_drain(&sc->periodic_callout);
}
void
bxe_parity_recover(struct bxe_softc *sc)
{
    /*
     * Parity/error recovery state machine, driven repeatedly (also
     * via bxe_sp_err_timeout_task) until recovery either completes
     * (BXE_RECOVERY_DONE) or fails terminally (BXE_RECOVERY_FAILED).
     *
     * One function per engine becomes the "leader" by taking a HW
     * lock and performs the actual chip reset; the other functions
     * wait for the reset to finish and then reload themselves.
     */
    uint8_t global = FALSE;
    uint32_t error_recovered, error_unrecovered;

    /* terminal failure: nothing left to do short of a reboot */
    if ((sc->recovery_state == BXE_RECOVERY_FAILED) &&
        (sc->state == BXE_STATE_ERROR)) {
        BLOGE(sc, "RECOVERY failed, "
              "stack notified driver is NOT running! "
              "Please reboot/power cycle the system.\n");
        return;
    }

    while (1) {
        BLOGD(sc, DBG_SP,
              "%s sc=%p state=0x%x rec_state=0x%x error_status=%x\n",
              __func__, sc, sc->state, sc->recovery_state, sc->error_status);

        switch(sc->recovery_state) {
        case BXE_RECOVERY_INIT:
            bxe_chk_parity_attn(sc, &global, FALSE);

            /*
             * 4-port devices, MCP asserts and global errors are not
             * recoverable here: unload and fail the recovery.
             */
            if ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ||
                (sc->error_status & BXE_ERR_MCP_ASSERT) ||
                (sc->error_status & BXE_ERR_GLOBAL)) {
                BXE_CORE_LOCK(sc);
                if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
                    bxe_periodic_stop(sc);
                }
                bxe_nic_unload(sc, UNLOAD_RECOVERY, false);
                sc->state = BXE_STATE_ERROR;
                sc->recovery_state = BXE_RECOVERY_FAILED;
                BLOGE(sc, " No Recovery tried for error 0x%x"
                      " stack notified driver is NOT running!"
                      " Please reboot/power cycle the system.\n",
                      sc->error_status);
                BXE_CORE_UNLOCK(sc);
                return;
            }

            /* Try to get a LEADER_LOCK HW lock */
            if (bxe_trylock_leader_lock(sc)) {
                bxe_set_reset_in_progress(sc);
                /*
                 * Check if there is a global attention and if
                 * there was a global attention, set the global
                 * reset bit.
                 */
                if (global) {
                    bxe_set_reset_global(sc);
                }
                sc->is_leader = 1;
            }

            /* stop the periodic callout if the interface is still up */
            if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
                bxe_periodic_stop(sc);
            }

            BXE_CORE_LOCK(sc);
            bxe_nic_unload(sc, UNLOAD_RECOVERY, false);
            sc->recovery_state = BXE_RECOVERY_WAIT;
            BXE_CORE_UNLOCK(sc);

            /*
             * Ensure "is_leader", MCP command sequence and
             * "recovery_state" update values are seen on other
             * CPUs.
             */
            mb();
            break;  /* re-enter the switch in BXE_RECOVERY_WAIT */

        case BXE_RECOVERY_WAIT:

            if (sc->is_leader) {
                int other_engine = SC_PATH(sc) ? 0 : 1;
                bool other_load_status =
                    bxe_get_load_status(sc, other_engine);
                bool load_status =
                    bxe_get_load_status(sc, SC_PATH(sc));
                global = bxe_reset_is_global(sc);

                /*
                 * In case of a parity in a global block, let
                 * the first leader that performs a
                 * leader_reset() reset the global blocks in
                 * order to clear global attentions. Otherwise
                 * the gates will remain closed for that
                 * engine.
                 */
                if (load_status ||
                    (global && other_load_status)) {
                    /*
                     * Wait until all other functions get
                     * down.
                     */
                    taskqueue_enqueue_timeout(taskqueue_thread,
                                              &sc->sp_err_timeout_task, hz/10);
                    return;
                } else {
                    /*
                     * If all other functions got down
                     * try to bring the chip back to
                     * normal. In any case it's an exit
                     * point for a leader.
                     */
                    if (bxe_leader_reset(sc)) {
                        BLOGE(sc, "RECOVERY failed, "
                              "stack notified driver is NOT running!\n");
                        sc->recovery_state = BXE_RECOVERY_FAILED;
                        sc->state = BXE_STATE_ERROR;
                        mb();
                        return;
                    }

                    /*
                     * If we are here, means that the
                     * leader has succeeded and doesn't
                     * want to be a leader any more. Try
                     * to continue as a none-leader.
                     */
                    break;
                }
            } else { /* non-leader */
                if (!bxe_reset_is_done(sc, SC_PATH(sc))) {
                    /*
                     * Try to get a LEADER_LOCK HW lock as
                     * long as a former leader may have
                     * been unloaded by the user or
                     * released a leadership by another
                     * reason.
                     */
                    if (bxe_trylock_leader_lock(sc)) {
                        /*
                         * I'm a leader now! Restart a
                         * switch case.
                         */
                        sc->is_leader = 1;
                        break;
                    }

                    /* reset still in progress; poll again later */
                    taskqueue_enqueue_timeout(taskqueue_thread,
                                              &sc->sp_err_timeout_task, hz/10);
                    return;
                } else {
                    /*
                     * If there was a global attention, wait
                     * for it to be cleared.
                     */
                    if (bxe_reset_is_global(sc)) {
                        taskqueue_enqueue_timeout(taskqueue_thread,
                                                  &sc->sp_err_timeout_task, hz/10);
                        return;
                    }

                    error_recovered =
                        sc->eth_stats.recoverable_error;
                    error_unrecovered =
                        sc->eth_stats.unrecoverable_error;

                    /* reload ourselves now that the chip is back */
                    BXE_CORE_LOCK(sc);
                    sc->recovery_state =
                        BXE_RECOVERY_NIC_LOADING;
                    if (bxe_nic_load(sc, LOAD_NORMAL)) {
                        error_unrecovered++;
                        sc->recovery_state = BXE_RECOVERY_FAILED;
                        sc->state = BXE_STATE_ERROR;
                        BLOGE(sc, "Recovery is NOT successful, "
                              " state=0x%x recovery_state=0x%x error=%x\n",
                              sc->state, sc->recovery_state, sc->error_status);
                        sc->error_status = 0;
                    } else {
                        sc->recovery_state =
                            BXE_RECOVERY_DONE;
                        error_recovered++;
                        BLOGI(sc, "Recovery is successful from errors %x,"
                              " state=0x%x"
                              " recovery_state=0x%x \n", sc->error_status,
                              sc->state, sc->recovery_state);
                        mb();
                    }
                    sc->error_status = 0;
                    BXE_CORE_UNLOCK(sc);
                    sc->eth_stats.recoverable_error =
                        error_recovered;
                    sc->eth_stats.unrecoverable_error =
                        error_unrecovered;

                    return;
                }
            }
            /*
             * NOTE(review): every path above returns or breaks out of
             * the switch, so falling through to default appears to be
             * unreachable — confirm if cases are ever added here.
             */
        default:
            return;
        }
    }
}
void
bxe_handle_error(struct bxe_softc * sc)
{
    /*
     * Entry point for latched hardware errors: quiesce interrupts,
     * report link down, and kick off the parity recovery state
     * machine.  A recovery already in the WAIT state is left alone.
     */
    if(sc->recovery_state == BXE_RECOVERY_WAIT) {
        return;
    }

    if(sc->error_status) {
        if (sc->state == BXE_STATE_OPEN) {
            /* stop taking interrupts before touching the chip */
            bxe_int_disable(sc);
        }
        if (sc->link_vars.link_up) {
            if_link_state_change(sc->ifp, LINK_STATE_DOWN);
        }
        sc->recovery_state = BXE_RECOVERY_INIT;
        BLOGI(sc, "bxe%d: Recovery started errors 0x%x recovery state 0x%x\n",
              sc->unit, sc->error_status, sc->recovery_state);
        bxe_parity_recover(sc);
    }
}
/*
 * Deferred slow-path error task: optionally take a GRC dump for a
 * latched error and (re)drive the parity recovery state machine.
 * Scheduled from bxe_parity_recover() while waiting for other
 * functions / global attentions to clear.
 */
static void
bxe_sp_err_timeout_task(void *arg, int pending)
{
    struct bxe_softc *sc = (struct bxe_softc *)arg;

    BLOGD(sc, DBG_SP,
          "%s state = 0x%x rec state=0x%x error_status=%x\n",
          __func__, sc->state, sc->recovery_state, sc->error_status);

    /* recovery already failed terminally; nothing more to do */
    if ((sc->recovery_state == BXE_RECOVERY_FAILED) &&
        (sc->state == BXE_STATE_ERROR)) {
        return;
    }

    /* if can be taken */
    if ((sc->error_status) && (sc->trigger_grcdump)) {
        bxe_grc_dump(sc);
    }

    if (sc->recovery_state != BXE_RECOVERY_DONE) {
        bxe_handle_error(sc);
        bxe_parity_recover(sc);
    } else if (sc->error_status) {
        bxe_handle_error(sc);
    }
}
/* start the controller */
static __noinline int
bxe_nic_load(struct bxe_softc *sc,
             int load_mode)
{
    /*
     * Full NIC bring-up: allocate memory, negotiate the load with the
     * MCP, initialize hardware and queues, program MAC/RX filters,
     * bring up the PHY and start the periodic timer.  Must be called
     * with the core lock held.  Returns 0 on success or an errno; on
     * failure the numbered error labels unwind in reverse order of
     * setup.
     */
    uint32_t val;
    int load_code = 0;
    int i, rc = 0;

    BXE_CORE_LOCK_ASSERT(sc);

    BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");

    sc->state = BXE_STATE_OPENING_WAITING_LOAD;

    if (IS_PF(sc)) {
        /* must be called before memory allocation and HW init */
        bxe_ilt_set_info(sc);
    }

    sc->last_reported_link_state = LINK_STATE_UNKNOWN;

    /* --- memory allocation phase --- */
    bxe_set_fp_rx_buf_size(sc);

    if (bxe_alloc_fp_buffers(sc) != 0) {
        BLOGE(sc, "Failed to allocate fastpath memory\n");
        sc->state = BXE_STATE_CLOSED;
        rc = ENOMEM;
        goto bxe_nic_load_error0;
    }

    if (bxe_alloc_mem(sc) != 0) {
        sc->state = BXE_STATE_CLOSED;
        rc = ENOMEM;
        goto bxe_nic_load_error0;
    }

    if (bxe_alloc_fw_stats_mem(sc) != 0) {
        sc->state = BXE_STATE_CLOSED;
        rc = ENOMEM;
        goto bxe_nic_load_error0;
    }

    /* --- MCP handshake and HW init (PF only) --- */
    if (IS_PF(sc)) {
        /* set pf load just before approaching the MCP */
        bxe_set_pf_load(sc);

        /* if MCP exists send load request and analyze response */
        if (!BXE_NOMCP(sc)) {
            /* attempt to load pf */
            if (bxe_nic_load_request(sc, &load_code) != 0) {
                sc->state = BXE_STATE_CLOSED;
                rc = ENXIO;
                goto bxe_nic_load_error1;
            }

            /* what did the MCP say? */
            if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
                bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
                sc->state = BXE_STATE_CLOSED;
                rc = ENXIO;
                goto bxe_nic_load_error2;
            }
        } else {
            BLOGI(sc, "Device has no MCP!\n");
            load_code = bxe_nic_load_no_mcp(sc);
        }

        /* mark PMF if applicable */
        bxe_nic_load_pmf(sc, load_code);

        /* Init Function state controlling object */
        bxe_init_func_obj(sc);

        /* Initialize HW */
        if (bxe_init_hw(sc, load_code) != 0) {
            BLOGE(sc, "HW init failed\n");
            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
            sc->state = BXE_STATE_CLOSED;
            rc = ENXIO;
            goto bxe_nic_load_error2;
        }
    }

    /* set ALWAYS_ALIVE bit in shmem */
    sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
    bxe_drv_pulse(sc);
    sc->flags |= BXE_NO_PULSE;

    /* attach interrupts */
    if (bxe_interrupt_attach(sc) != 0) {
        sc->state = BXE_STATE_CLOSED;
        rc = ENXIO;
        goto bxe_nic_load_error2;
    }

    bxe_nic_init(sc, load_code);

    /* --- per-function objects, queues, RSS (PF only) --- */
    /* Init per-function objects */
    if (IS_PF(sc)) {
        bxe_init_objs(sc);
        // XXX bxe_iov_nic_init(sc);

        /* set AFEX default VLAN tag to an invalid value */
        sc->devinfo.mf_info.afex_def_vlan_tag = -1;
        // XXX bxe_nic_load_afex_dcc(sc, load_code);

        sc->state = BXE_STATE_OPENING_WAITING_PORT;
        rc = bxe_func_start(sc);
        if (rc) {
            BLOGE(sc, "Function start failed! rc = %d\n", rc);
            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
            sc->state = BXE_STATE_ERROR;
            goto bxe_nic_load_error3;
        }

        /* send LOAD_DONE command to MCP */
        if (!BXE_NOMCP(sc)) {
            load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
            if (!load_code) {
                BLOGE(sc, "MCP response failure, aborting\n");
                sc->state = BXE_STATE_ERROR;
                rc = ENXIO;
                goto bxe_nic_load_error3;
            }
        }

        rc = bxe_setup_leading(sc);
        if (rc) {
            BLOGE(sc, "Setup leading failed! rc = %d\n", rc);
            sc->state = BXE_STATE_ERROR;
            goto bxe_nic_load_error3;
        }

        FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
            rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
            if (rc) {
                BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
                sc->state = BXE_STATE_ERROR;
                goto bxe_nic_load_error3;
            }
        }

        rc = bxe_init_rss_pf(sc);
        if (rc) {
            BLOGE(sc, "PF RSS init failed\n");
            sc->state = BXE_STATE_ERROR;
            goto bxe_nic_load_error3;
        }
    }
    /* XXX VF */

    /* now when Clients are configured we are ready to work */
    sc->state = BXE_STATE_OPEN;

    /* Configure a ucast MAC */
    if (IS_PF(sc)) {
        rc = bxe_set_eth_mac(sc, TRUE);
    }
    /* rc stays 0 for a VF, so this check only trips on the PF path */
    if (rc) {
        BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
        sc->state = BXE_STATE_ERROR;
        goto bxe_nic_load_error3;
    }

    if (sc->port.pmf) {
        rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
        if (rc) {
            sc->state = BXE_STATE_ERROR;
            goto bxe_nic_load_error3;
        }
    }

    sc->link_params.feature_config_flags &=
        ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;

    /* start fast path */

    /* Initialize Rx filter */
    bxe_set_rx_mode(sc);

    /* start the Tx */
    switch (/* XXX load_mode */LOAD_OPEN) {
    case LOAD_NORMAL:
    case LOAD_OPEN:
        break;

    case LOAD_DIAG:
    case LOAD_LOOPBACK_EXT:
        sc->state = BXE_STATE_DIAG;
        break;

    default:
        break;
    }

    if (sc->port.pmf) {
        bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
    } else {
        bxe_link_status_update(sc);
    }

    /* start the periodic timer callout */
    bxe_periodic_start(sc);

    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
        /* mark driver is loaded in shmem2 */
        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
                  (val |
                   DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
                   DRV_FLAGS_CAPABILITIES_LOADED_L2));
    }

    /* wait for all pending SP commands to complete */
    if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
        BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
        bxe_periodic_stop(sc);
        bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
        return (ENXIO);
    }

    /* Tell the stack the driver is running! */
    if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);

    BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");

    return (0);

    /* --- error unwind, in reverse order of setup --- */
bxe_nic_load_error3:

    if (IS_PF(sc)) {
        bxe_int_disable_sync(sc, 1);

        /* clean out queued objects */
        bxe_squeeze_objects(sc);
    }

    bxe_interrupt_detach(sc);

bxe_nic_load_error2:

    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
    }

    sc->port.pmf = 0;

bxe_nic_load_error1:

    /* clear pf_load status, as it was already set */
    if (IS_PF(sc)) {
        bxe_clear_pf_load(sc);
    }

bxe_nic_load_error0:

    bxe_free_fw_stats_mem(sc);
    bxe_free_fp_buffers(sc);
    bxe_free_mem(sc);

    return (rc);
}
static int
bxe_init_locked(struct bxe_softc *sc)
{
    /*
     * Bring the interface up (core lock held).  Handles the case
     * where a parity error during a previous unload left a recovery
     * in progress: the first function loaded on the engine must
     * complete that recovery before loading normally.
     */
    int other_engine = SC_PATH(sc) ? 0 : 1;
    uint8_t other_load_status, load_status;
    uint8_t global = FALSE;
    int rc;

    BXE_CORE_LOCK_ASSERT(sc);

    /* check if the driver is already running */
    if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
        BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
        return (0);
    }

    /* a previously failed recovery permanently blocks init */
    if((sc->state == BXE_STATE_ERROR) &&
       (sc->recovery_state == BXE_RECOVERY_FAILED)) {
        BLOGE(sc, "Initialization not done, "
              "as previous recovery failed."
              "Reboot/Power-cycle the system\n" );
        return (ENXIO);
    }

    bxe_set_power_state(sc, PCI_PM_D0);

    /*
     * If parity occurred during the unload, then attentions and/or
     * RECOVERY_IN_PROGRES may still be set. If so we want the first function
     * loaded on the current engine to complete the recovery. Parity recovery
     * is only relevant for PF driver.
     */
    if (IS_PF(sc)) {
        other_load_status = bxe_get_load_status(sc, other_engine);
        load_status = bxe_get_load_status(sc, SC_PATH(sc));

        if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
            bxe_chk_parity_attn(sc, &global, TRUE)) {
            /* do/while(0) gives the success case a 'break' target */
            do {
                /*
                 * If there are attentions and they are in global blocks, set
                 * the GLOBAL_RESET bit regardless whether it will be this
                 * function that will complete the recovery or not.
                 */
                if (global) {
                    bxe_set_reset_global(sc);
                }

                /*
                 * Only the first function on the current engine should try
                 * to recover in open. In case of attentions in global blocks
                 * only the first in the chip should try to recover.
                 */
                if ((!load_status && (!global || !other_load_status)) &&
                    bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
                    BLOGI(sc, "Recovered during init\n");
                    break;
                }

                /* recovery has failed... */
                bxe_set_power_state(sc, PCI_PM_D3hot);
                sc->recovery_state = BXE_RECOVERY_FAILED;

                BLOGE(sc, "Recovery flow hasn't properly "
                      "completed yet, try again later. "
                      "If you still see this message after a "
                      "few retries then power cycle is required.\n");

                rc = ENXIO;
                goto bxe_init_locked_done;
            } while (0);
        }
    }

    sc->recovery_state = BXE_RECOVERY_DONE;

    rc = bxe_nic_load(sc, LOAD_OPEN);

bxe_init_locked_done:

    if (rc) {
        /* Tell the stack the driver is NOT running! */
        BLOGE(sc, "Initialization failed, "
              "stack notified driver is NOT running!\n");
        if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
    }

    return (rc);
}
/*
 * Bring the interface down (core lock held): a thin wrapper around a
 * normal NIC unload with link-down notification.
 */
static int
bxe_stop_locked(struct bxe_softc *sc)
{
    int rc;

    BXE_CORE_LOCK_ASSERT(sc);
    rc = bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE);
    return (rc);
}
/*
 * Handles controller initialization when called from an unlocked routine.
 * ifconfig calls this function.
 *
 * Returns:
 *   void
 */
static void
bxe_init(void *xsc)
{
    struct bxe_softc *sc;

    sc = (struct bxe_softc *)xsc;

    BXE_CORE_LOCK(sc);
    bxe_init_locked(sc);
    BXE_CORE_UNLOCK(sc);
}
-static int
+static void
bxe_init_ifnet(struct bxe_softc *sc)
{
if_t ifp;
int capabilities;
/* ifconfig entrypoint for media type/status reporting */
ifmedia_init(&sc->ifmedia, IFM_IMASK,
bxe_ifmedia_update,
bxe_ifmedia_status);
/* set the default interface values */
ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
BLOGI(sc, "IFMEDIA flags : %x\n", sc->ifmedia.ifm_media);
/* allocate the ifnet structure */
- if ((ifp = if_gethandle(IFT_ETHER)) == NULL) {
- BLOGE(sc, "Interface allocation failed!\n");
- return (ENXIO);
- }
+ ifp = if_gethandle(IFT_ETHER);
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
if_setioctlfn(ifp, bxe_ioctl);
if_setstartfn(ifp, bxe_tx_start);
if_setgetcounterfn(ifp, bxe_get_counter);
if_settransmitfn(ifp, bxe_tx_mq_start);
if_setqflushfn(ifp, bxe_mq_flush);
if_setinitfn(ifp, bxe_init);
if_setmtu(ifp, sc->mtu);
if_sethwassist(ifp, (CSUM_IP |
CSUM_TCP |
CSUM_UDP |
CSUM_TSO |
CSUM_TCP_IPV6 |
CSUM_UDP_IPV6));
capabilities =
(IFCAP_VLAN_MTU |
IFCAP_VLAN_HWTAGGING |
IFCAP_VLAN_HWTSO |
IFCAP_VLAN_HWFILTER |
IFCAP_VLAN_HWCSUM |
IFCAP_HWCSUM |
IFCAP_JUMBO_MTU |
IFCAP_LRO |
IFCAP_TSO4 |
IFCAP_TSO6 |
IFCAP_WOL_MAGIC);
if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */
if_setcapenable(ifp, if_getcapabilities(ifp));
if_setbaudrate(ifp, IF_Gbps(10));
/* XXX */
if_setsendqlen(ifp, sc->tx_ring_size);
if_setsendqready(ifp);
/* XXX */
sc->ifp = ifp;
/* attach to the Ethernet interface list */
ether_ifattach(ifp, sc->link_params.mac_addr);
/* Attach driver debugnet methods. */
DEBUGNET_SET(ifp, bxe);
-
- return (0);
}
/*
 * Release every PCI memory BAR previously mapped by
 * bxe_allocate_bars().  Unmapped slots (NULL resource) are skipped.
 */
static void
bxe_deallocate_bars(struct bxe_softc *sc)
{
    int i;

    for (i = 0; i < MAX_BARS; i++) {
        if (sc->bar[i].resource == NULL) {
            continue;
        }

        bus_release_resource(sc->dev,
                             SYS_RES_MEMORY,
                             sc->bar[i].rid,
                             sc->bar[i].resource);
        BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
              i, PCIR_BAR(i));
    }
}
static int
bxe_allocate_bars(struct bxe_softc *sc)
{
u_int flags;
int i;
memset(sc->bar, 0, sizeof(sc->bar));
for (i = 0; i < MAX_BARS; i++) {
/* memory resources reside at BARs 0, 2, 4 */
/* Run `pciconf -lb` to see mappings */
if ((i != 0) && (i != 2) && (i != 4)) {
continue;
}
sc->bar[i].rid = PCIR_BAR(i);
flags = RF_ACTIVE;
if (i == 0) {
flags |= RF_SHAREABLE;
}
if ((sc->bar[i].resource =
bus_alloc_resource_any(sc->dev,
SYS_RES_MEMORY,
&sc->bar[i].rid,
flags)) == NULL) {
return (0);
}
sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource);
sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %#jx-%#jx (%jd) -> %#jx\n",
i, PCIR_BAR(i),
rman_get_start(sc->bar[i].resource),
rman_get_end(sc->bar[i].resource),
rman_get_size(sc->bar[i].resource),
(uintmax_t)sc->bar[i].kva);
}
return (0);
}
static void
bxe_get_function_num(struct bxe_softc *sc)
{
    /*
     * Derive this PCI function's relative number, absolute number and
     * path (engine) id from the chip's ME register.
     */
    uint32_t val = 0;

    /*
     * Read the ME register to get the function number. The ME register
     * holds the relative-function number and absolute-function number. The
     * absolute-function number appears only in E2 and above. Before that
     * these bits always contained zero, therefore we cannot blindly use them.
     */

    val = REG_RD(sc, BAR_ME_REGISTER);

    sc->pfunc_rel =
        (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
    sc->path_id =
        (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;

    /* in 4-port mode functions interleave across both paths */
    if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
        sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
    } else {
        sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
    }

    BLOGD(sc, DBG_LOAD,
          "Relative function %d, Absolute function %d, Path %d\n",
          sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
}
static uint32_t
bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
{
    /*
     * Locate the multi-function configuration area in shared memory.
     * Pre-57712 chips place it immediately after the function
     * mailboxes; 57712 and later publish its address in shmem2
     * (mf_cfg_addr) when that field is valid.
     */
    uint32_t shmem2_size;
    uint32_t offset;
    uint32_t mf_cfg_offset_value;

    /* Non 57712 */
    offset = (SHMEM_RD(sc, func_mb) +
              (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));

    /* 57712 plus */
    if (sc->devinfo.shmem2_base != 0) {
        shmem2_size = SHMEM2_RD(sc, size);
        /* only trust mf_cfg_addr if shmem2 is large enough to contain it */
        if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
            mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
            if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
                offset = mf_cfg_offset_value;
            }
        }
    }

    return (offset);
}
/*
 * Read a register from the device's PCIe capability block.
 * Returns the register value, or 0 (with an error log) when the
 * PCIe capability cannot be found.
 */
static uint32_t
bxe_pcie_capability_read(struct bxe_softc *sc,
                         int reg,
                         int width)
{
    int pcie_reg;

    /* locate the PCIe capability; bail if it is absent */
    if ((pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) != 0) ||
        (pcie_reg == 0)) {
        BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
        return (0);
    }

    BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
    return (pci_read_config(sc->dev, (pcie_reg + reg), width));
}
static uint8_t
bxe_is_pcie_pending(struct bxe_softc *sc)
{
return (bxe_pcie_capability_read(sc, PCIER_DEVICE_STA, 2) &
PCIEM_STA_TRANSACTION_PND);
}
/*
* Walk the PCI capabiites list for the device to find what features are
* supported. These capabilites may be enabled/disabled by firmware so it's
* best to walk the list rather than make assumptions.
*/
static void
bxe_probe_pci_caps(struct bxe_softc *sc)
{
uint16_t link_status;
int reg;
/* check if PCI Power Management is enabled */
if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
if (reg != 0) {
BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
}
}
link_status = bxe_pcie_capability_read(sc, PCIER_LINK_STA, 2);
/* handle PCIe 2.0 workarounds for 57710 */
if (CHIP_IS_E1(sc)) {
/* workaround for 57710 errata E4_57710_27462 */
sc->devinfo.pcie_link_speed =
(REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
/* workaround for 57710 errata E4_57710_27488 */
sc->devinfo.pcie_link_width =
((link_status & PCIEM_LINK_STA_WIDTH) >> 4);
if (sc->devinfo.pcie_link_speed > 1) {
sc->devinfo.pcie_link_width =
((link_status & PCIEM_LINK_STA_WIDTH) >> 4) >> 1;
}
} else {
sc->devinfo.pcie_link_speed =
(link_status & PCIEM_LINK_STA_SPEED);
sc->devinfo.pcie_link_width =
((link_status & PCIEM_LINK_STA_WIDTH) >> 4);
}
BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
/* check if MSI capability is enabled */
if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
if (reg != 0) {
BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
}
}
/* check if MSI-X capability is enabled */
if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
if (reg != 0) {
BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
}
}
}
static int
bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
{
    /*
     * Parse multi-function config for switch-dependent (outer-VLAN)
     * mode: validate the outer VLAN tag and record the protocol
     * (iSCSI / FCoE / Ethernet) this function supports.  Returns 0 on
     * success, 1 if the outer VLAN is invalid.
     */
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    uint32_t val;

    /* get the outer vlan if we're in switch-dependent mode */

    val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
    mf_info->ext_id = (uint16_t)val;

    mf_info->multi_vnics_mode = 1;

    if (!VALID_OVLAN(mf_info->ext_id)) {
        BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
        return (1);
    }

    /* get the capabilities */
    if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
        FUNC_MF_CFG_PROTOCOL_ISCSI) {
        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
    } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
               FUNC_MF_CFG_PROTOCOL_FCOE) {
        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
    } else {
        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
    }

    mf_info->vnics_per_port =
        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;

    return (0);
}
/*
 * Translate this function's extended config word into MF_PROTO_SUPPORT_*
 * flags.  Returns 0 when the function is not enabled in the config.
 */
static uint32_t
bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
{
    uint32_t cfg;
    uint32_t protos = 0;

    cfg = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);

    if (!(cfg & MACP_FUNC_CFG_FLAGS_ENABLED)) {
        return (0);
    }

    if (cfg & MACP_FUNC_CFG_FLAGS_ETHERNET) {
        protos |= MF_PROTO_SUPPORT_ETHERNET;
    }
    if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
        protos |= MF_PROTO_SUPPORT_ISCSI;
    }
    if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
        protos |= MF_PROTO_SUPPORT_FCOE;
    }

    return (protos);
}
/*
 * Parse multi-function config for switch-independent mode.  There is
 * no outer VLAN here; any MAC config flag set for this function means
 * multi-function operation.  Always returns 0.
 */
static int
bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
{
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    uint32_t func_cfg;

    func_cfg = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);

    mf_info->multi_vnics_mode = ((func_cfg & MACP_FUNC_CFG_FLAGS_MASK) != 0);
    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
    mf_info->vnics_per_port =
        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;

    return (0);
}
static int
bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
{
struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
uint32_t e1hov_tag;
uint32_t func_config;
uint32_t niv_config;
mf_info->multi_vnics_mode = 1;
e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
mf_info->ext_id =
(uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
FUNC_MF_CFG_E1HOV_TAG_SHIFT);
mf_info->default_vlan =
(uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
FUNC_MF_CFG_AFEX_VLAN_SHIFT);
mf_info->niv_allowed_priorities =
(uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
mf_info->niv_default_cos =
(uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
mf_info->afex_vlan_mode =
((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
mf_info->niv_mba_enabled =
((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
mf_info->vnics_per_port =
(CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
return (0);
}
/*
 * bxe_check_valid_mf_cfg
 *
 * Log the parsed multi-function parameters and sanity check them.
 * For switch-dependent (SD) mode, additionally cross-check the outer
 * VLAN programming of every function sharing this port.
 *
 * Returns 0 if the configuration is consistent, 1 otherwise.
 */
static int
bxe_check_valid_mf_cfg(struct bxe_softc *sc)
{
struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
uint32_t mf_cfg1;
uint32_t mf_cfg2;
uint32_t ovlan1;
uint32_t ovlan2;
uint8_t i, j;
/* dump what bxe_get_mf_cfg_info() gathered, for debugging */
/* NOTE(review): logs SC_PORT as the "function" number - confirm intent */
BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
SC_PORT(sc));
BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
mf_info->mf_config[SC_VN(sc)]);
BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
mf_info->multi_vnics_mode);
BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
mf_info->vnics_per_port);
BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
mf_info->ext_id);
BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
mf_info->min_bw[0], mf_info->min_bw[1],
mf_info->min_bw[2], mf_info->min_bw[3]);
BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
mf_info->max_bw[0], mf_info->max_bw[1],
mf_info->max_bw[2], mf_info->max_bw[3]);
BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
sc->mac_addr_str);
/* various MF mode sanity checks... */
/* a function marked hidden should never have been enumerated */
if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
BLOGE(sc, "Enumerated function %d is marked as hidden\n",
SC_PORT(sc));
return (1);
}
/* multiple vnics per port is only meaningful in multi-vnic mode */
if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
mf_info->vnics_per_port, mf_info->multi_vnics_mode);
return (1);
}
if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
/* vnic id > 0 must have valid ovlan in switch-dependent mode */
if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
SC_VN(sc), OVLAN(sc));
return (1);
}
if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
mf_info->multi_vnics_mode, OVLAN(sc));
return (1);
}
/*
 * Verify all functions are either MF or SF mode. If MF, make
 * sure that all non-hidden functions have a valid ovlan. If SF,
 * make sure that all non-hidden functions have an invalid ovlan.
 */
FOREACH_ABS_FUNC_IN_PORT(sc, i) {
mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
(((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
BLOGE(sc, "mf_mode=SD function %d MF config "
"mismatch, multi_vnics_mode=%d ovlan=%d\n",
i, mf_info->multi_vnics_mode, ovlan1);
return (1);
}
}
/* Verify all funcs on the same port each have a different ovlan. */
FOREACH_ABS_FUNC_IN_PORT(sc, i) {
mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
/* iterate from the next function on the port to the max func */
for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
/* only non-hidden functions with valid ovlans may collide */
if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
VALID_OVLAN(ovlan1) &&
!(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
VALID_OVLAN(ovlan2) &&
(ovlan1 == ovlan2)) {
BLOGE(sc, "mf_mode=SD functions %d and %d "
"have the same ovlan (%d)\n",
i, j, ovlan1);
return (1);
}
}
}
} /* MULTI_FUNCTION_SD */
return (0);
}
/*
 * bxe_get_mf_cfg_info
 *
 * Determine the multi-function (MF) mode from shmem and populate
 * sc->devinfo.mf_info: mode (SD/SI/AFEX/single function), vnic counts,
 * path ovlan presence, per-mode details, and per-vnic min/max bandwidth.
 *
 * Returns 0 on success, 1 on an invalid or unknown configuration.
 */
static int
bxe_get_mf_cfg_info(struct bxe_softc *sc)
{
struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
uint32_t val, mac_upper;
uint8_t i, vnic;
/* initialize mf_info defaults (single function, no ovlan) */
mf_info->vnics_per_port = 1;
mf_info->multi_vnics_mode = FALSE;
mf_info->path_has_ovlan = FALSE;
mf_info->mf_mode = SINGLE_FUNCTION;
/* chips without MF capability always run single-function */
if (!CHIP_IS_MF_CAP(sc)) {
return (0);
}
if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
BLOGE(sc, "Invalid mf_cfg_base!\n");
return (1);
}
/* get the MF mode (switch dependent / independent / single-function) */
val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
{
case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
/* check for legal upper mac bytes; a default value means no MAC */
if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
mf_info->mf_mode = MULTI_FUNCTION_SI;
} else {
BLOGE(sc, "Invalid config for Switch Independent mode\n");
}
break;
case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
/* get outer vlan configuration; SD mode requires a programmed tag */
val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
mf_info->mf_mode = MULTI_FUNCTION_SD;
} else {
BLOGE(sc, "Invalid config for Switch Dependent mode\n");
}
break;
case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
/* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
return (0);
case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
/*
 * Mark MF mode as NIV if MCP version includes NPAR-SD support
 * and the MAC address is valid.
 */
mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
if ((SHMEM2_HAS(sc, afex_driver_support)) &&
(mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
mf_info->mf_mode = MULTI_FUNCTION_AFEX;
} else {
BLOGE(sc, "Invalid config for AFEX mode\n");
}
break;
default:
BLOGE(sc, "Unknown MF mode (0x%08x)\n",
(val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
return (1);
}
/* set path mf_mode (which could be different than function mf_mode) */
if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
mf_info->path_has_ovlan = TRUE;
} else if (mf_info->mf_mode == SINGLE_FUNCTION) {
/*
 * Decide on path multi vnics mode. If we're not in MF mode and in
 * 4-port mode, this is good enough to check vnic-0 of the other port
 * on the same path
 */
if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
uint8_t other_port = !(PORT_ID(sc) & 1);
uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
}
}
if (mf_info->mf_mode == SINGLE_FUNCTION) {
/* invalid MF config: only vnic 0 exists in SF mode */
if (SC_VN(sc) >= 1) {
BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
return (1);
}
return (0);
}
/* get the MF configuration for this function */
mf_info->mf_config[SC_VN(sc)] =
MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
/* dispatch to the mode-specific parser */
switch(mf_info->mf_mode)
{
case MULTI_FUNCTION_SD:
bxe_get_shmem_mf_cfg_info_sd(sc);
break;
case MULTI_FUNCTION_SI:
bxe_get_shmem_mf_cfg_info_si(sc);
break;
case MULTI_FUNCTION_AFEX:
bxe_get_shmem_mf_cfg_info_niv(sc);
break;
default:
BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
mf_info->mf_mode);
return (1);
}
/* get the congestion management parameters for each vnic on the port */
vnic = 0;
FOREACH_ABS_FUNC_IN_PORT(sc, i) {
/* get min/max bw */
val = MFCFG_RD(sc, func_mf_config[i].config);
mf_info->min_bw[vnic] =
((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
mf_info->max_bw[vnic] =
((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
vnic++;
}
return (bxe_check_valid_mf_cfg(sc));
}
/*
 * bxe_get_shmem_info
 *
 * Read hardware/port/link configuration from the shared-memory region
 * into sc->devinfo, sc->link_params and sc->port, then read the MF
 * configuration and the station MAC address. Always returns 0; a
 * missing MAC is logged and mac_addr_str is left empty.
 */
static int
bxe_get_shmem_info(struct bxe_softc *sc)
{
int port;
uint32_t mac_hi, mac_lo, val;
port = SC_PORT(sc);
mac_hi = mac_lo = 0;
sc->link_params.sc = sc;
sc->link_params.port = port;
/* get the hardware config info */
sc->devinfo.hw_config =
SHMEM_RD(sc, dev_info.shared_hw_config.config);
sc->devinfo.hw_config2 =
SHMEM_RD(sc, dev_info.shared_hw_config.config2);
sc->link_params.hw_led_mode =
((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
SHARED_HW_CFG_LED_MODE_SHIFT);
/* get the port feature config */
sc->port.config =
SHMEM_RD(sc, dev_info.port_feature_config[port].config);
/* get the link params (per-config speed capability masks) */
sc->link_params.speed_cap_mask[0] =
SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
sc->link_params.speed_cap_mask[1] =
SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
/* get the lane config */
sc->link_params.lane_config =
SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
/* get the link config; low bits also select the switch config */
val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
sc->port.link_config[ELINK_INT_PHY] = val;
sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
sc->port.link_config[ELINK_EXT_PHY1] =
SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
/* get the override preemphasis flag and enable it or turn it off */
val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
sc->link_params.feature_config_flags |=
ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
} else {
sc->link_params.feature_config_flags &=
~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
}
/* get the initial value of the link params */
sc->link_params.multi_phy_config =
SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
/* get external phy info */
sc->port.ext_phy_config =
SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
/* get the multifunction configuration */
/* NOTE(review): return value of bxe_get_mf_cfg_info() is ignored here */
bxe_get_mf_cfg_info(sc);
/* get the mac address: MF functions use the per-function MF config */
if (IS_MF(sc)) {
mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
} else {
mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
}
if ((mac_lo == 0) && (mac_hi == 0)) {
*sc->mac_addr_str = 0;
BLOGE(sc, "No Ethernet address programmed!\n");
} else {
/* assemble the 48-bit MAC from the hi (16-bit) and lo (32-bit) words */
sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
"%02x:%02x:%02x:%02x:%02x:%02x",
sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
}
return (0);
}
/*
 * bxe_get_tunable_params
 *
 * Validate the module-level tunables (bxe_*), clamping out-of-range
 * values back to sane defaults (with a warning), then copy them into
 * the softc. Also derives the queue count from the interrupt mode and
 * CPU count.
 */
static void
bxe_get_tunable_params(struct bxe_softc *sc)
{
/* sanity checks */
if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
(bxe_interrupt_mode != INTR_MODE_MSI) &&
(bxe_interrupt_mode != INTR_MODE_MSIX)) {
BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
bxe_interrupt_mode = INTR_MODE_MSIX;
}
if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
bxe_queue_count = 0;
}
/* 0 means auto-size to the full usable RX BD chain */
if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
if (bxe_max_rx_bufs == 0) {
bxe_max_rx_bufs = RX_BD_USABLE;
} else {
BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
bxe_max_rx_bufs = 2048;
}
}
/* host coalescing tick values must be in 1..100 */
if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
bxe_hc_rx_ticks = 25;
}
if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
bxe_hc_tx_ticks = 50;
}
if (bxe_max_aggregation_size == 0) {
bxe_max_aggregation_size = TPA_AGG_SIZE;
}
if (bxe_max_aggregation_size > 0xffff) {
BLOGW(sc, "invalid max_aggregation_size (%d)\n",
bxe_max_aggregation_size);
bxe_max_aggregation_size = TPA_AGG_SIZE;
}
/* -1 leaves the PCIe max read request size at the BIOS default */
if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
bxe_mrrs = -1;
}
if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
bxe_autogreeen = 0;
}
if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
bxe_udp_rss = 0;
}
/* pull in user settings */
sc->interrupt_mode = bxe_interrupt_mode;
sc->max_rx_bufs = bxe_max_rx_bufs;
sc->hc_rx_ticks = bxe_hc_rx_ticks;
sc->hc_tx_ticks = bxe_hc_tx_ticks;
sc->max_aggregation_size = bxe_max_aggregation_size;
sc->mrrs = bxe_mrrs;
sc->autogreeen = bxe_autogreeen;
sc->udp_rss = bxe_udp_rss;
/* INTx gets a single queue; MSI/MSI-X default to one queue per CPU */
if (bxe_interrupt_mode == INTR_MODE_INTX) {
sc->num_queues = 1;
} else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
sc->num_queues =
min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
MAX_RSS_CHAINS);
if (sc->num_queues > mp_ncpus) {
sc->num_queues = mp_ncpus;
}
}
BLOGD(sc, DBG_LOAD,
"User Config: "
"debug=0x%lx "
"interrupt_mode=%d "
"queue_count=%d "
"hc_rx_ticks=%d "
"hc_tx_ticks=%d "
"rx_budget=%d "
"max_aggregation_size=%d "
"mrrs=%d "
"autogreeen=%d "
"udp_rss=%d\n",
bxe_debug,
sc->interrupt_mode,
sc->num_queues,
sc->hc_rx_ticks,
sc->hc_tx_ticks,
bxe_rx_budget,
sc->max_aggregation_size,
sc->mrrs,
sc->autogreeen,
sc->udp_rss);
}
/*
 * Classify the media attached to the current PHY and cache the
 * matching ifmedia word in sc->media. Returns the corresponding
 * PORT_* port type (FIBRE, DA, TP, OTHER).
 */
static int
bxe_media_detect(struct bxe_softc *sc)
{
    uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
    int port_type = PORT_OTHER;

    switch (sc->link_params.phy[phy_idx].media_type) {
    case ELINK_ETH_PHY_SFPP_10G_FIBER:
    case ELINK_ETH_PHY_XFP_FIBER:
        port_type = PORT_FIBRE;
        sc->media = IFM_10G_SR;
        BLOGI(sc, "Found 10Gb Fiber media.\n");
        break;
    case ELINK_ETH_PHY_SFP_1G_FIBER:
        port_type = PORT_FIBRE;
        sc->media = IFM_1000_SX;
        BLOGI(sc, "Found 1Gb Fiber media.\n");
        break;
    case ELINK_ETH_PHY_KR:
    case ELINK_ETH_PHY_CX4:
        port_type = PORT_FIBRE;
        sc->media = IFM_10G_CX4;
        BLOGI(sc, "Found 10GBase-CX4 media.\n");
        break;
    case ELINK_ETH_PHY_DA_TWINAX:
        port_type = PORT_DA;
        sc->media = IFM_10G_TWINAX;
        BLOGI(sc, "Found 10Gb Twinax media.\n");
        break;
    case ELINK_ETH_PHY_BASE_T:
        /* BASE-T can be either 10G or 1G depending on the speed caps */
        port_type = PORT_TP;
        if (sc->link_params.speed_cap_mask[0] &
            PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
            sc->media = IFM_10G_T;
            BLOGI(sc, "Found 10GBase-T media.\n");
        } else {
            sc->media = IFM_1000_T;
            BLOGI(sc, "Found 1000Base-T media.\n");
        }
        break;
    case ELINK_ETH_PHY_NOT_PRESENT:
        port_type = PORT_OTHER;
        sc->media = 0;
        BLOGI(sc, "Media not present.\n");
        break;
    case ELINK_ETH_PHY_UNSPECIFIED:
    default:
        port_type = PORT_OTHER;
        sc->media = 0;
        BLOGI(sc, "Unknown media!\n");
        break;
    }

    return (port_type);
}
/* extract a bit field from 'value' using fname's paired _MASK/_SHIFT macros */
#define GET_FIELD(value, fname) \
(((value) & (fname##_MASK)) >> (fname##_SHIFT))
/* function ID encoded in an IGU CAM entry */
#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
/* vector number encoded in an IGU CAM entry */
#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
/*
 * bxe_get_igu_cam_info
 *
 * Determine this PF's IGU status block layout. In backward-compatible
 * mode the layout is computed arithmetically; in normal mode the IGU
 * mapping CAM is scanned for entries belonging to this PF.
 *
 * Returns 0 on success, -1 if no CAM entries were found for this PF.
 */
static int
bxe_get_igu_cam_info(struct bxe_softc *sc)
{
int pfid = SC_FUNC(sc);
int igu_sb_id;
uint32_t val;
uint8_t fid, igu_sb_cnt = 0;
sc->igu_base_sb = 0xff;
if (CHIP_INT_MODE_IS_BC(sc)) {
/* backward-compatible mode: fixed layout, no CAM scan needed */
int vn = SC_VN(sc);
igu_sb_cnt = sc->igu_sb_cnt;
sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
FP_SB_MAX_E1x);
sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
(CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
return (0);
}
/* IGU in normal mode - read CAM */
for (igu_sb_id = 0;
igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
igu_sb_id++) {
val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
continue;
}
fid = IGU_FID(val);
/* only consider PF entries that belong to this function */
if ((fid & IGU_FID_ENCODE_IS_PF)) {
if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
continue;
}
if (IGU_VEC(val) == 0) {
/* default status block */
sc->igu_dsb_id = igu_sb_id;
} else {
/* remember the first non-default SB and count the rest */
if (sc->igu_base_sb == 0xff) {
sc->igu_base_sb = igu_sb_id;
}
igu_sb_cnt++;
}
}
}
/*
 * Due to new PF resource allocation by MFW T7.4 and above, it's optional
 * that number of CAM entries will not be equal to the value advertised in
 * PCI. Driver should use the minimal value of both as the actual status
 * block count
 */
sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
if (igu_sb_cnt == 0) {
BLOGE(sc, "CAM configuration error\n");
return (-1);
}
return (0);
}
/*
* Gather various information from the device config space, the device itself,
* shmem, and the user input.
*/
static int
bxe_get_device_info(struct bxe_softc *sc)
{
    uint32_t val;
    int rc;

    /* Get the PCI identity data for the device */
    sc->devinfo.vendor_id    = pci_get_vendor(sc->dev);
    sc->devinfo.device_id    = pci_get_device(sc->dev);
    sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
    sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);

    /* get the chip revision (chip metal comes from pci config space) */
    sc->devinfo.chip_id     =
    sc->link_params.chip_id =
        (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) |
         ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) |
         (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) |
         ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0));

    /* force 57811 according to MISC register */
    if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
        if (CHIP_IS_57810(sc)) {
            sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
                                   (sc->devinfo.chip_id & 0x0000ffff));
        } else if (CHIP_IS_57810_MF(sc)) {
            sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
                                   (sc->devinfo.chip_id & 0x0000ffff));
        }
        sc->devinfo.chip_id |= 0x1;
    }

    BLOGD(sc, DBG_LOAD,
          "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
          sc->devinfo.chip_id,
          ((sc->devinfo.chip_id >> 16) & 0xffff),
          ((sc->devinfo.chip_id >> 12) & 0xf),
          ((sc->devinfo.chip_id >> 4) & 0xff),
          ((sc->devinfo.chip_id >> 0) & 0xf));

    /* detect single-port devices */
    val = (REG_RD(sc, 0x2874) & 0x55);
    if ((sc->devinfo.chip_id & 0x1) ||
        (CHIP_IS_E1(sc) && val) ||
        (CHIP_IS_E1H(sc) && (val == 0x55))) {
        sc->flags |= BXE_ONE_PORT_FLAG;
        BLOGD(sc, DBG_LOAD, "single port device\n");
    }

    /* set the doorbell size */
    sc->doorbell_size = (1 << BXE_DB_SHIFT);

    /* determine whether the device is in 2 port or 4 port mode */
    sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/
    if (CHIP_IS_E2E3(sc)) {
        /*
         * Read port4mode_en_ovwr[0]:
         * If 1, four port mode is in port4mode_en_ovwr[1].
         * If 0, four port mode is in port4mode_en[0].
         */
        val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
        if (val & 1) {
            val = ((val >> 1) & 1);
        } else {
            val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
        }

        sc->devinfo.chip_port_mode =
            (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;

        BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
    }

    /* get the function and path info for the device */
    bxe_get_function_num(sc);

    /* get the shared memory base address */
    sc->devinfo.shmem_base     =
    sc->link_params.shmem_base =
        REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
    sc->devinfo.shmem2_base =
        REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
                                  MISC_REG_GENERIC_CR_0));

    BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
          sc->devinfo.shmem_base, sc->devinfo.shmem2_base);

    if (!sc->devinfo.shmem_base) {
        /* this should ONLY prevent upcoming shmem reads */
        BLOGI(sc, "MCP not active\n");
        sc->flags |= BXE_NO_MCP_FLAG;
        return (0);
    }

    /* make sure the shared memory contents are valid */
    val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
    if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
        (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
        BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
        return (0);
    }
    BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);

    /* get the bootcode version */
    sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
    snprintf(sc->devinfo.bc_ver_str,
             sizeof(sc->devinfo.bc_ver_str),
             "%d.%d.%d",
             ((sc->devinfo.bc_ver >> 24) & 0xff),
             ((sc->devinfo.bc_ver >> 16) & 0xff),
             ((sc->devinfo.bc_ver >> 8) & 0xff));
    BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);

    /* get the bootcode shmem address */
    sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
    /* fix: format string previously read "0x08%x" (misplaced '%') */
    BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base);

    /* clean indirect addresses as they're not used */
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
    if (IS_PF(sc)) {
        REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
        REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
        REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
        REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
        if (CHIP_IS_E1x(sc)) {
            REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
            REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
            REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
            REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
        }

        /*
         * Enable internal target-read (in case we are probed after PF
         * FLR). Must be done prior to any BAR read access. Only for
         * 57712 and up
         */
        if (!CHIP_IS_E1x(sc)) {
            REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
        }
    }

    /* get the nvram size */
    val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
    sc->devinfo.flash_size =
        (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
    BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);

    /* get PCI capabilites */
    bxe_probe_pci_caps(sc);

    bxe_set_power_state(sc, PCI_PM_D0);

    /* get various configuration parameters from shmem */
    bxe_get_shmem_info(sc);

    /* MSI-X table size caps the number of IGU status blocks */
    if (sc->devinfo.pcie_msix_cap_reg != 0) {
        val = pci_read_config(sc->dev,
                              (sc->devinfo.pcie_msix_cap_reg +
                               PCIR_MSIX_CTRL),
                              2);
        sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
    } else {
        sc->igu_sb_cnt = 1;
    }

    sc->igu_base_addr = BAR_IGU_INTMEM;

    /* initialize IGU parameters */
    if (CHIP_IS_E1x(sc)) {
        sc->devinfo.int_block = INT_BLOCK_HC;
        sc->igu_dsb_id = DEF_SB_IGU_ID;
        sc->igu_base_sb = 0;
    } else {
        sc->devinfo.int_block = INT_BLOCK_IGU;

        /* do not allow device reset during IGU info processing */
        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);

        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);

        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
            int tout = 5000;

            BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");

            /* switch the IGU out of backward-compatible mode and
             * wait (up to ~5s) for its memories to finish resetting */
            val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
            REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
            REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);

            while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
                tout--;
                DELAY(1000);
            }

            if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
                BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
                bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
                return (-1);
            }
        }

        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
            BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
            sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
        } else {
            BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
        }

        rc = bxe_get_igu_cam_info(sc);

        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);

        if (rc) {
            return (rc);
        }
    }

    /*
     * Get base FW non-default (fast path) status block ID. This value is
     * used to initialize the fw_sb_id saved on the fp/queue structure to
     * determine the id used by the FW.
     */
    if (CHIP_IS_E1x(sc)) {
        sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
    } else {
        /*
         * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
         * the same queue are indicated on the same IGU SB). So we prefer
         * FW and IGU SBs to be the same value.
         */
        sc->base_fw_ndsb = sc->igu_base_sb;
    }

    BLOGD(sc, DBG_LOAD,
          "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
          sc->igu_dsb_id, sc->igu_base_sb,
          sc->igu_sb_cnt, sc->base_fw_ndsb);

    elink_phy_probe(&sc->link_params);

    return (0);
}
/*
 * bxe_link_settings_supported
 *
 * Aggregate the supported link attributes of all configured PHYs into
 * sc->port.supported[], look up the PHY MDIO address for the given
 * switch configuration, and mask out speeds not permitted by the
 * per-configuration speed capability masks from NVRAM.
 */
static void
bxe_link_settings_supported(struct bxe_softc *sc,
uint32_t switch_cfg)
{
uint32_t cfg_size = 0;
uint32_t idx;
uint8_t port = SC_PORT(sc);
/* aggregation of supported attributes of all external phys */
sc->port.supported[0] = 0;
sc->port.supported[1] = 0;
switch (sc->link_params.num_phys) {
case 1:
sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
cfg_size = 1;
break;
case 2:
sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
cfg_size = 1;
break;
case 3:
/* dual external PHYs; honor the NVRAM swap setting */
if (sc->link_params.multi_phy_config &
PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
sc->port.supported[1] =
sc->link_params.phy[ELINK_EXT_PHY1].supported;
sc->port.supported[0] =
sc->link_params.phy[ELINK_EXT_PHY2].supported;
} else {
sc->port.supported[0] =
sc->link_params.phy[ELINK_EXT_PHY1].supported;
sc->port.supported[1] =
sc->link_params.phy[ELINK_EXT_PHY2].supported;
}
cfg_size = 2;
break;
}
if (!(sc->port.supported[0] || sc->port.supported[1])) {
BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
SHMEM_RD(sc,
dev_info.port_hw_config[port].external_phy_config),
SHMEM_RD(sc,
dev_info.port_hw_config[port].external_phy_config2));
return;
}
/* resolve the PHY MDIO address per chip family / switch config */
if (CHIP_IS_E3(sc))
sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
else {
switch (switch_cfg) {
case ELINK_SWITCH_CFG_1G:
sc->port.phy_addr =
REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
break;
case ELINK_SWITCH_CFG_10G:
sc->port.phy_addr =
REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
break;
default:
BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
sc->port.link_config[0]);
return;
}
}
BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
/* mask what we support according to speed_cap_mask per configuration */
for (idx = 0; idx < cfg_size; idx++) {
if (!(sc->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
}
if (!(sc->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
}
if (!(sc->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
}
if (!(sc->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
}
if (!(sc->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
}
if (!(sc->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
}
if (!(sc->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
}
if (!(sc->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
}
}
BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
sc->port.supported[0], sc->port.supported[1]);
ELINK_DEBUG_P2(sc, "PHY supported 0=0x%08x 1=0x%08x\n",
sc->port.supported[0], sc->port.supported[1]);
}
/*
 * bxe_link_settings_requested
 *
 * Translate each NVRAM link_config entry into requested line speed,
 * duplex, advertised capabilities and flow control in sc->link_params
 * and sc->port.advertising. Invalid speed requests (not present in
 * the supported mask) are logged and abort further processing.
 */
static void
bxe_link_settings_requested(struct bxe_softc *sc)
{
uint32_t link_config;
uint32_t idx;
uint32_t cfg_size = 0;
sc->port.advertising[0] = 0;
sc->port.advertising[1] = 0;
/* one config entry for 1-2 PHYs, two entries for dual external PHYs */
switch (sc->link_params.num_phys) {
case 1:
case 2:
cfg_size = 1;
break;
case 3:
cfg_size = 2;
break;
}
for (idx = 0; idx < cfg_size; idx++) {
sc->link_params.req_duplex[idx] = DUPLEX_FULL;
link_config = sc->port.link_config[idx];
switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
case PORT_FEATURE_LINK_SPEED_AUTO:
if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
sc->port.advertising[idx] |= sc->port.supported[idx];
/* BCM84833 additionally advertises 100M modes */
if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
sc->port.advertising[idx] |=
(ELINK_SUPPORTED_100baseT_Half |
ELINK_SUPPORTED_100baseT_Full);
} else {
/* force 10G, no AN */
sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
sc->port.advertising[idx] |=
(ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
continue;
}
break;
case PORT_FEATURE_LINK_SPEED_10M_FULL:
if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
ADVERTISED_TP);
} else {
BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x\n",
link_config, sc->link_params.speed_cap_mask[idx]);
return;
}
break;
case PORT_FEATURE_LINK_SPEED_10M_HALF:
if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
sc->link_params.req_duplex[idx] = DUPLEX_HALF;
sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
ADVERTISED_TP);
ELINK_DEBUG_P1(sc, "driver requesting DUPLEX_HALF req_duplex = %x!\n",
sc->link_params.req_duplex[idx]);
} else {
BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x\n",
link_config, sc->link_params.speed_cap_mask[idx]);
return;
}
break;
case PORT_FEATURE_LINK_SPEED_100M_FULL:
if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
ADVERTISED_TP);
} else {
BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x\n",
link_config, sc->link_params.speed_cap_mask[idx]);
return;
}
break;
case PORT_FEATURE_LINK_SPEED_100M_HALF:
if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
sc->link_params.req_duplex[idx] = DUPLEX_HALF;
sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
ADVERTISED_TP);
} else {
BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x\n",
link_config, sc->link_params.speed_cap_mask[idx]);
return;
}
break;
case PORT_FEATURE_LINK_SPEED_1G:
if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
ADVERTISED_TP);
} else {
BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x\n",
link_config, sc->link_params.speed_cap_mask[idx]);
return;
}
break;
case PORT_FEATURE_LINK_SPEED_2_5G:
if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
ADVERTISED_TP);
} else {
BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x\n",
link_config, sc->link_params.speed_cap_mask[idx]);
return;
}
break;
case PORT_FEATURE_LINK_SPEED_10G_CX4:
if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
ADVERTISED_FIBRE);
} else {
BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x\n",
link_config, sc->link_params.speed_cap_mask[idx]);
return;
}
break;
case PORT_FEATURE_LINK_SPEED_20G:
sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
break;
default:
/* unknown speed selection: log it and fall back to autoneg */
BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x\n",
link_config, sc->link_params.speed_cap_mask[idx]);
sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
sc->port.advertising[idx] = sc->port.supported[idx];
break;
}
/* flow control: AUTO requires autoneg support, else none */
sc->link_params.req_flow_ctrl[idx] =
(link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
} else {
bxe_set_requested_fc(sc);
}
}
BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
"req_flow_ctrl=0x%x advertising=0x%x\n",
sc->link_params.req_line_speed[idx],
sc->link_params.req_duplex[idx],
sc->link_params.req_flow_ctrl[idx],
sc->port.advertising[idx]);
ELINK_DEBUG_P3(sc, "req_line_speed=%d req_duplex=%d "
"advertising=0x%x\n",
sc->link_params.req_line_speed[idx],
sc->link_params.req_duplex[idx],
sc->port.advertising[idx]);
}
}
/*
 * bxe_get_phy_info
 *
 * Derive supported/requested link settings from the shmem data read in
 * bxe_get_shmem_info(), apply the AutoGrEEEN and EEE policies, and
 * detect the attached media type.
 */
static void
bxe_get_phy_info(struct bxe_softc *sc)
{
    uint8_t port = SC_PORT(sc);
    uint32_t config = sc->port.config;
    uint32_t eee_mode;

    /* shmem data already read in bxe_get_shmem_info() */

    ELINK_DEBUG_P3(sc, "lane_config=0x%08x speed_cap_mask0=0x%08x "
                       "link_config0=0x%08x\n",
                   sc->link_params.lane_config,
                   sc->link_params.speed_cap_mask[0],
                   sc->port.link_config[0]);

    bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
    bxe_link_settings_requested(sc);

    /* AutoGrEEEN: the tunable force-on/off overrides the NVRAM setting */
    if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
        sc->link_params.feature_config_flags |=
            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
    } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
        sc->link_params.feature_config_flags &=
            ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
    } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
        sc->link_params.feature_config_flags |=
            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
    }

    /* configure link feature according to nvram value */
    eee_mode =
        (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
          PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
         PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
    if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
        sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
                                    ELINK_EEE_MODE_ENABLE_LPI |
                                    ELINK_EEE_MODE_OUTPUT_TIME);
    } else {
        sc->link_params.eee_mode = 0;
    }

    /* get the media type */
    bxe_media_detect(sc);
    /* fix: format string lacked a conversion specifier for sc->media */
    ELINK_DEBUG_P1(sc, "detected media type = 0x%x\n", sc->media);
}
/*
 * Gather driver parameters at attach time: user tunables, the TX/RX
 * descriptor ring sizes, and the Wake-on-LAN setting.
 */
static void
bxe_get_params(struct bxe_softc *sc)
{
    /* Pull in the user-tunable settings first. */
    bxe_get_tunable_params(sc);

    /* Ring sizes: use the full usable BD space in both directions. */
    sc->rx_ring_size = RX_BD_USABLE;
    sc->tx_ring_size = TX_BD_USABLE;

    /* XXX Wake-on-LAN is forced off. */
    sc->wol = 0;
}
/*
 * Build the init-mode flags bitmap describing the chip environment
 * (ASIC/FPGA/emulation), port count, chip family (E2/E3 and revision),
 * single- vs multi-function mode, and host endianness.  The result is
 * stored via INIT_MODE_FLAGS() for the init code to consult.
 */
static void
bxe_set_modes_bitmap(struct bxe_softc *sc)
{
    uint32_t mode = 0;

    /* chip environment */
    if (CHIP_REV_IS_FPGA(sc))
        SET_FLAGS(mode, MODE_FPGA);
    else if (CHIP_REV_IS_EMUL(sc))
        SET_FLAGS(mode, MODE_EMUL);
    else
        SET_FLAGS(mode, MODE_ASIC);

    /* port count */
    if (CHIP_IS_MODE_4_PORT(sc))
        SET_FLAGS(mode, MODE_PORT4);
    else
        SET_FLAGS(mode, MODE_PORT2);

    /* chip family / revision */
    if (CHIP_IS_E2(sc)) {
        SET_FLAGS(mode, MODE_E2);
    } else if (CHIP_IS_E3(sc)) {
        SET_FLAGS(mode, MODE_E3);
        if (CHIP_REV(sc) == CHIP_REV_Ax)
            SET_FLAGS(mode, MODE_E3_A0);
        else /* CHIP_REV_Bx */
            SET_FLAGS(mode, MODE_E3_B0 | MODE_COS3);
    }

    /* single- vs multi-function */
    if (!IS_MF(sc)) {
        SET_FLAGS(mode, MODE_SF);
    } else {
        SET_FLAGS(mode, MODE_MF);
        switch (sc->devinfo.mf_info.mf_mode) {
        case MULTI_FUNCTION_SD:
            SET_FLAGS(mode, MODE_MF_SD);
            break;
        case MULTI_FUNCTION_SI:
            SET_FLAGS(mode, MODE_MF_SI);
            break;
        case MULTI_FUNCTION_AFEX:
            SET_FLAGS(mode, MODE_MF_AFEX);
            break;
        }
    }

#if defined(__LITTLE_ENDIAN)
    SET_FLAGS(mode, MODE_LITTLE_ENDIAN);
#else /* __BIG_ENDIAN */
    SET_FLAGS(mode, MODE_BIG_ENDIAN);
#endif

    INIT_MODE_FLAGS(sc) = mode;
}
/*
 * Allocate all host software interface DMA memory for the device:
 * the parent bus DMA tag, the default status block, event queue, slow
 * path block and queue, the firmware decompression buffer, and then,
 * per fastpath queue, the status block, TX/RX/RCQ/SGE descriptor
 * chains and the mbuf DMA tags and maps.
 *
 * Returns 0 on success, 1 on any failure.  The non-fastpath
 * allocations are unwound inline on failure; unwinding partial
 * fastpath allocations is a known TODO (see the XXX comments) -- on
 * that path the caller is expected to invoke bxe_free_hsi_mem().
 */
static int
bxe_alloc_hsi_mem(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    bus_addr_t busaddr;
    int max_agg_queues;
    int max_segments;
    bus_size_t max_size;
    bus_size_t max_seg_size;
    char buf[32];
    int rc;
    int i, j;

    /* XXX zero out all vars here and call bxe_alloc_hsi_mem on error */

    /* allocate the parent bus DMA tag */
    rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
                            1,                        /* alignment */
                            0,                        /* boundary limit */
                            BUS_SPACE_MAXADDR,        /* restricted low */
                            BUS_SPACE_MAXADDR,        /* restricted hi */
                            NULL,                     /* addr filter() */
                            NULL,                     /* addr filter() arg */
                            BUS_SPACE_MAXSIZE_32BIT,  /* max map size */
                            BUS_SPACE_UNRESTRICTED,   /* num discontinuous */
                            BUS_SPACE_MAXSIZE_32BIT,  /* max seg size */
                            0,                        /* flags */
                            NULL,                     /* lock() */
                            NULL,                     /* lock() arg */
                            &sc->parent_dma_tag);     /* returned dma tag */
    if (rc != 0) {
        BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
        return (1);
    }

    /************************/
    /* DEFAULT STATUS BLOCK */
    /************************/

    if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
                      &sc->def_sb_dma, "default status block") != 0) {
        /* XXX */
        bus_dma_tag_destroy(sc->parent_dma_tag);
        return (1);
    }

    sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;

    /***************/
    /* EVENT QUEUE */
    /***************/

    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
                      &sc->eq_dma, "event queue") != 0) {
        /* XXX unwind everything allocated so far */
        bxe_dma_free(sc, &sc->def_sb_dma);
        sc->def_sb = NULL;
        bus_dma_tag_destroy(sc->parent_dma_tag);
        return (1);
    }

    sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr;

    /*************/
    /* SLOW PATH */
    /*************/

    if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
                      &sc->sp_dma, "slow path") != 0) {
        /* XXX unwind everything allocated so far */
        bxe_dma_free(sc, &sc->eq_dma);
        sc->eq = NULL;
        bxe_dma_free(sc, &sc->def_sb_dma);
        sc->def_sb = NULL;
        bus_dma_tag_destroy(sc->parent_dma_tag);
        return (1);
    }

    sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;

    /*******************/
    /* SLOW PATH QUEUE */
    /*******************/

    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
                      &sc->spq_dma, "slow path queue") != 0) {
        /* XXX unwind everything allocated so far */
        bxe_dma_free(sc, &sc->sp_dma);
        sc->sp = NULL;
        bxe_dma_free(sc, &sc->eq_dma);
        sc->eq = NULL;
        bxe_dma_free(sc, &sc->def_sb_dma);
        sc->def_sb = NULL;
        bus_dma_tag_destroy(sc->parent_dma_tag);
        return (1);
    }

    sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;

    /***************************/
    /* FW DECOMPRESSION BUFFER */
    /***************************/

    if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
                      "fw decompression buffer") != 0) {
        /* XXX unwind everything allocated so far */
        bxe_dma_free(sc, &sc->spq_dma);
        sc->spq = NULL;
        bxe_dma_free(sc, &sc->sp_dma);
        sc->sp = NULL;
        bxe_dma_free(sc, &sc->eq_dma);
        sc->eq = NULL;
        bxe_dma_free(sc, &sc->def_sb_dma);
        sc->def_sb = NULL;
        bus_dma_tag_destroy(sc->parent_dma_tag);
        return (1);
    }

    sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;

    /* zlib stream state for firmware decompression */
    if ((sc->gz_strm =
         malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
        /* XXX unwind everything allocated so far */
        bxe_dma_free(sc, &sc->gz_buf_dma);
        sc->gz_buf = NULL;
        bxe_dma_free(sc, &sc->spq_dma);
        sc->spq = NULL;
        bxe_dma_free(sc, &sc->sp_dma);
        sc->sp = NULL;
        bxe_dma_free(sc, &sc->eq_dma);
        sc->eq = NULL;
        bxe_dma_free(sc, &sc->def_sb_dma);
        sc->def_sb = NULL;
        bus_dma_tag_destroy(sc->parent_dma_tag);
        return (1);
    }

    /*************/
    /* FASTPATHS */
    /*************/

    /* allocate DMA memory for each fastpath structure */
    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        fp->sc = sc;
        fp->index = i;

        /*******************/
        /* FP STATUS BLOCK */
        /*******************/

        snprintf(buf, sizeof(buf), "fp %d status block", i);
        if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
                          &fp->sb_dma, buf) != 0) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to alloc %s\n", buf);
            return (1);
        } else {
            /* E2/E3 use a different status block layout than E1/E1H */
            if (CHIP_IS_E2E3(sc)) {
                fp->status_block.e2_sb =
                    (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
            } else {
                fp->status_block.e1x_sb =
                    (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
            }
        }

        /******************/
        /* FP TX BD CHAIN */
        /******************/

        snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
                          &fp->tx_dma, buf) != 0) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to alloc %s\n", buf);
            return (1);
        } else {
            fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
        }

        /* link together the tx bd chain pages */
        for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
            /* index into the tx bd chain array to last entry per page */
            struct eth_tx_next_bd *tx_next_bd =
                &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
            /* point to the next page and wrap from last page */
            busaddr = (fp->tx_dma.paddr +
                       (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
            tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
            tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
        }

        /******************/
        /* FP RX BD CHAIN */
        /******************/

        snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
                          &fp->rx_dma, buf) != 0) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to alloc %s\n", buf);
            return (1);
        } else {
            fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
        }

        /*
         * link together the rx bd chain pages; note the "- 2": the
         * final slots of each page hold the next-page pointer, not a
         * usable BD.
         */
        for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
            /* index into the rx bd chain array to last entry per page */
            struct eth_rx_bd *rx_bd =
                &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
            /* point to the next page and wrap from last page */
            busaddr = (fp->rx_dma.paddr +
                       (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
            rx_bd->addr_hi = htole32(U64_HI(busaddr));
            rx_bd->addr_lo = htole32(U64_LO(busaddr));
        }

        /*******************/
        /* FP RX RCQ CHAIN */
        /*******************/

        snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
                          &fp->rcq_dma, buf) != 0) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to alloc %s\n", buf);
            return (1);
        } else {
            fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
        }

        /* link together the rcq chain pages */
        for (j = 1; j <= RCQ_NUM_PAGES; j++) {
            /* index into the rcq chain array to last entry per page */
            struct eth_rx_cqe_next_page *rx_cqe_next =
                (struct eth_rx_cqe_next_page *)
                &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
            /* point to the next page and wrap from last page */
            busaddr = (fp->rcq_dma.paddr +
                       (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
            rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
            rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
        }

        /*******************/
        /* FP RX SGE CHAIN */
        /*******************/

        snprintf(buf, sizeof(buf), "fp %d sge chain", i);
        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
                          &fp->rx_sge_dma, buf) != 0) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to alloc %s\n", buf);
            return (1);
        } else {
            fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
        }

        /* link together the sge chain pages (same next-page scheme as rx) */
        for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
            /* index into the sge chain array to last entry per page */
            struct eth_rx_sge *rx_sge =
                &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
            /* point to the next page and wrap from last page */
            busaddr = (fp->rx_sge_dma.paddr +
                       (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
            rx_sge->addr_hi = htole32(U64_HI(busaddr));
            rx_sge->addr_lo = htole32(U64_LO(busaddr));
        }

        /***********************/
        /* FP TX MBUF DMA MAPS */
        /***********************/

        /* set required sizes before mapping to conserve resources */
        if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
            max_size = BXE_TSO_MAX_SIZE;
            max_segments = BXE_TSO_MAX_SEGMENTS;
            max_seg_size = BXE_TSO_MAX_SEG_SIZE;
        } else {
            max_size = (MCLBYTES * BXE_MAX_SEGMENTS);
            max_segments = BXE_MAX_SEGMENTS;
            max_seg_size = MCLBYTES;
        }

        /* create a dma tag for the tx mbufs */
        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                                1,                  /* alignment */
                                0,                  /* boundary limit */
                                BUS_SPACE_MAXADDR,  /* restricted low */
                                BUS_SPACE_MAXADDR,  /* restricted hi */
                                NULL,               /* addr filter() */
                                NULL,               /* addr filter() arg */
                                max_size,           /* max map size */
                                max_segments,       /* num discontinuous */
                                max_seg_size,       /* max seg size */
                                0,                  /* flags */
                                NULL,               /* lock() */
                                NULL,               /* lock() arg */
                                &fp->tx_mbuf_tag);  /* returned dma tag */
        if (rc != 0) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to create dma tag for "
                      "'fp %d tx mbufs' (%d)\n", i, rc);
            return (1);
        }

        /* create dma maps for each of the tx mbuf clusters */
        for (j = 0; j < TX_BD_TOTAL; j++) {
            if (bus_dmamap_create(fp->tx_mbuf_tag,
                                  BUS_DMA_NOWAIT,
                                  &fp->tx_mbuf_chain[j].m_map)) {
                /* XXX unwind and free previous fastpath allocations */
                BLOGE(sc, "Failed to create dma map for "
                          "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
                return (1);
            }
        }

        /***********************/
        /* FP RX MBUF DMA MAPS */
        /***********************/

        /* create a dma tag for the rx mbufs */
        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                                1,                  /* alignment */
                                0,                  /* boundary limit */
                                BUS_SPACE_MAXADDR,  /* restricted low */
                                BUS_SPACE_MAXADDR,  /* restricted hi */
                                NULL,               /* addr filter() */
                                NULL,               /* addr filter() arg */
                                MJUM9BYTES,         /* max map size */
                                1,                  /* num discontinuous */
                                MJUM9BYTES,         /* max seg size */
                                0,                  /* flags */
                                NULL,               /* lock() */
                                NULL,               /* lock() arg */
                                &fp->rx_mbuf_tag);  /* returned dma tag */
        if (rc != 0) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to create dma tag for "
                      "'fp %d rx mbufs' (%d)\n", i, rc);
            return (1);
        }

        /* create dma maps for each of the rx mbuf clusters */
        for (j = 0; j < RX_BD_TOTAL; j++) {
            if (bus_dmamap_create(fp->rx_mbuf_tag,
                                  BUS_DMA_NOWAIT,
                                  &fp->rx_mbuf_chain[j].m_map)) {
                /* XXX unwind and free previous fastpath allocations */
                BLOGE(sc, "Failed to create dma map for "
                          "'fp %d rx mbuf %d' (%d)\n", i, j, rc);
                return (1);
            }
        }

        /* create dma map for the spare rx mbuf cluster */
        if (bus_dmamap_create(fp->rx_mbuf_tag,
                              BUS_DMA_NOWAIT,
                              &fp->rx_mbuf_spare_map)) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to create dma map for "
                      "'fp %d spare rx mbuf' (%d)\n", i, rc);
            return (1);
        }

        /***************************/
        /* FP RX SGE MBUF DMA MAPS */
        /***************************/

        /* create a dma tag for the rx sge mbufs */
        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                                1,                  /* alignment */
                                0,                  /* boundary limit */
                                BUS_SPACE_MAXADDR,  /* restricted low */
                                BUS_SPACE_MAXADDR,  /* restricted hi */
                                NULL,               /* addr filter() */
                                NULL,               /* addr filter() arg */
                                BCM_PAGE_SIZE,      /* max map size */
                                1,                  /* num discontinuous */
                                BCM_PAGE_SIZE,      /* max seg size */
                                0,                  /* flags */
                                NULL,               /* lock() */
                                NULL,               /* lock() arg */
                                &fp->rx_sge_mbuf_tag); /* returned dma tag */
        if (rc != 0) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to create dma tag for "
                      "'fp %d rx sge mbufs' (%d)\n", i, rc);
            return (1);
        }

        /* create dma maps for the rx sge mbuf clusters */
        for (j = 0; j < RX_SGE_TOTAL; j++) {
            if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
                                  BUS_DMA_NOWAIT,
                                  &fp->rx_sge_mbuf_chain[j].m_map)) {
                /* XXX unwind and free previous fastpath allocations */
                BLOGE(sc, "Failed to create dma map for "
                          "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc);
                return (1);
            }
        }

        /* create dma map for the spare rx sge mbuf cluster */
        if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
                              BUS_DMA_NOWAIT,
                              &fp->rx_sge_mbuf_spare_map)) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to create dma map for "
                      "'fp %d spare rx sge mbuf' (%d)\n", i, rc);
            return (1);
        }

        /***************************/
        /* FP RX TPA MBUF DMA MAPS */
        /***************************/

        /* create dma maps for the rx tpa mbuf clusters (reuse rx tag) */
        max_agg_queues = MAX_AGG_QS(sc);

        for (j = 0; j < max_agg_queues; j++) {
            if (bus_dmamap_create(fp->rx_mbuf_tag,
                                  BUS_DMA_NOWAIT,
                                  &fp->rx_tpa_info[j].bd.m_map)) {
                /* XXX unwind and free previous fastpath allocations */
                BLOGE(sc, "Failed to create dma map for "
                          "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc);
                return (1);
            }
        }

        /* create dma map for the spare rx tpa mbuf cluster */
        if (bus_dmamap_create(fp->rx_mbuf_tag,
                              BUS_DMA_NOWAIT,
                              &fp->rx_tpa_info_mbuf_spare_map)) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to create dma map for "
                      "'fp %d spare rx tpa mbuf' (%d)\n", i, rc);
            return (1);
        }

        bxe_init_sge_ring_bit_mask(fp);
    }

    return (0);
}
/*
 * Free everything allocated by bxe_alloc_hsi_mem(), in roughly reverse
 * order: per-fastpath DMA memory, mbuf maps and tags first, then the
 * shared firmware buffer, slow path, event queue, default status block,
 * and finally the parent DMA tag.  Safe to call after a partial
 * allocation failure: every step checks whether its resource exists.
 */
static void
bxe_free_hsi_mem(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int max_agg_queues;
    int i, j;

    if (sc->parent_dma_tag == NULL) {
        return; /* assume nothing was allocated */
    }

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];

        /*******************/
        /* FP STATUS BLOCK */
        /*******************/

        bxe_dma_free(sc, &fp->sb_dma);
        memset(&fp->status_block, 0, sizeof(fp->status_block));

        /******************/
        /* FP TX BD CHAIN */
        /******************/

        bxe_dma_free(sc, &fp->tx_dma);
        fp->tx_chain = NULL;

        /******************/
        /* FP RX BD CHAIN */
        /******************/

        bxe_dma_free(sc, &fp->rx_dma);
        fp->rx_chain = NULL;

        /*******************/
        /* FP RX RCQ CHAIN */
        /*******************/

        bxe_dma_free(sc, &fp->rcq_dma);
        fp->rcq_chain = NULL;

        /*******************/
        /* FP RX SGE CHAIN */
        /*******************/

        bxe_dma_free(sc, &fp->rx_sge_dma);
        fp->rx_sge_chain = NULL;

        /***********************/
        /* FP TX MBUF DMA MAPS */
        /***********************/

        if (fp->tx_mbuf_tag != NULL) {
            /* unload + destroy each map before destroying the tag */
            for (j = 0; j < TX_BD_TOTAL; j++) {
                if (fp->tx_mbuf_chain[j].m_map != NULL) {
                    bus_dmamap_unload(fp->tx_mbuf_tag,
                                      fp->tx_mbuf_chain[j].m_map);
                    bus_dmamap_destroy(fp->tx_mbuf_tag,
                                       fp->tx_mbuf_chain[j].m_map);
                }
            }
            bus_dma_tag_destroy(fp->tx_mbuf_tag);
            fp->tx_mbuf_tag = NULL;
        }

        /***********************/
        /* FP RX MBUF DMA MAPS */
        /***********************/

        if (fp->rx_mbuf_tag != NULL) {
            for (j = 0; j < RX_BD_TOTAL; j++) {
                if (fp->rx_mbuf_chain[j].m_map != NULL) {
                    bus_dmamap_unload(fp->rx_mbuf_tag,
                                      fp->rx_mbuf_chain[j].m_map);
                    bus_dmamap_destroy(fp->rx_mbuf_tag,
                                       fp->rx_mbuf_chain[j].m_map);
                }
            }

            if (fp->rx_mbuf_spare_map != NULL) {
                bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
                bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
            }

            /***************************/
            /* FP RX TPA MBUF DMA MAPS */
            /***************************/

            /* the TPA maps share the rx mbuf tag, so free them here too */
            max_agg_queues = MAX_AGG_QS(sc);

            for (j = 0; j < max_agg_queues; j++) {
                if (fp->rx_tpa_info[j].bd.m_map != NULL) {
                    bus_dmamap_unload(fp->rx_mbuf_tag,
                                      fp->rx_tpa_info[j].bd.m_map);
                    bus_dmamap_destroy(fp->rx_mbuf_tag,
                                       fp->rx_tpa_info[j].bd.m_map);
                }
            }

            if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
                bus_dmamap_unload(fp->rx_mbuf_tag,
                                  fp->rx_tpa_info_mbuf_spare_map);
                bus_dmamap_destroy(fp->rx_mbuf_tag,
                                   fp->rx_tpa_info_mbuf_spare_map);
            }

            bus_dma_tag_destroy(fp->rx_mbuf_tag);
            fp->rx_mbuf_tag = NULL;
        }

        /***************************/
        /* FP RX SGE MBUF DMA MAPS */
        /***************************/

        if (fp->rx_sge_mbuf_tag != NULL) {
            for (j = 0; j < RX_SGE_TOTAL; j++) {
                if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
                    bus_dmamap_unload(fp->rx_sge_mbuf_tag,
                                      fp->rx_sge_mbuf_chain[j].m_map);
                    bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
                                       fp->rx_sge_mbuf_chain[j].m_map);
                }
            }

            if (fp->rx_sge_mbuf_spare_map != NULL) {
                bus_dmamap_unload(fp->rx_sge_mbuf_tag,
                                  fp->rx_sge_mbuf_spare_map);
                bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
                                   fp->rx_sge_mbuf_spare_map);
            }

            bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
            fp->rx_sge_mbuf_tag = NULL;
        }
    }

    /***************************/
    /* FW DECOMPRESSION BUFFER */
    /***************************/

    bxe_dma_free(sc, &sc->gz_buf_dma);
    sc->gz_buf = NULL;
    free(sc->gz_strm, M_DEVBUF);
    sc->gz_strm = NULL;

    /*******************/
    /* SLOW PATH QUEUE */
    /*******************/

    bxe_dma_free(sc, &sc->spq_dma);
    sc->spq = NULL;

    /*************/
    /* SLOW PATH */
    /*************/

    bxe_dma_free(sc, &sc->sp_dma);
    sc->sp = NULL;

    /***************/
    /* EVENT QUEUE */
    /***************/

    bxe_dma_free(sc, &sc->eq_dma);
    sc->eq = NULL;

    /************************/
    /* DEFAULT STATUS BLOCK */
    /************************/

    bxe_dma_free(sc, &sc->def_sb_dma);
    sc->def_sb = NULL;

    bus_dma_tag_destroy(sc->parent_dma_tag);
    sc->parent_dma_tag = NULL;
}
/*
* Previous driver DMAE transaction may have occurred when pre-boot stage
* ended and boot began. This would invalidate the addresses of the
* transaction, resulting in was-error bit set in the PCI causing all
* hw-to-host PCIe transactions to timeout. If this happened we want to clear
* the interrupt which detected this from the pglueb and the was-done bit
*/
/*
 * Clear the pglueb 'was-error' indication left behind by a DMAE
 * transaction that was interrupted between pre-boot and boot (see the
 * block comment above).  E1/E1H chips do not have this logic.
 */
static void
bxe_prev_interrupted_dmae(struct bxe_softc *sc)
{
    uint32_t sts;

    if (CHIP_IS_E1x(sc))
        return;

    sts = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
    if ((sts & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) == 0)
        return;

    BLOGD(sc, DBG_LOAD,
          "Clearing 'was-error' bit that was set in pglueb");
    REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
}
/*
 * Tell the MCP that the previous-driver unload is complete, asking it
 * to skip the link reset.  Returns 0 on success, -1 if the MCP did not
 * respond.
 */
static int
bxe_prev_mcp_done(struct bxe_softc *sc)
{
    uint32_t resp;

    resp = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
                          DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
    if (resp == 0) {
        BLOGE(sc, "MCP response failure, aborting\n");
        return (-1);
    }

    return (0);
}
/*
 * Look up this device's entry in the global previous-unload list,
 * matching on PCI bus, slot, and chip path.  Returns the node or NULL.
 * Caller is expected to hold bxe_prev_mtx.
 */
static struct bxe_prev_list_node *
bxe_prev_path_get_entry(struct bxe_softc *sc)
{
    struct bxe_prev_list_node *ent;

    LIST_FOREACH(ent, &bxe_prev_list, node) {
        if (ent->bus == sc->pcie_bus &&
            ent->slot == sc->pcie_device &&
            ent->path == SC_PATH(sc)) {
            return (ent);
        }
    }

    return (NULL);
}
static uint8_t
bxe_prev_is_path_marked(struct bxe_softc *sc)
{
struct bxe_prev_list_node *tmp;
int rc = FALSE;
mtx_lock(&bxe_prev_mtx);
tmp = bxe_prev_path_get_entry(sc);
if (tmp) {
if (tmp->aer) {
BLOGD(sc, DBG_LOAD,
"Path %d/%d/%d was marked by AER\n",
sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
} else {
rc = TRUE;
BLOGD(sc, DBG_LOAD,
"Path %d/%d/%d was already cleaned from previous drivers\n",
sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
}
}
mtx_unlock(&bxe_prev_mtx);
return (rc);
}
/*
 * Record that the previous-driver unload finished for this path.  If an
 * entry already exists, only its AER flag is cleared/logged; otherwise a
 * new node is allocated (outside the lock, M_NOWAIT) and inserted.
 * Returns 0 on success, -1 on allocation failure.
 */
static int
bxe_prev_mark_path(struct bxe_softc *sc,
                   uint8_t after_undi)
{
    struct bxe_prev_list_node *ent;

    mtx_lock(&bxe_prev_mtx);

    /* Check whether the entry for this path already exists */
    ent = bxe_prev_path_get_entry(sc);
    if (ent != NULL) {
        if (!ent->aer) {
            BLOGD(sc, DBG_LOAD,
                  "Re-marking AER in path %d/%d/%d\n",
                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
        } else {
            BLOGD(sc, DBG_LOAD,
                  "Removing AER indication from path %d/%d/%d\n",
                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
            ent->aer = 0;
        }

        mtx_unlock(&bxe_prev_mtx);
        return (0);
    }

    mtx_unlock(&bxe_prev_mtx);

    /* No entry yet: create one for this path and add it */
    ent = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
                 (M_NOWAIT | M_ZERO));
    if (ent == NULL) {
        BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
        return (-1);
    }

    ent->bus  = sc->pcie_bus;
    ent->slot = sc->pcie_device;
    ent->path = SC_PATH(sc);
    ent->aer  = 0;
    ent->undi = after_undi ? (1 << SC_PORT(sc)) : 0;

    mtx_lock(&bxe_prev_mtx);

    BLOGD(sc, DBG_LOAD,
          "Marked path %d/%d/%d - finished previous unload\n",
          sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
    LIST_INSERT_HEAD(&bxe_prev_list, ent, node);

    mtx_unlock(&bxe_prev_mtx);

    return (0);
}
/*
 * Issue a Function Level Reset via the bootcode.  Only supported on E2
 * and later silicon with bootcode >= REQ_BC_VER_4_INITIATE_FLR; returns
 * -1 if FLR is unavailable, 0 once the request has been sent (the reset
 * proceeds even if pending PCIe transactions never drained).
 */
static int
bxe_do_flr(struct bxe_softc *sc)
{
    int attempt;

    /* only E2 and onwards support FLR */
    if (CHIP_IS_E1x(sc)) {
        BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
        return (-1);
    }

    /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
    if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
        BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
              sc->devinfo.bc_ver);
        return (-1);
    }

    /*
     * Poll for the Transaction Pending bit to clear, waiting 100ms,
     * 200ms, then 400ms between successive attempts.
     */
    for (attempt = 0; attempt < 4; attempt++) {
        if (attempt != 0) {
            DELAY(((1 << (attempt - 1)) * 100) * 1000);
        }
        if (!bxe_is_pcie_pending(sc)) {
            break;
        }
    }

    if (attempt == 4) {
        BLOGE(sc, "PCIE transaction is not cleared, "
                  "proceeding with reset anyway\n");
    }

    BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
    bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);

    return (0);
}
/*
 * Saved MAC register addresses and values, captured while the MACs are
 * closed during a previous-driver unload (bxe_prev_unload_close_mac)
 * and restored after the common reset (bxe_prev_unload_common).  A zero
 * address means that MAC was not touched.
 */
struct bxe_mac_vals {
    uint32_t xmac_addr;   /* XMAC control register address */
    uint32_t xmac_val;    /* saved XMAC control value */
    uint32_t emac_addr;   /* EMAC enable register address */
    uint32_t emac_val;    /* saved EMAC enable value */
    uint32_t umac_addr;   /* UMAC command-config register address */
    uint32_t umac_val;    /* saved UMAC command-config value */
    uint32_t bmac_addr;   /* BMAC control register address */
    uint32_t bmac_val[2]; /* saved BMAC control value, two 32-bit halves */
};
/*
 * Disable RX on whatever MAC a previous driver left enabled (BMAC/EMAC
 * on pre-E3, XMAC/UMAC on E3), so the BRB can drain before reset.  The
 * registers that were modified are recorded in *vals (a zero address
 * marks "untouched") so the caller can restore them afterwards.
 */
static void
bxe_prev_unload_close_mac(struct bxe_softc *sc,
                          struct bxe_mac_vals *vals)
{
    uint32_t val, base_addr, offset, mask, reset_reg;
    uint8_t mac_stopped = FALSE;
    uint8_t port = SC_PORT(sc);
    uint32_t wb_data[2];

    /* reset addresses as they also mark which values were changed */
    vals->bmac_addr = 0;
    vals->umac_addr = 0;
    vals->xmac_addr = 0;
    vals->emac_addr = 0;

    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);

    if (!CHIP_IS_E3(sc)) {
        /* pre-E3: BMAC and EMAC */
        val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
        mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
        /* only touch the BMAC if it is out of reset and enabled */
        if ((mask & reset_reg) && val) {
            BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
            base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
                                    : NIG_REG_INGRESS_BMAC0_MEM;
            offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
                                    : BIGMAC_REGISTER_BMAC_CONTROL;

            /*
             * use rd/wr since we cannot use dmae. This is safe
             * since MCP won't access the bus due to the request
             * to unload, and no function on the path can be
             * loaded at this time.
             */
            wb_data[0] = REG_RD(sc, base_addr + offset);
            wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
            vals->bmac_addr = base_addr + offset;
            vals->bmac_val[0] = wb_data[0];
            vals->bmac_val[1] = wb_data[1];
            /* clear only the RX enable bit, preserving the rest */
            wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
            REG_WR(sc, vals->bmac_addr, wb_data[0]);
            REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
        }

        BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
        vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
        vals->emac_val = REG_RD(sc, vals->emac_addr);
        REG_WR(sc, vals->emac_addr, 0);
        mac_stopped = TRUE;
    } else {
        /* E3: XMAC and UMAC */
        if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
            BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
            base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
            /* pulse bit 1 of PFC_CTRL_HI low then high before disabling */
            val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
            vals->xmac_addr = base_addr + XMAC_REG_CTRL;
            vals->xmac_val = REG_RD(sc, vals->xmac_addr);
            REG_WR(sc, vals->xmac_addr, 0);
            mac_stopped = TRUE;
        }

        mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
        if (mask & reset_reg) {
            BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
            base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
            vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
            vals->umac_val = REG_RD(sc, vals->umac_addr);
            REG_WR(sc, vals->umac_addr, 0);
            mac_stopped = TRUE;
        }
    }

    /* give in-flight frames time to drain after stopping a MAC */
    if (mac_stopped) {
        DELAY(20000);
    }
}
/*
 * UNDI RX producer helpers: the per-port producer lives in TSTORM
 * internal memory as one 32-bit word, BD producer in the high 16 bits
 * and RCQ producer in the low 16 bits.
 */
#define BXE_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
#define BXE_PREV_UNDI_RCQ(val) ((val) & 0xffff)        /* low half: RCQ producer */
#define BXE_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)   /* high half: BD producer */
#define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) /* repack both halves */
/*
 * Advance the UNDI driver's packed BD/RCQ RX producer for the given
 * port by 'inc' entries (used while draining the BRB during a
 * previous-driver unload).
 */
static void
bxe_prev_unload_undi_inc(struct bxe_softc *sc,
                         uint8_t port,
                         uint8_t inc)
{
    uint32_t prod;
    uint16_t rcq_prod, bd_prod;

    /* read the packed producer, bump each 16-bit half, write it back */
    prod = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
    rcq_prod = BXE_PREV_UNDI_RCQ(prod) + inc;
    bd_prod  = BXE_PREV_UNDI_BD(prod) + inc;
    prod = BXE_PREV_UNDI_PROD(rcq_prod, bd_prod);
    REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), prod);

    BLOGD(sc, DBG_LOAD,
          "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
          port, bd_prod, rcq_prod);
}
/*
 * 'Common' previous-driver unload flow: close the MAC RX paths and LLH
 * filters, wait for the BRB to drain (manually advancing the UNDI
 * producer if an UNDI driver was detected), perform the common reset,
 * restore the saved MAC registers, mark the path as cleaned, and report
 * completion to the MCP.  Returns the bxe_prev_mcp_done() result, or
 * the bxe_prev_mark_path() error.
 */
static int
bxe_prev_unload_common(struct bxe_softc *sc)
{
    uint32_t reset_reg, tmp_reg = 0, rc;
    uint8_t prev_undi = FALSE;
    struct bxe_mac_vals mac_vals;
    uint32_t timer_count = 1000;
    uint32_t prev_brb;

    /*
     * It is possible a previous function received 'common' answer,
     * but hasn't loaded yet, therefore creating a scenario of
     * multiple functions receiving 'common' on the same path.
     */
    BLOGD(sc, DBG_LOAD, "Common unload Flow\n");

    memset(&mac_vals, 0, sizeof(mac_vals));

    /* another function already cleaned this path - nothing left to do */
    if (bxe_prev_is_path_marked(sc)) {
        return (bxe_prev_mcp_done(sc));
    }

    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);

    /* Reset should be performed after BRB is emptied */
    if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
        /* Close the MAC Rx to prevent BRB from filling up */
        bxe_prev_unload_close_mac(sc, &mac_vals);

        /* close LLH filters towards the BRB */
        elink_set_rx_filter(&sc->link_params, 0);

        /*
         * Check if the UNDI driver was previously loaded.
         * UNDI driver initializes CID offset for normal bell to 0x7
         */
        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
            tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
            if (tmp_reg == 0x7) {
                BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
                prev_undi = TRUE;
                /* clear the UNDI indication */
                REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
                /* clear possible idle check errors */
                REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
            }
        }

        /* wait until BRB is empty */
        tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
        while (timer_count) {
            prev_brb = tmp_reg;

            tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
            if (!tmp_reg) {
                break;
            }

            BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);

            /* reset timer as long as BRB actually gets emptied */
            if (prev_brb > tmp_reg) {
                timer_count = 1000;
            } else {
                timer_count--;
            }

            /* If UNDI resides in memory, manually increment it */
            if (prev_undi) {
                bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
            }

            DELAY(10);
        }

        if (!timer_count) {
            BLOGE(sc, "Failed to empty BRB\n");
        }
    }

    /* No packets are in the pipeline, path is ready for reset */
    bxe_reset_common(sc);

    /* restore whichever MAC registers were saved by close_mac */
    if (mac_vals.xmac_addr) {
        REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
    }
    if (mac_vals.umac_addr) {
        REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
    }
    if (mac_vals.emac_addr) {
        REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
    }
    if (mac_vals.bmac_addr) {
        REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
        REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
    }

    rc = bxe_prev_mark_path(sc, prev_undi);
    if (rc) {
        bxe_prev_mcp_done(sc);
        return (rc);
    }

    return (bxe_prev_mcp_done(sc));
}
/*
 * 'Uncommon' previous-driver unload flow: if the path was already
 * cleaned, just complete the MCP handshake; otherwise try an FLR when
 * the running firmware matches ours.  If FLR is not possible, close the
 * MCP request and return BXE_PREV_WAIT_NEEDED so the caller retries.
 */
static int
bxe_prev_unload_uncommon(struct bxe_softc *sc)
{
    int rc;

    BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");

    /* Test if previous unload process was already finished for this path */
    if (bxe_prev_is_path_marked(sc)) {
        return (bxe_prev_mcp_done(sc));
    }

    BLOGD(sc, DBG_LOAD, "Path is unmarked\n");

    /*
     * If function has FLR capabilities, and existing FW version matches
     * the one required, then FLR will be sufficient to clean any residue
     * left by previous driver
     */
    rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
    if (rc == 0) {
        /* fw version is good */
        BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
        rc = bxe_do_flr(sc);
    }

    if (rc == 0) {
        BLOGD(sc, DBG_LOAD, "FLR successful\n");
        return (0);
    }

    BLOGD(sc, DBG_LOAD, "Could not FLR\n");

    /* Close the MCP request, return failure*/
    rc = bxe_prev_mcp_done(sc);
    if (rc == 0) {
        rc = BXE_PREV_WAIT_NEEDED;
    }

    return (rc);
}
/*
 * Clean up after any driver (UNDI, Linux, previous FreeBSD instance)
 * that ran before us: clear interrupted-DMAE errors, release stale HW
 * and NVRAM locks, then negotiate the unload with the MCP - running the
 * 'common' flow if we own it, or retrying the 'uncommon' flow up to 10
 * times.  Returns 0 on success, -1 on MCP failure or retry exhaustion.
 */
static int
bxe_prev_unload(struct bxe_softc *sc)
{
    int time_counter = 10;
    uint32_t fw, hw_lock_reg, hw_lock_val;
    uint32_t rc = 0;

    /*
     * Clear HW from errors which may have resulted from an interrupted
     * DMAE transaction.
     */
    bxe_prev_interrupted_dmae(sc);

    /* Release previously held locks */
    hw_lock_reg =
        (SC_FUNC(sc) <= 5) ?
            (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
            (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);

    hw_lock_val = (REG_RD(sc, hw_lock_reg));
    if (hw_lock_val) {
        if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
            BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
            REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
                   (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
        }
        BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
        REG_WR(sc, hw_lock_reg, 0xffffffff);
    } else {
        BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
    }

    if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
        BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
        REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
    }

    do {
        /* Lock MCP using an unload request */
        fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
        if (!fw) {
            BLOGE(sc, "MCP response failure, aborting\n");
            rc = -1;
            break;
        }

        if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
            rc = bxe_prev_unload_common(sc);
            break;
        }

        /* non-common reply from MCP might require looping */
        rc = bxe_prev_unload_uncommon(sc);
        if (rc != BXE_PREV_WAIT_NEEDED) {
            break;
        }

        DELAY(20000);
    } while (--time_counter);

    if (!time_counter || rc) {
        BLOGE(sc, "Failed to unload previous driver!"
                  " time_counter %d rc %d\n", time_counter, rc);
        rc = -1;
    }

    return (rc);
}
/*
 * Record the requested DCB/DCBX state in the softc.  E1/E1H chips do
 * not support DCB, so the state is forced off there regardless of the
 * requested values.
 */
void
bxe_dcbx_set_state(struct bxe_softc *sc,
                   uint8_t dcb_on,
                   uint32_t dcbx_enabled)
{
    if (CHIP_IS_E1x(sc)) {
        sc->dcb_state = FALSE;
        sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
    } else {
        sc->dcb_state = dcb_on;
        sc->dcbx_enabled = dcbx_enabled;
    }

    BLOGD(sc, DBG_LOAD,
          "DCB state [%s:%s]\n",
          dcb_on ? "ON" : "OFF",
          (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
              "on-chip with negotiation" : "invalid");
}
/*
 * Compute the QM connection ID count: L2 CIDs plus VF CIDs when SR-IOV
 * is active plus CNIC CIDs when CNIC is supported, rounded up to the
 * QM granularity.  Must be called after sriov-enable.
 */
static int
bxe_set_qm_cid_count(struct bxe_softc *sc)
{
    int count = BXE_L2_MAX_CID(sc);

    if (IS_SRIOV(sc)) {
        count += BXE_VF_CIDS;
    }

    if (CNIC_SUPPORT(sc)) {
        count += CNIC_CID_MAX;
    }

    return (roundup(count, QM_CID_ROUND));
}
/*
 * Build the priority-to-COS mapping table.  Each priority takes a
 * 4-bit COS value from pri_map; values at or above max_cos are invalid
 * and mapped to COS 0 with a warning.
 */
static void
bxe_init_multi_cos(struct bxe_softc *sc)
{
    uint32_t pri_map = 0; /* XXX change to user config */
    int pri;

    for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
        int cos = ((pri_map >> (pri * 4)) & 0xf);

        if (cos >= sc->max_cos) {
            BLOGW(sc, "Invalid COS %d for priority %d "
                      "(max COS is %d), setting to 0\n",
                  cos, pri, (sc->max_cos - 1));
            cos = 0;
        }

        sc->prio_to_cos[pri] = cos;
    }
}
static int
bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
{
struct bxe_softc *sc;
int error, result;
result = 0;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || !req->newptr) {
return (error);
}
if (result == 1) {
uint32_t temp;
sc = (struct bxe_softc *)arg1;
BLOGI(sc, "... dumping driver state ...\n");
temp = SHMEM2_RD(sc, temperature_in_half_celsius);
BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2));
}
return (error);
}
static int
bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
{
struct bxe_softc *sc = (struct bxe_softc *)arg1;
uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
uint32_t *offset;
uint64_t value = 0;
int index = (int)arg2;
if (index >= BXE_NUM_ETH_STATS) {
BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
return (-1);
}
offset = (eth_stats + bxe_eth_stats_arr[index].offset);
switch (bxe_eth_stats_arr[index].size) {
case 4:
value = (uint64_t)*offset;
break;
case 8:
value = HILO_U64(*offset, *(offset + 1));
break;
default:
BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
index, bxe_eth_stats_arr[index].size);
return (-1);
}
return (sysctl_handle_64(oidp, &value, 0, req));
}
/*
 * Sysctl handler for a single per-queue ethernet statistic.  arg2 packs
 * the fastpath index in its high 16 bits and the statistic index (into
 * bxe_eth_q_stats_arr) in its low 16 bits.
 */
static int
bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
{
    struct bxe_softc *sc = (struct bxe_softc *)arg1;
    uint32_t *stats_base;
    uint32_t *stat;
    uint64_t value = 0;
    uint32_t q_stat = (uint32_t)arg2;
    uint32_t fp_index = ((q_stat >> 16) & 0xffff);
    uint32_t index = (q_stat & 0xffff);

    stats_base = (uint32_t *)&sc->fp[fp_index].eth_q_stats;

    if (index >= BXE_NUM_ETH_Q_STATS) {
        BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
        return (-1);
    }

    stat = (stats_base + bxe_eth_q_stats_arr[index].offset);

    switch (bxe_eth_q_stats_arr[index].size) {
    case 4:
        value = (uint64_t)*stat;
        break;
    case 8:
        /* 64-bit counters are stored as two consecutive 32-bit words */
        value = HILO_U64(*stat, *(stat + 1));
        break;
    default:
        BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
              index, bxe_eth_q_stats_arr[index].size);
        return (-1);
    }

    return (sysctl_handle_64(oidp, &value, 0, req));
}
/*
 * Force a link reset through the elink layer, holding the PHY lock so
 * MDIO/PHY accesses are serialized. The final argument of 1 selects
 * the external-PHY reset path (NOTE(review): confirm against the elink
 * API; the parameter name is not visible here).
 */
static void bxe_force_link_reset(struct bxe_softc *sc)
{
bxe_acquire_phy_lock(sc);
elink_link_reset(&sc->link_params, &sc->link_vars, 1);
bxe_release_phy_lock(sc);
}
/*
 * Sysctl handler for dev.bxe.#.pause_param.
 *
 * Accepts an integer 0..8 selecting the pause/flow-control mode
 * (DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8),
 * translates it into elink req_flow_ctrl / req_fc_auto_adv settings,
 * and, if the interface is up and running, bounces the link so the new
 * pause configuration is (re)negotiated.
 */
static int
bxe_sysctl_pauseparam(SYSCTL_HANDLER_ARGS)
{
struct bxe_softc *sc = (struct bxe_softc *)arg1;
uint32_t cfg_idx = bxe_get_link_cfg_idx(sc);
int rc = 0;
int error;
int result;
error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req);
if (error || !req->newptr) {
return (error);
}
/* clamp out-of-range values to 8 (NONE) */
if ((sc->bxe_pause_param < 0) || (sc->bxe_pause_param > 8)) {
BLOGW(sc, "invalid pause param (%d) - use integers between 1 & 8\n",sc->bxe_pause_param);
sc->bxe_pause_param = 8;
}
result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT);
/*
 * NOTE(review): 0x400 appears to be the "autoneg" bit of the shifted
 * PORT_FEATURE_FLOW_CONTROL field -- confirm against the shared-memory
 * port feature definitions before relying on it.
 */
if((result & 0x400) && !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg)) {
BLOGW(sc, "Does not support Autoneg pause_param %d\n", sc->bxe_pause_param);
return -EINVAL;
}
/* in multi-function mode the pause setting is not applied per-function */
if(IS_MF(sc))
return 0;
/* rebuild the requested flow-control bits from scratch */
sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO;
if(result & ELINK_FLOW_CTRL_RX)
sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX;
if(result & ELINK_FLOW_CTRL_TX)
sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX;
if(sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO)
sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE;
if(result & 0x400) {
/* autoneg pause requested: advertise the RX/TX pause capabilities */
if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) {
sc->link_params.req_flow_ctrl[cfg_idx] =
ELINK_FLOW_CTRL_AUTO;
}
sc->link_params.req_fc_auto_adv = 0;
if (result & ELINK_FLOW_CTRL_RX)
sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX;
if (result & ELINK_FLOW_CTRL_TX)
sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX;
if (!sc->link_params.req_fc_auto_adv)
sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE;
}
if (IS_PF(sc)) {
if (sc->link_vars.link_up) {
bxe_stats_handle(sc, STATS_EVENT_STOP);
}
/* restart the link so the new pause parameters take effect */
if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
bxe_force_link_reset(sc);
bxe_acquire_phy_lock(sc);
rc = elink_phy_init(&sc->link_params, &sc->link_vars);
bxe_release_phy_lock(sc);
bxe_calc_fc_adv(sc);
}
}
return rc;
}
/*
 * Build the dev.bxe.# sysctl tree for this device instance:
 * read-only version/configuration strings, tunables (debug mask,
 * rx_budget, grcdump trigger), the pause_param and state handlers,
 * every device-wide ethernet statistic, and a "queue" subtree with a
 * per-queue copy of the ethernet queue statistics.
 */
static void
bxe_add_sysctls(struct bxe_softc *sc)
{
struct sysctl_ctx_list *ctx;
struct sysctl_oid_list *children;
struct sysctl_oid *queue_top, *queue;
struct sysctl_oid_list *queue_top_children, *queue_children;
char queue_num_buf[32];
uint32_t q_stat;
int i, j;
ctx = device_get_sysctl_ctx(sc->dev);
children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
"version");
/* format the strings exported by the read-only sysctls below */
snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
BCM_5710_FW_MAJOR_VERSION,
BCM_5710_FW_MINOR_VERSION,
BCM_5710_FW_REVISION_VERSION,
BCM_5710_FW_ENGINEERING_VERSION);
snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" :
(sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" :
(sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? "MF-SI" :
(sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
"Unknown"));
SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
"multifunction vnics per port");
snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
(sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
(sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
"???GT/s"),
sc->devinfo.pcie_link_width);
/* seed the runtime debug mask from the loader tunable */
sc->debug = bxe_debug;
SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
"bootcode version");
SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
CTLFLAG_RD, sc->fw_ver_str, 0,
"firmware version");
SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
CTLFLAG_RD, sc->mf_mode_str, 0,
"multifunction mode");
SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
CTLFLAG_RD, sc->mac_addr_str, 0,
"mac address");
SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
CTLFLAG_RD, sc->pci_link_str, 0,
"pci link status");
SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug",
CTLFLAG_RW, &sc->debug,
"debug logging mode");
sc->trigger_grcdump = 0;
SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump",
CTLFLAG_RW, &sc->trigger_grcdump, 0,
"trigger grcdump should be invoked"
" before collecting grcdump");
sc->grcdump_started = 0;
sc->grcdump_done = 0;
SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
CTLFLAG_RD, &sc->grcdump_done, 0,
"set by driver when grcdump is done");
sc->rx_budget = bxe_rx_budget;
SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
CTLFLAG_RW, &sc->rx_budget, 0,
"rx processing budget");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_param",
CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
bxe_sysctl_pauseparam, "IU",
"need pause frames- DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
bxe_sysctl_state, "IU", "dump driver state");
/* one read-only 64-bit node per device-wide ethernet statistic */
for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
bxe_eth_stats_arr[i].string,
CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
bxe_sysctl_eth_stat, "LU", bxe_eth_stats_arr[i].string);
}
/* add a new parent node for all queues "dev.bxe.#.queue" */
queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "queue");
queue_top_children = SYSCTL_CHILDREN(queue_top);
for (i = 0; i < sc->num_queues; i++) {
/* add a new parent node for a single queue "dev.bxe.#.queue.#" */
snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
queue_num_buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "single queue");
queue_children = SYSCTL_CHILDREN(queue);
for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
/* pack queue number (hi 16) and stat index (lo 16) into arg2 */
q_stat = ((i << 16) | j);
SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
bxe_eth_q_stats_arr[j].string,
CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, q_stat,
bxe_sysctl_eth_q_stat, "LU", bxe_eth_q_stats_arr[j].string);
}
}
}
/*
 * Allocate a transmit buf_ring for each fastpath queue.
 *
 * Returns 0 on success or -1 on the first allocation failure; rings
 * allocated before the failure are left in place for the caller to
 * release via bxe_free_buf_rings().
 */
static int
bxe_alloc_buf_rings(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int q;

    for (q = 0; q < sc->num_queues; q++) {
        fp = &sc->fp[q];
        fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF, M_NOWAIT,
                                   &fp->tx_mtx);
        if (fp->tx_br == NULL) {
            return (-1);
        }
    }

    return (0);
}
/*
 * Release every transmit buf_ring previously allocated by
 * bxe_alloc_buf_rings(). Safe to call after a partial allocation:
 * NULL rings are skipped and freed pointers are cleared.
 */
static void
bxe_free_buf_rings(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int q;

    for (q = 0; q < sc->num_queues; q++) {
        fp = &sc->fp[q];
        if (fp->tx_br != NULL) {
            buf_ring_free(fp->tx_br, M_DEVBUF);
            fp->tx_br = NULL;
        }
    }
}
/*
 * Initialize the per-queue TX and RX mutexes, giving each a unique
 * "bxe<unit>_fp<queue>_{tx,rx}_lock" name for lock diagnostics.
 */
static void
bxe_init_fp_mutexs(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int q;

    for (q = 0; q < sc->num_queues; q++) {
        fp = &sc->fp[q];

        snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
                 "bxe%d_fp%d_tx_lock", sc->unit, q);
        mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);

        snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
                 "bxe%d_fp%d_rx_lock", sc->unit, q);
        mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
    }
}
/*
 * Tear down the per-queue TX and RX mutexes. Each mutex is checked
 * with mtx_initialized() first so this is safe to call on a partially
 * initialized softc during attach error unwinding.
 */
static void
bxe_destroy_fp_mutexs(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int q;

    for (q = 0; q < sc->num_queues; q++) {
        fp = &sc->fp[q];

        if (mtx_initialized(&fp->tx_mtx)) {
            mtx_destroy(&fp->tx_mtx);
        }

        if (mtx_initialized(&fp->rx_mtx)) {
            mtx_destroy(&fp->rx_mtx);
        }
    }
}
/*
 * Device attach function.
 *
 * Allocates device resources, performs secondary chip identification, and
 * initializes driver instance variables. This function is called from driver
 * load after a successful probe. On any failure, everything acquired so far
 * is released in reverse order before returning ENXIO.
 *
 * Returns:
 *   0 = Success, >0 = Failure
 */
static int
bxe_attach(device_t dev)
{
    struct bxe_softc *sc;

    sc = device_get_softc(dev);

    BLOGD(sc, DBG_LOAD, "Starting attach...\n");

    sc->state = BXE_STATE_CLOSED;

    sc->dev  = dev;
    sc->unit = device_get_unit(dev);

    BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);

    sc->pcie_bus    = pci_get_bus(dev);
    sc->pcie_device = pci_get_slot(dev);
    sc->pcie_func   = pci_get_function(dev);

    /* enable bus master capability */
    pci_enable_busmaster(dev);

    /* get the BARs */
    if (bxe_allocate_bars(sc) != 0) {
        return (ENXIO);
    }

    /* initialize the mutexes */
    bxe_init_mutexes(sc);

    /* prepare the periodic callout */
    callout_init(&sc->periodic_callout, 1);

    /* prepare the chip taskqueue */
    sc->chip_tq_flags = CHIP_TQ_NONE;
    snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
             "bxe%d_chip_tq", sc->unit);
    TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
    sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
                                   taskqueue_thread_enqueue,
                                   &sc->chip_tq);
    taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
                            "%s", sc->chip_tq_name);

    TIMEOUT_TASK_INIT(taskqueue_thread,
        &sc->sp_err_timeout_task, 0, bxe_sp_err_timeout_task, sc);

    /* get device info and set params */
    if (bxe_get_device_info(sc) != 0) {
        BLOGE(sc, "getting device info\n");
        bxe_deallocate_bars(sc);
        pci_disable_busmaster(dev);
        return (ENXIO);
    }

    /* get final misc params */
    bxe_get_params(sc);

    /* set the default MTU (changed via ifconfig) */
    sc->mtu = ETHERMTU;

    bxe_set_modes_bitmap(sc);

    /* XXX
     * If in AFEX mode and the function is configured for FCoE
     * then bail... no L2 allowed.
     */

    /* get phy settings from shmem and 'and' against admin settings */
    bxe_get_phy_info(sc);

    /*
     * Initialize the FreeBSD ifnet interface. This cannot fail
     * (ifnet allocation always succeeds), so the stale diff markers
     * and error handling around this call have been resolved to the
     * unconditional form.
     */
    bxe_init_ifnet(sc);

    if (bxe_add_cdev(sc) != 0) {
        if (sc->ifp != NULL) {
            ether_ifdetach(sc->ifp);
        }
        ifmedia_removeall(&sc->ifmedia);
        bxe_release_mutexes(sc);
        bxe_deallocate_bars(sc);
        pci_disable_busmaster(dev);
        return (ENXIO);
    }

    /* allocate device interrupts */
    if (bxe_interrupt_alloc(sc) != 0) {
        bxe_del_cdev(sc);
        if (sc->ifp != NULL) {
            ether_ifdetach(sc->ifp);
        }
        ifmedia_removeall(&sc->ifmedia);
        bxe_release_mutexes(sc);
        bxe_deallocate_bars(sc);
        pci_disable_busmaster(dev);
        return (ENXIO);
    }

    bxe_init_fp_mutexs(sc);

    if (bxe_alloc_buf_rings(sc) != 0) {
        bxe_free_buf_rings(sc);
        bxe_interrupt_free(sc);
        bxe_del_cdev(sc);
        if (sc->ifp != NULL) {
            ether_ifdetach(sc->ifp);
        }
        ifmedia_removeall(&sc->ifmedia);
        bxe_release_mutexes(sc);
        bxe_deallocate_bars(sc);
        pci_disable_busmaster(dev);
        return (ENXIO);
    }

    /* allocate ilt */
    if (bxe_alloc_ilt_mem(sc) != 0) {
        bxe_free_buf_rings(sc);
        bxe_interrupt_free(sc);
        bxe_del_cdev(sc);
        if (sc->ifp != NULL) {
            ether_ifdetach(sc->ifp);
        }
        ifmedia_removeall(&sc->ifmedia);
        bxe_release_mutexes(sc);
        bxe_deallocate_bars(sc);
        pci_disable_busmaster(dev);
        return (ENXIO);
    }

    /* allocate the host hardware/software hsi structures */
    if (bxe_alloc_hsi_mem(sc) != 0) {
        bxe_free_ilt_mem(sc);
        bxe_free_buf_rings(sc);
        bxe_interrupt_free(sc);
        bxe_del_cdev(sc);
        if (sc->ifp != NULL) {
            ether_ifdetach(sc->ifp);
        }
        ifmedia_removeall(&sc->ifmedia);
        bxe_release_mutexes(sc);
        bxe_deallocate_bars(sc);
        pci_disable_busmaster(dev);
        return (ENXIO);
    }

    /* need to reset chip if UNDI was active */
    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
        /* init fw_seq */
        sc->fw_seq =
            (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
             DRV_MSG_SEQ_NUMBER_MASK);
        BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
        bxe_prev_unload(sc);
    }

#if 1
    /* XXX */
    bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
#else
    if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
        SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
        SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
        SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
        bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
        bxe_dcbx_init_params(sc);
    } else {
        bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
    }
#endif

    /* calculate qm_cid_count */
    sc->qm_cid_count = bxe_set_qm_cid_count(sc);
    BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);

    sc->max_cos = 1;
    bxe_init_multi_cos(sc);

    bxe_add_sysctls(sc);

    return (0);
}
/*
 * Device detach function.
 *
 * Stops the controller, resets the controller, and releases resources.
 * Teardown order is deliberate: the control cdev goes first, then the
 * periodic callout and chip taskqueue are quiesced, the NIC is unloaded
 * if it was open, the ifnet is detached, and finally memory, interrupt,
 * mutex and BAR resources are released.
 *
 * Returns:
 * 0 = Success, >0 = Failure
 */
static int
bxe_detach(device_t dev)
{
struct bxe_softc *sc;
if_t ifp;
sc = device_get_softc(dev);
BLOGD(sc, DBG_LOAD, "Starting detach...\n");
ifp = sc->ifp;
/* refuse to detach while VLAN interfaces are stacked on this ifnet */
if (ifp != NULL && if_vlantrunkinuse(ifp)) {
BLOGE(sc, "Cannot detach while VLANs are in use.\n");
return(EBUSY);
}
bxe_del_cdev(sc);
/* stop the periodic callout */
bxe_periodic_stop(sc);
/* stop the chip taskqueue */
atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
if (sc->chip_tq) {
/* drain before free so no task runs on a destroyed queue */
taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
taskqueue_free(sc->chip_tq);
sc->chip_tq = NULL;
taskqueue_drain_timeout(taskqueue_thread,
&sc->sp_err_timeout_task);
}
/* stop and reset the controller if it was open */
if (sc->state != BXE_STATE_CLOSED) {
BXE_CORE_LOCK(sc);
bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
sc->state = BXE_STATE_DISABLED;
BXE_CORE_UNLOCK(sc);
}
/* release the network interface */
if (ifp != NULL) {
ether_ifdetach(ifp);
}
ifmedia_removeall(&sc->ifmedia);
/* XXX do the following based on driver state... */
/* free the host hardware/software hsi structures */
bxe_free_hsi_mem(sc);
/* free ilt */
bxe_free_ilt_mem(sc);
bxe_free_buf_rings(sc);
/* release the interrupts */
bxe_interrupt_free(sc);
/* Release the mutexes*/
bxe_destroy_fp_mutexs(sc);
bxe_release_mutexes(sc);
/* Release the PCIe BAR mapped memory */
bxe_deallocate_bars(sc);
/* Release the FreeBSD interface. */
if (sc->ifp != NULL) {
if_free(sc->ifp);
}
pci_disable_busmaster(dev);
return (0);
}
/*
 * Device shutdown function.
 *
 * Quiesces the periodic callout and, if the NIC is not closed, performs
 * a normal unload under the core lock. Resources are not released; this
 * only stops the hardware for system shutdown.
 *
 * Returns:
 *   0 (always succeeds)
 */
static int
bxe_shutdown(device_t dev)
{
    struct bxe_softc *sc = device_get_softc(dev);

    BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");

    /* stop the periodic callout */
    bxe_periodic_stop(sc);

    if (sc->state != BXE_STATE_CLOSED) {
        BXE_CORE_LOCK(sc);
        bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
        BXE_CORE_UNLOCK(sc);
    }

    return (0);
}
/*
 * Acknowledge a status block through the IGU.
 *
 * Computes the BAR offset of the INT_ACK command register for the given
 * IGU status block id (8 bytes per command slot above the INT_ACK base)
 * and delegates to bxe_igu_ack_sb_gen() with the segment, index, op and
 * update flags unchanged.
 */
void
bxe_igu_ack_sb(struct bxe_softc *sc,
uint8_t igu_sb_id,
uint8_t segment,
uint16_t index,
uint8_t op,
uint8_t update)
{
uint32_t igu_addr = sc->igu_base_addr;
igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
}
/*
 * Issue an IGU status-block cleanup command via the GRC command
 * interface and poll for its completion.
 *
 * Writes the cleanup data word, then the control word (address, function
 * encoding, write command); each write is followed by a bus-space write
 * barrier plus mb() so the device sees them in order. Completion is
 * polled via the per-SB bit in the cstorm type-0 cleanup register, up to
 * 100 iterations of 20ms. A timeout is only logged (debug level); the
 * function cannot report failure to the caller.
 */
static void
bxe_igu_clear_sb_gen(struct bxe_softc *sc,
uint8_t func,
uint8_t idu_sb_id,
uint8_t is_pf)
{
uint32_t data, ctl, cnt = 100;
uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
uint32_t sb_bit = 1 << (idu_sb_id%32);
uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
/* Not supported in BC mode */
if (CHIP_INT_MODE_IS_BC(sc)) {
return;
}
data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
IGU_REGULAR_CLEANUP_SET |
IGU_REGULAR_BCLEANUP);
ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
(func_encode << IGU_CTRL_REG_FID_SHIFT) |
(IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
data, igu_addr_data);
REG_WR(sc, igu_addr_data, data);
/* make sure the data word reaches the device before the command */
bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
BUS_SPACE_BARRIER_WRITE);
mb();
BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
ctl, igu_addr_ctl);
REG_WR(sc, igu_addr_ctl, ctl);
bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
BUS_SPACE_BARRIER_WRITE);
mb();
/* wait for clean up to finish */
while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
DELAY(20000);
}
if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
BLOGD(sc, DBG_LOAD,
"Unable to finish IGU cleanup: "
"idu_sb_id %d offset %d bit %d (cnt %d)\n",
idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
}
}
/*
 * Convenience wrapper: clean up a status block on behalf of this
 * function itself (PF role).
 */
static void
bxe_igu_clear_sb(struct bxe_softc *sc,
uint8_t idu_sb_id)
{
bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
}
/*******************/
/* ECORE CALLBACKS */
/*******************/
/*
 * Reset the chip's common blocks via the MISC reset-clear registers.
 * On E3 chips the MSTAT0/MSTAT1 statistics blocks are included in the
 * second reset register write.
 */
static void
bxe_reset_common(struct bxe_softc *sc)
{
uint32_t val = 0x1400;
/* reset_common */
REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
if (CHIP_IS_E3(sc)) {
val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
}
REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
}
/*
 * Perform the common (one-time, per-path) PHY initialization through
 * the elink layer, under the PHY lock. Skipped entirely when the
 * management firmware supports Link Flap Avoidance (detected by the
 * shmem2 region being large enough to contain lfa_host_addr). On
 * non-E1x chips the other path's shmem bases are passed as well.
 */
static void
bxe_common_init_phy(struct bxe_softc *sc)
{
uint32_t shmem_base[2];
uint32_t shmem2_base[2];
/* Avoid common init in case MFW supports LFA */
if (SHMEM2_RD(sc, size) >
(uint32_t)offsetof(struct shmem2_region,
lfa_host_addr[SC_PORT(sc)])) {
return;
}
shmem_base[0] = sc->devinfo.shmem_base;
shmem2_base[0] = sc->devinfo.shmem2_base;
if (!CHIP_IS_E1x(sc)) {
shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr);
shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
}
bxe_acquire_phy_lock(sc);
elink_common_init_phy(sc, shmem_base, shmem2_base,
sc->devinfo.chip_id, 0);
bxe_release_phy_lock(sc);
}
/*
 * Disable this physical function in hardware: clear the function-enable
 * bit in the IGU PF configuration, then clear the PGLUE-B internal PFID
 * master enable and the CFC weak enable for the PF.
 */
static void
bxe_pf_disable(struct bxe_softc *sc)
{
uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
val &= ~IGU_PF_CONF_FUNC_EN;
REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
}
/*
 * Initialize the PXP arbiter. The write order is derived from the PCIe
 * max-payload-size field of the device control register; the read order
 * comes from the max-read-request field unless overridden by the "mrrs"
 * tunable (sc->mrrs != -1).
 */
static void
bxe_init_pxp(struct bxe_softc *sc)
{
uint16_t devctl;
int r_order, w_order;
devctl = bxe_pcie_capability_read(sc, PCIER_DEVICE_CTL, 2);
BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
/* max payload size field: bits 7:5 of devctl */
w_order = ((devctl & PCIEM_CTL_MAX_PAYLOAD) >> 5);
if (sc->mrrs == -1) {
/* max read request size field: bits 14:12 of devctl */
r_order = ((devctl & PCIEM_CTL_MAX_READ_REQUEST) >> 12);
} else {
BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
r_order = sc->mrrs;
}
ecore_init_pxp_arb(sc, r_order, w_order);
}
/*
 * Return the GRC address of this function's PGL "pretend" register.
 * The per-function registers start at PXP2_REG_PGL_PRETEND_FUNC_F0 and
 * are laid out at a fixed stride (F1 - F0) per absolute function.
 */
static uint32_t
bxe_get_pretend_reg(struct bxe_softc *sc)
{
uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
return (base + (SC_ABS_FUNC(sc)) * stride);
}
/*
 * Called only on E1H or E2.
 * When pretending to be PF, the pretend value is the function number 0..7.
 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
 * combination.
 *
 * Returns 0 on success, -1 if the value is out of range for E1H. The
 * read-back of the pretend register flushes the write before any
 * subsequent "pretended" accesses.
 */
static int
bxe_pretend_func(struct bxe_softc *sc,
uint16_t pretend_func_val)
{
uint32_t pretend_reg;
if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
return (-1);
}
/* get my own pretend register */
pretend_reg = bxe_get_pretend_reg(sc);
REG_WR(sc, pretend_reg, pretend_func_val);
REG_RD(sc, pretend_reg);
return (0);
}
/*
 * SR-IOV DMAE initialization hook. Intentionally empty: this driver
 * has no SR-IOV support, the stub exists to mirror the Linux driver's
 * init flow.
 */
static void
bxe_iov_init_dmae(struct bxe_softc *sc)
{
    /* nothing to do */
}
/*
 * SR-IOV doorbell-queue initialization hook. Intentionally empty: this
 * driver has no SR-IOV support, the stub exists to mirror the Linux
 * driver's init flow.
 */
static void
bxe_iov_init_dq(struct bxe_softc *sc)
{
    /* nothing to do */
}
/*
 * Send a NIG loopback debug packet: two 3-word DMAE writes to the NIG
 * debug-packet register, the first carrying the ethernet addresses with
 * the SOP flag, the second the protocol word with the EOP flag. Used by
 * bxe_int_mem_test() to exercise internal memories.
 */
static void
bxe_lb_pckt(struct bxe_softc *sc)
{
uint32_t wb_write[3];
/* Ethernet source and destination addresses */
wb_write[0] = 0x55555555;
wb_write[1] = 0x55555555;
wb_write[2] = 0x20; /* SOP */
REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
/* NON-IP protocol */
wb_write[0] = 0x09000000;
wb_write[1] = 0x55555555;
wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
/*
 * Some of the internal memories are not directly readable from the driver.
 * To test them we send debug packets.
 *
 * The sequence: isolate the parser from its neighbors, send loopback
 * packets via bxe_lb_pckt(), and verify the NIG/PRS packet counters see
 * them; BRB/PRS are reset between phases. Poll timeouts are scaled up
 * for FPGA/emulation platforms.
 *
 * Returns 0 on success, or a distinct negative code identifying the
 * failing phase: -1/-3 NIG counter timeout, -2 PRS counter timeout,
 * -4 NIG EOP FIFO failed to drain.
 */
static int
bxe_int_mem_test(struct bxe_softc *sc)
{
int factor;
int count, i;
uint32_t val = 0;
if (CHIP_REV_IS_FPGA(sc)) {
factor = 120;
} else if (CHIP_REV_IS_EMUL(sc)) {
factor = 200;
} else {
factor = 1;
}
/* disable inputs of parser neighbor blocks */
REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
REG_WR(sc, CFC_REG_DEBUG0, 0x1);
REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
/* write 0 to parser credits for CFC search request */
REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
/* send Ethernet packet */
bxe_lb_pckt(sc);
/* TODO do i reset NIG statistic? */
/* Wait until NIG register shows 1 packet of size 0x10 */
count = 1000 * factor;
while (count) {
bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
val = *BXE_SP(sc, wb_data[0]);
if (val == 0x10) {
break;
}
DELAY(10000);
count--;
}
if (val != 0x10) {
BLOGE(sc, "NIG timeout val=0x%x\n", val);
return (-1);
}
/* wait until PRS register shows 1 packet */
count = (1000 * factor);
while (count) {
val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
if (val == 1) {
break;
}
DELAY(10000);
count--;
}
if (val != 0x1) {
BLOGE(sc, "PRS timeout val=0x%x\n", val);
return (-2);
}
/* Reset and init BRB, PRS */
REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
DELAY(50000);
REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
DELAY(50000);
ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
/* Disable inputs of parser neighbor blocks */
REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
REG_WR(sc, CFC_REG_DEBUG0, 0x1);
REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
/* Write 0 to parser credits for CFC search request */
REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
/* send 10 Ethernet packets */
for (i = 0; i < 10; i++) {
bxe_lb_pckt(sc);
}
/* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
count = (1000 * factor);
while (count) {
bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
val = *BXE_SP(sc, wb_data[0]);
if (val == 0xb0) {
break;
}
DELAY(10000);
count--;
}
if (val != 0xb0) {
BLOGE(sc, "NIG timeout val=0x%x\n", val);
return (-3);
}
/* Wait until PRS register shows 2 packets */
val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
if (val != 2) {
/* counter mismatches here are logged but not fatal */
BLOGE(sc, "PRS timeout val=0x%x\n", val);
}
/* Write 1 to parser credits for CFC search request */
REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
/* Wait until PRS register shows 3 packets */
DELAY(10000 * factor);
/* Wait until NIG register shows 1 packet of size 0x10 */
val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
if (val != 3) {
BLOGE(sc, "PRS timeout val=0x%x\n", val);
}
/* clear NIG EOP FIFO */
for (i = 0; i < 11; i++) {
REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
}
val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
if (val != 1) {
BLOGE(sc, "clear of NIG failed val=0x%x\n", val);
return (-4);
}
/* Reset and init BRB, PRS, NIG */
REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
DELAY(50000);
REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
DELAY(50000);
ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
if (!CNIC_SUPPORT(sc)) {
/* set NIC mode */
REG_WR(sc, PRS_REG_NIC_MODE, 1);
}
/* Enable inputs of parser neighbor blocks */
REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
REG_WR(sc, CFC_REG_DEBUG0, 0x0);
REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
return (0);
}
/*
 * Enable fan-failure detection when the board configuration requires
 * it: either the shared-hw-config explicitly enables it, or the fitted
 * PHY type demands it (queried per port through elink). When required,
 * SPIO 5 is configured as the fan-failure input (active low) and its
 * event interrupt is enabled toward the IGU.
 */
static void
bxe_setup_fan_failure_detection(struct bxe_softc *sc)
{
int is_required;
uint32_t val;
int port;
is_required = 0;
val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
SHARED_HW_CFG_FAN_FAILURE_MASK);
if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
is_required = 1;
}
/*
 * The fan failure mechanism is usually related to the PHY type since
 * the power consumption of the board is affected by the PHY. Currently,
 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
 */
else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
for (port = PORT_0; port < PORT_MAX; port++) {
is_required |= elink_fan_failure_det_req(sc,
sc->devinfo.shmem_base,
sc->devinfo.shmem2_base,
port);
}
}
BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
if (is_required == 0) {
return;
}
/* Fan failure is indicated by SPIO 5 */
bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
/* set to active low mode */
val = REG_RD(sc, MISC_REG_SPIO_INT);
val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
REG_WR(sc, MISC_REG_SPIO_INT, val);
/* enable interrupt to signal the IGU */
val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
val |= MISC_SPIO_SPIO5;
REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
}
/*
 * Unmask the attention interrupts of the hardware blocks by zeroing
 * (or selectively setting) each block's interrupt mask register. A few
 * masks keep specific bits set on purpose: BRB read-length errors are
 * legal on truncated packets, PXP mask 1 bit 0x40 is kept on non-E1x,
 * and PBF bits 3/4 stay masked. Commented-out writes are preserved
 * from the vendor code.
 */
static void
bxe_enable_blocks_attention(struct bxe_softc *sc)
{
uint32_t val;
REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
if (!CHIP_IS_E1x(sc)) {
REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
} else {
REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
}
REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
/*
 * mask read length error interrupts in brb for parser
 * (parsing unit and 'checksum and crc' unit)
 * these errors are legal (PU reads fixed length and CAC can cause
 * read length error on truncated packets)
 */
REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
REG_WR(sc, QM_REG_QM_INT_MASK, 0);
REG_WR(sc, TM_REG_TM_INT_MASK, 0);
REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
/* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
/* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
/* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
/* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
/* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
/* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
if (!CHIP_IS_E1x(sc)) {
val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
}
REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
/* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
if (!CHIP_IS_E1x(sc)) {
/* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
}
REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
/* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
}
/**
* bxe_init_hw_common - initialize the HW at the COMMON phase.
*
* @sc: driver handle
*/
static int
bxe_init_hw_common(struct bxe_softc *sc)
{
uint8_t abs_func_id;
uint32_t val;
BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
SC_ABS_FUNC(sc));
/*
* take the RESET lock to protect undi_unload flow from accessing
* registers while we are resetting the chip
*/
bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
bxe_reset_common(sc);
REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
val = 0xfffc;
if (CHIP_IS_E3(sc)) {
val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
}
REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
BLOGD(sc, DBG_LOAD, "after misc block init\n");
if (!CHIP_IS_E1x(sc)) {
/*
* 4-port mode or 2-port mode we need to turn off master-enable for
* everyone. After that we turn it back on for self. So, we disregard
* multi-function, and always disable all functions on the given path,
* this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
*/
for (abs_func_id = SC_PATH(sc);
abs_func_id < (E2_FUNC_MAX * 2);
abs_func_id += 2) {
if (abs_func_id == SC_ABS_FUNC(sc)) {
REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
continue;
}
bxe_pretend_func(sc, abs_func_id);
/* clear pf enable */
bxe_pf_disable(sc);
bxe_pretend_func(sc, SC_ABS_FUNC(sc));
}
}
BLOGD(sc, DBG_LOAD, "after pf disable\n");
ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
if (CHIP_IS_E1(sc)) {
/*
* enable HW interrupt from PXP on USDM overflow
* bit 16 on INT_MASK_0
*/
REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
}
ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
bxe_init_pxp(sc);
#ifdef __BIG_ENDIAN
REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
/* make sure this value is 0 */
REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
//REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif
ecore_ilt_init_page_size(sc, INITOP_SET);
if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
}
/* let the HW do it's magic... */
DELAY(100000);
/* finish PXP init */
val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
if (val != 1) {
BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
val);
return (-1);
}
val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
if (val != 1) {
BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
return (-1);
}
BLOGD(sc, DBG_LOAD, "after pxp init\n");
/*
* Timer bug workaround for E2 only. We need to set the entire ILT to have
* entries with value "0" and valid bit on. This needs to be done by the
* first PF that is loaded in a path (i.e. common phase)
*/
if (!CHIP_IS_E1x(sc)) {
/*
* In E2 there is a bug in the timers block that can cause function 6 / 7
* (i.e. vnic3) to start even if it is marked as "scan-off".
* This occurs when a different function (func2,3) is being marked
* as "scan-off". Real-life scenario for example: if a driver is being
* load-unloaded while func6,7 are down. This will cause the timer to access
* the ilt, translate to a logical address and send a request to read/write.
* Since the ilt for the function that is down is not valid, this will cause
* a translation error which is unrecoverable.
* The Workaround is intended to make sure that when this happens nothing
* fatal will occur. The workaround:
* 1. First PF driver which loads on a path will:
* a. After taking the chip out of reset, by using pretend,
* it will write "0" to the following registers of
* the other vnics.
* REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
* REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
* REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
* And for itself it will write '1' to
* PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
* dmae-operations (writing to pram for example.)
* note: can be done for only function 6,7 but cleaner this
* way.
* b. Write zero+valid to the entire ILT.
* c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
* VNIC3 (of that port). The range allocated will be the
* entire ILT. This is needed to prevent ILT range error.
* 2. Any PF driver load flow:
* a. ILT update with the physical addresses of the allocated
* logical pages.
* b. Wait 20msec. - note that this timeout is needed to make
* sure there are no requests in one of the PXP internal
* queues with "old" ILT addresses.
* c. PF enable in the PGLC.
* d. Clear the was_error of the PF in the PGLC. (could have
* occurred while driver was down)
* e. PF enable in the CFC (WEAK + STRONG)
* f. Timers scan enable
* 3. PF driver unload flow:
* a. Clear the Timers scan_en.
* b. Polling for scan_on=0 for that PF.
* c. Clear the PF enable bit in the PXP.
* d. Clear the PF enable in the CFC (WEAK + STRONG)
* e. Write zero+valid to all ILT entries (The valid bit must
* stay set)
* f. If this is VNIC 3 of a port then also init
* first_timers_ilt_entry to zero and last_timers_ilt_entry
* to the last entry in the ILT.
*
* Notes:
* Currently the PF error in the PGLC is non recoverable.
* In the future the there will be a recovery routine for this error.
* Currently attention is masked.
* Having an MCP lock on the load/unload process does not guarantee that
* there is no Timer disable during Func6/7 enable. This is because the
* Timers scan is currently being cleared by the MCP on FLR.
* Step 2.d can be done only for PF6/7 and the driver can also check if
* there is error before clearing it. But the flow above is simpler and
* more general.
* All ILT entries are written by zero+valid and not just PF6/7
* ILT entries since in the future the ILT entries allocation for
* PF-s might be dynamic.
*/
struct ilt_client_info ilt_cli;
struct ecore_ilt ilt;
memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
memset(&ilt, 0, sizeof(struct ecore_ilt));
/* initialize dummy TM client */
ilt_cli.start = 0;
ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
ilt_cli.client_num = ILT_CLIENT_TM;
/*
* Step 1: set zeroes to all ilt page entries with valid bit on
* Step 2: set the timers first/last ilt entry to point
* to the entire range to prevent ILT range error for 3rd/4th
* vnic (this code assumes existence of the vnic)
*
* both steps performed by call to ecore_ilt_client_init_op()
* with dummy TM client
*
* we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
* and his brother are split registers
*/
bxe_pretend_func(sc, (SC_PATH(sc) + 6));
ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
bxe_pretend_func(sc, SC_ABS_FUNC(sc));
REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
}
REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
if (!CHIP_IS_E1x(sc)) {
int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
(CHIP_REV_IS_FPGA(sc) ? 400 : 0);
ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
/* let the HW do it's magic... */
do {
DELAY(200000);
val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
} while (factor-- && (val != 1));
if (val != 1) {
BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val);
return (-1);
}
}
BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
bxe_iov_init_dmae(sc);
/* clean the DMAE memory */
sc->dmae_ready = 1;
ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
/* QM queues pointers table */
ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
/* soft reset pulse */
REG_WR(sc, QM_REG_SOFT_RESET, 1);
REG_WR(sc, QM_REG_SOFT_RESET, 0);
if (CNIC_SUPPORT(sc))
ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
if (!CHIP_REV_IS_SLOW(sc)) {
/* enable hw interrupt from doorbell Q */
REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
}
ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
if (!CHIP_IS_E1(sc)) {
REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
}
if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
if (IS_MF_AFEX(sc)) {
/*
* configure that AFEX and VLAN headers must be
* received in AFEX mode
*/
REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
} else {
/*
* Bit-map indicating which L2 hdrs may appear
* after the basic Ethernet header
*/
REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
}
}
ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
if (!CHIP_IS_E1x(sc)) {
/* reset VFC memories */
REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
VFC_MEMORIES_RST_REG_CAM_RST |
VFC_MEMORIES_RST_REG_RAM_RST);
REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
VFC_MEMORIES_RST_REG_CAM_RST |
VFC_MEMORIES_RST_REG_RAM_RST);
DELAY(20000);
}
ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
/* sync semi rtc */
REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
0x80000000);
REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
0x80000000);
ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
if (!CHIP_IS_E1x(sc)) {
if (IS_MF_AFEX(sc)) {
/*
* configure that AFEX and VLAN headers must be
* sent in AFEX mode
*/
REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
} else {
REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
}
}
REG_WR(sc, SRC_REG_SOFT_RST, 1);
ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
if (CNIC_SUPPORT(sc)) {
REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
}
REG_WR(sc, SRC_REG_SOFT_RST, 0);
if (sizeof(union cdu_context) != 1024) {
/* we currently assume that a context is 1024 bytes */
BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
(long)sizeof(union cdu_context));
}
ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
val = (4 << 24) + (0 << 12) + 1024;
REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
/* enable context validation interrupt from CFC */
REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
/* set the thresholds to prevent CFC/CDU race */
REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
}
ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
/* Reset PCIE errors for debug */
REG_WR(sc, 0x2814, 0xffffffff);
REG_WR(sc, 0x3820, 0xffffffff);
if (!CHIP_IS_E1x(sc)) {
REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
(PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
(PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
(PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
}
ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
if (!CHIP_IS_E1(sc)) {
/* in E3 this done in per-port section */
if (!CHIP_IS_E3(sc))
REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
}
if (CHIP_IS_E1H(sc)) {
/* not applicable for E2 (and above ...) */
REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
}
if (CHIP_REV_IS_SLOW(sc)) {
DELAY(200000);
}
/* finish CFC init */
val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
if (val != 1) {
BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val);
return (-1);
}
val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
if (val != 1) {
BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val);
return (-1);
}
val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
if (val != 1) {
BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val);
return (-1);
}
REG_WR(sc, CFC_REG_DEBUG0, 0);
if (CHIP_IS_E1(sc)) {
/* read NIG statistic to see if this is our first up since powerup */
bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
val = *BXE_SP(sc, wb_data[0]);
/* do internal memory self test */
if ((val == 0) && bxe_int_mem_test(sc)) {
BLOGE(sc, "internal mem self test failed val=0x%x\n", val);
return (-1);
}
}
bxe_setup_fan_failure_detection(sc);
/* clear PXP2 attentions */
REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
bxe_enable_blocks_attention(sc);
if (!CHIP_REV_IS_SLOW(sc)) {
ecore_enable_blocks_parity(sc);
}
if (!BXE_NOMCP(sc)) {
if (CHIP_IS_E1x(sc)) {
bxe_common_init_phy(sc);
}
}
return (0);
}
/**
* bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
*
* @sc: driver handle
*/
static int
bxe_init_hw_common_chip(struct bxe_softc *sc)
{
    int rc;

    /* Run the regular COMMON phase first; bail out on any failure. */
    rc = bxe_init_hw_common(sc);
    if (rc != 0) {
        BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc);
        return (rc);
    }

    /* In E2 2-PORT mode, same ext phy is used for the two paths */
    if (!BXE_NOMCP(sc)) {
        bxe_common_init_phy(sc);
    }

    return (0);
}
/*
 * bxe_init_hw_port - per-port (PHASE_PORT0/PHASE_PORT1) hardware init.
 *
 * Initializes every HW block for one physical port after the COMMON phase
 * has completed: NIG interrupt masking, ecore block init calls, BRB pause
 * thresholds, PRS/PBF header configuration, HC/IGU attention masks and the
 * SPIO5 (fan-failure) attention enable.
 *
 * The ordering of the register writes and ecore_init_block() calls below is
 * significant and must not be rearranged.
 *
 * Returns 0 (there are no failure paths in this routine).
 */
static int
bxe_init_hw_port(struct bxe_softc *sc)
{
int port = SC_PORT(sc);
int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
uint32_t low, high;
uint32_t val;
BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
/* mask this port's NIG interrupts while its blocks are initialized */
REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
ecore_init_block(sc, BLOCK_MISC, init_phase);
ecore_init_block(sc, BLOCK_PXP, init_phase);
ecore_init_block(sc, BLOCK_PXP2, init_phase);
/*
* Timers bug workaround: disables the pf_master bit in pglue at
* common phase, we need to enable it here before any dmae access are
* attempted. Therefore we manually added the enable-master to the
* port phase (it also happens in the function phase)
*/
if (!CHIP_IS_E1x(sc)) {
REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
}
ecore_init_block(sc, BLOCK_ATC, init_phase);
ecore_init_block(sc, BLOCK_DMAE, init_phase);
ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
ecore_init_block(sc, BLOCK_QM, init_phase);
ecore_init_block(sc, BLOCK_TCM, init_phase);
ecore_init_block(sc, BLOCK_UCM, init_phase);
ecore_init_block(sc, BLOCK_CCM, init_phase);
ecore_init_block(sc, BLOCK_XCM, init_phase);
/* QM cid (connection) count */
ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
if (CNIC_SUPPORT(sc)) {
ecore_init_block(sc, BLOCK_TM, init_phase);
REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
}
ecore_init_block(sc, BLOCK_DORQ, init_phase);
ecore_init_block(sc, BLOCK_BRB1, init_phase);
/*
 * BRB pause thresholds (E1/E1H only); the "/256" in the inline
 * comments below indicates the values are in 256-byte block units.
 */
if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
if (IS_MF(sc)) {
low = (BXE_ONE_PORT(sc) ? 160 : 246);
} else if (sc->mtu > 4096) {
if (BXE_ONE_PORT(sc)) {
low = 160;
} else {
val = sc->mtu;
/* (24*1024 + val*4)/256 */
low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
}
} else {
low = (BXE_ONE_PORT(sc) ? 80 : 160);
}
high = (low + 56); /* 14*1024/256 */
REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
}
if (CHIP_IS_MODE_4_PORT(sc)) {
REG_WR(sc, SC_PORT(sc) ?
BRB1_REG_MAC_GUARANTIED_1 :
BRB1_REG_MAC_GUARANTIED_0, 40);
}
ecore_init_block(sc, BLOCK_PRS, init_phase);
if (CHIP_IS_E3B0(sc)) {
if (IS_MF_AFEX(sc)) {
/* configure headers for AFEX mode */
REG_WR(sc, SC_PORT(sc) ?
PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
REG_WR(sc, SC_PORT(sc) ?
PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
REG_WR(sc, SC_PORT(sc) ?
PRS_REG_MUST_HAVE_HDRS_PORT_1 :
PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
} else {
/* Ovlan exists only if we are in multi-function +
* switch-dependent mode, in switch-independent there
* is no ovlan headers
*/
REG_WR(sc, SC_PORT(sc) ?
PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
PRS_REG_HDRS_AFTER_BASIC_PORT_0,
(sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
}
}
ecore_init_block(sc, BLOCK_TSDM, init_phase);
ecore_init_block(sc, BLOCK_CSDM, init_phase);
ecore_init_block(sc, BLOCK_USDM, init_phase);
ecore_init_block(sc, BLOCK_XSDM, init_phase);
ecore_init_block(sc, BLOCK_TSEM, init_phase);
ecore_init_block(sc, BLOCK_USEM, init_phase);
ecore_init_block(sc, BLOCK_CSEM, init_phase);
ecore_init_block(sc, BLOCK_XSEM, init_phase);
ecore_init_block(sc, BLOCK_UPB, init_phase);
ecore_init_block(sc, BLOCK_XPB, init_phase);
ecore_init_block(sc, BLOCK_PBF, init_phase);
if (CHIP_IS_E1x(sc)) {
/* configure PBF to work without PAUSE mtu 9000 */
REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
/* update threshold */
REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
/* update init credit */
REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
/* probe changes */
REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
DELAY(50);
REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
}
if (CNIC_SUPPORT(sc)) {
ecore_init_block(sc, BLOCK_SRC, init_phase);
}
ecore_init_block(sc, BLOCK_CDU, init_phase);
ecore_init_block(sc, BLOCK_CFC, init_phase);
if (CHIP_IS_E1(sc)) {
REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
}
ecore_init_block(sc, BLOCK_HC, init_phase);
ecore_init_block(sc, BLOCK_IGU, init_phase);
ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
/* init aeu_mask_attn_func_0/1:
* - SF mode: bits 3-7 are masked. only bits 0-2 are in use
* - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
* bits 4-7 are used for "per vn group attention" */
val = IS_MF(sc) ? 0xF7 : 0x7;
/* Enable DCBX attention for all but E1 */
val |= CHIP_IS_E1(sc) ? 0 : 0x10;
REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
ecore_init_block(sc, BLOCK_NIG, init_phase);
if (!CHIP_IS_E1x(sc)) {
/* Bit-map indicating which L2 hdrs may appear after the
* basic Ethernet header
*/
if (IS_MF_AFEX(sc)) {
REG_WR(sc, SC_PORT(sc) ?
NIG_REG_P1_HDRS_AFTER_BASIC :
NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
} else {
REG_WR(sc, SC_PORT(sc) ?
NIG_REG_P1_HDRS_AFTER_BASIC :
NIG_REG_P0_HDRS_AFTER_BASIC,
IS_MF_SD(sc) ? 7 : 6);
}
if (CHIP_IS_E3(sc)) {
REG_WR(sc, SC_PORT(sc) ?
NIG_REG_LLH1_MF_MODE :
NIG_REG_LLH_MF_MODE, IS_MF(sc));
}
}
if (!CHIP_IS_E3(sc)) {
REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
}
if (!CHIP_IS_E1(sc)) {
/* 0x2 disable mf_ov, 0x1 enable */
REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
(IS_MF_SD(sc) ? 0x1 : 0x2));
if (!CHIP_IS_E1x(sc)) {
val = 0;
switch (sc->devinfo.mf_info.mf_mode) {
case MULTI_FUNCTION_SD:
val = 1;
break;
case MULTI_FUNCTION_SI:
case MULTI_FUNCTION_AFEX:
val = 2;
break;
}
REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
NIG_REG_LLH0_CLS_TYPE), val);
}
REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
}
/* If SPIO5 is set to generate interrupts, enable it for this port */
val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
if (val & MISC_SPIO_SPIO5) {
uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
val = REG_RD(sc, reg_addr);
val |= AEU_INPUTS_ATTN_BITS_SPIO5;
REG_WR(sc, reg_addr, val);
}
return (0);
}
/*
 * Poll register 'reg' until it reads 'expected' or 'poll_count' polling
 * intervals (FLR_WAIT_INTERVAL usec each) have elapsed.  Returns the last
 * value read, so the caller can distinguish success from timeout.
 */
static uint32_t
bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
                       uint32_t reg,
                       uint32_t expected,
                       uint32_t poll_count)
{
    uint32_t remaining;
    uint32_t val;

    for (remaining = poll_count; ; remaining--) {
        val = REG_RD(sc, reg);
        if (val == expected || remaining == 0) {
            break;
        }
        DELAY(FLR_WAIT_INTERVAL);
    }

    return (val);
}
/*
 * Wait for a HW usage counter to drain to zero.  Returns 0 on success,
 * 1 (after logging 'msg' with the residual count) on timeout.
 */
static int
bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
                              uint32_t reg,
                              char *msg,
                              uint32_t poll_cnt)
{
    uint32_t residual;

    residual = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
    if (residual == 0) {
        return (0);
    }

    BLOGE(sc, "%s usage count=%d\n", msg, residual);
    return (1);
}
/* Common routines with VF FLR cleanup */
/* Common routines with VF FLR cleanup */
static uint32_t
bxe_flr_clnup_poll_count(struct bxe_softc *sc)
{
    uint32_t factor;

    /* adjust polling timeout: emulation and FPGA run much slower */
    if (CHIP_REV_IS_EMUL(sc)) {
        factor = 2000;
    } else if (CHIP_REV_IS_FPGA(sc)) {
        factor = 120;
    } else {
        factor = 1;
    }

    return (FLR_POLL_CNT * factor);
}
/*
 * bxe_poll_hw_usage_counters - drain all per-PF HW usage counters.
 *
 * Part of FLR (Function Level Reset) cleanup: waits, in order, for the
 * CFC, DORQ (DQ), QM, Timers (VNIC and NUM_SCANS) and DMAE counters of
 * this function to reach zero.
 *
 * Returns 0 when every counter drained within 'poll_cnt' intervals,
 * 1 on the first counter that timed out (the failing counter is logged
 * by bxe_flr_clnup_poll_hw_counter()).
 */
static int
bxe_poll_hw_usage_counters(struct bxe_softc *sc,
                           uint32_t poll_cnt)
{
    /* wait for CFC PF usage-counter to zero (includes all the VFs) */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      CFC_REG_NUM_LCIDS_INSIDE_PF,
                                      "CFC PF usage counter timed out",
                                      poll_cnt)) {
        return (1);
    }

    /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      DORQ_REG_PF_USAGE_CNT,
                                      "DQ PF usage counter timed out",
                                      poll_cnt)) {
        return (1);
    }

    /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
                                      "QM PF usage counter timed out",
                                      poll_cnt)) {
        return (1);
    }

    /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
                                      "Timers VNIC usage counter timed out",
                                      poll_cnt)) {
        return (1);
    }

    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
                                      "Timers NUM_SCANS usage counter timed out",
                                      poll_cnt)) {
        return (1);
    }

    /* Wait DMAE PF usage counter to zero */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      dmae_reg_go_c[INIT_DMAE_C(sc)],
                                      /* fixed typo: was "DMAE dommand register" */
                                      "DMAE command register timed out",
                                      poll_cnt)) {
        return (1);
    }

    return (0);
}
/*
 * Helpers to assemble the XSDM "operation generator" command used to
 * request FW final cleanup: each shifts a field into place and masks it
 * to the field's width.
 */
#define OP_GEN_PARAM(param) \
(((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
#define OP_GEN_TYPE(type) \
(((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
#define OP_GEN_AGG_VECT(index) \
(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
/*
 * bxe_send_final_clnup - issue the FW "final cleanup" command for a
 * function and poll for its completion.
 *
 * Verifies the CSTORM completion word is 0, builds the aggregated-interrupt
 * command via the OP_GEN_* helpers, writes it to XSDM_REG_OPERATION_GEN and
 * then polls the completion word for 1.  On timeout it calls bxe_panic()
 * (NOTE(review): bxe_panic may not return — confirm before relying on the
 * return value in that path) and returns 1; on success the completion word
 * is re-zeroed for the next FLR and 0 is returned.
 */
static int
bxe_send_final_clnup(struct bxe_softc *sc,
uint8_t clnup_func,
uint32_t poll_cnt)
{
uint32_t op_gen_command = 0;
uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
int ret = 0;
if (REG_RD(sc, comp_addr)) {
BLOGE(sc, "Cleanup complete was not 0 before sending\n");
return (1);
}
/* build the command: completion param/type, agg vector = function id */
op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
BLOGE(sc, "FW final cleanup did not succeed\n");
BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
(REG_RD(sc, comp_addr)));
bxe_panic(sc, ("FLR cleanup failed\n"));
return (1);
}
/* Zero completion for nxt FLR */
REG_WR(sc, comp_addr, 0);
return (ret);
}
/*
 * bxe_pbf_pN_buf_flushed - wait until a PBF transmission buffer drains.
 *
 * Polls until the credit register returns to its initial value, or until
 * the freed-credit counter has advanced by at least the number of credits
 * that were outstanding at entry, or until 'poll_count' intervals elapse.
 * Timeout is logged (debug level) but not reported to the caller.
 */
static void
bxe_pbf_pN_buf_flushed(struct bxe_softc *sc,
struct pbf_pN_buf_regs *regs,
uint32_t poll_count)
{
uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
uint32_t cur_cnt = poll_count;
crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
crd = crd_start = REG_RD(sc, regs->crd);
init_crd = REG_RD(sc, regs->init_crd);
BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd);
BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
/*
 * The signed subtraction cast back to uint32_t keeps the comparison
 * correct when the free-running crd_freed counter wraps around.
 */
while ((crd != init_crd) &&
((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
(init_crd - crd_start))) {
if (cur_cnt--) {
DELAY(FLR_WAIT_INTERVAL);
crd = REG_RD(sc, regs->crd);
crd_freed = REG_RD(sc, regs->crd_freed);
} else {
BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd);
BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
break;
}
}
BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}
/*
 * bxe_pbf_pN_cmd_flushed - wait until a PBF command queue drains.
 *
 * Polls until the queue occupancy reaches zero, or until the freed-lines
 * counter has advanced by the number of lines occupied at entry, or until
 * 'poll_count' intervals elapse.  Timeout is logged (debug level) but not
 * reported to the caller.
 */
static void
bxe_pbf_pN_cmd_flushed(struct bxe_softc *sc,
struct pbf_pN_cmd_regs *regs,
uint32_t poll_count)
{
uint32_t occup, to_free, freed, freed_start;
uint32_t cur_cnt = poll_count;
occup = to_free = REG_RD(sc, regs->lines_occup);
freed = freed_start = REG_RD(sc, regs->lines_freed);
BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
/* signed-subtract-then-cast handles wrap of the freed-lines counter */
while (occup &&
((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
if (cur_cnt--) {
DELAY(FLR_WAIT_INTERVAL);
occup = REG_RD(sc, regs->lines_occup);
freed = REG_RD(sc, regs->lines_freed);
} else {
BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
break;
}
}
BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}
/*
 * bxe_tx_hw_flushed - verify all PBF TX command queues and buffers (ports
 * P0, P1 and the loopback queue P4) are flushed, as part of FLR cleanup.
 *
 * The register tables below pick the E3B0 queue-based registers or the
 * older per-port registers depending on the chip revision.
 */
static void
bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
{
struct pbf_pN_cmd_regs cmd_regs[] = {
{0, (CHIP_IS_E3B0(sc)) ?
PBF_REG_TQ_OCCUPANCY_Q0 :
PBF_REG_P0_TQ_OCCUPANCY,
(CHIP_IS_E3B0(sc)) ?
PBF_REG_TQ_LINES_FREED_CNT_Q0 :
PBF_REG_P0_TQ_LINES_FREED_CNT},
{1, (CHIP_IS_E3B0(sc)) ?
PBF_REG_TQ_OCCUPANCY_Q1 :
PBF_REG_P1_TQ_OCCUPANCY,
(CHIP_IS_E3B0(sc)) ?
PBF_REG_TQ_LINES_FREED_CNT_Q1 :
PBF_REG_P1_TQ_LINES_FREED_CNT},
{4, (CHIP_IS_E3B0(sc)) ?
PBF_REG_TQ_OCCUPANCY_LB_Q :
PBF_REG_P4_TQ_OCCUPANCY,
(CHIP_IS_E3B0(sc)) ?
PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
PBF_REG_P4_TQ_LINES_FREED_CNT}
};
struct pbf_pN_buf_regs buf_regs[] = {
{0, (CHIP_IS_E3B0(sc)) ?
PBF_REG_INIT_CRD_Q0 :
PBF_REG_P0_INIT_CRD ,
(CHIP_IS_E3B0(sc)) ?
PBF_REG_CREDIT_Q0 :
PBF_REG_P0_CREDIT,
(CHIP_IS_E3B0(sc)) ?
PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
{1, (CHIP_IS_E3B0(sc)) ?
PBF_REG_INIT_CRD_Q1 :
PBF_REG_P1_INIT_CRD,
(CHIP_IS_E3B0(sc)) ?
PBF_REG_CREDIT_Q1 :
PBF_REG_P1_CREDIT,
(CHIP_IS_E3B0(sc)) ?
PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
{4, (CHIP_IS_E3B0(sc)) ?
PBF_REG_INIT_CRD_LB_Q :
PBF_REG_P4_INIT_CRD,
(CHIP_IS_E3B0(sc)) ?
PBF_REG_CREDIT_LB_Q :
PBF_REG_P4_CREDIT,
(CHIP_IS_E3B0(sc)) ?
PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
};
int i;
/* Verify the command queues are flushed P0, P1, P4 */
for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
}
/* Verify the transmission buffers are flushed P0, P1, P4 */
for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
}
}
/*
 * bxe_hw_enable_status - dump (at debug level) the enable/disable status
 * registers relevant to FLR: CFC weak enable, PBF disable, IGU MSI/MSI-X
 * enables and the PGLUE_B shadow/FLR-request/master-enable state.
 * Read-only; used purely for diagnostics after FLR cleanup.
 */
static void
bxe_hw_enable_status(struct bxe_softc *sc)
{
uint32_t val;
val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
val = REG_RD(sc, PBF_REG_DISABLE_PF);
BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
}
/*
 * bxe_pf_flr_clnup - full PF-side cleanup sequence after a Function Level
 * Reset: re-enable target reads, drain all HW usage counters, send the FW
 * final-cleanup command, flush PBF TX, then verify no PCIe transactions
 * remain pending.  Returns 0 on success, -1 if a counter poll or the FW
 * cleanup command times out.
 */
static int
bxe_pf_flr_clnup(struct bxe_softc *sc)
{
uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
/* Re-enable PF target read access */
REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
/* Poll HW usage counters */
BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
return (-1);
}
/* Zero the igu 'trailing edge' and 'leading edge' */
/* Send the FW cleanup command */
if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
return (-1);
}
/* ATC cleanup */
/* Verify TX hw is flushed */
bxe_tx_hw_flushed(sc, poll_cnt);
/* Wait 100ms (not adjusted according to platform) */
DELAY(100000);
/* Verify no pending pci transactions */
if (bxe_is_pcie_pending(sc)) {
BLOGE(sc, "PCIE Transactions still pending\n");
}
/* Debug */
bxe_hw_enable_status(sc);
/*
* Master enable - Due to WB DMAE writes performed before this
* register is re-initialized as part of the regular function init
*/
REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
return (0);
}
/*
 * bxe_init_hw_func - per-function (PHASE_PF0 + func) hardware init.
 *
 * Runs FLR cleanup (E2+), programs the CDU ILT lines for this function's
 * L2 contexts, enables the function in IGU/QM/PBF/CFC, initializes the
 * HC or IGU interrupt controller (including zeroing the IGU producer
 * memory), clears PCIe error latches, and on E1x scrubs "false" parity
 * errors from the HC MSI-X table.  Statement ordering is significant.
 *
 * Returns 0 on success, or the negative/nonzero result of
 * bxe_pf_flr_clnup() if FLR cleanup fails.
 */
static int
bxe_init_hw_func(struct bxe_softc *sc)
{
int port = SC_PORT(sc);
int func = SC_FUNC(sc);
int init_phase = PHASE_PF0 + func;
struct ecore_ilt *ilt = sc->ilt;
uint16_t cdu_ilt_start;
uint32_t addr, val;
uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
int i, main_mem_width, rc;
BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
/* FLR cleanup */
if (!CHIP_IS_E1x(sc)) {
rc = bxe_pf_flr_clnup(sc);
if (rc) {
BLOGE(sc, "FLR cleanup failed!\n");
// XXX bxe_fw_dump(sc);
// XXX bxe_idle_chk(sc);
return (rc);
}
}
/* set MSI reconfigure capability */
if (sc->devinfo.int_block == INT_BLOCK_HC) {
addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
val = REG_RD(sc, addr);
val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
REG_WR(sc, addr, val);
}
ecore_init_block(sc, BLOCK_PXP, init_phase);
ecore_init_block(sc, BLOCK_PXP2, init_phase);
/* point the CDU ILT lines at this function's L2 context memory */
ilt = sc->ilt;
cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
for (i = 0; i < L2_ILT_LINES(sc); i++) {
ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
ilt->lines[cdu_ilt_start + i].page_mapping =
sc->context[i].vcxt_dma.paddr;
ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
}
ecore_ilt_init_op(sc, INITOP_SET);
/* Set NIC mode */
REG_WR(sc, PRS_REG_NIC_MODE, 1);
BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
if (!CHIP_IS_E1x(sc)) {
uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
/* Turn on a single ISR mode in IGU if driver is going to use
* INT#x or MSI
*/
if (sc->interrupt_mode != INTR_MODE_MSIX) {
pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
}
/*
* Timers workaround bug: function init part.
* Need to wait 20msec after initializing ILT,
* needed to make sure there are no requests in
* one of the PXP internal queues with "old" ILT addresses
*/
DELAY(20000);
/*
* Master enable - Due to WB DMAE writes performed before this
* register is re-initialized as part of the regular function
* init
*/
REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
/* Enable the function in IGU */
REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
}
sc->dmae_ready = 1;
ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
if (!CHIP_IS_E1x(sc))
REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
ecore_init_block(sc, BLOCK_ATC, init_phase);
ecore_init_block(sc, BLOCK_DMAE, init_phase);
ecore_init_block(sc, BLOCK_NIG, init_phase);
ecore_init_block(sc, BLOCK_SRC, init_phase);
ecore_init_block(sc, BLOCK_MISC, init_phase);
ecore_init_block(sc, BLOCK_TCM, init_phase);
ecore_init_block(sc, BLOCK_UCM, init_phase);
ecore_init_block(sc, BLOCK_CCM, init_phase);
ecore_init_block(sc, BLOCK_XCM, init_phase);
ecore_init_block(sc, BLOCK_TSEM, init_phase);
ecore_init_block(sc, BLOCK_USEM, init_phase);
ecore_init_block(sc, BLOCK_CSEM, init_phase);
ecore_init_block(sc, BLOCK_XSEM, init_phase);
if (!CHIP_IS_E1x(sc))
REG_WR(sc, QM_REG_PF_EN, 1);
if (!CHIP_IS_E1x(sc)) {
REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
}
ecore_init_block(sc, BLOCK_QM, init_phase);
ecore_init_block(sc, BLOCK_TM, init_phase);
ecore_init_block(sc, BLOCK_DORQ, init_phase);
bxe_iov_init_dq(sc);
ecore_init_block(sc, BLOCK_BRB1, init_phase);
ecore_init_block(sc, BLOCK_PRS, init_phase);
ecore_init_block(sc, BLOCK_TSDM, init_phase);
ecore_init_block(sc, BLOCK_CSDM, init_phase);
ecore_init_block(sc, BLOCK_USDM, init_phase);
ecore_init_block(sc, BLOCK_XSDM, init_phase);
ecore_init_block(sc, BLOCK_UPB, init_phase);
ecore_init_block(sc, BLOCK_XPB, init_phase);
ecore_init_block(sc, BLOCK_PBF, init_phase);
if (!CHIP_IS_E1x(sc))
REG_WR(sc, PBF_REG_DISABLE_PF, 0);
ecore_init_block(sc, BLOCK_CDU, init_phase);
ecore_init_block(sc, BLOCK_CFC, init_phase);
if (!CHIP_IS_E1x(sc))
REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
if (IS_MF(sc)) {
REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
}
ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
/* HC init per function */
if (sc->devinfo.int_block == INT_BLOCK_HC) {
if (CHIP_IS_E1H(sc)) {
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
}
ecore_init_block(sc, BLOCK_HC, init_phase);
} else {
/* IGU-based interrupt block (E2+ or E1.5 backward-compat mode) */
int num_segs, sb_idx, prod_offset;
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
if (!CHIP_IS_E1x(sc)) {
REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
}
ecore_init_block(sc, BLOCK_IGU, init_phase);
if (!CHIP_IS_E1x(sc)) {
int dsb_idx = 0;
/**
* Producer memory:
* E2 mode: address 0-135 match to the mapping memory;
* 136 - PF0 default prod; 137 - PF1 default prod;
* 138 - PF2 default prod; 139 - PF3 default prod;
* 140 - PF0 attn prod; 141 - PF1 attn prod;
* 142 - PF2 attn prod; 143 - PF3 attn prod;
* 144-147 reserved.
*
* E1.5 mode - In backward compatible mode;
* for non default SB; each even line in the memory
* holds the U producer and each odd line hold
* the C producer. The first 128 producers are for
* NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
* producers are for the DSB for each PF.
* Each PF has five segments: (the order inside each
* segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
* 132-135 C prods; 136-139 X prods; 140-143 T prods;
* 144-147 attn prods;
*/
/* non-default-status-blocks */
num_segs = CHIP_INT_MODE_IS_BC(sc) ?
IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
prod_offset = (sc->igu_base_sb + sb_idx) *
num_segs;
for (i = 0; i < num_segs; i++) {
addr = IGU_REG_PROD_CONS_MEMORY +
(prod_offset + i) * 4;
REG_WR(sc, addr, 0);
}
/* send consumer update with value 0 */
bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
USTORM_ID, 0, IGU_INT_NOP, 1);
bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
}
/* default-status-blocks */
num_segs = CHIP_INT_MODE_IS_BC(sc) ?
IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
if (CHIP_IS_MODE_4_PORT(sc))
dsb_idx = SC_FUNC(sc);
else
dsb_idx = SC_VN(sc);
prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
IGU_BC_BASE_DSB_PROD + dsb_idx :
IGU_NORM_BASE_DSB_PROD + dsb_idx);
/*
* igu prods come in chunks of E1HVN_MAX (4) -
* does not matters what is the current chip mode
*/
for (i = 0; i < (num_segs * E1HVN_MAX);
i += E1HVN_MAX) {
addr = IGU_REG_PROD_CONS_MEMORY +
(prod_offset + i)*4;
REG_WR(sc, addr, 0);
}
/* send consumer update with 0 */
if (CHIP_INT_MODE_IS_BC(sc)) {
bxe_ack_sb(sc, sc->igu_dsb_id,
USTORM_ID, 0, IGU_INT_NOP, 1);
bxe_ack_sb(sc, sc->igu_dsb_id,
CSTORM_ID, 0, IGU_INT_NOP, 1);
bxe_ack_sb(sc, sc->igu_dsb_id,
XSTORM_ID, 0, IGU_INT_NOP, 1);
bxe_ack_sb(sc, sc->igu_dsb_id,
TSTORM_ID, 0, IGU_INT_NOP, 1);
bxe_ack_sb(sc, sc->igu_dsb_id,
ATTENTION_ID, 0, IGU_INT_NOP, 1);
} else {
bxe_ack_sb(sc, sc->igu_dsb_id,
USTORM_ID, 0, IGU_INT_NOP, 1);
bxe_ack_sb(sc, sc->igu_dsb_id,
ATTENTION_ID, 0, IGU_INT_NOP, 1);
}
bxe_igu_clear_sb(sc, sc->igu_dsb_id);
/* !!! these should become driver const once
rf-tool supports split-68 const */
REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
}
}
/* Reset PCIE errors for debug */
REG_WR(sc, 0x2114, 0xffffffff);
REG_WR(sc, 0x2120, 0xffffffff);
if (CHIP_IS_E1x(sc)) {
main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
main_mem_base = HC_REG_MAIN_MEMORY +
SC_PORT(sc) * (main_mem_size * 4);
main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
main_mem_width = 8;
val = REG_RD(sc, main_mem_prty_clr);
if (val) {
BLOGD(sc, DBG_LOAD,
"Parity errors in HC block during function init (0x%x)!\n",
val);
}
/* Clear "false" parity errors in MSI-X table */
for (i = main_mem_base;
i < main_mem_base + main_mem_size * 4;
i += main_mem_width) {
/* read-then-write-back via DMAE rescrubs each memory word */
bxe_read_dmae(sc, i, main_mem_width / 4);
bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
i, main_mem_width / 4);
}
/* Clear HC parity attention */
REG_RD(sc, main_mem_prty_clr);
}
#if 1
/* Enable STORMs SP logging */
REG_WR8(sc, BAR_USTRORM_INTMEM +
USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
REG_WR8(sc, BAR_TSTRORM_INTMEM +
TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
REG_WR8(sc, BAR_CSTRORM_INTMEM +
CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
REG_WR8(sc, BAR_XSTRORM_INTMEM +
XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
#endif
elink_phy_probe(&sc->link_params);
return (0);
}
/*
 * Bring down the physical link through the ELINK layer.
 *
 * A working bootcode (MCP) is required; without it we can only log a
 * warning, except on emulation/FPGA (slow chip revs) where the missing
 * bootcode is expected and the warning is suppressed.
 */
static void
bxe_link_reset(struct bxe_softc *sc)
{
	if (BXE_NOMCP(sc)) {
		if (!CHIP_REV_IS_SLOW(sc)) {
			BLOGW(sc, "Bootcode is missing - cannot reset link\n");
		}
		return;
	}

	bxe_acquire_phy_lock(sc);
	elink_lfa_reset(&sc->link_params, &sc->link_vars);
	bxe_release_phy_lock(sc);
}
/*
 * Quiesce one MAC port: bring the PHY link down, mask the port's NIG
 * interrupts, stop receive traffic into the BRB, and mask the port's AEU
 * attentions.  Finally sanity-check that the BRB drained.
 */
static void
bxe_reset_port(struct bxe_softc *sc)
{
	int port = SC_PORT(sc);
	uint32_t val;

	ELINK_DEBUG_P0(sc, "bxe_reset_port called\n");

	/* reset physical Link */
	bxe_link_reset(sc);

	/* Mask this port's NIG interrupt sources */
	REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);

	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
	    NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* give in-flight traffic 100ms to drain out of the BRB */
	DELAY(100000);

	/* Check for BRB port occupancy */
	val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val) {
		BLOGD(sc, DBG_LOAD,
		    "BRB1 is not empty, %d blocks are occupied\n", val);
	}

	/* TODO: Close Doorbell port? */
}
/*
 * Program one ILT (internal lookup table) entry with a physical address
 * via a two-dword DMAE write.  The on-chip address register block differs
 * between E1 and later chips.
 */
static void
bxe_ilt_wr(struct bxe_softc *sc,
           uint32_t index,
           bus_addr_t addr)
{
	uint32_t entry[2];
	int reg_offset;

	reg_offset = (CHIP_IS_E1(sc) ? PXP2_REG_RQ_ONCHIP_AT :
	    PXP2_REG_RQ_ONCHIP_AT_B0) + (index * 8);

	entry[0] = ONCHIP_ADDR1(addr);
	entry[1] = ONCHIP_ADDR2(addr);

	REG_WR_DMAE(sc, reg_offset, entry, 2);
}
/* Zero out every ILT entry belonging to the given PCI function. */
static void
bxe_clear_func_ilt(struct bxe_softc *sc,
                   uint32_t func)
{
	uint32_t first = FUNC_ILT_BASE(func);
	uint32_t idx;

	for (idx = first; idx < (first + ILT_PER_FUNC); idx++) {
		bxe_ilt_wr(sc, idx, 0);
	}
}
/*
 * Tear down per-function hardware state: disable the function in the
 * firmware STORMs, mark its status blocks disabled, clear the SPQ data
 * area, reset IGU/HC edge registers, stop CNIC timer scans, and clear the
 * function's ILT range.  Assumes bxe_reset_port() ran first (see comment
 * below before bxe_pf_disable()).
 */
static void
bxe_reset_func(struct bxe_softc *sc)
{
	struct bxe_fastpath *fp;
	int port = SC_PORT(sc);
	int func = SC_FUNC(sc);
	int i;

	/* Disable the function in the FW */
	REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);

	/* FP SBs */
	FOR_EACH_ETH_QUEUE(sc, i) {
		fp = &sc->fp[i];
		REG_WR8(sc, BAR_CSTRORM_INTMEM +
		    CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
		    SB_DISABLED);
	}

	/* SP SB */
	REG_WR8(sc, BAR_CSTRORM_INTMEM +
	    CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
	    SB_DISABLED);

	/*
	 * Clear the SPQ data area.
	 * NOTE(review): the write address does not advance with 'i', so the
	 * same dword is written XSTORM_SPQ_DATA_SIZE/4 times.  This matches
	 * the Linux bnx2x driver, but looks as if '+ i*4' may have been
	 * intended -- confirm against firmware documentation before changing.
	 */
	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
		REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
	}

	/* Configure IGU */
	if (sc->devinfo.int_block == INT_BLOCK_HC) {
		REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	} else {
		REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
		REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
	}

	if (CNIC_LOADED(sc)) {
		/* Disable Timer scan */
		REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
		/*
		 * Wait for at least 10ms and up to 2 second for the timers
		 * scan to complete
		 */
		for (i = 0; i < 200; i++) {
			DELAY(10000);
			if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
				break;
		}
	}

	/* Clear ILT */
	bxe_clear_func_ilt(sc, func);

	/*
	 * Timers workaround bug for E2: if this is vnic-3,
	 * we need to set the entire ilt range for this timers.
	 */
	if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
		struct ilt_client_info ilt_cli;
		/* use dummy TM client */
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
	}

	/* this assumes that reset_port() called before reset_func()*/
	if (!CHIP_IS_E1x(sc)) {
		bxe_pf_disable(sc);
	}

	/* DMAE is no longer usable once the function is reset */
	sc->dmae_ready = 0;
}
/* Placeholder: no gunzip buffer setup is needed in this port. */
static int
bxe_gunzip_init(struct bxe_softc *sc)
{
	return (0);
}
/* Placeholder: nothing to tear down for gunzip support. */
static void
bxe_gunzip_end(struct bxe_softc *sc)
{
}
/*
 * Bind the chip-appropriate ecore firmware blobs and IRO array to the
 * softc.  Returns 0 on success, -1 for an unrecognized chip revision.
 */
static int
bxe_init_firmware(struct bxe_softc *sc)
{
	if (CHIP_IS_E1(sc)) {
		ecore_init_e1_firmware(sc);
		sc->iro_array = e1_iro_arr;
		return (0);
	}

	if (CHIP_IS_E1H(sc)) {
		ecore_init_e1h_firmware(sc);
		sc->iro_array = e1h_iro_arr;
		return (0);
	}

	if (!CHIP_IS_E1x(sc)) {
		ecore_init_e2_firmware(sc);
		sc->iro_array = e2_iro_arr;
		return (0);
	}

	BLOGE(sc, "Unsupported chip revision\n");
	return (-1);
}
/* Firmware is statically linked into the driver; nothing to release. */
static void
bxe_release_firmware(struct bxe_softc *sc)
{
}
/*
 * ecore callback: decompress a zipped init blob.  Not implemented in this
 * port -- log and report failure.
 */
static int
ecore_gunzip(struct bxe_softc *sc,
             const uint8_t *zbuf,
             int len)
{
	/* XXX : Implement... */
	BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
	return (FALSE);
}
/* ecore shim: indirect register write, forwarded to the bxe helper. */
static void
ecore_reg_wr_ind(struct bxe_softc *sc,
                 uint32_t addr,
                 uint32_t val)
{
	bxe_reg_wr_ind(sc, addr, val);
}
/* ecore shim: DMAE write of 'len' dwords, forwarded to the bxe helper. */
static void
ecore_write_dmae_phys_len(struct bxe_softc *sc,
                          bus_addr_t phys_addr,
                          uint32_t addr,
                          uint32_t len)
{
	bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
}
/*
 * Copy a structure into STORM internal memory, one dword at a time.
 *
 * @addr  destination offset in BAR memory
 * @size  structure size in bytes (any trailing partial dword is ignored,
 *        matching the original size/4 arithmetic)
 * @data  source dwords
 *
 * Bug fix: the loop counter was a uint8_t, so any structure of 1024 bytes
 * or more (size/4 >= 256) would wrap the counter and loop forever.  Use
 * size_t to match the width of 'size'.
 */
void
ecore_storm_memset_struct(struct bxe_softc *sc,
                          uint32_t addr,
                          size_t size,
                          uint32_t *data)
{
	size_t i;

	for (i = 0; i < (size / 4); i++) {
		REG_WR(sc, addr + (i * 4), data[i]);
	}
}
/*
 * character device - ioctl interface definitions
 *
 * A /dev node is created per interface (see bxe_add_cdev()) through which
 * management tools trigger GRC dumps, read/write registers, PCI config
 * space, and the NVRAM/eeprom.
 */
#include "bxe_dump.h"
#include "bxe_ioctl.h"
#include <sys/conf.h>

/* ioctl entry point for the management character device */
static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td);

static struct cdevsw bxe_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = bxe_eioctl,
	.d_name = "bxecnic",
};

/* PCIe path (0/1) of this function; E1x chips are single-path */
#define BXE_PATH(sc) (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))

/* GRC dump preset selection: bit mask of the 13 register presets */
#define DUMP_ALL_PRESETS 0x1FFF
#define DUMP_MAX_PRESETS 13

/* does a dump table entry apply to the given chip revision? */
#define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
#define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
#define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
#define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
#define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)

/* is 1-based preset 'idx' selected in the 'presets' bit mask? */
#define IS_REG_IN_PRESET(presets, idx)  \
    ((presets & (1 << (idx-1))) == (1 << (idx-1)))
/*
 * Number of 32-bit words that 1-based preset 'preset' contributes to a
 * GRC dump on the current chip, or 0 for an unknown chip revision.
 */
static int
bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset)
{
	int chip_row;

	if (CHIP_IS_E1(sc))
		chip_row = 0;
	else if (CHIP_IS_E1H(sc))
		chip_row = 1;
	else if (CHIP_IS_E2(sc))
		chip_row = 2;
	else if (CHIP_IS_E3A0(sc))
		chip_row = 3;
	else if (CHIP_IS_E3B0(sc))
		chip_row = 4;
	else
		return 0;

	return dump_num_registers[chip_row][preset - 1];
}
/* Total GRC dump payload length, in 32-bit words, across all presets. */
static int
bxe_get_total_regs_len32(struct bxe_softc *sc)
{
	int total_len32 = 0;
	uint32_t idx;

	for (idx = 1; idx <= DUMP_MAX_PRESETS; idx++)
		total_len32 += bxe_get_preset_regs_len(sc, idx);

	return total_len32;
}
/* Page values used for "paged" register dumps; E2/E3 chips only. */
static const uint32_t *
__bxe_get_page_addr_ar(struct bxe_softc *sc)
{
	if (CHIP_IS_E2(sc))
		return page_vals_e2;
	if (CHIP_IS_E3(sc))
		return page_vals_e3;
	return NULL;
}
/* Count of entries in the page-value table; 0 on non-E2/E3 chips. */
static uint32_t
__bxe_get_page_reg_num(struct bxe_softc *sc)
{
	if (CHIP_IS_E2(sc))
		return PAGE_MODE_VALUES_E2;
	if (CHIP_IS_E3(sc))
		return PAGE_MODE_VALUES_E3;
	return 0;
}
/* Write addresses used to select a page; E2/E3 chips only. */
static const uint32_t *
__bxe_get_page_write_ar(struct bxe_softc *sc)
{
	if (CHIP_IS_E2(sc))
		return page_write_regs_e2;
	if (CHIP_IS_E3(sc))
		return page_write_regs_e3;
	return NULL;
}
/* Count of page-select write addresses; 0 on non-E2/E3 chips. */
static uint32_t
__bxe_get_page_write_num(struct bxe_softc *sc)
{
	if (CHIP_IS_E2(sc))
		return PAGE_WRITE_REGS_E2;
	if (CHIP_IS_E3(sc))
		return PAGE_WRITE_REGS_E3;
	return 0;
}
/* Read-address descriptors for paged registers; E2/E3 chips only. */
static const struct reg_addr *
__bxe_get_page_read_ar(struct bxe_softc *sc)
{
	if (CHIP_IS_E2(sc))
		return page_read_regs_e2;
	if (CHIP_IS_E3(sc))
		return page_read_regs_e3;
	return NULL;
}
/* Count of paged-register read-address descriptors; 0 on non-E2/E3. */
static uint32_t
__bxe_get_page_read_num(struct bxe_softc *sc)
{
	if (CHIP_IS_E2(sc))
		return PAGE_READ_REGS_E2;
	if (CHIP_IS_E3(sc))
		return PAGE_READ_REGS_E3;
	return 0;
}
/* Does this dump-table register entry apply to the current chip? */
static bool
bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info)
{
	uint32_t chip_mask = reg_info->chips;

	if (CHIP_IS_E1(sc))
		return IS_E1_REG(chip_mask);
	if (CHIP_IS_E1H(sc))
		return IS_E1H_REG(chip_mask);
	if (CHIP_IS_E2(sc))
		return IS_E2_REG(chip_mask);
	if (CHIP_IS_E3A0(sc))
		return IS_E3A0_REG(chip_mask);
	if (CHIP_IS_E3B0(sc))
		return IS_E3B0_REG(chip_mask);
	return 0;
}
/* Does this wide-register dump entry apply to the current chip? */
static bool
bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info)
{
	uint32_t chip_mask = wreg_info->chips;

	if (CHIP_IS_E1(sc))
		return IS_E1_REG(chip_mask);
	if (CHIP_IS_E1H(sc))
		return IS_E1H_REG(chip_mask);
	if (CHIP_IS_E2(sc))
		return IS_E2_REG(chip_mask);
	if (CHIP_IS_E3A0(sc))
		return IS_E3A0_REG(chip_mask);
	if (CHIP_IS_E3B0(sc))
		return IS_E3B0_REG(chip_mask);
	return 0;
}
/**
 * bxe_read_pages_regs - read "paged" registers into *p
 *
 * @sc      device handle
 * @p       output buffer
 * @preset  1-based preset whose registers should be dumped
 *
 * Paged memories may only be read by first storing a page value at one or
 * more "write addresses" and then reading back from a set of "read
 * addresses"; there may be several write addresses per page and several
 * read addresses per write address.  On chips other than E2/E3 the lookup
 * helpers return NULL/0, making every loop below a no-op.
 */
static void
bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
{
	/* page values, page-select write addresses, read descriptors */
	const uint32_t *page_vals = __bxe_get_page_addr_ar(sc);
	int n_pages = __bxe_get_page_reg_num(sc);
	const uint32_t *wr_addrs = __bxe_get_page_write_ar(sc);
	int n_writes = __bxe_get_page_write_num(sc);
	const struct reg_addr *rd_info = __bxe_get_page_read_ar(sc);
	int n_reads = __bxe_get_page_read_num(sc);
	uint32_t pg, wr, rd, dw;

	for (pg = 0; pg < n_pages; pg++) {
		for (wr = 0; wr < n_writes; wr++) {
			/* select the page through this write address */
			REG_WR(sc, wr_addrs[wr], page_vals[pg]);
			for (rd = 0; rd < n_reads; rd++) {
				if (!IS_REG_IN_PRESET(rd_info[rd].presets,
				    preset))
					continue;
				for (dw = 0; dw < rd_info[rd].size; dw++)
					*p++ = REG_RD(sc,
					    rd_info[rd].addr + (dw * 4));
			}
		}
	}
}
/*
 * Dump every register belonging to 1-based 'preset' into *p for the
 * current chip: idle-check registers first, then regular registers, then
 * the CAM (wide-bus) registers, and finally any paged registers (E2/E3).
 * Returns 0 on success or -1 for an unrecognized chip revision.
 */
static int
bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
{
	uint32_t i, j, addr;
	const struct wreg_addr *wreg_addr_p = NULL;

	/* Select the wide-register descriptor for this chip */
	if (CHIP_IS_E1(sc))
		wreg_addr_p = &wreg_addr_e1;
	else if (CHIP_IS_E1H(sc))
		wreg_addr_p = &wreg_addr_e1h;
	else if (CHIP_IS_E2(sc))
		wreg_addr_p = &wreg_addr_e2;
	else if (CHIP_IS_E3A0(sc))
		wreg_addr_p = &wreg_addr_e3;
	else if (CHIP_IS_E3B0(sc))
		wreg_addr_p = &wreg_addr_e3b0;
	else
		return (-1);

	/* Read the idle_chk registers */
	for (i = 0; i < IDLE_REGS_COUNT; i++) {
		if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) &&
		    IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
			for (j = 0; j < idle_reg_addrs[i].size; j++)
				*p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4);
		}
	}

	/* Read the regular registers */
	for (i = 0; i < REGS_COUNT; i++) {
		if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
		    IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
			for (j = 0; j < reg_addrs[i].size; j++)
				*p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
		}
	}

	/* Read the CAM registers */
	if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
	    IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
		for (i = 0; i < wreg_addr_p->size; i++) {
			*p++ = REG_RD(sc, wreg_addr_p->addr + i*4);

			/* In case of wreg_addr register, read additional
			   registers from read_regs array
			 */
			for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
				/* read_regs[0] is the base of the extras */
				addr = *(wreg_addr_p->read_regs);
				*p++ = REG_RD(sc, addr + j*4);
			}
		}
	}

	/* Paged registers are supported in E2 & E3 only */
	if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
		/* Read "paged" registers */
		bxe_read_pages_regs(sc, p, preset);
	}

	return 0;
}
/*
 * Collect a full GRC register dump into a freshly allocated sc->grc_dump
 * buffer (the ioctl path copies it to userland and frees it).  Parity
 * attentions are disabled on both paths for the duration of the dump and
 * re-enabled afterwards.  Also logs the DMA layout of the major rings for
 * post-mortem analysis.
 *
 * Returns 0 (including when a dump is already started or complete), or
 * ENOMEM if the dump buffer cannot be allocated.
 *
 * Bug fix: on allocation failure, sc->grcdump_started was left set, which
 * made every future call return immediately without ever collecting a
 * dump.  Reset it before returning ENOMEM so a later attempt can retry.
 */
int
bxe_grc_dump(struct bxe_softc *sc)
{
	int rval = 0;
	uint32_t preset_idx;
	uint8_t *buf;
	uint32_t size;
	struct dump_header *d_hdr;
	uint32_t i;
	uint32_t reg_val;
	uint32_t reg_addr;
	uint32_t cmd_offset;
	struct ecore_ilt *ilt = SC_ILT(sc);
	struct bxe_fastpath *fp;
	struct ilt_client_info *ilt_cli;
	int grc_dump_size;

	if (sc->grcdump_done || sc->grcdump_started)
		return (rval);

	sc->grcdump_started = 1;
	BLOGI(sc, "Started collecting grcdump\n");

	grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
	    sizeof(struct dump_header);

	sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);

	if (sc->grc_dump == NULL) {
		BLOGW(sc, "Unable to allocate memory for grcdump collection\n");
		/* allow a retry instead of disabling grcdump forever */
		sc->grcdump_started = 0;
		return(ENOMEM);
	}

	/* Disable parity attentions as long as following dump may
	 * cause false alarms by reading never written registers. We
	 * will re-enable parity attentions right after the dump.
	 */

	/* Disable parity on path 0 */
	bxe_pretend_func(sc, 0);
	ecore_disable_blocks_parity(sc);

	/* Disable parity on path 1 */
	bxe_pretend_func(sc, 1);
	ecore_disable_blocks_parity(sc);

	/* Return to current function */
	bxe_pretend_func(sc, SC_ABS_FUNC(sc));

	buf = sc->grc_dump;
	d_hdr = sc->grc_dump;

	d_hdr->header_size = (sizeof(struct dump_header) >> 2) - 1;
	d_hdr->version = BNX2X_DUMP_VERSION;
	d_hdr->preset = DUMP_ALL_PRESETS;

	if (CHIP_IS_E1(sc)) {
		d_hdr->dump_meta_data = DUMP_CHIP_E1;
	} else if (CHIP_IS_E1H(sc)) {
		d_hdr->dump_meta_data = DUMP_CHIP_E1H;
	} else if (CHIP_IS_E2(sc)) {
		d_hdr->dump_meta_data = DUMP_CHIP_E2 |
		    (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
	} else if (CHIP_IS_E3A0(sc)) {
		d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
		    (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
	} else if (CHIP_IS_E3B0(sc)) {
		d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
		    (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
	}

	buf += sizeof(struct dump_header);

	/* Dump every preset except those containing IORs (2, 5, 8, 11) */
	for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
		/* Skip presets with IOR */
		if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
		    (preset_idx == 11))
			continue;

		rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx);

		if (rval)
			break;

		size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t));

		buf += size;
	}

	/* Clear and re-enable parity attentions on both paths */
	bxe_pretend_func(sc, 0);
	ecore_clear_blocks_parity(sc);
	ecore_enable_blocks_parity(sc);

	bxe_pretend_func(sc, 1);
	ecore_clear_blocks_parity(sc);
	ecore_enable_blocks_parity(sc);

	/* Return to current function */
	bxe_pretend_func(sc, SC_ABS_FUNC(sc));

	/* Log the DMA memory layout while the interface is up */
	if(sc->state == BXE_STATE_OPEN) {
		if(sc->fw_stats_req != NULL) {
			BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n",
			    (uintmax_t)sc->fw_stats_req_mapping,
			    (uintmax_t)sc->fw_stats_data_mapping,
			    sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size));
		}
		if(sc->def_sb != NULL) {
			BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n",
			    (void *)sc->def_sb_dma.paddr, sc->def_sb,
			    sizeof(struct host_sp_status_block));
		}
		if(sc->eq_dma.vaddr != NULL) {
			BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n",
			    (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE);
		}
		if(sc->sp_dma.vaddr != NULL) {
			BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n",
			    (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr,
			    sizeof(struct bxe_slowpath));
		}
		if(sc->spq_dma.vaddr != NULL) {
			BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n",
			    (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE);
		}
		if(sc->gz_buf_dma.vaddr != NULL) {
			BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n",
			    (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr,
			    FW_BUF_SIZE);
		}
		for (i = 0; i < sc->num_queues; i++) {
			fp = &sc->fp[i];
			if(fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL &&
			    fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL &&
			    fp->rx_sge_dma.vaddr != NULL) {
				BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
				    (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr,
				    sizeof(union bxe_host_hc_status_block));
				BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
				    (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr,
				    (BCM_PAGE_SIZE * TX_BD_NUM_PAGES));
				BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
				    (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr,
				    (BCM_PAGE_SIZE * RX_BD_NUM_PAGES));
				BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
				    (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr,
				    (BCM_PAGE_SIZE * RCQ_NUM_PAGES));
				BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
				    (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr,
				    (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES));
			}
		}
		if(ilt != NULL ) {
			ilt_cli = &ilt->clients[1];
			if(ilt->lines != NULL) {
				for (i = ilt_cli->start; i <= ilt_cli->end; i++) {
					BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n",
					    (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr),
					    ((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE);
				}
			}
		}

		cmd_offset = DMAE_REG_CMD_MEM;
		for (i = 0; i < 224; i++) {
			reg_addr = (cmd_offset +(i * 4));
			reg_val = REG_RD(sc, reg_addr);
			BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n",i,
			    reg_addr, reg_val);
		}
	}

	BLOGI(sc, "Collection of grcdump done\n");
	sc->grcdump_done = 1;
	return(rval);
}
/*
 * Create the per-interface management character device (named after the
 * ifnet, e.g. /dev/bxe0) plus the bounce buffer used by eeprom ioctls.
 * Returns 0 on success, -1 on any failure (everything is unwound).
 */
static int
bxe_add_cdev(struct bxe_softc *sc)
{
	sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT);
	if (sc->eeprom == NULL) {
		BLOGW(sc, "Unable to alloc for eeprom size buffer\n");
		return (-1);
	}

	sc->ioctl_dev = make_dev(&bxe_cdevsw, if_getdunit(sc->ifp), UID_ROOT,
	    GID_WHEEL, 0600, "%s", if_name(sc->ifp));
	if (sc->ioctl_dev == NULL) {
		free(sc->eeprom, M_DEVBUF);
		sc->eeprom = NULL;
		return (-1);
	}

	/* let bxe_eioctl() find the softc from the cdev */
	sc->ioctl_dev->si_drv1 = sc;

	return (0);
}
/* Destroy the management character device and its eeprom buffer. */
static void
bxe_del_cdev(struct bxe_softc *sc)
{
	if (sc->ioctl_dev != NULL) {
		destroy_dev(sc->ioctl_dev);
		sc->ioctl_dev = NULL;
	}

	if (sc->eeprom != NULL) {
		free(sc->eeprom, M_DEVBUF);
		sc->eeprom = NULL;
	}
}
/* NVRAM may only be accessed while the interface is up and running. */
static bool bxe_is_nvram_accessible(struct bxe_softc *sc)
{
	return ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0);
}
/*
 * Write 'len' bytes from 'data' to NVRAM at 'offset'.  Fails with -EAGAIN
 * if the interface is down (NVRAM is not accessible then).
 */
static int
bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
{
	if (!bxe_is_nvram_accessible(sc)) {
		BLOGW(sc, "Cannot access eeprom when interface is down\n");
		return (-EAGAIN);
	}

	return (bxe_nvram_write(sc, offset, (uint8_t *)data, len));
}
/*
 * Read 'len' bytes of NVRAM at 'offset' into 'data'.  Fails with -EAGAIN
 * if the interface is down (NVRAM is not accessible then).
 */
static int
bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
{
	if (!bxe_is_nvram_accessible(sc)) {
		BLOGW(sc, "Cannot access eeprom when interface is down\n");
		return (-EAGAIN);
	}

	return (bxe_nvram_read(sc, offset, (uint8_t *)data, len));
}
/*
 * Service BXE_EEPROM ioctl sub-commands: stage user data through the
 * pre-allocated sc->eeprom bounce buffer and forward to the NVRAM helpers.
 *
 * Security fix: eeprom_data_len comes straight from userland and was never
 * validated; an oversized value made the copyin/copyout below run past the
 * end of the BXE_EEPROM_MAX_DATA_LEN bounce buffer.  Reject out-of-range
 * lengths up front.
 */
static int
bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom)
{
	int rval = 0;

	if ((eeprom->eeprom_data_len == 0) ||
	    (eeprom->eeprom_data_len > BXE_EEPROM_MAX_DATA_LEN)) {
		BLOGW(sc, "ioctl cmd %d invalid data length %d\n",
		    eeprom->eeprom_cmd, eeprom->eeprom_data_len);
		return (EINVAL);
	}

	switch (eeprom->eeprom_cmd) {

	case BXE_EEPROM_CMD_SET_EEPROM:
		/* stage the user bytes, then program NVRAM */
		rval = copyin(eeprom->eeprom_data, sc->eeprom,
		    eeprom->eeprom_data_len);

		if (rval)
			break;

		rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
		    eeprom->eeprom_data_len);
		break;

	case BXE_EEPROM_CMD_GET_EEPROM:
		/* read NVRAM into the bounce buffer, then copy out */
		rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
		    eeprom->eeprom_data_len);

		if (rval) {
			break;
		}

		rval = copyout(sc->eeprom, eeprom->eeprom_data,
		    eeprom->eeprom_data_len);
		break;

	default:
		rval = EINVAL;
		break;
	}

	if (rval) {
		BLOGW(sc, "ioctl cmd %d failed rval %d\n", eeprom->eeprom_cmd, rval);
	}

	return (rval);
}
/*
 * Fill a bxe_dev_setting_t with the current link state for the
 * BXE_DEV_SETTING ioctl: supported/advertised mode masks, speed, duplex,
 * media port, PHY address, and autonegotiation state.  Always returns 0.
 */
static int
bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p)
{
	uint32_t ext_phy_config;
	int port = SC_PORT(sc);
	int cfg_idx = bxe_get_link_cfg_idx(sc);

	dev_p->supported = sc->port.supported[cfg_idx] |
	    (sc->port.supported[cfg_idx ^ 1] &
	    (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE));
	dev_p->advertising = sc->port.advertising[cfg_idx];
	if(sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type ==
	    ELINK_ETH_PHY_SFP_1G_FIBER) {
		/*
		 * Bug fix: a 1G SFP module cannot do 10G, so clear that bit.
		 * This previously used '=' instead of '&=', which replaced
		 * the whole supported mask with ~10G rather than clearing
		 * one bit (the adjacent 'advertising' line shows the
		 * intent).
		 */
		dev_p->supported &= ~(ELINK_SUPPORTED_10000baseT_Full);
		dev_p->advertising &= ~(ADVERTISED_10000baseT_Full);
	}
	if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up &&
	    !(sc->flags & BXE_MF_FUNC_DIS)) {
		dev_p->duplex = sc->link_vars.duplex;
		if (IS_MF(sc) && !BXE_NOMCP(sc))
			dev_p->speed = bxe_get_mf_speed(sc);
		else
			dev_p->speed = sc->link_vars.line_speed;
	} else {
		dev_p->duplex = DUPLEX_UNKNOWN;
		dev_p->speed = SPEED_UNKNOWN;
	}

	dev_p->port = bxe_media_detect(sc);

	/* Report the external PHY address when one is present and sane */
	ext_phy_config = SHMEM_RD(sc,
	    dev_info.port_hw_config[port].external_phy_config);
	if((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		dev_p->phy_address = sc->port.phy_addr;
	else if(((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
	    ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config);
	else
		dev_p->phy_address = 0;

	if(sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG)
		dev_p->autoneg = AUTONEG_ENABLE;
	else
		dev_p->autoneg = AUTONEG_DISABLE;

	return 0;
}
/*
 * ioctl handler for the bxe management character device.  Arguments travel
 * through 'data', a kernel copy of the user structure made by the generic
 * ioctl layer; embedded user pointers (grcdump, reg_buf, eeprom_data) are
 * accessed only via copyin/copyout.
 */
static int
bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct bxe_softc *sc;
	int rval = 0;
	bxe_grcdump_t *dump = NULL;
	int grc_dump_size;
	int dump_total_size;
	bxe_drvinfo_t *drv_infop = NULL;
	bxe_dev_setting_t *dev_p;
	bxe_dev_setting_t dev_set;
	bxe_get_regs_t *reg_p;
	bxe_reg_rdw_t *reg_rdw_p;
	bxe_pcicfg_rdw_t *cfg_rdw_p;
	bxe_perm_mac_addr_t *mac_addr_p;

	if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
		return ENXIO;

	dump = (bxe_grcdump_t *)data;

	switch(cmd) {

	case BXE_GRC_DUMP_SIZE:
		/* report the buffer size userland must supply for BXE_GRC_DUMP */
		dump->pci_func = sc->pcie_func;
		dump->grcdump_size =
		    (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
		    sizeof(struct dump_header);
		break;

	case BXE_GRC_DUMP:

		grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
		    sizeof(struct dump_header);
		if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) ||
		    (dump->grcdump_size < grc_dump_size)) {
			rval = EINVAL;
			break;
		}

		if((sc->trigger_grcdump) && (!sc->grcdump_done) &&
		    (!sc->grcdump_started)) {
			rval =  bxe_grc_dump(sc);
		}

		if((!rval) && (sc->grcdump_done) && (sc->grcdump_started) &&
		    (sc->grc_dump != NULL))  {
			dump->grcdump_dwords = grc_dump_size >> 2;
			rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
			free(sc->grc_dump, M_DEVBUF);
			sc->grc_dump = NULL;
			sc->grcdump_started = 0;
			sc->grcdump_done = 0;
		}

		break;

	case BXE_DRV_INFO:
		drv_infop = (bxe_drvinfo_t *)data;
		snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe");
		snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s",
		    BXE_DRIVER_VERSION);
		snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s",
		    sc->devinfo.bc_ver_str);
		snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH,
		    "%s", sc->fw_ver_str);
		drv_infop->eeprom_dump_len = sc->devinfo.flash_size;
		drv_infop->reg_dump_len =
		    (bxe_get_total_regs_len32(sc) * sizeof(uint32_t))
		    + sizeof(struct dump_header);
		snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d",
		    sc->pcie_bus, sc->pcie_device, sc->pcie_func);
		break;

	case BXE_DEV_SETTING:
		dev_p = (bxe_dev_setting_t *)data;
		bxe_get_settings(sc, &dev_set);
		dev_p->supported = dev_set.supported;
		dev_p->advertising = dev_set.advertising;
		dev_p->speed = dev_set.speed;
		dev_p->duplex = dev_set.duplex;
		dev_p->port = dev_set.port;
		dev_p->phy_address = dev_set.phy_address;
		dev_p->autoneg = dev_set.autoneg;
		break;

	case BXE_GET_REGS:

		reg_p = (bxe_get_regs_t *)data;
		grc_dump_size = reg_p->reg_buf_len;

		/*
		 * Security fix: reg_buf_len comes straight from userland.
		 * Clamp it to the size of the dump actually collected so
		 * the copyout below cannot read past the end of the
		 * sc->grc_dump allocation.
		 */
		dump_total_size =
		    (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
		    sizeof(struct dump_header);
		if ((grc_dump_size < 0) || (grc_dump_size > dump_total_size))
			grc_dump_size = dump_total_size;

		if((!sc->grcdump_done) && (!sc->grcdump_started)) {
			bxe_grc_dump(sc);
		}

		if((sc->grcdump_done) && (sc->grcdump_started) &&
		    (sc->grc_dump != NULL))  {
			rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size);
			free(sc->grc_dump, M_DEVBUF);
			sc->grc_dump = NULL;
			sc->grcdump_started = 0;
			sc->grcdump_done = 0;
		}

		break;

	case BXE_RDW_REG:
		reg_rdw_p = (bxe_reg_rdw_t *)data;
		if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) &&
		    (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
			reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id);

		if((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) &&
		    (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
			REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val);

		break;

	case BXE_RDW_PCICFG:
		cfg_rdw_p = (bxe_pcicfg_rdw_t *)data;
		if(cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) {
			cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id,
			    cfg_rdw_p->cfg_width);
		} else if(cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) {
			pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val,
			    cfg_rdw_p->cfg_width);
		} else {
			BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n");
		}
		break;

	case BXE_MAC_ADDR:
		mac_addr_p = (bxe_perm_mac_addr_t *)data;
		snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s",
		    sc->mac_addr_str);
		break;

	case BXE_EEPROM:
		rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data);
		break;

	default:
		break;
	}

	return (rval);
}
#ifdef DEBUGNET
/* debugnet: report RX ring count and mbuf cluster parameters. */
static void
bxe_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
{
	struct bxe_softc *sc = if_getsoftc(ifp);

	BXE_CORE_LOCK(sc);
	*nrxr = sc->num_queues;
	*ncl = DEBUGNET_MAX_IN_FLIGHT;
	*clsize = sc->fp[0].mbuf_alloc_size;
	BXE_CORE_UNLOCK(sc);
}
/* debugnet: no driver-specific handling is required for events. */
static void
bxe_debugnet_event(if_t ifp __unused, enum debugnet_ev event __unused)
{
}
/*
 * debugnet: transmit one mbuf on queue 0.  Refuses with ENOENT unless the
 * interface is running (and not OACTIVE) with link up.  On encap failure
 * the mbuf is freed here, matching the normal tx path's ownership rules.
 */
static int
bxe_debugnet_transmit(if_t ifp, struct mbuf *m)
{
	struct bxe_softc *sc = if_getsoftc(ifp);
	int rc;

	if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING) || !sc->link_vars.link_up)
		return (ENOENT);

	rc = bxe_tx_encap(&sc->fp[0], &m);
	if (rc != 0 && m != NULL)
		m_freem(m);

	return (rc);
}
/*
 * debugnet: poll every RX queue and reap TX completions on queue 0.
 * Refuses with ENOENT unless the interface is running with link up.
 */
static int
bxe_debugnet_poll(if_t ifp, int count)
{
	struct bxe_softc *sc = if_getsoftc(ifp);
	int q;

	if (((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) ||
	    !sc->link_vars.link_up)
		return (ENOENT);

	for (q = 0; q < sc->num_queues; q++)
		(void)bxe_rxeof(sc, &sc->fp[q]);
	(void)bxe_txeof(sc, &sc->fp[0]);

	return (0);
}
#endif /* DEBUGNET */
diff --git a/sys/dev/cadence/if_cgem.c b/sys/dev/cadence/if_cgem.c
index 9d2b1d71883e..67fbdf4e3fc5 100644
--- a/sys/dev/cadence/if_cgem.c
+++ b/sys/dev/cadence/if_cgem.c
@@ -1,2016 +1,2011 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2012-2014 Thomas Skibo <thomasskibo@yahoo.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* A network interface driver for Cadence GEM Gigabit Ethernet
* interface such as the one used in Xilinx Zynq-7000 SoC.
*
* Reference: Zynq-7000 All Programmable SoC Technical Reference Manual.
* (v1.4) November 16, 2012. Xilinx doc UG585. GEM is covered in Ch. 16
* and register definitions are in appendix B.18.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif
#include <net/bpf.h>
#include <net/bpfdesc.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_fdt.h>
#include <dev/clk/clk.h>
#if BUS_SPACE_MAXADDR > BUS_SPACE_MAXADDR_32BIT
#define CGEM64
#endif
#include <dev/cadence/if_cgem_hw.h>
#include "miibus_if.h"
#define IF_CGEM_NAME "cgem"
#define CGEM_NUM_RX_DESCS 512 /* size of receive descriptor ring */
#define CGEM_NUM_TX_DESCS 512 /* size of transmit descriptor ring */
/* Default for sysctl rxbufs. Must be < CGEM_NUM_RX_DESCS of course. */
#define DEFAULT_NUM_RX_BUFS 256 /* number of receive bufs to queue. */
#define TX_MAX_DMA_SEGS 8 /* maximum segs in a tx mbuf dma */
#define CGEM_CKSUM_ASSIST (CSUM_IP | CSUM_TCP | CSUM_UDP | \
CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
#define HWQUIRK_NONE 0
#define HWQUIRK_NEEDNULLQS 1
#define HWQUIRK_RXHANGWAR 2
/*
 * FDT "compatible" strings of supported GEM instances, each paired with
 * the hardware quirk flags that instance needs.
 */
static struct ofw_compat_data compat_data[] = {
	{ "cdns,zynq-gem", HWQUIRK_RXHANGWAR }, /* Deprecated */
	{ "cdns,zynqmp-gem", HWQUIRK_NEEDNULLQS }, /* Deprecated */
	{ "xlnx,zynq-gem", HWQUIRK_RXHANGWAR },
	{ "xlnx,zynqmp-gem", HWQUIRK_NEEDNULLQS },
	{ "microchip,mpfs-mss-gem", HWQUIRK_NEEDNULLQS },
	{ "sifive,fu540-c000-gem", HWQUIRK_NONE },
	{ "sifive,fu740-c000-gem", HWQUIRK_NONE },
	{ NULL, 0 }
};
/* Per-instance software state for a Cadence GEM controller. */
struct cgem_softc {
	if_t ifp;
	struct mtx sc_mtx;
	device_t dev;
	device_t miibus;
	u_int mii_media_active;	/* last active media */
	int if_old_flags;
	struct resource *mem_res;
	struct resource *irq_res;
	void *intrhand;
	struct callout tick_ch;
	/* shadow copies of control/config registers (presumably kept to
	 * avoid read-modify-write of the hardware -- confirm in attach) */
	uint32_t net_ctl_shadow;
	uint32_t net_cfg_shadow;
	/* clock handles obtained from the clock framework */
	clk_t clk_pclk;
	clk_t clk_hclk;
	clk_t clk_txclk;
	clk_t clk_rxclk;
	clk_t clk_tsuclk;
	int neednullqs;		/* HWQUIRK_NEEDNULLQS in effect */
	int phy_contype;

	bus_dma_tag_t desc_dma_tag;
	bus_dma_tag_t mbuf_dma_tag;

	/* receive descriptor ring */
	struct cgem_rx_desc *rxring;
	bus_addr_t rxring_physaddr;
	struct mbuf *rxring_m[CGEM_NUM_RX_DESCS];
	bus_dmamap_t rxring_m_dmamap[CGEM_NUM_RX_DESCS];
	int rxring_hd_ptr;	/* where to put rcv bufs */
	int rxring_tl_ptr;	/* where to get receives */
	int rxring_queued;	/* how many rcv bufs queued */
	bus_dmamap_t rxring_dma_map;
	int rxbufs;		/* tunable number rcv bufs */
	int rxhangwar;		/* rx hang work-around */
	u_int rxoverruns;	/* rx overruns */
	u_int rxnobufs;		/* rx buf ring empty events */
	u_int rxdmamapfails;	/* rx dmamap failures */
	uint32_t rx_frames_prev;

	/* transmit descriptor ring */
	struct cgem_tx_desc *txring;
	bus_addr_t txring_physaddr;
	struct mbuf *txring_m[CGEM_NUM_TX_DESCS];
	bus_dmamap_t txring_m_dmamap[CGEM_NUM_TX_DESCS];
	int txring_hd_ptr;	/* where to put next xmits */
	int txring_tl_ptr;	/* next xmit mbuf to free */
	int txring_queued;	/* num xmits segs queued */
	u_int txfull;		/* tx ring full events */
	u_int txdefrags;	/* tx calls to m_defrag() */
	u_int txdefragfails;	/* tx m_defrag() failures */
	u_int txdmamapfails;	/* tx dmamap failures */

	/* null descriptor rings (for the NEEDNULLQS quirk) */
	void *null_qs;
	bus_addr_t null_qs_physaddr;

	/* hardware provided statistics */
	struct cgem_hw_stats {
		uint64_t tx_bytes;
		uint32_t tx_frames;
		uint32_t tx_frames_bcast;
		uint32_t tx_frames_multi;
		uint32_t tx_frames_pause;
		uint32_t tx_frames_64b;
		uint32_t tx_frames_65to127b;
		uint32_t tx_frames_128to255b;
		uint32_t tx_frames_256to511b;
		uint32_t tx_frames_512to1023b;
		uint32_t tx_frames_1024to1536b;
		uint32_t tx_under_runs;
		uint32_t tx_single_collisn;
		uint32_t tx_multi_collisn;
		uint32_t tx_excsv_collisn;
		uint32_t tx_late_collisn;
		uint32_t tx_deferred_frames;
		uint32_t tx_carrier_sense_errs;

		uint64_t rx_bytes;
		uint32_t rx_frames;
		uint32_t rx_frames_bcast;
		uint32_t rx_frames_multi;
		uint32_t rx_frames_pause;
		uint32_t rx_frames_64b;
		uint32_t rx_frames_65to127b;
		uint32_t rx_frames_128to255b;
		uint32_t rx_frames_256to511b;
		uint32_t rx_frames_512to1023b;
		uint32_t rx_frames_1024to1536b;
		uint32_t rx_frames_undersize;
		uint32_t rx_frames_oversize;
		uint32_t rx_frames_jabber;
		uint32_t rx_frames_fcs_errs;
		uint32_t rx_frames_length_errs;
		uint32_t rx_symbol_errs;
		uint32_t rx_align_errs;
		uint32_t rx_resource_errs;
		uint32_t rx_overrun_errs;
		uint32_t rx_ip_hdr_csum_errs;
		uint32_t rx_tcp_csum_errs;
		uint32_t rx_udp_csum_errs;
	} stats;
};
/* Register access and locking helpers. */
#define RD4(sc, off) (bus_read_4((sc)->mem_res, (off)))
#define WR4(sc, off, val) (bus_write_4((sc)->mem_res, (off), (val)))
/*
 * Bug fix: the BARRIER() expansion was missing its closing parenthesis
 * ("(bus_barrier(...)"), so any use of the macro failed to compile.  The
 * macro is currently unused, which is why the imbalance went unnoticed.
 */
#define BARRIER(sc, off, len, flags) \
	(bus_barrier((sc)->mem_res, (off), (len), (flags)))

#define CGEM_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
#define CGEM_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
#define CGEM_LOCK_INIT(sc) mtx_init(&(sc)->sc_mtx, \
	    device_get_nameunit((sc)->dev), MTX_NETWORK_LOCK, MTX_DEF)
#define CGEM_LOCK_DESTROY(sc) mtx_destroy(&(sc)->sc_mtx)
#define CGEM_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED)

/* Allow platforms to optionally provide a way to set the reference clock. */
int cgem_set_ref_clk(int unit, int frequency);

static int cgem_probe(device_t dev);
static int cgem_attach(device_t dev);
static int cgem_detach(device_t dev);
static void cgem_tick(void *);
static void cgem_intr(void *);

static void cgem_mediachange(struct cgem_softc *, struct mii_data *);
/*
 * Determine the interface MAC address: prefer an address the boot loader
 * already programmed into one of the four specific-address register pairs;
 * otherwise synthesize a random locally-administered one.  The chosen
 * address is written into slot 0 and the other slots are cleared.
 */
static void
cgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
{
	int i;
	uint32_t rnd;

	/* See if boot loader gave us a MAC address already. */
	for (i = 0; i < 4; i++) {
		uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i));
		uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff;
		if (low != 0 || high != 0) {
			/* Unpack: LOW holds bytes 0-3, HI bytes 4-5. */
			eaddr[0] = low & 0xff;
			eaddr[1] = (low >> 8) & 0xff;
			eaddr[2] = (low >> 16) & 0xff;
			eaddr[3] = (low >> 24) & 0xff;
			eaddr[4] = high & 0xff;
			eaddr[5] = (high >> 8) & 0xff;
			break;
		}
	}

	/* No MAC from boot loader?  Assign a random one. */
	if (i == 4) {
		rnd = arc4random();

		/* Fixed "bsd" prefix, random low three bytes. */
		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 16) & 0xff;
		eaddr[4] = (rnd >> 8) & 0xff;
		eaddr[5] = rnd & 0xff;

		device_printf(sc->dev, "no mac address found, assigning "
		    "random: %02x:%02x:%02x:%02x:%02x:%02x\n", eaddr[0],
		    eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]);
	}

	/* Move address to first slot and zero out the rest. */
	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

	for (i = 1; i < 4; i++) {
		WR4(sc, CGEM_SPEC_ADDR_LOW(i), 0);
		WR4(sc, CGEM_SPEC_ADDR_HI(i), 0);
	}
}
/*
 * cgem_mac_hash():  map 48-bit address to a 6-bit hash.  The 6-bit hash
 * corresponds to a bit in a 64-bit hash register.  Setting that bit in the
 * hash register enables reception of all frames with a destination address
 * that hashes to that 6-bit value.
 *
 * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech
 * Reference Manual.  Bits 0-5 in the hash are the exclusive-or of
 * every sixth bit in the destination address.
 */
static int
cgem_mac_hash(u_char eaddr[])
{
	int hash;
	int i, j;

	hash = 0;
	for (i = 0; i < 6; i++)
		/* XOR together address bits i, i+6, i+12, ... into hash bit i. */
		for (j = i; j < 48; j += 6)
			if ((eaddr[j >> 3] & (1 << (j & 7))) != 0)
				hash ^= (1 << i);

	/* Parenthesized return for consistency with the rest of the file. */
	return (hash);
}
/*
 * if_foreach_llmaddr() callback: fold one link-level multicast address
 * into the 64-bit hash filter.  hashes[0] collects bits 32-63 (later
 * written to CGEM_HASH_TOP) and hashes[1] bits 0-31 (CGEM_HASH_BOT).
 */
static u_int
cgem_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *hashes = arg;
	int index;

	index = cgem_mac_hash(LLADDR(sdl));
	if (index > 31)
		hashes[0] |= (1U << (index - 32));
	else
		hashes[1] |= (1U << index);

	/* Return 1 so the iterator counts this address. */
	return (1);
}
/*
 * After any change in rx flags or multi-cast addresses, set up hash registers
 * and net config register bits.
 */
static void
cgem_rx_filter(struct cgem_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t hashes[2] = { 0, 0 };

	/* Start from a clean slate for the filter-related config bits. */
	sc->net_cfg_shadow &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
	    CGEM_NET_CFG_NO_BCAST | CGEM_NET_CFG_COPY_ALL);

	if ((if_getflags(ifp) & IFF_PROMISC) != 0)
		/* Promiscuous: accept everything; hash regs stay zero. */
		sc->net_cfg_shadow |= CGEM_NET_CFG_COPY_ALL;
	else {
		if ((if_getflags(ifp) & IFF_BROADCAST) == 0)
			sc->net_cfg_shadow |= CGEM_NET_CFG_NO_BCAST;
		if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
			/* All-multicast: set every hash bit. */
			hashes[0] = 0xffffffff;
			hashes[1] = 0xffffffff;
		} else
			if_foreach_llmaddr(ifp, cgem_hash_maddr, hashes);

		if (hashes[0] != 0 || hashes[1] != 0)
			sc->net_cfg_shadow |= CGEM_NET_CFG_MULTI_HASH_EN;
	}

	WR4(sc, CGEM_HASH_TOP, hashes[0]);
	WR4(sc, CGEM_HASH_BOT, hashes[1]);
	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
}
/*
 * bus_dmamap_load() callback: record the bus address of the single
 * segment the load produced.  Errors and multi-segment results are
 * ignored, leaving the destination untouched.
 */
static void
cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *paddr = arg;

	if (error == 0 && nsegs == 1)
		*paddr = segs[0].ds_addr;
}
/* Set up null queues for priority queues we actually can't disable. */
static void
cgem_null_qs(struct cgem_softc *sc)
{
	struct cgem_rx_desc *rx_desc;
	struct cgem_tx_desc *tx_desc;
	uint32_t queue_mask;
	int n;

	/* Read design config register 6 to determine number of queues. */
	queue_mask = (RD4(sc, CGEM_DESIGN_CFG6) &
	    CGEM_DESIGN_CFG6_DMA_PRIO_Q_MASK) >> 1;
	if (queue_mask == 0)
		return;

	/*
	 * Create empty RX queue and empty TX buf queues: one descriptor
	 * each, marked so the hardware never hands us traffic on them.
	 */
	memset(sc->null_qs, 0, sizeof(struct cgem_rx_desc) +
	    sizeof(struct cgem_tx_desc));
	rx_desc = sc->null_qs;
	rx_desc->addr = CGEM_RXDESC_OWN | CGEM_RXDESC_WRAP;
	tx_desc = (struct cgem_tx_desc *)(rx_desc + 1);
	tx_desc->ctl = CGEM_TXDESC_USED | CGEM_TXDESC_WRAP;

	/* Point all valid ring base pointers to the null queues. */
	for (n = 1; (queue_mask & 1) != 0; n++, queue_mask >>= 1) {
		WR4(sc, CGEM_RX_QN_BAR(n), sc->null_qs_physaddr);
		WR4(sc, CGEM_TX_QN_BAR(n), sc->null_qs_physaddr +
		    sizeof(struct cgem_rx_desc));
	}
}
/*
 * Create DMA'able descriptor rings.  RX ring, TX ring and (optionally)
 * the null queues are carved out of a single coherent allocation.
 * Returns 0 on success or a bus_dma error.
 */
static int
cgem_setup_descs(struct cgem_softc *sc)
{
	int i, err;
	int desc_rings_size = CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc) +
	    CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc);

	/* Null priority queues live at the tail of the same allocation. */
	if (sc->neednullqs)
		desc_rings_size += sizeof(struct cgem_rx_desc) +
		    sizeof(struct cgem_tx_desc);

	sc->txring = NULL;
	sc->rxring = NULL;

	/* Allocate non-cached DMA space for RX and TX descriptors. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1,
#ifdef CGEM64
	    1ULL << 32,	/* Do not cross a 4G boundary. */
#else
	    0,
#endif
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    desc_rings_size, 1, desc_rings_size, 0,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->desc_dma_tag);
	if (err)
		return (err);

	/* Set up a bus_dma_tag for mbufs. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    TX_MAX_DMA_SEGS, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx,
	    &sc->mbuf_dma_tag);
	if (err)
		return (err);

	/*
	 * Allocate DMA memory. We allocate transmit, receive and null
	 * descriptor queues all at once because the hardware only provides
	 * one register for the upper 32 bits of rx and tx descriptor queues
	 * hardware addresses.
	 */
	err = bus_dmamem_alloc(sc->desc_dma_tag, (void **)&sc->rxring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rxring_dma_map);
	if (err)
		return (err);

	/* Load descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
	    (void *)sc->rxring, desc_rings_size,
	    cgem_getaddr, &sc->rxring_physaddr, BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize RX descriptors: owned bit set means "no buffer yet". */
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		sc->rxring[i].ctl = 0;
		sc->rxring_m[i] = NULL;
		sc->rxring_m_dmamap[i] = NULL;
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	/* TX ring immediately follows the RX ring in the allocation. */
	sc->txring = (struct cgem_tx_desc *)(sc->rxring + CGEM_NUM_RX_DESCS);
	sc->txring_physaddr = sc->rxring_physaddr + CGEM_NUM_RX_DESCS *
	    sizeof(struct cgem_rx_desc);

	/* Initialize TX descriptor ring. */
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].addr = 0;
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		sc->txring_m[i] = NULL;
		sc->txring_m_dmamap[i] = NULL;
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	if (sc->neednullqs) {
		/* Null queues occupy the remainder of the allocation. */
		sc->null_qs = (void *)(sc->txring + CGEM_NUM_TX_DESCS);
		sc->null_qs_physaddr = sc->txring_physaddr +
		    CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc);

		cgem_null_qs(sc);
	}

	return (0);
}
/*
 * Fill receive descriptor ring with mbufs.  Allocates cluster mbufs and
 * DMA maps until rxbufs descriptors are queued or allocation fails.
 * Called with the driver lock held.
 */
static void
cgem_fill_rqueue(struct cgem_softc *sc)
{
	struct mbuf *m = NULL;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	int nsegs;

	CGEM_ASSERT_LOCKED(sc);

	while (sc->rxring_queued < sc->rxbufs) {
		/* Get a cluster mbuf. */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			break;

		m->m_len = MCLBYTES;
		m->m_pkthdr.len = MCLBYTES;
		m->m_pkthdr.rcvif = sc->ifp;

		/* Load map and plug in physical address. */
		if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
		    &sc->rxring_m_dmamap[sc->rxring_hd_ptr])) {
			sc->rxdmamapfails++;
			m_free(m);
			break;
		}
		if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
		    segs, &nsegs, BUS_DMA_NOWAIT)) {
			/* Load failed: undo the map creation and bail. */
			sc->rxdmamapfails++;
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->rxring_m_dmamap[sc->rxring_hd_ptr]);
			sc->rxring_m_dmamap[sc->rxring_hd_ptr] = NULL;
			m_free(m);
			break;
		}
		sc->rxring_m[sc->rxring_hd_ptr] = m;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_hd_ptr],
		    BUS_DMASYNC_PREREAD);

		/* Write rx descriptor and increment head pointer. */
		sc->rxring[sc->rxring_hd_ptr].ctl = 0;
#ifdef CGEM64
		sc->rxring[sc->rxring_hd_ptr].addrhi = segs[0].ds_addr >> 32;
#endif
		if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
			/* Last slot: keep the WRAP bit when handing it over. */
			sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
			    CGEM_RXDESC_WRAP;
			sc->rxring_hd_ptr = 0;
		} else
			sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;

		sc->rxring_queued++;
	}
}
/*
 * Pull received packets off of receive descriptor ring.  Completed
 * packets are collected on a local list, the ring is replenished, and
 * then the list is handed to if_input() with the driver lock dropped.
 */
static void
cgem_recv(struct cgem_softc *sc)
{
	if_t ifp = sc->ifp;
	struct mbuf *m, *m_hd, **m_tl;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* Pick up all packets in which the OWN bit is set. */
	m_hd = NULL;
	m_tl = &m_hd;
	while (sc->rxring_queued > 0 &&
	    (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {
		ctl = sc->rxring[sc->rxring_tl_ptr].ctl;

		/* Grab filled mbuf. */
		m = sc->rxring_m[sc->rxring_tl_ptr];
		sc->rxring_m[sc->rxring_tl_ptr] = NULL;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_tl_ptr],
		    BUS_DMASYNC_POSTREAD);

		/* Unload and destroy dmamap. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
		bus_dmamap_destroy(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
		sc->rxring_m_dmamap[sc->rxring_tl_ptr] = NULL;

		/* Increment tail pointer. */
		if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
			sc->rxring_tl_ptr = 0;
		sc->rxring_queued--;

		/*
		 * Check FCS and make sure entire packet landed in one mbuf
		 * cluster (which is much bigger than the largest ethernet
		 * packet).
		 */
		if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
		    (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
		    (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
			/* discard. */
			m_free(m);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		/* Ready it to hand off to upper layers. */
		m->m_data += ETHER_ALIGN;
		m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len;

		/*
		 * Are we using hardware checksumming?  Check the status in the
		 * receive descriptor.
		 */
		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
			/* TCP or UDP checks out, IP checks out too. */
			if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
			    (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID |
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			} else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
				/* Only IP checks out. */
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Queue it up for delivery below. */
		*m_tl = m;
		m_tl = &m->m_next;
	}

	/* Replenish receive buffers. */
	cgem_fill_rqueue(sc);

	/* Unlock and send up packets. */
	CGEM_UNLOCK(sc);
	while (m_hd != NULL) {
		m = m_hd;
		m_hd = m_hd->m_next;
		m->m_next = NULL;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		if_input(ifp, m);
	}
	CGEM_LOCK(sc);
}
/*
 * Find completed transmits and free their mbufs.  Walks the TX ring from
 * the tail, reclaiming descriptors the hardware has marked USED, and
 * clears OACTIVE once at least one descriptor is freed.
 */
static void
cgem_clean_tx(struct cgem_softc *sc)
{
	struct mbuf *m;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* free up finished transmits. */
	while (sc->txring_queued > 0 &&
	    ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
	    CGEM_TXDESC_USED) != 0) {
		/* Sync cache. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_tl_ptr],
		    BUS_DMASYNC_POSTWRITE);

		/* Unload and destroy DMA map. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_tl_ptr]);
		bus_dmamap_destroy(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_tl_ptr]);
		sc->txring_m_dmamap[sc->txring_tl_ptr] = NULL;

		/* Free up the mbuf. */
		m = sc->txring_m[sc->txring_tl_ptr];
		sc->txring_m[sc->txring_tl_ptr] = NULL;
		m_freem(m);

		/* Check the status. */
		if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
			/* Serious bus error. log to console. */
#ifdef CGEM64
			device_printf(sc->dev,
			    "cgem_clean_tx: AHB error, addr=0x%x%08x\n",
			    sc->txring[sc->txring_tl_ptr].addrhi,
			    sc->txring[sc->txring_tl_ptr].addr);
#else
			device_printf(sc->dev,
			    "cgem_clean_tx: AHB error, addr=0x%x\n",
			    sc->txring[sc->txring_tl_ptr].addr);
#endif
		} else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
		    CGEM_TXDESC_LATE_COLL)) != 0) {
			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
		} else
			if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);

		/*
		 * If the packet spanned more than one tx descriptor, skip
		 * descriptors until we find the end so that only
		 * start-of-frame descriptors are processed.
		 */
		while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
			if ((ctl & CGEM_TXDESC_WRAP) != 0)
				sc->txring_tl_ptr = 0;
			else
				sc->txring_tl_ptr++;
			sc->txring_queued--;

			ctl = sc->txring[sc->txring_tl_ptr].ctl;

			/* Mark the follow-on descriptor reusable. */
			sc->txring[sc->txring_tl_ptr].ctl =
			    ctl | CGEM_TXDESC_USED;
		}

		/* Next descriptor. */
		if ((ctl & CGEM_TXDESC_WRAP) != 0)
			sc->txring_tl_ptr = 0;
		else
			sc->txring_tl_ptr++;
		sc->txring_queued--;

		if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
	}
}
/*
 * Start transmits.  Dequeues packets from the interface send queue, maps
 * them for DMA (defragmenting if a packet has too many segments), fills
 * TX descriptors back-to-front, and kicks the transmitter.  Called with
 * the driver lock held.
 */
static void
cgem_start_locked(if_t ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
	struct mbuf *m;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	uint32_t ctl;
	int i, nsegs, wrap, err;

	CGEM_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_OACTIVE) != 0)
		return;

	for (;;) {
		/* Check that there is room in the descriptor ring. */
		if (sc->txring_queued >=
		    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
			/* Try to make room. */
			cgem_clean_tx(sc);

			/* Still no room? */
			if (sc->txring_queued >=
			    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
				sc->txfull++;
				break;
			}
		}

		/* Grab next transmit packet. */
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		/* Create and load DMA map. */
		if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
		    &sc->txring_m_dmamap[sc->txring_hd_ptr])) {
			m_freem(m);
			sc->txdmamapfails++;
			continue;
		}
		err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_hd_ptr], m, segs, &nsegs,
		    BUS_DMA_NOWAIT);
		if (err == EFBIG) {
			/* Too many segments!  defrag and try again. */
			struct mbuf *m2 = m_defrag(m, M_NOWAIT);

			if (m2 == NULL) {
				sc->txdefragfails++;
				m_freem(m);
				bus_dmamap_destroy(sc->mbuf_dma_tag,
				    sc->txring_m_dmamap[sc->txring_hd_ptr]);
				sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
				continue;
			}
			m = m2;
			err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[sc->txring_hd_ptr], m, segs,
			    &nsegs, BUS_DMA_NOWAIT);
			sc->txdefrags++;
		}
		if (err) {
			/* Give up. */
			m_freem(m);
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[sc->txring_hd_ptr]);
			sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
			sc->txdmamapfails++;
			continue;
		}
		sc->txring_m[sc->txring_hd_ptr] = m;

		/* Sync tx buffer with cache. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_hd_ptr],
		    BUS_DMASYNC_PREWRITE);

		/* Set wrap flag if next packet might run off end of ring. */
		wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
		    CGEM_NUM_TX_DESCS;

		/*
		 * Fill in the TX descriptors back to front so that USED bit in
		 * first descriptor is cleared last.
		 */
		for (i = nsegs - 1; i >= 0; i--) {
			/* Descriptor address. */
			sc->txring[sc->txring_hd_ptr + i].addr =
			    segs[i].ds_addr;
#ifdef CGEM64
			sc->txring[sc->txring_hd_ptr + i].addrhi =
			    segs[i].ds_addr >> 32;
#endif
			/* Descriptor control word. */
			ctl = segs[i].ds_len;
			if (i == nsegs - 1) {
				ctl |= CGEM_TXDESC_LAST_BUF;
				if (wrap)
					ctl |= CGEM_TXDESC_WRAP;
			}
			sc->txring[sc->txring_hd_ptr + i].ctl = ctl;

			/* Only the first descriptor keeps the mbuf pointer. */
			if (i != 0)
				sc->txring_m[sc->txring_hd_ptr + i] = NULL;
		}

		if (wrap)
			sc->txring_hd_ptr = 0;
		else
			sc->txring_hd_ptr += nsegs;
		sc->txring_queued += nsegs;

		/* Kick the transmitter. */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_START_TX);

		/* If there is a BPF listener, bounce a copy to him. */
		ETHER_BPF_MTAP(ifp, m);
	}
}
/* if_start entry point: serialize against the driver lock and transmit. */
static void
cgem_start(if_t ifp)
{
	struct cgem_softc *sc;

	sc = if_getsoftc(ifp);
	CGEM_LOCK(sc);
	cgem_start_locked(ifp);
	CGEM_UNLOCK(sc);
}
/*
 * Accumulate the hardware statistics registers into the softc stats
 * structure and feed collision counts into the ifnet counters.  Values
 * are added on each poll; this presumes the counters reset when read --
 * NOTE(review): confirm clear-on-read behavior against the GEM manual.
 */
static void
cgem_poll_hw_stats(struct cgem_softc *sc)
{
	uint32_t n;

	CGEM_ASSERT_LOCKED(sc);

	/* Transmit-side byte and frame counters. */
	sc->stats.tx_bytes += RD4(sc, CGEM_OCTETS_TX_BOT);
	sc->stats.tx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_TX_TOP) << 32;

	sc->stats.tx_frames += RD4(sc, CGEM_FRAMES_TX);
	sc->stats.tx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_TX);
	sc->stats.tx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_TX);
	sc->stats.tx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_TX);
	sc->stats.tx_frames_64b += RD4(sc, CGEM_FRAMES_64B_TX);
	sc->stats.tx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_TX);
	sc->stats.tx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_TX);
	sc->stats.tx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_TX);
	sc->stats.tx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_TX);
	sc->stats.tx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_TX);
	sc->stats.tx_under_runs += RD4(sc, CGEM_TX_UNDERRUNS);

	/* Collision counters also bump the interface collision count. */
	n = RD4(sc, CGEM_SINGLE_COLL_FRAMES);
	sc->stats.tx_single_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_MULTI_COLL_FRAMES);
	sc->stats.tx_multi_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_EXCESSIVE_COLL_FRAMES);
	sc->stats.tx_excsv_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_LATE_COLL);
	sc->stats.tx_late_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);

	sc->stats.tx_deferred_frames += RD4(sc, CGEM_DEFERRED_TX_FRAMES);
	sc->stats.tx_carrier_sense_errs += RD4(sc, CGEM_CARRIER_SENSE_ERRS);

	/* Receive-side byte, frame, and error counters. */
	sc->stats.rx_bytes += RD4(sc, CGEM_OCTETS_RX_BOT);
	sc->stats.rx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_RX_TOP) << 32;

	sc->stats.rx_frames += RD4(sc, CGEM_FRAMES_RX);
	sc->stats.rx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_RX);
	sc->stats.rx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_RX);
	sc->stats.rx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_RX);
	sc->stats.rx_frames_64b += RD4(sc, CGEM_FRAMES_64B_RX);
	sc->stats.rx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_RX);
	sc->stats.rx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_RX);
	sc->stats.rx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_RX);
	sc->stats.rx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_RX);
	sc->stats.rx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_RX);
	sc->stats.rx_frames_undersize += RD4(sc, CGEM_UNDERSZ_RX);
	sc->stats.rx_frames_oversize += RD4(sc, CGEM_OVERSZ_RX);
	sc->stats.rx_frames_jabber += RD4(sc, CGEM_JABBERS_RX);
	sc->stats.rx_frames_fcs_errs += RD4(sc, CGEM_FCS_ERRS);
	sc->stats.rx_frames_length_errs += RD4(sc, CGEM_LENGTH_FIELD_ERRS);
	sc->stats.rx_symbol_errs += RD4(sc, CGEM_RX_SYMBOL_ERRS);
	sc->stats.rx_align_errs += RD4(sc, CGEM_ALIGN_ERRS);
	sc->stats.rx_resource_errs += RD4(sc, CGEM_RX_RESOURCE_ERRS);
	sc->stats.rx_overrun_errs += RD4(sc, CGEM_RX_OVERRUN_ERRS);
	sc->stats.rx_ip_hdr_csum_errs += RD4(sc, CGEM_IP_HDR_CKSUM_ERRS);
	sc->stats.rx_tcp_csum_errs += RD4(sc, CGEM_TCP_CKSUM_ERRS);
	sc->stats.rx_udp_csum_errs += RD4(sc, CGEM_UDP_CKSUM_ERRS);
}
/*
 * Once-a-second timer: poll the PHY, harvest hardware statistics, and
 * apply the receiver-hang work-around if the frame counter has stalled.
 */
static void
cgem_tick(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	/* Poll the phy. */
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		mii_tick(mii);
	}

	/* Poll statistics registers. */
	cgem_poll_hw_stats(sc);

	/* Check for receiver hang. */
	if (sc->rxhangwar && sc->rx_frames_prev == sc->stats.rx_frames) {
		/*
		 * Reset receiver logic by toggling RX_EN bit.  1usec
		 * delay is necessary especially when operating at 100mbps
		 * and 10mbps speeds.
		 */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow &
		    ~CGEM_NET_CTRL_RX_EN);
		DELAY(1);
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
	}
	sc->rx_frames_prev = sc->stats.rx_frames;

	/* Next callout in one second. */
	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}
/*
 * Interrupt handler.  Acks all pending causes up front, then services
 * receive completion, transmit cleanup, DMA errors, RX overruns, and
 * RX buffer exhaustion before restarting a stalled transmit queue.
 */
static void
cgem_intr(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	if_t ifp = sc->ifp;
	uint32_t istatus;

	CGEM_LOCK(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		CGEM_UNLOCK(sc);
		return;
	}

	/* Read interrupt status and immediately clear the bits. */
	istatus = RD4(sc, CGEM_INTR_STAT);
	WR4(sc, CGEM_INTR_STAT, istatus);

	/* Packets received. */
	if ((istatus & CGEM_INTR_RX_COMPLETE) != 0)
		cgem_recv(sc);

	/* Free up any completed transmit buffers. */
	cgem_clean_tx(sc);

	/* Hresp not ok.  Something is very bad with DMA.  Try to clear. */
	if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
		device_printf(sc->dev,
		    "cgem_intr: hresp not okay! rx_status=0x%x\n",
		    RD4(sc, CGEM_RX_STAT));
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
	}

	/* Receiver overrun. */
	if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
		/* Clear status bit. */
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_OVERRUN);
		sc->rxoverruns++;
	}

	/* Receiver ran out of bufs. */
	if ((istatus & CGEM_INTR_RX_USED_READ) != 0) {
		/* Flush any packet stuck in the DMA packet buffer. */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_FLUSH_DPRAM_PKT);
		cgem_fill_rqueue(sc);
		sc->rxnobufs++;
	}

	/* Restart transmitter if needed. */
	if (!if_sendq_empty(ifp))
		cgem_start_locked(ifp);

	CGEM_UNLOCK(sc);
}
/*
 * Reset hardware to a known quiet state: statistics cleared, interrupts
 * disabled, hash and queue base registers zeroed, management port enabled.
 * The net config and net control shadow registers are re-seeded here.
 */
static void
cgem_reset(struct cgem_softc *sc)
{

	CGEM_ASSERT_LOCKED(sc);

	/* Determine data bus width from design configuration register. */
	switch (RD4(sc, CGEM_DESIGN_CFG1) &
	    CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_MASK) {
	case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_64:
		sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_64;
		break;
	case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_128:
		sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_128;
		break;
	default:
		sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_32;
	}

	WR4(sc, CGEM_NET_CTRL, 0);
	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
	WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
	WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
	WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
	WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
	WR4(sc, CGEM_HASH_BOT, 0);
	WR4(sc, CGEM_HASH_TOP, 0);
	WR4(sc, CGEM_TX_QBAR, 0);	/* manual says do this. */
	WR4(sc, CGEM_RX_QBAR, 0);

	/* Get management port running even if interface is down. */
	sc->net_cfg_shadow |= CGEM_NET_CFG_MDC_CLK_DIV_48;
	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);

	sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
}
/*
 * Bring up the hardware: program net config, DMA config, descriptor ring
 * base registers, unicast address and interrupt enables, then turn on
 * rx and tx.  Speed/duplex bits set here are later corrected by
 * cgem_mediachange() when the PHY reports actual link parameters.
 */
static void
cgem_config(struct cgem_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t dma_cfg;
	u_char *eaddr = if_getlladdr(ifp);

	CGEM_ASSERT_LOCKED(sc);

	/* Program Net Config Register. */
	sc->net_cfg_shadow &= (CGEM_NET_CFG_MDC_CLK_DIV_MASK |
	    CGEM_NET_CFG_DBUS_WIDTH_MASK);
	sc->net_cfg_shadow |= (CGEM_NET_CFG_FCS_REMOVE |
	    CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
	    CGEM_NET_CFG_GIGE_EN | CGEM_NET_CFG_1536RXEN |
	    CGEM_NET_CFG_FULL_DUPLEX | CGEM_NET_CFG_SPEED100);

	/* Check connection type, enable SGMII bits if necessary. */
	if (sc->phy_contype == MII_CONTYPE_SGMII) {
		sc->net_cfg_shadow |= CGEM_NET_CFG_SGMII_EN;
		sc->net_cfg_shadow |= CGEM_NET_CFG_PCS_SEL;
	}

	/* Enable receive checksum offloading? */
	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
		sc->net_cfg_shadow |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;

	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);

	/* Program DMA Config Register. */
	dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
	    CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
	    CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
	    CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 |
#ifdef CGEM64
	    CGEM_DMA_CFG_ADDR_BUS_64 |
#endif
	    CGEM_DMA_CFG_DISC_WHEN_NO_AHB;

	/* Enable transmit checksum offloading? */
	if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
		dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;

	WR4(sc, CGEM_DMA_CFG, dma_cfg);

	/* Write the rx and tx descriptor ring addresses to the QBAR regs. */
	WR4(sc, CGEM_RX_QBAR, (uint32_t)sc->rxring_physaddr);
	WR4(sc, CGEM_TX_QBAR, (uint32_t)sc->txring_physaddr);
#ifdef CGEM64
	WR4(sc, CGEM_RX_QBAR_HI, (uint32_t)(sc->rxring_physaddr >> 32));
	WR4(sc, CGEM_TX_QBAR_HI, (uint32_t)(sc->txring_physaddr >> 32));
#endif

	/* Enable rx and tx. */
	sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);

	/* Set receive address in case it changed. */
	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

	/* Set up interrupts. */
	WR4(sc, CGEM_INTR_EN, CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN |
	    CGEM_INTR_TX_USED_READ | CGEM_INTR_RX_USED_READ |
	    CGEM_INTR_HRESP_NOT_OK);
}
/* Turn on interface and load up receive ring with buffers. */
static void
cgem_init_locked(struct cgem_softc *sc)
{
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	/* Already running?  Nothing to do. */
	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0)
		return;

	cgem_config(sc);
	cgem_fill_rqueue(sc);

	if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	/* Renegotiate media with the PHY, if attached. */
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		mii_mediachg(mii);
	}

	/* Start the once-a-second housekeeping timer. */
	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}
/* if_init entry point: take the driver lock and bring up the interface. */
static void
cgem_init(void *arg)
{
	struct cgem_softc *sc = arg;

	CGEM_LOCK(sc);
	cgem_init_locked(sc);
	CGEM_UNLOCK(sc);
}
/* Turn off interface.  Free up any buffers in transmit or receive queues. */
static void
cgem_stop(struct cgem_softc *sc)
{
	int i;

	CGEM_ASSERT_LOCKED(sc);

	callout_stop(&sc->tick_ch);

	/* Shut down hardware. */
	cgem_reset(sc);

	/* Clear out transmit queue: reclaim maps and mbufs, reset ring. */
	memset(sc->txring, 0, CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc));
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		if (sc->txring_m[i]) {
			/* Unload and destroy dmamap. */
			bus_dmamap_unload(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[i]);
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[i]);
			sc->txring_m_dmamap[i] = NULL;
			m_freem(sc->txring_m[i]);
			sc->txring_m[i] = NULL;
		}
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	/* Clear out receive queue: reclaim maps and mbufs, reset ring. */
	memset(sc->rxring, 0, CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc));
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		if (sc->rxring_m[i]) {
			/* Unload and destroy dmamap. */
			bus_dmamap_unload(sc->mbuf_dma_tag,
			    sc->rxring_m_dmamap[i]);
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->rxring_m_dmamap[i]);
			sc->rxring_m_dmamap[i] = NULL;
			m_freem(sc->rxring_m[i]);
			sc->rxring_m[i] = NULL;
		}
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	/* Force next statchg or linkchg to program net config register. */
	sc->mii_media_active = 0;
}
/*
 * Interface ioctl handler: flag changes, multicast filter updates, media
 * selection, and checksum-offload capability toggles.  Anything else is
 * passed through to ether_ioctl().
 */
static int
cgem_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct cgem_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFFLAGS:
		CGEM_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				/*
				 * Running already: only reprogram the rx
				 * filter if the relevant flags changed.
				 */
				if (((if_getflags(ifp) ^ sc->if_old_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					cgem_rx_filter(sc);
				}
			} else {
				cgem_init_locked(sc);
			}
		} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			cgem_stop(sc);
		}
		sc->if_old_flags = if_getflags(ifp);
		CGEM_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Set up multi-cast filters. */
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			CGEM_LOCK(sc);
			cgem_rx_filter(sc);
			CGEM_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->miibus == NULL)
			return (ENXIO);
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		CGEM_LOCK(sc);
		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
		if ((mask & IFCAP_TXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
				/* Turn on TX checksumming. */
				if_setcapenablebit(ifp, IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6, 0);
				if_sethwassistbits(ifp, CGEM_CKSUM_ASSIST, 0);
				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) |
				    CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			} else {
				/* Turn off TX checksumming. */
				if_setcapenablebit(ifp, 0, IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6);
				if_sethwassistbits(ifp, 0, CGEM_CKSUM_ASSIST);
				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) &
				    ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
				/* Turn on RX checksumming. */
				if_setcapenablebit(ifp, IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6, 0);
				sc->net_cfg_shadow |=
				    CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
				WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
			} else {
				/* Turn off RX checksumming. */
				if_setcapenablebit(ifp, 0, IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6);
				sc->net_cfg_shadow &=
				    ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
				WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
			}
		}
		/* VLAN hw csum follows from both RX and TX csum enabled. */
		if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_TXCSUM)) ==
		    (IFCAP_RXCSUM | IFCAP_TXCSUM))
			if_setcapenablebit(ifp, IFCAP_VLAN_HWCSUM, 0);
		else
			if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWCSUM);
		CGEM_UNLOCK(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
/* MII bus support routines.
 */

/*
 * ifmedia "change" callback: reset all PHYs and renegotiate media, but
 * only when the interface is administratively up.
 */
static int
cgem_ifmedia_upd(if_t ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error = 0;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			PHY_RESET(miisc);
		error = mii_mediachg(mii);
	}
	CGEM_UNLOCK(sc);

	return (error);
}
/* ifmedia "status" callback: poll the PHY and report link status/media. */
static void
cgem_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct cgem_softc *sc;
	struct mii_data *mii_sc;

	sc = if_getsoftc(ifp);
	mii_sc = device_get_softc(sc->miibus);

	CGEM_LOCK(sc);
	mii_pollstat(mii_sc);
	ifmr->ifm_status = mii_sc->mii_media_status;
	ifmr->ifm_active = mii_sc->mii_media_active;
	CGEM_UNLOCK(sc);
}
/*
 * miibus read: issue a clause-22 PHY read through the PHY maintenance
 * register and busy-wait (up to ~1ms) for the management interface to go
 * idle.  Returns the 16-bit register value, or -1 on timeout.
 */
static int
cgem_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries, val;

	WR4(sc, CGEM_PHY_MAINT, CGEM_PHY_MAINT_CLAUSE_22 |
	    CGEM_PHY_MAINT_MUST_10 | CGEM_PHY_MAINT_OP_READ |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy read timeout: %d\n", reg);
			return (-1);
		}
	}

	val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;

	if (reg == MII_EXTSR)
		/*
		 * MAC does not support half-duplex at gig speeds.
		 * Let mii(4) exclude the capability.
		 */
		val &= ~(EXTSR_1000XHDX | EXTSR_1000THDX);

	return (val);
}
/*
 * miibus write: issue a clause-22 PHY write through the PHY maintenance
 * register and busy-wait (up to ~1ms) for the management interface to go
 * idle.  Returns 0 on success or -1 on timeout.
 */
static int
cgem_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries;

	WR4(sc, CGEM_PHY_MAINT, CGEM_PHY_MAINT_CLAUSE_22 |
	    CGEM_PHY_MAINT_MUST_10 | CGEM_PHY_MAINT_OP_WRITE |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) |
	    (data & CGEM_PHY_MAINT_DATA_MASK));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy write timeout: %d\n", reg);
			return (-1);
		}
	}

	return (0);
}
/*
 * miibus status-change callback: on a valid, active link whose media
 * differs from what we last programmed, update hardware to match.
 * (Same action as cgem_miibus_linkchg() below.)
 */
static void
cgem_miibus_statchg(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->miibus);

	CGEM_ASSERT_LOCKED(sc);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID) &&
	    sc->mii_media_active != mii->mii_media_active)
		cgem_mediachange(sc, mii);
}
/*
 * miibus link-change callback: on a valid, active link whose media
 * differs from what we last programmed, update hardware to match.
 */
static void
cgem_miibus_linkchg(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->miibus);

	CGEM_ASSERT_LOCKED(sc);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID) &&
	    sc->mii_media_active != mii->mii_media_active)
		cgem_mediachange(sc, mii);
}
/*
 * Overridable weak symbol cgem_set_ref_clk().  This allows platforms to
 * provide a function to set the cgem's reference clock.  The default
 * implementation is a no-op that always reports success.
 */
static int __used
cgem_default_set_ref_clk(int unit, int frequency)
{

	/* Parenthesized return for consistency with the rest of the file. */
	return (0);
}
__weak_reference(cgem_default_set_ref_clk, cgem_set_ref_clk);
/* Call to set reference clock and network config bits according to media. */
static void
cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii)
{
	int ref_clk_freq;

	CGEM_ASSERT_LOCKED(sc);

	/* Update hardware to reflect media. */
	sc->net_cfg_shadow &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
	    CGEM_NET_CFG_FULL_DUPLEX);

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		sc->net_cfg_shadow |= (CGEM_NET_CFG_SPEED100 |
		    CGEM_NET_CFG_GIGE_EN);
		ref_clk_freq = 125000000;
		break;
	case IFM_100_TX:
		sc->net_cfg_shadow |= CGEM_NET_CFG_SPEED100;
		ref_clk_freq = 25000000;
		break;
	default:
		/* 10 Mb/s: no speed bits set. */
		ref_clk_freq = 2500000;
	}

	if ((mii->mii_media_active & IFM_FDX) != 0)
		sc->net_cfg_shadow |= CGEM_NET_CFG_FULL_DUPLEX;

	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);

	if (sc->clk_pclk != NULL) {
		/* Drop the lock around the clock framework call. */
		CGEM_UNLOCK(sc);
		if (clk_set_freq(sc->clk_pclk, ref_clk_freq, 0))
			device_printf(sc->dev, "could not set ref clk to %d\n",
			    ref_clk_freq);
		CGEM_LOCK(sc);
	}

	sc->mii_media_active = mii->mii_media_active;
}
/*
 * Register the driver's sysctl tree: tunables (rxbufs, rxhangwar),
 * software event counters, and a "stats" subtree mirroring the GEM
 * hardware statistics counters kept in sc->stats.
 */
static void
cgem_add_sysctls(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	/* Tunables. */
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxbufs", CTLFLAG_RW,
	    &sc->rxbufs, 0, "Number receive buffers to provide");

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxhangwar", CTLFLAG_RW,
	    &sc->rxhangwar, 0, "Enable receive hang work-around");

	/* Software-maintained driver event counters. */
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxoverruns", CTLFLAG_RD,
	    &sc->rxoverruns, 0, "Receive overrun events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxnobufs", CTLFLAG_RD,
	    &sc->rxnobufs, 0, "Receive buf queue empty events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxdmamapfails", CTLFLAG_RD,
	    &sc->rxdmamapfails, 0, "Receive DMA map failures");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txfull", CTLFLAG_RD,
	    &sc->txfull, 0, "Transmit ring full events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdmamapfails", CTLFLAG_RD,
	    &sc->txdmamapfails, 0, "Transmit DMA map failures");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefrags", CTLFLAG_RD,
	    &sc->txdefrags, 0, "Transmit m_defrag() calls");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefragfails", CTLFLAG_RD,
	    &sc->txdefragfails, 0, "Transmit m_defrag() failures");

	/* Hardware statistics subtree (values harvested into sc->stats). */
	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "GEM statistics");
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_bytes", CTLFLAG_RD,
	    &sc->stats.tx_bytes, "Total bytes transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames", CTLFLAG_RD,
	    &sc->stats.tx_frames, 0, "Total frames transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_bcast", CTLFLAG_RD,
	    &sc->stats.tx_frames_bcast, 0,
	    "Number broadcast frames transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_multi", CTLFLAG_RD,
	    &sc->stats.tx_frames_multi, 0,
	    "Number multicast frames transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_pause",
	    CTLFLAG_RD, &sc->stats.tx_frames_pause, 0,
	    "Number pause frames transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_64b", CTLFLAG_RD,
	    &sc->stats.tx_frames_64b, 0,
	    "Number frames transmitted of size 64 bytes or less");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_65to127b", CTLFLAG_RD,
	    &sc->stats.tx_frames_65to127b, 0,
	    "Number frames transmitted of size 65-127 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_128to255b",
	    CTLFLAG_RD, &sc->stats.tx_frames_128to255b, 0,
	    "Number frames transmitted of size 128-255 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_256to511b",
	    CTLFLAG_RD, &sc->stats.tx_frames_256to511b, 0,
	    "Number frames transmitted of size 256-511 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_512to1023b",
	    CTLFLAG_RD, &sc->stats.tx_frames_512to1023b, 0,
	    "Number frames transmitted of size 512-1023 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_1024to1536b",
	    CTLFLAG_RD, &sc->stats.tx_frames_1024to1536b, 0,
	    "Number frames transmitted of size 1024-1536 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_under_runs",
	    CTLFLAG_RD, &sc->stats.tx_under_runs, 0,
	    "Number transmit under-run events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_single_collisn",
	    CTLFLAG_RD, &sc->stats.tx_single_collisn, 0,
	    "Number single-collision transmit frames");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_multi_collisn",
	    CTLFLAG_RD, &sc->stats.tx_multi_collisn, 0,
	    "Number multi-collision transmit frames");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_excsv_collisn",
	    CTLFLAG_RD, &sc->stats.tx_excsv_collisn, 0,
	    "Number excessive collision transmit frames");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_late_collisn",
	    CTLFLAG_RD, &sc->stats.tx_late_collisn, 0,
	    "Number late-collision transmit frames");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_deferred_frames",
	    CTLFLAG_RD, &sc->stats.tx_deferred_frames, 0,
	    "Number deferred transmit frames");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_carrier_sense_errs",
	    CTLFLAG_RD, &sc->stats.tx_carrier_sense_errs, 0,
	    "Number carrier sense errors on transmit");

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_bytes", CTLFLAG_RD,
	    &sc->stats.rx_bytes, "Total bytes received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames", CTLFLAG_RD,
	    &sc->stats.rx_frames, 0, "Total frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_bcast",
	    CTLFLAG_RD, &sc->stats.rx_frames_bcast, 0,
	    "Number broadcast frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_multi",
	    CTLFLAG_RD, &sc->stats.rx_frames_multi, 0,
	    "Number multicast frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_pause",
	    CTLFLAG_RD, &sc->stats.rx_frames_pause, 0,
	    "Number pause frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_64b",
	    CTLFLAG_RD, &sc->stats.rx_frames_64b, 0,
	    "Number frames received of size 64 bytes or less");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_65to127b",
	    CTLFLAG_RD, &sc->stats.rx_frames_65to127b, 0,
	    "Number frames received of size 65-127 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_128to255b",
	    CTLFLAG_RD, &sc->stats.rx_frames_128to255b, 0,
	    "Number frames received of size 128-255 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_256to511b",
	    CTLFLAG_RD, &sc->stats.rx_frames_256to511b, 0,
	    "Number frames received of size 256-511 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_512to1023b",
	    CTLFLAG_RD, &sc->stats.rx_frames_512to1023b, 0,
	    "Number frames received of size 512-1023 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_1024to1536b",
	    CTLFLAG_RD, &sc->stats.rx_frames_1024to1536b, 0,
	    "Number frames received of size 1024-1536 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_undersize",
	    CTLFLAG_RD, &sc->stats.rx_frames_undersize, 0,
	    "Number undersize frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_oversize",
	    CTLFLAG_RD, &sc->stats.rx_frames_oversize, 0,
	    "Number oversize frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_jabber",
	    CTLFLAG_RD, &sc->stats.rx_frames_jabber, 0,
	    "Number jabber frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_fcs_errs",
	    CTLFLAG_RD, &sc->stats.rx_frames_fcs_errs, 0,
	    "Number frames received with FCS errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_length_errs",
	    CTLFLAG_RD, &sc->stats.rx_frames_length_errs, 0,
	    "Number frames received with length errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_symbol_errs",
	    CTLFLAG_RD, &sc->stats.rx_symbol_errs, 0,
	    "Number receive symbol errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_align_errs",
	    CTLFLAG_RD, &sc->stats.rx_align_errs, 0,
	    "Number receive alignment errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_resource_errs",
	    CTLFLAG_RD, &sc->stats.rx_resource_errs, 0,
	    "Number frames received when no rx buffer available");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overrun_errs",
	    CTLFLAG_RD, &sc->stats.rx_overrun_errs, 0,
	    "Number frames received but not copied due to receive overrun");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_ip_hdr_csum_errs",
	    CTLFLAG_RD, &sc->stats.rx_ip_hdr_csum_errs, 0,
	    "Number frames received with IP header checksum errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_tcp_csum_errs",
	    CTLFLAG_RD, &sc->stats.rx_tcp_csum_errs, 0,
	    "Number frames received with TCP checksum errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_udp_csum_errs",
	    CTLFLAG_RD, &sc->stats.rx_udp_csum_errs, 0,
	    "Number frames received with UDP checksum errors");
}
/*
 * Probe: accept the device if it is enabled in the FDT and its compatible
 * string matches one of our compat_data entries.
 */
static int
cgem_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev) ||
	    ofw_bus_search_compatible(dev, compat_data)->ocd_str == NULL)
		return (ENXIO);

	device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface");
	return (0);
}
/*
 * Attach: enable clocks, allocate bus resources, set up the ifnet and MII
 * bus, create the DMA descriptor areas and install the interrupt handler.
 *
 * Error handling is split: before resources are allocated, the err_*
 * labels unwind the clocks in reverse order; once resources exist,
 * cgem_detach() is used to tear everything down.
 */
static int
cgem_attach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	if_t ifp = NULL;
	int rid, err;
	u_char eaddr[ETHER_ADDR_LEN];
	int hwquirks;
	phandle_t node;

	sc->dev = dev;
	CGEM_LOCK_INIT(sc);

	/* Key off of compatible string and set hardware-specific options. */
	hwquirks = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	if ((hwquirks & HWQUIRK_NEEDNULLQS) != 0)
		sc->neednullqs = 1;
	if ((hwquirks & HWQUIRK_RXHANGWAR) != 0)
		sc->rxhangwar = 1;
	/*
	 * Both pclk and hclk are mandatory but we don't have a proper
	 * clock driver for Zynq so don't make it fatal if we can't
	 * get them.
	 */
	if (clk_get_by_ofw_name(dev, 0, "pclk", &sc->clk_pclk) != 0)
		device_printf(dev,
		    "could not retrieve pclk.\n");
	else {
		if (clk_enable(sc->clk_pclk) != 0)
			device_printf(dev, "could not enable pclk.\n");
	}
	if (clk_get_by_ofw_name(dev, 0, "hclk", &sc->clk_hclk) != 0)
		device_printf(dev,
		    "could not retrieve hclk.\n");
	else {
		if (clk_enable(sc->clk_hclk) != 0)
			device_printf(dev, "could not enable hclk.\n");
	}

	/* Optional clocks */
	if (clk_get_by_ofw_name(dev, 0, "tx_clk", &sc->clk_txclk) == 0) {
		if (clk_enable(sc->clk_txclk) != 0) {
			device_printf(dev, "could not enable tx_clk.\n");
			err = ENXIO;
			goto err_pclk;
		}
	}
	if (clk_get_by_ofw_name(dev, 0, "rx_clk", &sc->clk_rxclk) == 0) {
		if (clk_enable(sc->clk_rxclk) != 0) {
			device_printf(dev, "could not enable rx_clk.\n");
			err = ENXIO;
			goto err_tx_clk;
		}
	}
	if (clk_get_by_ofw_name(dev, 0, "tsu_clk", &sc->clk_tsuclk) == 0) {
		if (clk_enable(sc->clk_tsuclk) != 0) {
			device_printf(dev, "could not enable tsu_clk.\n");
			err = ENXIO;
			goto err_rx_clk;
		}
	}

	/* PHY connection type (rgmii, mii, ...) from the FDT node. */
	node = ofw_bus_get_node(dev);
	sc->phy_contype = mii_fdt_get_contype(node);

	/* Get memory resource. */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
		err = ENOMEM;
		goto err_tsu_clk;
	}

	/* Get IRQ resource. */
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resource.\n");
		/* From here on cgem_detach() performs the full unwind. */
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Set up ifnet structure. */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "could not allocate ifnet structure\n");
- cgem_detach(dev);
- return (ENOMEM);
- }
	if_setsoftc(ifp, sc);
	if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setinitfn(ifp, cgem_init);
	if_setioctlfn(ifp, cgem_ioctl);
	if_setstartfn(ifp, cgem_start);
	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM, 0);
	if_setsendqlen(ifp, CGEM_NUM_TX_DESCS);
	if_setsendqready(ifp);

	/* Disable hardware checksumming by default. */
	if_sethwassist(ifp, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp) &
	    ~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM));

	sc->if_old_flags = if_getflags(ifp);
	sc->rxbufs = DEFAULT_NUM_RX_BUFS;

	/* Reset hardware. */
	CGEM_LOCK(sc);
	cgem_reset(sc);
	CGEM_UNLOCK(sc);

	/* Attach phy to mii bus.  A failure here is non-fatal. */
	err = mii_attach(dev, &sc->miibus, ifp,
	    cgem_ifmedia_upd, cgem_ifmedia_sts, BMSR_DEFCAPMASK,
	    MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (err)
		device_printf(dev, "warning: attaching PHYs failed\n");

	/* Set up TX and RX descriptor area. */
	err = cgem_setup_descs(sc);
	if (err) {
		device_printf(dev, "could not set up dma mem for descs.\n");
		cgem_detach(dev);
		goto err;
	}

	/* Get a MAC address. */
	cgem_get_mac(sc, eaddr);

	/* Start ticks. */
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	ether_ifattach(ifp, eaddr);

	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
	    INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
	if (err) {
		device_printf(dev, "could not set interrupt handler.\n");
		ether_ifdetach(ifp);
		cgem_detach(dev);
		goto err;
	}

	cgem_add_sysctls(dev);

	return (0);

	/* Clock unwind, in reverse order of acquisition. */
err_tsu_clk:
	if (sc->clk_tsuclk)
		clk_release(sc->clk_tsuclk);
err_rx_clk:
	if (sc->clk_rxclk)
		clk_release(sc->clk_rxclk);
err_tx_clk:
	if (sc->clk_txclk)
		clk_release(sc->clk_txclk);
err_pclk:
	if (sc->clk_pclk)
		clk_release(sc->clk_pclk);
	if (sc->clk_hclk)
		clk_release(sc->clk_hclk);
err:
	return (err);
}
/*
 * Detach: stop the interface, tear down the MII bus, and release bus,
 * interrupt and DMA resources.  Also used by cgem_attach() as its
 * error-unwind path, so every field is checked before being freed and
 * cleared afterwards to make the function safe to call on a partially
 * initialized softc.
 */
static int
cgem_detach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int i;

	if (sc == NULL)
		return (ENODEV);

	if (device_is_attached(dev)) {
		CGEM_LOCK(sc);
		cgem_stop(sc);
		CGEM_UNLOCK(sc);
		callout_drain(&sc->tick_ch);
		if_setflagbits(sc->ifp, 0, IFF_UP);
		ether_ifdetach(sc->ifp);
	}

	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}

	/* Release resources. */
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem_res), sc->mem_res);
		sc->mem_res = NULL;
	}
	if (sc->irq_res != NULL) {
		if (sc->intrhand)
			bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res), sc->irq_res);
		sc->irq_res = NULL;
	}

	/*
	 * Release DMA resources.  The RX/TX rings and null queues share one
	 * allocation keyed off rxring, so freeing it clears all three.
	 */
	if (sc->rxring != NULL) {
		if (sc->rxring_physaddr != 0) {
			bus_dmamap_unload(sc->desc_dma_tag,
			    sc->rxring_dma_map);
			sc->rxring_physaddr = 0;
			sc->txring_physaddr = 0;
			sc->null_qs_physaddr = 0;
		}
		bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
		    sc->rxring_dma_map);
		sc->rxring = NULL;
		sc->txring = NULL;
		sc->null_qs = NULL;

		/* Destroy per-buffer mbuf DMA maps. */
		for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
			if (sc->rxring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
				    sc->rxring_m_dmamap[i]);
				sc->rxring_m_dmamap[i] = NULL;
			}
		for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
			if (sc->txring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
				    sc->txring_m_dmamap[i]);
				sc->txring_m_dmamap[i] = NULL;
			}
	}
	if (sc->desc_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->desc_dma_tag);
		sc->desc_dma_tag = NULL;
	}
	if (sc->mbuf_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->mbuf_dma_tag);
		sc->mbuf_dma_tag = NULL;
	}

	bus_generic_detach(dev);

	/* Release clocks acquired during attach. */
	if (sc->clk_tsuclk)
		clk_release(sc->clk_tsuclk);
	if (sc->clk_rxclk)
		clk_release(sc->clk_rxclk);
	if (sc->clk_txclk)
		clk_release(sc->clk_txclk);
	if (sc->clk_pclk)
		clk_release(sc->clk_pclk);
	if (sc->clk_hclk)
		clk_release(sc->clk_hclk);

	CGEM_LOCK_DESTROY(sc);

	return (0);
}
/* Newbus / MII method dispatch table for the cgem driver. */
static device_method_t cgem_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cgem_probe),
	DEVMETHOD(device_attach,	cgem_attach),
	DEVMETHOD(device_detach,	cgem_detach),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	cgem_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cgem_miibus_writereg),
	DEVMETHOD(miibus_statchg,	cgem_miibus_statchg),
	DEVMETHOD(miibus_linkchg,	cgem_miibus_linkchg),

	DEVMETHOD_END
};
/* Driver declaration and module metadata. */
static driver_t cgem_driver = {
	"cgem",
	cgem_methods,
	sizeof(struct cgem_softc),
};

/* Attach below simplebus; hang a miibus off each cgem instance. */
DRIVER_MODULE(cgem, simplebus, cgem_driver, NULL, NULL);
DRIVER_MODULE(miibus, cgem, miibus_driver, NULL, NULL);
MODULE_DEPEND(cgem, miibus, 1, 1, 1);
MODULE_DEPEND(cgem, ether, 1, 1, 1);
SIMPLEBUS_PNP_INFO(compat_data);
diff --git a/sys/dev/cas/if_cas.c b/sys/dev/cas/if_cas.c
index 0cf17cf04b33..76d1b713e5bb 100644
--- a/sys/dev/cas/if_cas.c
+++ b/sys/dev/cas/if_cas.c
@@ -1,2915 +1,2913 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (C) 2001 Eduardo Horvath.
* Copyright (c) 2001-2003 Thomas Moestl
* Copyright (c) 2007-2009 Marius Strobl <marius@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
* from: FreeBSD: if_gem.c 182060 2008-08-23 15:03:26Z marius
*/
#include <sys/cdefs.h>
/*
* driver for Sun Cassini/Cassini+ and National Semiconductor DP83065
* Saturn Gigabit Ethernet controllers
*/
#if 0
#define CAS_DEBUG
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/resource.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <machine/bus.h>
#if defined(__powerpc__)
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#endif
#include <machine/resource.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/cas/if_casreg.h>
#include <dev/cas/if_casvar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include "miibus_if.h"
/* Compile-time checks: ring sizes must be powers of two within range. */
#define RINGASSERT(n , min, max) \
    CTASSERT(powerof2(n) && (n) >= (min) && (n) <= (max))

RINGASSERT(CAS_NRXCOMP, 128, 32768);
RINGASSERT(CAS_NRXDESC, 32, 8192);
RINGASSERT(CAS_NRXDESC2, 32, 8192);
RINGASSERT(CAS_NTXDESC, 32, 8192);

#undef RINGASSERT

/* Compile-time checks: control-data members must meet DMA alignment. */
#define CCDASSERT(m, a) \
    CTASSERT((offsetof(struct cas_control_data, m) & ((a) - 1)) == 0)

CCDASSERT(ccd_rxcomps, CAS_RX_COMP_ALIGN);
CCDASSERT(ccd_rxdescs, CAS_RX_DESC_ALIGN);
CCDASSERT(ccd_rxdescs2, CAS_RX_DESC_ALIGN);

#undef CCDASSERT

/* Number of polling attempts in cas_bitwait() (100us apart). */
#define CAS_TRIES 10000

/*
 * According to documentation, the hardware has support for basic TCP
 * checksum offloading only, in practice this can be also used for UDP
 * however (i.e. the problem of previous Sun NICs that a checksum of 0x0
 * is not converted to 0xffff no longer exists).
 */
#define CAS_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
static inline void cas_add_rxdesc(struct cas_softc *sc, u_int idx);
static int cas_attach(struct cas_softc *sc);
static int cas_bitwait(struct cas_softc *sc, bus_addr_t r, uint32_t clr,
uint32_t set);
static void cas_cddma_callback(void *xsc, bus_dma_segment_t *segs,
int nsegs, int error);
static void cas_detach(struct cas_softc *sc);
static int cas_disable_rx(struct cas_softc *sc);
static int cas_disable_tx(struct cas_softc *sc);
static void cas_eint(struct cas_softc *sc, u_int status);
static void cas_free(struct mbuf *m);
static void cas_init(void *xsc);
static void cas_init_locked(struct cas_softc *sc);
static void cas_init_regs(struct cas_softc *sc);
static int cas_intr(void *v);
static void cas_intr_task(void *arg, int pending __unused);
static int cas_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int cas_load_txmbuf(struct cas_softc *sc, struct mbuf **m_head);
static int cas_mediachange(if_t ifp);
static void cas_mediastatus(if_t ifp, struct ifmediareq *ifmr);
static void cas_meminit(struct cas_softc *sc);
static void cas_mifinit(struct cas_softc *sc);
static int cas_mii_readreg(device_t dev, int phy, int reg);
static void cas_mii_statchg(device_t dev);
static int cas_mii_writereg(device_t dev, int phy, int reg, int val);
static void cas_reset(struct cas_softc *sc);
static int cas_reset_rx(struct cas_softc *sc);
static int cas_reset_tx(struct cas_softc *sc);
static void cas_resume(struct cas_softc *sc);
static u_int cas_descsize(u_int sz);
static void cas_rint(struct cas_softc *sc);
static void cas_rint_timeout(void *arg);
static inline void cas_rxcksum(struct mbuf *m, uint16_t cksum);
static inline void cas_rxcompinit(struct cas_rx_comp *rxcomp);
static u_int cas_rxcompsize(u_int sz);
static void cas_rxdma_callback(void *xsc, bus_dma_segment_t *segs,
int nsegs, int error);
static void cas_setladrf(struct cas_softc *sc);
static void cas_start(if_t ifp);
static void cas_stop(if_t ifp);
static void cas_suspend(struct cas_softc *sc);
static void cas_tick(void *arg);
static void cas_tint(struct cas_softc *sc);
static void cas_tx_task(void *arg, int pending __unused);
static inline void cas_txkick(struct cas_softc *sc);
static void cas_watchdog(struct cas_softc *sc);
MODULE_DEPEND(cas, ether, 1, 1, 1);
MODULE_DEPEND(cas, miibus, 1, 1, 1);
#ifdef CAS_DEBUG
#include <sys/ktr.h>
#define KTR_CAS KTR_SPARE2
#endif
/*
 * Attach: create the ifnet, taskqueue, DMA tags/maps, probe for an
 * external then internal PHY (or the PCS SERDES), and attach the
 * Ethernet interface.  On failure, the fail_* labels unwind the
 * resources in reverse order of acquisition.
 */
static int
cas_attach(struct cas_softc *sc)
{
	struct cas_txsoft *txs;
	if_t ifp;
	int error, i;
	uint32_t v;

	/* Set up ifnet structure. */
	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL)
- return (ENOSPC);
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(ifp, cas_start);
	if_setioctlfn(ifp, cas_ioctl);
	if_setinitfn(ifp, cas_init);
	if_setsendqlen(ifp, CAS_TXQUEUELEN);
	if_setsendqready(ifp);

	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);

	/* Create local taskq. */
	NET_TASK_INIT(&sc->sc_intr_task, 0, cas_intr_task, sc);
	TASK_INIT(&sc->sc_tx_task, 1, cas_tx_task, ifp);
	sc->sc_tq = taskqueue_create_fast("cas_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	if (sc->sc_tq == NULL) {
		device_printf(sc->sc_dev, "could not create taskqueue\n");
		error = ENXIO;
		goto fail_ifnet;
	}
	error = taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->sc_dev));
	if (error != 0) {
		device_printf(sc->sc_dev, "could not start threads\n");
		goto fail_taskq;
	}

	/* Make sure the chip is stopped. */
	cas_reset(sc);

	/* Parent DMA tag; the more specific tags below descend from it. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE, 0, BUS_SPACE_MAXSIZE, 0, NULL, NULL,
	    &sc->sc_pdmatag);
	if (error != 0)
		goto fail_taskq;

	/* RX buffer tag: one CAS_PAGE_SIZE segment per buffer. */
	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    CAS_PAGE_SIZE, 1, CAS_PAGE_SIZE, 0, NULL, NULL, &sc->sc_rdmatag);
	if (error != 0)
		goto fail_ptag;

	/* TX mbuf tag: up to CAS_NTXSEGS segments per packet. */
	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * CAS_NTXSEGS, CAS_NTXSEGS, MCLBYTES,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
	if (error != 0)
		goto fail_rtag;

	/* Control data tag: single aligned segment. */
	error = bus_dma_tag_create(sc->sc_pdmatag, CAS_TX_DESC_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct cas_control_data), 1,
	    sizeof(struct cas_control_data), 0,
	    NULL, NULL, &sc->sc_cdmatag);
	if (error != 0)
		goto fail_ttag;

	/*
	 * Allocate the control data structures, create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->sc_cddmamap)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_ctag;
	}
	sc->sc_cddma = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct cas_control_data),
	    cas_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_cmem;
	}

	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	error = ENOMEM;
	for (i = 0; i < CAS_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev,
			    "unable to create TX DMA map %d, error = %d\n",
			    i, error);
			goto fail_txd;
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Allocate the receive buffers, create and load the DMA maps
	 * for them.
	 */
	for (i = 0; i < CAS_NRXDESC; i++) {
		if ((error = bus_dmamem_alloc(sc->sc_rdmatag,
		    &sc->sc_rxdsoft[i].rxds_buf, BUS_DMA_WAITOK,
		    &sc->sc_rxdsoft[i].rxds_dmamap)) != 0) {
			device_printf(sc->sc_dev,
			    "unable to allocate RX buffer %d, error = %d\n",
			    i, error);
			goto fail_rxmem;
		}
		/* sc_rxdptr tells cas_rxdma_callback() which slot to fill. */
		sc->sc_rxdptr = i;
		sc->sc_rxdsoft[i].rxds_paddr = 0;
		if ((error = bus_dmamap_load(sc->sc_rdmatag,
		    sc->sc_rxdsoft[i].rxds_dmamap, sc->sc_rxdsoft[i].rxds_buf,
		    CAS_PAGE_SIZE, cas_rxdma_callback, sc, 0)) != 0 ||
		    sc->sc_rxdsoft[i].rxds_paddr == 0) {
			device_printf(sc->sc_dev,
			    "unable to load RX DMA map %d, error = %d\n",
			    i, error);
			goto fail_rxmap;
		}
	}

	if ((sc->sc_flags & CAS_SERDES) == 0) {
		CAS_WRITE_4(sc, CAS_PCS_DATAPATH, CAS_PCS_DATAPATH_MII);
		CAS_BARRIER(sc, CAS_PCS_DATAPATH, 4,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
		cas_mifinit(sc);
		/*
		 * Look for an external PHY.
		 */
		error = ENXIO;
		v = CAS_READ_4(sc, CAS_MIF_CONF);
		if ((v & CAS_MIF_CONF_MDI1) != 0) {
			v |= CAS_MIF_CONF_PHY_SELECT;
			CAS_WRITE_4(sc, CAS_MIF_CONF, v);
			CAS_BARRIER(sc, CAS_MIF_CONF, 4,
			    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
			/* Enable/unfreeze the GMII pins of Saturn. */
			if (sc->sc_variant == CAS_SATURN) {
				CAS_WRITE_4(sc, CAS_SATURN_PCFG,
				    CAS_READ_4(sc, CAS_SATURN_PCFG) &
				    ~CAS_SATURN_PCFG_FSI);
				CAS_BARRIER(sc, CAS_SATURN_PCFG, 4,
				    BUS_SPACE_BARRIER_READ |
				    BUS_SPACE_BARRIER_WRITE);
				DELAY(10000);
			}
			error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
			    cas_mediachange, cas_mediastatus, BMSR_DEFCAPMASK,
			    MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE);
		}
		/*
		 * Fall back on an internal PHY if no external PHY was found.
		 */
		if (error != 0 && (v & CAS_MIF_CONF_MDI0) != 0) {
			v &= ~CAS_MIF_CONF_PHY_SELECT;
			CAS_WRITE_4(sc, CAS_MIF_CONF, v);
			CAS_BARRIER(sc, CAS_MIF_CONF, 4,
			    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
			/* Freeze the GMII pins of Saturn for saving power. */
			if (sc->sc_variant == CAS_SATURN) {
				CAS_WRITE_4(sc, CAS_SATURN_PCFG,
				    CAS_READ_4(sc, CAS_SATURN_PCFG) |
				    CAS_SATURN_PCFG_FSI);
				CAS_BARRIER(sc, CAS_SATURN_PCFG, 4,
				    BUS_SPACE_BARRIER_READ |
				    BUS_SPACE_BARRIER_WRITE);
				DELAY(10000);
			}
			error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
			    cas_mediachange, cas_mediastatus, BMSR_DEFCAPMASK,
			    MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE);
		}
	} else {
		/*
		 * Use the external PCS SERDES.
		 */
		CAS_WRITE_4(sc, CAS_PCS_DATAPATH, CAS_PCS_DATAPATH_SERDES);
		CAS_BARRIER(sc, CAS_PCS_DATAPATH, 4, BUS_SPACE_BARRIER_WRITE);
		/* Enable/unfreeze the SERDES pins of Saturn. */
		if (sc->sc_variant == CAS_SATURN) {
			CAS_WRITE_4(sc, CAS_SATURN_PCFG, 0);
			CAS_BARRIER(sc, CAS_SATURN_PCFG, 4,
			    BUS_SPACE_BARRIER_WRITE);
		}
		CAS_WRITE_4(sc, CAS_PCS_SERDES_CTRL, CAS_PCS_SERDES_CTRL_ESD);
		CAS_BARRIER(sc, CAS_PCS_SERDES_CTRL, 4,
		    BUS_SPACE_BARRIER_WRITE);
		CAS_WRITE_4(sc, CAS_PCS_CONF, CAS_PCS_CONF_EN);
		CAS_BARRIER(sc, CAS_PCS_CONF, 4,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
		error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
		    cas_mediachange, cas_mediastatus, BMSR_DEFCAPMASK,
		    CAS_PHYAD_EXTERNAL, MII_OFFSET_ANY, MIIF_DOPAUSE);
	}
	if (error != 0) {
		device_printf(sc->sc_dev, "attaching PHYs failed\n");
		goto fail_rxmap;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce FIFO sizes. */
	v = CAS_READ_4(sc, CAS_TX_FIFO_SIZE);
	device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
	    CAS_RX_FIFO_SIZE / 1024, v / 16);

	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_enaddr);

	/*
	 * Tell the upper layer(s) we support long frames/checksum offloads.
	 */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
	if_setcapabilities(ifp, IFCAP_VLAN_MTU);
	if ((sc->sc_flags & CAS_NO_CSUM) == 0) {
		if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
		if_sethwassist(ifp, CAS_CSUM_FEATURES);
	}
	if_setcapenable(ifp, if_getcapabilities(ifp));

	return (0);

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_rxmap:
	for (i = 0; i < CAS_NRXDESC; i++)
		if (sc->sc_rxdsoft[i].rxds_paddr != 0)
			bus_dmamap_unload(sc->sc_rdmatag,
			    sc->sc_rxdsoft[i].rxds_dmamap);
fail_rxmem:
	for (i = 0; i < CAS_NRXDESC; i++)
		if (sc->sc_rxdsoft[i].rxds_buf != NULL)
			bus_dmamem_free(sc->sc_rdmatag,
			    sc->sc_rxdsoft[i].rxds_buf,
			    sc->sc_rxdsoft[i].rxds_dmamap);
fail_txd:
	for (i = 0; i < CAS_TXQUEUELEN; i++)
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
fail_cmem:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
fail_taskq:
	taskqueue_free(sc->sc_tq);
fail_ifnet:
	if_free(ifp);
	return (error);
}
/*
 * Detach: stop the interface and callouts, drain the taskqueue, then
 * release mbuf/DMA resources in reverse order of cas_attach().
 */
static void
cas_detach(struct cas_softc *sc)
{
	if_t ifp = sc->sc_ifp;
	int i;

	ether_ifdetach(ifp);
	CAS_LOCK(sc);
	cas_stop(ifp);
	CAS_UNLOCK(sc);
	callout_drain(&sc->sc_tick_ch);
	callout_drain(&sc->sc_rx_ch);
	taskqueue_drain(sc->sc_tq, &sc->sc_intr_task);
	taskqueue_drain(sc->sc_tq, &sc->sc_tx_task);
	if_free(ifp);
	taskqueue_free(sc->sc_tq);
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	/* Sync, unload and free the RX buffers and their DMA maps. */
	for (i = 0; i < CAS_NRXDESC; i++)
		if (sc->sc_rxdsoft[i].rxds_dmamap != NULL)
			bus_dmamap_sync(sc->sc_rdmatag,
			    sc->sc_rxdsoft[i].rxds_dmamap,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (i = 0; i < CAS_NRXDESC; i++)
		if (sc->sc_rxdsoft[i].rxds_paddr != 0)
			bus_dmamap_unload(sc->sc_rdmatag,
			    sc->sc_rxdsoft[i].rxds_dmamap);
	for (i = 0; i < CAS_NRXDESC; i++)
		if (sc->sc_rxdsoft[i].rxds_buf != NULL)
			bus_dmamem_free(sc->sc_rdmatag,
			    sc->sc_rxdsoft[i].rxds_buf,
			    sc->sc_rxdsoft[i].rxds_dmamap);

	/* Destroy the TX mbuf DMA maps. */
	for (i = 0; i < CAS_TXQUEUELEN; i++)
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);

	/* Tear down the control data area, then the tags themselves. */
	CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}
/* Suspend: simply stop the chip; cas_resume() reinitializes it. */
static void
cas_suspend(struct cas_softc *sc)
{
	if_t ifp = sc->sc_ifp;

	CAS_LOCK(sc);
	cas_stop(ifp);
	CAS_UNLOCK(sc);
}
/* Resume: force full re-initialization if the interface was up. */
static void
cas_resume(struct cas_softc *sc)
{
	if_t ifp = sc->sc_ifp;

	CAS_LOCK(sc);
	/*
	 * On resume all registers have to be initialized again like
	 * after power-on.
	 */
	sc->sc_flags &= ~CAS_INITED;
	if (if_getflags(ifp) & IFF_UP)
		cas_init_locked(sc);
	CAS_UNLOCK(sc);
}
/*
 * Convert the hardware's raw ones-complement checksum of the received
 * payload into a CSUM_DATA_VALID csum_data value for the stack.  Bails
 * out (leaving the mbuf unflagged) for anything but a complete,
 * unfragmented IPv4 TCP or UDP packet.  The hardware checksum covers a
 * fixed region, so any IP options must be subtracted back out below.
 */
static inline void
cas_rxcksum(struct mbuf *m, uint16_t cksum)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	uint16_t *opts;
	int32_t hlen, len, pktlen;
	uint32_t temp32;

	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* Cannot handle fragmented packet. */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((uint8_t *)ip + hlen);
		if (uh->uh_sum == 0)
			return;	/* no checksum */
		break;
	default:
		return;
	}

	cksum = ~cksum;
	/* checksum fixup for IP options */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (uint16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(uint16_t), opts++) {
			/* Ones-complement subtraction with end-around carry. */
			temp32 = cksum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			cksum = temp32 & 65535;
		}
	}
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = cksum;
}
/*
 * bus_dmamap_load() callback for the control data area: remember the
 * single segment's bus address in sc_cddma (left 0 on error, which the
 * caller checks).
 */
static void
cas_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct cas_softc *sc;

	if (error != 0)
		return;
	if (nsegs != 1)
		panic("%s: bad control buffer segment count", __func__);
	sc = xsc;
	sc->sc_cddma = segs[0].ds_addr;
}
/*
 * busdma load callback for an RX buffer page: record the single
 * physical segment address for the descriptor currently being set up
 * (indexed by sc_rxdptr).
 */
static void
cas_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct cas_softc *sc;

	sc = xsc;
	if (error != 0)
		return;
	if (nsegs != 1)
		panic("%s: bad RX buffer segment count", __func__);
	sc->sc_rxdsoft[sc->sc_rxdptr].rxds_paddr = segs[0].ds_addr;
}
/*
 * Once-a-second housekeeping callout: fold the chip's collision and RX
 * error counters into the interface statistics, clear the hardware
 * counters, drive the PHY state machine, reap outstanding TX
 * completions and run the watchdog.  Reschedules itself; runs with the
 * driver lock held.
 */
static void
cas_tick(void *arg)
{
	struct cas_softc *sc = arg;
	if_t ifp = sc->sc_ifp;
	uint32_t v;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Unload collision and error counters.
	 */
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
	    CAS_READ_4(sc, CAS_MAC_NORM_COLL_CNT) +
	    CAS_READ_4(sc, CAS_MAC_FIRST_COLL_CNT));
	v = CAS_READ_4(sc, CAS_MAC_EXCESS_COLL_CNT) +
	    CAS_READ_4(sc, CAS_MAC_LATE_COLL_CNT);
	/* Excessive/late collisions count both as collisions and errors. */
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, v);
	if_inc_counter(ifp, IFCOUNTER_OERRORS, v);
	if_inc_counter(ifp, IFCOUNTER_IERRORS,
	    CAS_READ_4(sc, CAS_MAC_RX_LEN_ERR_CNT) +
	    CAS_READ_4(sc, CAS_MAC_RX_ALIGN_ERR) +
	    CAS_READ_4(sc, CAS_MAC_RX_CRC_ERR_CNT) +
	    CAS_READ_4(sc, CAS_MAC_RX_CODE_VIOL));

	/*
	 * Then clear the hardware counters.
	 */
	CAS_WRITE_4(sc, CAS_MAC_NORM_COLL_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_FIRST_COLL_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_EXCESS_COLL_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_LATE_COLL_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_RX_LEN_ERR_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_RX_ALIGN_ERR, 0);
	CAS_WRITE_4(sc, CAS_MAC_RX_CRC_ERR_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_RX_CODE_VIOL, 0);

	mii_tick(sc->sc_mii);

	/* Reap completed TX descriptors if any jobs are outstanding. */
	if (sc->sc_txfree != CAS_MAXTXFREE)
		cas_tint(sc);

	cas_watchdog(sc);

	/* Re-arm for the next second. */
	callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc);
}
/*
 * Poll register r until every bit in clr reads as zero and every bit in
 * set reads as one, pausing 100us between attempts.  Returns 1 when the
 * condition was met within CAS_TRIES reads and 0 on timeout.
 */
static int
cas_bitwait(struct cas_softc *sc, bus_addr_t r, uint32_t clr, uint32_t set)
{
	uint32_t reg;
	int n;

	for (n = 0; n < CAS_TRIES; n++) {
		reg = CAS_READ_4(sc, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
		DELAY(100);
	}
	return (0);
}
/*
 * Perform a global reset of the Ethernet channel: mask interrupts,
 * quiesce both DMA engines, then assert the full reset and wait for the
 * reset bits to clear.  When running on the SERDES the PCS is excluded
 * so the result of the last auto-negotiation survives.
 */
static void
cas_reset(struct cas_softc *sc)
{

#ifdef CAS_DEBUG
	CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif
	/* Disable all interrupts in order to avoid spurious ones. */
	CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff);

	cas_reset_rx(sc);
	cas_reset_tx(sc);

	/*
	 * Do a full reset modulo the result of the last auto-negotiation
	 * when using the SERDES.
	 */
	CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX |
	    ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0));
	CAS_BARRIER(sc, CAS_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	DELAY(3000);
	/* The reset bits clear once the reset has completed. */
	if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0))
		device_printf(sc->sc_dev, "cannot reset device\n");
}
/*
 * Stop the interface: cancel the periodic and RX-timeout callouts, mask
 * interrupts, reset both DMA engines and release every queued transmit
 * buffer, then mark the interface down.  Called with the driver lock
 * held.
 */
static void
cas_stop(if_t ifp)
{
	struct cas_softc *sc = if_getsoftc(ifp);
	struct cas_txsoft *txs;

#ifdef CAS_DEBUG
	CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif
	callout_stop(&sc->sc_tick_ch);
	callout_stop(&sc->sc_rx_ch);

	/* Disable all interrupts in order to avoid spurious ones. */
	CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff);

	cas_reset_tx(sc);
	cas_reset_rx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->sc_flags &= ~CAS_LINK;
	sc->sc_wdog_timer = 0;
}
/*
 * Reset the receive DMA engine (ERX).  Returns 0 on success and 1 if
 * the reset bit failed to clear.
 */
static int
cas_reset_rx(struct cas_softc *sc)
{

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	(void)cas_disable_rx(sc);
	CAS_WRITE_4(sc, CAS_RX_CONF, 0);
	CAS_BARRIER(sc, CAS_RX_CONF, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!cas_bitwait(sc, CAS_RX_CONF, CAS_RX_CONF_RXDMA_EN, 0))
		device_printf(sc->sc_dev, "cannot disable RX DMA\n");

	/* Finally, reset the ERX. */
	CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_RX |
	    ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0));
	CAS_BARRIER(sc, CAS_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_RX, 0)) {
		device_printf(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}
/*
 * Reset the transmit DMA engine (ETX).  Returns 0 on success and 1 if
 * the reset bit failed to clear.
 */
static int
cas_reset_tx(struct cas_softc *sc)
{

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	(void)cas_disable_tx(sc);
	CAS_WRITE_4(sc, CAS_TX_CONF, 0);
	CAS_BARRIER(sc, CAS_TX_CONF, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!cas_bitwait(sc, CAS_TX_CONF, CAS_TX_CONF_TXDMA_EN, 0))
		device_printf(sc->sc_dev, "cannot disable TX DMA\n");

	/* Finally, reset the ETX. */
	CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_TX |
	    ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0));
	CAS_BARRIER(sc, CAS_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_TX, 0)) {
		device_printf(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}
static int
cas_disable_rx(struct cas_softc *sc)
{
CAS_WRITE_4(sc, CAS_MAC_RX_CONF,
CAS_READ_4(sc, CAS_MAC_RX_CONF) & ~CAS_MAC_RX_CONF_EN);
CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4,
BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
if (cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_EN, 0))
return (1);
if (bootverbose)
device_printf(sc->sc_dev, "cannot disable RX MAC\n");
return (0);
}
static int
cas_disable_tx(struct cas_softc *sc)
{
CAS_WRITE_4(sc, CAS_MAC_TX_CONF,
CAS_READ_4(sc, CAS_MAC_TX_CONF) & ~CAS_MAC_TX_CONF_EN);
CAS_BARRIER(sc, CAS_MAC_TX_CONF, 4,
BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
if (cas_bitwait(sc, CAS_MAC_TX_CONF, CAS_MAC_TX_CONF_EN, 0))
return (1);
if (bootverbose)
device_printf(sc->sc_dev, "cannot disable TX MAC\n");
return (0);
}
/*
 * Return an RX completion ring entry to its initial, unprocessed state:
 * CAS_RC4_ZERO set (still set means the entry has not completed yet,
 * see cas_rint()) and the checksum-start offset programmed to the end
 * of a plain IPv4 header for use by cas_rxcksum().
 */
static inline void
cas_rxcompinit(struct cas_rx_comp *rxcomp)
{

	rxcomp->crc_word1 = 0;
	rxcomp->crc_word2 = 0;
	rxcomp->crc_word3 =
	    htole64(CAS_SET(ETHER_HDR_LEN + sizeof(struct ip), CAS_RC3_CSO));
	rxcomp->crc_word4 = htole64(CAS_RC4_ZERO);
}
/*
 * (Re-)initialize all descriptor-ring state in host memory: clear the
 * TX descriptor ring and its bookkeeping, hand every RX completion back
 * to the hardware and populate the first RX descriptor ring.  Called
 * with the driver lock held.
 */
static void
cas_meminit(struct cas_softc *sc)
{
	int i;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	for (i = 0; i < CAS_NTXDESC; i++) {
		sc->sc_txdescs[i].cd_flags = 0;
		sc->sc_txdescs[i].cd_buf_ptr = 0;
	}
	sc->sc_txfree = CAS_MAXTXFREE;
	sc->sc_txnext = 0;
	sc->sc_txwin = 0;

	/*
	 * Initialize the receive completion ring.
	 */
	for (i = 0; i < CAS_NRXCOMP; i++)
		cas_rxcompinit(&sc->sc_rxcomps[i]);
	sc->sc_rxcptr = 0;

	/*
	 * Initialize the first receive descriptor ring.  We leave
	 * the second one zeroed as we don't actually use it.
	 */
	for (i = 0; i < CAS_NRXDESC; i++)
		CAS_INIT_RXDESC(sc, i, i);
	sc->sc_rxdptr = 0;

	/* Make the rings visible to the chip before it is started. */
	CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * Translate a descriptor ring size (number of entries) into the
 * corresponding CAS_DESC_* register encoding.  Unknown sizes are
 * reported and fall back to the smallest encoding.
 */
static u_int
cas_descsize(u_int sz)
{
	static const struct {
		u_int size;
		u_int code;
	} enc[] = {
		{ 32, CAS_DESC_32 },
		{ 64, CAS_DESC_64 },
		{ 128, CAS_DESC_128 },
		{ 256, CAS_DESC_256 },
		{ 512, CAS_DESC_512 },
		{ 1024, CAS_DESC_1K },
		{ 2048, CAS_DESC_2K },
		{ 4096, CAS_DESC_4K },
		{ 8192, CAS_DESC_8K }
	};
	u_int i;

	for (i = 0; i < nitems(enc); i++)
		if (enc[i].size == sz)
			return (enc[i].code);
	printf("%s: invalid descriptor ring size %d\n", __func__, sz);
	return (CAS_DESC_32);
}
/*
 * Translate an RX completion ring size (number of entries) into the
 * corresponding CAS_RX_CONF_COMP_* register encoding.  Unknown sizes
 * are reported and fall back to the smallest encoding.
 */
static u_int
cas_rxcompsize(u_int sz)
{

	switch (sz) {
	case 128:
		return (CAS_RX_CONF_COMP_128);
	case 256:
		return (CAS_RX_CONF_COMP_256);
	case 512:
		return (CAS_RX_CONF_COMP_512);
	case 1024:
		return (CAS_RX_CONF_COMP_1K);
	case 2048:
		return (CAS_RX_CONF_COMP_2K);
	case 4096:
		return (CAS_RX_CONF_COMP_4K);
	case 8192:
		return (CAS_RX_CONF_COMP_8K);
	case 16384:
		return (CAS_RX_CONF_COMP_16K);
	case 32768:
		return (CAS_RX_CONF_COMP_32K);
	default:
		/* Fixed typo in the diagnostic ("dcompletion"). */
		printf("%s: invalid completion ring size %d\n", __func__, sz);
		return (CAS_RX_CONF_COMP_128);
	}
}
/*
 * if_init handler: acquire the driver lock and run the locked
 * initialization path.
 */
static void
cas_init(void *xsc)
{
	struct cas_softc *sc;

	sc = xsc;
	CAS_LOCK(sc);
	cas_init_locked(sc);
	CAS_UNLOCK(sc);
}
/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.  Follows the numbered bring-up
 * sequence from the Ethernet Channel Engine manual (see the step
 * comments below).  Called with the driver lock held; a no-op when the
 * interface is already running.
 */
static void
cas_init_locked(struct cas_softc *sc)
{
	if_t ifp = sc->sc_ifp;
	uint32_t v;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;

#ifdef CAS_DEBUG
	CTR2(KTR_CAS, "%s: %s: calling stop", device_get_name(sc->sc_dev),
	    __func__);
#endif
	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2.  Reset the Ethernet Channel. */
	cas_stop(ifp);
	cas_reset(sc);
#ifdef CAS_DEBUG
	CTR2(KTR_CAS, "%s: %s: restarting", device_get_name(sc->sc_dev),
	    __func__);
#endif

	if ((sc->sc_flags & CAS_SERDES) == 0)
		/* Re-initialize the MIF. */
		cas_mifinit(sc);

	/* step 3.  Setup data structures in host memory. */
	cas_meminit(sc);

	/* step 4.  TX MAC registers & counters */
	cas_init_regs(sc);

	/* step 5.  RX MAC registers & counters */

	/* step 6 & 7.  Program Ring Base Addresses. */
	CAS_WRITE_4(sc, CAS_TX_DESC3_BASE_HI,
	    (((uint64_t)CAS_CDTXDADDR(sc, 0)) >> 32));
	CAS_WRITE_4(sc, CAS_TX_DESC3_BASE_LO,
	    CAS_CDTXDADDR(sc, 0) & 0xffffffff);

	CAS_WRITE_4(sc, CAS_RX_COMP_BASE_HI,
	    (((uint64_t)CAS_CDRXCADDR(sc, 0)) >> 32));
	CAS_WRITE_4(sc, CAS_RX_COMP_BASE_LO,
	    CAS_CDRXCADDR(sc, 0) & 0xffffffff);

	CAS_WRITE_4(sc, CAS_RX_DESC_BASE_HI,
	    (((uint64_t)CAS_CDRXDADDR(sc, 0)) >> 32));
	CAS_WRITE_4(sc, CAS_RX_DESC_BASE_LO,
	    CAS_CDRXDADDR(sc, 0) & 0xffffffff);

	/* The second RX descriptor ring only exists on REG_PLUS chips. */
	if ((sc->sc_flags & CAS_REG_PLUS) != 0) {
		CAS_WRITE_4(sc, CAS_RX_DESC2_BASE_HI,
		    (((uint64_t)CAS_CDRXD2ADDR(sc, 0)) >> 32));
		CAS_WRITE_4(sc, CAS_RX_DESC2_BASE_LO,
		    CAS_CDRXD2ADDR(sc, 0) & 0xffffffff);
	}

#ifdef CAS_DEBUG
	CTR5(KTR_CAS,
	    "loading TXDR %lx, RXCR %lx, RXDR %lx, RXD2R %lx, cddma %lx",
	    CAS_CDTXDADDR(sc, 0), CAS_CDRXCADDR(sc, 0), CAS_CDRXDADDR(sc, 0),
	    CAS_CDRXD2ADDR(sc, 0), sc->sc_cddma);
#endif

	/* step 8.  Global Configuration & Interrupt Masks */

	/* Disable weighted round robin. */
	CAS_WRITE_4(sc, CAS_CAW, CAS_CAW_RR_DIS);

	/*
	 * Enable infinite bursts for revisions without PCI issues if
	 * applicable.  Doing so greatly improves the TX performance.
	 */
	CAS_WRITE_4(sc, CAS_INF_BURST,
	    (sc->sc_flags & CAS_TABORT) == 0 ? CAS_INF_BURST_EN :
	    0);

	/* Set up interrupts. */
	CAS_WRITE_4(sc, CAS_INTMASK,
	    ~(CAS_INTR_TX_INT_ME | CAS_INTR_TX_TAG_ERR |
	    CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_TAG_ERR |
	    CAS_INTR_RX_COMP_FULL | CAS_INTR_RX_BUF_AEMPTY |
	    CAS_INTR_RX_COMP_AFULL | CAS_INTR_RX_LEN_MMATCH |
	    CAS_INTR_PCI_ERROR_INT
#ifdef CAS_DEBUG
	    | CAS_INTR_PCS_INT | CAS_INTR_MIF
#endif
	    ));
	/* Don't clear top level interrupts when CAS_STATUS_ALIAS is read. */
	CAS_WRITE_4(sc, CAS_CLEAR_ALIAS, 0);
	CAS_WRITE_4(sc, CAS_MAC_RX_MASK, ~CAS_MAC_RX_OVERFLOW);
	CAS_WRITE_4(sc, CAS_MAC_TX_MASK,
	    ~(CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_MAX_PKT_ERR));
#ifdef CAS_DEBUG
	CAS_WRITE_4(sc, CAS_MAC_CTRL_MASK,
	    ~(CAS_MAC_CTRL_PAUSE_RCVD | CAS_MAC_CTRL_PAUSE |
	    CAS_MAC_CTRL_NON_PAUSE));
#else
	CAS_WRITE_4(sc, CAS_MAC_CTRL_MASK,
	    CAS_MAC_CTRL_PAUSE_RCVD | CAS_MAC_CTRL_PAUSE |
	    CAS_MAC_CTRL_NON_PAUSE);
#endif

	/* Enable PCI error interrupts. */
	CAS_WRITE_4(sc, CAS_ERROR_MASK,
	    ~(CAS_ERROR_DTRTO | CAS_ERROR_OTHER | CAS_ERROR_DMAW_ZERO |
	    CAS_ERROR_DMAR_ZERO | CAS_ERROR_RTRTO));

	/* Enable PCI error interrupts in BIM configuration. */
	CAS_WRITE_4(sc, CAS_BIM_CONF,
	    CAS_BIM_CONF_DPAR_EN | CAS_BIM_CONF_RMA_EN | CAS_BIM_CONF_RTA_EN);

	/*
	 * step 9.  ETX Configuration: encode receive descriptor ring size,
	 * enable DMA and disable pre-interrupt writeback completion.
	 */
	v = cas_descsize(CAS_NTXDESC) << CAS_TX_CONF_DESC3_SHFT;
	CAS_WRITE_4(sc, CAS_TX_CONF, v | CAS_TX_CONF_TXDMA_EN |
	    CAS_TX_CONF_RDPP_DIS | CAS_TX_CONF_PICWB_DIS);

	/* step 10.  ERX Configuration */

	/*
	 * Encode receive completion and descriptor ring sizes, set the
	 * swivel offset.
	 */
	v = cas_rxcompsize(CAS_NRXCOMP) << CAS_RX_CONF_COMP_SHFT;
	v |= cas_descsize(CAS_NRXDESC) << CAS_RX_CONF_DESC_SHFT;
	if ((sc->sc_flags & CAS_REG_PLUS) != 0)
		v |= cas_descsize(CAS_NRXDESC2) << CAS_RX_CONF_DESC2_SHFT;
	CAS_WRITE_4(sc, CAS_RX_CONF,
	    v | (ETHER_ALIGN << CAS_RX_CONF_SOFF_SHFT));

	/* Set the PAUSE thresholds.  We use the maximum OFF threshold. */
	CAS_WRITE_4(sc, CAS_RX_PTHRS,
	    (111 << CAS_RX_PTHRS_XOFF_SHFT) | (15 << CAS_RX_PTHRS_XON_SHFT));

	/* RX blanking */
	CAS_WRITE_4(sc, CAS_RX_BLANK,
	    (15 << CAS_RX_BLANK_TIME_SHFT) | (5 << CAS_RX_BLANK_PKTS_SHFT));

	/* Set RX_COMP_AFULL threshold to half of the RX completions. */
	CAS_WRITE_4(sc, CAS_RX_AEMPTY_THRS,
	    (CAS_NRXCOMP / 2) << CAS_RX_AEMPTY_COMP_SHFT);

	/* Initialize the RX page size register as appropriate for 8k. */
	CAS_WRITE_4(sc, CAS_RX_PSZ,
	    (CAS_RX_PSZ_8K << CAS_RX_PSZ_SHFT) |
	    (4 << CAS_RX_PSZ_MB_CNT_SHFT) |
	    (CAS_RX_PSZ_MB_STRD_2K << CAS_RX_PSZ_MB_STRD_SHFT) |
	    (CAS_RX_PSZ_MB_OFF_64 << CAS_RX_PSZ_MB_OFF_SHFT));

	/* Disable RX random early detection. */
	CAS_WRITE_4(sc, CAS_RX_RED, 0);

	/* Zero the RX reassembly DMA table. */
	for (v = 0; v <= CAS_RX_REAS_DMA_ADDR_LC; v++) {
		CAS_WRITE_4(sc, CAS_RX_REAS_DMA_ADDR, v);
		CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_LO, 0);
		CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_MD, 0);
		CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_HI, 0);
	}

	/* Ensure the RX control FIFO and RX IPP FIFO addresses are zero. */
	CAS_WRITE_4(sc, CAS_RX_CTRL_FIFO, 0);
	CAS_WRITE_4(sc, CAS_RX_IPP_ADDR, 0);

	/* Finally, enable RX DMA. */
	CAS_WRITE_4(sc, CAS_RX_CONF,
	    CAS_READ_4(sc, CAS_RX_CONF) | CAS_RX_CONF_RXDMA_EN);

	/* step 11.  Configure Media. */

	/* step 12.  RX_MAC Configuration Register */
	v = CAS_READ_4(sc, CAS_MAC_RX_CONF);
	v &= ~(CAS_MAC_RX_CONF_STRPPAD | CAS_MAC_RX_CONF_EN);
	v |= CAS_MAC_RX_CONF_STRPFCS;
	sc->sc_mac_rxcfg = v;
	/*
	 * Clear the RX filter and reprogram it.  This will also set the
	 * current RX MAC configuration and enable it.
	 */
	cas_setladrf(sc);

	/* step 13.  TX_MAC Configuration Register */
	v = CAS_READ_4(sc, CAS_MAC_TX_CONF);
	v |= CAS_MAC_TX_CONF_EN;
	(void)cas_disable_tx(sc);
	CAS_WRITE_4(sc, CAS_MAC_TX_CONF, v);

	/* step 14.  Issue Transmit Pending command. */

	/* step 15.  Give the receiver a swift kick. */
	CAS_WRITE_4(sc, CAS_RX_KICK, CAS_NRXDESC - 4);
	CAS_WRITE_4(sc, CAS_RX_COMP_TAIL, 0);
	if ((sc->sc_flags & CAS_REG_PLUS) != 0)
		CAS_WRITE_4(sc, CAS_RX_KICK2, CAS_NRXDESC2 - 4);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	mii_mediachg(sc->sc_mii);

	/* Start the one second timer. */
	sc->sc_wdog_timer = 0;
	callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc);
}
/*
 * Map the mbuf chain *m_head for transmission and fill TX descriptors.
 *
 * On success the chain is owned by the driver (freed from cas_tint()
 * once transmitted) and 0 is returned.  On failure an errno is
 * returned; *m_head is freed and set to NULL, except when the ring or
 * the free list is simply exhausted, in which case ENOBUFS is returned
 * with *m_head intact so the caller can requeue the packet.  Called
 * with the driver lock held.
 */
static int
cas_load_txmbuf(struct cas_softc *sc, struct mbuf **m_head)
{
	bus_dma_segment_t txsegs[CAS_NTXSEGS];
	struct cas_txsoft *txs;
	struct ip *ip;
	struct mbuf *m;
	uint64_t cflags;
	int error, nexttx, nsegs, offset, seg;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	/* Get a work queue entry. */
	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
		/* Ran out of descriptors. */
		return (ENOBUFS);
	}

	cflags = 0;
	if (((*m_head)->m_pkthdr.csum_flags & CAS_CSUM_FEATURES) != 0) {
		/*
		 * TX checksum offload: the chip needs the checksum start
		 * and stuff offsets, which requires a writable, contiguous
		 * view of the IP header.
		 */
		if (M_WRITABLE(*m_head) == 0) {
			m = m_dup(*m_head, M_NOWAIT);
			m_freem(*m_head);
			*m_head = m;
			if (m == NULL)
				return (ENOBUFS);
		}
		offset = sizeof(struct ether_header);
		m = m_pullup(*m_head, offset + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, caddr_t) + offset);
		offset += (ip->ip_hl << 2);
		cflags = (offset << CAS_TD_CKSUM_START_SHFT) |
		    ((offset + m->m_pkthdr.csum_data) <<
		    CAS_TD_CKSUM_STUFF_SHFT) | CAS_TD_CKSUM_EN;
		*m_head = m;
	}

	error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments: collapse the chain and retry once. */
		m = m_collapse(*m_head, M_NOWAIT, CAS_NTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag,
		    txs->txs_dmamap, *m_head, txsegs, &nsegs,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs <= CAS_NTXSEGS,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/*
	 * Ensure we have enough descriptors free to describe
	 * the packet.  Note, we always reserve one descriptor
	 * at the end of the ring as a termination point, in
	 * order to prevent wrap-around.
	 */
	if (nsegs > sc->sc_txfree - 1) {
		txs->txs_ndescs = 0;
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		return (ENOBUFS);
	}

	txs->txs_ndescs = nsegs;
	txs->txs_firstdesc = sc->sc_txnext;
	nexttx = txs->txs_firstdesc;
	/* One descriptor per DMA segment: buffer address and length. */
	for (seg = 0; seg < nsegs; seg++, nexttx = CAS_NEXTTX(nexttx)) {
#ifdef CAS_DEBUG
		CTR6(KTR_CAS,
		    "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)",
		    __func__, seg, nexttx, txsegs[seg].ds_len,
		    txsegs[seg].ds_addr, htole64(txsegs[seg].ds_addr));
#endif
		sc->sc_txdescs[nexttx].cd_buf_ptr =
		    htole64(txsegs[seg].ds_addr);
		KASSERT(txsegs[seg].ds_len <
		    CAS_TD_BUF_LEN_MASK >> CAS_TD_BUF_LEN_SHFT,
		    ("%s: segment size too large!", __func__));
		sc->sc_txdescs[nexttx].cd_flags =
		    htole64(txsegs[seg].ds_len << CAS_TD_BUF_LEN_SHFT);
		txs->txs_lastdesc = nexttx;
	}

	/* Set EOF on the last descriptor. */
#ifdef CAS_DEBUG
	CTR3(KTR_CAS, "%s: end of frame at segment %d, TX %d",
	    __func__, seg, nexttx);
#endif
	sc->sc_txdescs[txs->txs_lastdesc].cd_flags |=
	    htole64(CAS_TD_END_OF_FRAME);

	/* Lastly set SOF on the first descriptor. */
#ifdef CAS_DEBUG
	CTR3(KTR_CAS, "%s: start of frame at segment %d, TX %d",
	    __func__, seg, nexttx);
#endif
	/*
	 * Accumulate the number of descriptors queued since a completion
	 * interrupt was last requested and ask for one (CAS_TD_INT_ME)
	 * once roughly 2/3 of the ring has been used.  The parentheses
	 * around the compound assignment are essential: previously the
	 * comparison bound tighter, so only its 0/1 result was added to
	 * sc_txwin and the threshold was effectively never reached,
	 * leaving TX reclamation entirely to cas_tick()/cas_start().
	 */
	if ((sc->sc_txwin += nsegs) > CAS_MAXTXFREE * 2 / 3) {
		sc->sc_txwin = 0;
		sc->sc_txdescs[txs->txs_firstdesc].cd_flags |=
		    htole64(cflags | CAS_TD_START_OF_FRAME | CAS_TD_INT_ME);
	} else
		sc->sc_txdescs[txs->txs_firstdesc].cd_flags |=
		    htole64(cflags | CAS_TD_START_OF_FRAME);

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
	    BUS_DMASYNC_PREWRITE);

#ifdef CAS_DEBUG
	CTR4(KTR_CAS, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
	    __func__, txs->txs_firstdesc, txs->txs_lastdesc,
	    txs->txs_ndescs);
#endif
	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	txs->txs_mbuf = *m_head;

	sc->sc_txnext = CAS_NEXTTX(txs->txs_lastdesc);
	sc->sc_txfree -= txs->txs_ndescs;
	return (0);
}
/*
 * Program the MAC registers: the one-time "magic" values and address
 * filters on the first call after power-on (guarded by CAS_INITED),
 * then on every call the statistics counters, PAUSE time, station
 * address and MII output enable.  Called with the driver lock held.
 */
static void
cas_init_regs(struct cas_softc *sc)
{
	int i;
	const u_char *laddr = if_getlladdr(sc->sc_ifp);

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	/* These registers are not cleared on reset. */
	if ((sc->sc_flags & CAS_INITED) == 0) {
		/* magic values */
		CAS_WRITE_4(sc, CAS_MAC_IPG0, 0);
		CAS_WRITE_4(sc, CAS_MAC_IPG1, 8);
		CAS_WRITE_4(sc, CAS_MAC_IPG2, 4);

		/* min frame length */
		CAS_WRITE_4(sc, CAS_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* max frame length and max burst size */
		CAS_WRITE_4(sc, CAS_MAC_MAX_BF,
		    ((ETHER_MAX_LEN_JUMBO + ETHER_VLAN_ENCAP_LEN) <<
		    CAS_MAC_MAX_BF_FRM_SHFT) |
		    (0x2000 << CAS_MAC_MAX_BF_BST_SHFT));

		/* more magic values */
		CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x7);
		CAS_WRITE_4(sc, CAS_MAC_JAM_SIZE, 0x4);
		CAS_WRITE_4(sc, CAS_MAC_ATTEMPT_LIMIT, 0x10);
		CAS_WRITE_4(sc, CAS_MAC_CTRL_TYPE, 0x8808);

		/* random number seed */
		CAS_WRITE_4(sc, CAS_MAC_RANDOM_SEED,
		    ((laddr[5] << 8) | laddr[4]) & 0x3ff);

		/* secondary MAC addresses: 0:0:0:0:0:0 */
		for (i = CAS_MAC_ADDR3; i <= CAS_MAC_ADDR41;
		    i += CAS_MAC_ADDR4 - CAS_MAC_ADDR3)
			CAS_WRITE_4(sc, i, 0);

		/* MAC control address: 01:80:c2:00:00:01 */
		CAS_WRITE_4(sc, CAS_MAC_ADDR42, 0x0001);
		CAS_WRITE_4(sc, CAS_MAC_ADDR43, 0xc200);
		CAS_WRITE_4(sc, CAS_MAC_ADDR44, 0x0180);

		/* MAC filter address: 0:0:0:0:0:0 */
		CAS_WRITE_4(sc, CAS_MAC_AFILTER0, 0);
		CAS_WRITE_4(sc, CAS_MAC_AFILTER1, 0);
		CAS_WRITE_4(sc, CAS_MAC_AFILTER2, 0);
		CAS_WRITE_4(sc, CAS_MAC_AFILTER_MASK1_2, 0);
		CAS_WRITE_4(sc, CAS_MAC_AFILTER_MASK0, 0);

		/* Zero the hash table. */
		for (i = CAS_MAC_HASH0; i <= CAS_MAC_HASH15;
		    i += CAS_MAC_HASH1 - CAS_MAC_HASH0)
			CAS_WRITE_4(sc, i, 0);

		sc->sc_flags |= CAS_INITED;
	}

	/* Counters need to be zeroed. */
	CAS_WRITE_4(sc, CAS_MAC_NORM_COLL_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_FIRST_COLL_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_EXCESS_COLL_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_LATE_COLL_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_DEFER_TMR_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_PEAK_ATTEMPTS, 0);
	CAS_WRITE_4(sc, CAS_MAC_RX_FRAME_COUNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_RX_LEN_ERR_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_RX_ALIGN_ERR, 0);
	CAS_WRITE_4(sc, CAS_MAC_RX_CRC_ERR_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_RX_CODE_VIOL, 0);

	/* Set XOFF PAUSE time. */
	CAS_WRITE_4(sc, CAS_MAC_SPC, 0x1BF0 << CAS_MAC_SPC_TIME_SHFT);

	/* Set the station address (16 bits per register, low word first). */
	CAS_WRITE_4(sc, CAS_MAC_ADDR0, (laddr[4] << 8) | laddr[5]);
	CAS_WRITE_4(sc, CAS_MAC_ADDR1, (laddr[2] << 8) | laddr[3]);
	CAS_WRITE_4(sc, CAS_MAC_ADDR2, (laddr[0] << 8) | laddr[1]);

	/* Enable MII outputs. */
	CAS_WRITE_4(sc, CAS_MAC_XIF_CONF, CAS_MAC_XIF_CONF_TX_OE);
}
/*
 * Taskqueue wrapper that re-runs the if_start handler outside
 * interrupt filter context.
 */
static void
cas_tx_task(void *arg, int pending __unused)
{

	cas_start((if_t)arg);
}
/*
 * Publish newly queued TX descriptors to the chip by syncing the
 * control data and advancing the TX kick register.
 */
static inline void
cas_txkick(struct cas_softc *sc)
{

	/*
	 * Update the TX kick register.  This register has to point to the
	 * descriptor after the last valid one and for optimum performance
	 * should be incremented in multiples of 4 (the DMA engine fetches/
	 * updates descriptors in batches of 4).
	 */
#ifdef CAS_DEBUG
	CTR3(KTR_CAS, "%s: %s: kicking TX %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
#endif
	CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CAS_WRITE_4(sc, CAS_TX_KICK3, sc->sc_txnext);
}
/*
 * if_start handler (also run via cas_tx_task()): drain the interface
 * send queue into TX descriptors, kicking the chip on every fourth
 * descriptor for best DMA batching, and arm the watchdog when anything
 * was queued.  Takes and releases the driver lock itself.
 */
static void
cas_start(if_t ifp)
{
	struct cas_softc *sc = if_getsoftc(ifp);
	struct mbuf *m;
	int kicked, ntx;

	CAS_LOCK(sc);

	/* Nothing to do unless running, not blocked, and link is up. */
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->sc_flags & CAS_LINK) == 0) {
		CAS_UNLOCK(sc);
		return;
	}

	/* Reclaim completed descriptors when the ring runs low. */
	if (sc->sc_txfree < CAS_MAXTXFREE / 4)
		cas_tint(sc);

#ifdef CAS_DEBUG
	CTR4(KTR_CAS, "%s: %s: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
	    sc->sc_txnext);
#endif
	ntx = 0;
	kicked = 0;
	for (; !if_sendq_empty(ifp) && sc->sc_txfree > 1;) {
		m = if_dequeue(ifp);
		if (m == NULL)
			break;
		if (cas_load_txmbuf(sc, &m) != 0) {
			/*
			 * Ring full (m still valid): requeue the packet
			 * and mark the interface busy until cas_tint()
			 * frees descriptors.  m == NULL means the mbuf
			 * was consumed by the failure and is dropped.
			 */
			if (m == NULL)
				break;
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			if_sendq_prepend(ifp, m);
			break;
		}
		/* Kick on multiples of 4; see cas_txkick(). */
		if ((sc->sc_txnext % 4) == 0) {
			cas_txkick(sc);
			kicked = 1;
		} else
			kicked = 0;
		ntx++;
		BPF_MTAP(ifp, m);
	}

	if (ntx > 0) {
		/* Kick any remainder that was not already kicked above. */
		if (kicked == 0)
			cas_txkick(sc);
#ifdef CAS_DEBUG
		CTR2(KTR_CAS, "%s: packets enqueued, OWN on %d",
		    device_get_name(sc->sc_dev), sc->sc_txnext);
#endif

		/* Set a watchdog timer in case the chip flakes out. */
		sc->sc_wdog_timer = 5;
#ifdef CAS_DEBUG
		CTR3(KTR_CAS, "%s: %s: watchdog %d",
		    device_get_name(sc->sc_dev), __func__,
		    sc->sc_wdog_timer);
#endif
	}

	CAS_UNLOCK(sc);
}
/*
 * TX completion handling: walk the dirty queue and release every
 * transmit job whose descriptors the chip has fully processed according
 * to the CAS_TX_COMP3 register, returning descriptors to the free pool
 * and freeing the mbufs.  Stops at the first still-pending job.  Called
 * with the driver lock held.
 */
static void
cas_tint(struct cas_softc *sc)
{
	if_t ifp = sc->sc_ifp;
	struct cas_txsoft *txs;
	int progress;
	uint32_t txlast;
#ifdef CAS_DEBUG
	int i;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

	/*
	 * Go through our TX list and free mbufs for those
	 * frames that have been transmitted.
	 */
	progress = 0;
	CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
#ifdef CAS_DEBUG
		if ((if_getflags(ifp) & IFF_DEBUG) != 0) {
			printf("    txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = CAS_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("cd_flags: 0x%016llx\t",
				    (long long)le64toh(
				    sc->sc_txdescs[i].cd_flags));
				printf("cd_buf_ptr: 0x%016llx\n",
				    (long long)le64toh(
				    sc->sc_txdescs[i].cd_buf_ptr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * CAS_TX_COMPn points to the last descriptor
		 * processed + 1.
		 */
		txlast = CAS_READ_4(sc, CAS_TX_COMP3);
#ifdef CAS_DEBUG
		CTR4(KTR_CAS, "%s: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
#endif
		/*
		 * The job is done only once txlast has passed beyond its
		 * descriptor range; the range may wrap around the ring end.
		 */
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
			    (txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps. */
			if ((txlast >= txs->txs_firstdesc) ||
			    (txlast <= txs->txs_lastdesc))
				break;
		}

#ifdef CAS_DEBUG
		CTR1(KTR_CAS, "%s: releasing a descriptor", __func__);
#endif
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		progress = 1;
	}

#ifdef CAS_DEBUG
	CTR5(KTR_CAS, "%s: CAS_TX_SM1 %x CAS_TX_SM2 %x CAS_TX_DESC_BASE %llx "
	    "CAS_TX_COMP3 %x",
	    __func__, CAS_READ_4(sc, CAS_TX_SM1), CAS_READ_4(sc, CAS_TX_SM2),
	    ((long long)CAS_READ_4(sc, CAS_TX_DESC3_BASE_HI) << 32) |
	    CAS_READ_4(sc, CAS_TX_DESC3_BASE_LO),
	    CAS_READ_4(sc, CAS_TX_COMP3));
#endif

	if (progress) {
		/* We freed some descriptors, so reset IFF_DRV_OACTIVE. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		if (STAILQ_EMPTY(&sc->sc_txdirtyq))
			sc->sc_wdog_timer = 0;
	}

#ifdef CAS_DEBUG
	CTR3(KTR_CAS, "%s: %s: watchdog %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
#endif
}
/*
 * Deferred RX processing: retry cas_rint() from the callout armed when
 * a completion was still marked hardware-owned.  Runs with the driver
 * lock held (callout initialized with the driver mutex); if_input()
 * requires the net epoch, so enter it here.
 */
static void
cas_rint_timeout(void *arg)
{
	struct cas_softc *sc;
	struct epoch_tracker et;

	sc = arg;
	CAS_LOCK_ASSERT(sc, MA_OWNED);
	NET_EPOCH_ENTER(et);
	cas_rint(sc);
	NET_EPOCH_EXIT(et);
}
/*
 * Process the RX completion ring.  For every completion handed back by
 * the chip, wrap the header and/or data buffer contents into zero-copy
 * external mbufs reference-counted against the backing RX pages, pass
 * them up the stack (dropping the driver lock around if_input()), and
 * recycle fully released pages via cas_add_rxdesc().  Completions still
 * marked hardware-owned trigger a retry via the sc_rx_ch callout.
 * Called with the driver lock held.
 */
static void
cas_rint(struct cas_softc *sc)
{
	struct cas_rxdsoft *rxds, *rxds2;
	if_t ifp = sc->sc_ifp;
	struct mbuf *m, *m2;
	uint64_t word1, word2, word3 __unused, word4;
	uint32_t rxhead;
	u_int idx, idx2, len, off, skip;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	/* A completion arrived; any pending retry is now moot. */
	callout_stop(&sc->sc_rx_ch);

#ifdef CAS_DEBUG
	CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

#define	PRINTWORD(n, delimiter)						\
	printf("word ## n: 0x%016llx%c", (long long)word ## n, delimiter)

#define	SKIPASSERT(n)							\
	KASSERT(sc->sc_rxcomps[sc->sc_rxcptr].crc_word ## n == 0,	\
	    ("%s: word ## n not 0", __func__))

#define	WORDTOH(n)							\
	word ## n = le64toh(sc->sc_rxcomps[sc->sc_rxcptr].crc_word ## n)

	/*
	 * Read the completion head register once.  This limits
	 * how long the following loop can execute.
	 */
	rxhead = CAS_READ_4(sc, CAS_RX_COMP_HEAD);
#ifdef CAS_DEBUG
	CTR4(KTR_CAS, "%s: sc->sc_rxcptr %d, sc->sc_rxdptr %d, head %d",
	    __func__, sc->sc_rxcptr, sc->sc_rxdptr, rxhead);
#endif
	skip = 0;
	CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (; sc->sc_rxcptr != rxhead;
	    sc->sc_rxcptr = CAS_NEXTRXCOMP(sc->sc_rxcptr)) {
		/* Skip entries the previous completion told us to skip. */
		if (skip != 0) {
			SKIPASSERT(1);
			SKIPASSERT(2);
			SKIPASSERT(3);
			--skip;
			goto skip;
		}
		WORDTOH(1);
		WORDTOH(2);
		WORDTOH(3);
		WORDTOH(4);
#ifdef CAS_DEBUG
		if ((if_getflags(ifp) & IFF_DEBUG) != 0) {
			printf("    completion %d: ", sc->sc_rxcptr);
			PRINTWORD(1, '\t');
			PRINTWORD(2, '\t');
			PRINTWORD(3, '\t');
			PRINTWORD(4, '\n');
		}
#endif
		if (__predict_false(
		    (word1 & CAS_RC1_TYPE_MASK) == CAS_RC1_TYPE_HW ||
		    (word4 & CAS_RC4_ZERO) != 0)) {
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed.  This has been
			 * observed on some machines.  Just exiting here
			 * might leave the packet sitting around until another
			 * one arrives to trigger a new interrupt, which is
			 * generally undesirable, so set up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, CAS_RXOWN_TICKS,
			    cas_rint_timeout, sc);
			break;
		}
		if (__predict_false(
		    (word4 & (CAS_RC4_BAD | CAS_RC4_LEN_MMATCH)) != 0)) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			device_printf(sc->sc_dev,
			    "receive error: CRC error\n");
			continue;
		}
		/* Sanity-check the buffer/release flag combinations. */
		KASSERT(CAS_GET(word1, CAS_RC1_DATA_SIZE) == 0 ||
		    CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0,
		    ("%s: data and header present", __func__));
		KASSERT((word1 & CAS_RC1_SPLIT_PKT) == 0 ||
		    CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0,
		    ("%s: split and header present", __func__));
		KASSERT(CAS_GET(word1, CAS_RC1_DATA_SIZE) == 0 ||
		    (word1 & CAS_RC1_RELEASE_HDR) == 0,
		    ("%s: data present but header release", __func__));
		KASSERT(CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0 ||
		    (word1 & CAS_RC1_RELEASE_DATA) == 0,
		    ("%s: header present but data release", __func__));

		if ((len = CAS_GET(word2, CAS_RC2_HDR_SIZE)) != 0) {
			/* Small packet delivered into a header buffer. */
			idx = CAS_GET(word2, CAS_RC2_HDR_INDEX);
			off = CAS_GET(word2, CAS_RC2_HDR_OFF);
#ifdef CAS_DEBUG
			CTR4(KTR_CAS, "%s: hdr at idx %d, off %d, len %d",
			    __func__, idx, off, len);
#endif
			rxds = &sc->sc_rxdsoft[idx];
			MGETHDR(m, M_NOWAIT, MT_DATA);
			if (m != NULL) {
				refcount_acquire(&rxds->rxds_refcount);
				bus_dmamap_sync(sc->sc_rdmatag,
				    rxds->rxds_dmamap, BUS_DMASYNC_POSTREAD);
				m_extadd(m, (char *)rxds->rxds_buf +
				    off * 256 + ETHER_ALIGN, len, cas_free,
				    sc, (void *)(uintptr_t)idx,
				    M_RDONLY, EXT_NET_DRV);
				if ((m->m_flags & M_EXT) == 0) {
					m_freem(m);
					m = NULL;
				}
			}
			if (m != NULL) {
				m->m_pkthdr.rcvif = ifp;
				m->m_pkthdr.len = m->m_len = len;
				if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
				if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
					cas_rxcksum(m, CAS_GET(word4,
					    CAS_RC4_TCP_CSUM));
				/* Pass it on. */
				CAS_UNLOCK(sc);
				if_input(ifp, m);
				CAS_LOCK(sc);
			} else
				if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);

			if ((word1 & CAS_RC1_RELEASE_HDR) != 0 &&
			    refcount_release(&rxds->rxds_refcount) != 0)
				cas_add_rxdesc(sc, idx);
		} else if ((len = CAS_GET(word1, CAS_RC1_DATA_SIZE)) != 0) {
			/* Packet delivered into (up to two) data buffers. */
			idx = CAS_GET(word1, CAS_RC1_DATA_INDEX);
			off = CAS_GET(word1, CAS_RC1_DATA_OFF);
#ifdef CAS_DEBUG
			CTR4(KTR_CAS, "%s: data at idx %d, off %d, len %d",
			    __func__, idx, off, len);
#endif
			rxds = &sc->sc_rxdsoft[idx];
			MGETHDR(m, M_NOWAIT, MT_DATA);
			if (m != NULL) {
				refcount_acquire(&rxds->rxds_refcount);
				off += ETHER_ALIGN;
				m->m_len = min(CAS_PAGE_SIZE - off, len);
				bus_dmamap_sync(sc->sc_rdmatag,
				    rxds->rxds_dmamap, BUS_DMASYNC_POSTREAD);
				m_extadd(m, (char *)rxds->rxds_buf + off,
				    m->m_len, cas_free, sc,
				    (void *)(uintptr_t)idx, M_RDONLY,
				    EXT_NET_DRV);
				if ((m->m_flags & M_EXT) == 0) {
					m_freem(m);
					m = NULL;
				}
			}
			idx2 = 0;
			m2 = NULL;
			rxds2 = NULL;
			if ((word1 & CAS_RC1_SPLIT_PKT) != 0) {
				/* Remainder continues in a second page. */
				KASSERT((word1 & CAS_RC1_RELEASE_NEXT) != 0,
				    ("%s: split but no release next",
				    __func__));
				idx2 = CAS_GET(word2, CAS_RC2_NEXT_INDEX);
#ifdef CAS_DEBUG
				CTR2(KTR_CAS, "%s: split at idx %d",
				    __func__, idx2);
#endif
				rxds2 = &sc->sc_rxdsoft[idx2];
				if (m != NULL) {
					MGET(m2, M_NOWAIT, MT_DATA);
					if (m2 != NULL) {
						refcount_acquire(
						    &rxds2->rxds_refcount);
						m2->m_len = len - m->m_len;
						bus_dmamap_sync(
						    sc->sc_rdmatag,
						    rxds2->rxds_dmamap,
						    BUS_DMASYNC_POSTREAD);
						m_extadd(m2,
						    (char *)rxds2->rxds_buf,
						    m2->m_len, cas_free, sc,
						    (void *)(uintptr_t)idx2,
						    M_RDONLY, EXT_NET_DRV);
						if ((m2->m_flags & M_EXT) ==
						    0) {
							m_freem(m2);
							m2 = NULL;
						}
					}
				}
				if (m2 != NULL)
					m->m_next = m2;
				else if (m != NULL) {
					m_freem(m);
					m = NULL;
				}
			}
			if (m != NULL) {
				m->m_pkthdr.rcvif = ifp;
				m->m_pkthdr.len = len;
				if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
				if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
					cas_rxcksum(m, CAS_GET(word4,
					    CAS_RC4_TCP_CSUM));
				/* Pass it on. */
				CAS_UNLOCK(sc);
				if_input(ifp, m);
				CAS_LOCK(sc);
			} else
				if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);

			if ((word1 & CAS_RC1_RELEASE_DATA) != 0 &&
			    refcount_release(&rxds->rxds_refcount) != 0)
				cas_add_rxdesc(sc, idx);
			if ((word1 & CAS_RC1_SPLIT_PKT) != 0 &&
			    refcount_release(&rxds2->rxds_refcount) != 0)
				cas_add_rxdesc(sc, idx2);
		}

		skip = CAS_GET(word1, CAS_RC1_SKIP);

 skip:
		/* Recycle the completion entry for the hardware. */
		cas_rxcompinit(&sc->sc_rxcomps[sc->sc_rxcptr]);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
			break;
	}
	CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CAS_WRITE_4(sc, CAS_RX_COMP_TAIL, sc->sc_rxcptr);

#undef PRINTWORD
#undef SKIPASSERT
#undef WORDTOH

#ifdef CAS_DEBUG
	CTR4(KTR_CAS, "%s: done sc->sc_rxcptr %d, sc->sc_rxdptr %d, head %d",
	    __func__, sc->sc_rxcptr, sc->sc_rxdptr,
	    CAS_READ_4(sc, CAS_RX_COMP_HEAD));
#endif
}
/*
 * External-mbuf free routine for RX pages: drop one reference on the
 * backing page and, when it was the last one, hand the page back to the
 * hardware via cas_add_rxdesc().
 */
static void
cas_free(struct mbuf *m)
{
	struct cas_rxdsoft *rxds;
	struct cas_softc *sc;
	u_int idx;
	int owned;

	sc = m->m_ext.ext_arg1;
	idx = (uintptr_t)m->m_ext.ext_arg2;
	rxds = &sc->sc_rxdsoft[idx];
	if (refcount_release(&rxds->rxds_refcount) == 0)
		return;

	/*
	 * NB: this function can be called via m_freem(9) within
	 * this driver, in which case the lock is already held.
	 */
	owned = CAS_LOCK_OWNED(sc);
	if (owned == 0)
		CAS_LOCK(sc);
	cas_add_rxdesc(sc, idx);
	if (owned == 0)
		CAS_UNLOCK(sc);
}
/*
 * Hand the RX buffer @idx back to the chip by writing it into the next
 * free slot of the RX descriptor ring and, every fourth descriptor,
 * advancing the RX kick register.  Called with the softc lock held.
 */
static inline void
cas_add_rxdesc(struct cas_softc *sc, u_int idx)
{

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	bus_dmamap_sync(sc->sc_rdmatag, sc->sc_rxdsoft[idx].rxds_dmamap,
	    BUS_DMASYNC_PREREAD);
	CAS_UPDATE_RXDESC(sc, sc->sc_rxdptr, idx);
	sc->sc_rxdptr = CAS_NEXTRXDESC(sc->sc_rxdptr);

	/*
	 * Update the RX kick register.  This register has to point to the
	 * descriptor after the last valid one (before the current batch)
	 * and for optimum performance should be incremented in multiples
	 * of 4 (the DMA engine fetches/updates descriptors in batches of 4).
	 */
	if ((sc->sc_rxdptr % 4) == 0) {
		CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		CAS_WRITE_4(sc, CAS_RX_KICK,
		    (sc->sc_rxdptr + CAS_NRXDESC - 4) & CAS_NRXDESC_MASK);
	}
}
/*
 * Handle a fatal chip error interrupt: log the cause (with PCI error
 * detail when CAS_INTR_PCI_ERROR_INT is set), then reinitialize the
 * hardware and restart any pending transmission.
 */
static void
cas_eint(struct cas_softc *sc, u_int status)
{
	if_t ifp = sc->sc_ifp;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);

	device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status);
	if ((status & CAS_INTR_PCI_ERROR_INT) != 0) {
		status = CAS_READ_4(sc, CAS_ERROR_STATUS);
		printf(", PCI bus error 0x%x", status);
		if ((status & CAS_ERROR_OTHER) != 0) {
			status = pci_read_config(sc->sc_dev, PCIR_STATUS, 2);
			printf(", PCI status 0x%x", status);
			/*
			 * Write the value back; presumably the PCI status
			 * error bits are write-1-to-clear -- NOTE(review).
			 */
			pci_write_config(sc->sc_dev, PCIR_STATUS, status, 2);
		}
	}
	printf("\n");

	/* Force a full reinit; cas_init_locked() re-sets IFF_DRV_RUNNING. */
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	cas_init_locked(sc);
	if (!if_sendq_empty(ifp))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
}
/*
 * Interrupt filter: reject stray interrupts, otherwise mask all chip
 * interrupts and defer the actual processing to cas_intr_task() on the
 * driver taskqueue.
 *
 * NOTE(review): CAS_STATUS_ALIAS is presumably a non-clearing mirror of
 * CAS_STATUS (the real status read happens later in cas_intr_task()) --
 * confirm against the Cassini specification.
 */
static int
cas_intr(void *v)
{
	struct cas_softc *sc = v;

	if (__predict_false((CAS_READ_4(sc, CAS_STATUS_ALIAS) &
	    CAS_INTR_SUMMARY) == 0))
		return (FILTER_STRAY);

	/* Disable interrupts. */
	CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff);

	taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);

	return (FILTER_HANDLED);
}
/*
 * Deferred interrupt handler (runs on the driver taskqueue, queued by
 * cas_intr()).  Demultiplexes the interrupt status: fatal errors go to
 * cas_eint(), MAC TX/RX faults are counted/logged, and RX/TX completion
 * bits dispatch to cas_rint()/cas_tint().  Re-enables the interrupt
 * mask on the way out unless more work was detected.
 */
static void
cas_intr_task(void *arg, int pending __unused)
{
	struct cas_softc *sc = arg;
	if_t ifp = sc->sc_ifp;
	uint32_t status, status2;

	CAS_LOCK_ASSERT(sc, MA_NOTOWNED);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	/* Reading CAS_STATUS acknowledges the interrupt cause. */
	status = CAS_READ_4(sc, CAS_STATUS);
	if (__predict_false((status & CAS_INTR_SUMMARY) == 0))
		goto done;

	CAS_LOCK(sc);
#ifdef CAS_DEBUG
	CTR4(KTR_CAS, "%s: %s: cplt %x, status %x",
	    device_get_name(sc->sc_dev), __func__,
	    (status >> CAS_STATUS_TX_COMP3_SHFT), (u_int)status);

	/*
	 * PCS interrupts must be cleared, otherwise no traffic is passed!
	 */
	if ((status & CAS_INTR_PCS_INT) != 0) {
		/* Read twice; the second read clears the latched status. */
		status2 =
		    CAS_READ_4(sc, CAS_PCS_INTR_STATUS) |
		    CAS_READ_4(sc, CAS_PCS_INTR_STATUS);
		if ((status2 & CAS_PCS_INTR_LINK) != 0)
			device_printf(sc->sc_dev,
			    "%s: PCS link status changed\n", __func__);
	}
	if ((status & CAS_MAC_CTRL_STATUS) != 0) {
		status2 = CAS_READ_4(sc, CAS_MAC_CTRL_STATUS);
		if ((status2 & CAS_MAC_CTRL_PAUSE) != 0)
			device_printf(sc->sc_dev,
			    "%s: PAUSE received (PAUSE time %d slots)\n",
			    __func__,
			    (status2 & CAS_MAC_CTRL_STATUS_PT_MASK) >>
			    CAS_MAC_CTRL_STATUS_PT_SHFT);
		if ((status2 & CAS_MAC_CTRL_PAUSE) != 0)
			device_printf(sc->sc_dev,
			    "%s: transited to PAUSE state\n", __func__);
		if ((status2 & CAS_MAC_CTRL_NON_PAUSE) != 0)
			device_printf(sc->sc_dev,
			    "%s: transited to non-PAUSE state\n", __func__);
	}
	if ((status & CAS_INTR_MIF) != 0)
		device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__);
#endif

	/* Fatal errors: log, reset and reinitialize the chip. */
	if (__predict_false((status &
	    (CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_TAG_ERR |
	    CAS_INTR_RX_LEN_MMATCH | CAS_INTR_PCI_ERROR_INT)) != 0)) {
		cas_eint(sc, status);
		CAS_UNLOCK(sc);
		return;
	}

	if (__predict_false(status & CAS_INTR_TX_MAC_INT)) {
		status2 = CAS_READ_4(sc, CAS_MAC_TX_STATUS);
		if ((status2 &
		    (CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_MAX_PKT_ERR)) != 0)
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		else if ((status2 & ~CAS_MAC_TX_FRAME_XMTD) != 0)
			device_printf(sc->sc_dev,
			    "MAC TX fault, status %x\n", status2);
	}

	if (__predict_false(status & CAS_INTR_RX_MAC_INT)) {
		status2 = CAS_READ_4(sc, CAS_MAC_RX_STATUS);
		if ((status2 & CAS_MAC_RX_OVERFLOW) != 0)
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		else if ((status2 & ~CAS_MAC_RX_FRAME_RCVD) != 0)
			device_printf(sc->sc_dev,
			    "MAC RX fault, status %x\n", status2);
	}

	if ((status &
	    (CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_COMP_FULL |
	    CAS_INTR_RX_BUF_AEMPTY | CAS_INTR_RX_COMP_AFULL)) != 0) {
		cas_rint(sc);
#ifdef CAS_DEBUG
		if (__predict_false((status &
		    (CAS_INTR_RX_BUF_NA | CAS_INTR_RX_COMP_FULL |
		    CAS_INTR_RX_BUF_AEMPTY | CAS_INTR_RX_COMP_AFULL)) != 0))
			device_printf(sc->sc_dev,
			    "RX fault, status %x\n", status);
#endif
	}

	if ((status &
	    (CAS_INTR_TX_INT_ME | CAS_INTR_TX_ALL | CAS_INTR_TX_DONE)) != 0)
		cas_tint(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		CAS_UNLOCK(sc);
		return;
	} else if (!if_sendq_empty(ifp))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
	CAS_UNLOCK(sc);

	/* More work arrived in the meantime?  Re-queue ourselves. */
	status = CAS_READ_4(sc, CAS_STATUS_ALIAS);
	if (__predict_false((status & CAS_INTR_SUMMARY) != 0)) {
		taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
		return;
	}

done:
	/* Re-enable interrupts. */
	CAS_WRITE_4(sc, CAS_INTMASK,
	    ~(CAS_INTR_TX_INT_ME | CAS_INTR_TX_TAG_ERR |
	    CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_TAG_ERR |
	    CAS_INTR_RX_COMP_FULL | CAS_INTR_RX_BUF_AEMPTY |
	    CAS_INTR_RX_COMP_AFULL | CAS_INTR_RX_LEN_MMATCH |
	    CAS_INTR_PCI_ERROR_INT
#ifdef CAS_DEBUG
	    | CAS_INTR_PCS_INT | CAS_INTR_MIF
#endif
	    ));
}
/*
 * TX watchdog: counts sc_wdog_timer down; when it reaches zero the chip
 * is assumed to be wedged and is reinitialized.  A timer value of 0
 * means the watchdog is disarmed.  Called with the softc lock held.
 */
static void
cas_watchdog(struct cas_softc *sc)
{
	if_t ifp = sc->sc_ifp;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

#ifdef CAS_DEBUG
	CTR4(KTR_CAS,
	    "%s: CAS_RX_CONF %x CAS_MAC_RX_STATUS %x CAS_MAC_RX_CONF %x",
	    __func__, CAS_READ_4(sc, CAS_RX_CONF),
	    CAS_READ_4(sc, CAS_MAC_RX_STATUS),
	    CAS_READ_4(sc, CAS_MAC_RX_CONF));
	CTR4(KTR_CAS,
	    "%s: CAS_TX_CONF %x CAS_MAC_TX_STATUS %x CAS_MAC_TX_CONF %x",
	    __func__, CAS_READ_4(sc, CAS_TX_CONF),
	    CAS_READ_4(sc, CAS_MAC_TX_STATUS),
	    CAS_READ_4(sc, CAS_MAC_TX_CONF));
#endif

	/* Disarmed, or still counting down: nothing to do yet. */
	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
		return;

	if ((sc->sc_flags & CAS_LINK) != 0)
		device_printf(sc->sc_dev, "device timeout\n");
	else if (bootverbose)
		device_printf(sc->sc_dev, "device timeout (no link)\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

	/* Try to get more packets going. */
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	cas_init_locked(sc);
	if (!if_sendq_empty(ifp))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
}
/*
 * Put the MIF (MII interface) into frame mode by clearing the bit-bang
 * mode bit; the register barrier orders the write against subsequent
 * MIF frame accesses.
 */
static void
cas_mifinit(struct cas_softc *sc)
{

	/* Configure the MIF in frame mode. */
	CAS_WRITE_4(sc, CAS_MIF_CONF,
	    CAS_READ_4(sc, CAS_MIF_CONF) & ~CAS_MIF_CONF_BB_MODE);
	CAS_BARRIER(sc, CAS_MIF_CONF, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
/*
* MII interface
*
* The MII interface supports at least three different operating modes:
*
* Bitbang mode is implemented using data, clock and output enable registers.
*
* Frame mode is implemented by loading a complete frame into the frame
* register and polling the valid bit for completion.
*
* Polling mode uses the frame register but completion is indicated by
* an interrupt.
*
*/
/*
 * miibus read-register method.  In SERDES mode the generic MII register
 * numbers are translated to the equivalent on-chip PCS registers and
 * read directly; otherwise a MIF frame-mode transaction is issued and
 * polled (up to ~100us) for completion.  Returns 0 on unhandled
 * registers or timeout.
 */
static int
cas_mii_readreg(device_t dev, int phy, int reg)
{
	struct cas_softc *sc;
	int n;
	uint32_t v;

#ifdef CAS_DEBUG_PHY
	printf("%s: phy %d reg %d\n", __func__, phy, reg);
#endif

	sc = device_get_softc(dev);
	if ((sc->sc_flags & CAS_SERDES) != 0) {
		switch (reg) {
		case MII_BMCR:
			reg = CAS_PCS_CTRL;
			break;
		case MII_BMSR:
			reg = CAS_PCS_STATUS;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			/* No PHY ID for the internal PCS. */
			return (0);
		case MII_ANAR:
			reg = CAS_PCS_ANAR;
			break;
		case MII_ANLPAR:
			reg = CAS_PCS_ANLPAR;
			break;
		case MII_EXTSR:
			/* The PCS always supports 1000BASE-X FDX/HDX. */
			return (EXTSR_1000XFDX | EXTSR_1000XHDX);
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		return (CAS_READ_4(sc, reg));
	}

	/* Construct the frame command. */
	v = CAS_MIF_FRAME_READ |
	    (phy << CAS_MIF_FRAME_PHY_SHFT) |
	    (reg << CAS_MIF_FRAME_REG_SHFT);

	CAS_WRITE_4(sc, CAS_MIF_FRAME, v);
	CAS_BARRIER(sc, CAS_MIF_FRAME, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = CAS_READ_4(sc, CAS_MIF_FRAME);
		/* TA_LSB signals frame completion; data is in the low bits. */
		if (v & CAS_MIF_FRAME_TA_LSB)
			return (v & CAS_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
	return (0);
}
/*
 * miibus write-register method.  In SERDES mode the generic MII register
 * numbers are translated to the equivalent on-chip PCS registers; a BMCR
 * reset additionally resets the PCS, and ANAR writes reprogram the whole
 * PCS/SERDES autonegotiation setup.  Otherwise a MIF frame-mode write is
 * issued and polled (up to ~100us) for completion.
 *
 * Returns 1 on a completed MIF frame write, 0 otherwise (PCS path,
 * unhandled register, or timeout).
 */
static int
cas_mii_writereg(device_t dev, int phy, int reg, int val)
{
	struct cas_softc *sc;
	int n;
	uint32_t v;

#ifdef CAS_DEBUG_PHY
	/*
	 * Bug fix: the arguments were previously passed as
	 * (phy, reg, val, __func__), mismatching the format string
	 * (an int was consumed by %s -- undefined behavior).
	 */
	printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val);
#endif

	sc = device_get_softc(dev);
	if ((sc->sc_flags & CAS_SERDES) != 0) {
		switch (reg) {
		case MII_BMSR:
			reg = CAS_PCS_STATUS;
			break;
		case MII_BMCR:
			reg = CAS_PCS_CTRL;
			if ((val & CAS_PCS_CTRL_RESET) == 0)
				break;
			CAS_WRITE_4(sc, CAS_PCS_CTRL, val);
			CAS_BARRIER(sc, CAS_PCS_CTRL, 4,
			    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
			if (!cas_bitwait(sc, CAS_PCS_CTRL,
			    CAS_PCS_CTRL_RESET, 0))
				device_printf(sc->sc_dev,
				    "cannot reset PCS\n");
			/* FALLTHROUGH */
		case MII_ANAR:
			/* Disable the PCS while reprogramming it. */
			CAS_WRITE_4(sc, CAS_PCS_CONF, 0);
			CAS_BARRIER(sc, CAS_PCS_CONF, 4,
			    BUS_SPACE_BARRIER_WRITE);
			CAS_WRITE_4(sc, CAS_PCS_ANAR, val);
			CAS_BARRIER(sc, CAS_PCS_ANAR, 4,
			    BUS_SPACE_BARRIER_WRITE);
			CAS_WRITE_4(sc, CAS_PCS_SERDES_CTRL,
			    CAS_PCS_SERDES_CTRL_ESD);
			CAS_BARRIER(sc, CAS_PCS_CONF, 4,
			    BUS_SPACE_BARRIER_WRITE);
			CAS_WRITE_4(sc, CAS_PCS_CONF,
			    CAS_PCS_CONF_EN);
			CAS_BARRIER(sc, CAS_PCS_CONF, 4,
			    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
			return (0);
		case MII_ANLPAR:
			reg = CAS_PCS_ANLPAR;
			break;
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		CAS_WRITE_4(sc, reg, val);
		CAS_BARRIER(sc, reg, 4,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
		return (0);
	}

	/* Construct the frame command. */
	v = CAS_MIF_FRAME_WRITE |
	    (phy << CAS_MIF_FRAME_PHY_SHFT) |
	    (reg << CAS_MIF_FRAME_REG_SHFT) |
	    (val & CAS_MIF_FRAME_DATA);

	CAS_WRITE_4(sc, CAS_MIF_FRAME, v);
	CAS_BARRIER(sc, CAS_MIF_FRAME, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = CAS_READ_4(sc, CAS_MIF_FRAME);
		/* TA_LSB signals frame completion. */
		if (v & CAS_MIF_FRAME_TA_LSB)
			return (1);
	}

	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
	return (0);
}
/*
 * miibus status-change method: reprogram the MACs to match the media
 * currently negotiated by the PHY/PCS (duplex, gigabit, flow control).
 *
 * The configuration done here corresponds to the steps F) and G) and,
 * as far as enabling of the RX and TX MAC goes, also step H) of the
 * initialization sequence outlined in section 11.2.1 of the Cassini+
 * ASIC Specification.  Called with the softc lock held.
 */
static void
cas_mii_statchg(device_t dev)
{
	struct cas_softc *sc;
	if_t ifp;
	int gigabit;
	uint32_t rxcfg, txcfg, v;

	sc = device_get_softc(dev);
	ifp = sc->sc_ifp;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

#ifdef CAS_DEBUG
	if ((if_getflags(ifp) & IFF_DEBUG) != 0)
		/* Bug fix: message was "status changen" (broken \n escape). */
		device_printf(sc->sc_dev, "%s: status change\n", __func__);
#endif

	/* Track link state; IFM_NONE means no usable media was selected. */
	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
	    IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
		sc->sc_flags |= CAS_LINK;
	else
		sc->sc_flags &= ~CAS_LINK;

	switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
	case IFM_1000_SX:
	case IFM_1000_LX:
	case IFM_1000_CX:
	case IFM_1000_T:
		gigabit = 1;
		break;
	default:
		gigabit = 0;
	}

	rxcfg = sc->sc_mac_rxcfg;
	rxcfg &= ~CAS_MAC_RX_CONF_CARR;
	txcfg = CAS_MAC_TX_CONF_EN_IPG0 | CAS_MAC_TX_CONF_NGU |
	    CAS_MAC_TX_CONF_NGUL;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		/* Full duplex: ignore carrier sense and collisions. */
		txcfg |= CAS_MAC_TX_CONF_ICARR | CAS_MAC_TX_CONF_ICOLLIS;
	else if (gigabit != 0) {
		rxcfg |= CAS_MAC_RX_CONF_CARR;
		txcfg |= CAS_MAC_TX_CONF_CARR;
	}
	/* The MACs must be disabled while their configuration changes. */
	(void)cas_disable_tx(sc);
	CAS_WRITE_4(sc, CAS_MAC_TX_CONF, txcfg);
	(void)cas_disable_rx(sc);
	CAS_WRITE_4(sc, CAS_MAC_RX_CONF, rxcfg);

	/* Flow-control (PAUSE) configuration. */
	v = CAS_READ_4(sc, CAS_MAC_CTRL_CONF) &
	    ~(CAS_MAC_CTRL_CONF_TXP | CAS_MAC_CTRL_CONF_RXP);
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_RXPAUSE) != 0)
		v |= CAS_MAC_CTRL_CONF_RXP;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_TXPAUSE) != 0)
		v |= CAS_MAC_CTRL_CONF_TXP;
	CAS_WRITE_4(sc, CAS_MAC_CTRL_CONF, v);

	/*
	 * All supported chips have a bug causing incorrect checksum
	 * to be calculated when letting them strip the FCS in half-
	 * duplex mode.  In theory we could disable FCS stripping and
	 * manually adjust the checksum accordingly.  It seems to make
	 * more sense to optimze for the common case and just disable
	 * hardware checksumming in half-duplex mode though.
	 */
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0) {
		if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
		if_sethwassist(ifp, 0);
	} else if ((sc->sc_flags & CAS_NO_CSUM) == 0) {
		if_setcapenable(ifp, if_getcapabilities(ifp));
		if_sethwassist(ifp, CAS_CSUM_FEATURES);
	}

	if (sc->sc_variant == CAS_SATURN) {
		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0)
			/* silicon bug workaround */
			CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x41);
		else
			CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x7);
	}

	/* Half-duplex gigabit needs carrier extension (longer slot time). */
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
	    gigabit != 0)
		CAS_WRITE_4(sc, CAS_MAC_SLOT_TIME,
		    CAS_MAC_SLOT_TIME_CARR);
	else
		CAS_WRITE_4(sc, CAS_MAC_SLOT_TIME,
		    CAS_MAC_SLOT_TIME_NORM);

	/* XIF Configuration */
	v = CAS_MAC_XIF_CONF_TX_OE | CAS_MAC_XIF_CONF_LNKLED;
	if ((sc->sc_flags & CAS_SERDES) == 0) {
		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0)
			v |= CAS_MAC_XIF_CONF_NOECHO;
		v |= CAS_MAC_XIF_CONF_BUF_OE;
	}
	if (gigabit != 0)
		v |= CAS_MAC_XIF_CONF_GMII;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		v |= CAS_MAC_XIF_CONF_FDXLED;
	CAS_WRITE_4(sc, CAS_MAC_XIF_CONF, v);

	/* Re-enable the MACs only when running and the link is up. */
	sc->sc_mac_rxcfg = rxcfg;
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
	    (sc->sc_flags & CAS_LINK) != 0) {
		CAS_WRITE_4(sc, CAS_MAC_TX_CONF,
		    txcfg | CAS_MAC_TX_CONF_EN);
		CAS_WRITE_4(sc, CAS_MAC_RX_CONF,
		    rxcfg | CAS_MAC_RX_CONF_EN);
	}
}
/*
 * ifmedia change callback: forward the request to the MII layer under
 * the softc lock and propagate its result.
 */
static int
cas_mediachange(if_t ifp)
{
	struct cas_softc *sc;
	int rv;

	sc = if_getsoftc(ifp);

	/* XXX add support for serial media. */

	CAS_LOCK(sc);
	rv = mii_mediachg(sc->sc_mii);
	CAS_UNLOCK(sc);
	return (rv);
}
/*
 * ifmedia status callback: report the current media word and link
 * status.  Nothing is reported while the interface is down.
 */
static void
cas_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
	struct cas_softc *sc;

	sc = if_getsoftc(ifp);
	CAS_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		mii_pollstat(sc->sc_mii);
		ifmr->ifm_active = sc->sc_mii->mii_media_active;
		ifmr->ifm_status = sc->sc_mii->mii_media_status;
	}
	CAS_UNLOCK(sc);
}
/*
 * ioctl handler: interface flags, capability toggles, multicast filter
 * updates, MTU changes and media requests; everything else falls through
 * to ether_ioctl().
 */
static int
cas_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct cas_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	int error;

	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		CAS_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			/*
			 * If only PROMISC/ALLMULTI changed on a running
			 * interface, just reprogram the RX filter;
			 * otherwise do a full (re)initialization.
			 */
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
			    ((if_getflags(ifp) ^ sc->sc_ifflags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				cas_setladrf(sc);
			else
				cas_init_locked(sc);
		} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			cas_stop(ifp);
		sc->sc_ifflags = if_getflags(ifp);
		CAS_UNLOCK(sc);
		break;
	case SIOCSIFCAP:
		CAS_LOCK(sc);
		if ((sc->sc_flags & CAS_NO_CSUM) != 0) {
			/* HW checksumming is disabled on this variant. */
			error = EINVAL;
			CAS_UNLOCK(sc);
			break;
		}
		if_setcapenable(ifp, ifr->ifr_reqcap);
		if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
			if_sethwassist(ifp, CAS_CSUM_FEATURES);
		else
			if_sethwassist(ifp, 0);
		CAS_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		CAS_LOCK(sc);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			cas_setladrf(sc);
		CAS_UNLOCK(sc);
		break;
	case SIOCSIFMTU:
		if ((ifr->ifr_mtu < ETHERMIN) ||
		    (ifr->ifr_mtu > ETHERMTU_JUMBO))
			error = EINVAL;
		else
			if_setmtu(ifp, ifr->ifr_mtu);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
/*
 * if_foreach_llmaddr() callback: fold one multicast link-level address
 * into the 256-bit hash filter passed in via @arg.
 */
static u_int
cas_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *filter, msb;

	filter = arg;
	/* CRC the address and keep only the 8 most significant bits. */
	msb = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN) >> 24;
	/* High nibble selects the word, low nibble the bit (bit 0 = MSB). */
	filter[msb >> 4] |= 1 << (15 - (msb & 15));
	return (1);
}
/*
 * Program the RX MAC address filter: promiscuous, all-multicast, or the
 * 256-bit multicast hash filter built from the current link-level
 * multicast list.  Called with the softc lock held.
 */
static void
cas_setladrf(struct cas_softc *sc)
{
	if_t ifp = sc->sc_ifp;
	int i;
	uint32_t hash[16];
	uint32_t v;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Turn off the RX MAC and the hash filter as required by the Sun
	 * Cassini programming restrictions.
	 */
	v = sc->sc_mac_rxcfg & ~(CAS_MAC_RX_CONF_HFILTER |
	    CAS_MAC_RX_CONF_EN);
	CAS_WRITE_4(sc, CAS_MAC_RX_CONF, v);
	CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_HFILTER |
	    CAS_MAC_RX_CONF_EN, 0))
		device_printf(sc->sc_dev,
		    "cannot disable RX MAC or hash filter\n");

	v &= ~(CAS_MAC_RX_CONF_PROMISC | CAS_MAC_RX_CONF_PGRP);
	if ((if_getflags(ifp) & IFF_PROMISC) != 0) {
		v |= CAS_MAC_RX_CONF_PROMISC;
		goto chipit;
	}
	if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
		v |= CAS_MAC_RX_CONF_PGRP;
		goto chipit;
	}

	/*
	 * Set up multicast address filter by passing all multicast
	 * addresses through a crc generator, and then using the high
	 * order 8 bits as an index into the 256 bit logical address
	 * filter.  The high order 4 bits selects the word, while the
	 * other 4 bits select the bit within the word (where bit 0
	 * is the MSB).
	 */

	memset(hash, 0, sizeof(hash));
	if_foreach_llmaddr(ifp, cas_hash_maddr, &hash);

	v |= CAS_MAC_RX_CONF_HFILTER;

	/* Now load the hash table into the chip (if we are using it). */
	for (i = 0; i < 16; i++)
		CAS_WRITE_4(sc,
		    CAS_MAC_HASH0 + i * (CAS_MAC_HASH1 - CAS_MAC_HASH0),
		    hash[i]);

chipit:
	/* Remember the new configuration and re-enable the RX MAC. */
	sc->sc_mac_rxcfg = v;
	CAS_WRITE_4(sc, CAS_MAC_RX_CONF, v | CAS_MAC_RX_CONF_EN);
}
static int cas_pci_attach(device_t dev);
static int cas_pci_detach(device_t dev);
static int cas_pci_probe(device_t dev);
static int cas_pci_resume(device_t dev);
static int cas_pci_suspend(device_t dev);
static device_method_t cas_pci_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, cas_pci_probe),
DEVMETHOD(device_attach, cas_pci_attach),
DEVMETHOD(device_detach, cas_pci_detach),
DEVMETHOD(device_suspend, cas_pci_suspend),
DEVMETHOD(device_resume, cas_pci_resume),
/* Use the suspend handler here, it is all that is required. */
DEVMETHOD(device_shutdown, cas_pci_suspend),
/* MII interface */
DEVMETHOD(miibus_readreg, cas_mii_readreg),
DEVMETHOD(miibus_writereg, cas_mii_writereg),
DEVMETHOD(miibus_statchg, cas_mii_statchg),
DEVMETHOD_END
};
static driver_t cas_pci_driver = {
"cas",
cas_pci_methods,
sizeof(struct cas_softc)
};
/*
 * Supported devices, matched by exact PCI device ID and a minimum PCI
 * revision ID (see cas_pci_probe()/cas_pci_attach()).  The Cassini+
 * entry (revid >= 0x10) must precede the base Cassini entry sharing the
 * same device ID so it is matched first.
 */
static const struct cas_pci_dev {
	uint32_t cpd_devid;	/* PCI vendor/device ID word */
	uint8_t cpd_revid;	/* minimum PCI revision ID */
	int cpd_variant;	/* CAS_SATURN / CAS_CASPLUS / CAS_CAS */
	const char *cpd_desc;	/* probe description */
} cas_pci_devlist[] = {
	{ 0x0035100b, 0x0, CAS_SATURN, "NS DP83065 Saturn Gigabit Ethernet" },
	{ 0xabba108e, 0x10, CAS_CASPLUS, "Sun Cassini+ Gigabit Ethernet" },
	{ 0xabba108e, 0x0, CAS_CAS, "Sun Cassini Gigabit Ethernet" },
	{ 0, 0, 0, NULL }
};
DRIVER_MODULE(cas, pci, cas_pci_driver, 0, 0);
MODULE_PNP_INFO("W32:vendor/device", pci, cas, cas_pci_devlist,
nitems(cas_pci_devlist) - 1);
DRIVER_MODULE(miibus, cas, miibus_driver, 0, 0);
MODULE_DEPEND(cas, pci, 1, 1, 1);
/*
 * PCI probe: walk the device list and accept the first entry whose
 * device ID matches and whose minimum revision is satisfied.
 */
static int
cas_pci_probe(device_t dev)
{
	const struct cas_pci_dev *cpd;

	for (cpd = cas_pci_devlist; cpd->cpd_desc != NULL; cpd++) {
		if (pci_get_devid(dev) == cpd->cpd_devid &&
		    pci_get_revid(dev) >= cpd->cpd_revid) {
			device_set_desc(dev, cpd->cpd_desc);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}
static struct resource_spec cas_pci_res_spec[] = {
{ SYS_RES_IRQ, 0, RF_SHAREABLE | RF_ACTIVE }, /* CAS_RES_INTR */
{ SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, /* CAS_RES_MEM */
{ -1, 0 }
};
#define CAS_LOCAL_MAC_ADDRESS "local-mac-address"
#define CAS_PHY_INTERFACE "phy-interface"
#define CAS_PHY_TYPE "phy-type"
#define CAS_PHY_TYPE_PCS "pcs"
/*
 * PCI attach: identify the chip variant, set quirk flags, allocate bus
 * resources, determine the MAC address and PHY type (from Open Firmware
 * on powerpc, otherwise from the VPD in the PCI Expansion ROM), then do
 * the bus-independent attach and hook up the interrupt.
 */
static int
cas_pci_attach(device_t dev)
{
	char buf[sizeof(CAS_LOCAL_MAC_ADDRESS)];
	struct cas_softc *sc;
	int i;
#if !defined(__powerpc__)
	u_char enaddr[4][ETHER_ADDR_LEN];
	u_int j, k, lma, pcs[4], phy;
#endif

	sc = device_get_softc(dev);
	sc->sc_variant = CAS_UNKNOWN;
	for (i = 0; cas_pci_devlist[i].cpd_desc != NULL; i++) {
		if (pci_get_devid(dev) == cas_pci_devlist[i].cpd_devid &&
		    pci_get_revid(dev) >= cas_pci_devlist[i].cpd_revid) {
			sc->sc_variant = cas_pci_devlist[i].cpd_variant;
			break;
		}
	}
	if (sc->sc_variant == CAS_UNKNOWN) {
		device_printf(dev, "unknown adaptor\n");
		return (ENXIO);
	}

	/* PCI configuration */
	pci_write_config(dev, PCIR_COMMAND,
	    pci_read_config(dev, PCIR_COMMAND, 2) | PCIM_CMD_BUSMASTEREN |
	    PCIM_CMD_MWRICEN | PCIM_CMD_PERRESPEN | PCIM_CMD_SERRESPEN, 2);

	sc->sc_dev = dev;
	/*
	 * Bug fix: this quirk check previously compared pci_get_devid()
	 * (the 32-bit vendor/device word, never < 2) instead of the
	 * revision ID, so CAS_NO_CSUM was never set for early Cassini
	 * revisions.
	 */
	if (sc->sc_variant == CAS_CAS && pci_get_revid(dev) < 0x02)
		/* Hardware checksumming may hang TX. */
		sc->sc_flags |= CAS_NO_CSUM;
	if (sc->sc_variant == CAS_CASPLUS || sc->sc_variant == CAS_SATURN)
		sc->sc_flags |= CAS_REG_PLUS;
	if (sc->sc_variant == CAS_CAS ||
	    (sc->sc_variant == CAS_CASPLUS && pci_get_revid(dev) < 0x11))
		sc->sc_flags |= CAS_TABORT;
	if (bootverbose)
		device_printf(dev, "flags=0x%x\n", sc->sc_flags);

	if (bus_alloc_resources(dev, cas_pci_res_spec, sc->sc_res)) {
		device_printf(dev, "failed to allocate resources\n");
		bus_release_resources(dev, cas_pci_res_spec, sc->sc_res);
		return (ENXIO);
	}

	CAS_LOCK_INIT(sc, device_get_nameunit(dev));

#if defined(__powerpc__)
	OF_getetheraddr(dev, sc->sc_enaddr);
	if (OF_getprop(ofw_bus_get_node(dev), CAS_PHY_INTERFACE, buf,
	    sizeof(buf)) > 0 || OF_getprop(ofw_bus_get_node(dev),
	    CAS_PHY_TYPE, buf, sizeof(buf)) > 0) {
		buf[sizeof(buf) - 1] = '\0';
		if (strcmp(buf, CAS_PHY_TYPE_PCS) == 0)
			sc->sc_flags |= CAS_SERDES;
	}
#else
	/*
	 * Dig out VPD (vital product data) and read the MAC address as well
	 * as the PHY type.  The VPD resides in the PCI Expansion ROM (PCI
	 * FCode) and can't be accessed via the PCI capability pointer.
	 * SUNW,pci-ce and SUNW,pci-qge use the Enhanced VPD format described
	 * in the free US Patent 7149820.
	 */
#define	PCI_ROMHDR_SIZE			0x1c
#define	PCI_ROMHDR_SIG			0x00
#define	PCI_ROMHDR_SIG_MAGIC		0xaa55		/* little endian */
#define	PCI_ROMHDR_PTR_DATA		0x18
#define	PCI_ROM_SIZE			0x18
#define	PCI_ROM_SIG			0x00
#define	PCI_ROM_SIG_MAGIC		0x52494350	/* "PCIR", endian */
							/* reversed */
#define	PCI_ROM_VENDOR			0x04
#define	PCI_ROM_DEVICE			0x06
#define	PCI_ROM_PTR_VPD			0x08
#define	PCI_VPDRES_BYTE0		0x00
#define	PCI_VPDRES_ISLARGE(x)		((x) & 0x80)
#define	PCI_VPDRES_LARGE_NAME(x)	((x) & 0x7f)
#define	PCI_VPDRES_LARGE_LEN_LSB	0x01
#define	PCI_VPDRES_LARGE_LEN_MSB	0x02
#define	PCI_VPDRES_LARGE_SIZE		0x03
#define	PCI_VPDRES_TYPE_ID_STRING	0x02		/* large */
#define	PCI_VPDRES_TYPE_VPD		0x10		/* large */
#define	PCI_VPD_KEY0			0x00
#define	PCI_VPD_KEY1			0x01
#define	PCI_VPD_LEN			0x02
#define	PCI_VPD_SIZE			0x03

#define	CAS_ROM_READ_1(sc, offs)					\
	CAS_READ_1((sc), CAS_PCI_ROM_OFFSET + (offs))
#define	CAS_ROM_READ_2(sc, offs)					\
	CAS_READ_2((sc), CAS_PCI_ROM_OFFSET + (offs))
#define	CAS_ROM_READ_4(sc, offs)					\
	CAS_READ_4((sc), CAS_PCI_ROM_OFFSET + (offs))

	lma = phy = 0;
	memset(enaddr, 0, sizeof(enaddr));
	memset(pcs, 0, sizeof(pcs));

	/* Enable PCI Expansion ROM access. */
	CAS_WRITE_4(sc, CAS_BIM_LDEV_OEN,
	    CAS_BIM_LDEV_OEN_PAD | CAS_BIM_LDEV_OEN_PROM);

	/* Read PCI Expansion ROM header. */
	if (CAS_ROM_READ_2(sc, PCI_ROMHDR_SIG) != PCI_ROMHDR_SIG_MAGIC ||
	    (i = CAS_ROM_READ_2(sc, PCI_ROMHDR_PTR_DATA)) <
	    PCI_ROMHDR_SIZE) {
		device_printf(dev, "unexpected PCI Expansion ROM header\n");
		goto fail_prom;
	}

	/* Read PCI Expansion ROM data. */
	if (CAS_ROM_READ_4(sc, i + PCI_ROM_SIG) != PCI_ROM_SIG_MAGIC ||
	    CAS_ROM_READ_2(sc, i + PCI_ROM_VENDOR) != pci_get_vendor(dev) ||
	    CAS_ROM_READ_2(sc, i + PCI_ROM_DEVICE) != pci_get_device(dev) ||
	    (j = CAS_ROM_READ_2(sc, i + PCI_ROM_PTR_VPD)) <
	    i + PCI_ROM_SIZE) {
		device_printf(dev, "unexpected PCI Expansion ROM data\n");
		goto fail_prom;
	}

	/* Read PCI VPD. */
 next:
	if (PCI_VPDRES_ISLARGE(CAS_ROM_READ_1(sc,
	    j + PCI_VPDRES_BYTE0)) == 0) {
		device_printf(dev, "no large PCI VPD\n");
		goto fail_prom;
	}

	i = (CAS_ROM_READ_1(sc, j + PCI_VPDRES_LARGE_LEN_MSB) << 8) |
	    CAS_ROM_READ_1(sc, j + PCI_VPDRES_LARGE_LEN_LSB);
	switch (PCI_VPDRES_LARGE_NAME(CAS_ROM_READ_1(sc,
	    j + PCI_VPDRES_BYTE0))) {
	case PCI_VPDRES_TYPE_ID_STRING:
		/* Skip identifier string. */
		j += PCI_VPDRES_LARGE_SIZE + i;
		goto next;
	case PCI_VPDRES_TYPE_VPD:
		/*
		 * Walk the VPD keywords looking for Enhanced VPD ('Z')
		 * instance ('I') properties: "local-mac-address" byte
		 * arrays and "phy-interface"/"phy-type" strings, one
		 * per network function (up to 4).
		 */
		for (j += PCI_VPDRES_LARGE_SIZE; i > 0;
		    i -= PCI_VPD_SIZE + CAS_ROM_READ_1(sc, j + PCI_VPD_LEN),
		    j += PCI_VPD_SIZE + CAS_ROM_READ_1(sc, j + PCI_VPD_LEN)) {
			if (CAS_ROM_READ_1(sc, j + PCI_VPD_KEY0) != 'Z')
				/* no Enhanced VPD */
				continue;
			if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE) != 'I')
				/* no instance property */
				continue;
			if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE + 3) == 'B') {
				/* byte array */
				if (CAS_ROM_READ_1(sc,
				    j + PCI_VPD_SIZE + 4) != ETHER_ADDR_LEN)
					continue;
				bus_read_region_1(sc->sc_res[CAS_RES_MEM],
				    CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 5,
				    buf, sizeof(buf));
				buf[sizeof(buf) - 1] = '\0';
				if (strcmp(buf, CAS_LOCAL_MAC_ADDRESS) != 0)
					continue;
				bus_read_region_1(sc->sc_res[CAS_RES_MEM],
				    CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE +
				    5 + sizeof(CAS_LOCAL_MAC_ADDRESS),
				    enaddr[lma], sizeof(enaddr[lma]));
				lma++;
				if (lma == 4 && phy == 4)
					break;
			} else if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE + 3) ==
			    'S') {
				/* string */
				if (CAS_ROM_READ_1(sc,
				    j + PCI_VPD_SIZE + 4) !=
				    sizeof(CAS_PHY_TYPE_PCS))
					continue;
				bus_read_region_1(sc->sc_res[CAS_RES_MEM],
				    CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 5,
				    buf, sizeof(buf));
				buf[sizeof(buf) - 1] = '\0';
				if (strcmp(buf, CAS_PHY_INTERFACE) == 0)
					k = sizeof(CAS_PHY_INTERFACE);
				else if (strcmp(buf, CAS_PHY_TYPE) == 0)
					k = sizeof(CAS_PHY_TYPE);
				else
					continue;
				bus_read_region_1(sc->sc_res[CAS_RES_MEM],
				    CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE +
				    5 + k, buf, sizeof(buf));
				buf[sizeof(buf) - 1] = '\0';
				if (strcmp(buf, CAS_PHY_TYPE_PCS) == 0)
					pcs[phy] = 1;
				phy++;
				if (lma == 4 && phy == 4)
					break;
			}
		}
		break;
	default:
		device_printf(dev, "unexpected PCI VPD\n");
		goto fail_prom;
	}

 fail_prom:
	CAS_WRITE_4(sc, CAS_BIM_LDEV_OEN, 0);

	if (lma == 0) {
		device_printf(dev, "could not determine Ethernet address\n");
		goto fail;
	}
	/* With multiple functions, select the entry by PCI slot. */
	i = 0;
	if (lma > 1 && pci_get_slot(dev) < nitems(enaddr))
		i = pci_get_slot(dev);
	memcpy(sc->sc_enaddr, enaddr[i], ETHER_ADDR_LEN);

	if (phy == 0) {
		device_printf(dev, "could not determine PHY type\n");
		goto fail;
	}
	i = 0;
	if (phy > 1 && pci_get_slot(dev) < nitems(pcs))
		i = pci_get_slot(dev);
	if (pcs[i] != 0)
		sc->sc_flags |= CAS_SERDES;
#endif

	if (cas_attach(sc) != 0) {
		device_printf(dev, "could not be attached\n");
		goto fail;
	}

	if (bus_setup_intr(dev, sc->sc_res[CAS_RES_INTR], INTR_TYPE_NET |
	    INTR_MPSAFE, cas_intr, NULL, sc, &sc->sc_ih) != 0) {
		device_printf(dev, "failed to set up interrupt\n");
		cas_detach(sc);
		goto fail;
	}
	return (0);

 fail:
	CAS_LOCK_DESTROY(sc);
	bus_release_resources(dev, cas_pci_res_spec, sc->sc_res);
	return (ENXIO);
}
/*
 * PCI detach: tear down the interrupt first so no handler can run during
 * the bus-independent detach, then release the lock and bus resources.
 */
static int
cas_pci_detach(device_t dev)
{
	struct cas_softc *sc;

	sc = device_get_softc(dev);
	bus_teardown_intr(dev, sc->sc_res[CAS_RES_INTR], sc->sc_ih);
	cas_detach(sc);
	CAS_LOCK_DESTROY(sc);
	bus_release_resources(dev, cas_pci_res_spec, sc->sc_res);
	return (0);
}
/* PCI suspend (also used as the shutdown method): always succeeds. */
static int
cas_pci_suspend(device_t dev)
{
	struct cas_softc *sc;

	sc = device_get_softc(dev);
	cas_suspend(sc);
	return (0);
}
/* PCI resume: restore the chip via the bus-independent layer. */
static int
cas_pci_resume(device_t dev)
{
	struct cas_softc *sc;

	sc = device_get_softc(dev);
	cas_resume(sc);
	return (0);
}
diff --git a/sys/dev/cxgb/cxgb_main.c b/sys/dev/cxgb/cxgb_main.c
index c5fc067f8312..9bcdb86312c4 100644
--- a/sys/dev/cxgb/cxgb_main.c
+++ b/sys/dev/cxgb/cxgb_main.c
@@ -1,3656 +1,3651 @@
/**************************************************************************
SPDX-License-Identifier: BSD-2-Clause
Copyright (c) 2007-2009, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include <sys/cdefs.h>
#include "opt_inet.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/pciio.h>
#include <sys/conf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/ktr.h>
#include <sys/rman.h>
#include <sys/ioccom.h>
#include <sys/mbuf.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/proc.h>
#include <net/bpf.h>
#include <net/debugnet.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <cxgb_include.h>
#ifdef PRIV_SUPPORTED
#include <sys/priv.h>
#endif
static int cxgb_setup_interrupts(adapter_t *);
static void cxgb_teardown_interrupts(adapter_t *);
static void cxgb_init(void *);
static int cxgb_init_locked(struct port_info *);
static int cxgb_uninit_locked(struct port_info *);
static int cxgb_uninit_synchronized(struct port_info *);
static int cxgb_ioctl(if_t, unsigned long, caddr_t);
static int cxgb_media_change(if_t);
static int cxgb_ifm_type(int);
static void cxgb_build_medialist(struct port_info *);
static void cxgb_media_status(if_t, struct ifmediareq *);
static uint64_t cxgb_get_counter(if_t, ift_counter);
static int setup_sge_qsets(adapter_t *);
static void cxgb_async_intr(void *);
static void cxgb_tick_handler(void *, int);
static void cxgb_tick(void *);
static void link_check_callout(void *);
static void check_link_status(void *, int);
static void setup_rss(adapter_t *sc);
static int alloc_filters(struct adapter *);
static int setup_hw_filters(struct adapter *);
static int set_filter(struct adapter *, int, const struct filter_info *);
static inline void mk_set_tcb_field(struct cpl_set_tcb_field *, unsigned int,
unsigned int, u64, u64);
static inline void set_tcb_field_ulp(struct cpl_set_tcb_field *, unsigned int,
unsigned int, u64, u64);
#ifdef TCP_OFFLOAD
static int cpl_not_handled(struct sge_qset *, struct rsp_desc *, struct mbuf *);
#endif
/* Attachment glue for the PCI controller end of the device. Each port of
* the device is attached separately, as defined later.
*/
static int cxgb_controller_probe(device_t);
static int cxgb_controller_attach(device_t);
static int cxgb_controller_detach(device_t);
static void cxgb_free(struct adapter *);
static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
unsigned int end);
static void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf);
static int cxgb_get_regs_len(void);
static void touch_bars(device_t dev);
static void cxgb_update_mac_settings(struct port_info *p);
#ifdef TCP_OFFLOAD
static int toe_capability(struct port_info *, int);
#endif
/* Table for probing the cards. The desc field isn't actually used */
struct cxgb_ident {
	uint16_t vendor;	/* PCI vendor ID */
	uint16_t device;	/* PCI device ID */
	int index;		/* presumably selects an adapter-info entry;
				   verify against users -- NOTE(review) */
	char *desc;		/* name; unused per the comment above */
} cxgb_identifiers[] = {
	{PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
	{PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
	{PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
	{PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
	{PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
	{PCI_VENDOR_ID_CHELSIO, 0x0035, 6, "T3C10"},
	{PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"},
	{PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"},
	{0, 0, 0, NULL}
};
/* newbus method table for the PCI controller (parent) device. */
static device_method_t cxgb_controller_methods[] = {
DEVMETHOD(device_probe, cxgb_controller_probe),
DEVMETHOD(device_attach, cxgb_controller_attach),
DEVMETHOD(device_detach, cxgb_controller_detach),
DEVMETHOD_END
};
/* Softc for the controller device is the whole struct adapter. */
static driver_t cxgb_controller_driver = {
"cxgbc",
cxgb_controller_methods,
sizeof(struct adapter)
};
/* Module registration for the controller; PNP info drives autoloading. */
static int cxgbc_mod_event(module_t, int, void *);
DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgbc_mod_event, NULL);
/* nitems - 1: exclude the NULL sentinel row from the PNP table. */
MODULE_PNP_INFO("U16:vendor;U16:device", pci, cxgbc, cxgb_identifiers,
nitems(cxgb_identifiers) - 1);
MODULE_VERSION(cxgbc, 1);
MODULE_DEPEND(cxgbc, firmware, 1, 1, 1);
/*
* Attachment glue for the ports. Attachment is done directly to the
* controller device.
*/
static int cxgb_port_probe(device_t);
static int cxgb_port_attach(device_t);
static int cxgb_port_detach(device_t);
static device_method_t cxgb_port_methods[] = {
DEVMETHOD(device_probe, cxgb_port_probe),
DEVMETHOD(device_attach, cxgb_port_attach),
DEVMETHOD(device_detach, cxgb_port_detach),
{ 0, 0 }
};
/* Port devices carry no softc of their own (softc set by the parent). */
static driver_t cxgb_port_driver = {
"cxgb",
cxgb_port_methods,
0
};
/* Entry points for the per-port control character device (cxgbtool). */
static d_ioctl_t cxgb_extension_ioctl;
static d_open_t cxgb_extension_open;
static d_close_t cxgb_extension_close;
static struct cdevsw cxgb_cdevsw = {
.d_version = D_VERSION,
.d_flags = 0,
.d_open = cxgb_extension_open,
.d_close = cxgb_extension_close,
.d_ioctl = cxgb_extension_ioctl,
.d_name = "cxgb",
};
DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, 0, 0);
MODULE_VERSION(cxgb, 1);
DEBUGNET_DEFINE(cxgb);
/* Global list of all T3 adapters, protected by t3_list_lock. */
static struct mtx t3_list_lock;
static SLIST_HEAD(, adapter) t3_list;
#ifdef TCP_OFFLOAD
/* Registered upper-layer (TOE) drivers, protected by t3_uld_list_lock. */
static struct mtx t3_uld_list_lock;
static SLIST_HEAD(, uld_info) t3_uld_list;
#endif
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi_allowed = 2;
SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"CXGB driver parameters");
SYSCTL_INT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
"MSI-X, MSI, INTx selector");
/*
 * The driver uses an auto-queue algorithm by default.
 * To disable it and force a single queue-set per port, use multiq = 0
 */
static int multiq = 1;
SYSCTL_INT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0,
"use min(ncpus/ports, 8) queue-sets per port");
/*
 * By default the driver will not update the firmware unless
 * it was compiled against a newer version
 */
static int force_fw_update = 0;
SYSCTL_INT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0,
"update firmware even if up to date");
/* -1 presumably means "let the driver decide" -- TODO confirm in sge code */
int cxgb_use_16k_clusters = -1;
SYSCTL_INT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
&cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue ");
/* -1 presumably means "use the hardware default" -- TODO confirm */
static int nfilters = -1;
SYSCTL_INT(_hw_cxgb, OID_AUTO, nfilters, CTLFLAG_RDTUN,
&nfilters, 0, "max number of entries in the filter table");
/* Sanity bounds applied to the tunable queue sizes. */
enum {
MAX_TXQ_ENTRIES = 16384,
MAX_CTRL_TXQ_ENTRIES = 1024,
MAX_RSPQ_ENTRIES = 16384,
MAX_RX_BUFFERS = 16384,
MAX_RX_JUMBO_BUFFERS = 16384,
MIN_TXQ_ENTRIES = 4,
MIN_CTRL_TXQ_ENTRIES = 4,
MIN_RSPQ_ENTRIES = 32,
MIN_FL_ENTRIES = 32,
MIN_FL_JUMBO_ENTRIES = 32
};
/*
 * Software state for one entry of the hardware packet filter table.
 * Programmed into the adapter by set_filter()/setup_hw_filters().
 */
struct filter_info {
u32 sip;		/* source IP */
u32 sip_mask;		/* source IP mask */
u32 dip;		/* destination IP */
u16 sport;		/* source port */
u16 dport;		/* destination port */
u32 vlan:12;		/* VLAN id */
u32 vlan_prio:3;	/* VLAN priority (FILTER_NO_VLAN_PRI = wildcard) */
u32 mac_hit:1;
u32 mac_idx:4;
u32 mac_vld:1;
u32 pkt_type:2;
u32 report_filter_id:1;
u32 pass:1;		/* pass (vs. drop) matching packets */
u32 rss:1;		/* use RSS for matching packets */
u32 qset:3;		/* destination queue set when !rss */
u32 locked:1;		/* entry may not be modified */
u32 valid:1;		/* entry is in use */
};
enum { FILTER_NO_VLAN_PRI = 7 };
#define EEPROM_MAGIC 0x38E2F10C
#define PORT_MASK ((1 << MAX_NPORTS) - 1)
static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);
/*
 * Map the adapter's hardware revision to the single-character suffix
 * used in firmware/SRAM image names; 'z' for unrecognized revisions.
 */
static __inline char
t3rev2char(struct adapter *adapter)
{
    switch (adapter->params.rev) {
    case T3_REV_A:
        return ('a');
    case T3_REV_B:
    case T3_REV_B2:
        return ('b');
    case T3_REV_C:
        return ('c');
    default:
        return ('z');
    }
}
/*
 * Look the device up in cxgb_identifiers by PCI vendor/device ID.
 * Returns the matching entry, or NULL if this is not a supported card.
 */
static struct cxgb_ident *
cxgb_get_ident(device_t dev)
{
    struct cxgb_ident *entry;

    for (entry = cxgb_identifiers; entry->desc != NULL; entry++)
        if (pci_get_vendor(dev) == entry->vendor &&
            pci_get_device(dev) == entry->device)
            return (entry);
    return (NULL);
}
/*
 * Translate a PCI device into its adapter capability description:
 * PCI ID -> ident-table entry -> t3_get_adapter_info(index).
 * Returns NULL for unsupported devices.
 */
static const struct adapter_info *
cxgb_get_adapter_info(device_t dev)
{
    struct cxgb_ident *id;

    id = cxgb_get_ident(dev);
    return (id == NULL ? NULL : t3_get_adapter_info(id->index));
}
/*
 * newbus probe for the controller: recognize the card and set a
 * human-readable description ("<name>, N port(s)").
 */
static int
cxgb_controller_probe(device_t dev)
{
    const struct adapter_info *ai;
    int nports;

    ai = cxgb_get_adapter_info(dev);
    if (ai == NULL)
        return (ENXIO);

    nports = ai->nports0 + ai->nports1;
    device_set_descf(dev, "%s, %d %s", ai->desc, nports,
        nports == 1 ? "port" : "ports");
    return (BUS_PROBE_DEFAULT);
}
#define FW_FNAME "cxgb_t3fw"
#define TPEEPROM_NAME "cxgb_t3%c_tp_eeprom"
#define TPSRAM_NAME "cxgb_t3%c_protocol_sram"
/*
 * Load FW_FNAME via firmware(9) and flash it onto the adapter.
 * On success the new version string is recorded in sc->fw_version.
 * Returns 0 or an errno.
 */
static int
upgrade_fw(adapter_t *sc)
{
const struct firmware *fw;
int status;
u32 vers;
if ((fw = firmware_get(FW_FNAME)) == NULL) {
device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME);
return (ENOENT);
} else
device_printf(sc->dev, "installing firmware on card\n");
status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
if (status != 0) {
device_printf(sc->dev, "failed to install firmware: %d\n",
status);
} else {
t3_get_fw_version(sc, &vers);
snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
G_FW_VERSION_MICRO(vers));
}
/* Drop the firmware(9) reference whether or not the flash succeeded. */
firmware_put(fw, FIRMWARE_UNLOAD);
return (status);
}
/*
* The cxgb_controller_attach function is responsible for the initial
* bringup of the device. Its responsibilities include:
*
* 1. Determine if the device supports MSI or MSI-X.
* 2. Allocate bus resources so that we can access the Base Address Register
* 3. Create and initialize mutexes for the controller and its control
* logic such as SGE and MDIO.
* 4. Call hardware specific setup routine for the adapter as a whole.
* 5. Allocate the BAR for doing MSI-X.
* 6. Setup the line interrupt iff MSI-X is not supported.
* 7. Create the driver's taskq.
* 8. Start one task queue service thread.
* 9. Check if the firmware and SRAM are up-to-date. They will be
* auto-updated later (before FULL_INIT_DONE), if required.
* 10. Create a child device for each MAC (port)
* 11. Initialize T3 private state.
* 12. Trigger the LED
* 13. Setup offload iff supported.
* 14. Reset/restart the tick callout.
* 15. Attach sysctls
*
* NOTE: Any modification or deviation from this list MUST be reflected in
* the above comment. Failure to do so will result in problems on various
* error conditions including link flapping.
*/
/*
 * Bring up the controller device; see the step-by-step contract in the
 * comment immediately above.  On any failure everything already set up
 * is torn down via cxgb_free().
 *
 * Fix: taskqueue_create() failure previously jumped to `out' without
 * setting `error', so attach could return 0 with sc->tq == NULL and no
 * cleanup; it now fails with ENOMEM.
 */
static int
cxgb_controller_attach(device_t dev)
{
    device_t child;
    const struct adapter_info *ai;
    struct adapter *sc;
    int i, error = 0;
    uint32_t vers;
    int port_qsets = 1;
    int msi_needed, reg;

    sc = device_get_softc(dev);
    sc->dev = dev;
    sc->msi_count = 0;
    ai = cxgb_get_adapter_info(dev);

    snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
        device_get_unit(dev));
    ADAPTER_LOCK_INIT(sc, sc->lockbuf);

    snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
        device_get_unit(dev));
    snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
        device_get_unit(dev));
    snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
        device_get_unit(dev));

    MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN);
    MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
    MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);

    mtx_lock(&t3_list_lock);
    SLIST_INSERT_HEAD(&t3_list, sc, link);
    mtx_unlock(&t3_list_lock);

    /* find the PCIe link width and set max read request to 4KB */
    if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
        uint16_t lnk;

        lnk = pci_read_config(dev, reg + PCIER_LINK_STA, 2);
        sc->link_width = (lnk & PCIEM_LINK_STA_WIDTH) >> 4;
        if (sc->link_width < 8 &&
            (ai->caps & SUPPORTED_10000baseT_Full)) {
            device_printf(sc->dev,
                "PCIe x%d Link, expect reduced performance\n",
                sc->link_width);
        }
        pci_set_max_read_req(dev, 4096);
    }

    touch_bars(dev);
    pci_enable_busmaster(dev);

    /*
     * Allocate the registers and make them available to the driver.
     * The registers that we care about for NIC mode are in BAR 0
     */
    sc->regs_rid = PCIR_BAR(0);
    if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &sc->regs_rid, RF_ACTIVE)) == NULL) {
        device_printf(dev, "Cannot allocate BAR region 0\n");
        error = ENXIO;
        goto out;
    }

    sc->bt = rman_get_bustag(sc->regs_res);
    sc->bh = rman_get_bushandle(sc->regs_res);
    sc->mmio_len = rman_get_size(sc->regs_res);

    for (i = 0; i < MAX_NPORTS; i++)
        sc->port[i].adapter = sc;

    if (t3_prep_adapter(sc, ai, 1) < 0) {
        printf("prep adapter failed\n");
        error = ENODEV;
        goto out;
    }

    /* User-doorbell BAR is only needed in offload mode. */
    sc->udbs_rid = PCIR_BAR(2);
    sc->udbs_res = NULL;
    if (is_offload(sc) &&
        ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &sc->udbs_rid, RF_ACTIVE)) == NULL)) {
        device_printf(dev, "Cannot allocate BAR region 1\n");
        error = ENXIO;
        goto out;
    }

    /* Allocate the BAR for doing MSI-X.  If it succeeds, try to allocate
     * enough messages for the queue sets.  If that fails, try falling
     * back to MSI.  If that fails, then try falling back to the legacy
     * interrupt pin model.
     */
    sc->msix_regs_rid = 0x20;
    if ((msi_allowed >= 2) &&
        (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {

        if (multiq)
            port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus);
        msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1;

        if (pci_msix_count(dev) == 0 ||
            (error = pci_alloc_msix(dev, &sc->msi_count)) != 0 ||
            sc->msi_count != msi_needed) {
            device_printf(dev, "alloc msix failed - "
                "msi_count=%d, msi_needed=%d, err=%d; "
                "will try MSI\n", sc->msi_count,
                msi_needed, error);
            sc->msi_count = 0;
            port_qsets = 1;
            pci_release_msi(dev);
            bus_release_resource(dev, SYS_RES_MEMORY,
                sc->msix_regs_rid, sc->msix_regs_res);
            sc->msix_regs_res = NULL;
        } else {
            sc->flags |= USING_MSIX;
            sc->cxgb_intr = cxgb_async_intr;
            device_printf(dev,
                "using MSI-X interrupts (%u vectors)\n",
                sc->msi_count);
        }
    }

    if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
        sc->msi_count = 1;
        if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) {
            device_printf(dev, "alloc msi failed - "
                "err=%d; will try INTx\n", error);
            sc->msi_count = 0;
            port_qsets = 1;
            pci_release_msi(dev);
        } else {
            sc->flags |= USING_MSI;
            sc->cxgb_intr = t3_intr_msi;
            device_printf(dev, "using MSI interrupts\n");
        }
    }
    if (sc->msi_count == 0) {
        device_printf(dev, "using line interrupts\n");
        sc->cxgb_intr = t3b_intr;
    }

    /* Create a private taskqueue thread for handling driver events */
    sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
        taskqueue_thread_enqueue, &sc->tq);
    if (sc->tq == NULL) {
        device_printf(dev, "failed to allocate controller task queue\n");
        error = ENOMEM;	/* was missing: attach used to "succeed" here */
        goto out;
    }

    taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
        device_get_nameunit(dev));
    TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);

    /* Create a periodic callout for checking adapter status */
    callout_init(&sc->cxgb_tick_ch, 1);

    if (t3_check_fw_version(sc) < 0 || force_fw_update) {
        /*
         * Warn user that a firmware update will be attempted in init.
         */
        device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
            FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        sc->flags &= ~FW_UPTODATE;
    } else {
        sc->flags |= FW_UPTODATE;
    }

    if (t3_check_tpsram_version(sc) < 0) {
        /*
         * Warn user that a firmware update will be attempted in init.
         */
        device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
            t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        sc->flags &= ~TPS_UPTODATE;
    } else {
        sc->flags |= TPS_UPTODATE;
    }

    /*
     * Create a child device for each MAC.  The ethernet attachment
     * will be done in these children.
     */
    for (i = 0; i < (sc)->params.nports; i++) {
        struct port_info *pi;

        if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
            device_printf(dev, "failed to add child port\n");
            error = EINVAL;
            goto out;
        }
        pi = &sc->port[i];
        pi->adapter = sc;
        pi->nqsets = port_qsets;
        pi->first_qset = i*port_qsets;
        pi->port_id = i;
        pi->tx_chan = i >= ai->nports0;
        pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
        sc->rxpkt_map[pi->txpkt_intf] = i;
        sc->port[i].tx_chan = i >= ai->nports0;
        sc->portdev[i] = child;
        device_set_softc(child, pi);
    }
    if ((error = bus_generic_attach(dev)) != 0)
        goto out;

    /* initialize sge private state */
    t3_sge_init_adapter(sc);

    t3_led_ready(sc);

    error = t3_get_fw_version(sc, &vers);
    if (error)
        goto out;

    snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
        G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
        G_FW_VERSION_MICRO(vers));

    device_set_descf(dev, "%s %sNIC\t E/C: %s S/N: %s",
        ai->desc, is_offload(sc) ? "R" : "",
        sc->params.vpd.ec, sc->params.vpd.sn);

    snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x",
        sc->params.vpd.port_type[0], sc->params.vpd.port_type[1],
        sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]);

    device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]);
    callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
    t3_add_attach_sysctls(sc);

#ifdef TCP_OFFLOAD
    for (i = 0; i < NUM_CPL_HANDLERS; i++)
        sc->cpl_handler[i] = cpl_not_handled;
#endif

    t3_intr_clear(sc);
    error = cxgb_setup_interrupts(sc);
out:
    if (error)
        cxgb_free(sc);

    return (error);
}
/*
* The cxgb_controller_detach routine is called with the device is
* unloaded from the system.
*/
/* newbus detach for the controller; all real work is in cxgb_free(). */
static int
cxgb_controller_detach(device_t dev)
{
    cxgb_free(device_get_softc(dev));
    return (0);
}
/*
* The cxgb_free() is called by the cxgb_controller_detach() routine
* to tear down the structures that were built up in
* cxgb_controller_attach(), and should be the final piece of work
* done when fully unloading the driver.
*
*
* 1. Shutting down the threads started by the cxgb_controller_attach()
* routine.
* 2. Stopping the lower level device and all callouts (cxgb_down_locked()).
* 3. Detaching all of the port devices created during the
* cxgb_controller_attach() routine.
* 4. Removing the device children created via cxgb_controller_attach().
* 5. Releasing PCI resources associated with the device.
* 6. Turning off the offload support, iff it was turned on.
* 7. Destroying the mutexes created in cxgb_controller_attach().
*
*/
static void
cxgb_free(struct adapter *sc)
{
int i, nqsets = 0;
/* Flag shutdown so tick/tasks stop rescheduling themselves. */
ADAPTER_LOCK(sc);
sc->flags |= CXGB_SHUTDOWN;
ADAPTER_UNLOCK(sc);
/*
* Make sure all child devices are gone.
*/
bus_generic_detach(sc->dev);
for (i = 0; i < (sc)->params.nports; i++) {
if (sc->portdev[i] &&
device_delete_child(sc->dev, sc->portdev[i]) != 0)
device_printf(sc->dev, "failed to delete child port\n");
/* Count qsets now; t3_free_sge_resources() needs the total below. */
nqsets += sc->port[i].nqsets;
}
/*
* At this point, it is as if cxgb_port_detach has run on all ports, and
* cxgb_down has run on the adapter. All interrupts have been silenced,
* all open devices have been closed.
*/
KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)",
__func__, sc->open_device_map));
for (i = 0; i < sc->params.nports; i++) {
KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!",
__func__, i));
}
/*
* Finish off the adapter's callouts.
*/
callout_drain(&sc->cxgb_tick_ch);
callout_drain(&sc->sge_timer_ch);
/*
* Release resources grabbed under FULL_INIT_DONE by cxgb_up. The
* sysctls are cleaned up by the kernel linker.
*/
if (sc->flags & FULL_INIT_DONE) {
t3_free_sge_resources(sc, nqsets);
sc->flags &= ~FULL_INIT_DONE;
}
/*
* Release all interrupt resources.
*/
cxgb_teardown_interrupts(sc);
if (sc->flags & (USING_MSI | USING_MSIX)) {
device_printf(sc->dev, "releasing msi message(s)\n");
pci_release_msi(sc->dev);
} else {
device_printf(sc->dev, "no msi message to release\n");
}
if (sc->msix_regs_res != NULL) {
bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
sc->msix_regs_res);
}
/*
* Free the adapter's taskqueue.
*/
if (sc->tq != NULL) {
taskqueue_free(sc->tq);
sc->tq = NULL;
}
free(sc->filters, M_DEVBUF);
t3_sge_free(sc);
if (sc->udbs_res != NULL)
bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
sc->udbs_res);
if (sc->regs_res != NULL)
bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
sc->regs_res);
/* Destroy locks last, after everything that might take them. */
MTX_DESTROY(&sc->mdio_lock);
MTX_DESTROY(&sc->sge.reg_lock);
MTX_DESTROY(&sc->elmer_lock);
mtx_lock(&t3_list_lock);
SLIST_REMOVE(&t3_list, sc, adapter, link);
mtx_unlock(&t3_list_lock);
ADAPTER_LOCK_DEINIT(sc);
}
/**
* setup_sge_qsets - configure SGE Tx/Rx/response queues
* @sc: the controller softc
*
* Determines how many sets of SGE queues to use and initializes them.
* We support multiple queue sets per port if we have MSI-X, otherwise
* just one queue set per port.
*/
static int
setup_sge_qsets(adapter_t *sc)
{
int i, j, err, irq_idx = 0, qset_idx = 0;
u_int ntxq = SGE_TXQ_PER_SET;
if ((err = t3_sge_alloc(sc)) != 0) {
device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
return (err);
}
/* irq_idx == -1: rev > 0 non-MSI parts don't need a per-qset IRQ index. */
if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
irq_idx = -1;
for (i = 0; i < (sc)->params.nports; i++) {
struct port_info *pi = &sc->port[i];
for (j = 0; j < pi->nqsets; j++, qset_idx++) {
/* MSI-X: vector 0 is the error interrupt, qsets start at 1. */
err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
(sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
&sc->params.sge.qset[qset_idx], ntxq, pi);
if (err) {
/* Free the qsets allocated so far before bailing out. */
t3_free_sge_resources(sc, qset_idx);
device_printf(sc->dev,
"t3_sge_alloc_qset failed with %d\n", err);
return (err);
}
}
}
sc->nqsets = qset_idx;
return (0);
}
/*
 * Tear down and release every interrupt handler and IRQ resource set up
 * by cxgb_setup_interrupts().  Safe to call on a partially set up
 * adapter: fully-NULL slots are skipped, half-done ones assert.
 */
static void
cxgb_teardown_interrupts(adapter_t *sc)
{
int i;
for (i = 0; i < SGE_QSETS; i++) {
if (sc->msix_intr_tag[i] == NULL) {
/* Should have been setup fully or not at all */
KASSERT(sc->msix_irq_res[i] == NULL &&
sc->msix_irq_rid[i] == 0,
("%s: half-done interrupt (%d).", __func__, i));
continue;
}
bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
sc->msix_intr_tag[i]);
bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i],
sc->msix_irq_res[i]);
sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL;
sc->msix_irq_rid[i] = 0;
}
/* The main (INTx/MSI/MSI-X error) interrupt. */
if (sc->intr_tag) {
KASSERT(sc->irq_res != NULL,
("%s: half-done interrupt.", __func__));
bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
sc->irq_res);
sc->irq_res = sc->intr_tag = NULL;
sc->irq_rid = 0;
}
}
/*
 * Allocate and set up the adapter's interrupts: the main vector (INTx,
 * MSI, or the MSI-X "err" vector), then -- for MSI-X only -- one vector
 * per queue set.  On any failure everything is unwound via
 * cxgb_teardown_interrupts().  Returns 0 or an errno.
 */
static int
cxgb_setup_interrupts(adapter_t *sc)
{
struct resource *res;
void *tag;
int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX);
/* rid 0 is the legacy INTx pin; MSI/MSI-X vectors start at rid 1. */
sc->irq_rid = intr_flag ? 1 : 0;
sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid,
RF_SHAREABLE | RF_ACTIVE);
if (sc->irq_res == NULL) {
device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n",
intr_flag, sc->irq_rid);
err = EINVAL;
sc->irq_rid = 0;
} else {
err = bus_setup_intr(sc->dev, sc->irq_res,
INTR_MPSAFE | INTR_TYPE_NET, NULL,
sc->cxgb_intr, sc, &sc->intr_tag);
if (err) {
device_printf(sc->dev,
"Cannot set up interrupt (%x, %u, %d)\n",
intr_flag, sc->irq_rid, err);
bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
sc->irq_res);
sc->irq_res = sc->intr_tag = NULL;
sc->irq_rid = 0;
}
}
/* That's all for INTx or MSI */
if (!(intr_flag & USING_MSIX) || err)
return (err);
bus_describe_intr(sc->dev, sc->irq_res, sc->intr_tag, "err");
/* One vector per queue set; rids 2..msi_count follow the err vector. */
for (i = 0; i < sc->msi_count - 1; i++) {
rid = i + 2;
res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
RF_SHAREABLE | RF_ACTIVE);
if (res == NULL) {
device_printf(sc->dev, "Cannot allocate interrupt "
"for message %d\n", rid);
err = EINVAL;
break;
}
err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET,
NULL, t3_intr_msix, &sc->sge.qs[i], &tag);
if (err) {
device_printf(sc->dev, "Cannot set up interrupt "
"for message %d (%d)\n", rid, err);
bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res);
break;
}
sc->msix_irq_rid[i] = rid;
sc->msix_irq_res[i] = res;
sc->msix_intr_tag[i] = tag;
bus_describe_intr(sc->dev, res, tag, "qs%d", i);
}
if (err)
cxgb_teardown_interrupts(sc);
return (err);
}
/* newbus probe for a port child: describe it via its PHY description. */
static int
cxgb_port_probe(device_t dev)
{
    struct port_info *p = device_get_softc(dev);

    device_set_descf(dev, "Port %d %s", p->port_id, p->phy.desc);
    return (0);
}
/*
 * Create the per-port control character device (/dev/<ifname>) used by
 * cxgb_extension_ioctl.  Returns 0 or ENOMEM.
 */
static int
cxgb_makedev(struct port_info *pi)
{
    pi->port_cdev = make_dev(&cxgb_cdevsw, if_getdunit(pi->ifp), UID_ROOT,
        GID_WHEEL, 0600, "%s", if_name(pi->ifp));

    if (pi->port_cdev == NULL)
        return (ENOMEM);

    /* Stash the port so the ioctl handler can find its softc. */
    pi->port_cdev->si_drv1 = (void *)pi;

    return (0);
}
#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
#define CXGB_CAP_ENABLE CXGB_CAP
/*
 * Attach one MAC (port) child device: create its lock and link-check
 * task, allocate and configure the ifnet, attach to the network stack,
 * create the control cdev, and build the media list.
 *
 * Note: stray diff-residue lines (a dead NULL check after if_alloc())
 * removed -- if_alloc(9) cannot fail on current FreeBSD.
 */
static int
cxgb_port_attach(device_t dev)
{
    struct port_info *p;
    if_t ifp;
    int err;
    struct adapter *sc;

    p = device_get_softc(dev);
    sc = p->adapter;
    snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
        device_get_unit(device_get_parent(dev)), p->port_id);
    PORT_LOCK_INIT(p, p->lockbuf);

    callout_init(&p->link_check_ch, 1);
    TASK_INIT(&p->link_check_task, 0, check_link_status, p);

    /* Allocate an ifnet object and set it up */
    ifp = p->ifp = if_alloc(IFT_ETHER);
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    if_setinitfn(ifp, cxgb_init);
    if_setsoftc(ifp, p);
    if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
    if_setioctlfn(ifp, cxgb_ioctl);
    if_settransmitfn(ifp, cxgb_transmit);
    if_setqflushfn(ifp, cxgb_qflush);
    if_setgetcounterfn(ifp, cxgb_get_counter);

    if_setcapabilities(ifp, CXGB_CAP);
#ifdef TCP_OFFLOAD
    if (is_offload(sc))
        if_setcapabilitiesbit(ifp, IFCAP_TOE4, 0);
#endif
    if_setcapenable(ifp, CXGB_CAP_ENABLE);
    if_sethwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
        CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
    if_sethwtsomax(ifp, IP_MAXPACKET);
    if_sethwtsomaxsegcount(ifp, 36);
    if_sethwtsomaxsegsize(ifp, 65536);

    /*
     * Disable TSO on 4-port - it isn't supported by the firmware.
     */
    if (sc->params.nports > 2) {
        if_setcapabilitiesbit(ifp, 0, IFCAP_TSO | IFCAP_VLAN_HWTSO);
        if_setcapenablebit(ifp, 0, IFCAP_TSO | IFCAP_VLAN_HWTSO);
        if_sethwassistbits(ifp, 0, CSUM_TSO);
    }

    /* Tie into the network stack. */
    ether_ifattach(ifp, p->hw_addr);

    /* Attach driver debugnet methods. */
    DEBUGNET_SET(ifp, cxgb);

#ifdef DEFAULT_JUMBO
    if (sc->params.nports <= 2)
        if_setmtu(ifp, ETHERMTU_JUMBO);
#endif
    if ((err = cxgb_makedev(p)) != 0) {
        printf("makedev failed %d\n", err);
        return (err);
    }

    /* Create a list of media supported by this port */
    ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
        cxgb_media_status);
    cxgb_build_medialist(p);

    t3_sge_init_port(p);

    return (err);
}
/*
* cxgb_port_detach() is called via the device_detach methods when
* cxgb_free() calls the bus_generic_detach. It is responsible for
* removing the device from the view of the kernel, i.e. from all
* interfaces lists etc. This routine is only called when the driver is
* being unloaded, not when the link goes down.
*/
static int
cxgb_port_detach(device_t dev)
{
struct port_info *p;
struct adapter *sc;
int i;
p = device_get_softc(dev);
sc = p->adapter;
/* Tell cxgb_ioctl and if_init that the port is going away */
ADAPTER_LOCK(sc);
SET_DOOMED(p);
wakeup(&sc->flags);
/* Wait out any in-progress ioctl/init, then claim the adapter. */
while (IS_BUSY(sc))
mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0);
SET_BUSY(sc);
ADAPTER_UNLOCK(sc);
if (p->port_cdev != NULL)
destroy_dev(p->port_cdev);
/* Stop the port and detach it from the network stack. */
cxgb_uninit_synchronized(p);
ether_ifdetach(p->ifp);
/* Drain the TX watchdog/timer callouts of every qset of this port. */
for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
struct sge_qset *qs = &sc->sge.qs[i];
struct sge_txq *txq = &qs->txq[TXQ_ETH];
callout_drain(&txq->txq_watchdog);
callout_drain(&txq->txq_timer);
}
PORT_LOCK_DEINIT(p);
if_free(p->ifp);
p->ifp = NULL;
/* Release the adapter and wake the next waiter, if any. */
ADAPTER_LOCK(sc);
CLR_BUSY(sc);
wakeup_one(&sc->flags);
ADAPTER_UNLOCK(sc);
return (0);
}
/*
 * Handle an unrecoverable adapter error: quiesce the SGE and both
 * XGMACs, mask interrupts, and dump the firmware status words from the
 * CIM control block for post-mortem debugging.
 */
void
t3_fatal_err(struct adapter *sc)
{
u_int fw_status[4];
if (sc->flags & FULL_INIT_DONE) {
t3_sge_stop(sc);
t3_write_reg(sc, A_XGM_TX_CTRL, 0);
t3_write_reg(sc, A_XGM_RX_CTRL, 0);
t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
t3_intr_disable(sc);
}
device_printf(sc->dev,"encountered fatal error, operation suspended\n");
if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
}
/*
 * Walk the PCI capability list looking for capability `cap'.
 * Returns the config-space offset of the capability, or 0 if the device
 * has no capability list or the capability is absent.
 * (Cleanup: removed the unreachable `break' after `return (0)' in the
 * default case.)
 */
int
t3_os_find_pci_capability(adapter_t *sc, int cap)
{
    device_t dev;
    struct pci_devinfo *dinfo;
    pcicfgregs *cfg;
    uint32_t status;
    uint8_t ptr;

    dev = sc->dev;
    dinfo = device_get_ivars(dev);
    cfg = &dinfo->cfg;

    status = pci_read_config(dev, PCIR_STATUS, 2);
    if (!(status & PCIM_STATUS_CAPPRESENT))
        return (0);

    /* The capability pointer's location depends on the header type. */
    switch (cfg->hdrtype & PCIM_HDRTYPE) {
    case 0:
    case 1:
        ptr = PCIR_CAP_PTR;
        break;
    case 2:
        ptr = PCIR_CAP_PTR_2;
        break;
    default:
        return (0);
    }
    ptr = pci_read_config(dev, ptr, 1);

    /* Follow the singly-linked capability chain. */
    while (ptr != 0) {
        if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
            return (ptr);
        ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
    }

    return (0);
}
/*
 * OS hook: snapshot the adapter's PCI config space so it can be
 * restored after a chip reset.  Always returns 0.
 */
int
t3_os_pci_save_state(struct adapter *sc)
{
    device_t dev = sc->dev;
    struct pci_devinfo *dinfo = device_get_ivars(dev);

    pci_cfg_save(dev, dinfo, 0);
    return (0);
}
/*
 * OS hook: restore the PCI config space previously saved by
 * t3_os_pci_save_state().  Always returns 0.
 */
int
t3_os_pci_restore_state(struct adapter *sc)
{
    device_t dev = sc->dev;
    struct pci_devinfo *dinfo = device_get_ivars(dev);

    pci_cfg_restore(dev, dinfo);
    return (0);
}
/**
* t3_os_link_changed - handle link status changes
* @sc: the adapter associated with the link change
* @port_id: the port index whose link status has changed
* @link_status: the new status of the link
* @speed: the new speed setting
* @duplex: the new duplex setting
* @fc: the new flow-control setting
*
* This is the OS-dependent handler for link status changes. The OS
* neutral handler takes care of most of the processing for these events,
* then calls this handler for any OS-specific processing.
*/
void
t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
int duplex, int fc, int mac_was_reset)
{
struct port_info *pi = &adapter->port[port_id];
if_t ifp = pi->ifp;
/* no race with detach, so ifp should always be good */
KASSERT(ifp, ("%s: if detached.", __func__));
/* Reapply mac settings if they were lost due to a reset */
if (mac_was_reset) {
PORT_LOCK(pi);
cxgb_update_mac_settings(pi);
PORT_UNLOCK(pi);
}
/* Propagate the new state/speed to the network stack. */
if (link_status) {
if_setbaudrate(ifp, IF_Mbps(speed));
if_link_state_change(ifp, LINK_STATE_UP);
} else
if_link_state_change(ifp, LINK_STATE_DOWN);
}
/**
* t3_os_phymod_changed - handle PHY module changes
* @phy: the PHY reporting the module change
* @mod_type: new module type
*
* This is the OS-dependent handler for PHY module changes. It is
* invoked when a PHY module is removed or inserted for any OS-specific
* processing.
*/
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
/* Indexed by enum phy_modtype; last entry catches unrecognized types. */
static const char *mod_str[] = {
NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX-L", "unknown"
};
struct port_info *pi = &adap->port[port_id];
int mod = pi->phy.modtype;
/* Rebuild the media list if the installed module type changed. */
if (mod != pi->media.ifm_cur->ifm_data)
cxgb_build_medialist(pi);
if (mod == phy_modtype_none)
if_printf(pi->ifp, "PHY module unplugged\n");
else {
KASSERT(mod < ARRAY_SIZE(mod_str),
("invalid PHY module type %d", mod));
if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]);
}
}
/*
 * OS hook: record a port's factory MAC address in its port_info.
 */
void
t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
{
/*
* The ifnet might not be allocated before this gets called,
* as this is called early on in attach by t3_prep_adapter
* save the address off in the port structure
*/
if (cxgb_debug)
printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
}
/*
* Programs the XGMAC based on the settings in the ifnet. These settings
* include MTU, MAC address, mcast addresses, etc.
*/
static void
cxgb_update_mac_settings(struct port_info *p)
{
if_t ifp = p->ifp;
struct t3_rx_mode rm;
struct cmac *mac = &p->mac;
int mtu, hwtagging;
PORT_LOCK_ASSERT_OWNED(p);
/* The ifnet's lladdr is authoritative; mirror it into the port. */
bcopy(if_getlladdr(ifp), p->hw_addr, ETHER_ADDR_LEN);
mtu = if_getmtu(ifp);
/* Leave room for a VLAN tag when the stack won't adjust the MTU. */
if (if_getcapenable(ifp) & IFCAP_VLAN_MTU)
mtu += ETHER_VLAN_ENCAP_LEN;
hwtagging = (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0;
t3_mac_set_mtu(mac, mtu);
t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
t3_mac_set_address(mac, 0, p->hw_addr);
t3_init_rx_mode(&rm, p);
t3_mac_set_rx_mode(mac, &rm);
}
/*
 * Poll (up to 5 tries, 10 ms apart) until `n' more management replies
 * beyond `init_cnt' have arrived on qset 0's response queue.
 * Returns 0 on success or ETIMEDOUT.
 */
static int
await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
unsigned long n)
{
    int tries = 5;

    for (;;) {
        if (adap->sge.qs[0].rspq.offload_pkts >= init_cnt + n)
            return 0;
        if (--tries == 0)
            return (ETIMEDOUT);
        t3_os_sleep(10);
    }
}
/*
 * Push a write request through every SMT, L2T and RTE table entry plus
 * one TCB field write, then wait for all the management replies.
 * Presumably this forces the TP's table parity bits into a known state
 * after reset -- TODO confirm against the T3 errata/Linux driver.
 * Returns 0 or ETIMEDOUT (from await_mgmt_replies()).
 */
static int
init_tp_parity(struct adapter *adap)
{
int i;
struct mbuf *m;
struct cpl_set_tcb_field *greq;
unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
t3_tp_set_offload_mode(adap, 1);
/* One write per SMT entry (16 of them). */
for (i = 0; i < 16; i++) {
struct cpl_smt_write_req *req;
m = m_gethdr(M_WAITOK, MT_DATA);
req = mtod(m, struct cpl_smt_write_req *);
m->m_len = m->m_pkthdr.len = sizeof(*req);
memset(req, 0, sizeof(*req));
req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
req->iff = i;
t3_mgmt_tx(adap, m);
}
/* One write per L2T entry (2048 of them). */
for (i = 0; i < 2048; i++) {
struct cpl_l2t_write_req *req;
m = m_gethdr(M_WAITOK, MT_DATA);
req = mtod(m, struct cpl_l2t_write_req *);
m->m_len = m->m_pkthdr.len = sizeof(*req);
memset(req, 0, sizeof(*req));
req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
req->params = htonl(V_L2T_W_IDX(i));
t3_mgmt_tx(adap, m);
}
/* One write per routing-table entry (2048 of them). */
for (i = 0; i < 2048; i++) {
struct cpl_rte_write_req *req;
m = m_gethdr(M_WAITOK, MT_DATA);
req = mtod(m, struct cpl_rte_write_req *);
m->m_len = m->m_pkthdr.len = sizeof(*req);
memset(req, 0, sizeof(*req));
req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
req->l2t_idx = htonl(V_L2T_W_IDX(i));
t3_mgmt_tx(adap, m);
}
/* Finally, a single TCB field write. */
m = m_gethdr(M_WAITOK, MT_DATA);
greq = mtod(m, struct cpl_set_tcb_field *);
m->m_len = m->m_pkthdr.len = sizeof(*greq);
memset(greq, 0, sizeof(*greq));
greq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
greq->mask = htobe64(1);
t3_mgmt_tx(adap, m);
/* Wait for replies to all 16 + 2048 + 2048 + 1 requests. */
i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
t3_tp_set_offload_mode(adap, 0);
return (i);
}
/**
* setup_rss - configure Receive Side Steering (per-queue connection demux)
* @adap: the adapter
*
* Sets up RSS to distribute packets to multiple receive queues. We
* configure the RSS CPU lookup table to distribute to the number of HW
* receive queues, and the response queue lookup table to narrow that
* down to the response queues actually configured for each port.
* We always configure the RSS mapping for two ports since the mapping
* table has plenty of entries.
*/
static void
setup_rss(adapter_t *adap)
{
int i;
u_int nq[2];
uint8_t cpus[SGE_QSETS + 1];
uint16_t rspq_map[RSS_TABLE_SIZE];
/* Identity CPU map; 0xff terminates the list for the hardware. */
for (i = 0; i < SGE_QSETS; ++i)
cpus[i] = i;
cpus[SGE_QSETS] = 0xff;
/* Count the qsets on each of the two TX channels. */
nq[0] = nq[1] = 0;
for_each_port(adap, i) {
const struct port_info *pi = adap2pinfo(adap, i);
nq[pi->tx_chan] += pi->nqsets;
}
/*
* Lower half of the table maps to channel 0's qsets, upper half to
* channel 1's (offset by nq[0]); 0 when a channel has no qsets.
*/
for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
rspq_map[i] = nq[0] ? i % nq[0] : 0;
rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? i % nq[1] + nq[0] : 0;
}
/* Calculate the reverse RSS map table */
for (i = 0; i < SGE_QSETS; ++i)
adap->rrss_map[i] = 0xff;
for (i = 0; i < RSS_TABLE_SIZE; ++i)
if (adap->rrss_map[rspq_map[i]] == 0xff)
adap->rrss_map[rspq_map[i]] = i;
t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ,
cpus, rspq_map);
}
/*
 * Send a packet-scheduler management work request to the firmware.
 * Best effort: if no mbuf is available (M_NOWAIT) the command is
 * silently dropped.
 */
static void
send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
int hi, int port)
{
struct mbuf *m;
struct mngt_pktsched_wr *req;
m = m_gethdr(M_NOWAIT, MT_DATA);
if (m) {
req = mtod(m, struct mngt_pktsched_wr *);
req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
req->sched = sched;
req->idx = qidx;
req->min = lo;
req->max = hi;
req->binding = port;
m->m_len = m->m_pkthdr.len = sizeof(*req);
t3_mgmt_tx(adap, m);
}
}
static void
bind_qsets(adapter_t *sc)
{
	int port, q;

	/* Bind every qset of every port to scheduler 1 on its TX channel. */
	for (port = 0; port < (sc)->params.nports; port++) {
		const struct port_info *pi = adap2pinfo(sc, port);

		for (q = 0; q < pi->nqsets; q++)
			send_pktsched_cmd(sc, 1, pi->first_qset + q, -1,
			    -1, pi->tx_chan);
	}
}
/*
 * Update the protocol SRAM image stored in the EEPROM if its version does
 * not match the one this driver was built against.  Best effort: failures
 * are reported via device_printf but not returned.
 */
static void
update_tpeeprom(struct adapter *adap)
{
	const struct firmware *tpeeprom;
	uint32_t version;
	unsigned int major, minor;
	int ret, len;
	char rev, name[32];

	t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);
	major = G_TP_VERSION_MAJOR(version);
	minor = G_TP_VERSION_MINOR(version);

	/* Nothing to do if the EEPROM already carries the expected version. */
	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return;

	rev = t3rev2char(adap);
	snprintf(name, sizeof(name), TPEEPROM_NAME, rev);

	tpeeprom = firmware_get(name);
	if (tpeeprom == NULL) {
		device_printf(adap->dev,
		    "could not load TP EEPROM: unable to load %s\n",
		    name);
		return;
	}

	len = tpeeprom->datasize - 4;

	ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
	if (ret)
		goto release_tpeeprom;

	if (len != TP_SRAM_LEN) {
		device_printf(adap->dev,
		    "%s length is wrong len=%d expected=%d\n", name,
		    len, TP_SRAM_LEN);
		/*
		 * Bug fix: this used to be a bare return, which leaked the
		 * firmware reference taken by firmware_get() above.
		 */
		goto release_tpeeprom;
	}

	ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
	    TP_SRAM_OFFSET);
	if (!ret) {
		device_printf(adap->dev,
		    "Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
		    TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	} else
		device_printf(adap->dev,
		    "Protocol SRAM image update in EEPROM failed\n");

release_tpeeprom:
	firmware_put(tpeeprom, FIRMWARE_UNLOAD);

	return;
}
/*
 * Load the protocol SRAM image into the adapter, refreshing the EEPROM
 * copy first.  Returns 0 on success or an errno value.
 */
static int
update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char rev, name[32];
	int ret;

	rev = t3rev2char(adap);
	snprintf(name, sizeof(name), TPSRAM_NAME, rev);

	/* Keep the EEPROM copy of the image up to date as well. */
	update_tpeeprom(adap);

	tpsram = firmware_get(name);
	if (tpsram == NULL) {
		device_printf(adap->dev, "could not load TP SRAM\n");
		return (EINVAL);
	}
	device_printf(adap->dev, "updating TP SRAM\n");

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
	if (ret == 0) {
		ret = t3_set_proto_sram(adap, tpsram->data);
		if (ret)
			device_printf(adap->dev,
			    "loading protocol SRAM failed\n");
	}

	firmware_put(tpsram, FIRMWARE_UNLOAD);
	return (ret);
}
/**
* cxgb_up - enable the adapter
* @adap: adapter being enabled
*
* Called when the first port is enabled, this function performs the
* actions necessary to make an adapter operational, such as completing
* the initialization of HW modules, and enabling interrupts.
*/
static int
cxgb_up(struct adapter *sc)
{
	int err = 0;
	unsigned int mxf = t3_mc5_size(&sc->mc5) - MC5_MIN_TIDS;

	KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)",
	    __func__, sc->open_device_map));

	/* One-time initialization: firmware, TP SRAM, HW, queues, RSS. */
	if ((sc->flags & FULL_INIT_DONE) == 0) {
		ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

		if ((sc->flags & FW_UPTODATE) == 0)
			if ((err = upgrade_fw(sc)))
				goto out;

		if ((sc->flags & TPS_UPTODATE) == 0)
			if ((err = update_tpsram(sc)))
				goto out;

		/*
		 * Carve MC5 space for filters: nfilters < 0 means "as many
		 * as fit", otherwise cap the request at what is available.
		 */
		if (is_offload(sc) && nfilters != 0) {
			sc->params.mc5.nservers = 0;
			if (nfilters < 0)
				sc->params.mc5.nfilters = mxf;
			else
				sc->params.mc5.nfilters = min(nfilters, mxf);
		}

		err = t3_init_hw(sc, 0);
		if (err)
			goto out;

		t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(sc);
		if (err)
			goto out;

		alloc_filters(sc);
		setup_rss(sc);

		t3_add_configured_sysctls(sc);
		sc->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(sc);
	t3_sge_start(sc);
	t3_intr_enable(sc);

	if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) &&
	    is_offload(sc) && init_tp_parity(sc) == 0)
		sc->flags |= TP_PARITY_INIT;

	if (sc->flags & TP_PARITY_INIT) {
		/* Clear any pending TP parity error causes, then enable. */
		t3_write_reg(sc, A_TP_INT_CAUSE, F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	/* Bind qsets to TX schedulers and install HW filters exactly once. */
	if (!(sc->flags & QUEUES_BOUND)) {
		bind_qsets(sc);
		setup_hw_filters(sc);
		sc->flags |= QUEUES_BOUND;
	}

	t3_sge_reset_adapter(sc);
out:
	return (err);
}
/*
* Called when the last open device is closed. Does NOT undo all of cxgb_up's
* work. Specifically, the resources grabbed under FULL_INIT_DONE are released
* during controller_detach, not here.
*/
static void
cxgb_down(struct adapter *sc)
{
	/* Quiesce the SGE, then mask adapter-wide interrupts. */
	t3_sge_stop(sc);
	t3_intr_disable(sc);
}
/*
* if_init for cxgb ports.
*/
static void
cxgb_init(void *arg)
{
	struct port_info *p = arg;
	struct adapter *sc = p->adapter;

	ADAPTER_LOCK(sc);
	/* Any error from cxgb_init_locked is discarded; if_init returns void. */
	cxgb_init_locked(p); /* releases adapter lock */
	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
}
/*
 * Bring a port up.  Called with the adapter lock held; always returns
 * with it released.  May sleep (drops the lock) when the one-time
 * adapter initialization in cxgb_up still has to run.
 */
static int
cxgb_init_locked(struct port_info *p)
{
	struct adapter *sc = p->adapter;
	if_t ifp = p->ifp;
	struct cmac *mac = &p->mac;
	int i, rc = 0, may_sleep = 0, gave_up_lock = 0;

	ADAPTER_LOCK_ASSERT_OWNED(sc);

	/* Wait for any other operation that holds the adapter busy. */
	while (!IS_DOOMED(p) && IS_BUSY(sc)) {
		gave_up_lock = 1;
		if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbinit", 0)) {
			rc = EINTR;
			goto done;
		}
	}
	if (IS_DOOMED(p)) {
		rc = ENXIO;
		goto done;
	}
	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));

	/*
	 * The code that runs during one-time adapter initialization can sleep
	 * so it's important not to hold any locks across it.
	 */
	may_sleep = sc->flags & FULL_INIT_DONE ? 0 : 1;

	if (may_sleep) {
		/* Mark busy so nobody else races us while unlocked. */
		SET_BUSY(sc);
		gave_up_lock = 1;
		ADAPTER_UNLOCK(sc);
	}

	/* Opening the first port brings the whole adapter up. */
	if (sc->open_device_map == 0 && ((rc = cxgb_up(sc)) != 0))
		goto done;

	PORT_LOCK(p);
	/* Nothing more to do if this port is already open and running. */
	if (isset(&sc->open_device_map, p->port_id) &&
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
		PORT_UNLOCK(p);
		goto done;
	}
	t3_port_intr_enable(sc, p->port_id);
	if (!mac->multiport)
		t3_mac_init(mac);
	cxgb_update_mac_settings(p);
	t3_link_start(&p->phy, mac, &p->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
	PORT_UNLOCK(p);

	/* Arm the TX watchdog on each of this port's ethernet txqs. */
	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_qset *qs = &sc->sge.qs[i];
		struct sge_txq *txq = &qs->txq[TXQ_ETH];

		callout_reset_on(&txq->txq_watchdog, hz, cxgb_tx_watchdog, qs,
		    txq->txq_watchdog.c_cpu);
	}

	/* all ok */
	setbit(&sc->open_device_map, p->port_id);
	/* Poll the link less frequently if the PHY can interrupt on changes. */
	callout_reset(&p->link_check_ch,
	    p->phy.caps & SUPPORTED_LINK_IRQ ? hz * 3 : hz / 4,
	    link_check_callout, p);

done:
	if (may_sleep) {
		ADAPTER_LOCK(sc);
		KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
		CLR_BUSY(sc);
	}
	if (gave_up_lock)
		wakeup_one(&sc->flags);
	ADAPTER_UNLOCK(sc);
	return (rc);
}
/*
 * Bring a port down.  Called with the adapter lock held; always returns
 * with it released.  The actual work runs in cxgb_uninit_synchronized
 * with no locks held (it drains taskqueues), protected by the BUSY flag.
 */
static int
cxgb_uninit_locked(struct port_info *p)
{
	struct adapter *sc = p->adapter;
	int rc;

	ADAPTER_LOCK_ASSERT_OWNED(sc);

	/* Wait for any other operation that holds the adapter busy. */
	while (!IS_DOOMED(p) && IS_BUSY(sc)) {
		if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbunin", 0)) {
			rc = EINTR;
			goto done;
		}
	}
	if (IS_DOOMED(p)) {
		rc = ENXIO;
		goto done;
	}
	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
	SET_BUSY(sc);
	ADAPTER_UNLOCK(sc);

	rc = cxgb_uninit_synchronized(p);

	ADAPTER_LOCK(sc);
	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
	CLR_BUSY(sc);
	wakeup_one(&sc->flags);
done:
	ADAPTER_UNLOCK(sc);
	return (rc);
}
/*
* Called on "ifconfig down", and from port_detach
*/
static int
cxgb_uninit_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	if_t ifp = pi->ifp;

	/*
	 * taskqueue_drain may cause a deadlock if the adapter lock is held.
	 */
	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	/*
	 * Clear this port's bit from the open device map, and then drain all
	 * the tasks that can access/manipulate this port's port_info or ifp.
	 * We disable this port's interrupts here and so the slow/ext
	 * interrupt tasks won't be enqueued. The tick task will continue to
	 * be enqueued every second but the runs after this drain will not see
	 * this port in the open device map.
	 *
	 * A well behaved task must take open_device_map into account and ignore
	 * ports that are not open.
	 */
	clrbit(&sc->open_device_map, pi->port_id);
	t3_port_intr_disable(sc, pi->port_id);
	taskqueue_drain(sc->tq, &sc->slow_intr_task);
	taskqueue_drain(sc->tq, &sc->tick_task);

	callout_drain(&pi->link_check_ch);
	taskqueue_drain(sc->tq, &pi->link_check_task);

	PORT_LOCK(pi);
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* disable pause frames */
	t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0);

	/* Reset RX FIFO HWM */
	t3_set_reg_field(sc, A_XGM_RXFIFO_CFG + pi->mac.offset,
	    V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0);

	/* Let in-flight frames drain before disabling the MAC. */
	DELAY(100 * 1000);

	/* Wait for TXFIFO empty */
	t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset,
	    F_TXFIFO_EMPTY, 1, 20, 5);
	DELAY(100 * 1000);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_RX);

	pi->phy.ops->power_down(&pi->phy, 1);

	PORT_UNLOCK(pi);

	/* Report link down to the stack. */
	pi->link_config.link_ok = 0;
	t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0);

	/* Closing the last open port brings the whole adapter down. */
	if (sc->open_device_map == 0)
		cxgb_down(pi->adapter);

	return (0);
}
/*
* Mark lro enabled or disabled in all qsets for this port
*/
static int
cxgb_set_lro(struct port_info *p, int enabled)
{
	struct adapter *adp = p->adapter;
	int qidx;

	/* Propagate the flag to every qset owned by this port. */
	for (qidx = p->first_qset; qidx < p->first_qset + p->nqsets; qidx++)
		adp->sge.qs[qidx].lro.enabled = (enabled != 0);

	return (0);
}
/*
 * ioctl handler for cxgb ports.  Requests that touch port/adapter state
 * take the adapter lock first and fail with ENXIO/EBUSY if the port is
 * doomed or the controller is busy with another operation.
 */
static int
cxgb_ioctl(if_t ifp, unsigned long command, caddr_t data)
{
	struct port_info *p = if_getsoftc(ifp);
	struct adapter *sc = p->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	int flags, error = 0, mtu;
	uint32_t mask;

	switch (command) {
	case SIOCSIFMTU:
		ADAPTER_LOCK(sc);
		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (error) {
fail:
			/* Shared exit for cases that bail with the lock held. */
			ADAPTER_UNLOCK(sc);
			return (error);
		}

		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
			error = EINVAL;
		} else {
			if_setmtu(ifp, mtu);
			PORT_LOCK(p);
			cxgb_update_mac_settings(p);
			PORT_UNLOCK(p);
		}
		ADAPTER_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		ADAPTER_LOCK(sc);
		if (IS_DOOMED(p)) {
			error = ENXIO;
			goto fail;
		}
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/* Re-program MAC only if PROMISC/ALLMULTI changed. */
				flags = p->if_flags;
				if (((if_getflags(ifp) ^ flags) & IFF_PROMISC) ||
				    ((if_getflags(ifp) ^ flags) & IFF_ALLMULTI)) {
					if (IS_BUSY(sc)) {
						error = EBUSY;
						goto fail;
					}
					PORT_LOCK(p);
					cxgb_update_mac_settings(p);
					PORT_UNLOCK(p);
				}
				ADAPTER_UNLOCK(sc);
			} else
				/* releases the adapter lock */
				error = cxgb_init_locked(p);
			p->if_flags = if_getflags(ifp);
		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			/* releases the adapter lock */
			error = cxgb_uninit_locked(p);
		else
			ADAPTER_UNLOCK(sc);

		ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		ADAPTER_LOCK(sc);
		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (error)
			goto fail;

		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			PORT_LOCK(p);
			cxgb_update_mac_settings(p);
			PORT_UNLOCK(p);
		}
		ADAPTER_UNLOCK(sc);

		break;
	case SIOCSIFCAP:
		ADAPTER_LOCK(sc);
		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (error)
			goto fail;

		/* 'mask' holds the capability bits being toggled. */
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		if (mask & IFCAP_TXCSUM) {
			if_togglecapenable(ifp, IFCAP_TXCSUM);
			if_togglehwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP);

			/* TSO4 requires TX checksumming; drop it if needed. */
			if (IFCAP_TSO4 & if_getcapenable(ifp) &&
			    !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
				mask &= ~IFCAP_TSO4;
				if_setcapenablebit(ifp, 0, IFCAP_TSO4);
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
			if_togglehwassist(ifp, CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			/* Same dependency for the IPv6 variants. */
			if (IFCAP_TSO6 & if_getcapenable(ifp) &&
			    !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
				mask &= ~IFCAP_TSO6;
				if_setcapenablebit(ifp, 0, IFCAP_TSO6);
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if (mask & IFCAP_RXCSUM_IPV6)
			if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6);

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
		 * sending a TSO request our way, so it's sufficient to toggle
		 * IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & if_getcapenable(ifp)) &&
			    !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
				if_printf(ifp, "enable txcsum first.\n");
				error = EAGAIN;
				goto fail;
			}
			if_togglecapenable(ifp, IFCAP_TSO4);
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & if_getcapenable(ifp)) &&
			    !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
				if_printf(ifp, "enable txcsum6 first.\n");
				error = EAGAIN;
				goto fail;
			}
			if_togglecapenable(ifp, IFCAP_TSO6);
		}
		if (mask & IFCAP_LRO) {
			if_togglecapenable(ifp, IFCAP_LRO);

			/* Safe to do this even if cxgb_up not called yet */
			cxgb_set_lro(p, if_getcapenable(ifp) & IFCAP_LRO);
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE4) {
			int enable = (if_getcapenable(ifp) ^ mask) & IFCAP_TOE4;

			error = toe_capability(p, enable);
			if (error == 0)
				if_togglecapenable(ifp, mask);
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				PORT_LOCK(p);
				cxgb_update_mac_settings(p);
				PORT_UNLOCK(p);
			}
		}
		if (mask & IFCAP_VLAN_MTU) {
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				PORT_LOCK(p);
				cxgb_update_mac_settings(p);
				PORT_UNLOCK(p);
			}
		}
		if (mask & IFCAP_VLAN_HWTSO)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
		if (mask & IFCAP_VLAN_HWCSUM)
			if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
		ADAPTER_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* No adapter lock needed; ifmedia does its own locking. */
		error = ifmedia_ioctl(ifp, ifr, &p->media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}

	return (error);
}
/* Media cannot be changed via ifconfig; always reports EOPNOTSUPP. */
static int
cxgb_media_change(if_t ifp)
{
	return (EOPNOTSUPP);
}
/*
* Translates phy->modtype to the correct Ethernet media subtype.
*/
static int
cxgb_ifm_type(int mod)
{
switch (mod) {
case phy_modtype_sr:
return (IFM_10G_SR);
case phy_modtype_lr:
return (IFM_10G_LR);
case phy_modtype_lrm:
return (IFM_10G_LRM);
case phy_modtype_twinax:
return (IFM_10G_TWINAX);
case phy_modtype_twinax_long:
return (IFM_10G_TWINAX_LONG);
case phy_modtype_none:
return (IFM_NONE);
case phy_modtype_unknown:
return (IFM_UNKNOWN);
}
KASSERT(0, ("%s: modtype %d unknown", __func__, mod));
return (IFM_UNKNOWN);
}
/*
* Rebuilds the ifmedia list for this port, and sets the current media.
*/
static void
cxgb_build_medialist(struct port_info *p)
{
	struct cphy *phy = &p->phy;
	struct ifmedia *media = &p->media;
	int mod = phy->modtype;
	int m = IFM_ETHER | IFM_FDX;

	/* The media list is rebuilt under the port lock. */
	PORT_LOCK(p);

	ifmedia_removeall(media);
	if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) {
		/* Copper (RJ45) */

		if (phy->caps & SUPPORTED_10000baseT_Full)
			ifmedia_add(media, m | IFM_10G_T, mod, NULL);

		if (phy->caps & SUPPORTED_1000baseT_Full)
			ifmedia_add(media, m | IFM_1000_T, mod, NULL);

		if (phy->caps & SUPPORTED_100baseT_Full)
			ifmedia_add(media, m | IFM_100_TX, mod, NULL);

		if (phy->caps & SUPPORTED_10baseT_Full)
			ifmedia_add(media, m | IFM_10_T, mod, NULL);

		ifmedia_add(media, IFM_ETHER | IFM_AUTO, mod, NULL);
		ifmedia_set(media, IFM_ETHER | IFM_AUTO);

	} else if (phy->caps & SUPPORTED_TP) {
		/* Copper (CX4) */

		KASSERT(phy->caps & SUPPORTED_10000baseT_Full,
		    ("%s: unexpected cap 0x%x", __func__, phy->caps));

		ifmedia_add(media, m | IFM_10G_CX4, mod, NULL);
		ifmedia_set(media, m | IFM_10G_CX4);

	} else if (phy->caps & SUPPORTED_FIBRE &&
	    phy->caps & SUPPORTED_10000baseT_Full) {
		/* 10G optical (but includes SFP+ twinax) */

		m |= cxgb_ifm_type(mod);
		/* No module plugged in: report half-duplex "none". */
		if (IFM_SUBTYPE(m) == IFM_NONE)
			m &= ~IFM_FDX;

		ifmedia_add(media, m, mod, NULL);
		ifmedia_set(media, m);

	} else if (phy->caps & SUPPORTED_FIBRE &&
	    phy->caps & SUPPORTED_1000baseT_Full) {
		/* 1G optical */

		/* XXX: Lie and claim to be SX, could actually be any 1G-X */
		ifmedia_add(media, m | IFM_1000_SX, mod, NULL);
		ifmedia_set(media, m | IFM_1000_SX);

	} else {
		KASSERT(0, ("%s: don't know how to handle 0x%x.", __func__,
		    phy->caps));
	}

	PORT_UNLOCK(p);
}
/* ifmedia status callback: reports link state and the active media. */
static void
cxgb_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct port_info *p = if_getsoftc(ifp);
	struct ifmedia_entry *cur = p->media.ifm_cur;
	int speed = p->link_config.speed;

	/* Rebuild the media list if the transceiver module has changed. */
	if (cur->ifm_data != p->phy.modtype) {
		cxgb_build_medialist(p);
		cur = p->media.ifm_cur;
	}

	ifmr->ifm_status = IFM_AVALID;
	if (!p->link_config.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/*
	 * active and current will differ iff current media is autoselect. That
	 * can happen only for copper RJ45.
	 */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;
	KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg,
	    ("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps));

	/* Translate the negotiated speed into the copper media subtype. */
	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == SPEED_10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
		    speed));
}
/* if_get_counter handler: reports interface stats from the MAC counters. */
static uint64_t
cxgb_get_counter(if_t ifp, ift_counter c)
{
	struct port_info *pi = if_getsoftc(ifp);
	struct adapter *sc = pi->adapter;
	struct cmac *mac = &pi->mac;
	struct mac_stats *mstats = &mac->stats;

	/* Rate-limited refresh of the underlying hardware counters. */
	cxgb_refresh_stats(pi);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (mstats->rx_frames);

	case IFCOUNTER_IERRORS:
		return (mstats->rx_jabber + mstats->rx_data_errs +
		    mstats->rx_sequence_errs + mstats->rx_runt +
		    mstats->rx_too_long + mstats->rx_mac_internal_errs +
		    mstats->rx_short + mstats->rx_fcs_errs);

	case IFCOUNTER_OPACKETS:
		return (mstats->tx_frames);

	case IFCOUNTER_OERRORS:
		return (mstats->tx_excess_collisions + mstats->tx_underrun +
		    mstats->tx_len_errs + mstats->tx_mac_internal_errs +
		    mstats->tx_excess_deferral + mstats->tx_fcs_errs);

	case IFCOUNTER_COLLISIONS:
		return (mstats->tx_total_collisions);

	case IFCOUNTER_IBYTES:
		return (mstats->rx_octets);

	case IFCOUNTER_OBYTES:
		return (mstats->tx_octets);

	case IFCOUNTER_IMCASTS:
		return (mstats->rx_mcast_frames);

	case IFCOUNTER_OMCASTS:
		return (mstats->tx_mcast_frames);

	case IFCOUNTER_IQDROPS:
		return (mstats->rx_cong_drops);

	case IFCOUNTER_OQDROPS: {
		int i;
		uint64_t drops;

		/* Sum software drops across this port's ethernet tx rings. */
		drops = 0;
		if (sc->flags & FULL_INIT_DONE) {
			for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
				drops += sc->sge.qs[i].txq[TXQ_ETH].txq_mr->br_drops;
		}

		return (drops);
	}

	default:
		return (if_get_counter_default(ifp, c));
	}
}
/*
 * Interrupt handler for slow (async/external) events: masks PL interrupts
 * and defers processing to the slow interrupt task.
 */
static void
cxgb_async_intr(void *data)
{
	adapter_t *sc = data;

	t3_write_reg(sc, A_PL_INT_ENABLE0, 0);
	/* Read back — presumably to flush the posted write; confirm. */
	(void) t3_read_reg(sc, A_PL_INT_ENABLE0);
	taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
}
static void
link_check_callout(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	/* Only schedule the link check task while the port is open. */
	if (isset(&sc->open_device_map, pi->port_id))
		taskqueue_enqueue(sc->tq, &pi->link_check_task);
}
/*
 * Task that checks/updates the link state of a port.  Falls back to
 * periodic polling whenever the link cannot be trusted to interrupt:
 * link fault, no link IRQ capability, or link currently down.
 */
static void
check_link_status(void *arg, int pending)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	if (!isset(&sc->open_device_map, pi->port_id))
		return;	/* port closed while the task was queued */

	t3_link_changed(sc, pi->port_id);

	if (pi->link_fault || !(pi->phy.caps & SUPPORTED_LINK_IRQ) ||
	    pi->link_config.link_ok == 0)
		callout_reset(&pi->link_check_ch, hz, link_check_callout, pi);
}
/*
 * NOTE(review): non-static entry point, presumably invoked by the shared
 * (OS-independent) cxgb code when a link interrupt is seen — confirm at
 * the call sites.
 */
void
t3_os_link_intr(struct port_info *pi)
{
	/*
	 * Schedule a link check in the near future. If the link is flapping
	 * rapidly we'll keep resetting the callout and delaying the check until
	 * things stabilize a bit.
	 */
	callout_reset(&pi->link_check_ch, hz / 4, link_check_callout, pi);
}
/*
 * Watchdog for the T3B2 MAC: polls each open port's MAC and, based on
 * the watchdog task's verdict, accounts a toggle or performs a full MAC
 * reset and reconfiguration.
 */
static void
check_t3b2_mac(struct adapter *sc)
{
	int i;

	if (sc->flags & CXGB_SHUTDOWN)
		return;

	for_each_port(sc, i) {
		struct port_info *p = &sc->port[i];
		int status;
#ifdef INVARIANTS
		if_t ifp = p->ifp;
#endif

		/* Skip closed ports and ports whose link is already bad. */
		if (!isset(&sc->open_device_map, p->port_id) || p->link_fault ||
		    !p->link_config.link_ok)
			continue;

		KASSERT(if_getdrvflags(ifp) & IFF_DRV_RUNNING,
		    ("%s: state mismatch (drv_flags %x, device_map %x)",
		    __func__, if_getdrvflags(ifp), sc->open_device_map));

		PORT_LOCK(p);
		status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			/* Status 2: bring the MAC back up from scratch. */
			struct cmac *mac = &p->mac;

			cxgb_update_mac_settings(p);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(sc, p->port_id);
			p->mac.stats.num_resets++;
		}
		PORT_UNLOCK(p);
	}
}
/* Per-second adapter callout: defers the real work to the tick task. */
static void
cxgb_tick(void *arg)
{
	adapter_t *sc = (adapter_t *)arg;

	if (sc->flags & CXGB_SHUTDOWN)
		return;

	taskqueue_enqueue(sc->tq, &sc->tick_task);
	/* Re-arm for the next second. */
	callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
}
/*
 * Refresh the port's MAC statistics from hardware, rate-limited to at
 * most once every 250ms.
 */
void
cxgb_refresh_stats(struct port_info *pi)
{
	struct timeval tv;
	const struct timeval interval = {0, 250000}; /* 250ms */

	getmicrotime(&tv);
	timevalsub(&tv, &interval);
	if (timevalcmp(&tv, &pi->last_refreshed, <))
		return;	/* last refresh was less than 250ms ago */

	PORT_LOCK(pi);
	t3_mac_update_stats(&pi->mac);
	PORT_UNLOCK(pi);
	getmicrotime(&pi->last_refreshed);
}
/*
 * Tick task: runs the T3B2 MAC watchdog, accounts starved response queues
 * and empty free lists, and refreshes per-port MAC statistics.
 */
static void
cxgb_tick_handler(void *arg, int count)
{
	adapter_t *sc = (adapter_t *)arg;
	const struct adapter_params *p = &sc->params;
	int i;
	uint32_t cause, reset;

	if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE))
		return;

	if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map)
		check_t3b2_mac(sc);

	cause = t3_read_reg(sc, A_SG_INT_CAUSE) & (F_RSPQSTARVE | F_FLEMPTY);
	if (cause) {
		struct sge_qset *qs = &sc->sge.qs[0];
		uint32_t mask, v;

		v = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS) & ~0xff00;

		/* One starvation bit per response queue. */
		mask = 1;
		for (i = 0; i < SGE_QSETS; i++) {
			if (v & mask)
				qs[i].rspq.starved++;
			mask <<= 1;
		}

		mask <<= SGE_QSETS; /* skip RSPQXDISABLED */

		/* Then one empty bit per free list (two lists per qset). */
		for (i = 0; i < SGE_QSETS * 2; i++) {
			if (v & mask) {
				qs[i / 2].fl[i % 2].empty++;
			}
			mask <<= 1;
		}

		/* clear */
		t3_write_reg(sc, A_SG_RSPQ_FL_STATUS, v);
		t3_write_reg(sc, A_SG_INT_CAUSE, cause);
	}

	for (i = 0; i < sc->params.nports; i++) {
		struct port_info *pi = &sc->port[i];
		struct cmac *mac = &pi->mac;

		if (!isset(&sc->open_device_map, pi->port_id))
			continue;

		cxgb_refresh_stats(pi);

		if (mac->multiport)
			continue;

		/* Count rx fifo overflows, once per second */
		cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset);
		reset = 0;
		if (cause & F_RXFIFO_OVERFLOW) {
			mac->stats.rx_fifo_ovfl++;
			reset |= F_RXFIFO_OVERFLOW;
		}
		t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset);
	}
}
/*
 * Placeholder for re-writing BARs 1/3/5 on 32-bit systems.  The body is
 * compiled out via "#if ... && 0" and this function currently does
 * nothing.
 */
static void
touch_bars(device_t dev)
{
	/*
	 * Don't enable yet
	 */
#if !defined(__LP64__) && 0
	u32 v;

	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
#endif
}
/*
 * Write 'len' bytes at byte 'offset' into the adapter's serial EEPROM.
 * The EEPROM is accessed in aligned 4-byte words, so an unaligned request
 * is handled with a read-modify-write through a temporary buffer.
 *
 * Returns 0 on success or an error from the seeprom routines.
 */
static int
set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
{
	uint8_t *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = pi->adapter;

	aligned_offset = offset & ~3;
	aligned_len = (len + (offset & 3) + 3) & ~3;

	if (aligned_offset != offset || aligned_len != len) {
		/*
		 * Unaligned request: read the first and last words bracketing
		 * the region, then splice the caller's data over them.  An
		 * M_WAITOK allocation never fails, so no NULL check is needed
		 * (the old "if (!buf) return ENOMEM" was dead code).
		 */
		buf = malloc(aligned_len, M_DEVBUF, M_WAITOK | M_ZERO);
		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
			    aligned_offset + aligned_len - 4,
			    (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (offset & 3), data, len);
	} else
		buf = (uint8_t *)(uintptr_t)data;

	/* Temporarily lift EEPROM write protection. */
	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		free(buf, M_DEVBUF);
	return (err);
}
/*
 * Range check used when validating ioctl parameters: negative values pass
 * (callers at e.g. CHELSIO_SET_HW_SCHED treat them as "leave unchanged"),
 * otherwise val must lie within [lo, hi].
 */
static int
in_range(int val, int lo, int hi)
{
	if (val < 0)
		return (1);
	return (val >= lo && val <= hi);
}
/* cdev open handler: stateless; privilege is checked in the ioctl itself. */
static int
cxgb_extension_open(struct cdev *dev, int flags, int fmp, struct thread *td)
{
	return (0);
}
/* cdev close handler: nothing to tear down. */
static int
cxgb_extension_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}
static int
cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
int fflag, struct thread *td)
{
int mmd, error = 0;
struct port_info *pi = dev->si_drv1;
adapter_t *sc = pi->adapter;
#ifdef PRIV_SUPPORTED
if (priv_check(td, PRIV_DRIVER)) {
if (cxgb_debug)
printf("user does not have access to privileged ioctls\n");
return (EPERM);
}
#else
if (suser(td)) {
if (cxgb_debug)
printf("user does not have access to privileged ioctls\n");
return (EPERM);
}
#endif
switch (cmd) {
case CHELSIO_GET_MIIREG: {
uint32_t val;
struct cphy *phy = &pi->phy;
struct ch_mii_data *mid = (struct ch_mii_data *)data;
if (!phy->mdio_read)
return (EOPNOTSUPP);
if (is_10G(sc)) {
mmd = mid->phy_id >> 8;
if (!mmd)
mmd = MDIO_DEV_PCS;
else if (mmd > MDIO_DEV_VEND2)
return (EINVAL);
error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
mid->reg_num, &val);
} else
error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
mid->reg_num & 0x1f, &val);
if (error == 0)
mid->val_out = val;
break;
}
case CHELSIO_SET_MIIREG: {
struct cphy *phy = &pi->phy;
struct ch_mii_data *mid = (struct ch_mii_data *)data;
if (!phy->mdio_write)
return (EOPNOTSUPP);
if (is_10G(sc)) {
mmd = mid->phy_id >> 8;
if (!mmd)
mmd = MDIO_DEV_PCS;
else if (mmd > MDIO_DEV_VEND2)
return (EINVAL);
error = phy->mdio_write(sc, mid->phy_id & 0x1f,
mmd, mid->reg_num, mid->val_in);
} else
error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
mid->reg_num & 0x1f,
mid->val_in);
break;
}
case CHELSIO_SETREG: {
struct ch_reg *edata = (struct ch_reg *)data;
if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
return (EFAULT);
t3_write_reg(sc, edata->addr, edata->val);
break;
}
case CHELSIO_GETREG: {
struct ch_reg *edata = (struct ch_reg *)data;
if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
return (EFAULT);
edata->val = t3_read_reg(sc, edata->addr);
break;
}
case CHELSIO_GET_SGE_CONTEXT: {
struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
mtx_lock_spin(&sc->sge.reg_lock);
switch (ecntxt->cntxt_type) {
case CNTXT_TYPE_EGRESS:
error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
ecntxt->data);
break;
case CNTXT_TYPE_FL:
error = -t3_sge_read_fl(sc, ecntxt->cntxt_id,
ecntxt->data);
break;
case CNTXT_TYPE_RSP:
error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id,
ecntxt->data);
break;
case CNTXT_TYPE_CQ:
error = -t3_sge_read_cq(sc, ecntxt->cntxt_id,
ecntxt->data);
break;
default:
error = EINVAL;
break;
}
mtx_unlock_spin(&sc->sge.reg_lock);
break;
}
case CHELSIO_GET_SGE_DESC: {
struct ch_desc *edesc = (struct ch_desc *)data;
int ret;
if (edesc->queue_num >= SGE_QSETS * 6)
return (EINVAL);
ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
edesc->queue_num % 6, edesc->idx, edesc->data);
if (ret < 0)
return (EINVAL);
edesc->size = ret;
break;
}
case CHELSIO_GET_QSET_PARAMS: {
struct qset_params *q;
struct ch_qset_params *t = (struct ch_qset_params *)data;
int q1 = pi->first_qset;
int nqsets = pi->nqsets;
int i;
if (t->qset_idx >= nqsets)
return EINVAL;
i = q1 + t->qset_idx;
q = &sc->params.sge.qset[i];
t->rspq_size = q->rspq_size;
t->txq_size[0] = q->txq_size[0];
t->txq_size[1] = q->txq_size[1];
t->txq_size[2] = q->txq_size[2];
t->fl_size[0] = q->fl_size;
t->fl_size[1] = q->jumbo_size;
t->polling = q->polling;
t->lro = q->lro;
t->intr_lat = q->coalesce_usecs;
t->cong_thres = q->cong_thres;
t->qnum = i;
if ((sc->flags & FULL_INIT_DONE) == 0)
t->vector = 0;
else if (sc->flags & USING_MSIX)
t->vector = rman_get_start(sc->msix_irq_res[i]);
else
t->vector = rman_get_start(sc->irq_res);
break;
}
case CHELSIO_GET_QSET_NUM: {
struct ch_reg *edata = (struct ch_reg *)data;
edata->val = pi->nqsets;
break;
}
case CHELSIO_LOAD_FW: {
uint8_t *fw_data;
uint32_t vers;
struct ch_mem_range *t = (struct ch_mem_range *)data;
/*
* You're allowed to load a firmware only before FULL_INIT_DONE
*
* FW_UPTODATE is also set so the rest of the initialization
* will not overwrite what was loaded here. This gives you the
* flexibility to load any firmware (and maybe shoot yourself in
* the foot).
*/
ADAPTER_LOCK(sc);
if (sc->open_device_map || sc->flags & FULL_INIT_DONE) {
ADAPTER_UNLOCK(sc);
return (EBUSY);
}
fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
if (!fw_data)
error = ENOMEM;
else
error = copyin(t->buf, fw_data, t->len);
if (!error)
error = -t3_load_fw(sc, fw_data, t->len);
if (t3_get_fw_version(sc, &vers) == 0) {
snprintf(&sc->fw_version[0], sizeof(sc->fw_version),
"%d.%d.%d", G_FW_VERSION_MAJOR(vers),
G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers));
}
if (!error)
sc->flags |= FW_UPTODATE;
free(fw_data, M_DEVBUF);
ADAPTER_UNLOCK(sc);
break;
}
case CHELSIO_LOAD_BOOT: {
uint8_t *boot_data;
struct ch_mem_range *t = (struct ch_mem_range *)data;
boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
if (!boot_data)
return ENOMEM;
error = copyin(t->buf, boot_data, t->len);
if (!error)
error = -t3_load_boot(sc, boot_data, t->len);
free(boot_data, M_DEVBUF);
break;
}
case CHELSIO_GET_PM: {
struct ch_pm *m = (struct ch_pm *)data;
struct tp_params *p = &sc->params.tp;
if (!is_offload(sc))
return (EOPNOTSUPP);
m->tx_pg_sz = p->tx_pg_size;
m->tx_num_pg = p->tx_num_pgs;
m->rx_pg_sz = p->rx_pg_size;
m->rx_num_pg = p->rx_num_pgs;
m->pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
break;
}
case CHELSIO_SET_PM: {
struct ch_pm *m = (struct ch_pm *)data;
struct tp_params *p = &sc->params.tp;
if (!is_offload(sc))
return (EOPNOTSUPP);
if (sc->flags & FULL_INIT_DONE)
return (EBUSY);
if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) ||
!m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1)))
return (EINVAL); /* not power of 2 */
if (!(m->rx_pg_sz & 0x14000))
return (EINVAL); /* not 16KB or 64KB */
if (!(m->tx_pg_sz & 0x1554000))
return (EINVAL);
if (m->tx_num_pg == -1)
m->tx_num_pg = p->tx_num_pgs;
if (m->rx_num_pg == -1)
m->rx_num_pg = p->rx_num_pgs;
if (m->tx_num_pg % 24 || m->rx_num_pg % 24)
return (EINVAL);
if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size ||
m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size)
return (EINVAL);
p->rx_pg_size = m->rx_pg_sz;
p->tx_pg_size = m->tx_pg_sz;
p->rx_num_pgs = m->rx_num_pg;
p->tx_num_pgs = m->tx_num_pg;
break;
}
case CHELSIO_SETMTUTAB: {
struct ch_mtus *m = (struct ch_mtus *)data;
int i;
if (!is_offload(sc))
return (EOPNOTSUPP);
if (offload_running(sc))
return (EBUSY);
if (m->nmtus != NMTUS)
return (EINVAL);
if (m->mtus[0] < 81) /* accommodate SACK */
return (EINVAL);
/*
* MTUs must be in ascending order
*/
for (i = 1; i < NMTUS; ++i)
if (m->mtus[i] < m->mtus[i - 1])
return (EINVAL);
memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus));
break;
}
case CHELSIO_GETMTUTAB: {
struct ch_mtus *m = (struct ch_mtus *)data;
if (!is_offload(sc))
return (EOPNOTSUPP);
memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
m->nmtus = NMTUS;
break;
}
case CHELSIO_GET_MEM: {
struct ch_mem_range *t = (struct ch_mem_range *)data;
struct mc7 *mem;
uint8_t *useraddr;
u64 buf[32];
/*
* Use these to avoid modifying len/addr in the return
* struct
*/
uint32_t len = t->len, addr = t->addr;
if (!is_offload(sc))
return (EOPNOTSUPP);
if (!(sc->flags & FULL_INIT_DONE))
return (EIO); /* need the memory controllers */
if ((addr & 0x7) || (len & 0x7))
return (EINVAL);
if (t->mem_id == MEM_CM)
mem = &sc->cm;
else if (t->mem_id == MEM_PMRX)
mem = &sc->pmrx;
else if (t->mem_id == MEM_PMTX)
mem = &sc->pmtx;
else
return (EINVAL);
/*
* Version scheme:
* bits 0..9: chip version
* bits 10..15: chip revision
*/
t->version = 3 | (sc->params.rev << 10);
/*
* Read 256 bytes at a time as len can be large and we don't
* want to use huge intermediate buffers.
*/
useraddr = (uint8_t *)t->buf;
while (len) {
unsigned int chunk = min(len, sizeof(buf));
error = t3_mc7_bd_read(mem, addr / 8, chunk / 8, buf);
if (error)
return (-error);
if (copyout(buf, useraddr, chunk))
return (EFAULT);
useraddr += chunk;
addr += chunk;
len -= chunk;
}
break;
}
case CHELSIO_READ_TCAM_WORD: {
struct ch_tcam_word *t = (struct ch_tcam_word *)data;
if (!is_offload(sc))
return (EOPNOTSUPP);
if (!(sc->flags & FULL_INIT_DONE))
return (EIO); /* need MC5 */
return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf);
break;
}
case CHELSIO_SET_TRACE_FILTER: {
struct ch_trace *t = (struct ch_trace *)data;
const struct trace_params *tp;
tp = (const struct trace_params *)&t->sip;
if (t->config_tx)
t3_config_trace_filter(sc, tp, 0, t->invert_match,
t->trace_tx);
if (t->config_rx)
t3_config_trace_filter(sc, tp, 1, t->invert_match,
t->trace_rx);
break;
}
case CHELSIO_SET_PKTSCHED: {
struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
if (sc->open_device_map == 0)
return (EAGAIN);
send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
p->binding);
break;
}
case CHELSIO_IFCONF_GETREGS: {
struct ch_ifconf_regs *regs = (struct ch_ifconf_regs *)data;
int reglen = cxgb_get_regs_len();
uint8_t *buf = malloc(reglen, M_DEVBUF, M_NOWAIT);
if (buf == NULL) {
return (ENOMEM);
}
if (regs->len > reglen)
regs->len = reglen;
else if (regs->len < reglen)
error = ENOBUFS;
if (!error) {
cxgb_get_regs(sc, regs, buf);
error = copyout(buf, regs->data, reglen);
}
free(buf, M_DEVBUF);
break;
}
case CHELSIO_SET_HW_SCHED: {
struct ch_hw_sched *t = (struct ch_hw_sched *)data;
unsigned int ticks_per_usec = core_ticks_per_usec(sc);
if ((sc->flags & FULL_INIT_DONE) == 0)
return (EAGAIN); /* need TP to be initialized */
if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
!in_range(t->channel, 0, 1) ||
!in_range(t->kbps, 0, 10000000) ||
!in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
!in_range(t->flow_ipg, 0,
dack_ticks_to_usec(sc, 0x7ff)))
return (EINVAL);
if (t->kbps >= 0) {
error = t3_config_sched(sc, t->kbps, t->sched);
if (error < 0)
return (-error);
}
if (t->class_ipg >= 0)
t3_set_sched_ipg(sc, t->sched, t->class_ipg);
if (t->flow_ipg >= 0) {
t->flow_ipg *= 1000; /* us -> ns */
t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
}
if (t->mode >= 0) {
int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);
t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
bit, t->mode ? bit : 0);
}
if (t->channel >= 0)
t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
1 << t->sched, t->channel << t->sched);
break;
}
case CHELSIO_GET_EEPROM: {
int i;
struct ch_eeprom *e = (struct ch_eeprom *)data;
uint8_t *buf;
if (e->offset & 3 || e->offset >= EEPROMSIZE ||
e->len > EEPROMSIZE || e->offset + e->len > EEPROMSIZE) {
return (EINVAL);
}
buf = malloc(EEPROMSIZE, M_DEVBUF, M_NOWAIT);
if (buf == NULL) {
return (ENOMEM);
}
e->magic = EEPROM_MAGIC;
for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4)
error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]);
if (!error)
error = copyout(buf + e->offset, e->data, e->len);
free(buf, M_DEVBUF);
break;
}
case CHELSIO_CLEAR_STATS: {
if (!(sc->flags & FULL_INIT_DONE))
return EAGAIN;
PORT_LOCK(pi);
t3_mac_update_stats(&pi->mac);
memset(&pi->mac.stats, 0, sizeof(pi->mac.stats));
PORT_UNLOCK(pi);
break;
}
case CHELSIO_GET_UP_LA: {
struct ch_up_la *la = (struct ch_up_la *)data;
uint8_t *buf = malloc(LA_BUFSIZE, M_DEVBUF, M_NOWAIT);
if (buf == NULL) {
return (ENOMEM);
}
if (la->bufsize < LA_BUFSIZE)
error = ENOBUFS;
if (!error)
error = -t3_get_up_la(sc, &la->stopped, &la->idx,
&la->bufsize, buf);
if (!error)
error = copyout(buf, la->data, la->bufsize);
free(buf, M_DEVBUF);
break;
}
case CHELSIO_GET_UP_IOQS: {
struct ch_up_ioqs *ioqs = (struct ch_up_ioqs *)data;
uint8_t *buf = malloc(IOQS_BUFSIZE, M_DEVBUF, M_NOWAIT);
uint32_t *v;
if (buf == NULL) {
return (ENOMEM);
}
if (ioqs->bufsize < IOQS_BUFSIZE)
error = ENOBUFS;
if (!error)
error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf);
if (!error) {
v = (uint32_t *)buf;
ioqs->ioq_rx_enable = *v++;
ioqs->ioq_tx_enable = *v++;
ioqs->ioq_rx_status = *v++;
ioqs->ioq_tx_status = *v++;
error = copyout(v, ioqs->data, ioqs->bufsize);
}
free(buf, M_DEVBUF);
break;
}
case CHELSIO_SET_FILTER: {
struct ch_filter *f = (struct ch_filter *)data;
struct filter_info *p;
unsigned int nfilters = sc->params.mc5.nfilters;
if (!is_offload(sc))
return (EOPNOTSUPP); /* No TCAM */
if (!(sc->flags & FULL_INIT_DONE))
return (EAGAIN); /* mc5 not setup yet */
if (nfilters == 0)
return (EBUSY); /* TOE will use TCAM */
/* sanity checks */
if (f->filter_id >= nfilters ||
(f->val.dip && f->mask.dip != 0xffffffff) ||
(f->val.sport && f->mask.sport != 0xffff) ||
(f->val.dport && f->mask.dport != 0xffff) ||
(f->val.vlan && f->mask.vlan != 0xfff) ||
(f->val.vlan_prio &&
f->mask.vlan_prio != FILTER_NO_VLAN_PRI) ||
(f->mac_addr_idx != 0xffff && f->mac_addr_idx > 15) ||
f->qset >= SGE_QSETS ||
sc->rrss_map[f->qset] >= RSS_TABLE_SIZE)
return (EINVAL);
/* Was allocated with M_WAITOK */
KASSERT(sc->filters, ("filter table NULL\n"));
p = &sc->filters[f->filter_id];
if (p->locked)
return (EPERM);
bzero(p, sizeof(*p));
p->sip = f->val.sip;
p->sip_mask = f->mask.sip;
p->dip = f->val.dip;
p->sport = f->val.sport;
p->dport = f->val.dport;
p->vlan = f->mask.vlan ? f->val.vlan : 0xfff;
p->vlan_prio = f->mask.vlan_prio ? (f->val.vlan_prio & 6) :
FILTER_NO_VLAN_PRI;
p->mac_hit = f->mac_hit;
p->mac_vld = f->mac_addr_idx != 0xffff;
p->mac_idx = f->mac_addr_idx;
p->pkt_type = f->proto;
p->report_filter_id = f->want_filter_id;
p->pass = f->pass;
p->rss = f->rss;
p->qset = f->qset;
error = set_filter(sc, f->filter_id, p);
if (error == 0)
p->valid = 1;
break;
}
case CHELSIO_DEL_FILTER: {
struct ch_filter *f = (struct ch_filter *)data;
struct filter_info *p;
unsigned int nfilters = sc->params.mc5.nfilters;
if (!is_offload(sc))
return (EOPNOTSUPP);
if (!(sc->flags & FULL_INIT_DONE))
return (EAGAIN);
if (nfilters == 0 || sc->filters == NULL)
return (EINVAL);
if (f->filter_id >= nfilters)
return (EINVAL);
p = &sc->filters[f->filter_id];
if (p->locked)
return (EPERM);
if (!p->valid)
return (EFAULT); /* Read "Bad address" as "Bad index" */
bzero(p, sizeof(*p));
p->sip = p->sip_mask = 0xffffffff;
p->vlan = 0xfff;
p->vlan_prio = FILTER_NO_VLAN_PRI;
p->pkt_type = 1;
error = set_filter(sc, f->filter_id, p);
break;
}
case CHELSIO_GET_FILTER: {
struct ch_filter *f = (struct ch_filter *)data;
struct filter_info *p;
unsigned int i, nfilters = sc->params.mc5.nfilters;
if (!is_offload(sc))
return (EOPNOTSUPP);
if (!(sc->flags & FULL_INIT_DONE))
return (EAGAIN);
if (nfilters == 0 || sc->filters == NULL)
return (EINVAL);
i = f->filter_id == 0xffffffff ? 0 : f->filter_id + 1;
for (; i < nfilters; i++) {
p = &sc->filters[i];
if (!p->valid)
continue;
bzero(f, sizeof(*f));
f->filter_id = i;
f->val.sip = p->sip;
f->mask.sip = p->sip_mask;
f->val.dip = p->dip;
f->mask.dip = p->dip ? 0xffffffff : 0;
f->val.sport = p->sport;
f->mask.sport = p->sport ? 0xffff : 0;
f->val.dport = p->dport;
f->mask.dport = p->dport ? 0xffff : 0;
f->val.vlan = p->vlan == 0xfff ? 0 : p->vlan;
f->mask.vlan = p->vlan == 0xfff ? 0 : 0xfff;
f->val.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
0 : p->vlan_prio;
f->mask.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
0 : FILTER_NO_VLAN_PRI;
f->mac_hit = p->mac_hit;
f->mac_addr_idx = p->mac_vld ? p->mac_idx : 0xffff;
f->proto = p->pkt_type;
f->want_filter_id = p->report_filter_id;
f->pass = p->pass;
f->rss = p->rss;
f->qset = p->qset;
break;
}
if (i == nfilters)
f->filter_id = 0xffffffff;
break;
}
default:
return (EOPNOTSUPP);
break;
}
return (error);
}
/*
 * Copy the inclusive register range [start, end] into buf at offset
 * 'start'.  Registers are read 32 bits at a time.
 */
static __inline void
reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
    unsigned int end)
{
	uint32_t *dst = (uint32_t *)(buf + start);
	unsigned int reg;

	for (reg = start; reg <= end; reg += sizeof(uint32_t))
		*dst++ = t3_read_reg(ap, reg);
}
#define T3_REGMAP_SIZE (3 * 1024)

/*
 * Size, in bytes, of the register dump produced by cxgb_get_regs().
 */
static int
cxgb_get_regs_len(void)
{

	return (T3_REGMAP_SIZE);
}
/*
 * Fill 'buf' with a dump of the adapter's registers and record the
 * dump format version in regs->version.
 */
static void
cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf)
{
	const struct {
		unsigned int start, end;
	} dump_range[] = {
		{ 0, A_SG_RSPQ_CREDIT_RETURN },
		{ A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT },
		{ A_ULPTX_CONFIG, A_MPS_INT_CAUSE },
		{ A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA },
		{ A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3 },
		{ A_XGM_SERDES_STATUS0, XGM_REG(A_XGM_SERDES_STAT3, 1) },
		{ XGM_REG(A_XGM_SERDES_STATUS0, 1),
		    XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1) },
	};
	unsigned int i;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);

	/*
	 * We skip the MAC statistics registers because they are
	 * clear-on-read.  Also reading multi-register stats would need to
	 * synchronize with the periodic mac stats accumulation.  Hard to
	 * justify the complexity.
	 */
	memset(buf, 0, cxgb_get_regs_len());
	for (i = 0; i < sizeof(dump_range) / sizeof(dump_range[0]); i++)
		reg_block_dump(sc, buf, dump_range[i].start,
		    dump_range[i].end);
}
/*
 * Allocate the software filter table (one entry per hardware filter).
 * The last entry is initialized as a locked, valid, pass-all filter.
 * May sleep (M_WAITOK).  Always returns 0.
 */
static int
alloc_filters(struct adapter *sc)
{
	struct filter_info *last;
	unsigned int nfilters = sc->params.mc5.nfilters;

	if (nfilters == 0)
		return (0);

	sc->filters = malloc(nfilters * sizeof(*sc->filters), M_DEVBUF,
	    M_WAITOK | M_ZERO);

	/* Fixed catch-all entry at the end of the table. */
	last = &sc->filters[nfilters - 1];
	last->vlan = 0xfff;
	last->vlan_prio = FILTER_NO_VLAN_PRI;
	last->pass = last->rss = last->valid = last->locked = 1;

	return (0);
}
static int
setup_hw_filters(struct adapter *sc)
{
int i, rc;
unsigned int nfilters = sc->params.mc5.nfilters;
if (!sc->filters)
return (0);
t3_enable_filters(sc);
for (i = rc = 0; i < nfilters && !rc; i++) {
if (sc->filters[i].locked)
rc = set_filter(sc, i, &sc->filters[i]);
}
return (rc);
}
/*
 * Program hardware filter 'id' with the parameters in 'f'.
 *
 * The filter is installed via a single atomic bypass work request
 * carrying a CPL_PASS_OPEN_REQ (the TCAM entry) followed by two
 * CPL_SET_TCB_FIELD commands.  If the filter passes packets to a
 * specific queue set without RSS, a separate work request programs the
 * destination (TCB word 25) from sc->rrss_map.  Always returns 0.
 */
static int
set_filter(struct adapter *sc, int id, const struct filter_info *f)
{
	int len;
	struct mbuf *m;
	struct ulp_txpkt *txpkt;
	struct work_request_hdr *wr;
	struct cpl_pass_open_req *oreq;
	struct cpl_set_tcb_field *sreq;

	/* One mbuf holds the WR header + open request + 2 TCB writes. */
	len = sizeof(*wr) + sizeof(*oreq) + 2 * sizeof(*sreq);
	KASSERT(len <= MHLEN, ("filter request too big for an mbuf"));

	/*
	 * Convert the driver-relative filter id to an absolute TCAM
	 * index (filters sit above the routing region in the TCAM).
	 */
	id += t3_mc5_size(&sc->mc5) - sc->params.mc5.nroutes -
	    sc->params.mc5.nfilters;

	m = m_gethdr(M_WAITOK, MT_DATA);
	m->m_len = m->m_pkthdr.len = len;
	bzero(mtod(m, char *), len);

	/* Atomic bypass work request wrapping everything below. */
	wr = mtod(m, struct work_request_hdr *);
	wr->wrh_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC);

	/* The match criteria, expressed as a pass-open request. */
	oreq = (struct cpl_pass_open_req *)(wr + 1);
	txpkt = (struct ulp_txpkt *)oreq;
	txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
	txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*oreq) / 8));
	OPCODE_TID(oreq) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, id));
	oreq->local_port = htons(f->dport);
	oreq->peer_port = htons(f->sport);
	oreq->local_ip = htonl(f->dip);
	oreq->peer_ip = htonl(f->sip);
	oreq->peer_netmask = htonl(f->sip_mask);
	oreq->opt0h = 0;
	oreq->opt0l = htonl(F_NO_OFFLOAD);
	oreq->opt1 = htonl(V_MAC_MATCH_VALID(f->mac_vld) |
	    V_CONN_POLICY(CPL_CONN_POLICY_FILTER) |
	    V_VLAN_PRI(f->vlan_prio >> 1) |
	    V_VLAN_PRI_VALID(f->vlan_prio != FILTER_NO_VLAN_PRI) |
	    V_PKT_TYPE(f->pkt_type) | V_OPT1_VLAN(f->vlan) |
	    V_MAC_MATCH(f->mac_idx | (f->mac_hit << 4)));

	/* Two TCB field writes (words 1 and 0) complete the setup. */
	sreq = (struct cpl_set_tcb_field *)(oreq + 1);
	set_tcb_field_ulp(sreq, id, 1, 0x1800808000ULL,
	    (f->report_filter_id << 15) | (1 << 23) |
	    ((u64)f->pass << 35) | ((u64)!f->rss << 36));
	set_tcb_field_ulp(sreq + 1, id, 0, 0xffffffff, (2 << 19) | 1);
	t3_mgmt_tx(sc, m);

	if (f->pass && !f->rss) {
		/*
		 * Non-RSS pass filter: point it at this queue set's
		 * response queue via the rrss_map entry (TCB word 25).
		 */
		len = sizeof(*sreq);
		m = m_gethdr(M_WAITOK, MT_DATA);
		m->m_len = m->m_pkthdr.len = len;
		bzero(mtod(m, char *), len);
		sreq = mtod(m, struct cpl_set_tcb_field *);
		sreq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		mk_set_tcb_field(sreq, id, 25, 0x3f80000,
		    (u64)sc->rrss_map[f->qset] << 19);
		t3_mgmt_tx(sc, m);
	}
	return 0;
}
/*
 * Fill in a CPL_SET_TCB_FIELD request: write (val & mask) to TCB word
 * 'word' of tid, with no reply requested.
 */
static inline void
mk_set_tcb_field(struct cpl_set_tcb_field *req, unsigned int tid,
    unsigned int word, u64 mask, u64 val)
{

	req->reply = V_NO_REPLY(1);
	req->cpu_idx = 0;
	req->word = htons(word);
	req->mask = htobe64(mask);
	req->val = htobe64(val);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
}
/*
 * Like mk_set_tcb_field(), but for a request embedded in a ULP_TXPKT
 * inside a larger work request: prepend the ULP_TXPKT header first.
 */
static inline void
set_tcb_field_ulp(struct cpl_set_tcb_field *req, unsigned int tid,
    unsigned int word, u64 mask, u64 val)
{
	struct ulp_txpkt *ulpmc = (struct ulp_txpkt *)req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
	ulpmc->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8));
	mk_set_tcb_field(req, tid, word, mask, val);
}
/*
 * Invoke 'func' on every adapter on the global list, holding the list
 * lock across the whole walk.
 */
void
t3_iterate(void (*func)(struct adapter *, void *), void *arg)
{
	struct adapter *adap;

	mtx_lock(&t3_list_lock);
	SLIST_FOREACH(adap, &t3_list, link) {
		/*
		 * func should not make any assumptions about what state
		 * the adapter is in - the only guarantee is that
		 * adap->sc_lock is a valid lock.
		 */
		func(adap, arg);
	}
	mtx_unlock(&t3_list_lock);
}
#ifdef TCP_OFFLOAD
/*
 * Enable or disable TOE capability on a port.  Must be called with the
 * adapter lock held.  Per-port TOE state is tracked as a bit in
 * sc->offload_map; the TOM ULD is activated on first enable.
 * Returns 0 on success or an errno.
 */
static int
toe_capability(struct port_info *pi, int enable)
{
	int rc;
	struct adapter *sc = pi->adapter;

	ADAPTER_LOCK_ASSERT_OWNED(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		/* The adapter must be fully initialized first. */
		if (!(sc->flags & FULL_INIT_DONE)) {
			log(LOG_WARNING,
			    "You must enable a cxgb interface first\n");
			return (EAGAIN);
		}

		/* Nothing to do if TOE is already enabled on this port. */
		if (isset(&sc->offload_map, pi->port_id))
			return (0);

		if (!(sc->flags & TOM_INIT_DONE)) {
			/* First enable on this adapter: activate TOM. */
			rc = t3_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t3_tom.ko before trying "
				    "to enable TOE on a cxgb interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(sc->flags & TOM_INIT_DONE,
			    ("%s: TOM activated but flag not set", __func__));
		}

		setbit(&sc->offload_map, pi->port_id);

		/*
		 * XXX: Temporary code to allow iWARP to be enabled when TOE is
		 * enabled on any port.  Need to figure out how to enable,
		 * disable, load, and unload iWARP cleanly.
		 */
		if (!isset(&sc->offload_map, MAX_NPORTS) &&
		    t3_activate_uld(sc, ULD_IWARP) == 0)
			setbit(&sc->offload_map, MAX_NPORTS);
	} else {
		/* Nothing to do if TOE was never enabled on this port. */
		if (!isset(&sc->offload_map, pi->port_id))
			return (0);

		KASSERT(sc->flags & TOM_INIT_DONE,
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}
/*
 * Add an upper layer driver to the global list.  Fails with EEXIST if
 * a ULD with the same id is already registered.
 */
int
t3_register_uld(struct uld_info *ui)
{
	struct uld_info *u;
	int rc = 0;

	mtx_lock(&t3_uld_list_lock);
	SLIST_FOREACH(u, &t3_uld_list, link) {
		if (u->uld_id == ui->uld_id) {
			rc = EEXIST;
			break;
		}
	}
	if (rc == 0) {
		SLIST_INSERT_HEAD(&t3_uld_list, ui, link);
		ui->refcount = 0;
	}
	mtx_unlock(&t3_uld_list_lock);
	return (rc);
}
/*
 * Remove an upper layer driver from the global list.  Returns EBUSY if
 * the ULD is still in use, EINVAL if it was never registered.
 */
int
t3_unregister_uld(struct uld_info *ui)
{
	struct uld_info *u;
	int rc = EINVAL;	/* not found */

	mtx_lock(&t3_uld_list_lock);
	SLIST_FOREACH(u, &t3_uld_list, link) {
		if (u != ui)
			continue;
		if (ui->refcount > 0) {
			rc = EBUSY;	/* still attached to an adapter */
		} else {
			SLIST_REMOVE(&t3_uld_list, ui, uld_info, link);
			rc = 0;
		}
		break;
	}
	mtx_unlock(&t3_uld_list_lock);
	return (rc);
}
/*
 * Activate the ULD with the given id on this adapter.  Returns EAGAIN
 * if no such ULD is registered (e.g. its module is not loaded).
 */
int
t3_activate_uld(struct adapter *sc, int id)
{
	struct uld_info *ui;
	int rc = EAGAIN;	/* ULD not registered */

	mtx_lock(&t3_uld_list_lock);
	SLIST_FOREACH(ui, &t3_uld_list, link) {
		if (ui->uld_id != id)
			continue;
		rc = ui->activate(sc);
		if (rc == 0)
			ui->refcount++;
		break;
	}
	mtx_unlock(&t3_uld_list_lock);
	return (rc);
}
/*
 * Deactivate the ULD with the given id on this adapter.  Returns
 * EINVAL if no such ULD is registered.
 */
int
t3_deactivate_uld(struct adapter *sc, int id)
{
	struct uld_info *ui;
	int rc = EINVAL;	/* ULD not registered */

	mtx_lock(&t3_uld_list_lock);
	SLIST_FOREACH(ui, &t3_uld_list, link) {
		if (ui->uld_id != id)
			continue;
		rc = ui->deactivate(sc);
		if (rc == 0)
			ui->refcount--;
		break;
	}
	mtx_unlock(&t3_uld_list_lock);
	return (rc);
}
/*
 * Default CPL handler: free the message and report that nobody was
 * expecting it.
 */
static int
cpl_not_handled(struct sge_qset *qs __unused, struct rsp_desc *r __unused,
    struct mbuf *m)
{

	m_freem(m);
	return (EDOOFUS);
}
/*
 * Install 'h' as the handler for CPL messages with the given opcode.
 * A NULL 'h' reinstalls the default handler, which drops the message.
 * The store uses release semantics so receive paths may load the
 * handler pointer without a lock.  Returns 0, or EINVAL for an
 * out-of-range opcode.
 */
int
t3_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
{
	uintptr_t *loc, new;

	/*
	 * 'opcode' is signed: reject negative values too, or they would
	 * index cpl_handler[] out of bounds.
	 */
	if (opcode < 0 || opcode >= NUM_CPL_HANDLERS)
		return (EINVAL);
	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
	loc = (uintptr_t *) &sc->cpl_handler[opcode];
	atomic_store_rel_ptr(loc, new);
	return (0);
}
#endif
/*
 * Module event handler.  MOD_LOAD initializes the global adapter list
 * (and, with TCP_OFFLOAD, the ULD list); MOD_UNLOAD tears them down,
 * refusing with EBUSY while any entry remains.  Other events return 0.
 */
static int
cxgbc_mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;

	switch (cmd) {
	case MOD_LOAD:
		mtx_init(&t3_list_lock, "T3 adapters", 0, MTX_DEF);
		SLIST_INIT(&t3_list);
#ifdef TCP_OFFLOAD
		mtx_init(&t3_uld_list_lock, "T3 ULDs", 0, MTX_DEF);
		SLIST_INIT(&t3_uld_list);
#endif
		break;
	case MOD_UNLOAD:
#ifdef TCP_OFFLOAD
		/* Refuse to unload while any ULD is still registered. */
		mtx_lock(&t3_uld_list_lock);
		if (!SLIST_EMPTY(&t3_uld_list)) {
			rc = EBUSY;
			mtx_unlock(&t3_uld_list_lock);
			break;
		}
		mtx_unlock(&t3_uld_list_lock);
		mtx_destroy(&t3_uld_list_lock);
#endif
		/* Likewise while any adapter is still attached. */
		mtx_lock(&t3_list_lock);
		if (!SLIST_EMPTY(&t3_list)) {
			rc = EBUSY;
			mtx_unlock(&t3_list_lock);
			break;
		}
		mtx_unlock(&t3_list_lock);
		mtx_destroy(&t3_list_lock);
		break;
	}

	return (rc);
}
#ifdef DEBUGNET
/*
 * debugnet(4) init callback: report the number of rx rings and the
 * geometry of free list 1's clusters.
 */
static void
cxgb_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
{
	struct port_info *pi = if_getsoftc(ifp);
	adapter_t *adap = pi->adapter;

	ADAPTER_LOCK(adap);
	*nrxr = adap->nqsets;
	*ncl = adap->sge.qs[0].fl[1].size;
	*clsize = adap->sge.qs[0].fl[1].buf_size;
	ADAPTER_UNLOCK(adap);
}
/*
 * debugnet(4) event callback.  On DEBUGNET_START, reattach every queue
 * set's free lists to the regular mbuf zones and disable LRO.
 */
static void
cxgb_debugnet_event(if_t ifp, enum debugnet_ev event)
{
	struct port_info *pi;
	struct sge_qset *qs;
	int i;

	if (event != DEBUGNET_START)
		return;

	pi = if_getsoftc(ifp);
	for (i = 0; i < pi->adapter->nqsets; i++) {
		qs = &pi->adapter->sge.qs[i];

		/* Need to reinit after debugnet_mbuf_start(). */
		qs->fl[0].zone = zone_pack;
		qs->fl[1].zone = zone_clust;
		qs->lro.enabled = 0;
	}
}
/*
 * debugnet(4) transmit callback: encapsulate the mbuf on this port's
 * first queue set.  Fails with ENOENT unless the interface is running
 * and not output-blocked.
 */
static int
cxgb_debugnet_transmit(if_t ifp, struct mbuf *m)
{
	struct port_info *pi = if_getsoftc(ifp);
	struct sge_qset *qs;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (ENOENT);

	qs = &pi->adapter->sge.qs[pi->first_qset];
	return (cxgb_debugnet_encap(qs, &m));
}
/*
 * debugnet(4) poll callback: drain rx on every queue set and tx on
 * this port's first queue set.  Fails with ENOENT if the interface is
 * not running.
 */
static int
cxgb_debugnet_poll(if_t ifp, int count)
{
	struct port_info *pi = if_getsoftc(ifp);
	adapter_t *adap;
	int i;

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return (ENOENT);

	adap = pi->adapter;
	for (i = 0; i < adap->nqsets; i++)
		(void)cxgb_debugnet_poll_rx(adap, &adap->sge.qs[i]);
	(void)cxgb_debugnet_poll_tx(&adap->sge.qs[pi->first_qset]);

	return (0);
}
#endif /* DEBUGNET */
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index 08cad0324c28..ab477595bdb9 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -1,13309 +1,13298 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2011 Chelsio Communications, Inc.
* All rights reserved.
* Written by: Navdeep Parhar <np@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_ratelimit.h"
#include "opt_rss.h"
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#ifdef RSS
#include <net/rss_config.h>
#endif
#include <netinet/in.h>
#include <netinet/ip.h>
#ifdef KERN_TLS
#include <netinet/tcp_seq.h>
#endif
#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#include <machine/cputypes.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#endif
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_lex.h>
#endif
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "cudbg/cudbg.h"
#include "t4_clip.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"
#include "t4_if.h"
#include "t4_smt.h"
/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static int t4_child_location(device_t, device_t, struct sbuf *);
static int t4_ready(device_t);
static int t4_read_port_device(device_t, int, device_t *);
static int t4_suspend(device_t);
static int t4_resume(device_t);
static int t4_reset_prepare(device_t, device_t);
static int t4_reset_post(device_t, device_t);
static device_method_t t4_methods[] = {
DEVMETHOD(device_probe, t4_probe),
DEVMETHOD(device_attach, t4_attach),
DEVMETHOD(device_detach, t4_detach),
DEVMETHOD(device_suspend, t4_suspend),
DEVMETHOD(device_resume, t4_resume),
DEVMETHOD(bus_child_location, t4_child_location),
DEVMETHOD(bus_reset_prepare, t4_reset_prepare),
DEVMETHOD(bus_reset_post, t4_reset_post),
DEVMETHOD(t4_is_main_ready, t4_ready),
DEVMETHOD(t4_read_port_device, t4_read_port_device),
DEVMETHOD_END
};
static driver_t t4_driver = {
"t4nex",
t4_methods,
sizeof(struct adapter)
};
/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
device_method_t cxgbe_methods[] = {
DEVMETHOD(device_probe, cxgbe_probe),
DEVMETHOD(device_attach, cxgbe_attach),
DEVMETHOD(device_detach, cxgbe_detach),
{ 0, 0 }
};
static driver_t cxgbe_driver = {
"cxgbe",
cxgbe_methods,
sizeof(struct port_info)
};
/* T4 VI (vcxgbe) interface */
static int vcxgbe_probe(device_t);
static int vcxgbe_attach(device_t);
static int vcxgbe_detach(device_t);
static device_method_t vcxgbe_methods[] = {
DEVMETHOD(device_probe, vcxgbe_probe),
DEVMETHOD(device_attach, vcxgbe_attach),
DEVMETHOD(device_detach, vcxgbe_detach),
{ 0, 0 }
};
static driver_t vcxgbe_driver = {
"vcxgbe",
vcxgbe_methods,
sizeof(struct vi_info)
};
static d_ioctl_t t4_ioctl;
static struct cdevsw t4_cdevsw = {
.d_version = D_VERSION,
.d_ioctl = t4_ioctl,
.d_name = "t4nex",
};
/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
DEVMETHOD(device_probe, t5_probe),
DEVMETHOD(device_attach, t4_attach),
DEVMETHOD(device_detach, t4_detach),
DEVMETHOD(device_suspend, t4_suspend),
DEVMETHOD(device_resume, t4_resume),
DEVMETHOD(bus_child_location, t4_child_location),
DEVMETHOD(bus_reset_prepare, t4_reset_prepare),
DEVMETHOD(bus_reset_post, t4_reset_post),
DEVMETHOD(t4_is_main_ready, t4_ready),
DEVMETHOD(t4_read_port_device, t4_read_port_device),
DEVMETHOD_END
};
static driver_t t5_driver = {
"t5nex",
t5_methods,
sizeof(struct adapter)
};
/* T5 port (cxl) interface */
static driver_t cxl_driver = {
"cxl",
cxgbe_methods,
sizeof(struct port_info)
};
/* T5 VI (vcxl) interface */
static driver_t vcxl_driver = {
"vcxl",
vcxgbe_methods,
sizeof(struct vi_info)
};
/* T6 bus driver interface */
static int t6_probe(device_t);
static device_method_t t6_methods[] = {
DEVMETHOD(device_probe, t6_probe),
DEVMETHOD(device_attach, t4_attach),
DEVMETHOD(device_detach, t4_detach),
DEVMETHOD(device_suspend, t4_suspend),
DEVMETHOD(device_resume, t4_resume),
DEVMETHOD(bus_child_location, t4_child_location),
DEVMETHOD(bus_reset_prepare, t4_reset_prepare),
DEVMETHOD(bus_reset_post, t4_reset_post),
DEVMETHOD(t4_is_main_ready, t4_ready),
DEVMETHOD(t4_read_port_device, t4_read_port_device),
DEVMETHOD_END
};
static driver_t t6_driver = {
"t6nex",
t6_methods,
sizeof(struct adapter)
};
/* T6 port (cc) interface */
static driver_t cc_driver = {
"cc",
cxgbe_methods,
sizeof(struct port_info)
};
/* T6 VI (vcc) interface */
static driver_t vcc_driver = {
"vcc",
vcxgbe_methods,
sizeof(struct vi_info)
};
/* ifnet interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(if_t, unsigned long, caddr_t);
static int cxgbe_transmit(if_t, struct mbuf *);
static void cxgbe_qflush(if_t);
#if defined(KERN_TLS) || defined(RATELIMIT)
static int cxgbe_snd_tag_alloc(if_t, union if_snd_tag_alloc_params *,
struct m_snd_tag **);
#endif
MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
/*
* Correct lock order when you need to acquire multiple locks is t4_list_lock,
* then ADAPTER_LOCK, then t4_uld_list_lock.
*/
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
#endif
/*
* Tunables. See tweak_tunables() too.
*
* Each tunable is set to a default value here if it's known at compile-time.
* Otherwise it is set to -n as an indication to tweak_tunables() that it should
* provide a reasonable default (upto n) when the driver is loaded.
*
* Tunables applicable to both T4 and T5 are under hw.cxgbe. Those specific to
* T5 are under hw.cxl.
*/
SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"cxgbe(4) parameters");
SYSCTL_NODE(_hw, OID_AUTO, cxl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"cxgbe(4) T5+ parameters");
SYSCTL_NODE(_hw_cxgbe, OID_AUTO, toe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"cxgbe(4) TOE parameters");
/*
* Number of queues for tx and rx, NIC and offload.
*/
#define NTXQ 16
int t4_ntxq = -NTXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq, CTLFLAG_RDTUN, &t4_ntxq, 0,
"Number of TX queues per port");
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq); /* Old name, undocumented */
#define NRXQ 8
int t4_nrxq = -NRXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq, CTLFLAG_RDTUN, &t4_nrxq, 0,
"Number of RX queues per port");
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq); /* Old name, undocumented */
#define NTXQ_VI 1
static int t4_ntxq_vi = -NTXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq_vi, CTLFLAG_RDTUN, &t4_ntxq_vi, 0,
"Number of TX queues per VI");
#define NRXQ_VI 1
static int t4_nrxq_vi = -NRXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq_vi, CTLFLAG_RDTUN, &t4_nrxq_vi, 0,
"Number of RX queues per VI");
static int t4_rsrv_noflowq = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, rsrv_noflowq, CTLFLAG_RDTUN, &t4_rsrv_noflowq,
0, "Reserve TX queue 0 of each VI for non-flowid packets");
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
#define NOFLDTXQ 8
static int t4_nofldtxq = -NOFLDTXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq, CTLFLAG_RDTUN, &t4_nofldtxq, 0,
"Number of offload TX queues per port");
#define NOFLDRXQ 2
static int t4_nofldrxq = -NOFLDRXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq, CTLFLAG_RDTUN, &t4_nofldrxq, 0,
"Number of offload RX queues per port");
#define NOFLDTXQ_VI 1
static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq_vi, CTLFLAG_RDTUN, &t4_nofldtxq_vi, 0,
"Number of offload TX queues per VI");
#define NOFLDRXQ_VI 1
static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq_vi, CTLFLAG_RDTUN, &t4_nofldrxq_vi, 0,
"Number of offload RX queues per VI");
#define TMR_IDX_OFLD 1
int t4_tmr_idx_ofld = TMR_IDX_OFLD;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_ofld, CTLFLAG_RDTUN,
&t4_tmr_idx_ofld, 0, "Holdoff timer index for offload queues");
#define PKTC_IDX_OFLD (-1)
int t4_pktc_idx_ofld = PKTC_IDX_OFLD;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_ofld, CTLFLAG_RDTUN,
&t4_pktc_idx_ofld, 0, "holdoff packet counter index for offload queues");
/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_idle = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_idle, CTLFLAG_RDTUN,
&t4_toe_keepalive_idle, 0, "TOE keepalive idle timer (us)");
/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_interval = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_interval, CTLFLAG_RDTUN,
&t4_toe_keepalive_interval, 0, "TOE keepalive interval timer (us)");
/* 0 means chip/fw default, non-zero number is # of keepalives before abort */
static int t4_toe_keepalive_count = 0;
SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, keepalive_count, CTLFLAG_RDTUN,
&t4_toe_keepalive_count, 0, "Number of TOE keepalive probes before abort");
/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_min = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_min, CTLFLAG_RDTUN,
&t4_toe_rexmt_min, 0, "Minimum TOE retransmit interval (us)");
/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_max = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_max, CTLFLAG_RDTUN,
&t4_toe_rexmt_max, 0, "Maximum TOE retransmit interval (us)");
/* 0 means chip/fw default, non-zero number is # of rexmt before abort */
static int t4_toe_rexmt_count = 0;
SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, rexmt_count, CTLFLAG_RDTUN,
&t4_toe_rexmt_count, 0, "Number of TOE retransmissions before abort");
/* -1 means chip/fw default, other values are raw backoff values to use */
static int t4_toe_rexmt_backoff[16] = {
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
SYSCTL_NODE(_hw_cxgbe_toe, OID_AUTO, rexmt_backoff,
CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"cxgbe(4) TOE retransmit backoff values");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 0, CTLFLAG_RDTUN,
&t4_toe_rexmt_backoff[0], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 1, CTLFLAG_RDTUN,
&t4_toe_rexmt_backoff[1], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 2, CTLFLAG_RDTUN,
&t4_toe_rexmt_backoff[2], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 3, CTLFLAG_RDTUN,
&t4_toe_rexmt_backoff[3], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 4, CTLFLAG_RDTUN,
&t4_toe_rexmt_backoff[4], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 5, CTLFLAG_RDTUN,
&t4_toe_rexmt_backoff[5], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 6, CTLFLAG_RDTUN,
&t4_toe_rexmt_backoff[6], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 7, CTLFLAG_RDTUN,
&t4_toe_rexmt_backoff[7], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 8, CTLFLAG_RDTUN,
&t4_toe_rexmt_backoff[8], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 9, CTLFLAG_RDTUN,
&t4_toe_rexmt_backoff[9], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 10, CTLFLAG_RDTUN,
&t4_toe_rexmt_backoff[10], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 11, CTLFLAG_RDTUN,
&t4_toe_rexmt_backoff[11], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 12, CTLFLAG_RDTUN,
&t4_toe_rexmt_backoff[12], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 13, CTLFLAG_RDTUN,
&t4_toe_rexmt_backoff[13], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 14, CTLFLAG_RDTUN,
&t4_toe_rexmt_backoff[14], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 15, CTLFLAG_RDTUN,
&t4_toe_rexmt_backoff[15], 0, "");
int t4_ddp_rcvbuf_len = 256 * 1024;
SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, ddp_rcvbuf_len, CTLFLAG_RWTUN,
&t4_ddp_rcvbuf_len, 0, "length of each DDP RX buffer");
unsigned int t4_ddp_rcvbuf_cache = 4;
SYSCTL_UINT(_hw_cxgbe_toe, OID_AUTO, ddp_rcvbuf_cache, CTLFLAG_RWTUN,
&t4_ddp_rcvbuf_cache, 0,
"maximum number of free DDP RX buffers to cache per connection");
#endif
#ifdef DEV_NETMAP
#define NN_MAIN_VI (1 << 0) /* Native netmap on the main VI */
#define NN_EXTRA_VI (1 << 1) /* Native netmap on the extra VI(s) */
static int t4_native_netmap = NN_EXTRA_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, native_netmap, CTLFLAG_RDTUN, &t4_native_netmap,
0, "Native netmap support. bit 0 = main VI, bit 1 = extra VIs");
#define NNMTXQ 8
static int t4_nnmtxq = -NNMTXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq, CTLFLAG_RDTUN, &t4_nnmtxq, 0,
"Number of netmap TX queues");
#define NNMRXQ 8
static int t4_nnmrxq = -NNMRXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq, CTLFLAG_RDTUN, &t4_nnmrxq, 0,
"Number of netmap RX queues");
#define NNMTXQ_VI 2
static int t4_nnmtxq_vi = -NNMTXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq_vi, CTLFLAG_RDTUN, &t4_nnmtxq_vi, 0,
"Number of netmap TX queues per VI");
#define NNMRXQ_VI 2
static int t4_nnmrxq_vi = -NNMRXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq_vi, CTLFLAG_RDTUN, &t4_nnmrxq_vi, 0,
"Number of netmap RX queues per VI");
#endif
/*
* Holdoff parameters for ports.
*/
#define TMR_IDX 1
int t4_tmr_idx = TMR_IDX;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx, CTLFLAG_RDTUN, &t4_tmr_idx,
0, "Holdoff timer index");
/* Legacy tunable name kept for backward compatibility with old loader.conf. */
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx); /* Old name */
#define PKTC_IDX (-1)
int t4_pktc_idx = PKTC_IDX;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx, CTLFLAG_RDTUN, &t4_pktc_idx,
0, "Holdoff packet counter index");
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx); /* Old name */
/*
* Size (# of entries) of each tx and rx queue.
*/
unsigned int t4_qsize_txq = TX_EQ_QSIZE;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN, &t4_qsize_txq, 0,
"Number of descriptors in each TX queue");
unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN, &t4_qsize_rxq, 0,
"Number of descriptors in each RX queue");
/*
* Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
* Default allows all three; the driver picks from whatever the system grants.
*/
int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &t4_intr_types,
0, "Interrupt types allowed (bit 0 = INTx, 1 = MSI, 2 = MSI-X)");
/*
* Configuration file. All the _CF names here are special.
*/
#define DEFAULT_CF "default"
#define BUILTIN_CF "built-in"
#define FLASH_CF "flash"
#define UWIRE_CF "uwire"
#define FPGA_CF "fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
SYSCTL_STRING(_hw_cxgbe, OID_AUTO, config_file, CTLFLAG_RDTUN, t4_cfg_file,
sizeof(t4_cfg_file), "Firmware configuration file");
/*
* PAUSE settings (bit 0, 1, 2 = rx_pause, tx_pause, pause_autoneg respectively).
* rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
* tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
* mark or when signalled to do so, 0 to never emit PAUSE.
* pause_autoneg = 1 means PAUSE will be negotiated if possible and the
* negotiated settings will override rx_pause/tx_pause.
* Otherwise rx_pause/tx_pause are applied forcibly.
*/
static int t4_pause_settings = PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, pause_settings, CTLFLAG_RDTUN,
&t4_pause_settings, 0,
"PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)");
/*
* Forward Error Correction settings (bit 0, 1 = RS, BASER respectively).
* -1 to run with the firmware default. Same as FEC_AUTO (bit 5)
* 0 to disable FEC.
*/
static int t4_fec = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fec, CTLFLAG_RDTUN, &t4_fec, 0,
"Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)");
/*
* Controls when the driver sets the FORCE_FEC bit in the L1_CFG32 that it
* issues to the firmware. If the firmware doesn't support FORCE_FEC then the
* driver runs as if this is set to 0.
* -1 to set FORCE_FEC iff requested_fec != AUTO. Multiple FEC bits are okay.
* 0 to never set FORCE_FEC. requested_fec = AUTO means use the hint from the
* transceiver. Multiple FEC bits may not be okay but will be passed on to
* the firmware anyway (may result in l1cfg errors with old firmwares).
* 1 to always set FORCE_FEC. Multiple FEC bits are okay. requested_fec = AUTO
* means set all FEC bits that are valid for the speed.
*/
static int t4_force_fec = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, force_fec, CTLFLAG_RDTUN, &t4_force_fec, 0,
"Controls the use of FORCE_FEC bit in L1 configuration.");
/*
* Link autonegotiation.
* -1 to run with the firmware default.
* 0 to disable.
* 1 to enable.
*/
static int t4_autoneg = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, autoneg, CTLFLAG_RDTUN, &t4_autoneg, 0,
"Link autonegotiation");
/*
* Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
* encouraged respectively). '-n' is the same as 'n' except the firmware
* version used in the checks is read from the firmware bundled with the driver.
*/
static int t4_fw_install = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fw_install, CTLFLAG_RDTUN, &t4_fw_install, 0,
"Firmware auto-install (0 = prohibited, 1 = allowed, 2 = encouraged)");
/*
* ASIC features that will be used. Disable the ones you don't want so that the
* chip resources aren't wasted on features that will not be used.
* A value of -1 appears to mean "let the firmware/config file decide" —
* NOTE(review): confirm against the capability negotiation code.
*/
static int t4_nbmcaps_allowed = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nbmcaps_allowed, CTLFLAG_RDTUN,
&t4_nbmcaps_allowed, 0, "Default NBM capabilities");
static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. by default */
SYSCTL_INT(_hw_cxgbe, OID_AUTO, linkcaps_allowed, CTLFLAG_RDTUN,
&t4_linkcaps_allowed, 0, "Default link capabilities");
static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
FW_CAPS_CONFIG_SWITCH_EGRESS;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, switchcaps_allowed, CTLFLAG_RDTUN,
&t4_switchcaps_allowed, 0, "Default switch capabilities");
#ifdef RATELIMIT
/* ETHOFLD (NIC TX rate limiting) needs the extra NIC_ETHOFLD capability. */
static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
FW_CAPS_CONFIG_NIC_HASHFILTER | FW_CAPS_CONFIG_NIC_ETHOFLD;
#else
static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
FW_CAPS_CONFIG_NIC_HASHFILTER;
#endif
SYSCTL_INT(_hw_cxgbe, OID_AUTO, niccaps_allowed, CTLFLAG_RDTUN,
&t4_niccaps_allowed, 0, "Default NIC capabilities");
static int t4_toecaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, toecaps_allowed, CTLFLAG_RDTUN,
&t4_toecaps_allowed, 0, "Default TCP offload capabilities");
static int t4_rdmacaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, rdmacaps_allowed, CTLFLAG_RDTUN,
&t4_rdmacaps_allowed, 0, "Default RDMA capabilities");
static int t4_cryptocaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, cryptocaps_allowed, CTLFLAG_RDTUN,
&t4_cryptocaps_allowed, 0, "Default crypto capabilities");
static int t4_iscsicaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, iscsicaps_allowed, CTLFLAG_RDTUN,
&t4_iscsicaps_allowed, 0, "Default iSCSI capabilities");
static int t4_fcoecaps_allowed = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fcoecaps_allowed, CTLFLAG_RDTUN,
&t4_fcoecaps_allowed, 0, "Default FCoE capabilities");
/* T5-only knob, hence the hw.cxl (not hw.cxgbe) sysctl tree. */
static int t5_write_combine = 0;
SYSCTL_INT(_hw_cxl, OID_AUTO, write_combine, CTLFLAG_RDTUN, &t5_write_combine,
0, "Use WC instead of UC for BAR2");
/* From t4_sysctls: doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"} */
static int t4_doorbells_allowed = 0xf;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, doorbells_allowed, CTLFLAG_RDTUN,
&t4_doorbells_allowed, 0, "Limit tx queues to these doorbells");
static int t4_num_vis = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, num_vis, CTLFLAG_RDTUN, &t4_num_vis, 0,
"Number of VIs per port");
/*
* PCIe Relaxed Ordering.
* -1: driver should figure out a good value.
* 0: disable RO.
* 1: enable RO.
* 2: leave RO alone.
*/
static int pcie_relaxed_ordering = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, pcie_relaxed_ordering, CTLFLAG_RDTUN,
&pcie_relaxed_ordering, 0,
"PCIe Relaxed Ordering: 0 = disable, 1 = enable, 2 = leave alone");
static int t4_panic_on_fatal_err = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, panic_on_fatal_err, CTLFLAG_RWTUN,
&t4_panic_on_fatal_err, 0, "panic on fatal errors");
static int t4_reset_on_fatal_err = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, reset_on_fatal_err, CTLFLAG_RWTUN,
&t4_reset_on_fatal_err, 0, "reset adapter on fatal errors");
static int t4_clock_gate_on_suspend = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, clock_gate_on_suspend, CTLFLAG_RWTUN,
&t4_clock_gate_on_suspend, 0, "gate the clock on suspend");
static int t4_tx_vm_wr = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_vm_wr, CTLFLAG_RWTUN, &t4_tx_vm_wr, 0,
"Use VM work requests to transmit packets.");
/*
* Set to non-zero to enable the attack filter. A packet that matches any of
* these conditions will get dropped on ingress:
* 1) IP && source address == destination address.
* 2) TCP/IP && source address is not a unicast address.
* 3) TCP/IP && destination address is not a unicast address.
* 4) IP && source address is loopback (127.x.y.z).
* 5) IP && destination address is loopback (127.x.y.z).
* 6) IPv6 && source address == destination address.
* 7) IPv6 && source address is not a unicast address.
* 8) IPv6 && source address is loopback (::1/128).
* 9) IPv6 && destination address is loopback (::1/128).
* 10) IPv6 && source address is unspecified (::/128).
* 11) IPv6 && destination address is unspecified (::/128).
* 12) TCP/IPv6 && source address is multicast (ff00::/8).
* 13) TCP/IPv6 && destination address is multicast (ff00::/8).
*/
static int t4_attack_filter = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, attack_filter, CTLFLAG_RDTUN,
&t4_attack_filter, 0, "Drop suspicious traffic");
static int t4_drop_ip_fragments = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_ip_fragments, CTLFLAG_RDTUN,
&t4_drop_ip_fragments, 0, "Drop IP fragments");
/* L2 error drops are on by default; L3/L4 drops are opt-in. */
static int t4_drop_pkts_with_l2_errors = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l2_errors, CTLFLAG_RDTUN,
&t4_drop_pkts_with_l2_errors, 0,
"Drop all frames with Layer 2 length or checksum errors");
static int t4_drop_pkts_with_l3_errors = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l3_errors, CTLFLAG_RDTUN,
&t4_drop_pkts_with_l3_errors, 0,
"Drop all frames with IP version, length, or checksum errors");
static int t4_drop_pkts_with_l4_errors = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l4_errors, CTLFLAG_RDTUN,
&t4_drop_pkts_with_l4_errors, 0,
"Drop all frames with Layer 4 length, checksum, or other errors");
#ifdef TCP_OFFLOAD
/*
* TOE tunables.
*/
static int t4_cop_managed_offloading = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, cop_managed_offloading, CTLFLAG_RDTUN,
&t4_cop_managed_offloading, 0,
"COP (Connection Offload Policy) controls all TOE offload");
#endif
#ifdef KERN_TLS
/*
* This enables KERN_TLS for all adapters if set.
*/
static int t4_kern_tls = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, kern_tls, CTLFLAG_RDTUN, &t4_kern_tls, 0,
"Enable KERN_TLS mode for T6 adapters");
SYSCTL_NODE(_hw_cxgbe, OID_AUTO, tls, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"cxgbe(4) KERN_TLS parameters");
static int t4_tls_inline_keys = 0;
SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, inline_keys, CTLFLAG_RDTUN,
&t4_tls_inline_keys, 0,
"Always pass TLS keys in work requests (1) or attempt to store TLS keys "
"in card memory.");
static int t4_tls_combo_wrs = 0;
SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, combo_wrs, CTLFLAG_RDTUN, &t4_tls_combo_wrs,
0, "Attempt to combine TCB field updates with TLS record work requests.");
#endif
/* Functions used by VIs to obtain unique MAC addresses for each VI. */
/*
 * The order matters: extra VIs draw FW_VI_FUNC_* values from this table in
 * sequence — presumably so each VI gets a distinct function-derived MAC.
 * NOTE(review): confirm against alloc_extra_vi().
 */
static int vi_mac_funcs[] = {
FW_VI_FUNC_ETH,
FW_VI_FUNC_OFLD,
FW_VI_FUNC_IWARP,
FW_VI_FUNC_OPENISCSI,
FW_VI_FUNC_OPENFCOE,
FW_VI_FUNC_FOISCSI,
FW_VI_FUNC_FOFCOE,
};
/*
 * Interrupt vector and queue counts computed during attach (filled in by
 * cfg_itype_and_nqueues()).  Per-port counts apply to the main VI of each
 * port; the *_vi counts apply to each additional VI.
 */
struct intrs_and_queues {
uint16_t intr_type; /* INTx, MSI, or MSI-X */
uint16_t num_vis; /* number of VIs for each port */
uint16_t nirq; /* Total # of vectors */
uint16_t ntxq; /* # of NIC txq's for each port */
uint16_t nrxq; /* # of NIC rxq's for each port */
uint16_t nofldtxq; /* # of TOE/ETHOFLD txq's for each port */
uint16_t nofldrxq; /* # of TOE rxq's for each port */
uint16_t nnmtxq; /* # of netmap txq's */
uint16_t nnmrxq; /* # of netmap rxq's */
/* The vcxgbe/vcxl interfaces use these and not the ones above. */
uint16_t ntxq_vi; /* # of NIC txq's */
uint16_t nrxq_vi; /* # of NIC rxq's */
uint16_t nofldtxq_vi; /* # of TOE txq's */
uint16_t nofldrxq_vi; /* # of TOE rxq's */
uint16_t nnmtxq_vi; /* # of netmap txq's */
uint16_t nnmrxq_vi; /* # of netmap rxq's */
};
/* Adapter bring-up: memory windows, firmware contact, parameter exchange. */
static void setup_memwin(struct adapter *);
static void position_memwin(struct adapter *, int, uint32_t);
static int validate_mem_range(struct adapter *, uint32_t, uint32_t);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, uint32_t,
uint32_t *);
static int fixup_devlog_params(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, struct intrs_and_queues *);
static int contact_firmware(struct adapter *);
static int partition_resources(struct adapter *);
static int get_params__pre_init(struct adapter *);
static int set_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
/* Link/media configuration helpers. */
static bool fixed_ifmedia(struct port_info *);
static void build_medialist(struct port_info *);
static void init_link_config(struct port_info *);
static int fixup_link_config(struct port_info *);
static int apply_link_config(struct port_info *);
/* Interface and VI init/teardown. */
static int cxgbe_init_synchronized(struct vi_info *);
static int cxgbe_uninit_synchronized(struct vi_info *);
static int adapter_full_init(struct adapter *);
static void adapter_full_uninit(struct adapter *);
static int vi_full_init(struct vi_info *);
static void vi_full_uninit(struct vi_info *);
static int alloc_extra_vi(struct adapter *, struct port_info *, struct vi_info *);
static void quiesce_txq(struct sge_txq *);
static void quiesce_wrq(struct sge_wrq *);
static void quiesce_iq_fl(struct adapter *, struct sge_iq *, struct sge_fl *);
static void quiesce_vi(struct vi_info *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void t4_init_atid_table(struct adapter *);
static void t4_free_atid_table(struct adapter *);
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
/* Periodic statistics refresh and ticks. */
static void vi_refresh_stats(struct vi_info *);
static void cxgbe_refresh_stats(struct vi_info *);
static void cxgbe_tick(void *);
static void vi_tick(void *);
static void cxgbe_sysctls(struct port_info *);
/* sysctl handlers exported via the adapter/port sysctl trees. */
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_link_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_requested_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_module_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
static int sysctl_force_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
static int sysctl_vdd(SYSCTL_HANDLER_ARGS);
static int sysctl_reset_sensor(SYSCTL_HANDLER_ARGS);
static int sysctl_loadavg(SYSCTL_HANDLER_ARGS);
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tid_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tnl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_cpus(SYSCTL_HANDLER_ARGS);
static int sysctl_reset(SYSCTL_HANDLER_ARGS);
#ifdef TCP_OFFLOAD
static int sysctl_tls(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS);
#endif
/* ioctl backends for the t4nex character device. */
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int load_cfg(struct adapter *, struct t4_data *);
static int load_boot(struct adapter *, struct t4_bootrom *);
static int load_bootcfg(struct adapter *, struct t4_data *);
static int cudbg_dump(struct adapter *, struct t4_cudbg_dump *);
static void free_offload_policy(struct t4_offload_policy *);
static int set_offload_policy(struct adapter *, struct t4_offload_policy *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
static int clear_stats(struct adapter *, u_int);
static int hold_clip_addr(struct adapter *, struct t4_clip_addr *);
static int release_clip_addr(struct adapter *, struct t4_clip_addr *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct vi_info *, bool);
static int t4_deactivate_all_uld(struct adapter *);
static void t4_async_event(struct adapter *);
#endif
#ifdef KERN_TLS
static int ktls_capability(struct adapter *, bool);
#endif
/* Module lifecycle, counters, and error handling. */
static int mod_event(module_t, int, void *);
static int notify_siblings(device_t, int);
static uint64_t vi_get_counter(if_t, ift_counter);
static uint64_t cxgbe_get_counter(if_t, ift_counter);
static void enable_vxlan_rx(struct adapter *);
static void reset_adapter_task(void *, int);
static void fatal_error_task(void *, int);
static void dump_devlog(struct adapter *);
static void dump_cim_regs(struct adapter *);
static void dump_cimla(struct adapter *);
/*
 * PCI device IDs claimed by the T4/T5/T6 probe routines.  Each table maps a
 * Chelsio device id to the human-readable description set on the device.
 */
struct {
uint16_t device;
char *desc;
} t4_pciids[] = {
{0xa000, "Chelsio Terminator 4 FPGA"},
{0x4400, "Chelsio T440-dbg"},
{0x4401, "Chelsio T420-CR"},
{0x4402, "Chelsio T422-CR"},
{0x4403, "Chelsio T440-CR"},
{0x4404, "Chelsio T420-BCH"},
{0x4405, "Chelsio T440-BCH"},
{0x4406, "Chelsio T440-CH"},
{0x4407, "Chelsio T420-SO"},
{0x4408, "Chelsio T420-CX"},
{0x4409, "Chelsio T420-BT"},
{0x440a, "Chelsio T404-BT"},
{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
{0xb000, "Chelsio Terminator 5 FPGA"},
{0x5400, "Chelsio T580-dbg"},
{0x5401, "Chelsio T520-CR"}, /* 2 x 10G */
{0x5402, "Chelsio T522-CR"}, /* 2 x 10G, 2 X 1G */
{0x5403, "Chelsio T540-CR"}, /* 4 x 10G */
{0x5407, "Chelsio T520-SO"}, /* 2 x 10G, nomem */
{0x5409, "Chelsio T520-BT"}, /* 2 x 10GBaseT */
{0x540a, "Chelsio T504-BT"}, /* 4 x 1G */
{0x540d, "Chelsio T580-CR"}, /* 2 x 40G */
{0x540e, "Chelsio T540-LP-CR"}, /* 4 x 10G */
{0x5410, "Chelsio T580-LP-CR"}, /* 2 x 40G */
{0x5411, "Chelsio T520-LL-CR"}, /* 2 x 10G */
{0x5412, "Chelsio T560-CR"}, /* 1 x 40G, 2 x 10G */
{0x5414, "Chelsio T580-LP-SO-CR"}, /* 2 x 40G, nomem */
{0x5415, "Chelsio T502-BT"}, /* 2 x 1G */
{0x5418, "Chelsio T540-BT"}, /* 4 x 10GBaseT */
{0x5419, "Chelsio T540-LP-BT"}, /* 4 x 10GBaseT */
{0x541a, "Chelsio T540-SO-BT"}, /* 4 x 10GBaseT, nomem */
{0x541b, "Chelsio T540-SO-CR"}, /* 4 x 10G, nomem */
/* Custom */
{0x5483, "Custom T540-CR"},
{0x5484, "Custom T540-BT"},
}, t6_pciids[] = {
{0xc006, "Chelsio Terminator 6 FPGA"}, /* T6 PE10K6 FPGA (PF0) */
{0x6400, "Chelsio T6-DBG-25"}, /* 2 x 10/25G, debug */
{0x6401, "Chelsio T6225-CR"}, /* 2 x 10/25G */
{0x6402, "Chelsio T6225-SO-CR"}, /* 2 x 10/25G, nomem */
{0x6403, "Chelsio T6425-CR"}, /* 4 x 10/25G */
{0x6404, "Chelsio T6425-SO-CR"}, /* 4 x 10/25G, nomem */
{0x6405, "Chelsio T6225-OCP-SO"}, /* 2 x 10/25G, nomem */
{0x6406, "Chelsio T62100-OCP-SO"}, /* 2 x 40/50/100G, nomem */
{0x6407, "Chelsio T62100-LP-CR"}, /* 2 x 40/50/100G */
{0x6408, "Chelsio T62100-SO-CR"}, /* 2 x 40/50/100G, nomem */
{0x6409, "Chelsio T6210-BT"}, /* 2 x 10GBASE-T */
{0x640d, "Chelsio T62100-CR"}, /* 2 x 40/50/100G */
{0x6410, "Chelsio T6-DBG-100"}, /* 2 x 40/50/100G, debug */
{0x6411, "Chelsio T6225-LL-CR"}, /* 2 x 10/25G */
{0x6414, "Chelsio T61100-OCP-SO"}, /* 1 x 40/50/100G, nomem */
{0x6415, "Chelsio T6201-BT"}, /* 2 x 1000BASE-T */
/* Custom */
{0x6480, "Custom T6225-CR"},
{0x6481, "Custom T62100-CR"},
{0x6482, "Custom T6225-CR"},
{0x6483, "Custom T62100-CR"},
{0x6484, "Custom T64100-CR"},
{0x6485, "Custom T6240-SO"},
{0x6486, "Custom T6225-SO-CR"},
{0x6487, "Custom T6225-CR"},
};
#ifdef TCP_OFFLOAD
/*
* service_iq_fl() has an iq and needs the fl. Offset of fl from the iq should
* be exactly the same for both rxq and ofld_rxq.
*/
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif
/* Cluster metadata must fit in the space reserved for it in an mbuf cluster. */
CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
static int
t4_probe(device_t dev)
{
	const uint16_t vid = pci_get_vendor(dev);
	const uint16_t did = pci_get_device(dev);
	const uint8_t fn = pci_get_function(dev);
	u_int n;

	/* Only Chelsio devices are of interest. */
	if (vid != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (did == 0xa000 && fn != 0)
		return (ENXIO);

	/* Claim the device if its id appears in the T4 table. */
	for (n = 0; n < nitems(t4_pciids); n++) {
		if (did == t4_pciids[n].device) {
			device_set_desc(dev, t4_pciids[n].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
static int
t5_probe(device_t dev)
{
	const uint16_t vid = pci_get_vendor(dev);
	const uint16_t did = pci_get_device(dev);
	const uint8_t fn = pci_get_function(dev);
	u_int n;

	/* Only Chelsio devices are of interest. */
	if (vid != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (did == 0xb000 && fn != 0)
		return (ENXIO);

	/* Claim the device if its id appears in the T5 table. */
	for (n = 0; n < nitems(t5_pciids); n++) {
		if (did == t5_pciids[n].device) {
			device_set_desc(dev, t5_pciids[n].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
static int
t6_probe(device_t dev)
{
	const uint16_t vid = pci_get_vendor(dev);
	const uint16_t did = pci_get_device(dev);
	u_int n;

	/* Only Chelsio devices are of interest. */
	if (vid != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Claim the device if its id appears in the T6 table. */
	for (n = 0; n < nitems(t6_pciids); n++) {
		if (did == t6_pciids[n].device) {
			device_set_desc(dev, t6_pciids[n].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
static void
t5_attribute_workaround(device_t dev)
{
	device_t rp;
	uint32_t old;

	/*
	 * The T5 chips do not properly echo the No Snoop and Relaxed
	 * Ordering attributes when replying to a TLP from a Root
	 * Port.  As a workaround, find the parent Root Port and
	 * disable No Snoop and Relaxed Ordering.  Note that this
	 * affects all devices under this root port.
	 */
	rp = pci_find_pcie_root_port(dev);
	if (rp == NULL) {
		device_printf(dev, "Unable to find parent root port\n");
		return;
	}

	/* Clear both attribute enables; the old value tells us if they were on. */
	old = pcie_adjust_config(rp, PCIER_DEVICE_CTL,
	    PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
	if ((old & (PCIEM_CTL_RELAXED_ORD_ENABLE |
	    PCIEM_CTL_NOSNOOP_ENABLE)) != 0)
		device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
		    device_get_nameunit(rp));
}
/*
 * Per-generation device/interface names.  Indexed by chip_id() - CHELSIO_T4
 * in t4_init_devnames(), so entry order must stay T4, T5, T6.
 */
static const struct devnames devnames[] = {
{
.nexus_name = "t4nex",
.ifnet_name = "cxgbe",
.vi_ifnet_name = "vcxgbe",
.pf03_drv_name = "t4iov",
.vf_nexus_name = "t4vf",
.vf_ifnet_name = "cxgbev"
}, {
.nexus_name = "t5nex",
.ifnet_name = "cxl",
.vi_ifnet_name = "vcxl",
.pf03_drv_name = "t5iov",
.vf_nexus_name = "t5vf",
.vf_ifnet_name = "cxlv"
}, {
.nexus_name = "t6nex",
.ifnet_name = "cc",
.vi_ifnet_name = "vcc",
.pf03_drv_name = "t6iov",
.vf_nexus_name = "t6vf",
.vf_ifnet_name = "ccv"
}
};
/*
 * Point sc->names at the name set for this chip generation, or leave it
 * NULL (and complain) for an unrecognized chip id.
 */
void
t4_init_devnames(struct adapter *sc)
{
	const int id = chip_id(sc);

	if (id < CHELSIO_T4 || id - CHELSIO_T4 >= nitems(devnames)) {
		device_printf(sc->dev, "chip id %d is not supported.\n", id);
		sc->names = NULL;
	} else
		sc->names = &devnames[id - CHELSIO_T4];
}
/*
 * Look for a "hint.<ifname>.<unit>.port=<port_id>" wiring hint that binds a
 * specific ifnet unit to this port.  Returns the wired unit number, or -1
 * (auto-assign) when no hint matches.
 */
static int
t4_ifnet_unit(struct adapter *sc, struct port_info *pi)
{
	const char *busname, *ifname;
	long port;
	int anchor, u;

	anchor = 0;
	busname = device_get_nameunit(sc->dev);
	ifname = sc->names->ifnet_name;
	for (;;) {
		if (resource_find_dev(&anchor, ifname, &u, "at", busname) != 0)
			break;
		if (resource_long_value(ifname, u, "port", &port) == 0 &&
		    port == pi->port_id)
			return (u);
	}

	return (-1);
}
/*
 * Periodic (1 Hz) callout that samples the adapter's hardware timestamp
 * counter alongside the system uptime, maintaining a small ring of
 * (hw, sbt) sample pairs in sc->cal_info.  Readers use the per-entry seqc
 * generation counter to get a consistent snapshot without locking.
 */
static void
t4_calibration(void *arg)
{
	struct adapter *sc;
	struct clock_sync *cur, *nex;
	uint64_t hw;
	sbintime_t sbt;
	int next_up;

	sc = (struct adapter *)arg;

	KASSERT((hw_off_limits(sc) == 0), ("hw_off_limits at t4_calibration"));
	/* Read both clocks back to back to keep the pair as coherent as possible. */
	hw = t4_read_reg64(sc, A_SGE_TIMESTAMP_LO);
	sbt = sbinuptime();
	cur = &sc->cal_info[sc->cal_current];
	next_up = (sc->cal_current + 1) % CNT_CAL_INFO;
	nex = &sc->cal_info[next_up];
	if (__predict_false(sc->cal_count == 0)) {
		/* First time in, just get the values in */
		cur->hw_cur = hw;
		cur->sbt_cur = sbt;
		sc->cal_count++;
		goto done;
	}

	if (cur->hw_cur == hw) {
		/* The clock is not advancing? */
		sc->cal_count = 0;
		/* Invalidate the entry so readers don't use a stale pair. */
		atomic_store_rel_int(&cur->gen, 0);
		goto done;
	}

	/*
	 * Publish the new sample pair in the next ring slot under the seqc
	 * write protocol; cal_current is only advanced after the entry is
	 * complete, so readers never see a half-written slot.
	 */
	seqc_write_begin(&nex->gen);
	nex->hw_prev = cur->hw_cur;
	nex->sbt_prev = cur->sbt_cur;
	nex->hw_cur = hw;
	nex->sbt_cur = sbt;
	seqc_write_end(&nex->gen);
	sc->cal_current = next_up;
done:
	/* Re-arm for the next sample one second from now. */
	callout_reset_sbt_curcpu(&sc->cal_callout, SBT_1S, 0, t4_calibration,
	    sc, C_DIRECT_EXEC);
}
/*
 * Reset all clock-calibration state and take the first sample, which also
 * arms the periodic calibration callout.
 */
static void
t4_calibration_start(struct adapter *sc)
{
	int idx;

	/* Invalidate every ring entry before restarting. */
	for (idx = 0; idx < CNT_CAL_INFO; idx++)
		sc->cal_info[idx].gen = 0;
	sc->cal_current = 0;
	sc->cal_count = 0;
	sc->cal_gen = 0;
	t4_calibration(sc);
}
static int
t4_attach(device_t dev)
{
struct adapter *sc;
int rc = 0, i, j, rqidx, tqidx, nports;
struct make_dev_args mda;
struct intrs_and_queues iaq;
struct sge *s;
uint32_t *buf;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
int ofld_tqidx;
#endif
#ifdef TCP_OFFLOAD
int ofld_rqidx;
#endif
#ifdef DEV_NETMAP
int nm_rqidx, nm_tqidx;
#endif
int num_vis;
sc = device_get_softc(dev);
sc->dev = dev;
sysctl_ctx_init(&sc->ctx);
TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);
if ((pci_get_device(dev) & 0xff00) == 0x5400)
t5_attribute_workaround(dev);
pci_enable_busmaster(dev);
if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
uint32_t v;
pci_set_max_read_req(dev, 4096);
v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
if (pcie_relaxed_ordering == 0 &&
(v & PCIEM_CTL_RELAXED_ORD_ENABLE) != 0) {
v &= ~PCIEM_CTL_RELAXED_ORD_ENABLE;
pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
} else if (pcie_relaxed_ordering == 1 &&
(v & PCIEM_CTL_RELAXED_ORD_ENABLE) == 0) {
v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
}
}
sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
sc->traceq = -1;
mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
device_get_nameunit(dev));
snprintf(sc->lockname, sizeof(sc->lockname), "%s",
device_get_nameunit(dev));
mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
t4_add_adapter(sc);
mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
TAILQ_INIT(&sc->sfl);
callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);
mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);
sc->policy = NULL;
rw_init(&sc->policy_lock, "connection offload policy");
callout_init(&sc->ktls_tick, 1);
callout_init(&sc->cal_callout, 1);
refcount_init(&sc->vxlan_refcount, 0);
TASK_INIT(&sc->reset_task, 0, reset_adapter_task, sc);
TASK_INIT(&sc->fatal_error_task, 0, fatal_error_task, sc);
sc->ctrlq_oid = SYSCTL_ADD_NODE(&sc->ctx,
SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "ctrlq",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "control queues");
sc->fwq_oid = SYSCTL_ADD_NODE(&sc->ctx,
SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "fwq",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "firmware event queue");
rc = t4_map_bars_0_and_4(sc);
if (rc != 0)
goto done; /* error message displayed already */
memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
/* Prepare the adapter for operation. */
buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
rc = -t4_prep_adapter(sc, buf);
free(buf, M_CXGBE);
if (rc != 0) {
device_printf(dev, "failed to prepare adapter: %d.\n", rc);
goto done;
}
/*
* This is the real PF# to which we're attaching. Works from within PCI
* passthrough environments too, where pci_get_function() could return a
* different PF# depending on the passthrough configuration. We need to
* use the real PF# in all our communication with the firmware.
*/
j = t4_read_reg(sc, A_PL_WHOAMI);
sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
sc->mbox = sc->pf;
t4_init_devnames(sc);
if (sc->names == NULL) {
rc = ENOTSUP;
goto done; /* error message displayed already */
}
/*
* Do this really early, with the memory windows set up even before the
* character device. The userland tool's register i/o and mem read
* will work even in "recovery mode".
*/
setup_memwin(sc);
if (t4_init_devlog_params(sc, 0) == 0)
fixup_devlog_params(sc);
make_dev_args_init(&mda);
mda.mda_devsw = &t4_cdevsw;
mda.mda_uid = UID_ROOT;
mda.mda_gid = GID_WHEEL;
mda.mda_mode = 0600;
mda.mda_si_drv1 = sc;
rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
if (rc != 0)
device_printf(dev, "failed to create nexus char device: %d.\n",
rc);
/* Go no further if recovery mode has been requested. */
if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
device_printf(dev, "recovery mode.\n");
goto done;
}
#if defined(__i386__)
if ((cpu_feature & CPUID_CX8) == 0) {
device_printf(dev, "64 bit atomics not available.\n");
rc = ENOTSUP;
goto done;
}
#endif
/* Contact the firmware and try to become the master driver. */
rc = contact_firmware(sc);
if (rc != 0)
goto done; /* error message displayed already */
MPASS(sc->flags & FW_OK);
rc = get_params__pre_init(sc);
if (rc != 0)
goto done; /* error message displayed already */
if (sc->flags & MASTER_PF) {
rc = partition_resources(sc);
if (rc != 0)
goto done; /* error message displayed already */
}
rc = get_params__post_init(sc);
if (rc != 0)
goto done; /* error message displayed already */
rc = set_params__post_init(sc);
if (rc != 0)
goto done; /* error message displayed already */
rc = t4_map_bar_2(sc);
if (rc != 0)
goto done; /* error message displayed already */
rc = t4_adj_doorbells(sc);
if (rc != 0)
goto done; /* error message displayed already */
rc = t4_create_dma_tag(sc);
if (rc != 0)
goto done; /* error message displayed already */
/*
* First pass over all the ports - allocate VIs and initialize some
* basic parameters like mac address, port type, etc.
*/
for_each_port(sc, i) {
struct port_info *pi;
pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
sc->port[i] = pi;
/* These must be set before t4_port_init */
pi->adapter = sc;
pi->port_id = i;
/*
* XXX: vi[0] is special so we can't delay this allocation until
* pi->nvi's final value is known.
*/
pi->vi = malloc(sizeof(struct vi_info) * t4_num_vis, M_CXGBE,
M_ZERO | M_WAITOK);
/*
* Allocate the "main" VI and initialize parameters
* like mac addr.
*/
rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
if (rc != 0) {
device_printf(dev, "unable to initialize port %d: %d\n",
i, rc);
free(pi->vi, M_CXGBE);
free(pi, M_CXGBE);
sc->port[i] = NULL;
goto done;
}
if (is_bt(pi->port_type))
setbit(&sc->bt_map, pi->tx_chan);
else
MPASS(!isset(&sc->bt_map, pi->tx_chan));
snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
device_get_nameunit(dev), i);
mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
sc->chan_map[pi->tx_chan] = i;
/*
* The MPS counter for FCS errors doesn't work correctly on the
* T6 so we use the MAC counter here. Which MAC is in use
* depends on the link settings which will be known when the
* link comes up.
*/
if (is_t6(sc))
pi->fcs_reg = -1;
else {
pi->fcs_reg = t4_port_reg(sc, pi->tx_chan,
A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L);
}
pi->fcs_base = 0;
/* All VIs on this port share this media. */
ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
cxgbe_media_status);
PORT_LOCK(pi);
init_link_config(pi);
fixup_link_config(pi);
build_medialist(pi);
if (fixed_ifmedia(pi))
pi->flags |= FIXED_IFMEDIA;
PORT_UNLOCK(pi);
pi->dev = device_add_child(dev, sc->names->ifnet_name,
t4_ifnet_unit(sc, pi));
if (pi->dev == NULL) {
device_printf(dev,
"failed to add device for port %d.\n", i);
rc = ENXIO;
goto done;
}
pi->vi[0].dev = pi->dev;
device_set_softc(pi->dev, pi);
}
/*
* Interrupt type, # of interrupts, # of rx/tx queues, etc.
*/
nports = sc->params.nports;
rc = cfg_itype_and_nqueues(sc, &iaq);
if (rc != 0)
goto done; /* error message displayed already */
num_vis = iaq.num_vis;
sc->intr_type = iaq.intr_type;
sc->intr_count = iaq.nirq;
s = &sc->sge;
s->nrxq = nports * iaq.nrxq;
s->ntxq = nports * iaq.ntxq;
if (num_vis > 1) {
s->nrxq += nports * (num_vis - 1) * iaq.nrxq_vi;
s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi;
}
s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
s->neq += nports; /* ctrl queues: 1 per port */
s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
if (is_offload(sc) || is_ethoffload(sc)) {
s->nofldtxq = nports * iaq.nofldtxq;
if (num_vis > 1)
s->nofldtxq += nports * (num_vis - 1) * iaq.nofldtxq_vi;
s->neq += s->nofldtxq;
s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_ofld_txq),
M_CXGBE, M_ZERO | M_WAITOK);
}
#endif
#ifdef TCP_OFFLOAD
if (is_offload(sc)) {
s->nofldrxq = nports * iaq.nofldrxq;
if (num_vis > 1)
s->nofldrxq += nports * (num_vis - 1) * iaq.nofldrxq_vi;
s->neq += s->nofldrxq; /* free list */
s->niq += s->nofldrxq;
s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
M_CXGBE, M_ZERO | M_WAITOK);
}
#endif
#ifdef DEV_NETMAP
s->nnmrxq = 0;
s->nnmtxq = 0;
if (t4_native_netmap & NN_MAIN_VI) {
s->nnmrxq += nports * iaq.nnmrxq;
s->nnmtxq += nports * iaq.nnmtxq;
}
if (num_vis > 1 && t4_native_netmap & NN_EXTRA_VI) {
s->nnmrxq += nports * (num_vis - 1) * iaq.nnmrxq_vi;
s->nnmtxq += nports * (num_vis - 1) * iaq.nnmtxq_vi;
}
s->neq += s->nnmtxq + s->nnmrxq;
s->niq += s->nnmrxq;
s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
M_CXGBE, M_ZERO | M_WAITOK);
s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
M_CXGBE, M_ZERO | M_WAITOK);
#endif
MPASS(s->niq <= s->iqmap_sz);
MPASS(s->neq <= s->eqmap_sz);
s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE,
M_ZERO | M_WAITOK);
s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
M_ZERO | M_WAITOK);
s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
M_ZERO | M_WAITOK);
s->iqmap = malloc(s->iqmap_sz * sizeof(struct sge_iq *), M_CXGBE,
M_ZERO | M_WAITOK);
s->eqmap = malloc(s->eqmap_sz * sizeof(struct sge_eq *), M_CXGBE,
M_ZERO | M_WAITOK);
sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
M_ZERO | M_WAITOK);
t4_init_l2t(sc, M_WAITOK);
t4_init_smt(sc, M_WAITOK);
t4_init_tx_sched(sc);
t4_init_atid_table(sc);
#ifdef RATELIMIT
t4_init_etid_table(sc);
#endif
#ifdef INET6
t4_init_clip_table(sc);
#endif
if (sc->vres.key.size != 0)
sc->key_map = vmem_create("T4TLS key map", sc->vres.key.start,
sc->vres.key.size, 32, 0, M_FIRSTFIT | M_WAITOK);
/*
* Second pass over the ports. This time we know the number of rx and
* tx queues that each port should get.
*/
rqidx = tqidx = 0;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
ofld_tqidx = 0;
#endif
#ifdef TCP_OFFLOAD
ofld_rqidx = 0;
#endif
#ifdef DEV_NETMAP
nm_rqidx = nm_tqidx = 0;
#endif
for_each_port(sc, i) {
struct port_info *pi = sc->port[i];
struct vi_info *vi;
if (pi == NULL)
continue;
pi->nvi = num_vis;
for_each_vi(pi, j, vi) {
vi->pi = pi;
vi->adapter = sc;
vi->first_intr = -1;
vi->qsize_rxq = t4_qsize_rxq;
vi->qsize_txq = t4_qsize_txq;
vi->first_rxq = rqidx;
vi->first_txq = tqidx;
vi->tmr_idx = t4_tmr_idx;
vi->pktc_idx = t4_pktc_idx;
vi->nrxq = j == 0 ? iaq.nrxq : iaq.nrxq_vi;
vi->ntxq = j == 0 ? iaq.ntxq : iaq.ntxq_vi;
rqidx += vi->nrxq;
tqidx += vi->ntxq;
if (j == 0 && vi->ntxq > 1)
vi->rsrv_noflowq = t4_rsrv_noflowq ? 1 : 0;
else
vi->rsrv_noflowq = 0;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
vi->first_ofld_txq = ofld_tqidx;
vi->nofldtxq = j == 0 ? iaq.nofldtxq : iaq.nofldtxq_vi;
ofld_tqidx += vi->nofldtxq;
#endif
#ifdef TCP_OFFLOAD
vi->ofld_tmr_idx = t4_tmr_idx_ofld;
vi->ofld_pktc_idx = t4_pktc_idx_ofld;
vi->first_ofld_rxq = ofld_rqidx;
vi->nofldrxq = j == 0 ? iaq.nofldrxq : iaq.nofldrxq_vi;
ofld_rqidx += vi->nofldrxq;
#endif
#ifdef DEV_NETMAP
vi->first_nm_rxq = nm_rqidx;
vi->first_nm_txq = nm_tqidx;
if (j == 0) {
vi->nnmrxq = iaq.nnmrxq;
vi->nnmtxq = iaq.nnmtxq;
} else {
vi->nnmrxq = iaq.nnmrxq_vi;
vi->nnmtxq = iaq.nnmtxq_vi;
}
nm_rqidx += vi->nnmrxq;
nm_tqidx += vi->nnmtxq;
#endif
}
}
rc = t4_setup_intr_handlers(sc);
if (rc != 0) {
device_printf(dev,
"failed to setup interrupt handlers: %d\n", rc);
goto done;
}
rc = bus_generic_probe(dev);
if (rc != 0) {
device_printf(dev, "failed to probe child drivers: %d\n", rc);
goto done;
}
/*
* Ensure thread-safe mailbox access (in debug builds).
*
* So far this was the only thread accessing the mailbox but various
* ifnets and sysctls are about to be created and their handlers/ioctls
* will access the mailbox from different threads.
*/
sc->flags |= CHK_MBOX_ACCESS;
rc = bus_generic_attach(dev);
if (rc != 0) {
device_printf(dev,
"failed to attach all child ports: %d\n", rc);
goto done;
}
t4_calibration_start(sc);
device_printf(dev,
"PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
(sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
t4_set_desc(sc);
notify_siblings(dev, 0);
done:
if (rc != 0 && sc->cdev) {
/* cdev was created and so cxgbetool works; recover that way. */
device_printf(dev,
"error during attach, adapter is now in recovery mode.\n");
rc = 0;
}
if (rc != 0)
t4_detach_common(dev);
else
t4_sysctls(sc);
return (rc);
}
/*
 * Report the bus-specific location of a child port device.  Emits
 * "port=<id>" into the sbuf for the matching port; emits nothing (and
 * still succeeds) if the child is not one of our ports.
 */
static int
t4_child_location(device_t bus, device_t dev, struct sbuf *sb)
{
	struct adapter *sc = device_get_softc(bus);
	int i;

	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		if (pi == NULL || pi->dev != dev)
			continue;
		sbuf_printf(sb, "port=%d", pi->port_id);
		break;
	}

	return (0);
}
/*
 * Returns 0 once the firmware connection is up (FW_OK), ENXIO otherwise.
 */
static int
t4_ready(device_t dev)
{
	struct adapter *sc = device_get_softc(dev);

	return ((sc->flags & FW_OK) != 0 ? 0 : ENXIO);
}
/*
 * Look up the device_t for a given port index.
 *
 * Returns EINVAL for an out-of-range index, ENXIO if the port or its
 * device does not exist, 0 on success with *child set.
 */
static int
t4_read_port_device(device_t dev, int port, device_t *child)
{
	struct adapter *sc;
	struct port_info *pi;

	if (port < 0 || port >= MAX_NPORTS)
		return (EINVAL);

	sc = device_get_softc(dev);
	pi = sc->port[port];
	if (pi == NULL || pi->dev == NULL)
		return (ENXIO);

	*child = pi->dev;
	return (0);
}
/*
 * Notify the other PCI functions of the same device (attached siblings)
 * that this function is attaching or detaching.  Attach notifications are
 * best-effort; a detach notification failure aborts the walk and is
 * returned to the caller.
 */
static int
notify_siblings(device_t dev, int detaching)
{
	device_t sibling;
	int fn, rc = 0;

	for (fn = 0; fn < PCI_FUNCMAX; fn++) {
		if (fn == pci_get_function(dev))
			continue;	/* skip ourselves */
		sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev),
		    pci_get_slot(dev), fn);
		if (sibling == NULL || !device_is_attached(sibling))
			continue;
		if (detaching) {
			rc = T4_DETACH_CHILD(sibling);
			if (rc != 0)
				break;
		} else
			(void)T4_ATTACH_CHILD(sibling);
	}

	return (rc);
}
/*
 * Idempotent
 *
 * Detach entry point: tell the sibling PCI functions first, then do the
 * common teardown.
 */
static int
t4_detach(device_t dev)
{
	int rc = notify_siblings(dev, 1);

	if (rc != 0) {
		device_printf(dev,
		    "failed to detach sibling devices: %d\n", rc);
		return (rc);
	}

	return (t4_detach_common(dev));
}
/*
 * Common teardown for detach.  Releases everything t4_attach set up, in
 * reverse dependency order: ULDs, /dev node, child devices and their
 * queues, interrupts, firmware session, bus resources, tables, and
 * finally the adapter's own locks.  The softc is zeroed at the end so a
 * repeat call is harmless (idempotent, see t4_detach).
 */
int
t4_detach_common(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

#ifdef TCP_OFFLOAD
	/* Upper layer drivers must go first; they hold references to us. */
	rc = t4_deactivate_all_uld(sc);
	if (rc) {
		device_printf(dev,
		    "failed to detach upper layer drivers: %d\n", rc);
		return (rc);
	}
#endif

	/* No more ioctls via the control device. */
	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	/* Take ourselves off the global adapter list. */
	sx_xlock(&t4_list_lock);
	SLIST_REMOVE(&t4_list, sc, adapter, link);
	sx_xunlock(&t4_list_lock);

	sc->flags &= ~CHK_MBOX_ACCESS;
	if (sc->flags & FULL_INIT_DONE) {
		if (!(sc->flags & IS_VF))
			t4_intr_disable(sc);
	}

	if (device_is_attached(dev)) {
		rc = bus_generic_detach(dev);
		if (rc) {
			device_printf(dev,
			    "failed to detach child devices: %d\n", rc);
			return (rc);
		}
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	/* FW_OK without IS_VF: we own the tx scheduler state. */
	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_free_tx_sched(sc);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			/* Free the main VI, then the port's device and memory. */
			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
		}
	}
	callout_stop(&sc->cal_callout);
	callout_drain(&sc->cal_callout);
	device_delete_children(dev);
	sysctl_ctx_free(&sc->ctx);
	adapter_full_uninit(sc);

	/* Say goodbye to the firmware (PF with a live FW connection only). */
	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	/* BAR mappings. */
	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	/* Lookup tables and per-feature state. */
	if (sc->l2t)
		t4_free_l2t(sc->l2t);
	if (sc->smt)
		t4_free_smt(sc->smt);
	t4_free_atid_table(sc);
#ifdef RATELIMIT
	t4_free_etid_table(sc);
#endif
	if (sc->key_map)
		vmem_destroy(sc->key_map);
#ifdef INET6
	t4_destroy_clip_table(sc);
#endif

	/* Queue arrays allocated during attach. */
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
	free(sc->sge.nm_rxq, M_CXGBE);
	free(sc->sge.nm_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	free(sc->tids.hpftid_tab, M_CXGBE);
	free_hftid_hash(&sc->tids);
	free(sc->tids.tid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);

	/* Callouts must be fully drained before their locks are destroyed. */
	callout_drain(&sc->ktls_tick);
	callout_drain(&sc->sfl_callout);
	if (mtx_initialized(&sc->tids.ftid_lock)) {
		mtx_destroy(&sc->tids.ftid_lock);
		cv_destroy(&sc->tids.ftid_cv);
	}
	if (mtx_initialized(&sc->tids.atid_lock))
		mtx_destroy(&sc->tids.atid_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);

	if (rw_initialized(&sc->policy_lock)) {
		rw_destroy(&sc->policy_lock);
#ifdef TCP_OFFLOAD
		if (sc->policy != NULL)
			free_offload_policy(sc->policy);
#endif
	}

	for (i = 0; i < NUM_MEMWIN; i++) {
		struct memwin *mw = &sc->memwin[i];

		if (rw_initialized(&mw->mw_lock))
			rw_destroy(&mw->mw_lock);
	}

	mtx_destroy(&sc->sfl_lock);
	mtx_destroy(&sc->reg_lock);
	mtx_destroy(&sc->sc_lock);

	/* Leave no stale state behind for a repeated detach. */
	bzero(sc, sizeof(*sc));

	return (0);
}
/*
 * Decide whether the adapter can be reset/suspended right now.  It is not
 * safe if any ifnet has a capability enabled whose state cannot survive a
 * reset (TOE, netmap, rate limiting, and TLS on T6), or if any TIDs of
 * any kind are still in use.  Must be called inside a synchronized op.
 */
static inline bool
ok_to_reset(struct adapter *sc)
{
	struct tid_info *t = &sc->tids;
	struct port_info *pi;
	struct vi_info *vi;
	int i, j;
	int caps = IFCAP_TOE | IFCAP_NETMAP | IFCAP_TXRTLMT;

	if (is_t6(sc))
		caps |= IFCAP_TXTLS;

	ASSERT_SYNCHRONIZED_OP(sc);
	MPASS(!(sc->flags & IS_VF));

	/* No interface may have a blocking capability enabled. */
	for_each_port(sc, i) {
		pi = sc->port[i];
		for_each_vi(pi, j, vi) {
			if (if_getcapenable(vi->ifp) & caps)
				return (false);
		}
	}

	/* Every flavor of TID must be completely idle. */
	if (atomic_load_int(&t->tids_in_use) > 0 ||
	    atomic_load_int(&t->stids_in_use) > 0 ||
	    atomic_load_int(&t->atids_in_use) > 0 ||
	    atomic_load_int(&t->ftids_in_use) > 0 ||
	    atomic_load_int(&t->hpftids_in_use) > 0 ||
	    atomic_load_int(&t->etids_in_use) > 0)
		return (false);

	return (true);
}
/*
 * Atomically mark the adapter stopped and shut it down.  Returns 1 if it
 * was already stopped (nothing done), otherwise the result of
 * t4_shutdown_adapter().
 */
static inline int
stop_adapter(struct adapter *sc)
{
	/* Test-and-set makes concurrent stops race-free. */
	if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_STOPPED)))
		return (1);		/* Already stopped. */
	return (t4_shutdown_adapter(sc));
}
/*
 * Suspend method: stop all DMA and interrupts, report link-down, quiesce
 * every queue, then mark the hardware completely off limits.  Fails with
 * EBUSY if ok_to_reset() says resources are still in use.
 */
static int
t4_suspend(device_t dev)
{
	struct adapter *sc = device_get_softc(dev);
	struct port_info *pi;
	struct vi_info *vi;
	if_t ifp;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
	struct sge_wrq *wrq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	struct sge_ofld_txq *ofld_txq;
#endif
	int rc, i, j, k;

	CH_ALERT(sc, "suspend requested\n");

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4sus");
	if (rc != 0)
		return (ENXIO);

	/* XXX: Can the kernel call suspend repeatedly without resume? */
	MPASS(!hw_off_limits(sc));

	if (!ok_to_reset(sc)) {
		/* XXX: should list what resource is preventing suspend. */
		CH_ERR(sc, "not safe to suspend.\n");
		rc = EBUSY;
		goto done;
	}

	/* No more DMA or interrupts. */
	stop_adapter(sc);

	/* Quiesce all activity. */
	for_each_port(sc, i) {
		pi = sc->port[i];
		pi->vxlan_tcam_entry = false;

		PORT_LOCK(pi);
		if (pi->up_vis > 0) {
			/*
			 * t4_shutdown_adapter has already shut down all the
			 * PHYs but it also disables interrupts and DMA so there
			 * won't be a link interrupt. So we update the state
			 * manually and inform the kernel.
			 */
			pi->link_cfg.link_ok = false;
			t4_os_link_changed(pi);
		}
		PORT_UNLOCK(pi);

		for_each_vi(pi, j, vi) {
			vi->xact_addr_filt = -1;
			/* Keep the stats tick from touching dead hardware. */
			mtx_lock(&vi->tick_mtx);
			vi->flags |= VI_SKIP_STATS;
			mtx_unlock(&vi->tick_mtx);
			if (!(vi->flags & VI_INIT_DONE))
				continue;

			ifp = vi->ifp;
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				mtx_lock(&vi->tick_mtx);
				callout_stop(&vi->tick);
				mtx_unlock(&vi->tick_mtx);
				callout_drain(&vi->tick);
			}

			/*
			 * Note that the HW is not available.
			 */
			for_each_txq(vi, k, txq) {
				TXQ_LOCK(txq);
				txq->eq.flags &= ~(EQ_ENABLED | EQ_HW_ALLOCATED);
				TXQ_UNLOCK(txq);
			}
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
			for_each_ofld_txq(vi, k, ofld_txq) {
				ofld_txq->wrq.eq.flags &= ~EQ_HW_ALLOCATED;
			}
#endif
			for_each_rxq(vi, k, rxq) {
				rxq->iq.flags &= ~IQ_HW_ALLOCATED;
			}
#if defined(TCP_OFFLOAD)
			for_each_ofld_rxq(vi, k, ofld_rxq) {
				ofld_rxq->iq.flags &= ~IQ_HW_ALLOCATED;
			}
#endif

			quiesce_vi(vi);
		}

		if (sc->flags & FULL_INIT_DONE) {
			/* Control queue */
			wrq = &sc->sge.ctrlq[i];
			wrq->eq.flags &= ~EQ_HW_ALLOCATED;
			quiesce_wrq(wrq);
		}
	}
	if (sc->flags & FULL_INIT_DONE) {
		/* Firmware event queue */
		sc->sge.fwq.flags &= ~IQ_HW_ALLOCATED;
		quiesce_iq_fl(sc, &sc->sge.fwq, NULL);
	}

	/* Stop calibration */
	callout_stop(&sc->cal_callout);
	callout_drain(&sc->cal_callout);

	/* Mark the adapter totally off limits. */
	mtx_lock(&sc->reg_lock);
	atomic_set_int(&sc->error_flags, HW_OFF_LIMITS);
	sc->flags &= ~(FW_OK | MASTER_PF);
	sc->reset_thread = NULL;
	mtx_unlock(&sc->reg_lock);

	if (t4_clock_gate_on_suspend) {
		t4_set_reg_field(sc, A_PMU_PART_CG_PWRMODE, F_MA_PART_CGEN |
		    F_LE_PART_CGEN | F_EDC1_PART_CGEN | F_EDC0_PART_CGEN |
		    F_TP_PART_CGEN | F_PDP_PART_CGEN | F_SGE_PART_CGEN, 0);
	}

	CH_ALERT(sc, "suspend completed.\n");
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
/*
 * Snapshot of adapter capabilities and parameters taken before a reset,
 * compared afterwards (see save_caps_and_params and
 * compare_caps_and_params) to verify the firmware came back with the same
 * configuration.
 */
struct adapter_pre_reset_state {
	u_int flags;

	/* Device capabilities negotiated with the firmware. */
	uint16_t nbmcaps;
	uint16_t linkcaps;
	uint16_t switchcaps;
	uint16_t niccaps;
	uint16_t toecaps;
	uint16_t rdmacaps;
	uint16_t cryptocaps;
	uint16_t iscsicaps;
	uint16_t fcoecaps;

	/* Firmware configuration file identity. */
	u_int cfcsum;
	char cfg_file[32];

	/* Resource layout that must match across the reset. */
	struct adapter_params params;
	struct t4_virt_res vres;
	struct tid_info tids;
	struct sge sge;

	int rawf_base;
	int nrawf;
};
/*
 * Record the adapter's current capabilities and parameters in *o so they
 * can be compared after a reset/resume.  Caller must hold a synchronized
 * op.
 */
static void
save_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o)
{

	ASSERT_SYNCHRONIZED_OP(sc);

	o->flags = sc->flags;

	o->nbmcaps = sc->nbmcaps;
	o->linkcaps = sc->linkcaps;
	o->switchcaps = sc->switchcaps;
	o->niccaps = sc->niccaps;
	o->toecaps = sc->toecaps;
	o->rdmacaps = sc->rdmacaps;
	o->cryptocaps = sc->cryptocaps;
	o->iscsicaps = sc->iscsicaps;
	o->fcoecaps = sc->fcoecaps;

	o->cfcsum = sc->cfcsum;
	MPASS(sizeof(o->cfg_file) == sizeof(sc->cfg_file));
	memcpy(o->cfg_file, sc->cfg_file, sizeof(o->cfg_file));

	/* Structure copies of the full parameter/resource blocks. */
	o->params = sc->params;
	o->vres = sc->vres;
	o->tids = sc->tids;
	o->sge = sc->sge;

	o->rawf_base = sc->rawf_base;
	o->nrawf = sc->nrawf;
}
/*
 * Compare the adapter's post-reset capabilities and parameters against the
 * pre-reset snapshot.  Every mismatch is logged; returns EINVAL if any
 * field differs, 0 if the configuration is identical.  Caller must hold a
 * synchronized op.
 */
static int
compare_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o)
{
	int rc = 0;

	ASSERT_SYNCHRONIZED_OP(sc);

	/* Capabilities */
#define COMPARE_CAPS(c) do { \
	if (o->c##caps != sc->c##caps) { \
		CH_ERR(sc, "%scaps 0x%04x -> 0x%04x.\n", #c, o->c##caps, \
		    sc->c##caps); \
		rc = EINVAL; \
	} \
} while (0)
	COMPARE_CAPS(nbm);
	COMPARE_CAPS(link);
	COMPARE_CAPS(switch);
	COMPARE_CAPS(nic);
	COMPARE_CAPS(toe);
	COMPARE_CAPS(rdma);
	COMPARE_CAPS(crypto);
	COMPARE_CAPS(iscsi);
	COMPARE_CAPS(fcoe);
#undef COMPARE_CAPS

	/* Firmware config file */
	if (o->cfcsum != sc->cfcsum) {
		CH_ERR(sc, "config file %s (0x%x) -> %s (0x%x)\n", o->cfg_file,
		    o->cfcsum, sc->cfg_file, sc->cfcsum);
		rc = EINVAL;
	}

	/* Individual parameters; "name" is only used in the log message. */
#define COMPARE_PARAM(p, name) do { \
	if (o->p != sc->p) { \
		CH_ERR(sc, #name " %d -> %d\n", o->p, sc->p); \
		rc = EINVAL; \
	} \
} while (0)
	COMPARE_PARAM(sge.iq_start, iq_start);
	COMPARE_PARAM(sge.eq_start, eq_start);
	COMPARE_PARAM(tids.ftid_base, ftid_base);
	COMPARE_PARAM(tids.ftid_end, ftid_end);
	COMPARE_PARAM(tids.nftids, nftids);
	COMPARE_PARAM(vres.l2t.start, l2t_start);
	COMPARE_PARAM(vres.l2t.size, l2t_size);
	COMPARE_PARAM(sge.iqmap_sz, iqmap_sz);
	COMPARE_PARAM(sge.eqmap_sz, eqmap_sz);
	COMPARE_PARAM(tids.tid_base, tid_base);
	COMPARE_PARAM(tids.hpftid_base, hpftid_base);
	COMPARE_PARAM(tids.hpftid_end, hpftid_end);
	COMPARE_PARAM(tids.nhpftids, nhpftids);
	COMPARE_PARAM(rawf_base, rawf_base);
	COMPARE_PARAM(nrawf, nrawf);
	COMPARE_PARAM(params.mps_bg_map, mps_bg_map);
	COMPARE_PARAM(params.filter2_wr_support, filter2_wr_support);
	COMPARE_PARAM(params.ulptx_memwrite_dsgl, ulptx_memwrite_dsgl);
	COMPARE_PARAM(params.fr_nsmr_tpte_wr_support, fr_nsmr_tpte_wr_support);
	COMPARE_PARAM(params.max_pkts_per_eth_tx_pkts_wr, max_pkts_per_eth_tx_pkts_wr);
	COMPARE_PARAM(tids.ntids, ntids);
	COMPARE_PARAM(tids.etid_base, etid_base);
	COMPARE_PARAM(tids.etid_end, etid_end);
	COMPARE_PARAM(tids.netids, netids);
	COMPARE_PARAM(params.eo_wr_cred, eo_wr_cred);
	COMPARE_PARAM(params.ethoffload, ethoffload);
	COMPARE_PARAM(tids.natids, natids);
	COMPARE_PARAM(tids.stid_base, stid_base);
	COMPARE_PARAM(vres.ddp.start, ddp_start);
	COMPARE_PARAM(vres.ddp.size, ddp_size);
	COMPARE_PARAM(params.ofldq_wr_cred, ofldq_wr_cred);
	COMPARE_PARAM(vres.stag.start, stag_start);
	COMPARE_PARAM(vres.stag.size, stag_size);
	COMPARE_PARAM(vres.rq.start, rq_start);
	COMPARE_PARAM(vres.rq.size, rq_size);
	COMPARE_PARAM(vres.pbl.start, pbl_start);
	COMPARE_PARAM(vres.pbl.size, pbl_size);
	COMPARE_PARAM(vres.qp.start, qp_start);
	COMPARE_PARAM(vres.qp.size, qp_size);
	COMPARE_PARAM(vres.cq.start, cq_start);
	COMPARE_PARAM(vres.cq.size, cq_size);
	COMPARE_PARAM(vres.ocq.start, ocq_start);
	COMPARE_PARAM(vres.ocq.size, ocq_size);
	COMPARE_PARAM(vres.srq.start, srq_start);
	COMPARE_PARAM(vres.srq.size, srq_size);
	COMPARE_PARAM(params.max_ordird_qp, max_ordird_qp);
	COMPARE_PARAM(params.max_ird_adapter, max_ird_adapter);
	COMPARE_PARAM(vres.iscsi.start, iscsi_start);
	COMPARE_PARAM(vres.iscsi.size, iscsi_size);
	COMPARE_PARAM(vres.key.start, key_start);
	COMPARE_PARAM(vres.key.size, key_size);
#undef COMPARE_PARAM

	return (rc);
}
/*
 * Resume method: re-establish contact with the firmware, verify that the
 * configuration matches the pre-suspend snapshot, re-initialize ports,
 * VIs, and queues, and re-enable interfaces that were running.  On
 * success sc->incarnation is bumped (used by reset_adapter to detect a
 * successful round trip).
 */
static int
t4_resume(device_t dev)
{
	struct adapter *sc = device_get_softc(dev);
	struct adapter_pre_reset_state *old_state = NULL;
	struct port_info *pi;
	struct vi_info *vi;
	if_t ifp;
	struct sge_txq *txq;
	int rc, i, j, k;

	CH_ALERT(sc, "resume requested.\n");

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4res");
	if (rc != 0)
		return (ENXIO);

	/* t4_suspend must have run to completion before we get here. */
	MPASS(hw_off_limits(sc));
	MPASS((sc->flags & FW_OK) == 0);
	MPASS((sc->flags & MASTER_PF) == 0);
	MPASS(sc->reset_thread == NULL);
	sc->reset_thread = curthread;

	/* Register access is expected to work by the time we're here. */
	if (t4_read_reg(sc, A_PL_WHOAMI) == 0xffffffff) {
		CH_ERR(sc, "%s: can't read device registers\n", __func__);
		rc = ENXIO;
		goto done;
	}

	/* Note that HW_OFF_LIMITS is cleared a bit later. */
	atomic_clear_int(&sc->error_flags, ADAP_FATAL_ERR | ADAP_STOPPED);

	/* Restore memory window. */
	setup_memwin(sc);

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		CH_ALERT(sc, "recovery mode on resume.\n");
		rc = 0;
		mtx_lock(&sc->reg_lock);
		atomic_clear_int(&sc->error_flags, HW_OFF_LIMITS);
		mtx_unlock(&sc->reg_lock);
		goto done;
	}

	/* Snapshot to compare against after re-reading firmware params. */
	old_state = malloc(sizeof(*old_state), M_CXGBE, M_ZERO | M_WAITOK);
	save_caps_and_params(sc, old_state);

	/* Reestablish contact with firmware and become the primary PF. */
	rc = contact_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */
	MPASS(sc->flags & FW_OK);

	if (sc->flags & MASTER_PF) {
		rc = partition_resources(sc);
		if (rc != 0)
			goto done; /* error message displayed already */
	}

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/* The configuration must be exactly what it was before suspend. */
	rc = compare_caps_and_params(sc, old_state);
	if (rc != 0)
		goto done; /* error message displayed already */

	for_each_port(sc, i) {
		pi = sc->port[i];
		MPASS(pi != NULL);
		MPASS(pi->vi != NULL);
		MPASS(pi->vi[0].dev == pi->dev);

		rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
		if (rc != 0) {
			CH_ERR(sc,
			    "failed to re-initialize port %d: %d\n", i, rc);
			goto done;
		}
		MPASS(sc->chan_map[pi->tx_chan] == i);

		PORT_LOCK(pi);
		fixup_link_config(pi);
		build_medialist(pi);
		PORT_UNLOCK(pi);
		for_each_vi(pi, j, vi) {
			if (IS_MAIN_VI(vi))
				continue;	/* main VI created by t4_port_init */
			rc = alloc_extra_vi(sc, pi, vi);
			if (rc != 0) {
				CH_ERR(vi,
				    "failed to re-allocate extra VI: %d\n", rc);
				goto done;
			}
		}
	}

	/*
	 * Interrupts and queues are about to be enabled and other threads will
	 * want to access the hardware too.  It is safe to do so.  Note that
	 * this thread is still in the middle of a synchronized_op.
	 */
	mtx_lock(&sc->reg_lock);
	atomic_clear_int(&sc->error_flags, HW_OFF_LIMITS);
	mtx_unlock(&sc->reg_lock);

	if (sc->flags & FULL_INIT_DONE) {
		rc = adapter_full_init(sc);
		if (rc != 0) {
			CH_ERR(sc, "failed to re-initialize adapter: %d\n", rc);
			goto done;
		}

		if (sc->vxlan_refcount > 0)
			enable_vxlan_rx(sc);

		for_each_port(sc, i) {
			pi = sc->port[i];
			for_each_vi(pi, j, vi) {
				/* Let the stats tick touch hardware again. */
				mtx_lock(&vi->tick_mtx);
				vi->flags &= ~VI_SKIP_STATS;
				mtx_unlock(&vi->tick_mtx);
				if (!(vi->flags & VI_INIT_DONE))
					continue;
				rc = vi_full_init(vi);
				if (rc != 0) {
					CH_ERR(vi, "failed to re-initialize "
					    "interface: %d\n", rc);
					goto done;
				}

				ifp = vi->ifp;
				if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
					continue;
				/*
				 * Note that we do not setup multicast addresses
				 * in the first pass.  This ensures that the
				 * unicast DMACs for all VIs on all ports get an
				 * MPS TCAM entry.
				 */
				rc = update_mac_settings(ifp, XGMAC_ALL &
				    ~XGMAC_MCADDRS);
				if (rc != 0) {
					CH_ERR(vi, "failed to re-configure MAC: %d\n", rc);
					goto done;
				}
				rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true,
				    true);
				if (rc != 0) {
					CH_ERR(vi, "failed to re-enable VI: %d\n", rc);
					goto done;
				}
				for_each_txq(vi, k, txq) {
					TXQ_LOCK(txq);
					txq->eq.flags |= EQ_ENABLED;
					TXQ_UNLOCK(txq);
				}
				mtx_lock(&vi->tick_mtx);
				callout_schedule(&vi->tick, hz);
				mtx_unlock(&vi->tick_mtx);
			}
			PORT_LOCK(pi);
			if (pi->up_vis > 0) {
				t4_update_port_info(pi);
				fixup_link_config(pi);
				build_medialist(pi);
				apply_link_config(pi);
				if (pi->link_cfg.link_ok)
					t4_os_link_changed(pi);
			}
			PORT_UNLOCK(pi);
		}

		/* Now reprogram the L2 multicast addresses. */
		for_each_port(sc, i) {
			pi = sc->port[i];
			for_each_vi(pi, j, vi) {
				if (!(vi->flags & VI_INIT_DONE))
					continue;
				ifp = vi->ifp;
				if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
					continue;
				rc = update_mac_settings(ifp, XGMAC_MCADDRS);
				if (rc != 0) {
					CH_ERR(vi, "failed to re-configure MCAST MACs: %d\n", rc);
					rc = 0;	/* carry on */
				}
			}
		}
	}

	/* Reset all calibration */
	t4_calibration_start(sc);

done:
	if (rc == 0) {
		sc->incarnation++;
		CH_ALERT(sc, "resume completed.\n");
	}
	end_synchronized_op(sc, 0);
	free(old_state, M_CXGBE);
	return (rc);
}
/*
 * Bus reset_prepare method.  Nothing to do beyond logging: the real work
 * happens in t4_suspend, driven by the bus.
 */
static int
t4_reset_prepare(device_t dev, device_t child)
{
	struct adapter *sc = device_get_softc(dev);

	CH_ALERT(sc, "reset_prepare.\n");
	return (0);
}
/*
 * Bus reset_post method.  Logging only; recovery is handled by t4_resume.
 */
static int
t4_reset_post(device_t dev, device_t child)
{
	struct adapter *sc = device_get_softc(dev);

	CH_ALERT(sc, "reset_post.\n");
	return (0);
}
/*
 * Perform a full bus-level reset of the adapter.  Pre-checks (not
 * suspended, ok_to_reset) run inside one synchronized op; the reset
 * itself goes through BUS_RESET_CHILD (which drives suspend/resume); a
 * second synchronized op verifies success by checking that the
 * incarnation counter advanced and no error flags are set.
 */
static int
reset_adapter(struct adapter *sc)
{
	int rc, oldinc, error_flags;

	CH_ALERT(sc, "reset requested.\n");

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4rst1");
	if (rc != 0)
		return (EBUSY);

	if (hw_off_limits(sc)) {
		CH_ERR(sc, "adapter is suspended, use resume (not reset).\n");
		rc = ENXIO;
		goto done;
	}

	if (!ok_to_reset(sc)) {
		/* XXX: should list what resource is preventing reset. */
		CH_ERR(sc, "not safe to reset.\n");
		rc = EBUSY;
		goto done;
	}

done:
	/* Record the incarnation before the reset so we can see it change. */
	oldinc = sc->incarnation;
	end_synchronized_op(sc, 0);
	if (rc != 0)
		return (rc);	/* Error logged already. */

	atomic_add_int(&sc->num_resets, 1);
	/* Newbus operations require Giant. */
	mtx_lock(&Giant);
	rc = BUS_RESET_CHILD(device_get_parent(sc->dev), sc->dev, 0);
	mtx_unlock(&Giant);
	if (rc != 0)
		CH_ERR(sc, "bus_reset_child failed: %d.\n", rc);
	else {
		rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4rst2");
		if (rc != 0)
			return (EBUSY);
		error_flags = atomic_load_int(&sc->error_flags);
		/* incarnation is bumped only by a successful t4_resume. */
		if (sc->incarnation > oldinc && error_flags == 0) {
			CH_ALERT(sc, "bus_reset_child succeeded.\n");
		} else {
			CH_ERR(sc, "adapter did not reset properly, flags "
			    "0x%08x, error_flags 0x%08x.\n", sc->flags,
			    error_flags);
			rc = ENXIO;
		}
		end_synchronized_op(sc, 0);
	}

	return (rc);
}
/*
 * Taskqueue wrapper around reset_adapter; arg is the struct adapter *.
 * The return value of reset_adapter is intentionally ignored here.
 */
static void
reset_adapter_task(void *arg, int pending)
{
	/* XXX: t4_async_event here? */
	reset_adapter(arg);
}
/*
 * Probe method for a cxgbe port: the parent already created this child,
 * so just set a description and accept it.
 */
static int
cxgbe_probe(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);

	device_set_descf(dev, "port %d", pi->port_id);

	return (BUS_PROBE_DEFAULT);
}
#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS | \
IFCAP_HWRXTSTMP | IFCAP_MEXTPG)
#define T4_CAP_ENABLE (T4_CAP)
-static int
+static void
cxgbe_vi_attach(device_t dev, struct vi_info *vi)
{
if_t ifp;
struct sbuf *sb;
struct sysctl_ctx_list *ctx = &vi->ctx;
struct sysctl_oid_list *children;
struct pfil_head_args pa;
struct adapter *sc = vi->adapter;
sysctl_ctx_init(ctx);
children = SYSCTL_CHILDREN(device_get_sysctl_tree(vi->dev));
vi->rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rxq",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NIC rx queues");
vi->txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "txq",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NIC tx queues");
#ifdef DEV_NETMAP
vi->nm_rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "nm_rxq",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap rx queues");
vi->nm_txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "nm_txq",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap tx queues");
#endif
#ifdef TCP_OFFLOAD
vi->ofld_rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "ofld_rxq",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE rx queues");
#endif
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
vi->ofld_txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "ofld_txq",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE/ETHOFLD tx queues");
#endif
vi->xact_addr_filt = -1;
mtx_init(&vi->tick_mtx, "vi tick", NULL, MTX_DEF);
callout_init_mtx(&vi->tick, &vi->tick_mtx, 0);
if (sc->flags & IS_VF || t4_tx_vm_wr != 0)
vi->flags |= TX_USES_VM_WR;
/* Allocate an ifnet and set it up */
ifp = if_alloc_dev(IFT_ETHER, dev);
- if (ifp == NULL) {
- device_printf(dev, "Cannot allocate ifnet\n");
- return (ENOMEM);
- }
vi->ifp = ifp;
if_setsoftc(ifp, vi);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setinitfn(ifp, cxgbe_init);
if_setioctlfn(ifp, cxgbe_ioctl);
if_settransmitfn(ifp, cxgbe_transmit);
if_setqflushfn(ifp, cxgbe_qflush);
if (vi->pi->nvi > 1 || sc->flags & IS_VF)
if_setgetcounterfn(ifp, vi_get_counter);
else
if_setgetcounterfn(ifp, cxgbe_get_counter);
#if defined(KERN_TLS) || defined(RATELIMIT)
if_setsndtagallocfn(ifp, cxgbe_snd_tag_alloc);
#endif
#ifdef RATELIMIT
if_setratelimitqueryfn(ifp, cxgbe_ratelimit_query);
#endif
if_setcapabilities(ifp, T4_CAP);
if_setcapenable(ifp, T4_CAP_ENABLE);
if_sethwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
if (chip_id(sc) >= CHELSIO_T6) {
if_setcapabilitiesbit(ifp, IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO, 0);
if_setcapenablebit(ifp, IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO, 0);
if_sethwassistbits(ifp, CSUM_INNER_IP6_UDP | CSUM_INNER_IP6_TCP |
CSUM_INNER_IP6_TSO | CSUM_INNER_IP | CSUM_INNER_IP_UDP |
CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO | CSUM_ENCAP_VXLAN, 0);
}
#ifdef TCP_OFFLOAD
if (vi->nofldrxq != 0)
if_setcapabilitiesbit(ifp, IFCAP_TOE, 0);
#endif
#ifdef RATELIMIT
if (is_ethoffload(sc) && vi->nofldtxq != 0) {
if_setcapabilitiesbit(ifp, IFCAP_TXRTLMT, 0);
if_setcapenablebit(ifp, IFCAP_TXRTLMT, 0);
}
#endif
if_sethwtsomax(ifp, IP_MAXPACKET);
if (vi->flags & TX_USES_VM_WR)
if_sethwtsomaxsegcount(ifp, TX_SGL_SEGS_VM_TSO);
else
if_sethwtsomaxsegcount(ifp, TX_SGL_SEGS_TSO);
#ifdef RATELIMIT
if (is_ethoffload(sc) && vi->nofldtxq != 0)
if_sethwtsomaxsegcount(ifp, TX_SGL_SEGS_EO_TSO);
#endif
if_sethwtsomaxsegsize(ifp, 65536);
#ifdef KERN_TLS
if (is_ktls(sc)) {
if_setcapabilitiesbit(ifp, IFCAP_TXTLS, 0);
if (sc->flags & KERN_TLS_ON || !is_t6(sc))
if_setcapenablebit(ifp, IFCAP_TXTLS, 0);
}
#endif
ether_ifattach(ifp, vi->hw_addr);
#ifdef DEV_NETMAP
if (vi->nnmrxq != 0)
cxgbe_nm_attach(vi);
#endif
sb = sbuf_new_auto();
sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
switch (if_getcapabilities(ifp) & (IFCAP_TOE | IFCAP_TXRTLMT)) {
case IFCAP_TOE:
sbuf_printf(sb, "; %d txq (TOE)", vi->nofldtxq);
break;
case IFCAP_TOE | IFCAP_TXRTLMT:
sbuf_printf(sb, "; %d txq (TOE/ETHOFLD)", vi->nofldtxq);
break;
case IFCAP_TXRTLMT:
sbuf_printf(sb, "; %d txq (ETHOFLD)", vi->nofldtxq);
break;
}
#endif
#ifdef TCP_OFFLOAD
if (if_getcapabilities(ifp) & IFCAP_TOE)
sbuf_printf(sb, ", %d rxq (TOE)", vi->nofldrxq);
#endif
#ifdef DEV_NETMAP
if (if_getcapabilities(ifp) & IFCAP_NETMAP)
sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
vi->nnmtxq, vi->nnmrxq);
#endif
sbuf_finish(sb);
device_printf(dev, "%s\n", sbuf_data(sb));
sbuf_delete(sb);
vi_sysctls(vi);
pa.pa_version = PFIL_VERSION;
pa.pa_flags = PFIL_IN;
pa.pa_type = PFIL_TYPE_ETHERNET;
pa.pa_headname = if_name(ifp);
vi->pfil = pfil_head_register(&pa);
-
- return (0);
}
/*
 * Attach method for a cxgbe port.  Sets up the main VI via
 * cxgbe_vi_attach (which cannot fail now that it returns void — this
 * resolves the leftover unified-diff markers that were embedded here),
 * adds child devices for any extra VIs, and attaches them.
 */
static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct vi_info *vi;
	int i;

	sysctl_ctx_init(&pi->ctx);

	cxgbe_vi_attach(dev, &pi->vi[0]);

	/* Extra VIs get their own child devices (vcxgbe/vcxl). */
	for_each_vi(pi, i, vi) {
		if (i == 0)
			continue;
		vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
		if (vi->dev == NULL) {
			device_printf(dev, "failed to add VI %d\n", i);
			continue;	/* best-effort; the port still works */
		}
		device_set_softc(vi->dev, vi);
	}

	cxgbe_sysctls(pi);

	bus_generic_attach(dev);

	return (0);
}
/*
 * Tear down one VI: unhook pfil, detach the ifnet, stop the interface,
 * drain its tick callout, free its sysctls and queues, and release the
 * ifnet.  Counterpart of cxgbe_vi_attach.
 */
static void
cxgbe_vi_detach(struct vi_info *vi)
{
	if_t ifp = vi->ifp;

	if (vi->pfil != NULL) {
		pfil_head_unregister(vi->pfil);
		vi->pfil = NULL;
	}

	/* ether_ifdetach first so no new traffic/ioctls arrive. */
	ether_ifdetach(ifp);

	/* Let detach proceed even if these fail. */
#ifdef DEV_NETMAP
	if (if_getcapabilities(ifp) & IFCAP_NETMAP)
		cxgbe_nm_detach(vi);
#endif
	cxgbe_uninit_synchronized(vi);
	callout_drain(&vi->tick);
	mtx_destroy(&vi->tick_mtx);
	sysctl_ctx_free(&vi->ctx);
	vi_full_uninit(vi);

	if_free(vi->ifp);
	vi->ifp = NULL;
}
/*
 * Detach method for a cxgbe port: detach the extra-VI children first,
 * then tear down the main VI, the tracer hookup, and the port's media
 * list.  Counterpart of cxgbe_attach.
 */
static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	int rc;

	/* Detach the extra VIs first. */
	rc = bus_generic_detach(dev);
	if (rc)
		return (rc);
	device_delete_children(dev);

	sysctl_ctx_free(&pi->ctx);
	begin_vi_detach(sc, &pi->vi[0]);
	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}
	cxgbe_vi_detach(&pi->vi[0]);
	ifmedia_removeall(&pi->media);
	end_vi_detach(sc, &pi->vi[0]);

	return (0);
}
/*
 * ifnet if_init handler; arg is the VI.  Bring the interface up inside a
 * synchronized op; silently bail if the op cannot be started.
 */
static void
cxgbe_init(void *arg)
{
	struct vi_info *vi = arg;
	struct adapter *sc = vi->adapter;

	if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
		return;
	cxgbe_init_synchronized(vi);
	end_synchronized_op(sc, 0);
}
static int
cxgbe_ioctl(if_t ifp, unsigned long cmd, caddr_t data)
{
int rc = 0, mtu, flags;
struct vi_info *vi = if_getsoftc(ifp);
struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
struct ifreq *ifr = (struct ifreq *)data;
uint32_t mask;
switch (cmd) {
case SIOCSIFMTU:
mtu = ifr->ifr_mtu;
if (mtu < ETHERMIN || mtu > MAX_MTU)
return (EINVAL);
rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
if (rc)
return (rc);
if_setmtu(ifp, mtu);
if (vi->flags & VI_INIT_DONE) {
t4_update_fl_bufsize(ifp);
if (!hw_off_limits(sc) &&
if_getdrvflags(ifp) & IFF_DRV_RUNNING)
rc = update_mac_settings(ifp, XGMAC_MTU);
}
end_synchronized_op(sc, 0);
break;
case SIOCSIFFLAGS:
rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4flg");
if (rc)
return (rc);
if (hw_off_limits(sc)) {
rc = ENXIO;
goto fail;
}
if (if_getflags(ifp) & IFF_UP) {
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
flags = vi->if_flags;
if ((if_getflags(ifp) ^ flags) &
(IFF_PROMISC | IFF_ALLMULTI)) {
rc = update_mac_settings(ifp,
XGMAC_PROMISC | XGMAC_ALLMULTI);
}
} else {
rc = cxgbe_init_synchronized(vi);
}
vi->if_flags = if_getflags(ifp);
} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
rc = cxgbe_uninit_synchronized(vi);
}
end_synchronized_op(sc, 0);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4multi");
if (rc)
return (rc);
if (!hw_off_limits(sc) && if_getdrvflags(ifp) & IFF_DRV_RUNNING)
rc = update_mac_settings(ifp, XGMAC_MCADDRS);
end_synchronized_op(sc, 0);
break;
case SIOCSIFCAP:
rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
if (rc)
return (rc);
mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
if (mask & IFCAP_TXCSUM) {
if_togglecapenable(ifp, IFCAP_TXCSUM);
if_togglehwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP);
if (IFCAP_TSO4 & if_getcapenable(ifp) &&
!(IFCAP_TXCSUM & if_getcapenable(ifp))) {
mask &= ~IFCAP_TSO4;
if_setcapenablebit(ifp, 0, IFCAP_TSO4);
if_printf(ifp,
"tso4 disabled due to -txcsum.\n");
}
}
if (mask & IFCAP_TXCSUM_IPV6) {
if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
if_togglehwassist(ifp, CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
if (IFCAP_TSO6 & if_getcapenable(ifp) &&
!(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
mask &= ~IFCAP_TSO6;
if_setcapenablebit(ifp, 0, IFCAP_TSO6);
if_printf(ifp,
"tso6 disabled due to -txcsum6.\n");
}
}
if (mask & IFCAP_RXCSUM)
if_togglecapenable(ifp, IFCAP_RXCSUM);
if (mask & IFCAP_RXCSUM_IPV6)
if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6);
/*
* Note that we leave CSUM_TSO alone (it is always set). The
* kernel takes both IFCAP_TSOx and CSUM_TSO into account before
* sending a TSO request our way, so it's sufficient to toggle
* IFCAP_TSOx only.
*/
if (mask & IFCAP_TSO4) {
if (!(IFCAP_TSO4 & if_getcapenable(ifp)) &&
!(IFCAP_TXCSUM & if_getcapenable(ifp))) {
if_printf(ifp, "enable txcsum first.\n");
rc = EAGAIN;
goto fail;
}
if_togglecapenable(ifp, IFCAP_TSO4);
}
if (mask & IFCAP_TSO6) {
if (!(IFCAP_TSO6 & if_getcapenable(ifp)) &&
!(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
if_printf(ifp, "enable txcsum6 first.\n");
rc = EAGAIN;
goto fail;
}
if_togglecapenable(ifp, IFCAP_TSO6);
}
if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
int i;
struct sge_rxq *rxq;
if_togglecapenable(ifp, IFCAP_LRO);
for_each_rxq(vi, i, rxq) {
if (if_getcapenable(ifp) & IFCAP_LRO)
rxq->iq.flags |= IQ_LRO_ENABLED;
else
rxq->iq.flags &= ~IQ_LRO_ENABLED;
}
#endif
}
#ifdef TCP_OFFLOAD
if (mask & IFCAP_TOE) {
int enable = (if_getcapenable(ifp) ^ mask) & IFCAP_TOE;
rc = toe_capability(vi, enable);
if (rc != 0)
goto fail;
if_togglecapenable(ifp, mask);
}
#endif
if (mask & IFCAP_VLAN_HWTAGGING) {
if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
rc = update_mac_settings(ifp, XGMAC_VLANEX);
}
if (mask & IFCAP_VLAN_MTU) {
if_togglecapenable(ifp, IFCAP_VLAN_MTU);
/* Need to find out how to disable auto-mtu-inflation */
}
if (mask & IFCAP_VLAN_HWTSO)
if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
if (mask & IFCAP_VLAN_HWCSUM)
if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
#ifdef RATELIMIT
if (mask & IFCAP_TXRTLMT)
if_togglecapenable(ifp, IFCAP_TXRTLMT);
#endif
if (mask & IFCAP_HWRXTSTMP) {
int i;
struct sge_rxq *rxq;
if_togglecapenable(ifp, IFCAP_HWRXTSTMP);
for_each_rxq(vi, i, rxq) {
if (if_getcapenable(ifp) & IFCAP_HWRXTSTMP)
rxq->iq.flags |= IQ_RX_TIMESTAMP;
else
rxq->iq.flags &= ~IQ_RX_TIMESTAMP;
}
}
if (mask & IFCAP_MEXTPG)
if_togglecapenable(ifp, IFCAP_MEXTPG);
#ifdef KERN_TLS
if (mask & IFCAP_TXTLS) {
int enable = (if_getcapenable(ifp) ^ mask) & IFCAP_TXTLS;
rc = ktls_capability(sc, enable);
if (rc != 0)
goto fail;
if_togglecapenable(ifp, mask & IFCAP_TXTLS);
}
#endif
if (mask & IFCAP_VXLAN_HWCSUM) {
if_togglecapenable(ifp, IFCAP_VXLAN_HWCSUM);
if_togglehwassist(ifp, CSUM_INNER_IP6_UDP |
CSUM_INNER_IP6_TCP | CSUM_INNER_IP |
CSUM_INNER_IP_UDP | CSUM_INNER_IP_TCP);
}
if (mask & IFCAP_VXLAN_HWTSO) {
if_togglecapenable(ifp, IFCAP_VXLAN_HWTSO);
if_togglehwassist(ifp, CSUM_INNER_IP6_TSO |
CSUM_INNER_IP_TSO);
}
#ifdef VLAN_CAPABILITIES
VLAN_CAPABILITIES(ifp);
#endif
fail:
end_synchronized_op(sc, 0);
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
case SIOCGIFXMEDIA:
rc = ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
break;
case SIOCGI2C: {
struct ifi2creq i2c;
rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
if (rc != 0)
break;
if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
rc = EPERM;
break;
}
if (i2c.len > sizeof(i2c.data)) {
rc = EINVAL;
break;
}
rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
if (rc)
return (rc);
if (hw_off_limits(sc))
rc = ENXIO;
else
rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
i2c.offset, i2c.len, &i2c.data[0]);
end_synchronized_op(sc, 0);
if (rc == 0)
rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
break;
}
default:
rc = ether_ioctl(ifp, cmd, data);
}
return (rc);
}
/*
 * if_transmit method: queue one packet for transmission.
 *
 * Drops the mbuf and returns ENETDOWN when the link is down.  parse_pkt
 * validates/preps the mbuf and may queue it itself (EINPROGRESS) or free
 * it on error.  Otherwise a txq is chosen -- flowid based for hashed
 * traffic, offset past the rsrv_noflowq queues reserved for unhashed
 * traffic -- and the mbuf is enqueued on that txq's mp_ring.
 */
static int
cxgbe_transmit(if_t ifp, struct mbuf *m)
{
	struct vi_info *vi = if_getsoftc(ifp);
	struct port_info *pi = vi->pi;
	struct adapter *sc;
	struct sge_txq *txq;
	void *items[1];
	int rc;

	M_ASSERTPKTHDR(m);
	MPASS(m->m_nextpkt == NULL);	/* not quite ready for this yet */
#if defined(KERN_TLS) || defined(RATELIMIT)
	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG)
		MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
#endif

	if (__predict_false(pi->link_cfg.link_ok == false)) {
		m_freem(m);
		return (ENETDOWN);
	}

	rc = parse_pkt(&m, vi->flags & TX_USES_VM_WR);
	if (__predict_false(rc != 0)) {
		if (__predict_true(rc == EINPROGRESS)) {
			/* queued by parse_pkt */
			MPASS(m != NULL);
			return (0);
		}

		MPASS(m == NULL);	/* was freed already */
		atomic_add_int(&pi->tx_parse_error, 1);	/* rare, atomic is ok */
		return (rc);
	}

	/* Select a txq. */
	sc = vi->adapter;
	txq = &sc->sge.txq[vi->first_txq];
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
		    vi->rsrv_noflowq);

	items[0] = m;
	/* mp_ring frees nothing on failure; the mbuf is still ours to free. */
	rc = mp_ring_enqueue(txq->r, items, 1, 256);
	if (__predict_false(rc != 0))
		m_freem(m);

	return (rc);
}
/*
 * if_qflush method: discard everything queued for transmit on every txq of
 * the VI, then flush the ifnet-layer send queue as well.
 */
static void
cxgbe_qflush(if_t ifp)
{
	struct vi_info *vi = if_getsoftc(ifp);
	struct sge_txq *txq;
	int i;

	/* queues do not exist if !VI_INIT_DONE. */
	if (vi->flags & VI_INIT_DONE) {
		for_each_txq(vi, i, txq) {
			/* Mark the queue as being flushed. */
			TXQ_LOCK(txq);
			txq->eq.flags |= EQ_QFLUSH;
			TXQ_UNLOCK(txq);
			/* Wait for the ring to drain while EQ_QFLUSH is set. */
			while (!mp_ring_is_idle(txq->r)) {
				mp_ring_check_drainage(txq->r, 4096);
				pause("qflush", 1);
			}
			TXQ_LOCK(txq);
			txq->eq.flags &= ~EQ_QFLUSH;
			TXQ_UNLOCK(txq);
		}
	}
	if_qflush(ifp);
}
/*
 * if_get_counter method for VIs backed by firmware VI stats (struct
 * fw_vi_stats_vf).  Refreshes the cached stats under tick_mtx and then
 * answers from the cache.  OQDROPS additionally folds in the per-txq
 * mp_ring drop counters.
 */
static uint64_t
vi_get_counter(if_t ifp, ift_counter c)
{
	struct vi_info *vi = if_getsoftc(ifp);
	struct fw_vi_stats_vf *s = &vi->stats;

	mtx_lock(&vi->tick_mtx);
	vi_refresh_stats(vi);
	mtx_unlock(&vi->tick_mtx);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (s->rx_bcast_frames + s->rx_mcast_frames +
		    s->rx_ucast_frames);
	case IFCOUNTER_IERRORS:
		return (s->rx_err_frames);
	case IFCOUNTER_OPACKETS:
		return (s->tx_bcast_frames + s->tx_mcast_frames +
		    s->tx_ucast_frames + s->tx_offload_frames);
	case IFCOUNTER_OERRORS:
		return (s->tx_drop_frames);
	case IFCOUNTER_IBYTES:
		return (s->rx_bcast_bytes + s->rx_mcast_bytes +
		    s->rx_ucast_bytes);
	case IFCOUNTER_OBYTES:
		return (s->tx_bcast_bytes + s->tx_mcast_bytes +
		    s->tx_ucast_bytes + s->tx_offload_bytes);
	case IFCOUNTER_IMCASTS:
		return (s->rx_mcast_frames);
	case IFCOUNTER_OMCASTS:
		return (s->tx_mcast_frames);
	case IFCOUNTER_OQDROPS: {
		uint64_t drops;

		drops = 0;
		if (vi->flags & VI_INIT_DONE) {
			int i;
			struct sge_txq *txq;

			for_each_txq(vi, i, txq)
				drops += counter_u64_fetch(txq->r->dropped);
		}

		return (drops);
	}
	default:
		return (if_get_counter_default(ifp, c));
	}
}
/*
 * if_get_counter method for the main port interface, answered from the
 * port's MAC statistics (struct port_stats).  Stats are refreshed under
 * tick_mtx before reading.  OQDROPS combines the hardware tx_drop counter
 * with the per-txq mp_ring drop counters.
 */
static uint64_t
cxgbe_get_counter(if_t ifp, ift_counter c)
{
	struct vi_info *vi = if_getsoftc(ifp);
	struct port_info *pi = vi->pi;
	struct port_stats *s = &pi->stats;

	mtx_lock(&vi->tick_mtx);
	cxgbe_refresh_stats(vi);
	mtx_unlock(&vi->tick_mtx);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (s->rx_frames);
	case IFCOUNTER_IERRORS:
		return (s->rx_jabber + s->rx_runt + s->rx_too_long +
		    s->rx_fcs_err + s->rx_len_err);
	case IFCOUNTER_OPACKETS:
		return (s->tx_frames);
	case IFCOUNTER_OERRORS:
		return (s->tx_error_frames);
	case IFCOUNTER_IBYTES:
		return (s->rx_octets);
	case IFCOUNTER_OBYTES:
		return (s->tx_octets);
	case IFCOUNTER_IMCASTS:
		return (s->rx_mcast_frames);
	case IFCOUNTER_OMCASTS:
		return (s->tx_mcast_frames);
	case IFCOUNTER_IQDROPS:
		/* Per-channel overflow/truncation drops plus tunnel drops. */
		return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
		    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
		    s->rx_trunc3 + pi->tnl_cong_drops);
	case IFCOUNTER_OQDROPS: {
		uint64_t drops;

		drops = s->tx_drop;
		if (vi->flags & VI_INIT_DONE) {
			int i;
			struct sge_txq *txq;

			for_each_txq(vi, i, txq)
				drops += counter_u64_fetch(txq->r->dropped);
		}

		return (drops);
	}
	default:
		return (if_get_counter_default(ifp, c));
	}
}
#if defined(KERN_TLS) || defined(RATELIMIT)
/*
 * if_snd_tag_alloc method: allocate a send tag of the requested type.
 * Rate-limit tags are handled by cxgbe_rate_tag_alloc; TLS tags are
 * supported only on T6 adapters.  Anything else is EOPNOTSUPP.
 */
static int
cxgbe_snd_tag_alloc(if_t ifp, union if_snd_tag_alloc_params *params,
    struct m_snd_tag **pt)
{
	int error;

	switch (params->hdr.type) {
#ifdef RATELIMIT
	case IF_SND_TAG_TYPE_RATE_LIMIT:
		error = cxgbe_rate_tag_alloc(ifp, params, pt);
		break;
#endif
#ifdef KERN_TLS
	case IF_SND_TAG_TYPE_TLS:
	{
		struct vi_info *vi = if_getsoftc(ifp);

		if (is_t6(vi->pi->adapter))
			error = t6_tls_tag_alloc(ifp, params, pt);
		else
			error = EOPNOTSUPP;
		break;
	}
#endif
	default:
		error = EOPNOTSUPP;
	}

	return (error);
}
#endif
/*
 * The kernel picks a media from the list we had provided but we still
 * validate the request.  Autoselect is rejected when the transceiver does
 * not support autonegotiation; otherwise the requested speed and pause
 * settings are recorded and, if any VI is up, pushed to the hardware.
 */
int
cxgbe_media_change(if_t ifp)
{
	struct vi_info *vi = if_getsoftc(ifp);
	struct port_info *pi = vi->pi;
	struct ifmedia *ifm = &pi->media;
	struct link_config *lc = &pi->link_cfg;
	struct adapter *sc = pi->adapter;
	int rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mec");
	if (rc != 0)
		return (rc);
	PORT_LOCK(pi);
	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
		/* ifconfig .. media autoselect */
		if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
			rc = ENOTSUP; /* AN not supported by transceiver */
			goto done;
		}
		lc->requested_aneg = AUTONEG_ENABLE;
		lc->requested_speed = 0;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		/* Fixed media: disable AN, derive speed from the media word. */
		lc->requested_aneg = AUTONEG_DISABLE;
		lc->requested_speed =
		    ifmedia_baudrate(ifm->ifm_media) / 1000000;
		lc->requested_fc = 0;
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE)
			lc->requested_fc |= PAUSE_RX;
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
			lc->requested_fc |= PAUSE_TX;
	}
	/* Apply immediately only if an interface is up and hw is reachable. */
	if (pi->up_vis > 0 && !hw_off_limits(sc)) {
		fixup_link_config(pi);
		rc = apply_link_config(pi);
	}
done:
	PORT_UNLOCK(pi);
	end_synchronized_op(sc, 0);

	return (rc);
}
/*
 * Base media word (without ETHER, pause, link active, etc.) for the port at
 * the given speed.  speed must be a single FW_PORT_CAP32 speed bit.  For
 * ports with pluggable transceivers the answer also depends on the module
 * type currently plugged in.  Returns IFM_UNKNOWN for combinations the
 * table below does not cover.
 */
static int
port_mword(struct port_info *pi, uint32_t speed)
{

	MPASS(speed & M_FW_PORT_CAP32_SPEED);
	MPASS(powerof2(speed));

	switch(pi->port_type) {
	case FW_PORT_TYPE_BT_SGMII:
	case FW_PORT_TYPE_BT_XFI:
	case FW_PORT_TYPE_BT_XAUI:
		/* BaseT */
		switch (speed) {
		case FW_PORT_CAP32_SPEED_100M:
			return (IFM_100_T);
		case FW_PORT_CAP32_SPEED_1G:
			return (IFM_1000_T);
		case FW_PORT_CAP32_SPEED_10G:
			return (IFM_10G_T);
		}
		break;
	case FW_PORT_TYPE_KX4:
		if (speed == FW_PORT_CAP32_SPEED_10G)
			return (IFM_10G_KX4);
		break;
	case FW_PORT_TYPE_CX4:
		if (speed == FW_PORT_CAP32_SPEED_10G)
			return (IFM_10G_CX4);
		break;
	case FW_PORT_TYPE_KX:
		if (speed == FW_PORT_CAP32_SPEED_1G)
			return (IFM_1000_KX);
		break;
	case FW_PORT_TYPE_KR:
	case FW_PORT_TYPE_BP_AP:
	case FW_PORT_TYPE_BP4_AP:
	case FW_PORT_TYPE_BP40_BA:
	case FW_PORT_TYPE_KR4_100G:
	case FW_PORT_TYPE_KR_SFP28:
	case FW_PORT_TYPE_KR_XLAUI:
		/* Backplane */
		switch (speed) {
		case FW_PORT_CAP32_SPEED_1G:
			return (IFM_1000_KX);
		case FW_PORT_CAP32_SPEED_10G:
			return (IFM_10G_KR);
		case FW_PORT_CAP32_SPEED_25G:
			return (IFM_25G_KR);
		case FW_PORT_CAP32_SPEED_40G:
			return (IFM_40G_KR4);
		case FW_PORT_CAP32_SPEED_50G:
			return (IFM_50G_KR2);
		case FW_PORT_CAP32_SPEED_100G:
			return (IFM_100G_KR4);
		}
		break;
	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_QSFP_10G:
	case FW_PORT_TYPE_QSA:
	case FW_PORT_TYPE_QSFP:
	case FW_PORT_TYPE_CR4_QSFP:
	case FW_PORT_TYPE_CR_QSFP:
	case FW_PORT_TYPE_CR2_QSFP:
	case FW_PORT_TYPE_SFP28:
		/* Pluggable transceiver */
		switch (pi->mod_type) {
		case FW_PORT_MOD_TYPE_LR:
			switch (speed) {
			case FW_PORT_CAP32_SPEED_1G:
				return (IFM_1000_LX);
			case FW_PORT_CAP32_SPEED_10G:
				return (IFM_10G_LR);
			case FW_PORT_CAP32_SPEED_25G:
				return (IFM_25G_LR);
			case FW_PORT_CAP32_SPEED_40G:
				return (IFM_40G_LR4);
			case FW_PORT_CAP32_SPEED_50G:
				return (IFM_50G_LR2);
			case FW_PORT_CAP32_SPEED_100G:
				return (IFM_100G_LR4);
			}
			break;
		case FW_PORT_MOD_TYPE_SR:
			switch (speed) {
			case FW_PORT_CAP32_SPEED_1G:
				return (IFM_1000_SX);
			case FW_PORT_CAP32_SPEED_10G:
				return (IFM_10G_SR);
			case FW_PORT_CAP32_SPEED_25G:
				return (IFM_25G_SR);
			case FW_PORT_CAP32_SPEED_40G:
				return (IFM_40G_SR4);
			case FW_PORT_CAP32_SPEED_50G:
				return (IFM_50G_SR2);
			case FW_PORT_CAP32_SPEED_100G:
				return (IFM_100G_SR4);
			}
			break;
		case FW_PORT_MOD_TYPE_ER:
			if (speed == FW_PORT_CAP32_SPEED_10G)
				return (IFM_10G_ER);
			break;
		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
			switch (speed) {
			case FW_PORT_CAP32_SPEED_1G:
				return (IFM_1000_CX);
			case FW_PORT_CAP32_SPEED_10G:
				return (IFM_10G_TWINAX);
			case FW_PORT_CAP32_SPEED_25G:
				return (IFM_25G_CR);
			case FW_PORT_CAP32_SPEED_40G:
				return (IFM_40G_CR4);
			case FW_PORT_CAP32_SPEED_50G:
				return (IFM_50G_CR2);
			case FW_PORT_CAP32_SPEED_100G:
				return (IFM_100G_CR4);
			}
			break;
		case FW_PORT_MOD_TYPE_LRM:
			if (speed == FW_PORT_CAP32_SPEED_10G)
				return (IFM_10G_LRM);
			break;
		case FW_PORT_MOD_TYPE_NA:
			MPASS(0);	/* Not pluggable? */
			/* fall through */
		case FW_PORT_MOD_TYPE_ERROR:
		case FW_PORT_MOD_TYPE_UNKNOWN:
		case FW_PORT_MOD_TYPE_NOTSUPPORTED:
			break;
		case FW_PORT_MOD_TYPE_NONE:
			return (IFM_NONE);
		}
		break;
	case FW_PORT_TYPE_NONE:
		return (IFM_NONE);
	}

	return (IFM_UNKNOWN);
}
/*
 * ifmedia status callback: report link status and, when the link is up,
 * duplex, pause settings, and the active media word for the current speed.
 */
void
cxgbe_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct vi_info *vi = if_getsoftc(ifp);
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct link_config *lc = &pi->link_cfg;

	if (begin_synchronized_op(sc, vi , SLEEP_OK | INTR_OK, "t4med") != 0)
		return;
	PORT_LOCK(pi);

	if (pi->up_vis == 0 && !hw_off_limits(sc)) {
		/*
		 * If all the interfaces are administratively down the firmware
		 * does not report transceiver changes.  Refresh port info here
		 * so that ifconfig displays accurate ifmedia at all times.
		 * This is the only reason we have a synchronized op in this
		 * function.  Just PORT_LOCK would have been enough otherwise.
		 */
		t4_update_port_info(pi);
		build_medialist(pi);
	}

	/* ifm_status */
	ifmr->ifm_status = IFM_AVALID;
	if (lc->link_ok == false)
		goto done;
	ifmr->ifm_status |= IFM_ACTIVE;

	/* ifm_active */
	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE);
	if (lc->fc & PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (lc->fc & PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	ifmr->ifm_active |= port_mword(pi, speed_to_fwcap(lc->speed));
done:
	PORT_UNLOCK(pi);
	end_synchronized_op(sc, 0);
}
/*
 * Probe method for an extra virtual interface (vcxgbe).  Always claims the
 * device; only sets a descriptive name (port and VI index).
 */
static int
vcxgbe_probe(device_t dev)
{
	struct vi_info *vi = device_get_softc(dev);

	device_set_descf(dev, "port %d vi %td", vi->pi->port_id,
	    vi - vi->pi->vi);

	return (BUS_PROBE_DEFAULT);
}
/*
 * Allocate hardware resources for an extra VI (index > 0) of a port: a
 * firmware virtual interface and, when available, a slice of the RSS
 * table.  Returns 0 on success -- even when no RSS slice was available, in
 * which case rss_base is left at 0xffff -- or a positive errno.
 */
static int
alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi)
{
	int func, index, rc;
	uint32_t param, val;

	ASSERT_SYNCHRONIZED_OP(sc);

	index = vi - pi->vi;
	MPASS(index > 0);	/* This function deals with _extra_ VIs only */
	KASSERT(index < nitems(vi_mac_funcs),
	    ("%s: VI %s doesn't have a MAC func", __func__,
	    device_get_nameunit(vi->dev)));
	func = vi_mac_funcs[index];
	rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
	    vi->hw_addr, &vi->rss_size, &vi->vfvld, &vi->vin, func, 0);
	if (rc < 0) {
		/* Fixed: the concatenated literals were missing the space
		 * between "%d" and "for", yielding "interface %dfor port". */
		CH_ERR(vi, "failed to allocate virtual interface %d "
		    "for port %d: %d\n", index, pi->port_id, -rc);
		return (-rc);
	}
	vi->viid = rc;

	if (vi->rss_size == 1) {
		/*
		 * This VI didn't get a slice of the RSS table.  Reduce the
		 * number of VIs being created (hw.cxgbe.num_vis) or modify the
		 * configuration file (nvi, rssnvi for this PF) if this is a
		 * problem.
		 */
		device_printf(vi->dev, "RSS table not available.\n");
		vi->rss_base = 0xffff;

		return (0);
	}

	/* Ask the firmware where this VI's slice of the RSS table starts. */
	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
	    V_FW_PARAMS_PARAM_YZ(vi->viid);
	rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	if (rc)
		vi->rss_base = 0xffff;
	else {
		MPASS((val >> 16) == vi->rss_size);
		vi->rss_base = val & 0xffff;
	}

	return (0);
}
/*
 * Attach method for an extra VI: allocate the firmware virtual interface
 * (under a synchronized op) and then attach the ifnet for it.
 *
 * Note: the raw diff markers ('-'/'+' prefixed lines) that had leaked into
 * this function have been resolved to the post-change code, in which
 * cxgbe_vi_attach does not return a status.
 */
static int
vcxgbe_attach(device_t dev)
{
	struct vi_info *vi;
	struct port_info *pi;
	struct adapter *sc;
	int rc;

	vi = device_get_softc(dev);
	pi = vi->pi;
	sc = pi->adapter;

	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4via");
	if (rc)
		return (rc);
	rc = alloc_extra_vi(sc, pi, vi);
	end_synchronized_op(sc, 0);
	if (rc)
		return (rc);

	cxgbe_vi_attach(dev, vi);

	return (0);
}
/*
 * Detach method for an extra VI: tear down the ifnet and release the
 * firmware virtual interface.
 */
static int
vcxgbe_detach(device_t dev)
{
	struct vi_info *vi;
	struct adapter *sc;

	vi = device_get_softc(dev);
	sc = vi->adapter;

	begin_vi_detach(sc, vi);
	cxgbe_vi_detach(vi);
	t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
	end_vi_detach(sc, vi);

	return (0);
}
/* Arms the 30s delayed panic scheduled by fatal_error_task. */
static struct callout fatal_callout;
/* Taskqueue on which fatal_error_task is run (see t4_fatal_err). */
static struct taskqueue *reset_tq;

/*
 * Callout handler: panic once the grace period after a fatal error expires.
 */
static void
delayed_panic(void *arg)
{
	struct adapter *sc = arg;

	panic("%s: panic on fatal error", device_get_nameunit(sc->dev));
}
/*
 * Task run (on reset_tq) after a fatal adapter error.  Dumps CIM state and
 * the device log if a CIM error was flagged, then optionally resets the
 * adapter (t4_reset_on_fatal_err) and/or schedules a delayed panic
 * (t4_panic_on_fatal_err).  A successful reset cancels the panic.
 */
static void
fatal_error_task(void *arg, int pending)
{
	struct adapter *sc = arg;
	int rc;

#ifdef TCP_OFFLOAD
	t4_async_event(sc);
#endif
	/* testandclear: dump diagnostics at most once per flagged CIM error. */
	if (atomic_testandclear_int(&sc->error_flags, ilog2(ADAP_CIM_ERR))) {
		dump_cim_regs(sc);
		dump_cimla(sc);
		dump_devlog(sc);
	}

	if (t4_reset_on_fatal_err) {
		CH_ALERT(sc, "resetting on fatal error.\n");
		rc = reset_adapter(sc);
		if (rc == 0 && t4_panic_on_fatal_err) {
			CH_ALERT(sc, "reset was successful, "
			    "system will NOT panic.\n");
			return;
		}
	}

	if (t4_panic_on_fatal_err) {
		CH_ALERT(sc, "panicking on fatal error (after 30s).\n");
		callout_reset(&fatal_callout, hz * 30, delayed_panic, sc);
	}
}
/*
 * Record and report a fatal adapter error and kick off asynchronous error
 * handling (fatal_error_task on reset_tq).  Stops the adapter first.  Only
 * the first caller proceeds; once ADAP_FATAL_ERR is set, later calls
 * return immediately.  fw_error indicates the caller detected the problem
 * via the firmware (timeout/error) rather than an error interrupt.
 */
void
t4_fatal_err(struct adapter *sc, bool fw_error)
{
	const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0;

	stop_adapter(sc);
	if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_FATAL_ERR)))
		return;
	if (fw_error) {
		/*
		 * We are here because of a firmware error/timeout and not
		 * because of a hardware interrupt.  It is possible (although
		 * not very likely) that an error interrupt was also raised but
		 * this thread ran first and inhibited t4_intr_err.  We walk the
		 * main INT_CAUSE registers here to make sure we haven't missed
		 * anything interesting.
		 */
		t4_slow_intr_handler(sc, verbose);
		atomic_set_int(&sc->error_flags, ADAP_CIM_ERR);
	}
	t4_report_fw_error(sc);
	log(LOG_ALERT, "%s: encountered fatal error, adapter stopped (%d).\n",
	    device_get_nameunit(sc->dev), fw_error);
	taskqueue_enqueue(reset_tq, &sc->fatal_error_task);
}
/*
 * Add an adapter to the global list of adapters (t4_list), under the list
 * lock.
 */
void
t4_add_adapter(struct adapter *sc)
{
	sx_xlock(&t4_list_lock);
	SLIST_INSERT_HEAD(&t4_list, sc, link);
	sx_xunlock(&t4_list_lock);
}
/*
 * Map BAR0 (device registers; enables the kernel doorbell, DOORBELL_KDB)
 * and BAR4 (MSI-X tables).  Returns ENXIO if either mapping fails.
 */
int
t4_map_bars_0_and_4(struct adapter *sc)
{
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);
	setbit(&sc->doorbells, DOORBELL_KDB);

	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}
/*
 * Map BAR2 (userspace doorbells).  Skipped entirely on T4 with RDMA
 * disabled.  On T5+ the UDB doorbell is enabled and, on x86, write
 * combining is attempted for the doorbell region (DOORBELL_WCWR/UDBWC).
 */
int
t4_map_bar_2(struct adapter *sc)
{

	/*
	 * T4: only iWARP driver uses the userspace doorbells.  There is no need
	 * to map it if RDMA is disabled.
	 */
	if (is_t4(sc) && sc->rdmacaps == 0)
		return (0);

	sc->udbs_rid = PCIR_BAR(2);
	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->udbs_rid, RF_ACTIVE);
	if (sc->udbs_res == NULL) {
		device_printf(sc->dev, "cannot map doorbell BAR.\n");
		return (ENXIO);
	}
	sc->udbs_base = rman_get_virtual(sc->udbs_res);

	if (chip_id(sc) >= CHELSIO_T5) {
		setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
		if (t5_write_combine) {
			int rc, mode;

			/*
			 * Enable write combining on BAR2.  This is the
			 * userspace doorbell BAR and is split into 128B
			 * (UDBS_SEG_SIZE) doorbell regions, each associated
			 * with an egress queue.  The first 64B has the doorbell
			 * and the second 64B can be used to submit a tx work
			 * request with an implicit doorbell.
			 */
			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
			if (rc == 0) {
				/* WC doorbells supersede the plain UDB kind. */
				clrbit(&sc->doorbells, DOORBELL_UDB);
				setbit(&sc->doorbells, DOORBELL_WCWR);
				setbit(&sc->doorbells, DOORBELL_UDBWC);
			} else {
				device_printf(sc->dev,
				    "couldn't enable write combining: %d\n",
				    rc);
			}

			mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0);
			t4_write_reg(sc, A_SGE_STAT_CFG,
			    V_STATSOURCE_T5(7) | mode);
		}
#endif
	}
	sc->iwt.wc_en = isset(&sc->doorbells, DOORBELL_UDBWC) ? 1 : 0;

	return (0);
}
/*
 * Restrict the doorbell mechanisms in use to the allowed set
 * (t4_doorbells_allowed).  Fails with EINVAL when none of the available
 * doorbells is allowed.
 */
int
t4_adj_doorbells(struct adapter *sc)
{
	if ((sc->doorbells & t4_doorbells_allowed) != 0) {
		sc->doorbells &= t4_doorbells_allowed;
		return (0);
	}
	CH_ERR(sc, "No usable doorbell (available = 0x%x, allowed = 0x%x).\n",
	       sc->doorbells, t4_doorbells_allowed);
	return (EINVAL);
}
/* Base address and aperture size of one PCIe memory access window. */
struct memwin_init {
	uint32_t base;
	uint32_t aperture;
};

/* Memory window layout for T4 (window 2 differs from T5+). */
static const struct memwin_init t4_memwin[NUM_MEMWIN] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};

/* Memory window layout for T5 and later. */
static const struct memwin_init t5_memwin[NUM_MEMWIN] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};
/*
 * Program the chip's memory access window decoders and initialize the
 * driver's per-window state (lock, base, aperture, position).  Safe to
 * call again after a reset: already-initialized locks and positions are
 * kept and only the hardware registers are rewritten.
 */
static void
setup_memwin(struct adapter *sc)
{
	const struct memwin_init *mw_init;
	struct memwin *mw;
	int i;
	uint32_t bar0;

	if (is_t4(sc)) {
		/*
		 * Read low 32b of bar0 indirectly via the hardware backdoor
		 * mechanism.  Works from within PCI passthrough environments
		 * too, where rman_get_start() can return a different value.  We
		 * need to program the T4 memory window decoders with the actual
		 * addresses that will be coming across the PCIe link.
		 */
		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

		mw_init = &t4_memwin[0];
	} else {
		/* T5+ use the relative offset inside the PCIe BAR */
		bar0 = 0;

		mw_init = &t5_memwin[0];
	}

	for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) {
		if (!rw_initialized(&mw->mw_lock)) {
			rw_init(&mw->mw_lock, "memory window access");
			mw->mw_base = mw_init->base;
			mw->mw_aperture = mw_init->aperture;
			mw->mw_curpos = 0;
		}
		t4_write_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
		    (mw->mw_base + bar0) | V_BIR(0) |
		    V_WINDOW(ilog2(mw->mw_aperture) - 10));
		rw_wlock(&mw->mw_lock);
		position_memwin(sc, i, mw->mw_curpos);
		rw_wunlock(&mw->mw_lock);
	}

	/* flush */
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}
/*
 * Positions the memory window at the given address in the card's address
 * space.  There are some alignment requirements and the actual position may
 * be at an address prior to the requested address.  mw->mw_curpos always has
 * the actual position of the window.  Caller must hold the window's lock
 * exclusively.
 */
static void
position_memwin(struct adapter *sc, int idx, uint32_t addr)
{
	struct memwin *mw;
	uint32_t pf;
	uint32_t reg;

	MPASS(idx >= 0 && idx < NUM_MEMWIN);
	mw = &sc->memwin[idx];
	rw_assert(&mw->mw_lock, RA_WLOCKED);

	if (is_t4(sc)) {
		pf = 0;
		mw->mw_curpos = addr & ~0xf;	/* start must be 16B aligned */
	} else {
		pf = V_PFNUM(sc->pf);
		mw->mw_curpos = addr & ~0x7f;	/* start must be 128B aligned */
	}
	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
	t4_write_reg(sc, reg, mw->mw_curpos | pf);
	t4_read_reg(sc, reg);	/* flush */
}
/*
 * Read (rw == 0) or write (rw != 0) len bytes of the card's memory at
 * address addr through memory window idx, repositioning the window as
 * needed.  addr and len must be 4-byte aligned; data is transferred as
 * little-endian 32-bit words via val.  Readers share the window lock and
 * upgrade to exclusive only to move the window.
 */
int
rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
    int len, int rw)
{
	struct memwin *mw;
	uint32_t mw_end, v;

	MPASS(idx >= 0 && idx < NUM_MEMWIN);

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (addr & 3 || len & 3 || len <= 0)
		return (EINVAL);

	mw = &sc->memwin[idx];
	while (len > 0) {
		rw_rlock(&mw->mw_lock);
		mw_end = mw->mw_curpos + mw->mw_aperture;
		if (addr >= mw_end || addr < mw->mw_curpos) {
			/* Will need to reposition the window */
			if (!rw_try_upgrade(&mw->mw_lock)) {
				rw_runlock(&mw->mw_lock);
				rw_wlock(&mw->mw_lock);
			}
			rw_assert(&mw->mw_lock, RA_WLOCKED);
			position_memwin(sc, idx, addr);
			rw_downgrade(&mw->mw_lock);
			mw_end = mw->mw_curpos + mw->mw_aperture;
		}
		rw_assert(&mw->mw_lock, RA_RLOCKED);
		while (addr < mw_end && len > 0) {
			if (rw == 0) {
				v = t4_read_reg(sc, mw->mw_base + addr -
				    mw->mw_curpos);
				*val++ = le32toh(v);
			} else {
				v = *val++;
				t4_write_reg(sc, mw->mw_base + addr -
				    mw->mw_curpos, htole32(v));
			}
			addr += 4;
			len -= 4;
		}
		rw_runlock(&mw->mw_lock);
	}

	return (0);
}
/* Compile-time checks on the tid field layouts used by the atid table. */
CTASSERT(M_TID_COOKIE == M_COOKIE);
CTASSERT(MAX_ATIDS <= (M_TID_TID + 1));

/*
 * Set up the active-tid (atid) table: an array of union aopen_entry
 * threaded into a singly-linked free list (afree points at the head).
 * No-op when the adapter has no atids.
 */
static void
t4_init_atid_table(struct adapter *sc)
{
	struct tid_info *t;
	int i;

	t = &sc->tids;
	if (t->natids == 0)
		return;

	MPASS(t->atid_tab == NULL);

	t->atid_tab = malloc(t->natids * sizeof(*t->atid_tab), M_CXGBE,
	    M_ZERO | M_WAITOK);
	mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF);
	t->afree = t->atid_tab;
	t->atids_in_use = 0;
	/* Link each entry to its successor; the last entry terminates. */
	for (i = 1; i < t->natids; i++)
		t->atid_tab[i - 1].next = &t->atid_tab[i];
	t->atid_tab[t->natids - 1].next = NULL;
}
/*
 * Tear down the atid table.  All atids must have been freed already.
 * Safe to call even when the table was never initialized (free(NULL) is a
 * no-op and the mutex is only destroyed if it was initialized).
 */
static void
t4_free_atid_table(struct adapter *sc)
{
	struct tid_info *t;

	t = &sc->tids;

	KASSERT(t->atids_in_use == 0,
	    ("%s: %d atids still in use.", __func__, t->atids_in_use));

	if (mtx_initialized(&t->atid_lock))
		mtx_destroy(&t->atid_lock);
	free(t->atid_tab, M_CXGBE);
	t->atid_tab = NULL;
}
/*
 * Allocate an atid and associate it with the caller's context.  Returns
 * the atid (index into atid_tab) or -1 when the free list is empty.
 */
int
alloc_atid(struct adapter *sc, void *ctx)
{
	struct tid_info *t = &sc->tids;
	int atid = -1;

	mtx_lock(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = p - t->atid_tab;
		MPASS(atid <= M_TID_TID);
		t->afree = p->next;
		p->data = ctx;	/* entry now holds the context, not a link */
		t->atids_in_use++;
	}
	mtx_unlock(&t->atid_lock);
	return (atid);
}
/*
 * Return the context associated with an allocated atid.  No locking: the
 * caller must know the atid is currently allocated.
 */
void *
lookup_atid(struct adapter *sc, int atid)
{
	struct tid_info *t = &sc->tids;

	return (t->atid_tab[atid].data);
}
/*
 * Release an atid back onto the free list.
 */
void
free_atid(struct adapter *sc, int atid)
{
	struct tid_info *t = &sc->tids;
	union aopen_entry *p = &t->atid_tab[atid];

	mtx_lock(&t->atid_lock);
	p->next = t->afree;	/* entry reverts from context to free link */
	t->afree = p;
	t->atids_in_use--;
	mtx_unlock(&t->atid_lock);
}
/*
 * Fallback for release_tid when no work request could be allocated.
 * Deferred release is not implemented; this panics via
 * CXGBE_UNIMPLEMENTED.
 */
static void
queue_tid_release(struct adapter *sc, int tid)
{

	CXGBE_UNIMPLEMENTED("deferred tid release");
}
/*
 * Release a hardware tid by sending a CPL_TID_RELEASE on the given control
 * queue.  Falls back to queue_tid_release (currently unimplemented) if a
 * work request cannot be allocated.
 */
void
release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq)
{
	struct wrqe *wr;
	struct cpl_tid_release *req;

	wr = alloc_wrqe(sizeof(*req), ctrlq);
	if (wr == NULL) {
		queue_tid_release(sc, tid);	/* defer */
		return;
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);

	t4_wrq_tx(sc, wr);
}
/*
 * qsort comparator ordering struct t4_range by start address.  Compare
 * explicitly instead of returning the difference: start is unsigned, and
 * a subtraction truncated to int yields the wrong sign when the two
 * values are more than INT_MAX apart, which would misorder the ranges.
 */
static int
t4_range_cmp(const void *a, const void *b)
{
	const struct t4_range *ra = a;
	const struct t4_range *rb = b;

	if (ra->start < rb->start)
		return (-1);
	if (ra->start > rb->start)
		return (1);
	return (0);
}
/*
 * Verify that the memory range specified by the addr/len pair is valid within
 * the card's address space.  The enabled memories (EDC0/1, MC0, and MC1 on
 * T5) are collected as ranges, sorted, and merged so that a request spanning
 * adjacent memories is accepted.  Returns 0 when the range lies entirely in
 * enabled memory, EINVAL for a misaligned request, EFAULT otherwise.
 */
static int
validate_mem_range(struct adapter *sc, uint32_t addr, uint32_t len)
{
	struct t4_range mem_ranges[4], *r, *next;
	uint32_t em, addr_len;
	int i, n, remaining;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (addr & 3 || len & 3 || len == 0)
		return (EINVAL);

	/* Enabled memories */
	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);

	r = &mem_ranges[0];
	n = 0;
	bzero(r, sizeof(mem_ranges));
	if (em & F_EDRAM0_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		r->size = G_EDRAM0_SIZE(addr_len) << 20;
		if (r->size > 0) {
			r->start = G_EDRAM0_BASE(addr_len) << 20;
			/* Fast path: the request fits in this one memory. */
			if (addr >= r->start &&
			    addr + len <= r->start + r->size)
				return (0);
			r++;
			n++;
		}
	}
	if (em & F_EDRAM1_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		r->size = G_EDRAM1_SIZE(addr_len) << 20;
		if (r->size > 0) {
			r->start = G_EDRAM1_BASE(addr_len) << 20;
			if (addr >= r->start &&
			    addr + len <= r->start + r->size)
				return (0);
			r++;
			n++;
		}
	}
	if (em & F_EXT_MEM_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		r->size = G_EXT_MEM_SIZE(addr_len) << 20;
		if (r->size > 0) {
			r->start = G_EXT_MEM_BASE(addr_len) << 20;
			if (addr >= r->start &&
			    addr + len <= r->start + r->size)
				return (0);
			r++;
			n++;
		}
	}
	if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		r->size = G_EXT_MEM1_SIZE(addr_len) << 20;
		if (r->size > 0) {
			r->start = G_EXT_MEM1_BASE(addr_len) << 20;
			if (addr >= r->start &&
			    addr + len <= r->start + r->size)
				return (0);
			r++;
			n++;
		}
	}
	MPASS(n <= nitems(mem_ranges));

	if (n > 1) {
		/* Sort and merge the ranges. */
		qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp);

		/* Start from index 0 and examine the next n - 1 entries. */
		r = &mem_ranges[0];
		for (remaining = n - 1; remaining > 0; remaining--, r++) {

			MPASS(r->size > 0);	/* r is a valid entry. */
			next = r + 1;
			MPASS(next->size > 0);	/* and so is the next one. */

			while (r->start + r->size >= next->start) {
				/* Merge the next one into the current entry. */
				r->size = max(r->start + r->size,
				    next->start + next->size) - r->start;
				n--;	/* One fewer entry in total. */
				if (--remaining == 0)
					goto done;	/* short circuit */
				next++;
			}
			if (next != r + 1) {
				/*
				 * Some entries were merged into r and next
				 * points to the first valid entry that couldn't
				 * be merged.
				 */
				MPASS(next->size > 0);	/* must be valid */
				memcpy(r + 1, next, remaining * sizeof(*r));
#ifdef INVARIANTS
				/*
				 * This so that the foo->size assertion in the
				 * next iteration of the loop do the right
				 * thing for entries that were pulled up and are
				 * no longer valid.
				 */
				MPASS(n < nitems(mem_ranges));
				bzero(&mem_ranges[n], (nitems(mem_ranges) - n) *
				    sizeof(struct t4_range));
#endif
			}
		}
done:
		/* Done merging the ranges. */
		MPASS(n > 0);
		r = &mem_ranges[0];
		for (i = 0; i < n; i++, r++) {
			if (addr >= r->start &&
			    addr + len <= r->start + r->size)
				return (0);
		}
	}

	return (EFAULT);
}
/*
 * Translate a firmware memory type (FW_MEMTYPE_*) to the corresponding
 * driver MEM_* constant.  Panics on a type it does not recognize.
 */
static int
fwmtype_to_hwmtype(int mtype)
{
	if (mtype == FW_MEMTYPE_EDC0)
		return (MEM_EDC0);
	if (mtype == FW_MEMTYPE_EDC1)
		return (MEM_EDC1);
	if (mtype == FW_MEMTYPE_EXTMEM)
		return (MEM_MC0);
	if (mtype == FW_MEMTYPE_EXTMEM1)
		return (MEM_MC1);
	panic("%s: cannot translate fw mtype %d.", __func__, mtype);
}
/*
 * Verify that the memory range specified by the memtype/offset/len pair is
 * valid and lies entirely within the memtype specified.  The global address
 * of the start of the range is returned in addr.  Returns EINVAL for
 * misaligned or disabled-memory requests; otherwise validate_mem_range's
 * result for the computed global range.
 */
static int
validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, uint32_t len,
    uint32_t *addr)
{
	uint32_t em, addr_len, maddr;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (off & 3 || len & 3 || len == 0)
		return (EINVAL);

	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	switch (fwmtype_to_hwmtype(mtype)) {
	case MEM_EDC0:
		if (!(em & F_EDRAM0_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;
		break;
	case MEM_EDC1:
		if (!(em & F_EDRAM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		break;
	case MEM_MC:
		if (!(em & F_EXT_MEM_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		break;
	case MEM_MC1:
		/* MC1 exists only on T5. */
		if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		break;
	default:
		return (EINVAL);
	}

	*addr = maddr + off;	/* global address */
	return (validate_mem_range(sc, *addr, len));
}
/*
 * Resolve the devlog's memtype/start into a validated global address
 * (stored in dparams->addr).  Returns 0 or an errno from
 * validate_mt_off_len.
 */
static int
fixup_devlog_params(struct adapter *sc)
{
	struct devlog_params *dparams = &sc->params.devlog;

	return (validate_mt_off_len(sc, dparams->memtype, dparams->start,
	    dparams->size, &dparams->addr));
}
/*
 * Recompute the total number of interrupt vectors (iaq->nirq) needed for
 * the queue counts currently in iaq, across nports ports.
 */
static void
update_nirq(struct intrs_and_queues *iaq, int nports)
{
	const int nextra_vis = iaq->num_vis - 1;
	int nirq;

	/* Vectors that aren't tied to any port's rx queues. */
	nirq = T4_EXTRA_INTR;

	/* Main VI on each port: NIC/netmap rxqs share vectors, TOE rxqs don't. */
	nirq += nports * max(iaq->nrxq, iaq->nnmrxq);
	nirq += nports * iaq->nofldrxq;

	/* Extra VIs on each port, same structure per VI. */
	nirq += nports * nextra_vis * max(iaq->nrxq_vi, iaq->nnmrxq_vi);
	nirq += nports * nextra_vis * iaq->nofldrxq_vi;

	iaq->nirq = nirq;
}
/*
 * Adjust requirements to fit the number of interrupts available.
 *
 * Starts from the tunable-derived queue counts and, if they need more than
 * navail vectors (or a non-power-of-2 count for MSI), degrades in stages:
 * first drop extra VIs one at a time, then shrink NIC rxqs to smaller
 * powers of 2 and halve TOE rxqs, and finally fall back to a single
 * vector for everything.
 */
static void
calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
    int navail)
{
	int old_nirq;
	const int nports = sc->params.nports;

	MPASS(nports > 0);
	MPASS(navail > 0);

	bzero(iaq, sizeof(*iaq));
	iaq->intr_type = itype;
	iaq->num_vis = t4_num_vis;
	iaq->ntxq = t4_ntxq;
	iaq->ntxq_vi = t4_ntxq_vi;
	iaq->nrxq = t4_nrxq;
	iaq->nrxq_vi = t4_nrxq_vi;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	if (is_offload(sc) || is_ethoffload(sc)) {
		iaq->nofldtxq = t4_nofldtxq;
		iaq->nofldtxq_vi = t4_nofldtxq_vi;
	}
#endif
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		iaq->nofldrxq = t4_nofldrxq;
		iaq->nofldrxq_vi = t4_nofldrxq_vi;
	}
#endif
#ifdef DEV_NETMAP
	if (t4_native_netmap & NN_MAIN_VI) {
		iaq->nnmtxq = t4_nnmtxq;
		iaq->nnmrxq = t4_nnmrxq;
	}
	if (t4_native_netmap & NN_EXTRA_VI) {
		iaq->nnmtxq_vi = t4_nnmtxq_vi;
		iaq->nnmrxq_vi = t4_nnmrxq_vi;
	}
#endif

	update_nirq(iaq, nports);
	if (iaq->nirq <= navail &&
	    (itype != INTR_MSI || powerof2(iaq->nirq))) {
		/*
		 * This is the normal case -- there are enough interrupts for
		 * everything.
		 */
		goto done;
	}

	/*
	 * If extra VIs have been configured try reducing their count and see if
	 * that works.
	 */
	while (iaq->num_vis > 1) {
		iaq->num_vis--;
		update_nirq(iaq, nports);
		if (iaq->nirq <= navail &&
		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
			device_printf(sc->dev, "virtual interfaces per port "
			    "reduced to %d from %d.  nrxq=%u, nofldrxq=%u, "
			    "nrxq_vi=%u nofldrxq_vi=%u, nnmrxq_vi=%u.  "
			    "itype %d, navail %u, nirq %d.\n",
			    iaq->num_vis, t4_num_vis, iaq->nrxq, iaq->nofldrxq,
			    iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi,
			    itype, navail, iaq->nirq);
			goto done;
		}
	}

	/*
	 * Extra VIs will not be created.  Log a message if they were requested.
	 */
	MPASS(iaq->num_vis == 1);
	iaq->ntxq_vi = iaq->nrxq_vi = 0;
	iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
	iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
	if (iaq->num_vis != t4_num_vis) {
		device_printf(sc->dev, "extra virtual interfaces disabled.  "
		    "nrxq=%u, nofldrxq=%u, nrxq_vi=%u nofldrxq_vi=%u, "
		    "nnmrxq_vi=%u.  itype %d, navail %u, nirq %d.\n",
		    iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi,
		    iaq->nnmrxq_vi, itype, navail, iaq->nirq);
	}

	/*
	 * Keep reducing the number of NIC rx queues to the next lower power of
	 * 2 (for even RSS distribution) and halving the TOE rx queues and see
	 * if that works.
	 */
	do {
		if (iaq->nrxq > 1) {
			iaq->nrxq = rounddown_pow_of_two(iaq->nrxq - 1);
			if (iaq->nnmrxq > iaq->nrxq)
				iaq->nnmrxq = iaq->nrxq;
		}
		if (iaq->nofldrxq > 1)
			iaq->nofldrxq >>= 1;

		old_nirq = iaq->nirq;
		update_nirq(iaq, nports);
		if (iaq->nirq <= navail &&
		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
			device_printf(sc->dev, "running with reduced number of "
			    "rx queues because of shortage of interrupts.  "
			    "nrxq=%u, nofldrxq=%u.  "
			    "itype %d, navail %u, nirq %d.\n", iaq->nrxq,
			    iaq->nofldrxq, itype, navail, iaq->nirq);
			goto done;
		}
	} while (old_nirq != iaq->nirq);

	/* One interrupt for everything.  Ugh. */
	device_printf(sc->dev, "running with minimal number of queues.  "
	    "itype %d, navail %u.\n", itype, navail);
	iaq->nirq = 1;
	iaq->nrxq = 1;
	iaq->ntxq = 1;
	if (iaq->nofldrxq > 0) {
		iaq->nofldrxq = 1;
		iaq->nofldtxq = 1;
	}
	iaq->nnmtxq = 0;
	iaq->nnmrxq = 0;
done:
	MPASS(iaq->num_vis > 0);
	if (iaq->num_vis > 1) {
		MPASS(iaq->nrxq_vi > 0);
		MPASS(iaq->ntxq_vi > 0);
	}
	MPASS(iaq->nirq > 0);
	MPASS(iaq->nrxq > 0);
	MPASS(iaq->ntxq > 0);
	if (itype == INTR_MSI) {
		MPASS(powerof2(iaq->nirq));
	}
}
/*
 * Select an interrupt type (MSI-X, then MSI, then INTx, filtered by the
 * t4_intr_types tunable) and allocate interrupt vectors for it.  If the
 * kernel grants fewer vectors than requested, the queue configuration is
 * recalculated for the reduced count and the allocation is retried.
 *
 * Returns 0 with *iaq filled in and the vectors allocated, or ENXIO if no
 * usable interrupt type could be found.
 */
static int
cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq)
{
	int rc, itype, navail, nalloc;

	for (itype = INTR_MSIX; itype; itype >>= 1) {
		if ((itype & t4_intr_types) == 0)
			continue;	/* not allowed */

		if (itype == INTR_MSIX)
			navail = pci_msix_count(sc->dev);
		else if (itype == INTR_MSI)
			navail = pci_msi_count(sc->dev);
		else
			navail = 1;
restart:
		if (navail == 0)
			continue;

		calculate_iaq(sc, iaq, itype, navail);
		nalloc = iaq->nirq;
		rc = 0;
		if (itype == INTR_MSIX)
			rc = pci_alloc_msix(sc->dev, &nalloc);
		else if (itype == INTR_MSI)
			rc = pci_alloc_msi(sc->dev, &nalloc);

		if (rc == 0 && nalloc > 0) {
			if (nalloc == iaq->nirq)
				return (0);

			/*
			 * Didn't get the number requested.  Use whatever number
			 * the kernel is willing to allocate.
			 */
			device_printf(sc->dev, "fewer vectors than requested, "
			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
			    itype, iaq->nirq, nalloc);
			pci_release_msi(sc->dev);
			navail = nalloc;
			goto restart;
		}

		/*
		 * Arguments reordered to match the format's labels: the first
		 * %d is the allocation error, the second the interrupt type.
		 */
		device_printf(sc->dev,
		    "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
		    rc, itype, iaq->nirq, nalloc);
	}

	/* Terminate the message with a newline (kernel console hygiene). */
	device_printf(sc->dev,
	    "failed to find a usable interrupt type.  "
	    "allowed=%d, msi-x=%d, msi=%d, intx=1\n", t4_intr_types,
	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));
	return (ENXIO);
}
/*
 * Assemble the 32b firmware version word (major.minor.micro.build) from the
 * compiled-in version constants of the given chip (token-pasted prefix).
 */
#define FW_VERSION(chip) ( \
    V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
    V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
    V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
    V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
/* Compiled-in interface version for a given chip/personality pair. */
#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
/*
 * Just enough of fw_hdr to cover all version info.  The layout must track
 * the real fw_hdr; the CTASSERTs below spot-check that.
 */
struct fw_h {
	__u8	ver;
	__u8	chip;			/* FW_HDR_CHIP_* */
	__be16	len512;
	__be32	fw_ver;			/* major.minor.micro.build */
	__be32	tp_microcode_ver;
	/* Per-personality interface versions used by fw_compatible(). */
	__u8	intfver_nic;
	__u8	intfver_vnic;
	__u8	intfver_ofld;
	__u8	intfver_ri;
	__u8	intfver_iscsipdu;
	__u8	intfver_iscsi;
	__u8	intfver_fcoepdu;
	__u8	intfver_fcoe;
};
/* Spot check a couple of fields. */
CTASSERT(offsetof(struct fw_h, fw_ver) == offsetof(struct fw_hdr, fw_ver));
CTASSERT(offsetof(struct fw_h, intfver_nic) == offsetof(struct fw_hdr, intfver_nic));
CTASSERT(offsetof(struct fw_h, intfver_fcoe) == offsetof(struct fw_hdr, intfver_fcoe));
/*
 * Firmware management information, one entry per supported chip generation.
 * kld_name is the module carrying the default configuration file,
 * fw_mod_name the module carrying the firmware image, and fw_h holds the
 * version/interface constants this driver was compiled against.
 */
struct fw_info {
	uint8_t chip;			/* CHELSIO_T4/T5/T6 */
	char *kld_name;
	char *fw_mod_name;
	struct fw_h fw_h;
} fw_info[] = {
	{
		.chip = CHELSIO_T4,
		.kld_name = "t4fw_cfg",
		.fw_mod_name = "t4fw",
		.fw_h = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = htobe32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ofld = FW_INTFVER(T4, OFLD),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.kld_name = "t5fw_cfg",
		.fw_mod_name = "t5fw",
		.fw_h = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = htobe32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ofld = FW_INTFVER(T5, OFLD),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}, {
		.chip = CHELSIO_T6,
		.kld_name = "t6fw_cfg",
		.fw_mod_name = "t6fw",
		.fw_h = {
			.chip = FW_HDR_CHIP_T6,
			.fw_ver = htobe32(FW_VERSION(T6)),
			.intfver_nic = FW_INTFVER(T6, NIC),
			.intfver_vnic = FW_INTFVER(T6, VNIC),
			.intfver_ofld = FW_INTFVER(T6, OFLD),
			.intfver_ri = FW_INTFVER(T6, RI),
			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T6, FCOE),
		},
	}
};
static struct fw_info *
find_fw_info(int chip)
{
int i;
for (i = 0; i < nitems(fw_info); i++) {
if (fw_info[i].chip == chip)
return (&fw_info[i]);
}
return (NULL);
}
/*
 * Is the given firmware API compatible with the one the driver was compiled
 * with?  Two firmwares for the same chip are compatible if they are the
 * exact same release, or if every uLD interface version matches.
 *
 * XXX: Is this too conservative?  Perhaps it could be limited to the
 * features actually supported in the driver.
 */
static int
fw_compatible(const struct fw_h *hdr1, const struct fw_h *hdr2)
{

	/* Different chips can never be compatible. */
	if (hdr1->chip != hdr2->chip)
		return (0);

	/* The exact same firmware version is trivially compatible. */
	if (hdr1->fw_ver == hdr2->fw_ver)
		return (1);

#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	return (SAME_INTF(nic) && SAME_INTF(vnic) && SAME_INTF(ofld) &&
	    SAME_INTF(ri) && SAME_INTF(iscsipdu) && SAME_INTF(iscsi) &&
	    SAME_INTF(fcoepdu) && SAME_INTF(fcoe));
#undef SAME_INTF
}
/*
 * Acquire references to the config-file module (*dcfg) and, if fw is not
 * NULL, the firmware-image module (*fw) for this chip.  Both pointers are
 * cleared up front so the caller can always hand them to
 * unload_fw_module().  Returns 0 if the config module was found, ENOENT if
 * it wasn't, or EINVAL for an unknown chip.
 */
static int
load_fw_module(struct adapter *sc, const struct firmware **dcfg,
    const struct firmware **fw)
{
	struct fw_info *fwi;

	*dcfg = NULL;
	if (fw != NULL)
		*fw = NULL;

	fwi = find_fw_info(chip_id(sc));
	if (fwi == NULL) {
		device_printf(sc->dev,
		    "unable to look up firmware information for chip %d.\n",
		    chip_id(sc));
		return (EINVAL);
	}

	*dcfg = firmware_get(fwi->kld_name);
	if (*dcfg == NULL)
		return (ENOENT);

	/* The firmware image is optional; fetch it only when requested. */
	if (fw != NULL)
		*fw = firmware_get(fwi->fw_mod_name);
	return (0);
}
static void
unload_fw_module(struct adapter *sc, const struct firmware *dcfg,
const struct firmware *fw)
{
if (fw != NULL)
firmware_put(fw, FIRMWARE_UNLOAD);
if (dcfg != NULL)
firmware_put(dcfg, FIRMWARE_UNLOAD);
}
/*
 * Decide whether the firmware on the card needs to be replaced and, if so,
 * install the bundled firmware (from the KLD when t4_fw_install < 0, else
 * the compiled-in image version info is used for the comparison).
 *
 * Return values:
 * 0 means no firmware install attempted.
 * ERESTART means a firmware install was attempted and was successful.
 * +ve errno means a firmware install was attempted but failed.
 *
 * *already counts install attempts across calls so only the first caller in
 * a retry loop actually performs the install.
 */
static int
install_kld_firmware(struct adapter *sc, struct fw_h *card_fw,
    const struct fw_h *drv_fw, const char *reason, int *already)
{
	const struct firmware *cfg, *fw;
	const uint32_t c = be32toh(card_fw->fw_ver);	/* version on card */
	uint32_t d, k;		/* d: bundled version, k: version in KLD */
	int rc, fw_install;
	struct fw_h bundled_fw;
	bool load_attempted;

	cfg = fw = NULL;
	load_attempted = false;
	/* t4_fw_install < 0 means "use the KLD's firmware for comparisons". */
	fw_install = t4_fw_install < 0 ? -t4_fw_install : t4_fw_install;

	memcpy(&bundled_fw, drv_fw, sizeof(bundled_fw));
	if (t4_fw_install < 0) {
		rc = load_fw_module(sc, &cfg, &fw);
		if (rc != 0 || fw == NULL) {
			/*
			 * Note: a space was missing between the adjacent
			 * string literals ("for" "hw.cxgbe..."), which made
			 * the message read "...version forhw.cxgbe...".
			 */
			device_printf(sc->dev,
			    "failed to load firmware module: %d. cfg %p, fw %p;"
			    " will use compiled-in firmware version for "
			    "hw.cxgbe.fw_install checks.\n",
			    rc, cfg, fw);
		} else {
			memcpy(&bundled_fw, fw->data, sizeof(bundled_fw));
		}
		load_attempted = true;
	}
	d = be32toh(bundled_fw.fw_ver);

	/* Caller already decided an install is needed. */
	if (reason != NULL)
		goto install;

	if ((sc->flags & FW_OK) == 0) {
		if (c == 0xffffffff) {
			reason = "missing";
			goto install;
		}
		rc = 0;
		goto done;
	}

	if (!fw_compatible(card_fw, &bundled_fw)) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (d > c) {
		reason = "older than the version bundled with this driver";
		goto install;
	}

	/* fw_install == 2 requests an exact version match with the bundle. */
	if (fw_install == 2 && d != c) {
		reason = "different than the version bundled with this driver";
		goto install;
	}

	/* No reason to do anything to the firmware already on the card. */
	rc = 0;
	goto done;

install:
	rc = 0;
	if ((*already)++)
		goto done;	/* only the first caller installs */

	if (fw_install == 0) {
		device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
		    "but the driver is prohibited from installing a firmware "
		    "on the card.\n",
		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
		goto done;
	}

	/*
	 * We'll attempt to install a firmware.  Load the module first (if it
	 * hasn't been loaded already).
	 */
	if (!load_attempted) {
		rc = load_fw_module(sc, &cfg, &fw);
		if (rc != 0 || fw == NULL) {
			device_printf(sc->dev,
			    "failed to load firmware module: %d. cfg %p, fw %p\n",
			    rc, cfg, fw);
			/* carry on */
		}
	}
	if (fw == NULL) {
		device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
		    "but the driver cannot take corrective action because it "
		    "is unable to load the firmware module.\n",
		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
		/* Not fatal if the card already has a working firmware. */
		rc = sc->flags & FW_OK ? 0 : ENOENT;
		goto done;
	}
	k = be32toh(((const struct fw_hdr *)fw->data)->fw_ver);
	if (k != d) {
		MPASS(t4_fw_install > 0);
		device_printf(sc->dev,
		    "firmware in KLD (%u.%u.%u.%u) is not what the driver was "
		    "expecting (%u.%u.%u.%u) and will not be used.\n",
		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k),
		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d));
		rc = sc->flags & FW_OK ? 0 : EINVAL;
		goto done;
	}

	device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
	    "installing firmware %u.%u.%u.%u on card.\n",
	    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
	    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
	    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
	    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d));

	rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
	if (rc != 0) {
		device_printf(sc->dev, "failed to install firmware: %d\n", rc);
	} else {
		/* Installed successfully, update the cached header too. */
		rc = ERESTART;
		memcpy(card_fw, fw->data, sizeof(*card_fw));
	}
done:
	unload_fw_module(sc, cfg, fw);
	return (rc);
}
/*
* Establish contact with the firmware and attempt to become the master driver.
*
* A firmware will be installed to the card if needed (if the driver is allowed
* to do so).
*/
static int
contact_firmware(struct adapter *sc)
{
	int rc, already = 0;
	enum dev_state state;
	struct fw_info *fw_info;
	struct fw_hdr *card_fw;		/* fw on the card */
	const struct fw_h *drv_fw;	/* fw bundled with the driver */

	fw_info = find_fw_info(chip_id(sc));
	if (fw_info == NULL) {
		device_printf(sc->dev,
		    "unable to look up firmware information for chip %d.\n",
		    chip_id(sc));
		return (EINVAL);
	}
	drv_fw = &fw_info->fw_h;

	/* Read the header of the firmware on the card */
	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
restart:
	rc = -t4_get_fw_hdr(sc, card_fw);
	if (rc != 0) {
		device_printf(sc->dev,
		    "unable to read firmware header from card's flash: %d\n",
		    rc);
		goto done;
	}

	/*
	 * First install pass, before talking to the firmware.  ERESTART means
	 * a new firmware was flashed; re-read its header and check again.
	 */
	rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, NULL,
	    &already);
	if (rc == ERESTART)
		goto restart;
	if (rc != 0)
		goto done;

	/* Say hello and request to be the master driver if possible. */
	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
	if (rc < 0 || state == DEV_STATE_ERR) {
		rc = -rc;
		device_printf(sc->dev,
		    "failed to connect to the firmware: %d, %d.  "
		    "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW));
#if 0
		if (install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw,
		    "not responding properly to HELLO", &already) == ERESTART)
			goto restart;
#endif
		goto done;
	}
	MPASS(be32toh(card_fw->flags) & FW_HDR_FLAGS_RESET_HALT);
	sc->flags |= FW_OK;	/* The firmware responded to the FW_HELLO. */

	/* On success t4_fw_hello returns the PF number of the master. */
	if (rc == sc->pf) {
		sc->flags |= MASTER_PF;
		/*
		 * Second install pass, now that FW_OK is known.  ERESTART here
		 * means the install succeeded; carry on with the new firmware.
		 */
		rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw,
		    NULL, &already);
		if (rc == ERESTART)
			rc = 0;
		else if (rc != 0)
			goto done;
	} else if (state == DEV_STATE_UNINIT) {
		/*
		 * We didn't get to be the master so we definitely won't be
		 * configuring the chip.  It's a bug if someone else hasn't
		 * configured it already.
		 */
		device_printf(sc->dev, "couldn't be master(%d), "
		    "device not already initialized either(%d).  "
		    "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW));
		rc = EPROTO;
		goto done;
	} else {
		/*
		 * Some other PF is the master and has configured the chip.
		 * This is allowed but untested.
		 */
		device_printf(sc->dev, "PF%d is master, device state %d.  "
		    "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW));
		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", rc);
		sc->cfcsum = 0;
		rc = 0;
	}
done:
	/* On failure, undo the HELLO so the firmware forgets about us. */
	if (rc != 0 && sc->flags & FW_OK) {
		t4_fw_bye(sc, sc->mbox);
		sc->flags &= ~FW_OK;
	}
	free(card_fw, M_CXGBE);
	return (rc);
}
/*
 * Resolve the named configuration profile to actual config-file bytes (from
 * the default-config KLD or a profile-specific module) and write them to the
 * card memory at (mtype, moff).  cfg_file may be rewritten in place when
 * "default" maps to a card-specific profile.
 */
static int
copy_cfg_file_to_card(struct adapter *sc, char *cfg_file,
    uint32_t mtype, uint32_t moff)
{
	struct fw_info *fw_info;
	const struct firmware *dcfg, *rcfg = NULL;
	const uint32_t *cfdata;
	uint32_t cflen, addr;
	int rc;

	/*
	 * Return value deliberately ignored: dcfg is NULL on failure and that
	 * is checked below only if the default config is actually needed.
	 */
	load_fw_module(sc, &dcfg, NULL);

	/* Card specific interpretation of "default". */
	if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
		if (pci_get_device(sc->dev) == 0x440a)
			snprintf(cfg_file, sizeof(t4_cfg_file), UWIRE_CF);
		if (is_fpga(sc))
			snprintf(cfg_file, sizeof(t4_cfg_file), FPGA_CF);
	}

	if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
		if (dcfg == NULL) {
			device_printf(sc->dev,
			    "KLD with default config is not available.\n");
			rc = ENOENT;
			goto done;
		}
		cfdata = dcfg->data;
		cflen = dcfg->datasize & ~3;	/* whole 32b words only */
	} else {
		char s[32];

		/* Non-default profile: load "<kld_name>_<profile>" module. */
		fw_info = find_fw_info(chip_id(sc));
		if (fw_info == NULL) {
			device_printf(sc->dev,
			    "unable to look up firmware information for chip %d.\n",
			    chip_id(sc));
			rc = EINVAL;
			goto done;
		}
		snprintf(s, sizeof(s), "%s_%s", fw_info->kld_name, cfg_file);

		rcfg = firmware_get(s);
		if (rcfg == NULL) {
			device_printf(sc->dev,
			    "unable to load module \"%s\" for configuration "
			    "profile \"%s\".\n", s, cfg_file);
			rc = ENOENT;
			goto done;
		}
		cfdata = rcfg->data;
		cflen = rcfg->datasize & ~3;
	}

	if (cflen > FLASH_CFG_MAX_SIZE) {
		device_printf(sc->dev,
		    "config file too long (%d, max allowed is %d).\n",
		    cflen, FLASH_CFG_MAX_SIZE);
		rc = EINVAL;
		goto done;
	}

	rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
	if (rc != 0) {
		device_printf(sc->dev,
		    "%s: addr (%d/0x%x) or len %d is not valid: %d.\n",
		    __func__, mtype, moff, cflen, rc);
		rc = EINVAL;
		goto done;
	}
	write_via_memwin(sc, 2, addr, cfdata, cflen);
done:
	if (rcfg != NULL)
		firmware_put(rcfg, FIRMWARE_UNLOAD);
	unload_fw_module(sc, dcfg, NULL);
	return (rc);
}
/*
 * Capabilities the driver is willing to let the firmware enable, one 16b
 * mask per capability class in fw_caps_config_cmd.  Used by
 * apply_cfg_and_initialize() to trim what the config file reports.
 */
struct caps_allowed {
	uint16_t nbmcaps;
	uint16_t linkcaps;
	uint16_t switchcaps;
	uint16_t niccaps;
	uint16_t toecaps;
	uint16_t rdmacaps;
	uint16_t cryptocaps;
	uint16_t iscsicaps;
	uint16_t fcoecaps;
};
/* Build the 32b mnemonic for a device-wide firmware parameter. */
#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
/* Build the 32b mnemonic for a per-PF/VF firmware parameter. */
#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
/*
* Provide a configuration profile to the firmware and have it initialize the
* chip accordingly. This may involve uploading a configuration file to the
* card.
*/
static int
apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
    const struct caps_allowed *caps_allowed)
{
	int rc;
	struct fw_caps_config_cmd caps;
	uint32_t mtype, moff, finicsum, cfcsum, param, val;

	/* Reset the firmware/device before applying a new configuration. */
	rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
	if (rc != 0) {
		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
		return (rc);
	}

	/*
	 * Work out where the config file comes from: the firmware's built-in
	 * config, the copy already in flash, or an upload to card memory.
	 */
	bzero(&caps, sizeof(caps));
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	if (strncmp(cfg_file, BUILTIN_CF, sizeof(t4_cfg_file)) == 0) {
		mtype = 0;
		moff = 0;
		caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
	} else if (strncmp(cfg_file, FLASH_CF, sizeof(t4_cfg_file)) == 0) {
		mtype = FW_MEMTYPE_FLASH;
		moff = t4_flash_cfg_addr(sc);
		caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
		    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
		    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
		    FW_LEN16(caps));
	} else {
		/*
		 * Ask the firmware where it wants us to upload the config file.
		 */
		param = FW_PARAM_DEV(CF);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* No support for config file?  Shouldn't happen. */
			device_printf(sc->dev,
			    "failed to query config file location: %d.\n", rc);
			goto done;
		}
		mtype = G_FW_PARAMS_PARAM_Y(val);
		moff = G_FW_PARAMS_PARAM_Z(val) << 16;
		caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
		    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
		    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
		    FW_LEN16(caps));

		rc = copy_cfg_file_to_card(sc, cfg_file, mtype, moff);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to upload config file to card: %d.\n", rc);
			goto done;
		}
	}
	/* Have the firmware parse the config file (READ of CAPS_CONFIG). */
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
	if (rc != 0) {
		device_printf(sc->dev, "failed to pre-process config file: %d "
		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
		goto done;
	}

	finicsum = be32toh(caps.finicsum);	/* checksum per the file */
	cfcsum = be32toh(caps.cfcsum);	/* actual */
	if (finicsum != cfcsum) {
		device_printf(sc->dev,
		    "WARNING: config file checksum mismatch: %08x %08x\n",
		    finicsum, cfcsum);
	}
	sc->cfcsum = cfcsum;
	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", cfg_file);

	/*
	 * Let the firmware know what features will (not) be used so it can tune
	 * things accordingly.
	 */
#define LIMIT_CAPS(x) do { \
	caps.x##caps &= htobe16(caps_allowed->x##caps); \
} while (0)
	LIMIT_CAPS(nbm);
	LIMIT_CAPS(link);
	LIMIT_CAPS(switch);
	LIMIT_CAPS(nic);
	LIMIT_CAPS(toe);
	LIMIT_CAPS(rdma);
	LIMIT_CAPS(crypto);
	LIMIT_CAPS(iscsi);
	LIMIT_CAPS(fcoe);
#undef LIMIT_CAPS
	if (caps.niccaps & htobe16(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
		/*
		 * TOE and hashfilters are mutually exclusive.  It is a config
		 * file or firmware bug if both are reported as available.  Try
		 * to cope with the situation in non-debug builds by disabling
		 * TOE.
		 */
		MPASS(caps.toecaps == 0);

		caps.toecaps = 0;
		caps.rdmacaps = 0;
		caps.iscsicaps = 0;
	}

	/* Write the trimmed capabilities back to the firmware. */
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to process config file: %d.\n", rc);
		goto done;
	}

	t4_tweak_chip_settings(sc);
	set_params__pre_init(sc);

	/* get basic stuff going */
	rc = -t4_fw_initialize(sc, sc->mbox);
	if (rc != 0) {
		device_printf(sc->dev, "fw_initialize failed: %d.\n", rc);
		goto done;
	}
done:
	return (rc);
}
/*
* Partition chip resources for use between various PFs, VFs, etc.
*/
static int
partition_resources(struct adapter *sc)
{
	char cfg_file[sizeof(t4_cfg_file)];
	struct caps_allowed caps_allowed;
	int rc;
	bool fallback;

	/* Only the master driver gets to configure the chip resources. */
	MPASS(sc->flags & MASTER_PF);

	/* Seed the allowed-capabilities masks from the driver tunables. */
#define COPY_CAPS(x) do { \
	caps_allowed.x##caps = t4_##x##caps_allowed; \
} while (0)
	bzero(&caps_allowed, sizeof(caps_allowed));
	COPY_CAPS(nbm);
	COPY_CAPS(link);
	COPY_CAPS(switch);
	COPY_CAPS(nic);
	COPY_CAPS(toe);
	COPY_CAPS(rdma);
	COPY_CAPS(crypto);
	COPY_CAPS(iscsi);
	COPY_CAPS(fcoe);
	fallback = sc->debug_flags & DF_DISABLE_CFG_RETRY ? false : true;
	snprintf(cfg_file, sizeof(cfg_file), "%s", t4_cfg_file);
retry:
	rc = apply_cfg_and_initialize(sc, cfg_file, &caps_allowed);
	if (rc != 0 && fallback) {
		/*
		 * Requested profile failed: dump the firmware's devlog for
		 * diagnosis and retry once with the built-in config and a
		 * minimal (switch + basic NIC) capability set.
		 */
		dump_devlog(sc);
		device_printf(sc->dev,
		    "failed (%d) to configure card with \"%s\" profile, "
		    "will fall back to a basic configuration and retry.\n",
		    rc, cfg_file);
		snprintf(cfg_file, sizeof(cfg_file), "%s", BUILTIN_CF);
		bzero(&caps_allowed, sizeof(caps_allowed));
		COPY_CAPS(switch);
		caps_allowed.niccaps = FW_CAPS_CONFIG_NIC;
		fallback = false;
		goto retry;
	}
#undef COPY_CAPS
	return (rc);
}
/*
* Retrieve parameters that are needed (or nice to have) very early.
*/
static int
get_params__pre_init(struct adapter *sc)
{
	int rc;
	uint32_t param[2], val[2];

	/* Format the firmware/bootstrap/TP/expansion-ROM version strings. */
	t4_get_version_info(sc);

	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));

	snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers));

	snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));

	snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.er_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.er_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.er_vers));

	/* Port bitmap and core clock are needed before initialization. */
	param[0] = FW_PARAM_DEV(PORTVEC);
	param[1] = FW_PARAM_DEV(CCLK);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to query parameters (pre_init): %d.\n", rc);
		return (rc);
	}

	sc->params.portvec = val[0];
	sc->params.nports = bitcount32(val[0]);	/* one bit per port */
	sc->params.vpd.cclk = val[1];

	/* Read device log parameters. */
	rc = -t4_init_devlog_params(sc, 1);
	if (rc == 0)
		fixup_devlog_params(sc);
	else {
		device_printf(sc->dev,
		    "failed to get devlog parameters: %d.\n", rc);
		rc = 0;	/* devlog isn't critical for device operation */
	}

	return (rc);
}
/*
* Any params that need to be set before FW_INITIALIZE.
*/
static int
set_params__pre_init(struct adapter *sc)
{
	int rc = 0;
	uint32_t param, val;

	if (chip_id(sc) >= CHELSIO_T6) {
		/* T6+: ask the firmware for a high-priority filter region. */
		param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
		val = 1;
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		/* firmwares < 1.20.1.0 do not have this param. */
		if (rc == FW_EINVAL &&
		    sc->params.fw_vers < FW_VERSION32(1, 20, 1, 0)) {
			rc = 0;
		}
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to enable high priority filters :%d.\n",
			    rc);
		}

		/* Query, then enable, page-pod storage in EDRAM if offered. */
		param = FW_PARAM_DEV(PPOD_EDRAM);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc == 0 && val == 1) {
			rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param,
			    &val);
			if (rc != 0) {
				device_printf(sc->dev,
				    "failed to set PPOD_EDRAM: %d.\n", rc);
			}
		}
	}

	/* Enable opaque VIIDs with firmwares that support it. */
	param = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
	val = 1;
	rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	if (rc == 0 && val == 1)
		sc->params.viid_smt_extn_support = true;
	else
		sc->params.viid_smt_extn_support = false;

	return (rc);
}
/*
* Retrieve various parameters that are of interest to the driver. The device
* has been initialized by the firmware at this point.
*/
static int
get_params__post_init(struct adapter *sc)
{
	int rc;
	uint32_t param[7], val[7];
	struct fw_caps_config_cmd caps;

	/* Queue/filter/L2T ranges assigned to this PF, plus core VDD. */
	param[0] = FW_PARAM_PFVF(IQFLINT_START);
	param[1] = FW_PARAM_PFVF(EQ_START);
	param[2] = FW_PARAM_PFVF(FILTER_START);
	param[3] = FW_PARAM_PFVF(FILTER_END);
	param[4] = FW_PARAM_PFVF(L2T_START);
	param[5] = FW_PARAM_PFVF(L2T_END);
	param[6] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 7, param, val);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to query parameters (post_init): %d.\n", rc);
		return (rc);
	}

	sc->sge.iq_start = val[0];
	sc->sge.eq_start = val[1];
	/* Signed compare: an empty range has end < start. */
	if ((int)val[3] > (int)val[2]) {
		sc->tids.ftid_base = val[2];
		sc->tids.ftid_end = val[3];
		sc->tids.nftids = val[3] - val[2] + 1;
	}
	sc->vres.l2t.start = val[4];
	sc->vres.l2t.size = val[5] - val[4] + 1;
	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
	    ("%s: L2 table size (%u) larger than expected (%u)",
	    __func__, sc->vres.l2t.size, L2T_SIZE));
	sc->params.core_vdd = val[6];

	param[0] = FW_PARAM_PFVF(IQFLINT_END);
	param[1] = FW_PARAM_PFVF(EQ_END);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to query parameters (post_init2): %d.\n", rc);
		return (rc);
	}
	MPASS((int)val[0] >= sc->sge.iq_start);
	sc->sge.iqmap_sz = val[0] - sc->sge.iq_start + 1;
	MPASS((int)val[1] >= sc->sge.eq_start);
	sc->sge.eqmap_sz = val[1] - sc->sge.eq_start + 1;

	if (chip_id(sc) >= CHELSIO_T6) {
		sc->tids.tid_base = t4_read_reg(sc,
		    A_LE_DB_ACTIVE_TABLE_START_INDEX);

		/* High-priority filter region (T6+ only, see pre_init). */
		param[0] = FW_PARAM_PFVF(HPFILTER_START);
		param[1] = FW_PARAM_PFVF(HPFILTER_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query hpfilter parameters: %d.\n", rc);
			return (rc);
		}
		if ((int)val[1] > (int)val[0]) {
			sc->tids.hpftid_base = val[0];
			sc->tids.hpftid_end = val[1];
			sc->tids.nhpftids = val[1] - val[0] + 1;

			/*
			 * These should go off if the layout changes and the
			 * driver needs to catch up.
			 */
			MPASS(sc->tids.hpftid_base == 0);
			MPASS(sc->tids.tid_base == sc->tids.nhpftids);
		}

		/* Raw (MAC-match) filter range. */
		param[0] = FW_PARAM_PFVF(RAWF_START);
		param[1] = FW_PARAM_PFVF(RAWF_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query rawf parameters: %d.\n", rc);
			return (rc);
		}
		if ((int)val[1] > (int)val[0]) {
			sc->rawf_base = val[0];
			sc->nrawf = val[1] - val[0] + 1;
		}
	}

	/*
	 * The parameters that follow may not be available on all firmwares.  We
	 * query them individually rather than in a compound query because old
	 * firmwares fail the entire query if an unknown parameter is queried.
	 */

	/*
	 * MPS buffer group configuration.
	 */
	param[0] = FW_PARAM_DEV(MPSBGMAP);
	val[0] = 0;
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
	if (rc == 0)
		sc->params.mps_bg_map = val[0];
	else
		sc->params.mps_bg_map = UINT32_MAX;	/* Not a legal value. */

	param[0] = FW_PARAM_DEV(TPCHMAP);
	val[0] = 0;
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
	if (rc == 0)
		sc->params.tp_ch_map = val[0];
	else
		sc->params.tp_ch_map = UINT32_MAX;	/* Not a legal value. */

	/*
	 * Determine whether the firmware supports the filter2 work request.
	 */
	param[0] = FW_PARAM_DEV(FILTER2_WR);
	val[0] = 0;
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
	if (rc == 0)
		sc->params.filter2_wr_support = val[0] != 0;
	else
		sc->params.filter2_wr_support = 0;

	/*
	 * Find out whether we're allowed to use the ULPTX MEMWRITE DSGL.
	 */
	param[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
	val[0] = 0;
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
	if (rc == 0)
		sc->params.ulptx_memwrite_dsgl = val[0] != 0;
	else
		sc->params.ulptx_memwrite_dsgl = false;

	/* FW_RI_FR_NSMR_TPTE_WR support */
	param[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
	if (rc == 0)
		sc->params.fr_nsmr_tpte_wr_support = val[0] != 0;
	else
		sc->params.fr_nsmr_tpte_wr_support = false;

	/* Support for 512 SGL entries per FR MR. */
	param[0] = FW_PARAM_DEV(DEV_512SGL_MR);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
	if (rc == 0)
		sc->params.dev_512sgl_mr = val[0] != 0;
	else
		sc->params.dev_512sgl_mr = false;

	param[0] = FW_PARAM_PFVF(MAX_PKTS_PER_ETH_TX_PKTS_WR);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
	if (rc == 0)
		sc->params.max_pkts_per_eth_tx_pkts_wr = val[0];
	else
		sc->params.max_pkts_per_eth_tx_pkts_wr = 15;

	param[0] = FW_PARAM_DEV(NUM_TM_CLASS);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
	if (rc == 0) {
		MPASS(val[0] > 0 && val[0] < 256);	/* nsched_cls is 8b */
		sc->params.nsched_cls = val[0];
	} else
		sc->params.nsched_cls = sc->chip_params->nsched_cls;

	/* get capabilites */
	bzero(&caps, sizeof(caps));
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to get card capabilities: %d.\n", rc);
		return (rc);
	}

	/*
	 * Convert the big-endian caps fields to host order.  htobe16 is used
	 * here where be16toh is the intended direction; the byte swap is an
	 * involution so the result is identical on all supported platforms.
	 */
#define READ_CAPS(x) do { \
	sc->x = htobe16(caps.x); \
} while (0)
	READ_CAPS(nbmcaps);
	READ_CAPS(linkcaps);
	READ_CAPS(switchcaps);
	READ_CAPS(niccaps);
	READ_CAPS(toecaps);
	READ_CAPS(rdmacaps);
	READ_CAPS(cryptocaps);
	READ_CAPS(iscsicaps);
	READ_CAPS(fcoecaps);

	if (sc->niccaps & FW_CAPS_CONFIG_NIC_HASHFILTER) {
		/* Hashfilter mode: TOE must be off (see apply_cfg...). */
		MPASS(chip_id(sc) > CHELSIO_T4);
		MPASS(sc->toecaps == 0);
		sc->toecaps = 0;

		param[0] = FW_PARAM_DEV(NTID);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query HASHFILTER parameters: %d.\n", rc);
			return (rc);
		}
		sc->tids.ntids = val[0];
		if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) {
			/* Older firmware counted hpftids inside ntids. */
			MPASS(sc->tids.ntids >= sc->tids.nhpftids);
			sc->tids.ntids -= sc->tids.nhpftids;
		}
		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
		sc->params.hash_filter = 1;
	}
	if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
		param[0] = FW_PARAM_PFVF(ETHOFLD_START);
		param[1] = FW_PARAM_PFVF(ETHOFLD_END);
		param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query NIC parameters: %d.\n", rc);
			return (rc);
		}
		if ((int)val[1] > (int)val[0]) {
			sc->tids.etid_base = val[0];
			sc->tids.etid_end = val[1];
			sc->tids.netids = val[1] - val[0] + 1;
			sc->params.eo_wr_cred = val[2];
			sc->params.ethoffload = 1;
		}
	}
	if (sc->toecaps) {
		/* query offload-related parameters */
		param[0] = FW_PARAM_DEV(NTID);
		param[1] = FW_PARAM_PFVF(SERVER_START);
		param[2] = FW_PARAM_PFVF(SERVER_END);
		param[3] = FW_PARAM_PFVF(TDDP_START);
		param[4] = FW_PARAM_PFVF(TDDP_END);
		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query TOE parameters: %d.\n", rc);
			return (rc);
		}
		sc->tids.ntids = val[0];
		if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) {
			MPASS(sc->tids.ntids >= sc->tids.nhpftids);
			sc->tids.ntids -= sc->tids.nhpftids;
		}
		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
		if ((int)val[2] > (int)val[1]) {
			sc->tids.stid_base = val[1];
			sc->tids.nstids = val[2] - val[1] + 1;
		}
		sc->vres.ddp.start = val[3];
		sc->vres.ddp.size = val[4] - val[3] + 1;
		sc->params.ofldq_wr_cred = val[5];
		sc->params.offload = 1;
	} else {
		/*
		 * The firmware attempts memfree TOE configuration for -SO cards
		 * and will report toecaps=0 if it runs out of resources (this
		 * depends on the config file).  It may not report 0 for other
		 * capabilities dependent on the TOE in this case.  Set them to
		 * 0 here so that the driver doesn't bother tracking resources
		 * that will never be used.
		 */
		sc->iscsicaps = 0;
		sc->rdmacaps = 0;
	}
	if (sc->rdmacaps) {
		param[0] = FW_PARAM_PFVF(STAG_START);
		param[1] = FW_PARAM_PFVF(STAG_END);
		param[2] = FW_PARAM_PFVF(RQ_START);
		param[3] = FW_PARAM_PFVF(RQ_END);
		param[4] = FW_PARAM_PFVF(PBL_START);
		param[5] = FW_PARAM_PFVF(PBL_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query RDMA parameters(1): %d.\n", rc);
			return (rc);
		}
		sc->vres.stag.start = val[0];
		sc->vres.stag.size = val[1] - val[0] + 1;
		sc->vres.rq.start = val[2];
		sc->vres.rq.size = val[3] - val[2] + 1;
		sc->vres.pbl.start = val[4];
		sc->vres.pbl.size = val[5] - val[4] + 1;

		param[0] = FW_PARAM_PFVF(SQRQ_START);
		param[1] = FW_PARAM_PFVF(SQRQ_END);
		param[2] = FW_PARAM_PFVF(CQ_START);
		param[3] = FW_PARAM_PFVF(CQ_END);
		param[4] = FW_PARAM_PFVF(OCQ_START);
		param[5] = FW_PARAM_PFVF(OCQ_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query RDMA parameters(2): %d.\n", rc);
			return (rc);
		}
		sc->vres.qp.start = val[0];
		sc->vres.qp.size = val[1] - val[0] + 1;
		sc->vres.cq.start = val[2];
		sc->vres.cq.size = val[3] - val[2] + 1;
		sc->vres.ocq.start = val[4];
		sc->vres.ocq.size = val[5] - val[4] + 1;

		param[0] = FW_PARAM_PFVF(SRQ_START);
		param[1] = FW_PARAM_PFVF(SRQ_END);
		param[2] = FW_PARAM_DEV(MAXORDIRD_QP);
		param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query RDMA parameters(3): %d.\n", rc);
			return (rc);
		}
		sc->vres.srq.start = val[0];
		sc->vres.srq.size = val[1] - val[0] + 1;
		sc->params.max_ordird_qp = val[2];
		sc->params.max_ird_adapter = val[3];
	}
	if (sc->iscsicaps) {
		param[0] = FW_PARAM_PFVF(ISCSI_START);
		param[1] = FW_PARAM_PFVF(ISCSI_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query iSCSI parameters: %d.\n", rc);
			return (rc);
		}
		sc->vres.iscsi.start = val[0];
		sc->vres.iscsi.size = val[1] - val[0] + 1;
	}
	if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) {
		param[0] = FW_PARAM_PFVF(TLS_START);
		param[1] = FW_PARAM_PFVF(TLS_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query TLS parameters: %d.\n", rc);
			return (rc);
		}
		sc->vres.key.start = val[0];
		sc->vres.key.size = val[1] - val[0] + 1;
	}

	/*
	 * We've got the params we wanted to query directly from the firmware.
	 * Grab some others via other means.
	 */
	t4_init_sge_params(sc);
	t4_init_tp_params(sc);
	t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
	t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd);

	rc = t4_verify_chip_settings(sc);
	if (rc != 0)
		return (rc);
	t4_init_rx_buf_info(sc);

	return (rc);
}
#ifdef KERN_TLS
/*
 * Periodic callout that feeds the current TCP timestamp tick value into the
 * TP sync-time registers, then reschedules itself one millisecond out.
 * NOTE(review): the >> 1 / << 31 split presumably matches the HI/LO register
 * bit layout expected by the hardware for NIC TLS — confirm against the TP
 * register documentation.
 */
static void
ktls_tick(void *arg)
{
	struct adapter *sc;
	uint32_t tstamp;

	sc = arg;
	tstamp = tcp_ts_getticks();
	t4_write_reg(sc, A_TP_SYNC_TIME_HI, tstamp >> 1);
	t4_write_reg(sc, A_TP_SYNC_TIME_LO, tstamp << 31);
	callout_schedule_sbt(&sc->ktls_tick, SBT_1MS, 0, C_HARDCLOCK);
}
/*
 * Enable or disable NIC TLS (kernel TLS offload) on a T6 adapter via the
 * KTLS_HW firmware device parameter.  On enable, sets KERN_TLS_ON and starts
 * the 1ms ktls_tick callout; on disable, clears the flag and stops it.
 * Returns 0 on success or a positive errno from the firmware call.
 */
static int
t6_config_kern_tls(struct adapter *sc, bool enable)
{
int rc;
uint32_t param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_KTLS_HW) |
V_FW_PARAMS_PARAM_Y(enable ? 1 : 0) |
V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE);
rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &param);
if (rc != 0) {
CH_ERR(sc, "failed to %s NIC TLS: %d\n",
enable ? "enable" : "disable", rc);
return (rc);
}
if (enable) {
sc->flags |= KERN_TLS_ON;
callout_reset_sbt(&sc->ktls_tick, SBT_1MS, 0, ktls_tick, sc,
C_HARDCLOCK);
} else {
sc->flags &= ~KERN_TLS_ON;
callout_stop(&sc->ktls_tick);
}
return (rc);
}
#endif
/*
 * Set adapter parameters/registers that must be programmed after firmware
 * initialization: CPL encapsulation, 32b port caps, filter RSS steering,
 * error-drop policy, optional TOE timer overrides, TP reassembly limits,
 * and (on T6) kernel TLS.  Always returns 0; failures of the optional
 * t4_set_params calls here are deliberately ignored.
 */
static int
set_params__post_init(struct adapter *sc)
{
uint32_t mask, param, val;
#ifdef TCP_OFFLOAD
int i, v, shift;
#endif
/* ask for encapsulated CPLs */
param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
val = 1;
(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
/* Enable 32b port caps if the firmware supports it. */
param = FW_PARAM_PFVF(PORT_CAPS32);
val = 1;
if (t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val) == 0)
sc->params.port_caps32 = 1;
/* Let filter + maskhash steer to a part of the VI's RSS region. */
val = 1 << (G_MASKSIZE(t4_read_reg(sc, A_TP_RSS_CONFIG_TNL)) - 1);
t4_set_reg_field(sc, A_TP_RSS_CONFIG_TNL, V_MASKFILTER(M_MASKFILTER),
V_MASKFILTER(val - 1));
/*
 * Build the set of error classes whose packets should be dropped,
 * driven by the t4_drop_* / t4_attack_filter tunables, and program
 * the whole mask into TP_ERR_CONFIG in one write.
 */
mask = F_DROPERRORANY | F_DROPERRORMAC | F_DROPERRORIPVER |
F_DROPERRORFRAG | F_DROPERRORATTACK | F_DROPERRORETHHDRLEN |
F_DROPERRORIPHDRLEN | F_DROPERRORTCPHDRLEN | F_DROPERRORPKTLEN |
F_DROPERRORTCPOPT | F_DROPERRORCSUMIP | F_DROPERRORCSUM;
val = 0;
if (chip_id(sc) < CHELSIO_T6 && t4_attack_filter != 0) {
t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG, F_ATTACKFILTERENABLE,
F_ATTACKFILTERENABLE);
val |= F_DROPERRORATTACK;
}
if (t4_drop_ip_fragments != 0) {
t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG, F_FRAGMENTDROP,
F_FRAGMENTDROP);
val |= F_DROPERRORFRAG;
}
if (t4_drop_pkts_with_l2_errors != 0)
val |= F_DROPERRORMAC | F_DROPERRORETHHDRLEN;
if (t4_drop_pkts_with_l3_errors != 0) {
val |= F_DROPERRORIPVER | F_DROPERRORIPHDRLEN |
F_DROPERRORCSUMIP;
}
if (t4_drop_pkts_with_l4_errors != 0) {
val |= F_DROPERRORTCPHDRLEN | F_DROPERRORPKTLEN |
F_DROPERRORTCPOPT | F_DROPERRORCSUM;
}
t4_set_reg_field(sc, A_TP_ERR_CONFIG, mask, val);
#ifdef TCP_OFFLOAD
/*
 * Override the TOE timers with user provided tunables. This is not the
 * recommended way to change the timers (the firmware config file is) so
 * these tunables are not documented.
 *
 * All the timer tunables are in microseconds.
 */
if (t4_toe_keepalive_idle != 0) {
v = us_to_tcp_ticks(sc, t4_toe_keepalive_idle);
v &= M_KEEPALIVEIDLE;
t4_set_reg_field(sc, A_TP_KEEP_IDLE,
V_KEEPALIVEIDLE(M_KEEPALIVEIDLE), V_KEEPALIVEIDLE(v));
}
if (t4_toe_keepalive_interval != 0) {
v = us_to_tcp_ticks(sc, t4_toe_keepalive_interval);
v &= M_KEEPALIVEINTVL;
t4_set_reg_field(sc, A_TP_KEEP_INTVL,
V_KEEPALIVEINTVL(M_KEEPALIVEINTVL), V_KEEPALIVEINTVL(v));
}
if (t4_toe_keepalive_count != 0) {
v = t4_toe_keepalive_count & M_KEEPALIVEMAXR2;
t4_set_reg_field(sc, A_TP_SHIFT_CNT,
V_KEEPALIVEMAXR1(M_KEEPALIVEMAXR1) |
V_KEEPALIVEMAXR2(M_KEEPALIVEMAXR2),
V_KEEPALIVEMAXR1(1) | V_KEEPALIVEMAXR2(v));
}
if (t4_toe_rexmt_min != 0) {
v = us_to_tcp_ticks(sc, t4_toe_rexmt_min);
v &= M_RXTMIN;
t4_set_reg_field(sc, A_TP_RXT_MIN,
V_RXTMIN(M_RXTMIN), V_RXTMIN(v));
}
if (t4_toe_rexmt_max != 0) {
v = us_to_tcp_ticks(sc, t4_toe_rexmt_max);
v &= M_RXTMAX;
t4_set_reg_field(sc, A_TP_RXT_MAX,
V_RXTMAX(M_RXTMAX), V_RXTMAX(v));
}
if (t4_toe_rexmt_count != 0) {
v = t4_toe_rexmt_count & M_RXTSHIFTMAXR2;
t4_set_reg_field(sc, A_TP_SHIFT_CNT,
V_RXTSHIFTMAXR1(M_RXTSHIFTMAXR1) |
V_RXTSHIFTMAXR2(M_RXTSHIFTMAXR2),
V_RXTSHIFTMAXR1(1) | V_RXTSHIFTMAXR2(v));
}
/* Each backoff register holds 4 8-bit entries; pick reg and byte lane. */
for (i = 0; i < nitems(t4_toe_rexmt_backoff); i++) {
if (t4_toe_rexmt_backoff[i] != -1) {
v = t4_toe_rexmt_backoff[i] & M_TIMERBACKOFFINDEX0;
shift = (i & 3) << 3;
t4_set_reg_field(sc, A_TP_TCP_BACKOFF_REG0 + (i & ~3),
M_TIMERBACKOFFINDEX0 << shift, v << shift);
}
}
#endif
/*
 * Limit TOE connections to 2 reassembly "islands". This is
 * required to permit migrating TOE connections to either
 * ULP_MODE_TCPDDP or UPL_MODE_TLS.
 */
t4_tp_wr_bits_indirect(sc, A_TP_FRAG_CONFIG, V_PASSMODE(M_PASSMODE),
V_PASSMODE(2));
#ifdef KERN_TLS
if (is_ktls(sc)) {
sc->tlst.inline_keys = t4_tls_inline_keys;
sc->tlst.combo_wrs = t4_tls_combo_wrs;
if (t4_kern_tls != 0 && is_t6(sc))
t6_config_kern_tls(sc, true);
}
#endif
return (0);
}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV
/*
 * Set the device description shown to the user to "Chelsio <VPD id>".
 */
static void
t4_set_desc(struct adapter *sc)
{
struct adapter_params *p = &sc->params;
device_set_descf(sc->dev, "Chelsio %s", p->vpd.id);
}
static inline void
ifmedia_add4(struct ifmedia *ifm, int m)
{
ifmedia_add(ifm, m, 0, NULL);
ifmedia_add(ifm, m | IFM_ETH_TXPAUSE, 0, NULL);
ifmedia_add(ifm, m | IFM_ETH_RXPAUSE, 0, NULL);
ifmedia_add(ifm, m | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE, 0, NULL);
}
/*
 * This is the selected media, which is not quite the same as the active media.
 * The media line in ifconfig is "media: Ethernet selected (active)" if selected
 * and active are not the same, and "media: Ethernet selected" otherwise.
 *
 * Selects IFM_AUTO when autonegotiation is in use, otherwise builds a fixed
 * full-duplex media word from the requested pause settings and speed (falling
 * back to the port's top speed if no speed was requested).
 */
static void
set_current_media(struct port_info *pi)
{
struct link_config *lc;
struct ifmedia *ifm;
int mword;
u_int speed;
PORT_LOCK_ASSERT_OWNED(pi);
/* Leave current media alone if it's already set to IFM_NONE. */
ifm = &pi->media;
if (ifm->ifm_cur != NULL &&
IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_NONE)
return;
lc = &pi->link_cfg;
if (lc->requested_aneg != AUTONEG_DISABLE &&
lc->pcaps & FW_PORT_CAP32_ANEG) {
ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
return;
}
mword = IFM_ETHER | IFM_FDX;
if (lc->requested_fc & PAUSE_TX)
mword |= IFM_ETH_TXPAUSE;
if (lc->requested_fc & PAUSE_RX)
mword |= IFM_ETH_RXPAUSE;
if (lc->requested_speed == 0)
speed = port_top_speed(pi) * 1000; /* Gbps -> Mbps */
else
speed = lc->requested_speed;
mword |= port_mword(pi, speed_to_fwcap(speed));
ifmedia_set(ifm, mword);
}
/*
* Returns true if the ifmedia list for the port cannot change.
*/
static bool
fixed_ifmedia(struct port_info *pi)
{
return (pi->port_type == FW_PORT_TYPE_BT_SGMII ||
pi->port_type == FW_PORT_TYPE_BT_XFI ||
pi->port_type == FW_PORT_TYPE_BT_XAUI ||
pi->port_type == FW_PORT_TYPE_KX4 ||
pi->port_type == FW_PORT_TYPE_KX ||
pi->port_type == FW_PORT_TYPE_KR ||
pi->port_type == FW_PORT_TYPE_BP_AP ||
pi->port_type == FW_PORT_TYPE_BP4_AP ||
pi->port_type == FW_PORT_TYPE_BP40_BA ||
pi->port_type == FW_PORT_TYPE_KR4_100G ||
pi->port_type == FW_PORT_TYPE_KR_SFP28 ||
pi->port_type == FW_PORT_TYPE_KR_XLAUI);
}
/*
 * Rebuild the port's ifmedia list from the supported-speed bits in the
 * port capabilities.  Each known speed is added with all pause-flag
 * combinations (ifmedia_add4); unknown media types collapse into a single
 * IFM_UNKNOWN entry.  Falls back to a lone IFM_NONE entry if no speed maps
 * to a media word.  No-op for ports whose media list is fixed.
 */
static void
build_medialist(struct port_info *pi)
{
uint32_t ss, speed;
int unknown, mword, bit;
struct link_config *lc;
struct ifmedia *ifm;
PORT_LOCK_ASSERT_OWNED(pi);
if (pi->flags & FIXED_IFMEDIA)
return;
/*
 * Rebuild the ifmedia list.
 */
ifm = &pi->media;
ifmedia_removeall(ifm);
lc = &pi->link_cfg;
ss = G_FW_PORT_CAP32_SPEED(lc->pcaps); /* Supported Speeds */
if (__predict_false(ss == 0)) { /* not supposed to happen. */
MPASS(ss != 0);
no_media:
MPASS(LIST_EMPTY(&ifm->ifm_list));
ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
return;
}
unknown = 0;
/* Walk each set speed bit up to the highest one present in ss. */
for (bit = S_FW_PORT_CAP32_SPEED; bit < fls(ss); bit++) {
speed = 1 << bit;
MPASS(speed & M_FW_PORT_CAP32_SPEED);
if (ss & speed) {
mword = port_mword(pi, speed);
if (mword == IFM_NONE) {
goto no_media;
} else if (mword == IFM_UNKNOWN)
unknown++;
else
ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | mword);
}
}
if (unknown > 0) /* Add one unknown for all unknown media types. */
ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | IFM_UNKNOWN);
if (lc->pcaps & FW_PORT_CAP32_ANEG)
ifmedia_add(ifm, IFM_ETHER | IFM_AUTO, 0, NULL);
set_current_media(pi);
}
/*
 * Initialize the requested fields in the link config based on driver tunables.
 *
 * Tunables consulted: t4_autoneg (0/1/other -> disable/enable/auto),
 * t4_pause_settings, t4_fec, and t4_force_fec.
 */
static void
init_link_config(struct port_info *pi)
{
struct link_config *lc = &pi->link_cfg;
PORT_LOCK_ASSERT_OWNED(pi);
lc->requested_caps = 0;
lc->requested_speed = 0;
if (t4_autoneg == 0)
lc->requested_aneg = AUTONEG_DISABLE;
else if (t4_autoneg == 1)
lc->requested_aneg = AUTONEG_ENABLE;
else
lc->requested_aneg = AUTONEG_AUTO;
lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX |
PAUSE_AUTONEG);
if (t4_fec & FEC_AUTO)
lc->requested_fec = FEC_AUTO;
else if (t4_fec == 0)
lc->requested_fec = FEC_NONE;
else {
/* -1 is handled by the FEC_AUTO block above and not here. */
lc->requested_fec = t4_fec &
(FEC_RS | FEC_BASER_RS | FEC_NONE | FEC_MODULE);
/* Tunable named no valid FEC bits; fall back to automatic. */
if (lc->requested_fec == 0)
lc->requested_fec = FEC_AUTO;
}
if (t4_force_fec < 0)
lc->force_fec = -1;
else if (t4_force_fec > 0)
lc->force_fec = 1;
else
lc->force_fec = 0;
}
/*
 * Makes sure that all requested settings comply with what's supported by the
 * port. Returns the number of settings that were invalid and had to be fixed.
 *
 * Checks, in order: requested speed, autonegotiation, tx/rx pause and
 * forced-pause support, and FEC.  Unsupported values are replaced with a
 * safe default (0 / AUTONEG_AUTO / PAUSE_AUTONEG / FEC_AUTO).
 */
static int
fixup_link_config(struct port_info *pi)
{
int n = 0;
struct link_config *lc = &pi->link_cfg;
uint32_t fwspeed;
PORT_LOCK_ASSERT_OWNED(pi);
/* Speed (when not autonegotiating) */
if (lc->requested_speed != 0) {
fwspeed = speed_to_fwcap(lc->requested_speed);
if ((fwspeed & lc->pcaps) == 0) {
n++;
lc->requested_speed = 0;
}
}
/* Link autonegotiation */
MPASS(lc->requested_aneg == AUTONEG_ENABLE ||
lc->requested_aneg == AUTONEG_DISABLE ||
lc->requested_aneg == AUTONEG_AUTO);
if (lc->requested_aneg == AUTONEG_ENABLE &&
!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
n++;
lc->requested_aneg = AUTONEG_AUTO;
}
/* Flow control */
MPASS((lc->requested_fc & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)) == 0);
if (lc->requested_fc & PAUSE_TX &&
!(lc->pcaps & FW_PORT_CAP32_FC_TX)) {
n++;
lc->requested_fc &= ~PAUSE_TX;
}
if (lc->requested_fc & PAUSE_RX &&
!(lc->pcaps & FW_PORT_CAP32_FC_RX)) {
n++;
lc->requested_fc &= ~PAUSE_RX;
}
/* Forced (non-negotiated) pause requires FORCE_PAUSE capability. */
if (!(lc->requested_fc & PAUSE_AUTONEG) &&
!(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)) {
n++;
lc->requested_fc |= PAUSE_AUTONEG;
}
/* FEC */
if ((lc->requested_fec & FEC_RS &&
!(lc->pcaps & FW_PORT_CAP32_FEC_RS)) ||
(lc->requested_fec & FEC_BASER_RS &&
!(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS))) {
n++;
lc->requested_fec = FEC_AUTO;
}
return (n);
}
/*
 * Apply the requested L1 settings, which are expected to be valid, to the
 * hardware.
 *
 * On a PF this issues an L1_CFG via the firmware mailbox; VFs skip the
 * mailbox call entirely.  Returns 0 on success or a positive errno.
 */
static int
apply_link_config(struct port_info *pi)
{
struct adapter *sc = pi->adapter;
struct link_config *lc = &pi->link_cfg;
int rc;
#ifdef INVARIANTS
/* Every requested setting must already be within the port's caps. */
ASSERT_SYNCHRONIZED_OP(sc);
PORT_LOCK_ASSERT_OWNED(pi);
if (lc->requested_aneg == AUTONEG_ENABLE)
MPASS(lc->pcaps & FW_PORT_CAP32_ANEG);
if (!(lc->requested_fc & PAUSE_AUTONEG))
MPASS(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE);
if (lc->requested_fc & PAUSE_TX)
MPASS(lc->pcaps & FW_PORT_CAP32_FC_TX);
if (lc->requested_fc & PAUSE_RX)
MPASS(lc->pcaps & FW_PORT_CAP32_FC_RX);
if (lc->requested_fec & FEC_RS)
MPASS(lc->pcaps & FW_PORT_CAP32_FEC_RS);
if (lc->requested_fec & FEC_BASER_RS)
MPASS(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS);
#endif
if (!(sc->flags & IS_VF)) {
rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
if (rc != 0) {
device_printf(pi->dev, "l1cfg failed: %d\n", rc);
return (rc);
}
}
/*
 * An L1_CFG will almost always result in a link-change event if the
 * link is up, and the driver will refresh the actual fec/fc/etc. when
 * the notification is processed. If the link is down then the actual
 * settings are meaningless.
 *
 * This takes care of the case where a change in the L1 settings may not
 * result in a notification.
 */
if (lc->link_ok && !(lc->requested_fc & PAUSE_AUTONEG))
lc->fc = lc->requested_fc & (PAUSE_TX | PAUSE_RX);
return (0);
}
#define FW_MAC_EXACT_CHUNK 7
/*
 * State carried across add_maddr() callbacks while walking an interface's
 * link-layer multicast address list (see update_mac_settings).
 */
struct mcaddr_ctx {
if_t ifp;		/* interface being programmed */
const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK]; /* batched exact-match addrs */
uint64_t hash;		/* hash bits accumulated by t4_alloc_mac_filt */
int i;			/* number of addresses batched in mcaddr[] */
int del;		/* 1 on the first filter write only */
int rc;			/* sticky (negative) error from the firmware */
};
/*
 * if_foreach_llmaddr() callback: batch multicast addresses into ctx->mcaddr
 * and flush a chunk of FW_MAC_EXACT_CHUNK entries to the firmware when full.
 * Returns 1 if the address was accepted, 0 if a previous firmware error
 * (negative ctx->rc) is sticky or this flush failed.
 */
static u_int
add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
struct mcaddr_ctx *ctx = arg;
struct vi_info *vi = if_getsoftc(ctx->ifp);
struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
/* A previous chunk failed; stop accumulating. */
if (ctx->rc < 0)
return (0);
ctx->mcaddr[ctx->i] = LLADDR(sdl);
MPASS(ETHER_IS_MULTICAST(ctx->mcaddr[ctx->i]));
ctx->i++;
if (ctx->i == FW_MAC_EXACT_CHUNK) {
/* del is only set for the first write, to clear old filters. */
ctx->rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, ctx->del,
ctx->i, ctx->mcaddr, NULL, &ctx->hash, 0);
if (ctx->rc < 0) {
int j;
for (j = 0; j < ctx->i; j++) {
if_printf(ctx->ifp,
"failed to add mc address"
" %02x:%02x:%02x:"
"%02x:%02x:%02x rc=%d\n",
ctx->mcaddr[j][0], ctx->mcaddr[j][1],
ctx->mcaddr[j][2], ctx->mcaddr[j][3],
ctx->mcaddr[j][4], ctx->mcaddr[j][5],
-ctx->rc);
}
return (0);
}
ctx->del = 0;
ctx->i = 0;
}
return (1);
}
/*
 * Program the port's XGMAC based on parameters in ifnet. The caller also
 * indicates which parameters should be programmed (the rest are left alone).
 *
 * 'flags' is a mask of XGMAC_* bits selecting MTU, promisc, allmulti, VLAN
 * extraction, unicast address, and/or multicast addresses.  Also re-adds the
 * VXLAN match-all TCAM entry on the main VI if multicast reprogramming
 * clobbered it.  Returns 0 or a positive errno.
 */
int
update_mac_settings(if_t ifp, int flags)
{
int rc = 0;
struct vi_info *vi = if_getsoftc(ifp);
struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
uint8_t match_all_mac[ETHER_ADDR_LEN] = {0};
ASSERT_SYNCHRONIZED_OP(sc);
KASSERT(flags, ("%s: not told what to update.", __func__));
/* -1 means "leave this setting unchanged" to t4_set_rxmode. */
if (flags & XGMAC_MTU)
mtu = if_getmtu(ifp);
if (flags & XGMAC_PROMISC)
promisc = if_getflags(ifp) & IFF_PROMISC ? 1 : 0;
if (flags & XGMAC_ALLMULTI)
allmulti = if_getflags(ifp) & IFF_ALLMULTI ? 1 : 0;
if (flags & XGMAC_VLANEX)
vlanex = if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING ? 1 : 0;
if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc,
allmulti, 1, vlanex, false);
if (rc) {
if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
rc);
return (rc);
}
}
if (flags & XGMAC_UCADDR) {
uint8_t ucaddr[ETHER_ADDR_LEN];
bcopy(if_getlladdr(ifp), ucaddr, sizeof(ucaddr));
rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt,
ucaddr, true, &vi->smt_idx);
if (rc < 0) {
rc = -rc;
if_printf(ifp, "change_mac failed: %d\n", rc);
return (rc);
} else {
/* Non-negative return is the new exact-match filter index. */
vi->xact_addr_filt = rc;
rc = 0;
}
}
if (flags & XGMAC_MCADDRS) {
struct epoch_tracker et;
struct mcaddr_ctx ctx;
int j;
ctx.ifp = ifp;
ctx.hash = 0;
ctx.i = 0;
ctx.del = 1;
ctx.rc = 0;
/*
 * Unlike other drivers, we accumulate list of pointers into
 * interface address lists and we need to keep it safe even
 * after if_foreach_llmaddr() returns, thus we must enter the
 * network epoch.
 */
NET_EPOCH_ENTER(et);
if_foreach_llmaddr(ifp, add_maddr, &ctx);
if (ctx.rc < 0) {
NET_EPOCH_EXIT(et);
rc = -ctx.rc;
return (rc);
}
/* Flush any partial chunk left over by add_maddr. */
if (ctx.i > 0) {
rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid,
ctx.del, ctx.i, ctx.mcaddr, NULL, &ctx.hash, 0);
NET_EPOCH_EXIT(et);
if (rc < 0) {
rc = -rc;
for (j = 0; j < ctx.i; j++) {
if_printf(ifp,
"failed to add mcast address"
" %02x:%02x:%02x:"
"%02x:%02x:%02x rc=%d\n",
ctx.mcaddr[j][0], ctx.mcaddr[j][1],
ctx.mcaddr[j][2], ctx.mcaddr[j][3],
ctx.mcaddr[j][4], ctx.mcaddr[j][5],
rc);
}
return (rc);
}
ctx.del = 0;
} else
NET_EPOCH_EXIT(et);
rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, ctx.hash, 0);
if (rc != 0)
if_printf(ifp, "failed to set mcast address hash: %d\n",
rc);
if (ctx.del == 0) {
/* We clobbered the VXLAN entry if there was one. */
pi->vxlan_tcam_entry = false;
}
}
if (IS_MAIN_VI(vi) && sc->vxlan_refcount > 0 &&
pi->vxlan_tcam_entry == false) {
rc = t4_alloc_raw_mac_filt(sc, vi->viid, match_all_mac,
match_all_mac, sc->rawf_base + pi->port_id, 1, pi->port_id,
true);
if (rc < 0) {
rc = -rc;
if_printf(ifp, "failed to add VXLAN TCAM entry: %d.\n",
rc);
} else {
MPASS(rc == sc->rawf_base + pi->port_id);
rc = 0;
pi->vxlan_tcam_entry = true;
}
}
return (rc);
}
/*
 * {begin|end}_synchronized_op must be called from the same thread.
 *
 * Acquire the adapter's single-operation lock (the BUSY flag).  'flags' may
 * include SLEEP_OK (wait for BUSY to clear), INTR_OK (sleep interruptibly),
 * and HOLD_LOCK (return with the adapter lock held on success).  If 'vi' is
 * non-NULL and is being detached, fails with ENXIO.  Returns 0, EBUSY,
 * EINTR, or ENXIO.
 */
int
begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
char *wmesg)
{
int rc, pri;
#ifdef WITNESS
/* the caller thinks it's ok to sleep, but is it really? */
if (flags & SLEEP_OK)
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
"begin_synchronized_op");
#endif
if (INTR_OK)
pri = PCATCH;
else
pri = 0;
ADAPTER_LOCK(sc);
for (;;) {
if (vi && IS_DETACHING(vi)) {
rc = ENXIO;
goto done;
}
if (!IS_BUSY(sc)) {
rc = 0;
break;
}
if (!(flags & SLEEP_OK)) {
rc = EBUSY;
goto done;
}
/* Sleep until the current owner wakes us via wakeup(&sc->flags). */
if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
rc = EINTR;
goto done;
}
}
KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
SET_BUSY(sc);
#ifdef INVARIANTS
/* Record the owner for debugging. */
sc->last_op = wmesg;
sc->last_op_thr = curthread;
sc->last_op_flags = flags;
#endif
done:
if (!(flags & HOLD_LOCK) || rc)
ADAPTER_UNLOCK(sc);
return (rc);
}
/*
 * Tell if_ioctl and if_init that the VI is going away. This is
 * special variant of begin_synchronized_op and must be paired with a
 * call to end_vi_detach.
 *
 * Marks the VI as detaching (which makes concurrent begin_synchronized_op
 * callers fail with ENXIO), waits for the current operation to finish, and
 * then takes the BUSY flag itself.
 */
void
begin_vi_detach(struct adapter *sc, struct vi_info *vi)
{
ADAPTER_LOCK(sc);
SET_DETACHING(vi);
/* Wake sleepers so they notice DETACHING and bail out. */
wakeup(&sc->flags);
while (IS_BUSY(sc))
mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
SET_BUSY(sc);
#ifdef INVARIANTS
sc->last_op = "t4detach";
sc->last_op_thr = curthread;
sc->last_op_flags = 0;
#endif
ADAPTER_UNLOCK(sc);
}
/*
 * Release the BUSY flag taken by begin_vi_detach and clear the VI's
 * DETACHING state, waking any waiters.
 */
void
end_vi_detach(struct adapter *sc, struct vi_info *vi)
{
ADAPTER_LOCK(sc);
KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
CLR_BUSY(sc);
CLR_DETACHING(vi);
wakeup(&sc->flags);
ADAPTER_UNLOCK(sc);
}
/*
 * {begin|end}_synchronized_op must be called from the same thread.
 *
 * Release the BUSY flag and wake waiters.  Pass LOCK_HELD if the adapter
 * lock is already owned (i.e. begin_synchronized_op was called with
 * HOLD_LOCK); the lock is dropped on return either way.
 */
void
end_synchronized_op(struct adapter *sc, int flags)
{
if (flags & LOCK_HELD)
ADAPTER_LOCK_ASSERT_OWNED(sc);
else
ADAPTER_LOCK(sc);
KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
CLR_BUSY(sc);
wakeup(&sc->flags);
ADAPTER_UNLOCK(sc);
}
/*
 * Bring the interface up: finish any pending adapter/VI initialization,
 * program the MAC, apply link settings (first VI up on the port only),
 * enable the VI and its tx queues, claim the trace queue if unclaimed,
 * mark the ifnet running, and start the tick callout.  Idempotent when
 * already running.  On error the partial setup is unwound via
 * cxgbe_uninit_synchronized.
 */
static int
cxgbe_init_synchronized(struct vi_info *vi)
{
struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
if_t ifp = vi->ifp;
int rc = 0, i;
struct sge_txq *txq;
ASSERT_SYNCHRONIZED_OP(sc);
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
return (0); /* already running */
if (!(sc->flags & FULL_INIT_DONE) && ((rc = adapter_init(sc)) != 0))
return (rc); /* error message displayed already */
if (!(vi->flags & VI_INIT_DONE) && ((rc = vi_init(vi)) != 0))
return (rc); /* error message displayed already */
rc = update_mac_settings(ifp, XGMAC_ALL);
if (rc)
goto done; /* error message displayed already */
PORT_LOCK(pi);
/* First VI up on this port: refresh and apply link configuration. */
if (pi->up_vis == 0) {
t4_update_port_info(pi);
fixup_link_config(pi);
build_medialist(pi);
apply_link_config(pi);
}
rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
if (rc != 0) {
if_printf(ifp, "enable_vi failed: %d\n", rc);
PORT_UNLOCK(pi);
goto done;
}
/*
 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized
 * if this changes.
 */
for_each_txq(vi, i, txq) {
TXQ_LOCK(txq);
txq->eq.flags |= EQ_ENABLED;
TXQ_UNLOCK(txq);
}
/*
 * The first iq of the first port to come up is used for tracing.
 */
if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
V_QUEUENUMBER(sc->traceq));
pi->flags |= HAS_TRACEQ;
}
/* all ok */
pi->up_vis++;
if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
if (pi->link_cfg.link_ok)
t4_os_link_changed(pi);
PORT_UNLOCK(pi);
mtx_lock(&vi->tick_mtx);
/* Extra VIs and VFs use the lighter-weight vi_tick. */
if (vi->pi->nvi > 1 || sc->flags & IS_VF)
callout_reset(&vi->tick, hz, vi_tick, vi);
else
callout_reset(&vi->tick, hz, cxgbe_tick, vi);
mtx_unlock(&vi->tick_mtx);
done:
if (rc != 0)
cxgbe_uninit_synchronized(vi);
return (rc);
}
/*
 * Idempotent.
 *
 * Bring the interface down: disable the VI in the MPS, disable tx queues,
 * stop the tick, and — when the last VI on the port goes down — mark the
 * link down and notify the OS.  Queues and interrupts are left intact
 * (see the comment below about negative advice / TP RAM).
 */
static int
cxgbe_uninit_synchronized(struct vi_info *vi)
{
struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
if_t ifp = vi->ifp;
int rc, i;
struct sge_txq *txq;
ASSERT_SYNCHRONIZED_OP(sc);
if (!(vi->flags & VI_INIT_DONE)) {
if (__predict_false(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
KASSERT(0, ("uninited VI is running"));
if_printf(ifp, "uninited VI with running ifnet. "
"vi->flags 0x%016lx, if_flags 0x%08x, "
"if_drv_flags 0x%08x\n", vi->flags, if_getflags(ifp),
if_getdrvflags(ifp));
}
return (0);
}
/*
 * Disable the VI so that all its data in either direction is discarded
 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz
 * tick) intact as the TP can deliver negative advice or data that it's
 * holding in its RAM (for an offloaded connection) even after the VI is
 * disabled.
 */
rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
if (rc) {
if_printf(ifp, "disable_vi failed: %d\n", rc);
return (rc);
}
for_each_txq(vi, i, txq) {
TXQ_LOCK(txq);
txq->eq.flags &= ~EQ_ENABLED;
TXQ_UNLOCK(txq);
}
mtx_lock(&vi->tick_mtx);
callout_stop(&vi->tick);
mtx_unlock(&vi->tick_mtx);
PORT_LOCK(pi);
if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
PORT_UNLOCK(pi);
return (0);
}
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
pi->up_vis--;
if (pi->up_vis > 0) {
PORT_UNLOCK(pi);
return (0);
}
/* Last VI down: report link loss.  255 = "administratively down". */
pi->link_cfg.link_ok = false;
pi->link_cfg.speed = 0;
pi->link_cfg.link_down_rc = 255;
t4_os_link_changed(pi);
PORT_UNLOCK(pi);
return (0);
}
/*
 * It is ok for this function to fail midway and return right away. t4_detach
 * will walk the entire sc->irq list and clean up whatever is valid.
 *
 * Allocates and wires up all interrupt handlers: a single "all" handler when
 * interrupts are forwarded to the firmware event queue; otherwise one error
 * interrupt (PF only), one firmware-event interrupt, and then one per rx
 * queue (NIC, netmap, and/or TOE) for every VI on every port.  Interrupt
 * names encode port/VI/queue as "<port hex><vi letter><queue hex>".
 */
int
t4_setup_intr_handlers(struct adapter *sc)
{
int rc, rid, p, q, v;
char s[8];
struct irq *irq;
struct port_info *pi;
struct vi_info *vi;
struct sge *sge = &sc->sge;
struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
struct sge_ofld_rxq *ofld_rxq;
#endif
#ifdef DEV_NETMAP
struct sge_nm_rxq *nm_rxq;
#endif
#ifdef RSS
int nbuckets = rss_getnumbuckets();
#endif
/*
 * Setup interrupts.
 */
irq = &sc->irq[0];
/* INTx uses rid 0; MSI/MSI-X rids start at 1. */
rid = sc->intr_type == INTR_INTX ? 0 : 1;
if (forwarding_intr_to_fwq(sc))
return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));
/* Multiple interrupts. */
if (sc->flags & IS_VF)
KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports,
("%s: too few intr.", __func__));
else
KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
("%s: too few intr.", __func__));
/* The first one is always error intr on PFs */
if (!(sc->flags & IS_VF)) {
rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
if (rc != 0)
return (rc);
irq++;
rid++;
}
/* The second one is always the firmware event queue (first on VFs) */
rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt");
if (rc != 0)
return (rc);
irq++;
rid++;
for_each_port(sc, p) {
pi = sc->port[p];
for_each_vi(pi, v, vi) {
vi->first_intr = rid - 1;
if (vi->nnmrxq > 0) {
/*
 * NIC and netmap rx queues share interrupt vectors where
 * their indices overlap; pick the handler accordingly.
 */
int n = max(vi->nrxq, vi->nnmrxq);
rxq = &sge->rxq[vi->first_rxq];
#ifdef DEV_NETMAP
nm_rxq = &sge->nm_rxq[vi->first_nm_rxq];
#endif
for (q = 0; q < n; q++) {
snprintf(s, sizeof(s), "%x%c%x", p,
'a' + v, q);
if (q < vi->nrxq)
irq->rxq = rxq++;
#ifdef DEV_NETMAP
if (q < vi->nnmrxq)
irq->nm_rxq = nm_rxq++;
if (irq->nm_rxq != NULL &&
irq->rxq == NULL) {
/* Netmap rx only */
rc = t4_alloc_irq(sc, irq, rid,
t4_nm_intr, irq->nm_rxq, s);
}
if (irq->nm_rxq != NULL &&
irq->rxq != NULL) {
/* NIC and Netmap rx */
rc = t4_alloc_irq(sc, irq, rid,
t4_vi_intr, irq, s);
}
#endif
if (irq->rxq != NULL &&
irq->nm_rxq == NULL) {
/* NIC rx only */
rc = t4_alloc_irq(sc, irq, rid,
t4_intr, irq->rxq, s);
}
if (rc != 0)
return (rc);
#ifdef RSS
if (q < vi->nrxq) {
bus_bind_intr(sc->dev, irq->res,
rss_getcpu(q % nbuckets));
}
#endif
irq++;
rid++;
vi->nintr++;
}
} else {
for_each_rxq(vi, q, rxq) {
snprintf(s, sizeof(s), "%x%c%x", p,
'a' + v, q);
rc = t4_alloc_irq(sc, irq, rid,
t4_intr, rxq, s);
if (rc != 0)
return (rc);
#ifdef RSS
bus_bind_intr(sc->dev, irq->res,
rss_getcpu(q % nbuckets));
#endif
irq++;
rid++;
vi->nintr++;
}
}
#ifdef TCP_OFFLOAD
/* Offload rx queues use an uppercase VI letter in the name. */
for_each_ofld_rxq(vi, q, ofld_rxq) {
snprintf(s, sizeof(s), "%x%c%x", p, 'A' + v, q);
rc = t4_alloc_irq(sc, irq, rid, t4_intr,
ofld_rxq, s);
if (rc != 0)
return (rc);
irq++;
rid++;
vi->nintr++;
}
#endif
}
}
MPASS(irq == &sc->irq[sc->intr_count]);
return (0);
}
/*
 * Program the kernel's global RSS key into the hardware.  The key is
 * written word-reversed and byte-swapped to match the layout the chip
 * expects.  No-op unless the kernel is built with RSS.
 */
static void
write_global_rss_key(struct adapter *sc)
{
#ifdef RSS
int i;
uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
CTASSERT(RSS_KEYSIZE == 40);
rss_getkey((void *)&raw_rss_key[0]);
/* Reverse word order and convert each word to big-endian. */
for (i = 0; i < nitems(rss_key); i++) {
rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
}
t4_write_rss_key(sc, &rss_key[0], -1, 1);
#endif
}
/*
 * Idempotent.
 *
 * Set up adapter-wide (not per-port) state: the adapter queues, one
 * taskqueue per port (skipping any that already exist), and — on PFs —
 * the global RSS key and interrupt enable.
 */
static int
adapter_full_init(struct adapter *sc)
{
int rc, i;
ASSERT_SYNCHRONIZED_OP(sc);
/*
 * queues that belong to the adapter (not any particular port).
 */
rc = t4_setup_adapter_queues(sc);
if (rc != 0)
return (rc);
MPASS(sc->params.nports <= nitems(sc->tq));
for (i = 0; i < sc->params.nports; i++) {
if (sc->tq[i] != NULL)
continue;
sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
taskqueue_thread_enqueue, &sc->tq[i]);
if (sc->tq[i] == NULL) {
CH_ERR(sc, "failed to allocate task queue %d\n", i);
return (ENOMEM);
}
taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
device_get_nameunit(sc->dev), i);
}
if (!(sc->flags & IS_VF)) {
write_global_rss_key(sc);
t4_intr_enable(sc);
}
return (0);
}
/*
 * One-shot wrapper around adapter_full_init: sets FULL_INIT_DONE on
 * success, unwinds via adapter_full_uninit on failure.  Must not be
 * called when FULL_INIT_DONE is already set.
 */
int
adapter_init(struct adapter *sc)
{
int rc;
ASSERT_SYNCHRONIZED_OP(sc);
ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
KASSERT((sc->flags & FULL_INIT_DONE) == 0,
("%s: FULL_INIT_DONE already", __func__));
rc = adapter_full_init(sc);
if (rc != 0)
adapter_full_uninit(sc);
else
sc->flags |= FULL_INIT_DONE;
return (rc);
}
/*
* Idempotent.
*/
static void
adapter_full_uninit(struct adapter *sc)
{
int i;
t4_teardown_adapter_queues(sc);
for (i = 0; i < nitems(sc->tq); i++) {
if (sc->tq[i] == NULL)
continue;
taskqueue_free(sc->tq[i]);
sc->tq[i] = NULL;
}
sc->flags &= ~FULL_INIT_DONE;
}
#ifdef RSS
#define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \
RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \
RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \
RSS_HASHTYPE_RSS_UDP_IPV6)
/*
 * Translates kernel RSS hash-configuration bits to the hardware's
 * VI-config enable bits.  A UDP hash request enables the corresponding
 * 4-tuple hash plus the global UDP enable.
 */
static int
hashconfig_to_hashen(int hashconfig)
{
	int hashen = 0;

	/* 2-tuple (address-only) hashes. */
	if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
	if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;

	/* 4-tuple hashes (TCP or UDP selects the same enable bit). */
	if (hashconfig & (RSS_HASHTYPE_RSS_TCP_IPV4 |
	    RSS_HASHTYPE_RSS_UDP_IPV4))
		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
	if (hashconfig & (RSS_HASHTYPE_RSS_TCP_IPV6 |
	    RSS_HASHTYPE_RSS_UDP_IPV6))
		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;

	/* UDP additionally needs the global UDP enable. */
	if (hashconfig & (RSS_HASHTYPE_RSS_UDP_IPV4 |
	    RSS_HASHTYPE_RSS_UDP_IPV6))
		hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN;

	return (hashen);
}
/* Translates hardware hash types to kernel. */
/*
 * Inverse of hashconfig_to_hashen: maps the VI-config enable bits back to
 * kernel RSS_HASHTYPE_* bits.  UDP bits are reported only when UDPEN and
 * the matching 4-tuple enable are both set.
 */
static int
hashen_to_hashconfig(int hashen)
{
int hashconfig = 0;
if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
/*
 * If UDP hashing was enabled it must have been enabled for
 * either IPv4 or IPv6 (inclusive or). Enabling UDP without
 * enabling any 4-tuple hash is nonsense configuration.
 */
MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));
if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
}
if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
hashconfig |= RSS_HASHTYPE_RSS_IPV4;
if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
hashconfig |= RSS_HASHTYPE_RSS_IPV6;
return (hashconfig);
}
#endif
/*
 * Idempotent.
 *
 * Full VI initialization: allocate the VI's tx/rx/fl queues, fill and
 * program its RSS indirection table (warning about suboptimal queue/table
 * ratios), and program the RSS hash enables and default queue.  With
 * kernel RSS the table follows the kernel's bucket mapping; otherwise
 * queues are spread round-robin.
 */
static int
vi_full_init(struct vi_info *vi)
{
struct adapter *sc = vi->adapter;
struct sge_rxq *rxq;
int rc, i, j;
#ifdef RSS
int nbuckets = rss_getnumbuckets();
int hashconfig = rss_gethashconfig();
int extra;
#endif
ASSERT_SYNCHRONIZED_OP(sc);
/*
 * Allocate tx/rx/fl queues for this VI.
 */
rc = t4_setup_vi_queues(vi);
if (rc != 0)
return (rc);
/*
 * Setup RSS for this VI. Save a copy of the RSS table for later use.
 */
if (vi->nrxq > vi->rss_size) {
CH_ALERT(vi, "nrxq (%d) > hw RSS table size (%d); "
"some queues will never receive traffic.\n", vi->nrxq,
vi->rss_size);
} else if (vi->rss_size % vi->nrxq) {
CH_ALERT(vi, "nrxq (%d), hw RSS table size (%d); "
"expect uneven traffic distribution.\n", vi->nrxq,
vi->rss_size);
}
#ifdef RSS
if (vi->nrxq != nbuckets) {
CH_ALERT(vi, "nrxq (%d) != kernel RSS buckets (%d);"
"performance will be impacted.\n", vi->nrxq, nbuckets);
}
#endif
if (vi->rss == NULL)
vi->rss = malloc(vi->rss_size * sizeof (*vi->rss), M_CXGBE,
M_ZERO | M_WAITOK);
/* Fill every slot of the indirection table with an rx queue's abs id. */
for (i = 0; i < vi->rss_size;) {
#ifdef RSS
j = rss_get_indirection_to_bucket(i);
j %= vi->nrxq;
rxq = &sc->sge.rxq[vi->first_rxq + j];
vi->rss[i++] = rxq->iq.abs_id;
#else
for_each_rxq(vi, j, rxq) {
vi->rss[i++] = rxq->iq.abs_id;
if (i == vi->rss_size)
break;
}
#endif
}
rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
vi->rss, vi->rss_size);
if (rc != 0) {
CH_ERR(vi, "rss_config failed: %d\n", rc);
return (rc);
}
#ifdef RSS
vi->hashen = hashconfig_to_hashen(hashconfig);
/*
 * We may have had to enable some hashes even though the global config
 * wants them disabled. This is a potential problem that must be
 * reported to the user.
 */
extra = hashen_to_hashconfig(vi->hashen) ^ hashconfig;
/*
 * If we consider only the supported hash types, then the enabled hashes
 * are a superset of the requested hashes. In other words, there cannot
 * be any supported hash that was requested but not enabled, but there
 * can be hashes that were not requested but had to be enabled.
 */
extra &= SUPPORTED_RSS_HASHTYPES;
MPASS((extra & hashconfig) == 0);
if (extra) {
CH_ALERT(vi,
"global RSS config (0x%x) cannot be accommodated.\n",
hashconfig);
}
if (extra & RSS_HASHTYPE_RSS_IPV4)
CH_ALERT(vi, "IPv4 2-tuple hashing forced on.\n");
if (extra & RSS_HASHTYPE_RSS_TCP_IPV4)
CH_ALERT(vi, "TCP/IPv4 4-tuple hashing forced on.\n");
if (extra & RSS_HASHTYPE_RSS_IPV6)
CH_ALERT(vi, "IPv6 2-tuple hashing forced on.\n");
if (extra & RSS_HASHTYPE_RSS_TCP_IPV6)
CH_ALERT(vi, "TCP/IPv6 4-tuple hashing forced on.\n");
if (extra & RSS_HASHTYPE_RSS_UDP_IPV4)
CH_ALERT(vi, "UDP/IPv4 4-tuple hashing forced on.\n");
if (extra & RSS_HASHTYPE_RSS_UDP_IPV6)
CH_ALERT(vi, "UDP/IPv6 4-tuple hashing forced on.\n");
#else
/* No kernel RSS: enable everything the hardware offers. */
vi->hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
#endif
rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, vi->rss[0],
0, 0);
if (rc != 0) {
CH_ERR(vi, "rss hash/defaultq config failed: %d\n", rc);
return (rc);
}
return (0);
}
/*
 * One-shot wrapper around vi_full_init: sets VI_INIT_DONE on success,
 * unwinds via vi_full_uninit on failure.  Must not be called when
 * VI_INIT_DONE is already set.
 */
int
vi_init(struct vi_info *vi)
{
int rc;
ASSERT_SYNCHRONIZED_OP(vi->adapter);
KASSERT((vi->flags & VI_INIT_DONE) == 0,
("%s: VI_INIT_DONE already", __func__));
rc = vi_full_init(vi);
if (rc != 0)
vi_full_uninit(vi);
else
vi->flags |= VI_INIT_DONE;
return (rc);
}
/*
 * Idempotent.
 *
 * Undo vi_full_init: quiesce the VI (only if it was fully initialized),
 * free its RSS tables, and tear down its queues.
 */
static void
vi_full_uninit(struct vi_info *vi)
{
if (vi->flags & VI_INIT_DONE) {
quiesce_vi(vi);
free(vi->rss, M_CXGBE);
free(vi->nm_rss, M_CXGBE);
}
t4_teardown_vi_queues(vi);
vi->flags &= ~VI_INIT_DONE;
}
/*
 * Drain a tx queue completely.  The queue must already be disabled
 * (EQ_ENABLED clear) so no new work arrives.  If the hardware is alive,
 * wait for it to consume and for the driver to reclaim everything; if the
 * hardware is gone, free all pending mbufs and fake up the status page so
 * the queue looks empty.
 */
static void
quiesce_txq(struct sge_txq *txq)
{
struct sge_eq *eq = &txq->eq;
struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
MPASS(eq->flags & EQ_SW_ALLOCATED);
MPASS(!(eq->flags & EQ_ENABLED));
/* Wait for the mp_ring to empty. */
while (!mp_ring_is_idle(txq->r)) {
mp_ring_check_drainage(txq->r, 4096);
pause("rquiesce", 1);
}
MPASS(txq->txp.npkt == 0);
if (eq->flags & EQ_HW_ALLOCATED) {
/*
 * Hardware is alive and working normally. Wait for it to
 * finish and then wait for the driver to catch up and reclaim
 * all descriptors.
 */
while (spg->cidx != htobe16(eq->pidx))
pause("equiesce", 1);
while (eq->cidx != eq->pidx)
pause("dquiesce", 1);
} else {
/*
 * Hardware is unavailable. Discard all pending tx and reclaim
 * descriptors directly.
 */
TXQ_LOCK(txq);
while (eq->cidx != eq->pidx) {
struct mbuf *m, *nextpkt;
struct tx_sdesc *txsd;
txsd = &txq->sdesc[eq->cidx];
/* Free the whole packet chain attached to this descriptor. */
for (m = txsd->m; m != NULL; m = nextpkt) {
nextpkt = m->m_nextpkt;
m->m_nextpkt = NULL;
m_freem(m);
}
IDXINCR(eq->cidx, txsd->desc_used, eq->sidx);
}
spg->pidx = spg->cidx = htobe16(eq->cidx);
TXQ_UNLOCK(txq);
}
}
/*
 * Quiesce a work-request queue.  Intentionally a no-op for now (see the
 * XXXTX marker); kept so quiesce_vi treats all queue types uniformly.
 */
static void
quiesce_wrq(struct sge_wrq *wrq)
{
/* XXXTX */
}
/*
 * Take an ingress queue out of service and, if it has a freelist, doom the
 * freelist so the starvation machinery stops touching it.  Buffers are
 * released here only when the hardware is already gone; otherwise the
 * normal teardown path frees them later.
 */
static void
quiesce_iq_fl(struct adapter *sc, struct sge_iq *iq, struct sge_fl *fl)
{
	/* Synchronize with the interrupt handler */
	while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
		pause("iqfree", 1);

	if (fl != NULL) {
		MPASS(iq->flags & IQ_HAS_FL);

		/* Mark the fl doomed under sfl_lock, then stop the callout. */
		mtx_lock(&sc->sfl_lock);
		FL_LOCK(fl);
		fl->flags |= FL_DOOMED;
		FL_UNLOCK(fl);
		callout_stop(&sc->sfl_callout);
		mtx_unlock(&sc->sfl_lock);

		KASSERT((fl->flags & FL_STARVING) == 0,
		    ("%s: still starving", __func__));

		/* Release all buffers if hardware is no longer available. */
		if (!(iq->flags & IQ_HW_ALLOCATED))
			free_fl_buffers(sc, fl);
	}
}
/*
 * Wait for all activity on all the queues of the VI to complete.  It is
 * assumed that no new work is being enqueued by the hardware or the driver.
 * That part should be arranged before calling this function.
 */
static void
quiesce_vi(struct vi_info *vi)
{
	int i;
	struct adapter *sc = vi->adapter;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	struct sge_ofld_txq *ofld_txq;
#endif

	/* Nothing was ever set up for this VI. */
	if (!(vi->flags & VI_INIT_DONE))
		return;

	/* Tx side first: NIC tx queues, then offload work-request queues. */
	for_each_txq(vi, i, txq) {
		quiesce_txq(txq);
	}

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	for_each_ofld_txq(vi, i, ofld_txq) {
		quiesce_wrq(&ofld_txq->wrq);
	}
#endif

	/* Rx side: disable each iq and doom its freelist. */
	for_each_rxq(vi, i, rxq) {
		quiesce_iq_fl(sc, &rxq->iq, &rxq->fl);
	}

#ifdef TCP_OFFLOAD
	for_each_ofld_rxq(vi, i, ofld_rxq) {
		quiesce_iq_fl(sc, &ofld_rxq->iq, &ofld_rxq->fl);
	}
#endif
}
/*
 * Allocate a shareable, active IRQ resource for 'rid' and install 'handler'
 * as its ithread handler (no filter).  On success the resource, rid, and
 * handler tag are recorded in *irq and the interrupt is labeled with 'name'
 * (visible in e.g. vmstat -i).
 *
 * NOTE(review): if bus_setup_intr() fails, the allocated resource is left
 * in irq->res -- presumably callers unwind with t4_free_irq(), which
 * releases it; confirm against the call sites.
 */
static int
t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
    driver_intr_t *handler, void *arg, char *name)
{
	int rc;

	irq->rid = rid;
	irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (irq->res == NULL) {
		device_printf(sc->dev,
		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
		return (ENOMEM);
	}

	rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
	    NULL, handler, arg, &irq->tag);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to setup interrupt for rid %d, name %s: %d\n",
		    rid, name, rc);
	} else if (name)
		bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name);

	return (rc);
}
static int
t4_free_irq(struct adapter *sc, struct irq *irq)
{
if (irq->tag)
bus_teardown_intr(sc->dev, irq->res, irq->tag);
if (irq->res)
bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
bzero(irq, sizeof(*irq));
return (0);
}
/*
 * Fill in a register dump.  The version word carries the chip id in the low
 * bits with the revision shifted above it; regs->len bytes of register
 * contents follow in 'buf'.
 */
static void
get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
{
	regs->version = chip_id(sc) | (chip_rev(sc) << 10);
	t4_get_regs(sc, buf, regs->len);
}
#define A_PL_INDIR_CMD 0x1f8
#define S_PL_AUTOINC 31
#define M_PL_AUTOINC 0x1U
#define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC)
#define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC)
#define S_PL_VFID 20
#define M_PL_VFID 0xffU
#define V_PL_VFID(x) ((x) << S_PL_VFID)
#define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID)
#define S_PL_ADDR 0
#define M_PL_ADDR 0xfffffU
#define V_PL_ADDR(x) ((x) << S_PL_ADDR)
#define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR)
#define A_PL_INDIR_DATA 0x1fc
/*
 * Read one 64-bit MPS VF statistic as two 32-bit halves (low word first).
 * A VF reads its own mapped registers directly.  The PF goes through the PL
 * indirect window instead: with V_PL_AUTOINC(1) set, two back-to-back reads
 * of A_PL_INDIR_DATA return consecutive registers, i.e. both halves.  The
 * PF path asserts reg_lock because the indirect window carries state
 * between the command write and the data reads.
 */
static uint64_t
read_vf_stat(struct adapter *sc, u_int vin, int reg)
{
	u32 stats[2];

	if (sc->flags & IS_VF) {
		stats[0] = t4_read_reg(sc, VF_MPS_REG(reg));
		stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4));
	} else {
		mtx_assert(&sc->reg_lock, MA_OWNED);
		t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
		    V_PL_VFID(vin) | V_PL_ADDR(VF_MPS_REG(reg)));
		stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA);
		stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA);	/* autoinc'd */
	}
	return (((uint64_t)stats[1]) << 32 | stats[0]);
}
/*
 * Snapshot the full set of MPS tx/rx statistics for one VI into *stats.
 * The PF holds reg_lock across the whole sequence because read_vf_stat()
 * uses the shared PL indirect window in that case; a VF reads its own
 * registers and needs no lock.
 */
static void
t4_get_vi_stats(struct adapter *sc, u_int vin, struct fw_vi_stats_vf *stats)
{
/* Read the 64-bit counter whose low half lives at A_MPS_VF_STAT_<name>_L. */
#define GET_STAT(name) \
	read_vf_stat(sc, vin, A_MPS_VF_STAT_##name##_L)

	if (!(sc->flags & IS_VF))
		mtx_lock(&sc->reg_lock);
	stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES);
	stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES);
	stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES);
	stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES);
	stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES);
	stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES);
	stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES);
	stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES);
	stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES);
	stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES);
	stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES);
	stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES);
	stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES);
	stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES);
	stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES);
	stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES);
	if (!(sc->flags & IS_VF))
		mtx_unlock(&sc->reg_lock);

#undef GET_STAT
}
/*
 * Zero every MPS statistic for a VI by writing 0 through the
 * auto-incrementing PL indirect window, covering the range from the first
 * tx counter to the last rx counter.
 *
 * NOTE(review): no reg_lock is taken here even though the indirect window
 * is used (read_vf_stat asserts it held) -- presumably the caller holds
 * reg_lock; confirm against the call sites.
 */
static void
t4_clr_vi_stats(struct adapter *sc, u_int vin)
{
	int reg;

	t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | V_PL_VFID(vin) |
	    V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L)));
	for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
	    reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
		t4_write_reg(sc, A_PL_INDIR_DATA, 0);
}
static void
vi_refresh_stats(struct vi_info *vi)
{
struct timeval tv;
const struct timeval interval = {0, 250000}; /* 250ms */
mtx_assert(&vi->tick_mtx, MA_OWNED);
if (vi->flags & VI_SKIP_STATS)
return;
getmicrotime(&tv);
timevalsub(&tv, &interval);
if (timevalcmp(&tv, &vi->last_refreshed, <))
return;
t4_get_vi_stats(vi->adapter, vi->vin, &vi->stats);
getmicrotime(&vi->last_refreshed);
}
static void
cxgbe_refresh_stats(struct vi_info *vi)
{
u_int i, v, tnl_cong_drops, chan_map;
struct timeval tv;
const struct timeval interval = {0, 250000}; /* 250ms */
struct port_info *pi;
struct adapter *sc;
mtx_assert(&vi->tick_mtx, MA_OWNED);
if (vi->flags & VI_SKIP_STATS)
return;
getmicrotime(&tv);
timevalsub(&tv, &interval);
if (timevalcmp(&tv, &vi->last_refreshed, <))
return;
pi = vi->pi;
sc = vi->adapter;
tnl_cong_drops = 0;
t4_get_port_stats(sc, pi->port_id, &pi->stats);
chan_map = pi->rx_e_chan_map;
while (chan_map) {
i = ffs(chan_map) - 1;
mtx_lock(&sc->reg_lock);
t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1,
A_TP_MIB_TNL_CNG_DROP_0 + i);
mtx_unlock(&sc->reg_lock);
tnl_cong_drops += v;
chan_map &= ~(1 << i);
}
pi->tnl_cong_drops = tnl_cong_drops;
getmicrotime(&vi->last_refreshed);
}
/*
 * Periodic callout for the main VI: refresh port + VI statistics and
 * reschedule itself for one second later.
 */
static void
cxgbe_tick(void *arg)
{
	struct vi_info *vi = arg;	/* callout argument is the main VI */

	MPASS(IS_MAIN_VI(vi));
	mtx_assert(&vi->tick_mtx, MA_OWNED);	/* mtx-protected callout */

	cxgbe_refresh_stats(vi);
	callout_schedule(&vi->tick, hz);
}
/*
 * Periodic callout for a non-main (extra) VI: refresh its statistics and
 * reschedule itself for one second later.
 */
static void
vi_tick(void *arg)
{
	struct vi_info *vi = arg;	/* callout argument is the VI */

	mtx_assert(&vi->tick_mtx, MA_OWNED);	/* mtx-protected callout */

	vi_refresh_stats(vi);
	callout_schedule(&vi->tick, hz);
}
/*
 * Should match fw_caps_config_<foo> enums in t4fw_interface.h
 *
 * Each entry is a printf %b-style bit decoder: the leading "\20" selects
 * base-16 output and each "\NNN" byte gives the (1-based) bit position of
 * the flag name that follows.  These strings are handed to
 * sysctl_bitfield_16b via the SYSCTL_CAP macro in t4_sysctls() below.
 */
static char *caps_decoder[] = {
	"\20\001IPMI\002NCSI",				/* 0: NBM */
	"\20\001PPP\002QFC\003DCBX",			/* 1: link */
	"\20\001INGRESS\002EGRESS",			/* 2: switch */
	"\20\001NIC\002VM\003IDS\004UM\005UM_ISGL"	/* 3: NIC */
	    "\006HASHFILTER\007ETHOFLD",
	"\20\001TOE",					/* 4: TOE */
	"\20\001RDDP\002RDMAC",				/* 5: RDMA */
	"\20\001INITIATOR_PDU\002TARGET_PDU"		/* 6: iSCSI */
	    "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD"
	    "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD"
	    "\007T10DIF"
	    "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD",
	"\20\001LOOKASIDE\002TLSKEYS\003IPSEC_INLINE"	/* 7: Crypto */
	    "\004TLS_HW",
	"\20\001INITIATOR\002TARGET\003CTRL_OFLD"	/* 8: FCoE */
	    "\004PO_INITIATOR\005PO_TARGET",
};
/*
 * Register all adapter-wide sysctls (dev.t4nex.X and its children).  The VF
 * driver gets only the nodes up to the IS_VF check; everything after it is
 * PF-only.  Also initializes the tunable fields the sysctls point at.
 */
void
t4_sysctls(struct adapter *sc)
{
	struct sysctl_ctx_list *ctx = &sc->ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children, *c0;
	/* %b-style decoder for the doorbell availability bitfield. */
	static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};

	/*
	 * dev.t4nex.X.
	 */
	oid = device_get_sysctl_tree(sc->dev);
	c0 = children = SYSCTL_CHILDREN(oid);

	sc->sc_do_rxcopy = 1;
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
	    &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
	    sc->params.nports, "# of ports");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, doorbells,
	    (uintptr_t)&sc->doorbells, sysctl_bitfield_8b, "A",
	    "available doorbells");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
	    sc->params.vpd.cclk, "core clock frequency (in KHz)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    sc->params.sge.timer_val, sizeof(sc->params.sge.timer_val),
	    sysctl_int_array, "A", "interrupt holdoff timer values (us)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    sc->params.sge.counter_val, sizeof(sc->params.sge.counter_val),
	    sysctl_int_array, "A", "interrupt holdoff packet counter values");

	t4_sge_sysctls(sc, ctx, children);

	sc->lro_timeout = 100;
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
	    &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW,
	    &sc->debug_flags, 0, "flags to enable runtime debugging");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version",
	    CTLFLAG_RD, sc->tp_version, 0, "TP microcode version");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
	    CTLFLAG_RD, sc->fw_version, 0, "firmware version");

	/* Everything below this point is PF-only. */
	if (sc->flags & IS_VF)
		return;

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
	    NULL, chip_rev(sc), "chip hardware revision");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn",
	    CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn",
	    CTLFLAG_RD, sc->params.vpd.pn, 0, "part number");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec",
	    CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "md_version",
	    CTLFLAG_RD, sc->params.vpd.md, 0, "manufacturing diags version");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na",
	    CTLFLAG_RD, sc->params.vpd.na, 0, "network address");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD,
	    sc->er_version, 0, "expansion ROM version");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD,
	    sc->bs_version, 0, "bootstrap firmware version");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD,
	    NULL, sc->params.scfg_vers, "serial config version");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD,
	    NULL, sc->params.vpd_vers, "VPD version");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
	    CTLFLAG_RD, sc->cfg_file, 0, "configuration file");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
	    sc->cfcsum, "config file checksum");

	/* One decoder sysctl per firmware capability word. */
#define SYSCTL_CAP(name, n, text) \
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, caps_decoder[n], \
	    (uintptr_t)&sc->name, sysctl_bitfield_16b, "A", \
	    "available " text " capabilities")

	SYSCTL_CAP(nbmcaps, 0, "NBM");
	SYSCTL_CAP(linkcaps, 1, "link");
	SYSCTL_CAP(switchcaps, 2, "switch");
	SYSCTL_CAP(niccaps, 3, "NIC");
	SYSCTL_CAP(toecaps, 4, "TCP offload");
	SYSCTL_CAP(rdmacaps, 5, "RDMA");
	SYSCTL_CAP(iscsicaps, 6, "iSCSI");
	SYSCTL_CAP(cryptocaps, 7, "crypto");
	SYSCTL_CAP(fcoecaps, 8, "FCoE");
#undef SYSCTL_CAP

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
	    NULL, sc->tids.nftids, "number of filters");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_temperature, "I", "chip temperature (in Celsius)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reset_sensor",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_reset_sensor, "I", "reset the chip's temperature sensor.");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "loadavg",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_loadavg, "A",
	    "microprocessor load averages (debug firmwares only)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "core_vdd",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_vdd,
	    "I", "core Vdd (in mV)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "local_cpus",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, LOCAL_CPUS,
	    sysctl_cpus, "A", "local CPUs");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_cpus",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, INTR_CPUS,
	    sysctl_cpus, "A", "preferred CPUs for interrupts");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "swintr", CTLFLAG_RW,
	    &sc->swintr, 0, "software triggered interrupts");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reset",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_reset, "I",
	    "1 = reset adapter, 0 = zero reset counter");

	/*
	 * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
	 */
	oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
	    CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
	    "logs and miscellaneous information");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_cctrl, "A", "congestion control");

	/* CIM ingress queues; the arg1 index selects which queue to dump. */
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1,
	    sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2,
	    sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 3,
	    sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 4,
	    sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 5,
	    sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_cim_la, "A", "CIM logic analyzer");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_cim_ma_la, "A", "CIM MA logic analyzer");

	/* CIM egress queues; arg1 >= CIM_NUM_IBQ selects the OBQ range. */
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
	    0 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
	    1 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
	    2 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
	    3 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
	    4 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
	    5 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
	if (chip_id(sc) > CHELSIO_T4) {
		/* T5+ have two extra OBQs. */
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
		    6 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A",
		    "CIM OBQ 6 (SGE0-RX)");
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
		    7 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A",
		    "CIM OBQ 7 (SGE1-RX)");
	}

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_cim_qcfg, "A", "CIM queue configuration");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_cpl_stats, "A", "CPL statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_ddp_stats, "A", "non-TCP DDP statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tid_stats",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_tid_stats, "A", "tid stats");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_devlog, "A", "firmware's device log");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_fcoe_stats, "A", "FCoE statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_hw_sched, "A", "hardware scheduler ");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_l2t, "A", "hardware L2 table");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "smt",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_smt, "A", "hardware source MAC table");

#ifdef INET6
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "clip",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_clip, "A", "active CLIP table entries");
#endif

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_lb_stats, "A", "loopback statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_meminfo, "A", "memory regions");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6,
	    "A", "MPS TCAM entries");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_path_mtus, "A", "path MTUs");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_pm_stats, "A", "PM statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_rdma_stats, "A", "RDMA statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_tcp_stats, "A", "TCP statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_tids, "A", "TID information");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_tp_err_stats, "A", "TP error statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tnl_stats",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_tnl_stats, "A", "TP tunnel statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_tp_la_mask, "I", "TP logic analyzer event capture mask");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_tp_la, "A", "TP logic analyzer");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_tx_rate, "A", "Tx rate");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_ulprx_la, "A", "ULPRX logic analyzer");

	if (chip_id(sc) >= CHELSIO_T5) {
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
		    sysctl_wcwr_stats, "A", "write combined work requests");
	}

#ifdef KERN_TLS
	if (is_ktls(sc)) {
		/*
		 * dev.t4nex.0.tls.
		 */
		oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "tls",
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "KERN_TLS parameters");
		children = SYSCTL_CHILDREN(oid);

		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "inline_keys",
		    CTLFLAG_RW, &sc->tlst.inline_keys, 0, "Always pass TLS "
		    "keys in work requests (1) or attempt to store TLS keys "
		    "in card memory.");

		if (is_t6(sc))
			SYSCTL_ADD_INT(ctx, children, OID_AUTO, "combo_wrs",
			    CTLFLAG_RW, &sc->tlst.combo_wrs, 0, "Attempt to "
			    "combine TCB field updates with TLS record work "
			    "requests.");
	}
#endif

#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		int i;
		char s[4];

		/*
		 * dev.t4nex.X.toe.
		 */
		oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe",
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE parameters");
		children = SYSCTL_CHILDREN(oid);

		sc->tt.cong_algorithm = -1;
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_algorithm",
		    CTLFLAG_RW, &sc->tt.cong_algorithm, 0, "congestion control "
		    "(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, "
		    "3 = highspeed)");

		sc->tt.sndbuf = -1;
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
		    &sc->tt.sndbuf, 0, "hardware send buffer");

		/* "ddp" is a hidden alias of "rx_zcopy"; both share tt.ddp. */
		sc->tt.ddp = 0;
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp",
		    CTLFLAG_RW | CTLFLAG_SKIP, &sc->tt.ddp, 0, "");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_zcopy", CTLFLAG_RW,
		    &sc->tt.ddp, 0, "Enable zero-copy aio_read(2)");

		sc->tt.rx_coalesce = -1;
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
		    CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");

		sc->tt.tls = 0;
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls", CTLTYPE_INT |
		    CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, sysctl_tls, "I",
		    "Inline TLS allowed");

		sc->tt.tx_align = -1;
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
		    CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");

		sc->tt.tx_zcopy = 0;
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy",
		    CTLFLAG_RW, &sc->tt.tx_zcopy, 0,
		    "Enable zero-copy aio_write(2)");

		sc->tt.cop_managed_offloading = !!t4_cop_managed_offloading;
		SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		    "cop_managed_offloading", CTLFLAG_RW,
		    &sc->tt.cop_managed_offloading, 0,
		    "COP (Connection Offload Policy) controls all TOE offload");

		sc->tt.autorcvbuf_inc = 16 * 1024;
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "autorcvbuf_inc",
		    CTLFLAG_RW, &sc->tt.autorcvbuf_inc, 0,
		    "autorcvbuf increment");

		sc->tt.update_hc_on_pmtu_change = 1;
		SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		    "update_hc_on_pmtu_change", CTLFLAG_RW,
		    &sc->tt.update_hc_on_pmtu_change, 0,
		    "Update hostcache entry if the PMTU changes");

		sc->tt.iso = 1;
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "iso", CTLFLAG_RW,
		    &sc->tt.iso, 0, "Enable iSCSI segmentation offload");

		/* Read-only views of the TP timer configuration. */
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick",
		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
		    sysctl_tp_tick, "A", "TP timer tick (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick",
		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1,
		    sysctl_tp_tick, "A", "TCP timestamp tick (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick",
		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2,
		    sysctl_tp_tick, "A", "DACK tick (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer",
		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
		    sysctl_tp_dack_timer, "IU", "DACK timer (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min",
		    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
		    A_TP_RXT_MIN, sysctl_tp_timer, "LU",
		    "Minimum retransmit interval (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max",
		    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
		    A_TP_RXT_MAX, sysctl_tp_timer, "LU",
		    "Maximum retransmit interval (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min",
		    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
		    A_TP_PERS_MIN, sysctl_tp_timer, "LU",
		    "Persist timer min (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max",
		    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
		    A_TP_PERS_MAX, sysctl_tp_timer, "LU",
		    "Persist timer max (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle",
		    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
		    A_TP_KEEP_IDLE, sysctl_tp_timer, "LU",
		    "Keepalive idle timer (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_interval",
		    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
		    A_TP_KEEP_INTVL, sysctl_tp_timer, "LU",
		    "Keepalive interval timer (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt",
		    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
		    A_TP_INIT_SRTT, sysctl_tp_timer, "LU", "Initial SRTT (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer",
		    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
		    A_TP_FINWAIT2_TIMER, sysctl_tp_timer, "LU",
		    "FINWAIT2 timer (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "syn_rexmt_count",
		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
		    S_SYNSHIFTMAX, sysctl_tp_shift_cnt, "IU",
		    "Number of SYN retransmissions before abort");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_count",
		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
		    S_RXTSHIFTMAXR2, sysctl_tp_shift_cnt, "IU",
		    "Number of retransmissions before abort");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_count",
		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
		    S_KEEPALIVEMAXR2, sysctl_tp_shift_cnt, "IU",
		    "Number of keepalive probes before abort");

		/* One read-only node per backoff slot (0..15). */
		oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rexmt_backoff",
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
		    "TOE retransmit backoffs");
		children = SYSCTL_CHILDREN(oid);
		for (i = 0; i < 16; i++) {
			snprintf(s, sizeof(s), "%u", i);
			SYSCTL_ADD_PROC(ctx, children, OID_AUTO, s,
			    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
			    i, sysctl_tp_backoff, "IU",
			    "TOE retransmit backoff");
		}
	}
#endif
}
/*
 * Register the per-VI sysctls (dev.v?(cxgbe|cxl).X).  Also fixes two typos
 * in user-visible descriptions: "VI identifer" -> "VI identifier" and
 * "work requestes" -> "work requests".
 */
void
vi_sysctls(struct vi_info *vi)
{
	struct sysctl_ctx_list *ctx = &vi->ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;

	/*
	 * dev.v?(cxgbe|cxl).X.
	 */
	oid = device_get_sysctl_tree(vi->dev);
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
	    vi->viid, "VI identifier");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
	    &vi->nrxq, 0, "# of rx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
	    &vi->ntxq, 0, "# of tx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
	    &vi->first_rxq, 0, "index of first rx queue");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
	    &vi->first_txq, 0, "index of first tx queue");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_base", CTLFLAG_RD, NULL,
	    vi->rss_base, "start of RSS indirection table");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL,
	    vi->rss_size, "size of RSS indirection table");

	if (IS_MAIN_VI(vi)) {
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
		    sysctl_noflowq, "IU",
		    "Reserve queue 0 for non-flowid packets");
	}

	if (vi->adapter->flags & IS_VF) {
		/* VFs must use VM work requests; expose it read-only. */
		MPASS(vi->flags & TX_USES_VM_WR);
		SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_vm_wr", CTLFLAG_RD,
		    NULL, 1, "use VM work requests for transmit");
	} else {
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_vm_wr",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
		    sysctl_tx_vm_wr, "I", "use VM work requests for transmit");
	}

#ifdef TCP_OFFLOAD
	if (vi->nofldrxq != 0) {
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
		    &vi->nofldrxq, 0,
		    "# of rx queues for offloaded TCP connections");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
		    CTLFLAG_RD, &vi->first_ofld_rxq, 0,
		    "index of first TOE rx queue");
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx_ofld",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
		    sysctl_holdoff_tmr_idx_ofld, "I",
		    "holdoff timer index for TOE queues");
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx_ofld",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
		    sysctl_holdoff_pktc_idx_ofld, "I",
		    "holdoff packet counter index for TOE queues");
	}
#endif
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	if (vi->nofldtxq != 0) {
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
		    &vi->nofldtxq, 0,
		    "# of tx queues for TOE/ETHOFLD");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
		    CTLFLAG_RD, &vi->first_ofld_txq, 0,
		    "index of first TOE/ETHOFLD tx queue");
	}
#endif
#ifdef DEV_NETMAP
	if (vi->nnmrxq != 0) {
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
		    &vi->nnmrxq, 0, "# of netmap rx queues");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
		    &vi->nnmtxq, 0, "# of netmap tx queues");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
		    CTLFLAG_RD, &vi->first_nm_rxq, 0,
		    "index of first netmap rx queue");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
		    CTLFLAG_RD, &vi->first_nm_txq, 0,
		    "index of first netmap tx queue");
	}
#endif

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
	    sysctl_holdoff_tmr_idx, "I", "holdoff timer index");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
	    sysctl_holdoff_pktc_idx, "I", "holdoff packet counter index");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
	    sysctl_qsize_rxq, "I", "rx queue size");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
	    sysctl_qsize_txq, "I", "tx queue size");
}
static void
cxgbe_sysctls(struct port_info *pi)
{
struct sysctl_ctx_list *ctx = &pi->ctx;
struct sysctl_oid *oid;
struct sysctl_oid_list *children, *children2;
struct adapter *sc = pi->adapter;
int i;
char name[16];
static char *tc_flags = {"\20\1USER"};
/*
* dev.cxgbe.X.
*/
oid = device_get_sysctl_tree(pi->dev);
children = SYSCTL_CHILDREN(oid);
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc",
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 0,
sysctl_linkdnrc, "A", "reason why link is down");
if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 0,
sysctl_btphy, "I", "PHY temperature (in Celsius)");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 1,
sysctl_btphy, "I", "PHY firmware version");
}
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0,
sysctl_pause_settings, "A",
"PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "link_fec",
CTLTYPE_STRING | CTLFLAG_MPSAFE, pi, 0, sysctl_link_fec, "A",
"FEC in use on the link");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "requested_fec",
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0,
sysctl_requested_fec, "A",
"FECs to use (bit 0 = RS, 1 = FC, 2 = none, 5 = auto, 6 = module)");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "module_fec",
CTLTYPE_STRING | CTLFLAG_MPSAFE, pi, 0, sysctl_module_fec, "A",
"FEC recommended by the cable/transceiver");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg",
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0,
sysctl_autoneg, "I",
"autonegotiation (-1 = not supported)");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "force_fec",
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0,
sysctl_force_fec, "I", "when to use FORCE_FEC bit for link config");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rcaps", CTLFLAG_RD,
&pi->link_cfg.requested_caps, 0, "L1 config requested by driver");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "pcaps", CTLFLAG_RD,
&pi->link_cfg.pcaps, 0, "port capabilities");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "acaps", CTLFLAG_RD,
&pi->link_cfg.acaps, 0, "advertised capabilities");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpacaps", CTLFLAG_RD,
&pi->link_cfg.lpacaps, 0, "link partner advertised capabilities");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL,
port_top_speed(pi), "max speed (in Gbps)");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "mps_bg_map", CTLFLAG_RD, NULL,
pi->mps_bg_map, "MPS buffer group map");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_e_chan_map", CTLFLAG_RD,
NULL, pi->rx_e_chan_map, "TP rx e-channel map");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_chan", CTLFLAG_RD, NULL,
pi->tx_chan, "TP tx c-channel");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_chan", CTLFLAG_RD, NULL,
pi->rx_chan, "TP rx c-channel");
if (sc->flags & IS_VF)
return;
/*
* dev.(cxgbe|cxl).X.tc.
*/
oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
"Tx scheduler traffic classes (cl_rl)");
children2 = SYSCTL_CHILDREN(oid);
SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "pktsize",
CTLFLAG_RW, &pi->sched_params->pktsize, 0,
"pktsize for per-flow cl-rl (0 means up to the driver )");
SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "burstsize",
CTLFLAG_RW, &pi->sched_params->burstsize, 0,
"burstsize for per-flow cl-rl (0 means up to the driver)");
for (i = 0; i < sc->params.nsched_cls; i++) {
struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i];
snprintf(name, sizeof(name), "%d", i);
children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx,
SYSCTL_CHILDREN(oid), OID_AUTO, name,
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "traffic class"));
SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "state",
CTLFLAG_RD, &tc->state, 0, "current state");
SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "flags",
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, tc_flags,
(uintptr_t)&tc->flags, sysctl_bitfield_8b, "A", "flags");
SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount",
CTLFLAG_RD, &tc->refcount, 0, "references to this class");
SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params",
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
(pi->port_id << 16) | i, sysctl_tc_params, "A",
"traffic class parameters");
}
/*
* dev.cxgbe.X.stats.
*/
oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "port statistics");
children = SYSCTL_CHILDREN(oid);
SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
&pi->tx_parse_error, 0,
"# of tx packets with invalid length or # of segments");
#define T4_REGSTAT(name, stat, desc) \
SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \
CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, \
t4_port_reg(sc, pi->tx_chan, A_MPS_PORT_STAT_##stat##_L), \
sysctl_handle_t4_reg64, "QU", desc)
/* We get these from port_stats and they may be stale by up to 1s */
#define T4_PORTSTAT(name, desc) \
SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
&pi->stats.name, desc)
T4_REGSTAT(tx_octets, TX_PORT_BYTES, "# of octets in good frames");
T4_REGSTAT(tx_frames, TX_PORT_FRAMES, "total # of good frames");
T4_REGSTAT(tx_bcast_frames, TX_PORT_BCAST, "# of broadcast frames");
T4_REGSTAT(tx_mcast_frames, TX_PORT_MCAST, "# of multicast frames");
T4_REGSTAT(tx_ucast_frames, TX_PORT_UCAST, "# of unicast frames");
T4_REGSTAT(tx_error_frames, TX_PORT_ERROR, "# of error frames");
T4_REGSTAT(tx_frames_64, TX_PORT_64B, "# of tx frames in this range");
T4_REGSTAT(tx_frames_65_127, TX_PORT_65B_127B, "# of tx frames in this range");
T4_REGSTAT(tx_frames_128_255, TX_PORT_128B_255B, "# of tx frames in this range");
T4_REGSTAT(tx_frames_256_511, TX_PORT_256B_511B, "# of tx frames in this range");
T4_REGSTAT(tx_frames_512_1023, TX_PORT_512B_1023B, "# of tx frames in this range");
T4_REGSTAT(tx_frames_1024_1518, TX_PORT_1024B_1518B, "# of tx frames in this range");
T4_REGSTAT(tx_frames_1519_max, TX_PORT_1519B_MAX, "# of tx frames in this range");
T4_REGSTAT(tx_drop, TX_PORT_DROP, "# of dropped tx frames");
T4_REGSTAT(tx_pause, TX_PORT_PAUSE, "# of pause frames transmitted");
T4_REGSTAT(tx_ppp0, TX_PORT_PPP0, "# of PPP prio 0 frames transmitted");
T4_REGSTAT(tx_ppp1, TX_PORT_PPP1, "# of PPP prio 1 frames transmitted");
T4_REGSTAT(tx_ppp2, TX_PORT_PPP2, "# of PPP prio 2 frames transmitted");
T4_REGSTAT(tx_ppp3, TX_PORT_PPP3, "# of PPP prio 3 frames transmitted");
T4_REGSTAT(tx_ppp4, TX_PORT_PPP4, "# of PPP prio 4 frames transmitted");
T4_REGSTAT(tx_ppp5, TX_PORT_PPP5, "# of PPP prio 5 frames transmitted");
T4_REGSTAT(tx_ppp6, TX_PORT_PPP6, "# of PPP prio 6 frames transmitted");
T4_REGSTAT(tx_ppp7, TX_PORT_PPP7, "# of PPP prio 7 frames transmitted");
T4_REGSTAT(rx_octets, RX_PORT_BYTES, "# of octets in good frames");
T4_REGSTAT(rx_frames, RX_PORT_FRAMES, "total # of good frames");
T4_REGSTAT(rx_bcast_frames, RX_PORT_BCAST, "# of broadcast frames");
T4_REGSTAT(rx_mcast_frames, RX_PORT_MCAST, "# of multicast frames");
T4_REGSTAT(rx_ucast_frames, RX_PORT_UCAST, "# of unicast frames");
T4_REGSTAT(rx_too_long, RX_PORT_MTU_ERROR, "# of frames exceeding MTU");
T4_REGSTAT(rx_jabber, RX_PORT_MTU_CRC_ERROR, "# of jabber frames");
if (is_t6(sc)) {
T4_PORTSTAT(rx_fcs_err,
"# of frames received with bad FCS since last link up");
} else {
T4_REGSTAT(rx_fcs_err, RX_PORT_CRC_ERROR,
"# of frames received with bad FCS");
}
T4_REGSTAT(rx_len_err, RX_PORT_LEN_ERROR, "# of frames received with length error");
T4_REGSTAT(rx_symbol_err, RX_PORT_SYM_ERROR, "symbol errors");
T4_REGSTAT(rx_runt, RX_PORT_LESS_64B, "# of short frames received");
T4_REGSTAT(rx_frames_64, RX_PORT_64B, "# of rx frames in this range");
T4_REGSTAT(rx_frames_65_127, RX_PORT_65B_127B, "# of rx frames in this range");
T4_REGSTAT(rx_frames_128_255, RX_PORT_128B_255B, "# of rx frames in this range");
T4_REGSTAT(rx_frames_256_511, RX_PORT_256B_511B, "# of rx frames in this range");
T4_REGSTAT(rx_frames_512_1023, RX_PORT_512B_1023B, "# of rx frames in this range");
T4_REGSTAT(rx_frames_1024_1518, RX_PORT_1024B_1518B, "# of rx frames in this range");
T4_REGSTAT(rx_frames_1519_max, RX_PORT_1519B_MAX, "# of rx frames in this range");
T4_REGSTAT(rx_pause, RX_PORT_PAUSE, "# of pause frames received");
T4_REGSTAT(rx_ppp0, RX_PORT_PPP0, "# of PPP prio 0 frames received");
T4_REGSTAT(rx_ppp1, RX_PORT_PPP1, "# of PPP prio 1 frames received");
T4_REGSTAT(rx_ppp2, RX_PORT_PPP2, "# of PPP prio 2 frames received");
T4_REGSTAT(rx_ppp3, RX_PORT_PPP3, "# of PPP prio 3 frames received");
T4_REGSTAT(rx_ppp4, RX_PORT_PPP4, "# of PPP prio 4 frames received");
T4_REGSTAT(rx_ppp5, RX_PORT_PPP5, "# of PPP prio 5 frames received");
T4_REGSTAT(rx_ppp6, RX_PORT_PPP6, "# of PPP prio 6 frames received");
T4_REGSTAT(rx_ppp7, RX_PORT_PPP7, "# of PPP prio 7 frames received");
T4_PORTSTAT(rx_ovflow0, "# drops due to buffer-group 0 overflows");
T4_PORTSTAT(rx_ovflow1, "# drops due to buffer-group 1 overflows");
T4_PORTSTAT(rx_ovflow2, "# drops due to buffer-group 2 overflows");
T4_PORTSTAT(rx_ovflow3, "# drops due to buffer-group 3 overflows");
T4_PORTSTAT(rx_trunc0, "# of buffer-group 0 truncated packets");
T4_PORTSTAT(rx_trunc1, "# of buffer-group 1 truncated packets");
T4_PORTSTAT(rx_trunc2, "# of buffer-group 2 truncated packets");
T4_PORTSTAT(rx_trunc3, "# of buffer-group 3 truncated packets");
#undef T4_REGSTAT
#undef T4_PORTSTAT
}
static int
sysctl_int_array(SYSCTL_HANDLER_ARGS)
{
int rc, *i, space = 0;
struct sbuf sb;
sbuf_new_for_sysctl(&sb, NULL, 64, req);
for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
if (space)
sbuf_printf(&sb, " ");
sbuf_printf(&sb, "%d", *i);
space = 1;
}
rc = sbuf_finish(&sb);
sbuf_delete(&sb);
return (rc);
}
/*
 * Display an 8-bit field as a set of flag names.  arg1 is the "\20..."
 * bit-description string for %b and arg2 holds the address of the byte.
 */
static int
sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int rc;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "%b", *(uint8_t *)(uintptr_t)arg2, (char *)arg1);
	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
/*
 * Same as sysctl_bitfield_8b but for a 16-bit field whose address is in
 * arg2; arg1 is the "\20..." bit-description string for %b.
 */
static int
sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int rc;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "%b", *(uint16_t *)(uintptr_t)arg2, (char *)arg1);
	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
/*
 * Read one of two undocumented registers of a BT PHY over MDIO (device
 * address 0x1e, register selected by arg2) and report the value.
 * NOTE(review): register meanings are not documented here — the existing
 * XXX below flags them as magic; do not rely on specific semantics.
 */
static int
sysctl_btphy(SYSCTL_HANDLER_ARGS)
{
struct port_info *pi = arg1;
int op = arg2;
struct adapter *sc = pi->adapter;
u_int v;
int rc;
/* Serialize against other operations on this port. */
rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
if (rc)
return (rc);
if (hw_off_limits(sc))
rc = ENXIO;
else {
/* XXX: magic numbers */
rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e,
op ? 0x20 : 0xc820, &v);
}
end_synchronized_op(sc, 0);
if (rc)
return (rc);
/* For op 0 only the high byte of the register is reported. */
if (op == 0)
v /= 256;
rc = sysctl_handle_int(oidp, &v, 0, req);
return (rc);
}
/*
 * Control whether txq 0 is reserved for traffic without a flowid.  Any
 * non-zero write enables the reservation, but only if the VI has more
 * than one txq; reads return the current setting.
 */
static int
sysctl_noflowq(SYSCTL_HANDLER_ARGS)
{
	struct vi_info *vi = arg1;
	int rc, val;

	val = vi->rsrv_noflowq;
	rc = sysctl_handle_int(oidp, &val, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	/* Reserving the only txq would leave nothing for normal traffic. */
	vi->rsrv_noflowq = (val >= 1 && vi->ntxq > 1) ? 1 : 0;

	return (rc);
}
/*
 * Toggle the use of VM work requests (TX_USES_VM_WR) for the VI's tx path.
 * Only allowed while the interface is down; every txq's cpl_ctrl0 template
 * and packed-WR packet limit are updated to match the new mode.
 */
static int
sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS)
{
struct vi_info *vi = arg1;
struct adapter *sc = vi->adapter;
int rc, val, i;
MPASS(!(sc->flags & IS_VF));
val = vi->flags & TX_USES_VM_WR ? 1 : 0;
rc = sysctl_handle_int(oidp, &val, 0, req);
if (rc != 0 || req->newptr == NULL)
return (rc);
/* Boolean knob: only 0 and 1 are valid. */
if (val != 0 && val != 1)
return (EINVAL);
rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
"t4txvm");
if (rc)
return (rc);
if (hw_off_limits(sc))
rc = ENXIO;
else if (if_getdrvflags(vi->ifp) & IFF_DRV_RUNNING) {
/*
 * We don't want parse_pkt to run with one setting (VF or PF)
 * and then eth_tx to see a different setting but still use
 * stale information calculated by parse_pkt.
 */
rc = EBUSY;
} else {
struct port_info *pi = vi->pi;
struct sge_txq *txq;
uint32_t ctrl0;
uint8_t npkt = sc->params.max_pkts_per_eth_tx_pkts_wr;
if (val) {
/* VM WR mode: smaller TSO SGL and no PF/VF info in ctrl0. */
vi->flags |= TX_USES_VM_WR;
if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_VM_TSO);
ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
V_TXPKT_INTF(pi->tx_chan));
if (!(sc->flags & IS_VF))
npkt--;
} else {
vi->flags &= ~TX_USES_VM_WR;
if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_TSO);
ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
}
/* Propagate the new template/limit to every txq of this VI. */
for_each_txq(vi, i, txq) {
txq->cpl_ctrl0 = ctrl0;
txq->txp.max_npkt = npkt;
}
}
end_synchronized_op(sc, LOCK_HELD);
return (rc);
}
/*
 * Set the interrupt holdoff timer index for all rx queues of the VI.  The
 * new value takes effect on live queues immediately via intr_params.
 */
static int
sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
{
struct vi_info *vi = arg1;
struct adapter *sc = vi->adapter;
int idx, rc, i;
struct sge_rxq *rxq;
uint8_t v;
idx = vi->tmr_idx;
rc = sysctl_handle_int(oidp, &idx, 0, req);
if (rc != 0 || req->newptr == NULL)
return (rc);
if (idx < 0 || idx >= SGE_NTIMERS)
return (EINVAL);
rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
"t4tmr");
if (rc)
return (rc);
/* Keep the counter-enable bit consistent with the pktc setting. */
v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1);
for_each_rxq(vi, i, rxq) {
#ifdef atomic_store_rel_8
atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
/* No 8-bit release store on this platform; plain store. */
rxq->iq.intr_params = v;
#endif
}
vi->tmr_idx = idx;
end_synchronized_op(sc, LOCK_HELD);
return (0);
}
/*
 * Set the interrupt holdoff packet-count index for the VI.  -1 disables
 * packet counting.  Can only be changed before the queues are created.
 */
static int
sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
{
	struct vi_info *vi = arg1;
	struct adapter *sc = vi->adapter;
	int rc, new_idx;

	new_idx = vi->pktc_idx;
	rc = sysctl_handle_int(oidp, &new_idx, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	if (new_idx < -1 || new_idx >= SGE_NCOUNTERS)
		return (EINVAL);

	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4pktc");
	if (rc)
		return (rc);

	if (vi->flags & VI_INIT_DONE)
		rc = EBUSY;	/* cannot be changed once the queues are created */
	else
		vi->pktc_idx = new_idx;

	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}
/*
 * Set the rx queue size for the VI.  Must be >= 128 and a multiple of 8;
 * can only be changed before the queues are created.
 */
static int
sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
{
	struct vi_info *vi = arg1;
	struct adapter *sc = vi->adapter;
	int rc, new_qsize;

	new_qsize = vi->qsize_rxq;
	rc = sysctl_handle_int(oidp, &new_qsize, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	if (new_qsize < 128 || (new_qsize & 7))
		return (EINVAL);

	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4rxqs");
	if (rc)
		return (rc);

	if (vi->flags & VI_INIT_DONE)
		rc = EBUSY;	/* cannot be changed once the queues are created */
	else
		vi->qsize_rxq = new_qsize;

	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}
/*
 * Set the tx queue size for the VI.  Must be within [128, 65536]; can
 * only be changed before the queues are created.
 */
static int
sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
{
	struct vi_info *vi = arg1;
	struct adapter *sc = vi->adapter;
	int rc, new_qsize;

	new_qsize = vi->qsize_txq;
	rc = sysctl_handle_int(oidp, &new_qsize, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	if (new_qsize < 128 || new_qsize > 65536)
		return (EINVAL);

	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4txqs");
	if (rc)
		return (rc);

	if (vi->flags & VI_INIT_DONE)
		rc = EBUSY;	/* cannot be changed once the queues are created */
	else
		vi->qsize_txq = new_qsize;

	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}
/*
 * Pause (flow control) settings.  Reads report the operational settings
 * while the link is up (plus the requested AUTO bit), or the requested
 * settings otherwise.  Writes take a single digit encoding a bitmask of
 * PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG and push the new config to the
 * hardware if any VI is up.
 */
static int
sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
{
struct port_info *pi = arg1;
struct adapter *sc = pi->adapter;
struct link_config *lc = &pi->link_cfg;
int rc;
if (req->newptr == NULL) {
struct sbuf *sb;
static char *bits = "\20\1RX\2TX\3AUTO";
rc = sysctl_wire_old_buffer(req, 0);
if (rc != 0)
return(rc);
sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (sb == NULL)
return (ENOMEM);
if (lc->link_ok) {
/* Operational RX/TX state plus the requested AUTO bit. */
sbuf_printf(sb, "%b", (lc->fc & (PAUSE_TX | PAUSE_RX)) |
(lc->requested_fc & PAUSE_AUTONEG), bits);
} else {
sbuf_printf(sb, "%b", lc->requested_fc & (PAUSE_TX |
PAUSE_RX | PAUSE_AUTONEG), bits);
}
rc = sbuf_finish(sb);
sbuf_delete(sb);
} else {
char s[2];
int n;
/* Present the current requested value as a single digit. */
s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX |
PAUSE_AUTONEG));
s[1] = 0;
rc = sysctl_handle_string(oidp, s, sizeof(s), req);
if (rc != 0)
return(rc);
if (s[1] != 0)
return (EINVAL);
if (s[0] < '0' || s[0] > '9')
return (EINVAL); /* not a number */
n = s[0] - '0';
if (n & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG))
return (EINVAL); /* some other bit is set too */
rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
"t4PAUSE");
if (rc)
return (rc);
if (!hw_off_limits(sc)) {
PORT_LOCK(pi);
lc->requested_fc = n;
fixup_link_config(pi);
/* Apply immediately only if the port is in use. */
if (pi->up_vis > 0)
rc = apply_link_config(pi);
set_current_media(pi);
PORT_UNLOCK(pi);
}
end_synchronized_op(sc, 0);
}
return (rc);
}
/*
 * Report the FEC currently in use on the link, or "no link" if the link
 * is down (operational FEC is known only while the link is up).
 */
static int
sysctl_link_fec(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct link_config *lc = &pi->link_cfg;
	struct sbuf *sb;
	int rc;
	static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD1\5RSVD2";

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (sb == NULL)
		return (ENOMEM);

	if (lc->link_ok)
		sbuf_printf(sb, "%b", lc->fec, bits);
	else
		sbuf_printf(sb, "no link");

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
/*
 * Requested FEC.  Reads show the driver's requested FEC as a bitfield.
 * Writes accept -1 (or any value with the FEC_AUTO bit) for automatic
 * selection, 0/FEC_NONE to disable FEC, or a mask of supported FEC bits
 * (optionally with FEC_MODULE).  The old value is restored if the
 * firmware rejects the new link config.
 */
static int
sysctl_requested_fec(SYSCTL_HANDLER_ARGS)
{
struct port_info *pi = arg1;
struct adapter *sc = pi->adapter;
struct link_config *lc = &pi->link_cfg;
int rc;
int8_t old;
if (req->newptr == NULL) {
struct sbuf *sb;
static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2"
"\5RSVD3\6auto\7module";
rc = sysctl_wire_old_buffer(req, 0);
if (rc != 0)
return(rc);
sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (sb == NULL)
return (ENOMEM);
sbuf_printf(sb, "%b", lc->requested_fec, bits);
rc = sbuf_finish(sb);
sbuf_delete(sb);
} else {
char s[8];
int n;
/* AUTO is displayed as -1, everything else as the raw mask. */
snprintf(s, sizeof(s), "%d",
lc->requested_fec == FEC_AUTO ? -1 :
lc->requested_fec & (M_FW_PORT_CAP32_FEC | FEC_MODULE));
rc = sysctl_handle_string(oidp, s, sizeof(s), req);
if (rc != 0)
return(rc);
n = strtol(&s[0], NULL, 0);
if (n < 0 || n & FEC_AUTO)
n = FEC_AUTO;
else if (n & ~(M_FW_PORT_CAP32_FEC | FEC_MODULE))
return (EINVAL);/* some other bit is set too */
rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
"t4reqf");
if (rc)
return (rc);
PORT_LOCK(pi);
old = lc->requested_fec;
if (n == FEC_AUTO)
lc->requested_fec = FEC_AUTO;
else if (n == 0 || n == FEC_NONE)
lc->requested_fec = FEC_NONE;
else {
/* Every requested FEC bit must be in the port's caps. */
if ((lc->pcaps |
V_FW_PORT_CAP32_FEC(n & M_FW_PORT_CAP32_FEC)) !=
lc->pcaps) {
rc = ENOTSUP;
goto done;
}
lc->requested_fec = n & (M_FW_PORT_CAP32_FEC |
FEC_MODULE);
}
if (!hw_off_limits(sc)) {
fixup_link_config(pi);
if (pi->up_vis > 0) {
rc = apply_link_config(pi);
/* Roll back on failure so state matches hardware. */
if (rc != 0) {
lc->requested_fec = old;
if (rc == FW_EPROTO)
rc = ENOTSUP;
}
}
}
done:
PORT_UNLOCK(pi);
end_synchronized_op(sc, 0);
}
return (rc);
}
/*
 * Report the FEC recommended by the installed transceiver module, or "n/a"
 * when no module is plugged in or the port doesn't support FEC.
 *
 * Bug fix: the old code jumped to a single cleanup label when
 * begin_synchronized_op() failed, which called end_synchronized_op() for an
 * operation that was never started (unbalanced release).  Two cleanup
 * labels are used now so end_synchronized_op() runs only after a
 * successful begin.
 */
static int
sysctl_module_fec(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	struct link_config *lc = &pi->link_cfg;
	int rc;
	int8_t fec;
	struct sbuf *sb;
	static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2\5RSVD3";

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);
	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (sb == NULL)
		return (ENOMEM);

	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mfec") != 0) {
		rc = EBUSY;
		goto done;	/* op not started, must not end it */
	}
	if (hw_off_limits(sc)) {
		rc = ENXIO;
		goto done1;
	}
	PORT_LOCK(pi);
	if (pi->up_vis == 0) {
		/*
		 * If all the interfaces are administratively down the firmware
		 * does not report transceiver changes.  Refresh port info here.
		 * This is the only reason we have a synchronized op in this
		 * function.  Just PORT_LOCK would have been enough otherwise.
		 */
		t4_update_port_info(pi);
	}
	fec = lc->fec_hint;
	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE ||
	    !fec_supported(lc->pcaps)) {
		/* No module, or the port doesn't do FEC at all. */
		sbuf_printf(sb, "n/a");
	} else {
		if (fec == 0)
			fec = FEC_NONE;
		sbuf_printf(sb, "%b", fec & M_FW_PORT_CAP32_FEC, bits);
	}
	rc = sbuf_finish(sb);
	PORT_UNLOCK(pi);
done1:
	end_synchronized_op(sc, 0);
done:
	sbuf_delete(sb);
	return (rc);
}
/*
 * Autonegotiation control.  Reads return 1/0 for enabled/disabled, or -1
 * if the port doesn't support autoneg at all.  Writes of 0 disable, 1
 * enable (if supported), anything else selects AUTONEG_AUTO.
 */
static int
sysctl_autoneg(SYSCTL_HANDLER_ARGS)
{
struct port_info *pi = arg1;
struct adapter *sc = pi->adapter;
struct link_config *lc = &pi->link_cfg;
int rc, val;
if (lc->pcaps & FW_PORT_CAP32_ANEG)
val = lc->requested_aneg == AUTONEG_DISABLE ? 0 : 1;
else
val = -1;
rc = sysctl_handle_int(oidp, &val, 0, req);
if (rc != 0 || req->newptr == NULL)
return (rc);
if (val == 0)
val = AUTONEG_DISABLE;
else if (val == 1)
val = AUTONEG_ENABLE;
else
val = AUTONEG_AUTO;
rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
"t4aneg");
if (rc)
return (rc);
PORT_LOCK(pi);
/* Can't force-enable autoneg on a port that doesn't support it. */
if (val == AUTONEG_ENABLE && !(lc->pcaps & FW_PORT_CAP32_ANEG)) {
rc = ENOTSUP;
goto done;
}
lc->requested_aneg = val;
if (!hw_off_limits(sc)) {
fixup_link_config(pi);
/* Apply immediately only if the port is in use. */
if (pi->up_vis > 0)
rc = apply_link_config(pi);
set_current_media(pi);
}
done:
PORT_UNLOCK(pi);
end_synchronized_op(sc, 0);
return (rc);
}
/*
 * Control the FORCE_FEC bit used in link configuration: -1, 0, or 1.
 * Requires FW_PORT_CAP32_FORCE_FEC in the port's capabilities.
 */
static int
sysctl_force_fec(SYSCTL_HANDLER_ARGS)
{
struct port_info *pi = arg1;
struct adapter *sc = pi->adapter;
struct link_config *lc = &pi->link_cfg;
int rc, val;
val = lc->force_fec;
MPASS(val >= -1 && val <= 1);
rc = sysctl_handle_int(oidp, &val, 0, req);
if (rc != 0 || req->newptr == NULL)
return (rc);
if (!(lc->pcaps & FW_PORT_CAP32_FORCE_FEC))
return (ENOTSUP);
if (val < -1 || val > 1)
return (EINVAL);
rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4ff");
if (rc)
return (rc);
PORT_LOCK(pi);
lc->force_fec = val;
if (!hw_off_limits(sc)) {
fixup_link_config(pi);
/* Apply immediately only if the port is in use. */
if (pi->up_vis > 0)
rc = apply_link_config(pi);
}
PORT_UNLOCK(pi);
end_synchronized_op(sc, 0);
return (rc);
}
/*
 * Generic handler that reports the 64-bit register at offset arg2 (read
 * under reg_lock, skipped if the hardware is off limits).
 */
static int
sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	const int reg = arg2;
	uint64_t val = 0;
	int rc = 0;

	mtx_lock(&sc->reg_lock);
	if (hw_off_limits(sc))
		rc = ENXIO;
	else
		val = t4_read_reg64(sc, reg);
	mtx_unlock(&sc->reg_lock);

	return (rc == 0 ? sysctl_handle_64(oidp, &val, 0, req) : rc);
}
/*
 * Report the chip temperature as obtained from the firmware's diag
 * parameter.  A reading of 0 means "unknown" and is displayed as -1.
 */
static int
sysctl_temperature(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	const uint32_t param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
	uint32_t p, val;
	int rc, t;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
	if (rc)
		return (rc);
	if (hw_off_limits(sc))
		rc = ENXIO;
	else {
		p = param;
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &p, &val);
	}
	end_synchronized_op(sc, 0);
	if (rc)
		return (rc);

	/* unknown is returned as 0 but we display -1 in that case */
	t = val == 0 ? -1 : val;

	return (sysctl_handle_int(oidp, &t, 0, req));
}
/*
 * Report the core VDD as obtained from the firmware's diag parameter.
 * The value is queried once and then cached in sc->params.core_vdd.
 */
static int
sysctl_vdd(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
int rc;
uint32_t param, val;
if (sc->params.core_vdd == 0) {
rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
"t4vdd");
if (rc)
return (rc);
if (hw_off_limits(sc))
rc = ENXIO;
else {
param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1,
&param, &val);
}
end_synchronized_op(sc, 0);
if (rc)
return (rc);
/* Cache so subsequent reads skip the firmware round trip. */
sc->params.core_vdd = val;
}
return (sysctl_handle_int(oidp, &sc->params.core_vdd, 0, req));
}
/*
 * Writing a positive value resets the chip's temperature sensor via a
 * firmware diag parameter.  Requires firmware >= 1.24.7.0 and a T5 or
 * later chip.  Reads report how many resets have been performed.
 */
static int
sysctl_reset_sensor(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
int rc, v;
uint32_t param, val;
v = sc->sensor_resets;
rc = sysctl_handle_int(oidp, &v, 0, req);
if (rc != 0 || req->newptr == NULL || v <= 0)
return (rc);
if (sc->params.fw_vers < FW_VERSION32(1, 24, 7, 0) ||
chip_id(sc) < CHELSIO_T5)
return (ENOTSUP);
rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4srst");
if (rc)
return (rc);
if (hw_off_limits(sc))
rc = ENXIO;
else {
param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_RESET_TMP_SENSOR));
val = 1;
rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
}
end_synchronized_op(sc, 0);
/* Count only resets that the firmware actually performed. */
if (rc == 0)
sc->sensor_resets++;
return (rc);
}
/*
 * Report the firmware's load averages (three values packed one per byte
 * in the queried parameter).  Only debug and custom firmwares provide
 * this; 0xffffffff means not available.
 */
static int
sysctl_loadavg(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
struct sbuf *sb;
int rc;
uint32_t param, val;
rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg");
if (rc)
return (rc);
if (hw_off_limits(sc))
rc = ENXIO;
else {
param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD);
rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
}
end_synchronized_op(sc, 0);
if (rc)
return (rc);
rc = sysctl_wire_old_buffer(req, 0);
if (rc != 0)
return (rc);
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
if (val == 0xffffffff) {
/* Only debug and custom firmwares report load averages. */
sbuf_printf(sb, "not available");
} else {
/* One average per byte: bits 0-7, 8-15, 16-23. */
sbuf_printf(sb, "%d %d %d", val & 0xff, (val >> 8) & 0xff,
(val >> 16) & 0xff);
}
rc = sbuf_finish(sb);
sbuf_delete(sb);
return (rc);
}
/*
 * Dump the TP congestion control table: per-window increment values for
 * each MTU, plus the a_wnd parameter and decrement factor per window.
 */
static int
sysctl_cctrl(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
struct sbuf *sb;
int rc, i;
uint16_t incr[NMTUS][NCCTRL_WIN];
/* Decrement factors, indexed by params.b_wnd[]. */
static const char *dec_fac[] = {
"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
"0.9375"
};
rc = sysctl_wire_old_buffer(req, 0);
if (rc != 0)
return (rc);
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
else
t4_read_cong_tbl(sc, incr);
mtx_unlock(&sc->reg_lock);
if (rc)
goto done;
/* Two output lines per window: MTUs 0-7, then MTUs 8-15. */
for (i = 0; i < NCCTRL_WIN; ++i) {
sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
incr[5][i], incr[6][i], incr[7][i]);
sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
incr[8][i], incr[9][i], incr[10][i], incr[11][i],
incr[12][i], incr[13][i], incr[14][i], incr[15][i],
sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
}
rc = sbuf_finish(sb);
done:
sbuf_delete(sb);
return (rc);
}
/* Names of the CIM queues: IBQs first, then OBQs (T5+ has two extra OBQs). */
static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
"SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
};
/*
 * Dump the contents of one CIM inbound or outbound queue.  arg2 selects
 * the queue: 0 .. CIM_NUM_IBQ-1 are IBQs, the rest are OBQs.  The read
 * helpers return a negative errno or the number of 32-bit words read.
 */
static int
sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
struct sbuf *sb;
int rc, i, n, qid = arg2;
uint32_t *buf, *p;
char *qtype;
u_int cim_num_obq = sc->chip_params->cim_num_obq;
KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
("%s: bad qid %d\n", __func__, qid));
if (qid < CIM_NUM_IBQ) {
/* inbound queue */
qtype = "IBQ";
n = 4 * CIM_IBQ_SIZE;
buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = -ENXIO;
else
rc = t4_read_cim_ibq(sc, qid, buf, n);
mtx_unlock(&sc->reg_lock);
} else {
/* outbound queue */
qtype = "OBQ";
qid -= CIM_NUM_IBQ;
n = 4 * cim_num_obq * CIM_OBQ_SIZE;
buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = -ENXIO;
else
rc = t4_read_cim_obq(sc, qid, buf, n);
mtx_unlock(&sc->reg_lock);
}
/* Negative rc is an errno from the read; flip the sign and bail. */
if (rc < 0) {
rc = -rc;
goto done;
}
n = rc * sizeof(uint32_t); /* rc has # of words actually read */
rc = sysctl_wire_old_buffer(req, 0);
if (rc != 0)
goto done;
sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
if (sb == NULL) {
rc = ENOMEM;
goto done;
}
/* qname is indexed by the original arg2, before the OBQ adjustment. */
sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
for (i = 0, p = buf; i < n; i += 16, p += 4)
sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
p[2], p[3]);
rc = sbuf_finish(sb);
sbuf_delete(sb);
done:
free(buf, M_CXGBE);
return (rc);
}
/*
 * Format a T4/T5 CIM logic analyzer capture into sb.  Entries are 8 words
 * each; the layout depends on whether the LA captured PC only
 * (F_UPDBGLACAPTPCONLY) or full load-store information.
 */
static void
sbuf_cim_la4(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg)
{
uint32_t *p;
sbuf_printf(sb, "Status Data PC%s",
cfg & F_UPDBGLACAPTPCONLY ? "" :
" LS0Stat LS0Addr LS0Data");
for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) {
if (cfg & F_UPDBGLACAPTPCONLY) {
/* PC-only mode packs three records into one 8-word entry. */
sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff,
p[6], p[7]);
sbuf_printf(sb, "\n %02x %02x%06x %02x%06x",
(p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
p[4] & 0xff, p[5] >> 8);
sbuf_printf(sb, "\n %02x %x%07x %x%07x",
(p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
p[1] & 0xf, p[2] >> 4);
} else {
sbuf_printf(sb,
"\n %02x %x%07x %x%07x %08x %08x "
"%08x%08x%08x%08x",
(p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
p[6], p[7]);
}
}
}
/*
 * Format a T6+ CIM logic analyzer capture into sb.  Entries are 10 words
 * each; as with sbuf_cim_la4, the layout depends on whether the LA was
 * configured for PC-only capture (F_UPDBGLACAPTPCONLY).
 */
static void
sbuf_cim_la6(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg)
{
uint32_t *p;
sbuf_printf(sb, "Status Inst Data PC%s",
cfg & F_UPDBGLACAPTPCONLY ? "" :
" LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data");
for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) {
if (cfg & F_UPDBGLACAPTPCONLY) {
/* PC-only mode packs three records into one 10-word entry. */
sbuf_printf(sb, "\n %02x %08x %08x %08x",
p[3] & 0xff, p[2], p[1], p[0]);
sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x",
(p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x",
(p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
p[6] >> 16);
} else {
sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x "
"%08x %08x %08x %08x %08x %08x",
(p[9] >> 16) & 0xff,
p[9] & 0xffff, p[8] >> 16,
p[8] & 0xffff, p[7] >> 16,
p[7] & 0xffff, p[6] >> 16,
p[2], p[1], p[0], p[5], p[4], p[3]);
}
}
}
/*
 * Read the CIM logic analyzer (config + capture buffer) under reg_lock and
 * format it into sb using the chip-appropriate layout.  flags selects
 * M_WAITOK or M_NOWAIT for the temporary buffer allocation.
 */
static int
sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags)
{
uint32_t cfg, *buf;
int rc;
MPASS(flags == M_WAITOK || flags == M_NOWAIT);
buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
M_ZERO | flags);
/* NULL is only possible with M_NOWAIT. */
if (buf == NULL)
return (ENOMEM);
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
else {
/* Need the LA config first to know how to decode the capture. */
rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
if (rc == 0)
rc = -t4_cim_read_la(sc, buf, NULL);
}
mtx_unlock(&sc->reg_lock);
if (rc == 0) {
if (chip_id(sc) < CHELSIO_T6)
sbuf_cim_la4(sc, sb, buf, cfg);
else
sbuf_cim_la6(sc, sb, buf, cfg);
}
free(buf, M_CXGBE);
return (rc);
}
/*
 * sysctl wrapper around sbuf_cim_la: dump the CIM logic analyzer.
 */
static int
sysctl_cim_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	if ((rc = sbuf_cim_la(sc, sb, M_WAITOK)) == 0)
		rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
/*
 * Log two lines of CIM-related debug registers.
 * NOTE(review): the register list (EDC BIST registers, with a second bank
 * at +0x800 in the second line) looks intentional but is undocumented
 * here — confirm against the debug procedure before changing.
 */
static void
dump_cim_regs(struct adapter *sc)
{
log(LOG_DEBUG, "%s: CIM debug regs1 %08x %08x %08x %08x %08x\n",
device_get_nameunit(sc->dev),
t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA0),
t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA1),
t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA2),
t4_read_reg(sc, A_EDC_H_BIST_DATA_PATTERN),
t4_read_reg(sc, A_EDC_H_BIST_STATUS_RDATA));
log(LOG_DEBUG, "%s: CIM debug regs2 %08x %08x %08x %08x %08x\n",
device_get_nameunit(sc->dev),
t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA0),
t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA1),
t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA0 + 0x800),
t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA1 + 0x800),
t4_read_reg(sc, A_EDC_H_BIST_CMD_LEN));
}
/*
 * Dump the CIM logic analyzer to the kernel log (best effort, used from
 * error paths).
 */
static void
dump_cimla(struct adapter *sc)
{
	struct sbuf sb;

	if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) {
		log(LOG_DEBUG, "%s: failed to generate CIM LA dump.\n",
		    device_get_nameunit(sc->dev));
		return;
	}

	if (sbuf_cim_la(sc, &sb, M_WAITOK) == 0 && sbuf_finish(&sb) == 0) {
		log(LOG_DEBUG, "%s: CIM LA dump follows.\n%s\n",
		    device_get_nameunit(sc->dev), sbuf_data(&sb));
	}
	sbuf_delete(&sb);
}
/*
 * Record that a CIM error has occurred on this adapter (latched in
 * error_flags; set atomically as this can race other flag updates).
 */
void
t4_os_cim_err(struct adapter *sc)
{
atomic_set_int(&sc->error_flags, ADAP_CIM_ERR);
}
/*
 * Dump the CIM MA logic analyzer: the raw capture followed by a decoded
 * table.  t4_cim_read_ma_la fills two regions of 5 words per entry.
 */
static int
sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
u_int i;
struct sbuf *sb;
uint32_t *buf, *p;
int rc;
rc = sysctl_wire_old_buffer(req, 0);
if (rc != 0)
return (rc);
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
M_ZERO | M_WAITOK);
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
else
t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
mtx_unlock(&sc->reg_lock);
if (rc)
goto done;
/* First half: raw entries, 5 words each. */
p = buf;
for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
p[1], p[0]);
}
/* Second half: decoded fields extracted from each 5-word entry. */
sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD");
for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u",
(p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
(p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
(p[1] >> 2) | ((p[2] & 3) << 30),
(p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
p[0] & 1);
}
rc = sbuf_finish(sb);
done:
sbuf_delete(sb);
free(buf, M_CXGBE);
return (rc);
}
/*
 * Dump the CIM PIF logic analyzer.  t4_cim_read_pif_la fills two regions
 * of 6 words per entry; each is printed as a decoded table.
 */
static int
sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
u_int i;
struct sbuf *sb;
uint32_t *buf, *p;
int rc;
rc = sysctl_wire_old_buffer(req, 0);
if (rc != 0)
return (rc);
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
M_ZERO | M_WAITOK);
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
else
t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
mtx_unlock(&sc->reg_lock);
if (rc)
goto done;
p = buf;
/* First region: full control/address/data entries. */
sbuf_printf(sb, "Cntl ID DataBE Addr Data");
for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x",
(p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
p[4], p[3], p[2], p[1], p[0]);
}
/* Second region: control/data entries only. */
sbuf_printf(sb, "\n\nCntl ID Data");
for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x",
(p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
}
rc = sbuf_finish(sb);
done:
sbuf_delete(sb);
free(buf, M_CXGBE);
return (rc);
}
/*
 * Display the configuration and current state (pointers, SOP/EOP counts,
 * free space) of all CIM inbound and outbound queues.
 */
static int
sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
struct sbuf *sb;
int rc, i;
uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
uint16_t thres[CIM_NUM_IBQ];
uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
cim_num_obq = sc->chip_params->cim_num_obq;
/* T5+ expose shadow copies of the queue pointers. */
if (is_t4(sc)) {
ibq_rdaddr = A_UP_IBQ_0_RDADDR;
obq_rdaddr = A_UP_OBQ_0_REALADDR;
} else {
ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
}
nq = CIM_NUM_IBQ + cim_num_obq;
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
else {
/* 4 status words per queue, then 2 write pointers per OBQ. */
rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
if (rc == 0) {
rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq,
obq_wr);
if (rc == 0)
t4_read_cimq_cfg(sc, base, size, thres);
}
}
mtx_unlock(&sc->reg_lock);
if (rc)
return (rc);
rc = sysctl_wire_old_buffer(req, 0);
if (rc != 0)
return (rc);
sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
if (sb == NULL)
return (ENOMEM);
sbuf_printf(sb,
" Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");
/* IBQs first (these have thresholds)... */
for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
G_QUEREMFLITS(p[2]) * 16);
/* ...then OBQs, which report a write pointer instead. */
for ( ; i < nq; i++, p += 4, wr += 2)
sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i],
base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
G_QUEREMFLITS(p[2]) * 16);
rc = sbuf_finish(sb);
sbuf_delete(sb);
return (rc);
}
/*
 * Display per-channel CPL request/response counts from TP.  The layout
 * depends on whether the chip has 2 or 4 channels.
 */
static int
sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
struct sbuf *sb;
int rc;
struct tp_cpl_stats stats;
rc = sysctl_wire_old_buffer(req, 0);
if (rc != 0)
return (rc);
sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
if (sb == NULL)
return (ENOMEM);
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
else
t4_tp_get_cpl_stats(sc, &stats, 0);
mtx_unlock(&sc->reg_lock);
if (rc)
goto done;
if (sc->chip_params->nchan > 2) {
sbuf_printf(sb, " channel 0 channel 1"
" channel 2 channel 3");
sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u",
stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u",
stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
} else {
sbuf_printf(sb, " channel 0 channel 1");
sbuf_printf(sb, "\nCPL requests: %10u %10u",
stats.req[0], stats.req[1]);
sbuf_printf(sb, "\nCPL responses: %10u %10u",
stats.rsp[0], stats.rsp[1]);
}
rc = sbuf_finish(sb);
done:
sbuf_delete(sb);
return (rc);
}
static int
sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
struct sbuf *sb;
int rc;
struct tp_usm_stats stats;
rc = sysctl_wire_old_buffer(req, 0);
if (rc != 0)
return(rc);
sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
if (sb == NULL)
return (ENOMEM);
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
else
t4_get_usm_stats(sc, &stats, 1);
mtx_unlock(&sc->reg_lock);
if (rc == 0) {
sbuf_printf(sb, "Frames: %u\n", stats.frames);
sbuf_printf(sb, "Octets: %ju\n", stats.octets);
sbuf_printf(sb, "Drops: %u", stats.drops);
rc = sbuf_finish(sb);
}
sbuf_delete(sb);
return (rc);
}
static int
sysctl_tid_stats(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
struct sbuf *sb;
int rc;
struct tp_tid_stats stats;
rc = sysctl_wire_old_buffer(req, 0);
if (rc != 0)
return(rc);
sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
if (sb == NULL)
return (ENOMEM);
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
else
t4_tp_get_tid_stats(sc, &stats, 1);
mtx_unlock(&sc->reg_lock);
if (rc == 0) {
sbuf_printf(sb, "Delete: %u\n", stats.del);
sbuf_printf(sb, "Invalidate: %u\n", stats.inv);
sbuf_printf(sb, "Active: %u\n", stats.act);
sbuf_printf(sb, "Passive: %u", stats.pas);
rc = sbuf_finish(sb);
}
sbuf_delete(sb);
return (rc);
}
/* Printable names for firmware device-log severity levels (fw_devlog_e.level). */
static const char * const devlog_level_strings[] = {
	[FW_DEVLOG_LEVEL_EMERG] = "EMERG",
	[FW_DEVLOG_LEVEL_CRIT] = "CRIT",
	[FW_DEVLOG_LEVEL_ERR] = "ERR",
	[FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
	[FW_DEVLOG_LEVEL_INFO] = "INFO",
	[FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
};
/*
 * Printable names for firmware device-log facilities (fw_devlog_e.facility).
 * Designated initializers leave any gaps NULL; out-of-range or unnamed
 * facilities are rendered as "UNKNOWN" by the consumer.
 */
static const char * const devlog_facility_strings[] = {
	[FW_DEVLOG_FACILITY_CORE] = "CORE",
	[FW_DEVLOG_FACILITY_CF] = "CF",
	[FW_DEVLOG_FACILITY_SCHED] = "SCHED",
	[FW_DEVLOG_FACILITY_TIMER] = "TIMER",
	[FW_DEVLOG_FACILITY_RES] = "RES",
	[FW_DEVLOG_FACILITY_HW] = "HW",
	[FW_DEVLOG_FACILITY_FLR] = "FLR",
	[FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
	[FW_DEVLOG_FACILITY_PHY] = "PHY",
	[FW_DEVLOG_FACILITY_MAC] = "MAC",
	[FW_DEVLOG_FACILITY_PORT] = "PORT",
	[FW_DEVLOG_FACILITY_VI] = "VI",
	[FW_DEVLOG_FACILITY_FILTER] = "FILTER",
	[FW_DEVLOG_FACILITY_ACL] = "ACL",
	[FW_DEVLOG_FACILITY_TM] = "TM",
	[FW_DEVLOG_FACILITY_QFC] = "QFC",
	[FW_DEVLOG_FACILITY_DCB] = "DCB",
	[FW_DEVLOG_FACILITY_ETH] = "ETH",
	[FW_DEVLOG_FACILITY_OFLD] = "OFLD",
	[FW_DEVLOG_FACILITY_RI] = "RI",
	[FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
	[FW_DEVLOG_FACILITY_FCOE] = "FCOE",
	[FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
	[FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE",
	[FW_DEVLOG_FACILITY_CHNET] = "CHNET",
};
/*
 * Copy the firmware device log out of adapter memory and render it into the
 * caller's sbuf.  'flags' is the malloc(9) sleep behavior (M_WAITOK or
 * M_NOWAIT).  Returns 0 on success or an errno.
 */
static int
sbuf_devlog(struct adapter *sc, struct sbuf *sb, int flags)
{
	int i, j, rc, nentries, first = 0;
	struct devlog_params *dparams = &sc->params.devlog;
	struct fw_devlog_e *buf, *e;
	uint64_t ftstamp = UINT64_MAX;

	/* No device log configured on this adapter. */
	if (dparams->addr == 0)
		return (ENXIO);

	MPASS(flags == M_WAITOK || flags == M_NOWAIT);
	buf = malloc(dparams->size, M_CXGBE, M_ZERO | flags);
	if (buf == NULL)
		return (ENOMEM);

	/* Read the entire log out of card memory via memory window 1. */
	mtx_lock(&sc->reg_lock);
	if (hw_off_limits(sc))
		rc = ENXIO;
	else
		rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf,
		    dparams->size);
	mtx_unlock(&sc->reg_lock);
	if (rc != 0)
		goto done;

	/*
	 * The log is a ring of fixed-size entries.  Convert each live entry
	 * to host byte order and remember the index of the entry with the
	 * smallest (oldest) timestamp; the dump starts there.
	 */
	nentries = dparams->size / sizeof(struct fw_devlog_e);
	for (i = 0; i < nentries; i++) {
		e = &buf[i];
		if (e->timestamp == 0)
			break;	/* end */
		e->timestamp = be64toh(e->timestamp);
		e->seqno = be32toh(e->seqno);
		for (j = 0; j < 8; j++)
			e->params[j] = be32toh(e->params[j]);
		if (e->timestamp < ftstamp) {
			ftstamp = e->timestamp;
			first = i;
		}
	}

	/* rc is 0 here, so an empty log reports success with no output. */
	if (buf[first].timestamp == 0)
		goto done;	/* nothing in the log */

	sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
	    "Seq#", "Tstamp", "Level", "Facility", "Message");

	/* Walk the ring exactly once, oldest entry first, wrapping at the end. */
	i = first;
	do {
		e = &buf[i];
		if (e->timestamp == 0)
			break;	/* end */
		sbuf_printf(sb, "%10d %15ju %8s %8s ",
		    e->seqno, e->timestamp,
		    (e->level < nitems(devlog_level_strings) ?
			devlog_level_strings[e->level] : "UNKNOWN"),
		    (e->facility < nitems(devlog_facility_strings) ?
			devlog_facility_strings[e->facility] : "UNKNOWN"));
		/* e->fmt is a firmware-supplied format string with 8 params. */
		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
		    e->params[2], e->params[3], e->params[4],
		    e->params[5], e->params[6], e->params[7]);
		if (++i == nentries)
			i = 0;
	} while (i != first);
done:
	free(buf, M_CXGBE);
	return (rc);
}
/*
 * Sysctl handler: render the firmware device log for userland.
 */
static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc == 0) {
		sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
		if (sb == NULL)
			return (ENOMEM);
		rc = sbuf_devlog(sc, sb, M_WAITOK);
		if (rc == 0)
			rc = sbuf_finish(sb);
		sbuf_delete(sb);
	}
	return (rc);
}
/*
 * Write the firmware device log to the kernel message buffer at LOG_DEBUG.
 * Best effort: failures are either logged or silently ignored.
 */
static void
dump_devlog(struct adapter *sc)
{
	struct sbuf sb;
	int rc;

	if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) {
		log(LOG_DEBUG, "%s: failed to generate devlog dump.\n",
		    device_get_nameunit(sc->dev));
		return;
	}
	rc = sbuf_devlog(sc, &sb, M_WAITOK);
	if (rc == 0 && sbuf_finish(&sb) == 0) {
		log(LOG_DEBUG, "%s: device log follows.\n%s",
		    device_get_nameunit(sc->dev), sbuf_data(&sb));
	}
	sbuf_delete(&sb);
}
/*
 * Sysctl handler: report per-channel TP FCoE DDP statistics.
 */
static int
sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_fcoe_stats stats[MAX_NCHAN];
	int i, nchan = sc->chip_params->nchan;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	/* Snapshot every channel's counters in one locked region. */
	mtx_lock(&sc->reg_lock);
	if (hw_off_limits(sc))
		rc = ENXIO;
	else {
		for (i = 0; i < nchan; i++)
			t4_get_fcoe_stats(sc, i, &stats[i], 1);
	}
	mtx_unlock(&sc->reg_lock);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	/* 4-channel chips get a wider table than 2-channel ones. */
	if (nchan > 2) {
		sbuf_printf(sb, " channel 0 channel 1"
		    " channel 2 channel 3");
		sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju",
		    stats[0].octets_ddp, stats[1].octets_ddp,
		    stats[2].octets_ddp, stats[3].octets_ddp);
		sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u",
		    stats[0].frames_ddp, stats[1].frames_ddp,
		    stats[2].frames_ddp, stats[3].frames_ddp);
		sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u",
		    stats[0].frames_drop, stats[1].frames_drop,
		    stats[2].frames_drop, stats[3].frames_drop);
	} else {
		sbuf_printf(sb, " channel 0 channel 1");
		sbuf_printf(sb, "\noctetsDDP: %16ju %16ju",
		    stats[0].octets_ddp, stats[1].octets_ddp);
		sbuf_printf(sb, "\nframesDDP: %16u %16u",
		    stats[0].frames_ddp, stats[1].frames_ddp);
		sbuf_printf(sb, "\nframesDrop: %16u %16u",
		    stats[0].frames_drop, stats[1].frames_drop);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	return (rc);
}
/*
 * Sysctl handler: report the hardware Tx scheduler configuration — per
 * scheduler mode (class/flow), channel map, rate, IPGs, and pace table.
 */
static int
sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	unsigned int map, kbps, ipg, mode;
	unsigned int pace_tab[NTX_SCHED];

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);
	sb = sbuf_new_for_sysctl(NULL, NULL, 512, req);
	if (sb == NULL)
		return (ENOMEM);

	/* All register accesses below stay under the register lock. */
	mtx_lock(&sc->reg_lock);
	if (hw_off_limits(sc)) {
		rc = ENXIO;
		goto done;
	}
	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
	t4_read_pace_tbl(sc, pace_tab);
	sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
	    "Class IPG (0.1 ns) Flow IPG (us)");
	/* Two map bits per scheduler; consume them as we go. */
	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
		t4_get_tx_sched(sc, i, &kbps, &ipg, 1);
		sbuf_printf(sb, "\n %u %-5s %u ", i,
		    (mode & (1 << i)) ? "flow" : "class", map & 3);
		/* A zero value in any column means that feature is off. */
		if (kbps)
			sbuf_printf(sb, "%9u ", kbps);
		else
			sbuf_printf(sb, " disabled ");
		if (ipg)
			sbuf_printf(sb, "%13u ", ipg);
		else
			sbuf_printf(sb, " disabled ");
		if (pace_tab[i])
			sbuf_printf(sb, "%10u", pace_tab[i]);
		else
			sbuf_printf(sb, " disabled");
	}
	rc = sbuf_finish(sb);
done:
	mtx_unlock(&sc->reg_lock);
	sbuf_delete(sb);
	return (rc);
}
/*
 * Sysctl handler: report MPS loopback port statistics, two channels per
 * iteration.
 */
static int
sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, j;
	uint64_t *p0, *p1;
	struct lb_port_stats s[2];
	static const char *stat_name[] = {
		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
		"BG2FramesTrunc:", "BG3FramesTrunc:"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);
	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	memset(s, 0, sizeof(s));

	/* Read channels i and i+1 under the lock, then print them. */
	for (i = 0; i < sc->chip_params->nchan; i += 2) {
		mtx_lock(&sc->reg_lock);
		if (hw_off_limits(sc))
			rc = ENXIO;
		else {
			t4_get_lb_stats(sc, i, &s[0]);
			t4_get_lb_stats(sc, i + 1, &s[1]);
		}
		mtx_unlock(&sc->reg_lock);
		if (rc != 0)
			break;

		/*
		 * NOTE(review): this walk treats struct lb_port_stats as an
		 * array of uint64_t counters starting at 'octets', in the
		 * same order as stat_name[] — relies on the struct layout.
		 */
		p0 = &s[0].octets;
		p1 = &s[1].octets;
		sbuf_printf(sb, "%s Loopback %u"
		    " Loopback %u", i == 0 ? "" : "\n", i, i + 1);
		for (j = 0; j < nitems(stat_name); j++)
			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
			    *p0++, *p1++);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	return (rc);
}
/*
 * Sysctl handler: report the reason the link last went down, or "n/a" if
 * the link is up or no reason has been recorded (255).
 */
static int
sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct link_config *lcfg = &pi->link_cfg;
	struct sbuf *sb;
	int rc;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);
	sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
	if (sb == NULL)
		return (ENOMEM);

	if (lcfg->link_ok || lcfg->link_down_rc == 255)
		sbuf_printf(sb, "n/a");
	else
		sbuf_printf(sb, "%s", t4_link_down_rc_str(lcfg->link_down_rc));

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	return (rc);
}
/*
 * A memory range [base, limit] tagged with an index; idx selects the
 * name used when the range is displayed (see sysctl_meminfo).
 */
struct mem_desc {
	u_int base;
	u_int limit;
	u_int idx;
};
/*
 * qsort(9) comparator: order struct mem_desc entries by ascending base.
 */
static int
mem_desc_cmp(const void *a, const void *b)
{
	const struct mem_desc *da = a;
	const struct mem_desc *db = b;

	if (da->base > db->base)
		return (1);
	if (da->base < db->base)
		return (-1);
	return (0);
}
/*
 * Print one named memory region as "name from-to [size]".  Regions where
 * from == to, or whose size wraps to zero, are suppressed.
 */
static void
mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
    unsigned int to)
{
	unsigned int len;

	if (from == to)
		return;
	len = to - from + 1;
	if (len == 0)
		return;

	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, len);
}
/*
 * Sysctl handler: report the adapter's memory map — the populated memories
 * (EDC/MC/HMA), the regions carved out of them, and various page/p-struct
 * usage counters.  All register reads happen under reg_lock.
 */
static int
sysctl_meminfo(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, n;
	uint32_t lo, hi, used, free, alloc;
	/* Names indexed by mem_desc.idx for the 'avail' entries. */
	static const char *memory[] = {
		"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:", "HMA:"
	};
	/* Names indexed by mem_desc.idx for the carved-out regions. */
	static const char *region[] = {
		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
		"RQUDP region:", "PBL region:", "TXPBL region:",
		"TLSKey region:", "DBVFIFO region:", "ULPRX state:",
		"ULPTX state:", "On-chip queues:",
	};
	struct mem_desc avail[4];
	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
	struct mem_desc *md = mem;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);
	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	/* A zero limit marks "limit unknown; extend to the next region". */
	for (i = 0; i < nitems(mem); i++) {
		mem[i].limit = 0;
		mem[i].idx = i;
	}

	mtx_lock(&sc->reg_lock);
	if (hw_off_limits(sc)) {
		rc = ENXIO;
		goto done;
	}

	/* Find and sort the populated memory ranges */
	i = 0;
	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	if (lo & F_EDRAM0_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		avail[i].base = G_EDRAM0_BASE(hi) << 20;
		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
		avail[i].idx = 0;
		i++;
	}
	if (lo & F_EDRAM1_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		avail[i].base = G_EDRAM1_BASE(hi) << 20;
		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
		avail[i].idx = 1;
		i++;
	}
	if (lo & F_EXT_MEM_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
		avail[i].limit = avail[i].base + (G_EXT_MEM_SIZE(hi) << 20);
		avail[i].idx = is_t5(sc) ? 3 : 2;	/* Call it MC0 for T5 */
		i++;
	}
	if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
		avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20);
		avail[i].idx = 4;
		i++;
	}
	if (is_t6(sc) && lo & F_HMA_MUX) {
		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
		avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20);
		avail[i].idx = 5;
		i++;
	}
	MPASS(i <= nitems(avail));
	if (!i)				/* no memory available */
		goto done;
	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);

	/*
	 * Collect the base address of every region, in the same order as
	 * region[] above.  Entries without an explicit limit are closed off
	 * by the next region's base after the sort below.
	 */
	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);

	/* the next few have explicit upper bounds */
	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
	md->limit = md->base - 1 +
	    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
	    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
	md++;

	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
	md->limit = md->base - 1 +
	    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
	    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
	md++;

	/* LE hash table: base register differs between T5- and T6+ chips. */
	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
		if (chip_id(sc) <= CHELSIO_T5)
			md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
		else
			md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR);
		md->limit = 0;
	} else {
		md->base = 0;
		md->idx = nitems(region);  /* hide it */
	}
	md++;

#define ulp_region(reg) \
	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)

	ulp_region(RX_ISCSI);
	ulp_region(RX_TDDP);
	ulp_region(TX_TPT);
	ulp_region(RX_STAG);
	ulp_region(RX_RQ);
	ulp_region(RX_RQUDP);
	ulp_region(RX_PBL);
	ulp_region(TX_PBL);
	if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) {
		ulp_region(RX_TLS_KEY);
	}
#undef ulp_region

	/* DBVFIFO region: T4 has none; T5/T6 size it differently. */
	md->base = 0;
	if (is_t4(sc))
		md->idx = nitems(region);
	else {
		uint32_t size = 0;
		uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2);
		uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE);

		if (is_t5(sc)) {
			if (sge_ctrl & F_VFIFO_ENABLE)
				size = fifo_size << 2;
		} else
			size = G_T6_DBVFIFO_SIZE(fifo_size) << 6;

		if (size) {
			md->base = t4_read_reg(sc, A_SGE_DBVFIFO_BADDR);
			md->limit = md->base + size - 1;
		} else
			md->idx = nitems(region);
	}
	md++;

	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
	md->limit = 0;
	md++;
	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
	md->limit = 0;
	md++;

	md->base = sc->vres.ocq.start;
	if (sc->vres.ocq.size)
		md->limit = md->base + sc->vres.ocq.size - 1;
	else
		md->idx = nitems(region);  /* hide it */
	md++;

	/* add any address-space holes, there can be up to 3 */
	for (n = 0; n < i - 1; n++)
		if (avail[n].limit < avail[n + 1].base)
			(md++)->base = avail[n].limit;
	if (avail[n].limit)
		(md++)->base = avail[n].limit;

	n = md - mem;
	MPASS(n <= nitems(mem));
	qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);

	for (lo = 0; lo < i; lo++)
		mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
		    avail[lo].limit - 1);

	sbuf_printf(sb, "\n");
	for (i = 0; i < n; i++) {
		if (mem[i].idx >= nitems(region))
			continue;	/* skip holes */
		if (!mem[i].limit)
			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
		mem_region_show(sb, region[mem[i].idx], mem[i].base,
		    mem[i].limit);
	}

	sbuf_printf(sb, "\n");
	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
	mem_region_show(sb, "uP RAM:", lo, hi);

	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
	mem_region_show(sb, "uP Extmem2:", lo, hi);

	/* Rx/Tx free-page counters are accumulated over repeated reads. */
	lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
	for (i = 0, free = 0; i < 2; i++)
		free += G_FREERXPAGECOUNT(t4_read_reg(sc, A_TP_FLM_FREE_RX_CNT));
	sbuf_printf(sb, "\n%u Rx pages (%u free) of size %uKiB for %u channels\n",
	    G_PMRXMAXPAGE(lo), free,
	    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
	    (lo & F_PMRXNUMCHN) ? 2 : 1);

	lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
	hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
	for (i = 0, free = 0; i < 4; i++)
		free += G_FREETXPAGECOUNT(t4_read_reg(sc, A_TP_FLM_FREE_TX_CNT));
	sbuf_printf(sb, "%u Tx pages (%u free) of size %u%ciB for %u channels\n",
	    G_PMTXMAXPAGE(lo), free,
	    hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
	    hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
	sbuf_printf(sb, "%u p-structs (%u free)\n",
	    t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT),
	    G_FREEPSTRUCTCOUNT(t4_read_reg(sc, A_TP_FLM_FREE_PS_CNT)));

	for (i = 0; i < 4; i++) {
		if (chip_id(sc) > CHELSIO_T5)
			lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
		else
			lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
		if (is_t5(sc)) {
			used = G_T5_USED(lo);
			alloc = G_T5_ALLOC(lo);
		} else {
			used = G_USED(lo);
			alloc = G_ALLOC(lo);
		}
		/* For T6 these are MAC buffer groups */
		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
		    i, used, alloc);
	}
	for (i = 0; i < sc->chip_params->nchan; i++) {
		if (chip_id(sc) > CHELSIO_T5)
			lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
		else
			lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
		if (is_t5(sc)) {
			used = G_T5_USED(lo);
			alloc = G_T5_ALLOC(lo);
		} else {
			used = G_USED(lo);
			alloc = G_ALLOC(lo);
		}
		/* For T6 these are MAC buffer groups */
		sbuf_printf(sb,
		    "\nLoopback %d using %u pages out of %u allocated",
		    i, used, alloc);
	}
done:
	mtx_unlock(&sc->reg_lock);
	if (rc == 0)
		rc = sbuf_finish(sb);
	sbuf_delete(sb);
	return (rc);
}
static inline void
tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
{
*mask = x | y;
y = htobe64(y);
memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
}
/*
 * Sysctl handler: dump the MPS TCAM (T4/T5 layout) — one line per valid
 * entry with address, mask, SRAM classification bits, and, for entries
 * with replication enabled, the replication map fetched via a firmware
 * LDST mailbox command.
 */
static int
sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;

	MPASS(chip_id(sc) <= CHELSIO_T5);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);
	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb,
	    "Idx Ethernet address Mask Vld Ports PF"
	    " VF Replication P0 P1 P2 P3 ML");
	for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
		uint64_t tcamx, tcamy, mask;
		uint32_t cls_lo, cls_hi;
		uint8_t addr[ETHER_ADDR_LEN];

		/* Read the TCAM x/y halves for this index. */
		mtx_lock(&sc->reg_lock);
		if (hw_off_limits(sc))
			rc = ENXIO;
		else {
			tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
			tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
		}
		mtx_unlock(&sc->reg_lock);
		if (rc != 0)
			break;

		/* x & y overlapping marks an unused entry; skip it. */
		if (tcamx & tcamy)
			continue;
		tcamxy2valmask(tcamx, tcamy, addr, &mask);

		/* Read the matching classification SRAM words. */
		mtx_lock(&sc->reg_lock);
		if (hw_off_limits(sc))
			rc = ENXIO;
		else {
			cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
			cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
		}
		mtx_unlock(&sc->reg_lock);
		if (rc != 0)
			break;

		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
		    " %c %#x%4u%4d", i, addr[0], addr[1], addr[2],
		    addr[3], addr[4], addr[5], (uintmax_t)mask,
		    (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
		    G_PORTMAP(cls_hi), G_PF(cls_lo),
		    (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);

		if (cls_lo & F_REPLICATE) {
			struct fw_ldst_cmd ldst_cmd;

			/* Ask the firmware for this entry's replication map. */
			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
			ldst_cmd.op_to_addrspace =
			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
				F_FW_CMD_REQUEST | F_FW_CMD_READ |
				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
			ldst_cmd.u.mps.rplc.fid_idx =
			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
				V_FW_LDST_CMD_IDX(i));

			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
			    "t4mps");
			if (rc)
				break;
			if (hw_off_limits(sc))
				rc = ENXIO;
			else
				rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
				    sizeof(ldst_cmd), &ldst_cmd);
			end_synchronized_op(sc, 0);
			if (rc != 0)
				break;
			else {
				sbuf_printf(sb, " %08x %08x %08x %08x",
				    be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
				    be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
				    be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
				    be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
			}
		} else
			sbuf_printf(sb, "%36s", "");

		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
	}

	/* Flush whatever was produced even on error; report the error. */
	if (rc)
		(void) sbuf_finish(sb);
	else
		rc = sbuf_finish(sb);
	sbuf_delete(sb);
	return (rc);
}
/*
 * Sysctl handler: dump the MPS TCAM (T6 layout).  Each entry is read in
 * two passes through the indirect DATA2_CTL window — first the y (value)
 * half, then, after setting CTLXYBITSEL, the x (mask) half — followed by
 * the classification SRAM and, if replication is enabled, a firmware LDST
 * query for the (wider, 256-bit) replication map.
 */
static int
sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;

	MPASS(chip_id(sc) > CHELSIO_T5);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);
	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask"
	    " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF"
	    " Replication"
	    " P0 P1 P2 P3 ML\n");

	for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
		uint8_t dip_hit, vlan_vld, lookup_type, port_num;
		uint16_t ivlan;
		uint64_t tcamx, tcamy, val, mask;
		uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
		uint8_t addr[ETHER_ADDR_LEN];

		/* The TCAM is split into two banks of 256 entries each. */
		ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0);
		if (i < 256)
			ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
		else
			ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1);

		/* Pass 1: read the y (value) half via the indirect window. */
		mtx_lock(&sc->reg_lock);
		if (hw_off_limits(sc))
			rc = ENXIO;
		else {
			t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
			val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
			tcamy = G_DMACH(val) << 32;
			tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
			data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
		}
		mtx_unlock(&sc->reg_lock);
		if (rc != 0)
			break;

		lookup_type = G_DATALKPTYPE(data2);
		port_num = G_DATAPORTNUM(data2);
		if (lookup_type && lookup_type != M_DATALKPTYPE) {
			/* Inner header VNI */
			vniy = ((data2 & F_DATAVIDH2) << 23) |
			    (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
			dip_hit = data2 & F_DATADIPHIT;
			vlan_vld = 0;
		} else {
			vniy = 0;
			dip_hit = 0;
			vlan_vld = data2 & F_DATAVIDH2;
			ivlan = G_VIDL(val);
		}

		/* Pass 2: flip to the x (mask) half and read again. */
		ctl |= V_CTLXYBITSEL(1);
		mtx_lock(&sc->reg_lock);
		if (hw_off_limits(sc))
			rc = ENXIO;
		else {
			t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
			val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
			tcamx = G_DMACH(val) << 32;
			tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
			data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
		}
		mtx_unlock(&sc->reg_lock);
		if (rc != 0)
			break;

		if (lookup_type && lookup_type != M_DATALKPTYPE) {
			/* Inner header VNI mask */
			vnix = ((data2 & F_DATAVIDH2) << 23) |
			    (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
		} else
			vnix = 0;

		/* x & y overlapping marks an unused entry; skip it. */
		if (tcamx & tcamy)
			continue;
		tcamxy2valmask(tcamx, tcamy, addr, &mask);

		/* Read the matching classification SRAM words. */
		mtx_lock(&sc->reg_lock);
		if (hw_off_limits(sc))
			rc = ENXIO;
		else {
			cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
			cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
		}
		mtx_unlock(&sc->reg_lock);
		if (rc != 0)
			break;

		if (lookup_type && lookup_type != M_DATALKPTYPE) {
			sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
			    "%012jx %06x %06x - - %3c"
			    " I %4x %3c %#x%4u%4d", i, addr[0],
			    addr[1], addr[2], addr[3], addr[4], addr[5],
			    (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
			    port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
			    G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
			    cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
		} else {
			sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
			    "%012jx - - ", i, addr[0], addr[1],
			    addr[2], addr[3], addr[4], addr[5],
			    (uintmax_t)mask);

			if (vlan_vld)
				sbuf_printf(sb, "%4u Y ", ivlan);
			else
				sbuf_printf(sb, " - N ");

			sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d",
			    lookup_type ? 'I' : 'O', port_num,
			    cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
			    G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
			    cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
		}

		if (cls_lo & F_T6_REPLICATE) {
			struct fw_ldst_cmd ldst_cmd;

			/* Ask the firmware for this entry's replication map. */
			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
			ldst_cmd.op_to_addrspace =
			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
				F_FW_CMD_REQUEST | F_FW_CMD_READ |
				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
			ldst_cmd.u.mps.rplc.fid_idx =
			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
				V_FW_LDST_CMD_IDX(i));

			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
			    "t6mps");
			if (rc)
				break;
			if (hw_off_limits(sc))
				rc = ENXIO;
			else
				rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
				    sizeof(ldst_cmd), &ldst_cmd);
			end_synchronized_op(sc, 0);
			if (rc != 0)
				break;
			else {
				sbuf_printf(sb, " %08x %08x %08x %08x"
				    " %08x %08x %08x %08x",
				    be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
				    be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
				    be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
				    be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
				    be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
				    be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
				    be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
				    be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
			}
		} else
			sbuf_printf(sb, "%72s", "");

		sbuf_printf(sb, "%4u%3u%3u%3u %#x",
		    G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
		    G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
		    (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
	}

	/* Flush whatever was produced even on error; report the error. */
	if (rc)
		(void) sbuf_finish(sb);
	else
		rc = sbuf_finish(sb);
	sbuf_delete(sb);
	return (rc);
}
/*
 * Sysctl handler: report the NMTUS-entry (16) path MTU table as a single
 * space-separated line.
 */
static int
sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	uint16_t mtus[NMTUS];
	int rc, i;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	/* Read the table under the register lock. */
	mtx_lock(&sc->reg_lock);
	if (hw_off_limits(sc))
		rc = ENXIO;
	else
		t4_read_mtu_tbl(sc, mtus, NULL);
	mtx_unlock(&sc->reg_lock);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	/* One "%u" per entry, single space between entries. */
	for (i = 0; i < NMTUS; i++)
		sbuf_printf(sb, i == 0 ? "%u" : " %u", mtus[i]);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	return (rc);
}
/*
 * Sysctl handler: report PMTX/PMRX command and byte counters.  The first
 * four rows of each table are plain counters; on T6+ two extra pairs of
 * rows follow (FIFO wait and latency), which live at indices 4 and 6 of
 * the stats arrays (index 5 is intentionally NULL/skipped).
 */
static int
sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
	uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
	static const char *tx_stats[MAX_PM_NSTATS] = {
		"Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
		"Tx FIFO wait", NULL, "Tx latency"
	};
	static const char *rx_stats[MAX_PM_NSTATS] = {
		"Read:", "Write bypass:", "Write mem:", "Flush:",
		"Rx FIFO wait", NULL, "Rx latency"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	/* Snapshot both directions' counters under the register lock. */
	mtx_lock(&sc->reg_lock);
	if (hw_off_limits(sc))
		rc = ENXIO;
	else {
		t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
		t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
	}
	mtx_unlock(&sc->reg_lock);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, " Tx pcmds Tx bytes");
	for (i = 0; i < 4; i++) {
		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
		    tx_cyc[i]);
	}

	sbuf_printf(sb, "\n Rx pcmds Rx bytes");
	for (i = 0; i < 4; i++) {
		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
		    rx_cyc[i]);
	}

	if (chip_id(sc) > CHELSIO_T5) {
		/* i == 4 here: the FIFO-wait rows. */
		sbuf_printf(sb,
		    "\n Total wait Total occupancy");
		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
		    tx_cyc[i]);
		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
		    rx_cyc[i]);

		/* Skip the NULL slot at index 5 and print the latency rows. */
		i += 2;
		MPASS(i < nitems(tx_stats));

		sbuf_printf(sb,
		    "\n Reads Total wait");
		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
		    tx_cyc[i]);
		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
		    rx_cyc[i]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	return (rc);
}
static int
sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
struct sbuf *sb;
int rc;
struct tp_rdma_stats stats;
rc = sysctl_wire_old_buffer(req, 0);
if (rc != 0)
return (rc);
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
else
t4_tp_get_rdma_stats(sc, &stats, 0);
mtx_unlock(&sc->reg_lock);
if (rc != 0)
return (rc);
sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
if (sb == NULL)
return (ENOMEM);
sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
rc = sbuf_finish(sb);
sbuf_delete(sb);
return (rc);
}
static int
sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
struct sbuf *sb;
int rc;
struct tp_tcp_stats v4, v6;
rc = sysctl_wire_old_buffer(req, 0);
if (rc != 0)
return (rc);
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
else
t4_tp_get_tcp_stats(sc, &v4, &v6, 0);
mtx_unlock(&sc->reg_lock);
if (rc != 0)
return (rc);
sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
if (sb == NULL)
return (ENOMEM);
sbuf_printf(sb,
" IP IPv6\n");
sbuf_printf(sb, "OutRsts: %20u %20u\n",
v4.tcp_out_rsts, v6.tcp_out_rsts);
sbuf_printf(sb, "InSegs: %20ju %20ju\n",
v4.tcp_in_segs, v6.tcp_in_segs);
sbuf_printf(sb, "OutSegs: %20ju %20ju\n",
v4.tcp_out_segs, v6.tcp_out_segs);
sbuf_printf(sb, "RetransSegs: %20ju %20ju",
v4.tcp_retrans_segs, v6.tcp_retrans_segs);
rc = sbuf_finish(sb);
sbuf_delete(sb);
return (rc);
}
/*
 * Sysctl handler: report the configured TID ranges (ATID, HPFTID, TID,
 * STID, FTID, ETID) and their in-use counts, plus the hardware's active
 * IPv4/IPv6 connection counters.
 */
static int
sysctl_tids(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	uint32_t x, y;
	struct tid_info *t = &sc->tids;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);
	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	if (t->natids) {
		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
		    t->atids_in_use);
	}

	if (t->nhpftids) {
		sbuf_printf(sb, "HPFTID range: %u-%u, in use: %u\n",
		    t->hpftid_base, t->hpftid_end, t->hpftids_in_use);
	}

	if (t->ntids) {
		bool hashen = false;

		/*
		 * If the LE hash filter is enabled the TID space is split;
		 * read the split points (register set differs by chip rev).
		 */
		mtx_lock(&sc->reg_lock);
		if (hw_off_limits(sc))
			rc = ENXIO;
		else if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
			hashen = true;
			if (chip_id(sc) <= CHELSIO_T5) {
				x = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
				y = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
			} else {
				x = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX);
				y = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE);
			}
		}
		mtx_unlock(&sc->reg_lock);
		if (rc != 0)
			goto done;

		sbuf_printf(sb, "TID range: ");
		if (hashen) {
			if (x)
				sbuf_printf(sb, "%u-%u, ", t->tid_base, x - 1);
			sbuf_printf(sb, "%u-%u", y, t->ntids - 1);
		} else {
			sbuf_printf(sb, "%u-%u", t->tid_base, t->tid_base +
			    t->ntids - 1);
		}
		sbuf_printf(sb, ", in use: %u\n",
		    atomic_load_acq_int(&t->tids_in_use));
	}

	if (t->nstids) {
		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
		    t->stid_base + t->nstids - 1, t->stids_in_use);
	}

	if (t->nftids) {
		sbuf_printf(sb, "FTID range: %u-%u, in use: %u\n", t->ftid_base,
		    t->ftid_end, t->ftids_in_use);
	}

	if (t->netids) {
		sbuf_printf(sb, "ETID range: %u-%u, in use: %u\n", t->etid_base,
		    t->etid_base + t->netids - 1, t->etids_in_use);
	}

	/* Hardware's view of active connections per address family. */
	mtx_lock(&sc->reg_lock);
	if (hw_off_limits(sc))
		rc = ENXIO;
	else {
		x = t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4);
		y = t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6);
	}
	mtx_unlock(&sc->reg_lock);
	if (rc != 0)
		goto done;
	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", x, y);
done:
	if (rc == 0)
		rc = sbuf_finish(sb);
	else
		(void)sbuf_finish(sb);
	sbuf_delete(sb);
	return (rc);
}
/*
 * Sysctl handler: report per-channel TP error counters (MAC/header/TCP
 * input errors, tunnel and offload drops).
 */
static int
sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_err_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	/* Snapshot the counters under the register lock. */
	mtx_lock(&sc->reg_lock);
	if (hw_off_limits(sc))
		rc = ENXIO;
	else
		t4_tp_get_err_stats(sc, &stats, 0);
	mtx_unlock(&sc->reg_lock);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	/* 4-channel chips get a wider table than 2-channel ones. */
	if (sc->chip_params->nchan > 2) {
		sbuf_printf(sb, " channel 0 channel 1"
		    " channel 2 channel 3\n");
		sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n",
		    stats.mac_in_errs[0], stats.mac_in_errs[1],
		    stats.mac_in_errs[2], stats.mac_in_errs[3]);
		sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n",
		    stats.hdr_in_errs[0], stats.hdr_in_errs[1],
		    stats.hdr_in_errs[2], stats.hdr_in_errs[3]);
		sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n",
		    stats.tcp_in_errs[0], stats.tcp_in_errs[1],
		    stats.tcp_in_errs[2], stats.tcp_in_errs[3]);
		sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n",
		    stats.tcp6_in_errs[0], stats.tcp6_in_errs[1],
		    stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]);
		sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n",
		    stats.tnl_cong_drops[0], stats.tnl_cong_drops[1],
		    stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]);
		sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n",
		    stats.tnl_tx_drops[0], stats.tnl_tx_drops[1],
		    stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]);
		sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n",
		    stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1],
		    stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]);
		sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n",
		    stats.ofld_chan_drops[0], stats.ofld_chan_drops[1],
		    stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]);
	} else {
		sbuf_printf(sb, " channel 0 channel 1\n");
		sbuf_printf(sb, "macInErrs: %10u %10u\n",
		    stats.mac_in_errs[0], stats.mac_in_errs[1]);
		sbuf_printf(sb, "hdrInErrs: %10u %10u\n",
		    stats.hdr_in_errs[0], stats.hdr_in_errs[1]);
		sbuf_printf(sb, "tcpInErrs: %10u %10u\n",
		    stats.tcp_in_errs[0], stats.tcp_in_errs[1]);
		sbuf_printf(sb, "tcp6InErrs: %10u %10u\n",
		    stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]);
		sbuf_printf(sb, "tnlCongDrops: %10u %10u\n",
		    stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]);
		sbuf_printf(sb, "tnlTxDrops: %10u %10u\n",
		    stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]);
		sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n",
		    stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]);
		sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n",
		    stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]);
	}

	/* These two are adapter-wide, not per channel. */
	sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u",
	    stats.ofld_no_neigh, stats.ofld_cong_defer);
	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	return (rc);
}
/*
 * Sysctl handler: report TP tunnel packet counters (OutPkts/InPkts),
 * one column per channel.  The counters are snapshotted under reg_lock
 * and formatted into an sbuf with the lock released.
 */
static int
sysctl_tnl_stats(SYSCTL_HANDLER_ARGS)
{
    struct adapter *sc = arg1;
    struct sbuf *sb;
    int rc;
    struct tp_tnl_stats stats;

    /* Wire the user buffer so the eventual copyout cannot fault. */
    rc = sysctl_wire_old_buffer(req, 0);
    if (rc != 0)
        return(rc);

    mtx_lock(&sc->reg_lock);
    if (hw_off_limits(sc))
        rc = ENXIO;    /* adapter suspended or mid-reset */
    else
        t4_tp_get_tnl_stats(sc, &stats, 1);
    mtx_unlock(&sc->reg_lock);
    if (rc != 0)
        return (rc);

    sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
    if (sb == NULL)
        return (ENOMEM);

    /* Wide (4-column) layout for parts with more than two channels. */
    if (sc->chip_params->nchan > 2) {
        sbuf_printf(sb, " channel 0 channel 1"
        " channel 2 channel 3\n");
        sbuf_printf(sb, "OutPkts: %10u %10u %10u %10u\n",
        stats.out_pkt[0], stats.out_pkt[1],
        stats.out_pkt[2], stats.out_pkt[3]);
        sbuf_printf(sb, "InPkts: %10u %10u %10u %10u",
        stats.in_pkt[0], stats.in_pkt[1],
        stats.in_pkt[2], stats.in_pkt[3]);
    } else {
        sbuf_printf(sb, " channel 0 channel 1\n");
        sbuf_printf(sb, "OutPkts: %10u %10u\n",
        stats.out_pkt[0], stats.out_pkt[1]);
        sbuf_printf(sb, "InPkts: %10u %10u",
        stats.in_pkt[0], stats.in_pkt[1]);
    }

    rc = sbuf_finish(sb);
    sbuf_delete(sb);

    return (rc);
}
/*
 * Sysctl handler (read/write): the 16-bit TP debug LA filter mask.
 * The mask occupies the top half of A_TP_DBG_LA_CONFIG; the cached copy
 * in tp_params is kept in that shifted form, so it is shifted down for
 * the user and back up on update.
 */
static int
sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS)
{
    struct adapter *sc = arg1;
    struct tp_params *tpp = &sc->params.tp;
    u_int mask;
    int rc;

    mask = tpp->la_mask >> 16;
    rc = sysctl_handle_int(oidp, &mask, 0, req);
    if (rc != 0 || req->newptr == NULL)
        return (rc);    /* read-only access or error */
    if (mask > 0xffff)
        return (EINVAL);    /* only 16 bits are available */
    mtx_lock(&sc->reg_lock);
    if (hw_off_limits(sc))
        rc = ENXIO;
    else {
        /* Update the cached mask and the hardware in one go. */
        tpp->la_mask = mask << 16;
        t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U,
        tpp->la_mask);
    }
    mtx_unlock(&sc->reg_lock);

    return (rc);
}
/*
 * Describes one bit-field inside a 64-bit debug LA word: its name, the
 * bit position of its LSB, and its width in bits.  Arrays of these are
 * terminated by an entry with a NULL name.
 */
struct field_desc {
    const char *name;
    u_int start;    /* LSB position within the 64-bit word */
    u_int width;    /* field width in bits */
};
/*
 * Decode the 64-bit value v using the NULL-terminated field descriptor
 * table f and print each "name: value" pair into the sbuf, wrapping to a
 * new (indented) line before column 79 is exceeded.
 */
static void
field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
{
    char buf[32];
    int line_size = 0;    /* running width of the current output line */

    while (f->name) {
        uint64_t mask = (1ULL << f->width) - 1;
        int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
        ((uintmax_t)v >> f->start) & mask);

        /* Wrap; the continuation indent accounts for 8 columns. */
        if (line_size + len >= 79) {
            line_size = 8;
            sbuf_printf(sb, "\n ");
        }
        sbuf_printf(sb, "%s ", buf);
        line_size += len + 1;
        f++;
    }
    sbuf_printf(sb, "\n");
}
/* Field layout of a TP debug LA word in mode 0 (also the first word of
 * each entry pair in modes 2 and 3).  NULL-name entry terminates. */
static const struct field_desc tp_la0[] = {
    { "RcfOpCodeOut", 60, 4 },
    { "State", 56, 4 },
    { "WcfState", 52, 4 },
    { "RcfOpcSrcOut", 50, 2 },
    { "CRxError", 49, 1 },
    { "ERxError", 48, 1 },
    { "SanityFailed", 47, 1 },
    { "SpuriousMsg", 46, 1 },
    { "FlushInputMsg", 45, 1 },
    { "FlushInputCpl", 44, 1 },
    { "RssUpBit", 43, 1 },
    { "RssFilterHit", 42, 1 },
    { "Tid", 32, 10 },
    { "InitTcb", 31, 1 },
    { "LineNumber", 24, 7 },
    { "Emsg", 23, 1 },
    { "EdataOut", 22, 1 },
    { "Cmsg", 21, 1 },
    { "CdataOut", 20, 1 },
    { "EreadPdu", 19, 1 },
    { "CreadPdu", 18, 1 },
    { "TunnelPkt", 17, 1 },
    { "RcfPeerFin", 16, 1 },
    { "RcfReasonOut", 12, 4 },
    { "TxCchannel", 10, 2 },
    { "RcfTxChannel", 8, 2 },
    { "RxEchannel", 6, 2 },
    { "RcfRxChannel", 5, 1 },
    { "RcfDataOutSrdy", 4, 1 },
    { "RxDvld", 3, 1 },
    { "RxOoDvld", 2, 1 },
    { "RxCongestion", 1, 1 },
    { "TxCongestion", 0, 1 },
    { NULL }
};
/* Field layout of the second TP debug LA word in mode 3 when the first
 * word's TunnelPkt bit (bit 17) is clear. */
static const struct field_desc tp_la1[] = {
    { "CplCmdIn", 56, 8 },
    { "CplCmdOut", 48, 8 },
    { "ESynOut", 47, 1 },
    { "EAckOut", 46, 1 },
    { "EFinOut", 45, 1 },
    { "ERstOut", 44, 1 },
    { "SynIn", 43, 1 },
    { "AckIn", 42, 1 },
    { "FinIn", 41, 1 },
    { "RstIn", 40, 1 },
    { "DataIn", 39, 1 },
    { "DataInVld", 38, 1 },
    { "PadIn", 37, 1 },
    { "RxBufEmpty", 36, 1 },
    { "RxDdp", 35, 1 },
    { "RxFbCongestion", 34, 1 },
    { "TxFbCongestion", 33, 1 },
    { "TxPktSumSrdy", 32, 1 },
    { "RcfUlpType", 28, 4 },
    { "Eread", 27, 1 },
    { "Ebypass", 26, 1 },
    { "Esave", 25, 1 },
    { "Static0", 24, 1 },
    { "Cread", 23, 1 },
    { "Cbypass", 22, 1 },
    { "Csave", 21, 1 },
    { "CPktOut", 20, 1 },
    { "RxPagePoolFull", 18, 2 },
    { "RxLpbkPkt", 17, 1 },
    { "TxLpbkPkt", 16, 1 },
    { "RxVfValid", 15, 1 },
    { "SynLearned", 14, 1 },
    { "SetDelEntry", 13, 1 },
    { "SetInvEntry", 12, 1 },
    { "CpcmdDvld", 11, 1 },
    { "CpcmdSave", 10, 1 },
    { "RxPstructsFull", 8, 2 },
    { "EpcmdDvld", 7, 1 },
    { "EpcmdFlush", 6, 1 },
    { "EpcmdTrimPrefix", 5, 1 },
    { "EpcmdTrimPostfix", 4, 1 },
    { "ERssIp4Pkt", 3, 1 },
    { "ERssIp6Pkt", 2, 1 },
    { "ERssTcpUdpPkt", 1, 1 },
    { "ERssFceFipPkt", 0, 1 },
    { NULL }
};
/* Field layout of the second TP debug LA word in mode 3 when the first
 * word's TunnelPkt bit (bit 17) is set.  Differs from tp_la1 only in the
 * top bits (MPS PF/VF info instead of CPL/TCP flag bits). */
static const struct field_desc tp_la2[] = {
    { "CplCmdIn", 56, 8 },
    { "MpsVfVld", 55, 1 },
    { "MpsPf", 52, 3 },
    { "MpsVf", 44, 8 },
    { "SynIn", 43, 1 },
    { "AckIn", 42, 1 },
    { "FinIn", 41, 1 },
    { "RstIn", 40, 1 },
    { "DataIn", 39, 1 },
    { "DataInVld", 38, 1 },
    { "PadIn", 37, 1 },
    { "RxBufEmpty", 36, 1 },
    { "RxDdp", 35, 1 },
    { "RxFbCongestion", 34, 1 },
    { "TxFbCongestion", 33, 1 },
    { "TxPktSumSrdy", 32, 1 },
    { "RcfUlpType", 28, 4 },
    { "Eread", 27, 1 },
    { "Ebypass", 26, 1 },
    { "Esave", 25, 1 },
    { "Static0", 24, 1 },
    { "Cread", 23, 1 },
    { "Cbypass", 22, 1 },
    { "Csave", 21, 1 },
    { "CPktOut", 20, 1 },
    { "RxPagePoolFull", 18, 2 },
    { "RxLpbkPkt", 17, 1 },
    { "TxLpbkPkt", 16, 1 },
    { "RxVfValid", 15, 1 },
    { "SynLearned", 14, 1 },
    { "SetDelEntry", 13, 1 },
    { "SetInvEntry", 12, 1 },
    { "CpcmdDvld", 11, 1 },
    { "CpcmdSave", 10, 1 },
    { "RxPstructsFull", 8, 2 },
    { "EpcmdDvld", 7, 1 },
    { "EpcmdFlush", 6, 1 },
    { "EpcmdTrimPrefix", 5, 1 },
    { "EpcmdTrimPostfix", 4, 1 },
    { "ERssIp4Pkt", 3, 1 },
    { "ERssIp6Pkt", 2, 1 },
    { "ERssTcpUdpPkt", 1, 1 },
    { "ERssFceFipPkt", 0, 1 },
    { NULL }
};
/*
 * Print one TP debug LA entry in mode-0 format (single 64-bit word per
 * entry).  idx is unused here but keeps the signature shared with the
 * other show functions.
 */
static void
tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
{
    field_desc_show(sb, *p, tp_la0);
}
/*
 * Print one mode-2 TP debug LA entry (a pair of 64-bit words, both in
 * tp_la0 layout).  An all-ones second word in the final entry marks an
 * unused slot and is suppressed.
 */
static void
tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
{
    if (idx)
        sbuf_printf(sb, "\n");    /* blank line between entries */
    field_desc_show(sb, p[0], tp_la0);
    if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
        field_desc_show(sb, p[1], tp_la0);
}
/*
 * Print one mode-3 TP debug LA entry (a pair of 64-bit words).  The
 * layout of the second word depends on bit 17 (TunnelPkt) of the first:
 * tp_la2 for tunnelled packets, tp_la1 otherwise.  An all-ones second
 * word in the final entry marks an unused slot and is suppressed.
 */
static void
tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
{
    if (idx)
        sbuf_printf(sb, "\n");    /* blank line between entries */
    field_desc_show(sb, p[0], tp_la0);
    if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
        field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
}
/*
 * Sysctl handler: dump the TP debug logic analyzer.  The LA is read into
 * a temporary buffer under reg_lock; the decode format (entry stride and
 * show function) is picked from the DBGLAMODE field of the LA config
 * register sampled at the same time.
 */
static int
sysctl_tp_la(SYSCTL_HANDLER_ARGS)
{
    struct adapter *sc = arg1;
    struct sbuf *sb;
    uint64_t *buf, *p;
    int rc;
    u_int i, inc;    /* inc = words per LA entry (1 or 2) */
    void (*show_func)(struct sbuf *, uint64_t *, int);

    rc = sysctl_wire_old_buffer(req, 0);
    if (rc != 0)
        return (rc);

    sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
    if (sb == NULL)
        return (ENOMEM);

    buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);

    mtx_lock(&sc->reg_lock);
    if (hw_off_limits(sc))
        rc = ENXIO;
    else {
        t4_tp_read_la(sc, buf, NULL);
        /* inc/show_func are only used below when rc stays 0. */
        switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
        case 2:
            inc = 2;
            show_func = tp_la_show2;
            break;
        case 3:
            inc = 2;
            show_func = tp_la_show3;
            break;
        default:
            inc = 1;
            show_func = tp_la_show;
        }
    }
    mtx_unlock(&sc->reg_lock);
    if (rc != 0)
        goto done;

    p = buf;
    for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
        (*show_func)(sb, p, i);
    rc = sbuf_finish(sb);
done:
    sbuf_delete(sb);
    free(buf, M_CXGBE);
    return (rc);
}
/*
 * Sysctl handler: report per-channel NIC and offload transmit byte rates
 * as sampled by the hardware.
 */
static int
sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
{
    struct adapter *sc = arg1;
    struct sbuf *sb;
    int rc;
    u64 nrate[MAX_NCHAN], orate[MAX_NCHAN];

    rc = sysctl_wire_old_buffer(req, 0);
    if (rc != 0)
        return (rc);

    mtx_lock(&sc->reg_lock);
    if (hw_off_limits(sc))
        rc = ENXIO;
    else
        t4_get_chan_txrate(sc, nrate, orate);
    mtx_unlock(&sc->reg_lock);
    if (rc != 0)
        return (rc);

    sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
    if (sb == NULL)
        return (ENOMEM);

    /* Four columns for >2-channel parts, two otherwise. */
    if (sc->chip_params->nchan > 2) {
        sbuf_printf(sb, " channel 0 channel 1"
        " channel 2 channel 3\n");
        sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n",
        nrate[0], nrate[1], nrate[2], nrate[3]);
        sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
        orate[0], orate[1], orate[2], orate[3]);
    } else {
        sbuf_printf(sb, " channel 0 channel 1\n");
        sbuf_printf(sb, "NIC B/s: %10ju %10ju\n",
        nrate[0], nrate[1]);
        sbuf_printf(sb, "Offload B/s: %10ju %10ju",
        orate[0], orate[1]);
    }

    rc = sbuf_finish(sb);
    sbuf_delete(sb);

    return (rc);
}
/*
 * Sysctl handler: dump the ULP-RX logic analyzer.  Each LA entry is 8
 * 32-bit words; they are printed as Pcmd/Type/Message/Data columns with
 * the multi-word values shown most-significant word first.
 */
static int
sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
{
    struct adapter *sc = arg1;
    struct sbuf *sb;
    uint32_t *buf, *p;
    int rc, i;

    rc = sysctl_wire_old_buffer(req, 0);
    if (rc != 0)
        return (rc);

    sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
    if (sb == NULL)
        return (ENOMEM);

    buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
    M_ZERO | M_WAITOK);

    mtx_lock(&sc->reg_lock);
    if (hw_off_limits(sc))
        rc = ENXIO;
    else
        t4_ulprx_read_la(sc, buf);
    mtx_unlock(&sc->reg_lock);
    if (rc != 0)
        goto done;

    p = buf;
    sbuf_printf(sb, " Pcmd Type Message"
    " Data");
    for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
        /* Words are stored little-end first; print high word first. */
        sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x",
        p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
    }

    rc = sbuf_finish(sb);
done:
    sbuf_delete(sb);
    free(buf, M_CXGBE);
    return (rc);
}
/*
 * Sysctl handler: report SGE write-combined work request statistics
 * (T5 and later only).  Output is produced only when the stat source
 * register is configured for source 7; the meaning of the two counters
 * depends on the selected stat mode.
 */
static int
sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
{
    struct adapter *sc = arg1;
    struct sbuf *sb;
    int rc;
    uint32_t cfg, s1, s2;

    MPASS(chip_id(sc) >= CHELSIO_T5);

    rc = sysctl_wire_old_buffer(req, 0);
    if (rc != 0)
        return (rc);

    mtx_lock(&sc->reg_lock);
    if (hw_off_limits(sc))
        rc = ENXIO;
    else {
        cfg = t4_read_reg(sc, A_SGE_STAT_CFG);
        s1 = t4_read_reg(sc, A_SGE_STAT_TOTAL);
        s2 = t4_read_reg(sc, A_SGE_STAT_MATCH);
    }
    mtx_unlock(&sc->reg_lock);
    if (rc != 0)
        return (rc);

    sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
    if (sb == NULL)
        return (ENOMEM);

    if (G_STATSOURCE_T5(cfg) == 7) {
        int mode;

        /* The mode field moved between T5 and T6. */
        mode = is_t5(sc) ? G_STATMODE(cfg) : G_T6_STATMODE(cfg);
        if (mode == 0)
            sbuf_printf(sb, "total %d, incomplete %d", s1, s2);
        else if (mode == 1)
            sbuf_printf(sb, "total %d, data overflow %d", s1, s2);
        else
            sbuf_printf(sb, "unknown mode %d", mode);
    }
    rc = sbuf_finish(sb);
    sbuf_delete(sb);

    return (rc);
}
static int
sysctl_cpus(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
enum cpu_sets op = arg2;
cpuset_t cpuset;
struct sbuf *sb;
int i, rc;
MPASS(op == LOCAL_CPUS || op == INTR_CPUS);
CPU_ZERO(&cpuset);
rc = bus_get_cpus(sc->dev, op, sizeof(cpuset), &cpuset);
if (rc != 0)
return (rc);
rc = sysctl_wire_old_buffer(req, 0);
if (rc != 0)
return (rc);
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
CPU_FOREACH(i)
sbuf_printf(sb, "%d ", i);
rc = sbuf_finish(sb);
sbuf_delete(sb);
return (rc);
}
/*
 * Sysctl handler (read/write): reading returns the number of adapter
 * resets performed so far; writing 0 clears that counter and writing 1
 * schedules an asynchronous reset via the reset taskqueue.  Any other
 * value is rejected.
 */
static int
sysctl_reset(SYSCTL_HANDLER_ARGS)
{
    struct adapter *sc = arg1;
    u_int val;
    int rc;

    val = atomic_load_int(&sc->num_resets);
    rc = sysctl_handle_int(oidp, &val, 0, req);
    if (rc != 0 || req->newptr == NULL)
        return (rc);

    if (val == 0) {
        /* Zero out the counter that tracks reset. */
        atomic_store_int(&sc->num_resets, 0);
        return (0);
    }

    if (val != 1)
        return (EINVAL);    /* 0 or 1 are the only legal values */

    if (hw_off_limits(sc))    /* harmless race */
        return (EALREADY);

    taskqueue_enqueue(reset_tq, &sc->reset_task);
    return (0);
}
#ifdef TCP_OFFLOAD
/*
 * Sysctl handler (read/write): enable/disable TOE TLS offload.  Enabling
 * requires the firmware to advertise the TLSKEYS capability.  When the
 * setting changes, the freelist buffer sizes of every initialized VI are
 * recalculated since TLS traffic uses different buffer layouts.
 */
static int
sysctl_tls(SYSCTL_HANDLER_ARGS)
{
    struct adapter *sc = arg1;
    int i, j, v, rc;
    struct vi_info *vi;

    v = sc->tt.tls;
    rc = sysctl_handle_int(oidp, &v, 0, req);
    if (rc != 0 || req->newptr == NULL)
        return (rc);

    if (v != 0 && !(sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS))
        return (ENOTSUP);

    /* Serialize against other configuration changes. */
    rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4stls");
    if (rc)
        return (rc);
    if (hw_off_limits(sc))
        rc = ENXIO;
    else {
        sc->tt.tls = !!v;
        for_each_port(sc, i) {
            for_each_vi(sc->port[i], j, vi) {
                if (vi->flags & VI_INIT_DONE)
                    t4_update_fl_bufsize(vi->ifp);
            }
        }
    }
    end_synchronized_op(sc, 0);

    return (rc);
}
/*
 * Format val/factor into buf as a decimal string.  If the division is
 * exact only the integral part is emitted; otherwise the remainder is
 * appended after a '.', with its trailing zeroes stripped (so 1500000
 * with factor 1000000 formats as "1.5").
 */
static void
unit_conv(char *buf, size_t len, u_int val, u_int factor)
{
    u_int whole = val / factor;
    u_int frac = val % factor;

    if (frac == 0) {
        snprintf(buf, len, "%u", whole);
        return;
    }
    /* Drop trailing zeroes of the remainder before printing it. */
    while (frac % 10 == 0)
        frac /= 10;
    snprintf(buf, len, "%u.%u", whole, frac);
}
/*
 * Sysctl handler: report one of the TP tick durations (arg2 selects the
 * core timer tick, TCP timestamp tick, or delayed-ACK tick) in
 * microseconds, formatted as a decimal string.
 */
static int
sysctl_tp_tick(SYSCTL_HANDLER_ARGS)
{
    struct adapter *sc = arg1;
    char buf[16];
    u_int res, re;
    u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;    /* core clock period, ps */

    mtx_lock(&sc->reg_lock);
    if (hw_off_limits(sc))
        res = (u_int)-1;    /* sentinel: register unreadable */
    else
        res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
    mtx_unlock(&sc->reg_lock);
    if (res == (u_int)-1)
        return (ENXIO);

    switch (arg2) {
    case 0:
        /* timer_tick */
        re = G_TIMERRESOLUTION(res);
        break;
    case 1:
        /* TCP timestamp tick */
        re = G_TIMESTAMPRESOLUTION(res);
        break;
    case 2:
        /* DACK tick */
        re = G_DELAYEDACKRESOLUTION(res);
        break;
    default:
        return (EDOOFUS);
    }

    /* tick = cclk period * 2^resolution; convert ps -> us. */
    unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000);

    return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
}
/*
 * Sysctl handler: report the delayed-ACK timer value in microseconds,
 * computed as (DACK tick in us) * (DACK timer register value).
 */
static int
sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS)
{
    struct adapter *sc = arg1;
    int rc;
    u_int dack_tmr, dack_re, v;
    u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;    /* core clock period, ps */

    mtx_lock(&sc->reg_lock);
    if (hw_off_limits(sc))
        rc = ENXIO;
    else {
        rc = 0;
        dack_re = G_DELAYEDACKRESOLUTION(t4_read_reg(sc,
        A_TP_TIMER_RESOLUTION));
        dack_tmr = t4_read_reg(sc, A_TP_DACK_TIMER);
    }
    mtx_unlock(&sc->reg_lock);
    if (rc != 0)
        return (rc);

    /* tick (us) = cclk period (ps) * 2^dack_re / 1e6 */
    v = ((cclk_ps << dack_re) / 1000000) * dack_tmr;

    return (sysctl_handle_int(oidp, &v, 0, req));
}
/*
 * Sysctl handler: report one of the TP timer registers (arg2 selects the
 * register) converted from timer ticks to microseconds.  A_TP_INIT_SRTT
 * packs the SRTT value in a sub-field and is extracted specially.
 */
static int
sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
{
    struct adapter *sc = arg1;
    int rc, reg = arg2;
    u_int tre;
    u_long tp_tick_us, v;
    u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;    /* core clock period, ps */

    MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX ||
    reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX ||
    reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL ||
    reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER);

    mtx_lock(&sc->reg_lock);
    if (hw_off_limits(sc))
        rc = ENXIO;
    else {
        rc = 0;
        tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION));
        tp_tick_us = (cclk_ps << tre) / 1000000;
        if (reg == A_TP_INIT_SRTT)
            v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg));
        else
            v = tp_tick_us * t4_read_reg(sc, reg);
    }
    mtx_unlock(&sc->reg_lock);
    if (rc != 0)
        return (rc);
    else
        return (sysctl_handle_long(oidp, &v, 0, req));
}
/*
 * All fields in TP_SHIFT_CNT are 4b and the starting location of the field is
 * passed to this function.
 */
static int
sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS)
{
    struct adapter *sc = arg1;
    int rc, idx = arg2;    /* bit offset of the 4-bit field within the register */
    u_int v;

    MPASS(idx >= 0 && idx <= 24);

    mtx_lock(&sc->reg_lock);
    if (hw_off_limits(sc))
        rc = ENXIO;
    else {
        rc = 0;
        /* Extract the 4-bit field starting at bit 'idx'. */
        v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf;
    }
    mtx_unlock(&sc->reg_lock);
    if (rc != 0)
        return (rc);
    else
        return (sysctl_handle_int(oidp, &v, 0, req));
}
/*
 * Sysctl handler: report one of the 16 TCP backoff index values.  They
 * are packed four per 32-bit register starting at
 * A_TP_TCP_BACKOFF_REG0, one byte each.
 */
static int
sysctl_tp_backoff(SYSCTL_HANDLER_ARGS)
{
    struct adapter *sc = arg1;
    int rc, idx = arg2;    /* which of the 16 backoff entries */
    u_int shift, v, r;

    MPASS(idx >= 0 && idx < 16);

    /* Four entries per register; each entry is 8 bits wide. */
    r = A_TP_TCP_BACKOFF_REG0 + (idx & ~3);
    shift = (idx & 3) << 3;

    mtx_lock(&sc->reg_lock);
    if (hw_off_limits(sc))
        rc = ENXIO;
    else {
        rc = 0;
        v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0;
    }
    mtx_unlock(&sc->reg_lock);
    if (rc != 0)
        return (rc);
    else
        return (sysctl_handle_int(oidp, &v, 0, req));
}
/*
 * Sysctl handler (read/write): interrupt holdoff timer index for the
 * VI's offload rx queues.  A new value is pushed into every offload rx
 * queue's interrupt parameters (atomically where the platform allows)
 * and cached in the VI.
 */
static int
sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS)
{
    struct vi_info *vi = arg1;
    struct adapter *sc = vi->adapter;
    int idx, rc, i;
    struct sge_ofld_rxq *ofld_rxq;
    uint8_t v;

    idx = vi->ofld_tmr_idx;
    rc = sysctl_handle_int(oidp, &idx, 0, req);
    if (rc != 0 || req->newptr == NULL)
        return (rc);

    if (idx < 0 || idx >= SGE_NTIMERS)
        return (EINVAL);

    rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
    "t4otmr");
    if (rc)
        return (rc);

    /* Preserve the packet-count enable bit while changing the timer. */
    v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->ofld_pktc_idx != -1);
    for_each_ofld_rxq(vi, i, ofld_rxq) {
#ifdef atomic_store_rel_8
        atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
#else
        ofld_rxq->iq.intr_params = v;
#endif
    }
    vi->ofld_tmr_idx = idx;

    end_synchronized_op(sc, LOCK_HELD);
    return (0);
}
/*
 * Sysctl handler (read/write): interrupt holdoff packet-count index for
 * the VI's offload rx queues.  -1 disables packet-count based holdoff.
 * Unlike the timer index, this can only be changed before the VI's
 * queues are created.
 */
static int
sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS)
{
    struct vi_info *vi = arg1;
    struct adapter *sc = vi->adapter;
    int idx, rc;

    idx = vi->ofld_pktc_idx;
    rc = sysctl_handle_int(oidp, &idx, 0, req);
    if (rc != 0 || req->newptr == NULL)
        return (rc);

    if (idx < -1 || idx >= SGE_NCOUNTERS)
        return (EINVAL);

    rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
    "t4opktc");
    if (rc)
        return (rc);

    if (vi->flags & VI_INIT_DONE)
        rc = EBUSY; /* cannot be changed once the queues are created */
    else
        vi->ofld_pktc_idx = idx;

    end_synchronized_op(sc, LOCK_HELD);
    return (rc);
}
#endif
/*
 * Ioctl helper: read an SGE context (egress, ingress, freelist manager,
 * or congestion manager) into cntxt->data.  Prefers the firmware
 * mailbox read; falls back to a direct backdoor register read if the
 * firmware is unavailable or the mailbox read fails.
 */
static int
get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
{
    int rc;

    if (cntxt->cid > M_CTXTQID)
        return (EINVAL);

    if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
        return (EINVAL);

    rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
    if (rc)
        return (rc);

    if (hw_off_limits(sc)) {
        rc = ENXIO;
        goto done;
    }

    if (sc->flags & FW_OK) {
        rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
        &cntxt->data[0]);
        if (rc == 0)
            goto done;
    }

    /*
     * Read via firmware failed or wasn't even attempted. Read directly via
     * the backdoor.
     */
    rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
done:
    end_synchronized_op(sc, 0);
    return (rc);
}
/*
 * Ioctl helper: copy a firmware image in from userspace and write it to
 * the adapter's flash.  Refused on a fully initialized adapter unless
 * the DF_LOAD_FW_ANYTIME debug flag is set.
 */
static int
load_fw(struct adapter *sc, struct t4_data *fw)
{
    int rc;
    uint8_t *fw_data;

    rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
    if (rc)
        return (rc);

    if (hw_off_limits(sc)) {
        rc = ENXIO;
        goto done;
    }

    /*
     * The firmware, with the sole exception of the memory parity error
     * handler, runs from memory and not flash. It is almost always safe to
     * install a new firmware on a running system. Just set bit 1 in
     * hw.cxgbe.dflags or dev.<nexus>.<n>.dflags first.
     */
    if (sc->flags & FULL_INIT_DONE &&
    (sc->debug_flags & DF_LOAD_FW_ANYTIME) == 0) {
        rc = EBUSY;
        goto done;
    }

    fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);

    rc = copyin(fw->data, fw_data, fw->len);
    if (rc == 0)
        rc = -t4_load_fw(sc, fw_data, fw->len);

    free(fw_data, M_CXGBE);
done:
    end_synchronized_op(sc, 0);
    return (rc);
}
/*
 * Ioctl helper: write a configuration file to the adapter's flash, or
 * erase the stored configuration when cfg->len is 0.
 */
static int
load_cfg(struct adapter *sc, struct t4_data *cfg)
{
    int rc;
    uint8_t *cfg_data = NULL;

    rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
    if (rc)
        return (rc);

    if (hw_off_limits(sc)) {
        rc = ENXIO;
        goto done;
    }

    if (cfg->len == 0) {
        /* clear */
        rc = -t4_load_cfg(sc, NULL, 0);
        goto done;
    }

    cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK);

    rc = copyin(cfg->data, cfg_data, cfg->len);
    if (rc == 0)
        rc = -t4_load_cfg(sc, cfg_data, cfg->len);

    free(cfg_data, M_CXGBE);
done:
    end_synchronized_op(sc, 0);
    return (rc);
}
/*
 * Ioctl helper: write a boot ROM image to flash (capped at 1MB), or
 * erase it when br->len is 0.  The flash offset is either looked up
 * from a PF's expansion-ROM register (pf_offset == 0, pfidx_addr is the
 * PF number) or given directly (pf_offset == 1, pfidx_addr is the
 * offset).
 */
static int
load_boot(struct adapter *sc, struct t4_bootrom *br)
{
    int rc;
    uint8_t *br_data = NULL;
    u_int offset;

    if (br->len > 1024 * 1024)
        return (EFBIG);

    if (br->pf_offset == 0) {
        /* pfidx */
        if (br->pfidx_addr > 7)
            return (EINVAL);
        offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr,
        A_PCIE_PF_EXPROM_OFST)));
    } else if (br->pf_offset == 1) {
        /* offset */
        offset = G_OFFSET(br->pfidx_addr);
    } else {
        return (EINVAL);
    }

    rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldbr");
    if (rc)
        return (rc);

    if (hw_off_limits(sc)) {
        rc = ENXIO;
        goto done;
    }

    if (br->len == 0) {
        /* clear */
        rc = -t4_load_boot(sc, NULL, offset, 0);
        goto done;
    }

    br_data = malloc(br->len, M_CXGBE, M_WAITOK);

    rc = copyin(br->data, br_data, br->len);
    if (rc == 0)
        rc = -t4_load_boot(sc, br_data, offset, br->len);

    free(br_data, M_CXGBE);
done:
    end_synchronized_op(sc, 0);
    return (rc);
}
/*
 * Ioctl helper: write a boot configuration to flash, or erase the
 * stored boot configuration when bc->len is 0.
 */
static int
load_bootcfg(struct adapter *sc, struct t4_data *bc)
{
    int rc;
    uint8_t *bc_data = NULL;

    rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
    if (rc)
        return (rc);

    if (hw_off_limits(sc)) {
        rc = ENXIO;
        goto done;
    }

    if (bc->len == 0) {
        /* clear */
        rc = -t4_load_bootcfg(sc, NULL, 0);
        goto done;
    }

    bc_data = malloc(bc->len, M_CXGBE, M_WAITOK);

    rc = copyin(bc->data, bc_data, bc->len);
    if (rc == 0)
        rc = -t4_load_bootcfg(sc, bc_data, bc->len);

    free(bc_data, M_CXGBE);
done:
    end_synchronized_op(sc, 0);
    return (rc);
}
/*
 * Ioctl helper: collect a cudbg debug dump of the regions selected in
 * dump->bitmap into a kernel buffer and copy it out to userspace (or,
 * with wr_flash set, let cudbg write it to flash).  dump->len is updated
 * with the actual size collected.
 */
static int
cudbg_dump(struct adapter *sc, struct t4_cudbg_dump *dump)
{
    int rc;
    struct cudbg_init *cudbg;
    void *handle, *buf;

    /* buf is large, don't block if no memory is available */
    buf = malloc(dump->len, M_CXGBE, M_NOWAIT | M_ZERO);
    if (buf == NULL)
        return (ENOMEM);

    handle = cudbg_alloc_handle();
    if (handle == NULL) {
        rc = ENOMEM;
        goto done;
    }

    cudbg = cudbg_get_init(handle);
    cudbg->adap = sc;
    cudbg->print = (cudbg_print_cb)printf;

#ifndef notyet
    device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n",
    __func__, dump->wr_flash, dump->len, dump->data);
#endif

    if (dump->wr_flash)
        cudbg->use_flash = 1;
    MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap));
    memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap));

    rc = cudbg_collect(handle, buf, &dump->len);
    if (rc != 0)
        goto done;

    rc = copyout(buf, dump->data, dump->len);
done:
    cudbg_free_handle(handle);
    free(buf, M_CXGBE);
    return (rc);
}
/*
 * Release an offload policy and everything it owns: each rule's BPF
 * filter instructions, the rule array, and the policy structure itself.
 * A NULL policy is a no-op.
 */
static void
free_offload_policy(struct t4_offload_policy *op)
{
    int n;

    if (op == NULL)
        return;

    for (n = 0; n < op->nrules; n++)
        free(op->rule[n].bpf_prog.bf_insns, M_CXGBE);
    free(op->rule, M_CXGBE);
    free(op, M_CXGBE);
}
static int
set_offload_policy(struct adapter *sc, struct t4_offload_policy *uop)
{
int i, rc, len;
struct t4_offload_policy *op, *old;
struct bpf_program *bf;
const struct offload_settings *s;
struct offload_rule *r;
void *u;
if (!is_offload(sc))
return (ENODEV);
if (uop->nrules == 0) {
/* Delete installed policies. */
op = NULL;
goto set_policy;
} else if (uop->nrules > 256) { /* arbitrary */
return (E2BIG);
}
/* Copy userspace offload policy to kernel */
op = malloc(sizeof(*op), M_CXGBE, M_ZERO | M_WAITOK);
op->nrules = uop->nrules;
len = op->nrules * sizeof(struct offload_rule);
op->rule = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
rc = copyin(uop->rule, op->rule, len);
if (rc) {
free(op->rule, M_CXGBE);
free(op, M_CXGBE);
return (rc);
}
r = &op->rule[0];
for (i = 0; i < op->nrules; i++, r++) {
/* Validate open_type */
if (r->open_type != OPEN_TYPE_LISTEN &&
r->open_type != OPEN_TYPE_ACTIVE &&
r->open_type != OPEN_TYPE_PASSIVE &&
r->open_type != OPEN_TYPE_DONTCARE) {
error:
/*
* Rules 0 to i have malloc'd filters that need to be
* freed. Rules i+1 to nrules have userspace pointers
* and should be left alone.
*/
op->nrules = i;
free_offload_policy(op);
return (rc);
}
/* Validate settings */
s = &r->settings;
if ((s->offload != 0 && s->offload != 1) ||
s->cong_algo < -1 || s->cong_algo > CONG_ALG_HIGHSPEED ||
s->sched_class < -1 ||
s->sched_class >= sc->params.nsched_cls) {
rc = EINVAL;
goto error;
}
bf = &r->bpf_prog;
u = bf->bf_insns; /* userspace ptr */
bf->bf_insns = NULL;
if (bf->bf_len == 0) {
/* legal, matches everything */
continue;
}
len = bf->bf_len * sizeof(*bf->bf_insns);
bf->bf_insns = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
rc = copyin(u, bf->bf_insns, len);
if (rc != 0)
goto error;
if (!bpf_validate(bf->bf_insns, bf->bf_len)) {
rc = EINVAL;
goto error;
}
}
set_policy:
rw_wlock(&sc->policy_lock);
old = sc->policy;
sc->policy = op;
rw_wunlock(&sc->policy_lock);
free_offload_policy(old);
return (0);
}
#define MAX_READ_BUF_SIZE (128 * 1024)
/*
 * Ioctl helper: read a range of card memory in MAX_READ_BUF_SIZE chunks
 * through memory window 2 and copy it out to the user buffer.  The
 * requested range is validated against the card's memory map first.
 */
static int
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
{
    uint32_t addr, remaining, n;
    uint32_t *buf;
    int rc;
    uint8_t *dst;    /* userspace destination, advanced per chunk */

    mtx_lock(&sc->reg_lock);
    if (hw_off_limits(sc))
        rc = ENXIO;
    else
        rc = validate_mem_range(sc, mr->addr, mr->len);
    mtx_unlock(&sc->reg_lock);
    if (rc != 0)
        return (rc);

    buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK);
    addr = mr->addr;
    remaining = mr->len;
    dst = (void *)mr->data;

    while (remaining) {
        n = min(remaining, MAX_READ_BUF_SIZE);
        mtx_lock(&sc->reg_lock);
        if (hw_off_limits(sc))
            rc = ENXIO;
        else
            read_via_memwin(sc, 2, addr, buf, n);
        mtx_unlock(&sc->reg_lock);
        if (rc != 0)
            break;

        /* Copy out with the lock dropped; copyout may fault. */
        rc = copyout(buf, dst, n);
        if (rc != 0)
            break;

        dst += n;
        remaining -= n;
        addr += n;
    }

    free(buf, M_CXGBE);
    return (rc);
}
#undef MAX_READ_BUF_SIZE
/*
 * Ioctl helper: read from an I2C device on one of the ports (e.g. a
 * transceiver EEPROM) via the firmware, into i2cd->data.
 */
static int
read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
{
    int rc;

    if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
        return (EINVAL);

    if (i2cd->len > sizeof(i2cd->data))
        return (EFBIG);

    rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
    if (rc)
        return (rc);
    if (hw_off_limits(sc))
        rc = ENXIO;
    else
        rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
        i2cd->offset, i2cd->len, &i2cd->data[0]);
    end_synchronized_op(sc, 0);

    return (rc);
}
/*
 * Ioctl helper: clear all statistics associated with a port — the MAC
 * counters, per-VI hardware counters, the TP tunnel-congestion-drop MIB
 * counters for the port's rx channels, and every software counter kept
 * by the port's rx/tx/offload/control queues.
 */
static int
clear_stats(struct adapter *sc, u_int port_id)
{
    int i, v, chan_map;
    struct port_info *pi;
    struct vi_info *vi;
    struct sge_rxq *rxq;
    struct sge_txq *txq;
    struct sge_wrq *wrq;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
    struct sge_ofld_txq *ofld_txq;
#endif
#ifdef TCP_OFFLOAD
    struct sge_ofld_rxq *ofld_rxq;
#endif

    if (port_id >= sc->params.nports)
        return (EINVAL);
    pi = sc->port[port_id];
    if (pi == NULL)
        return (EIO);

    mtx_lock(&sc->reg_lock);
    if (!hw_off_limits(sc)) {
        /* MAC stats */
        t4_clr_port_stats(sc, pi->tx_chan);
        if (is_t6(sc)) {
            /* T6 FCS errors are tracked relative to a base value. */
            if (pi->fcs_reg != -1)
                pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg);
            else
                pi->stats.rx_fcs_err = 0;
        }
        for_each_vi(pi, v, vi) {
            if (vi->flags & VI_INIT_DONE)
                t4_clr_vi_stats(sc, vi->vin);
        }
        /* Clear the TP congestion-drop MIB for each rx channel. */
        chan_map = pi->rx_e_chan_map;
        v = 0; /* reuse */
        while (chan_map) {
            i = ffs(chan_map) - 1;
            t4_write_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
            1, A_TP_MIB_TNL_CNG_DROP_0 + i);
            chan_map &= ~(1 << i);
        }
    }
    mtx_unlock(&sc->reg_lock);
    pi->tx_parse_error = 0;
    pi->tnl_cong_drops = 0;

    /*
     * Since this command accepts a port, clear stats for
     * all VIs on this port.
     */
    for_each_vi(pi, v, vi) {
        if (vi->flags & VI_INIT_DONE) {
            for_each_rxq(vi, i, rxq) {
#if defined(INET) || defined(INET6)
                rxq->lro.lro_queued = 0;
                rxq->lro.lro_flushed = 0;
#endif
                rxq->rxcsum = 0;
                rxq->vlan_extraction = 0;
                rxq->vxlan_rxcsum = 0;

                rxq->fl.cl_allocated = 0;
                rxq->fl.cl_recycled = 0;
                rxq->fl.cl_fast_recycled = 0;
            }

            for_each_txq(vi, i, txq) {
                txq->txcsum = 0;
                txq->tso_wrs = 0;
                txq->vlan_insertion = 0;
                txq->imm_wrs = 0;
                txq->sgl_wrs = 0;
                txq->txpkt_wrs = 0;
                txq->txpkts0_wrs = 0;
                txq->txpkts1_wrs = 0;
                txq->txpkts0_pkts = 0;
                txq->txpkts1_pkts = 0;
                txq->txpkts_flush = 0;
                txq->raw_wrs = 0;
                txq->vxlan_tso_wrs = 0;
                txq->vxlan_txcsum = 0;
                txq->kern_tls_records = 0;
                txq->kern_tls_short = 0;
                txq->kern_tls_partial = 0;
                txq->kern_tls_full = 0;
                txq->kern_tls_octets = 0;
                txq->kern_tls_waste = 0;
                txq->kern_tls_options = 0;
                txq->kern_tls_header = 0;
                txq->kern_tls_fin = 0;
                txq->kern_tls_fin_short = 0;
                txq->kern_tls_cbc = 0;
                txq->kern_tls_gcm = 0;
                mp_ring_reset_stats(txq->r);
            }

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
            for_each_ofld_txq(vi, i, ofld_txq) {
                ofld_txq->wrq.tx_wrs_direct = 0;
                ofld_txq->wrq.tx_wrs_copied = 0;
                counter_u64_zero(ofld_txq->tx_iscsi_pdus);
                counter_u64_zero(ofld_txq->tx_iscsi_octets);
                counter_u64_zero(ofld_txq->tx_iscsi_iso_wrs);
                counter_u64_zero(ofld_txq->tx_aio_jobs);
                counter_u64_zero(ofld_txq->tx_aio_octets);
                counter_u64_zero(ofld_txq->tx_toe_tls_records);
                counter_u64_zero(ofld_txq->tx_toe_tls_octets);
            }
#endif
#ifdef TCP_OFFLOAD
            for_each_ofld_rxq(vi, i, ofld_rxq) {
                ofld_rxq->fl.cl_allocated = 0;
                ofld_rxq->fl.cl_recycled = 0;
                ofld_rxq->fl.cl_fast_recycled = 0;
                counter_u64_zero(
                ofld_rxq->rx_iscsi_ddp_setup_ok);
                counter_u64_zero(
                ofld_rxq->rx_iscsi_ddp_setup_error);
                ofld_rxq->rx_iscsi_ddp_pdus = 0;
                ofld_rxq->rx_iscsi_ddp_octets = 0;
                ofld_rxq->rx_iscsi_fl_pdus = 0;
                ofld_rxq->rx_iscsi_fl_octets = 0;
                ofld_rxq->rx_aio_ddp_jobs = 0;
                ofld_rxq->rx_aio_ddp_octets = 0;
                ofld_rxq->rx_toe_tls_records = 0;
                ofld_rxq->rx_toe_tls_octets = 0;
                ofld_rxq->rx_toe_ddp_octets = 0;
                counter_u64_zero(ofld_rxq->ddp_buffer_alloc);
                counter_u64_zero(ofld_rxq->ddp_buffer_reuse);
                counter_u64_zero(ofld_rxq->ddp_buffer_free);
            }
#endif

            /* The control queue is shared; clear it via the main VI. */
            if (IS_MAIN_VI(vi)) {
                wrq = &sc->sge.ctrlq[pi->port_id];
                wrq->tx_wrs_direct = 0;
                wrq->tx_wrs_copied = 0;
            }
        }
    }

    return (0);
}
/*
 * Take a reference on (creating if necessary) the CLIP table entry for
 * the IPv6 address in ca.  Returns 0 on success, EIO when no entry
 * could be obtained, and ENOTSUP on kernels built without INET6.
 */
static int
hold_clip_addr(struct adapter *sc, struct t4_clip_addr *ca)
{
#ifdef INET6
    struct in6_addr addr6;

    memcpy(&addr6.s6_addr[0], &ca->addr[0], sizeof(addr6.s6_addr));
    return (t4_get_clip_entry(sc, &addr6, true) == NULL ? EIO : 0);
#else
    return (ENOTSUP);
#endif
}
/*
 * Drop a reference on the CLIP table entry for the IPv6 address in ca.
 * Returns the result of the release, or ENOTSUP without INET6.
 */
static int
release_clip_addr(struct adapter *sc, struct t4_clip_addr *ca)
{
#ifdef INET6
    struct in6_addr addr6;

    memcpy(&addr6.s6_addr[0], &ca->addr[0], sizeof(addr6.s6_addr));
    return (t4_release_clip_addr(sc, &addr6));
#else
    return (ENOTSUP);
#endif
}
/*
 * Locate a PCI capability.  Returns the capability's offset in config
 * space, or 0 if the device does not have it.
 */
int
t4_os_find_pci_capability(struct adapter *sc, int cap)
{
    int reg;

    if (pci_find_cap(sc->dev, cap, &reg) != 0)
        return (0);
    return (reg);
}
/*
 * Save the adapter's PCI configuration space so it can be restored
 * later (e.g. across a reset).  Always succeeds.
 */
int
t4_os_pci_save_state(struct adapter *sc)
{
    struct pci_devinfo *dinfo = device_get_ivars(sc->dev);

    pci_cfg_save(sc->dev, dinfo, 0);
    return (0);
}
/*
 * Restore the adapter's previously saved PCI configuration space.
 * Always succeeds.
 */
int
t4_os_pci_restore_state(struct adapter *sc)
{
    struct pci_devinfo *dinfo = device_get_ivars(sc->dev);

    pci_cfg_restore(sc->dev, dinfo);
    return (0);
}
/*
 * Called when the port's transceiver module changes.  Rebuilds the
 * ifmedia list for the new module and, if one is actually present,
 * reapplies the link configuration.  Finishes by logging what happened
 * on the main VI's ifnet.
 */
void
t4_os_portmod_changed(struct port_info *pi)
{
    struct adapter *sc = pi->adapter;
    struct vi_info *vi;
    if_t ifp;
    static const char *mod_str[] = {
        NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
    };

    KASSERT((pi->flags & FIXED_IFMEDIA) == 0,
    ("%s: port_type %u", __func__, pi->port_type));

    vi = &pi->vi[0];
    if (begin_synchronized_op(sc, vi, HOLD_LOCK, "t4mod") == 0) {
        PORT_LOCK(pi);
        build_medialist(pi);
        if (pi->mod_type != FW_PORT_MOD_TYPE_NONE) {
            fixup_link_config(pi);
            apply_link_config(pi);
        }
        PORT_UNLOCK(pi);
        end_synchronized_op(sc, LOCK_HELD);
    }

    ifp = vi->ifp;
    if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
        if_printf(ifp, "transceiver unplugged.\n");
    else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
        if_printf(ifp, "unknown transceiver inserted.\n");
    else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
        if_printf(ifp, "unsupported transceiver inserted.\n");
    else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
        if_printf(ifp, "%dGbps %s transceiver inserted.\n",
        port_top_speed(pi), mod_str[pi->mod_type]);
    } else {
        if_printf(ifp, "transceiver (type %d) inserted.\n",
        pi->mod_type);
    }
}
/*
 * Called (with the port lock held) when the port's link state changes.
 * On T6, picks the FCS error register appropriate for the negotiated
 * speed/FEC and rebases the FCS error count.  Then updates baudrate and
 * link state on every VI's ifnet.
 */
void
t4_os_link_changed(struct port_info *pi)
{
    struct vi_info *vi;
    if_t ifp;
    struct link_config *lc = &pi->link_cfg;
    struct adapter *sc = pi->adapter;
    int v;

    PORT_LOCK_ASSERT_OWNED(pi);

    if (is_t6(sc)) {
        if (lc->link_ok) {
            /* The MAC exposes different FCS counters for high
             * (>25G, or 25G with RS-FEC) and low speed links. */
            if (lc->speed > 25000 ||
            (lc->speed == 25000 && lc->fec == FEC_RS)) {
                pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
                A_MAC_PORT_AFRAMECHECKSEQUENCEERRORS);
            } else {
                pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
                A_MAC_PORT_MTIP_1G10G_RX_CRCERRORS);
            }
            pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg);
            pi->stats.rx_fcs_err = 0;
        } else {
            pi->fcs_reg = -1;    /* no link, no FCS tracking */
        }
    } else {
        MPASS(pi->fcs_reg != -1);
        MPASS(pi->fcs_base == 0);
    }

    for_each_vi(pi, v, vi) {
        ifp = vi->ifp;
        if (ifp == NULL)
            continue;

        if (lc->link_ok) {
            if_setbaudrate(ifp, IF_Mbps(lc->speed));
            if_link_state_change(ifp, LINK_STATE_UP);
        } else {
            if_link_state_change(ifp, LINK_STATE_DOWN);
        }
    }
}
/*
 * Invoke func(sc, arg) for every adapter on the global adapter list,
 * holding the list lock (shared) across the whole iteration.
 */
void
t4_iterate(void (*func)(struct adapter *, void *), void *arg)
{
    struct adapter *sc;

    sx_slock(&t4_list_lock);
    SLIST_FOREACH(sc, &t4_list, link) {
        /*
         * func should not make any assumptions about what state sc is
         * in - the only guarantee is that sc->sc_lock is a valid lock.
         */
        func(sc, arg);
    }
    sx_sunlock(&t4_list_lock);
}
/*
 * ioctl handler for the nexus character device.  All operations require
 * PRIV_DRIVER.  Raw register accesses are serialized by sc->reg_lock and
 * refused while the hardware is off limits (e.g. during a reset).
 */
static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
    int rc;
    struct adapter *sc = dev->si_drv1;

    rc = priv_check(td, PRIV_DRIVER);
    if (rc != 0)
        return (rc);

    switch (cmd) {
    case CHELSIO_T4_GETREG: {
        struct t4_reg *edata = (struct t4_reg *)data;

        /* Only 4-byte aligned offsets inside BAR0 may be accessed. */
        if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
            return (EFAULT);

        mtx_lock(&sc->reg_lock);
        if (hw_off_limits(sc))
            rc = ENXIO;
        else if (edata->size == 4)
            edata->val = t4_read_reg(sc, edata->addr);
        else if (edata->size == 8)
            edata->val = t4_read_reg64(sc, edata->addr);
        else
            rc = EINVAL;
        mtx_unlock(&sc->reg_lock);

        break;
    }
    case CHELSIO_T4_SETREG: {
        struct t4_reg *edata = (struct t4_reg *)data;

        if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
            return (EFAULT);

        mtx_lock(&sc->reg_lock);
        if (hw_off_limits(sc))
            rc = ENXIO;
        else if (edata->size == 4) {
            /*
             * Reject (and do NOT perform) a 4-byte write whose value
             * doesn't fit in 32 bits.  Previously the write went
             * through even though EINVAL was returned.
             */
            if (edata->val & 0xffffffff00000000)
                rc = EINVAL;
            else
                t4_write_reg(sc, edata->addr,
                    (uint32_t) edata->val);
        } else if (edata->size == 8)
            t4_write_reg64(sc, edata->addr, edata->val);
        else
            rc = EINVAL;
        mtx_unlock(&sc->reg_lock);

        break;
    }
    case CHELSIO_T4_REGDUMP: {
        struct t4_regdump *regs = (struct t4_regdump *)data;
        int reglen = t4_get_regs_len(sc);
        uint8_t *buf;

        if (regs->len < reglen) {
            regs->len = reglen; /* hint to the caller */
            return (ENOBUFS);
        }

        regs->len = reglen;
        buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
        mtx_lock(&sc->reg_lock);
        if (hw_off_limits(sc))
            rc = ENXIO;
        else
            get_regs(sc, regs, buf);
        mtx_unlock(&sc->reg_lock);
        if (rc == 0)
            rc = copyout(buf, regs->data, reglen);
        free(buf, M_CXGBE);
        break;
    }
    case CHELSIO_T4_GET_FILTER_MODE:
        rc = get_filter_mode(sc, (uint32_t *)data);
        break;
    case CHELSIO_T4_SET_FILTER_MODE:
        rc = set_filter_mode(sc, *(uint32_t *)data);
        break;
    case CHELSIO_T4_SET_FILTER_MASK:
        rc = set_filter_mask(sc, *(uint32_t *)data);
        break;
    case CHELSIO_T4_GET_FILTER:
        rc = get_filter(sc, (struct t4_filter *)data);
        break;
    case CHELSIO_T4_SET_FILTER:
        rc = set_filter(sc, (struct t4_filter *)data);
        break;
    case CHELSIO_T4_DEL_FILTER:
        rc = del_filter(sc, (struct t4_filter *)data);
        break;
    case CHELSIO_T4_GET_SGE_CONTEXT:
        rc = get_sge_context(sc, (struct t4_sge_context *)data);
        break;
    case CHELSIO_T4_LOAD_FW:
        rc = load_fw(sc, (struct t4_data *)data);
        break;
    case CHELSIO_T4_GET_MEM:
        rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
        break;
    case CHELSIO_T4_GET_I2C:
        rc = read_i2c(sc, (struct t4_i2c_data *)data);
        break;
    case CHELSIO_T4_CLEAR_STATS:
        rc = clear_stats(sc, *(uint32_t *)data);
        break;
    case CHELSIO_T4_SCHED_CLASS:
        rc = t4_set_sched_class(sc, (struct t4_sched_params *)data);
        break;
    case CHELSIO_T4_SCHED_QUEUE:
        rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data);
        break;
    case CHELSIO_T4_GET_TRACER:
        rc = t4_get_tracer(sc, (struct t4_tracer *)data);
        break;
    case CHELSIO_T4_SET_TRACER:
        rc = t4_set_tracer(sc, (struct t4_tracer *)data);
        break;
    case CHELSIO_T4_LOAD_CFG:
        rc = load_cfg(sc, (struct t4_data *)data);
        break;
    case CHELSIO_T4_LOAD_BOOT:
        rc = load_boot(sc, (struct t4_bootrom *)data);
        break;
    case CHELSIO_T4_LOAD_BOOTCFG:
        rc = load_bootcfg(sc, (struct t4_data *)data);
        break;
    case CHELSIO_T4_CUDBG_DUMP:
        rc = cudbg_dump(sc, (struct t4_cudbg_dump *)data);
        break;
    case CHELSIO_T4_SET_OFLD_POLICY:
        rc = set_offload_policy(sc, (struct t4_offload_policy *)data);
        break;
    case CHELSIO_T4_HOLD_CLIP_ADDR:
        rc = hold_clip_addr(sc, (struct t4_clip_addr *)data);
        break;
    case CHELSIO_T4_RELEASE_CLIP_ADDR:
        rc = release_clip_addr(sc, (struct t4_clip_addr *)data);
        break;
    default:
        rc = ENOTTY;
    }

    return (rc);
}
#ifdef TCP_OFFLOAD
/*
 * Enable or disable TOE (TCP offload) on a VI.  Called with the adapter's
 * synchronized op held.  The first enable on a port activates the TOM ULD
 * (and opportunistically iWARP/iSCSI); per-port usage is tracked in
 * pi->uld_vis and sc->offload_map.
 */
static int
toe_capability(struct vi_info *vi, bool enable)
{
    int rc;
    struct port_info *pi = vi->pi;
    struct adapter *sc = pi->adapter;

    ASSERT_SYNCHRONIZED_OP(sc);

    if (!is_offload(sc))
        return (ENODEV);
    if (hw_off_limits(sc))
        return (ENXIO);

    if (enable) {
#ifdef KERN_TLS
        /* NIC TLS and TOE are mutually exclusive on T6. */
        if (sc->flags & KERN_TLS_ON && is_t6(sc)) {
            int i, j, n;
            struct port_info *p;
            struct vi_info *v;

            /*
             * Reconfigure hardware for TOE if TXTLS is not enabled
             * on any ifnet.
             */
            n = 0;
            for_each_port(sc, i) {
                p = sc->port[i];
                for_each_vi(p, j, v) {
                    if (if_getcapenable(v->ifp) & IFCAP_TXTLS) {
                        CH_WARN(sc,
                            "%s has NIC TLS enabled.\n",
                            device_get_nameunit(v->dev));
                        n++;
                    }
                }
            }
            if (n > 0) {
                CH_WARN(sc, "Disable NIC TLS on all interfaces "
                    "associated with this adapter before "
                    "trying to enable TOE.\n");
                return (EAGAIN);
            }
            rc = t6_config_kern_tls(sc, false);
            if (rc)
                return (rc);
        }
#endif
        if ((if_getcapenable(vi->ifp) & IFCAP_TOE) != 0) {
            /* TOE is already enabled. */
            return (0);
        }

        /*
         * We need the port's queues around so that we're able to send
         * and receive CPLs to/from the TOE even if the ifnet for this
         * port has never been UP'd administratively.
         */
        if (!(vi->flags & VI_INIT_DONE) && ((rc = vi_init(vi)) != 0))
            return (rc);
        if (!(pi->vi[0].flags & VI_INIT_DONE) &&
            ((rc = vi_init(&pi->vi[0])) != 0))
            return (rc);

        if (isset(&sc->offload_map, pi->port_id)) {
            /* TOE is enabled on another VI of this port. */
            pi->uld_vis++;
            return (0);
        }

        if (!uld_active(sc, ULD_TOM)) {
            rc = t4_activate_uld(sc, ULD_TOM);
            if (rc == EAGAIN) {
                log(LOG_WARNING,
                    "You must kldload t4_tom.ko before trying "
                    "to enable TOE on a cxgbe interface.\n");
            }
            if (rc != 0)
                return (rc);
            KASSERT(sc->tom_softc != NULL,
                ("%s: TOM activated but softc NULL", __func__));
            KASSERT(uld_active(sc, ULD_TOM),
                ("%s: TOM activated but flag not set", __func__));
        }

        /* Activate iWARP and iSCSI too, if the modules are loaded. */
        if (!uld_active(sc, ULD_IWARP))
            (void) t4_activate_uld(sc, ULD_IWARP);
        if (!uld_active(sc, ULD_ISCSI))
            (void) t4_activate_uld(sc, ULD_ISCSI);

        pi->uld_vis++;
        setbit(&sc->offload_map, pi->port_id);
    } else {
        pi->uld_vis--;
        /* Other VIs on this port still have TOE enabled, or it's off. */
        if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
            return (0);
        KASSERT(uld_active(sc, ULD_TOM),
            ("%s: TOM never initialized?", __func__));
        clrbit(&sc->offload_map, pi->port_id);
    }

    return (0);
}
/*
* Add an upper layer driver to the global list.
*/
/*
 * Add an upper layer driver to the global ULD list.  Fails with EEXIST if
 * a ULD with the same id is already registered.
 */
int
t4_register_uld(struct uld_info *ui)
{
    struct uld_info *entry;
    int rc;

    rc = 0;
    sx_xlock(&t4_uld_list_lock);
    SLIST_FOREACH(entry, &t4_uld_list, link) {
        if (entry->uld_id == ui->uld_id) {
            rc = EEXIST;
            break;
        }
    }
    if (rc == 0) {
        SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
        ui->refcount = 0;
    }
    sx_xunlock(&t4_uld_list_lock);

    return (rc);
}
/*
 * Remove an upper layer driver from the global ULD list.  Returns EINVAL
 * if it was never registered and EBUSY if any adapter still has it active.
 */
int
t4_unregister_uld(struct uld_info *ui)
{
    struct uld_info *entry;
    int rc;

    rc = EINVAL;    /* not on the list */
    sx_xlock(&t4_uld_list_lock);
    SLIST_FOREACH(entry, &t4_uld_list, link) {
        if (entry != ui)
            continue;
        if (ui->refcount > 0)
            rc = EBUSY;
        else {
            SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
            rc = 0;
        }
        break;
    }
    sx_xunlock(&t4_uld_list_lock);

    return (rc);
}
/*
 * Activate the ULD with the given id on this adapter.  Returns EAGAIN if
 * the module providing the ULD is not loaded.  Called with the adapter's
 * synchronized op held.
 */
int
t4_activate_uld(struct adapter *sc, int id)
{
    int rc;
    struct uld_info *ui;

    ASSERT_SYNCHRONIZED_OP(sc);

    if (id < 0 || id > ULD_MAX)
        return (EINVAL);
    rc = EAGAIN;    /* kldload the module with this ULD and try again. */

    sx_slock(&t4_uld_list_lock);
    SLIST_FOREACH(ui, &t4_uld_list, link) {
        if (ui->uld_id == id) {
            /* Adapter needs to be initialized before activation. */
            if (!(sc->flags & FULL_INIT_DONE)) {
                rc = adapter_init(sc);
                if (rc != 0)
                    break;
            }

            rc = ui->activate(sc);
            if (rc == 0) {
                setbit(&sc->active_ulds, id);
                ui->refcount++;
            }
            break;
        }
    }
    sx_sunlock(&t4_uld_list_lock);

    return (rc);
}
/*
 * Deactivate the ULD with the given id on this adapter.  Returns ENXIO if
 * the ULD is not registered.  Called with the adapter's synchronized op
 * held.
 */
int
t4_deactivate_uld(struct adapter *sc, int id)
{
    int rc;
    struct uld_info *ui;

    ASSERT_SYNCHRONIZED_OP(sc);

    if (id < 0 || id > ULD_MAX)
        return (EINVAL);
    rc = ENXIO;

    sx_slock(&t4_uld_list_lock);
    SLIST_FOREACH(ui, &t4_uld_list, link) {
        if (ui->uld_id == id) {
            rc = ui->deactivate(sc);
            if (rc == 0) {
                clrbit(&sc->active_ulds, id);
                ui->refcount--;
            }
            break;
        }
    }
    sx_sunlock(&t4_uld_list_lock);

    return (rc);
}
/*
 * Deactivate every ULD that is active on this adapter.  Stops at the first
 * ULD that fails to deactivate.  Note that a failure to begin the
 * synchronized op is reported as ENXIO regardless of the actual error.
 */
static int
t4_deactivate_all_uld(struct adapter *sc)
{
    int rc;
    struct uld_info *ui;

    rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4detuld");
    if (rc != 0)
        return (ENXIO);

    sx_slock(&t4_uld_list_lock);
    SLIST_FOREACH(ui, &t4_uld_list, link) {
        if (isset(&sc->active_ulds, ui->uld_id)) {
            rc = ui->deactivate(sc);
            if (rc != 0)
                break;
            clrbit(&sc->active_ulds, ui->uld_id);
            ui->refcount--;
        }
    }
    sx_sunlock(&t4_uld_list_lock);

    end_synchronized_op(sc, 0);

    return (rc);
}
/*
 * Forward an asynchronous adapter event to the iWARP ULD (the only ULD
 * with an async_event handler).  Best effort: silently returns if the
 * synchronized op cannot be started.
 */
static void
t4_async_event(struct adapter *sc)
{
    struct uld_info *ui;

    if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4async") != 0)
        return;
    sx_slock(&t4_uld_list_lock);
    SLIST_FOREACH(ui, &t4_uld_list, link) {
        if (ui->uld_id == ULD_IWARP) {
            ui->async_event(sc);
            break;
        }
    }
    sx_sunlock(&t4_uld_list_lock);
    end_synchronized_op(sc, 0);
}
/* Returns non-zero iff the given ULD is currently active on this adapter. */
int
uld_active(struct adapter *sc, int uld_id)
{

    MPASS(uld_id >= 0 && uld_id <= ULD_MAX);

    return (isset(&sc->active_ulds, uld_id));
}
#endif
#ifdef KERN_TLS
/*
 * Enable or disable NIC TLS for the adapter.  Only T6 needs hardware
 * reconfiguration; NIC TLS and TOE are mutually exclusive there.  Called
 * with the adapter's synchronized op held.
 */
static int
ktls_capability(struct adapter *sc, bool enable)
{
    ASSERT_SYNCHRONIZED_OP(sc);

    if (!is_ktls(sc))
        return (ENODEV);
    if (!is_t6(sc))
        return (0);
    if (hw_off_limits(sc))
        return (ENXIO);

    if (enable) {
        if (sc->flags & KERN_TLS_ON)
            return (0); /* already on */
        if (sc->offload_map != 0) {
            CH_WARN(sc,
                "Disable TOE on all interfaces associated with "
                "this adapter before trying to enable NIC TLS.\n");
            return (EAGAIN);
        }
        return (t6_config_kern_tls(sc, true));
    } else {
        /*
         * Nothing to do for disable.  If TOE is enabled sometime later
         * then toe_capability will reconfigure the hardware.
         */
        return (0);
    }
}
#endif
/*
* t = ptr to tunable.
* nc = number of CPUs.
* c = compiled in default for that tunable.
*/
/*
 * t = ptr to tunable.
 * nc = number of CPUs.
 * c = compiled in default for that tunable.
 *
 * A positive tunable is treated as an explicit user setting and left
 * untouched.  Negative means "use this many queues"; zero means "use the
 * compiled-in default".  Either way the result is capped at nc.
 */
static void
calculate_nqueues(int *t, int nc, const int c)
{
    int nq;

    if (*t > 0)
        return;

    if (*t < 0)
        nq = -*t;
    else
        nq = c;

    *t = nq < nc ? nq : nc;
}
/*
* Come up with reasonable defaults for some of the tunables, provided they're
* not set by the user (in which case we'll use the values as is).
*/
static void
tweak_tunables(void)
{
    int nc = mp_ncpus; /* our snapshot of the number of CPUs */

    /* Queue counts: user setting wins, else derive from CPU count. */
    if (t4_ntxq < 1) {
#ifdef RSS
        t4_ntxq = rss_getnumbuckets();
#else
        calculate_nqueues(&t4_ntxq, nc, NTXQ);
#endif
    }

    calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI);

    if (t4_nrxq < 1) {
#ifdef RSS
        t4_nrxq = rss_getnumbuckets();
#else
        calculate_nqueues(&t4_nrxq, nc, NRXQ);
#endif
    }

    calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI);

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
    calculate_nqueues(&t4_nofldtxq, nc, NOFLDTXQ);
    calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI);
#endif
#ifdef TCP_OFFLOAD
    calculate_nqueues(&t4_nofldrxq, nc, NOFLDRXQ);
    calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI);
#endif

    /* Firmware capability masks default on/off by kernel config. */
#if defined(TCP_OFFLOAD) || defined(KERN_TLS)
    if (t4_toecaps_allowed == -1)
        t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
#else
    if (t4_toecaps_allowed == -1)
        t4_toecaps_allowed = 0;
#endif

#ifdef TCP_OFFLOAD
    if (t4_rdmacaps_allowed == -1) {
        t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP |
            FW_CAPS_CONFIG_RDMA_RDMAC;
    }
    if (t4_iscsicaps_allowed == -1) {
        t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU |
            FW_CAPS_CONFIG_ISCSI_TARGET_PDU |
            FW_CAPS_CONFIG_ISCSI_T10DIF;
    }
    if (t4_tmr_idx_ofld < 0 || t4_tmr_idx_ofld >= SGE_NTIMERS)
        t4_tmr_idx_ofld = TMR_IDX_OFLD;
    if (t4_pktc_idx_ofld < -1 || t4_pktc_idx_ofld >= SGE_NCOUNTERS)
        t4_pktc_idx_ofld = PKTC_IDX_OFLD;
#else
    if (t4_rdmacaps_allowed == -1)
        t4_rdmacaps_allowed = 0;
    if (t4_iscsicaps_allowed == -1)
        t4_iscsicaps_allowed = 0;
#endif

#ifdef DEV_NETMAP
    calculate_nqueues(&t4_nnmtxq, nc, NNMTXQ);
    calculate_nqueues(&t4_nnmrxq, nc, NNMRXQ);
    calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI);
    calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI);
#endif

    /* Clamp interrupt holdoff indices and queue sizes to legal ranges. */
    if (t4_tmr_idx < 0 || t4_tmr_idx >= SGE_NTIMERS)
        t4_tmr_idx = TMR_IDX;
    if (t4_pktc_idx < -1 || t4_pktc_idx >= SGE_NCOUNTERS)
        t4_pktc_idx = PKTC_IDX;
    if (t4_qsize_txq < 128)
        t4_qsize_txq = 128;
    if (t4_qsize_rxq < 128)
        t4_qsize_rxq = 128;
    /* rx queue size must be a multiple of 8. */
    while (t4_qsize_rxq & 7)
        t4_qsize_rxq++;

    t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;

    /*
     * Number of VIs to create per-port.  The first VI is the "main" regular
     * VI for the port.  The rest are additional virtual interfaces on the
     * same physical port.  Note that the main VI does not have native
     * netmap support but the extra VIs do.
     *
     * Limit the number of VIs per port to the number of available
     * MAC addresses per port.
     */
    if (t4_num_vis < 1)
        t4_num_vis = 1;
    if (t4_num_vis > nitems(vi_mac_funcs)) {
        t4_num_vis = nitems(vi_mac_funcs);
        printf("cxgbe: number of VIs limited to %d\n", t4_num_vis);
    }

    if (pcie_relaxed_ordering < 0 || pcie_relaxed_ordering > 2) {
        pcie_relaxed_ordering = 1;
#if defined(__i386__) || defined(__amd64__)
        if (cpu_vendor_id == CPU_VENDOR_INTEL)
            pcie_relaxed_ordering = 0;
#endif
    }
}
#ifdef DDB
/*
 * DDB helper: dump the 128-byte TCB for "tid" by repositioning memory
 * window 2 over the TCB region.  The window's position register is saved
 * on entry and restored on exit.
 */
static void
t4_dump_tcb(struct adapter *sc, int tid)
{
    uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos;

    reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
    save = t4_read_reg(sc, reg);
    base = sc->memwin[2].mw_base;

    /* Dump TCB for the tid */
    tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
    tcb_addr += tid * TCB_SIZE;

    if (is_t4(sc)) {
        pf = 0;
        win_pos = tcb_addr & ~0xf; /* start must be 16B aligned */
    } else {
        pf = V_PFNUM(sc->pf);
        win_pos = tcb_addr & ~0x7f; /* start must be 128B aligned */
    }
    t4_write_reg(sc, reg, win_pos | pf);
    t4_read_reg(sc, reg);  /* flush the write before reading the window */

    off = tcb_addr - win_pos;
    /* 4 rows x 8 words = 128 bytes. */
    for (i = 0; i < 4; i++) {
        uint32_t buf[8];
        for (j = 0; j < 8; j++, off += 4)
            buf[j] = htonl(t4_read_reg(sc, base + off));

        db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n",
            buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
            buf[7]);
    }

    t4_write_reg(sc, reg, save);
    t4_read_reg(sc, reg);
}
/*
 * DDB helper: print the firmware device log, oldest entry first.  The log
 * is a circular buffer in card memory, so the entry with the smallest
 * timestamp is located first and printing wraps around from there.
 */
static void
t4_dump_devlog(struct adapter *sc)
{
    struct devlog_params *dparams = &sc->params.devlog;
    struct fw_devlog_e e;
    int i, first, j, m, nentries, rc;
    uint64_t ftstamp = UINT64_MAX;

    if (dparams->start == 0) {
        db_printf("devlog params not valid\n");
        return;
    }

    nentries = dparams->size / sizeof(struct fw_devlog_e);
    m = fwmtype_to_hwmtype(dparams->memtype);

    /* Find the first entry. */
    first = -1;
    for (i = 0; i < nentries && !db_pager_quit; i++) {
        rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
            sizeof(e), (void *)&e);
        if (rc != 0)
            break;

        if (e.timestamp == 0)
            break;  /* unused slot; end of the log */

        e.timestamp = be64toh(e.timestamp);
        if (e.timestamp < ftstamp) {
            ftstamp = e.timestamp;
            first = i;
        }
    }

    if (first == -1)
        return;

    /* Print entries in order, wrapping around the circular buffer. */
    i = first;
    do {
        rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
            sizeof(e), (void *)&e);
        if (rc != 0)
            return;

        if (e.timestamp == 0)
            return;

        e.timestamp = be64toh(e.timestamp);
        e.seqno = be32toh(e.seqno);
        for (j = 0; j < 8; j++)
            e.params[j] = be32toh(e.params[j]);

        db_printf("%10d %15ju %8s %8s ",
            e.seqno, e.timestamp,
            (e.level < nitems(devlog_level_strings) ?
                devlog_level_strings[e.level] : "UNKNOWN"),
            (e.facility < nitems(devlog_facility_strings) ?
                devlog_facility_strings[e.facility] : "UNKNOWN"));
        db_printf(e.fmt, e.params[0], e.params[1], e.params[2],
            e.params[3], e.params[4], e.params[5], e.params[6],
            e.params[7]);

        if (++i == nentries)
            i = 0;
    } while (i != first && !db_pager_quit);
}
static DB_DEFINE_TABLE(show, t4, show_t4);

/* DDB command "show t4 devlog <nexus>": dump the firmware device log. */
DB_TABLE_COMMAND_FLAGS(show_t4, devlog, db_show_devlog, CS_OWN)
{
    device_t dev;
    int t;
    bool valid;

    valid = false;
    t = db_read_token();
    if (t == tIDENT) {
        dev = device_lookup_by_name(db_tok_string);
        valid = true;
    }
    db_skip_to_eol();
    if (!valid) {
        db_printf("usage: show t4 devlog <nexus>\n");
        return;
    }

    if (dev == NULL) {
        db_printf("device not found\n");
        return;
    }

    t4_dump_devlog(device_get_softc(dev));
}
/* DDB command "show t4 tcb <nexus> <tid>": dump one connection's TCB. */
DB_TABLE_COMMAND_FLAGS(show_t4, tcb, db_show_t4tcb, CS_OWN)
{
    device_t dev;
    int radix, tid, t;
    bool valid;

    valid = false;
    /* Parse the tid in decimal regardless of the current DDB radix. */
    radix = db_radix;
    db_radix = 10;
    t = db_read_token();
    if (t == tIDENT) {
        dev = device_lookup_by_name(db_tok_string);
        t = db_read_token();
        if (t == tNUMBER) {
            tid = db_tok_number;
            valid = true;
        }
    }
    db_radix = radix;
    db_skip_to_eol();
    if (!valid) {
        db_printf("usage: show t4 tcb <nexus> <tid>\n");
        return;
    }

    if (dev == NULL) {
        db_printf("device not found\n");
        return;
    }
    if (tid < 0) {
        db_printf("invalid tid\n");
        return;
    }

    t4_dump_tcb(device_get_softc(dev), tid);
}
#endif
/* Tags for the registered vxlan_start/vxlan_stop event handlers. */
static eventhandler_tag vxlan_start_evtag;
static eventhandler_tag vxlan_stop_evtag;

/* Arguments carried from the VXLAN event handlers to the per-adapter
 * t4_iterate callbacks. */
struct vxlan_evargs {
    if_t ifp;       /* interface the VXLAN was configured on */
    uint16_t port;  /* VXLAN UDP port */
};
/*
 * Program the MPS with the adapter's VXLAN UDP port and install one
 * match-all raw MAC TCAM entry per port for VXLAN RX.  Called with the
 * adapter's synchronized op held.
 */
static void
enable_vxlan_rx(struct adapter *sc)
{
    int i, rc;
    struct port_info *pi;
    uint8_t match_all_mac[ETHER_ADDR_LEN] = {0};

    ASSERT_SYNCHRONIZED_OP(sc);

    t4_write_reg(sc, A_MPS_RX_VXLAN_TYPE, V_VXLAN(sc->vxlan_port) |
        F_VXLAN_EN);
    for_each_port(sc, i) {
        pi = sc->port[i];
        if (pi->vxlan_tcam_entry == true)
            continue;   /* entry already installed for this port */
        rc = t4_alloc_raw_mac_filt(sc, pi->vi[0].viid, match_all_mac,
            match_all_mac, sc->rawf_base + pi->port_id, 1, pi->port_id,
            true);
        if (rc < 0) {
            rc = -rc;
            CH_ERR(&pi->vi[0],
                "failed to add VXLAN TCAM entry: %d.\n", rc);
        } else {
            MPASS(rc == sc->rawf_base + pi->port_id);
            pi->vxlan_tcam_entry = true;
        }
    }
}
/*
 * t4_iterate callback for a VXLAN start notification.  Only one VXLAN UDP
 * port per adapter is supported; additional configurations on the same
 * port just bump the refcount, and a different port is rejected.
 */
static void
t4_vxlan_start(struct adapter *sc, void *arg)
{
    struct vxlan_evargs *v = arg;

    /* T4/T5 lack raw MAC filters; nothing to do without them. */
    if (sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5)
        return;
    if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4vxst") != 0)
        return;

    if (sc->vxlan_refcount == 0) {
        sc->vxlan_port = v->port;
        sc->vxlan_refcount = 1;
        if (!hw_off_limits(sc))
            enable_vxlan_rx(sc);
    } else if (sc->vxlan_port == v->port) {
        sc->vxlan_refcount++;
    } else {
        CH_ERR(sc, "VXLAN already configured on port %d; "
            "ignoring attempt to configure it on port %d\n",
            sc->vxlan_port, v->port);
    }
    end_synchronized_op(sc, 0);
}
/*
 * t4_iterate callback for a VXLAN stop notification.  Disables VXLAN RX in
 * the hardware when the last configuration on the adapter's VXLAN port
 * goes away.
 */
static void
t4_vxlan_stop(struct adapter *sc, void *arg)
{
    struct vxlan_evargs *v = arg;

    if (sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5)
        return;
    if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4vxsp") != 0)
        return;

    /*
     * VXLANs may have been configured before the driver was loaded so we
     * may see more stops than starts.  This is not handled cleanly but at
     * least we keep the refcount sane.
     */
    if (sc->vxlan_port != v->port)
        goto done;
    if (sc->vxlan_refcount == 0) {
        CH_ERR(sc, "VXLAN operation on port %d was stopped earlier; "
            "ignoring attempt to stop it again.\n", sc->vxlan_port);
    } else if (--sc->vxlan_refcount == 0 && !hw_off_limits(sc))
        t4_set_reg_field(sc, A_MPS_RX_VXLAN_TYPE, F_VXLAN_EN, 0);
done:
    end_synchronized_op(sc, 0);
}
/*
 * vxlan_start event handler: fan the notification out to every adapter on
 * the global list.
 */
static void
t4_vxlan_start_handler(void *arg __unused, if_t ifp,
    sa_family_t family, u_int port)
{
    struct vxlan_evargs args = { .ifp = ifp, .port = port };

    MPASS(family == AF_INET || family == AF_INET6);
    t4_iterate(t4_vxlan_start, &args);
}
/*
 * vxlan_stop event handler: fan the notification out to every adapter on
 * the global list.
 */
static void
t4_vxlan_stop_handler(void *arg __unused, if_t ifp, sa_family_t family,
    u_int port)
{
    struct vxlan_evargs args = { .ifp = ifp, .port = port };

    MPASS(family == AF_INET || family == AF_INET6);
    t4_iterate(t4_vxlan_stop, &args);
}
static struct sx mlu; /* mod load unload */
SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");
/*
 * Module load/unload handler shared by the t4/t5/t6 nexus drivers.  The
 * "loaded" count tracks how many of the nexus drivers are attached to this
 * handler; global state is set up on the first load and torn down on the
 * last unload.
 */
static int
mod_event(module_t mod, int cmd, void *arg)
{
    int rc = 0;
    static int loaded = 0;

    switch (cmd) {
    case MOD_LOAD:
        sx_xlock(&mlu);
        if (loaded++ == 0) {
            t4_sge_modload();
            t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
                t4_filter_rpl, CPL_COOKIE_FILTER);
            t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL,
                do_l2t_write_rpl, CPL_COOKIE_FILTER);
            t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL,
                t4_hashfilter_ao_rpl, CPL_COOKIE_HASHFILTER);
            t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
                t4_hashfilter_tcb_rpl, CPL_COOKIE_HASHFILTER);
            t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS,
                t4_del_hashfilter_rpl, CPL_COOKIE_HASHFILTER);
            t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt);
            t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt);
            t4_register_cpl_handler(CPL_SMT_WRITE_RPL,
                do_smt_write_rpl);
            sx_init(&t4_list_lock, "T4/T5 adapters");
            SLIST_INIT(&t4_list);
            callout_init(&fatal_callout, 1);
#ifdef TCP_OFFLOAD
            sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
            SLIST_INIT(&t4_uld_list);
#endif
#ifdef INET6
            t4_clip_modload();
#endif
#ifdef KERN_TLS
            t6_ktls_modload();
#endif
            t4_tracer_modload();
            tweak_tunables();
            vxlan_start_evtag =
                EVENTHANDLER_REGISTER(vxlan_start,
                    t4_vxlan_start_handler, NULL,
                    EVENTHANDLER_PRI_ANY);
            vxlan_stop_evtag =
                EVENTHANDLER_REGISTER(vxlan_stop,
                    t4_vxlan_stop_handler, NULL,
                    EVENTHANDLER_PRI_ANY);
            reset_tq = taskqueue_create("t4_rst_tq", M_WAITOK,
                taskqueue_thread_enqueue, &reset_tq);
            taskqueue_start_threads(&reset_tq, 1, PI_SOFT,
                "t4_rst_thr");
        }
        sx_xunlock(&mlu);
        break;

    case MOD_UNLOAD:
        sx_xlock(&mlu);
        if (--loaded == 0) {
            int tries;

            taskqueue_free(reset_tq);

            /* Refuse to unload while any adapter is still attached. */
            sx_slock(&t4_list_lock);
            if (!SLIST_EMPTY(&t4_list)) {
                rc = EBUSY;
                sx_sunlock(&t4_list_lock);
                goto done_unload;
            }
#ifdef TCP_OFFLOAD
            sx_slock(&t4_uld_list_lock);
            if (!SLIST_EMPTY(&t4_uld_list)) {
                rc = EBUSY;
                sx_sunlock(&t4_uld_list_lock);
                sx_sunlock(&t4_list_lock);
                goto done_unload;
            }
#endif
            /*
             * Wait (briefly) for clusters with driver-owned free
             * routines to be returned by the rest of the system.
             */
            tries = 0;
            while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
                uprintf("%ju clusters with custom free routine "
                    "still in use.\n", t4_sge_extfree_refs());
                pause("t4unload", 2 * hz);
            }
#ifdef TCP_OFFLOAD
            sx_sunlock(&t4_uld_list_lock);
#endif
            sx_sunlock(&t4_list_lock);

            if (t4_sge_extfree_refs() == 0) {
                EVENTHANDLER_DEREGISTER(vxlan_start,
                    vxlan_start_evtag);
                EVENTHANDLER_DEREGISTER(vxlan_stop,
                    vxlan_stop_evtag);
                t4_tracer_modunload();
#ifdef KERN_TLS
                t6_ktls_modunload();
#endif
#ifdef INET6
                t4_clip_modunload();
#endif
#ifdef TCP_OFFLOAD
                sx_destroy(&t4_uld_list_lock);
#endif
                sx_destroy(&t4_list_lock);
                t4_sge_modunload();
                loaded = 0;
            } else {
                rc = EBUSY;
                loaded++; /* undo earlier decrement */
            }
        }
done_unload:
        sx_xunlock(&mlu);
        break;
    }

    return (rc);
}
/* Nexus (PCI bus attachment) drivers for T4, T5, and T6 adapters. */
DRIVER_MODULE(t4nex, pci, t4_driver, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t4nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(t5nex, pci, t5_driver, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t5nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(t6nex, pci, t6_driver, mod_event, 0);
MODULE_VERSION(t6nex, 1);
MODULE_DEPEND(t6nex, crypto, 1, 1, 1);
MODULE_DEPEND(t6nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t6nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

/* Main port (ifnet) drivers, one per chip generation. */
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, 0, 0);
MODULE_VERSION(cxl, 1);

DRIVER_MODULE(cc, t6nex, cc_driver, 0, 0);
MODULE_VERSION(cc, 1);

/* Extra virtual interface (VI) drivers. */
DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, 0, 0);
MODULE_VERSION(vcxgbe, 1);

DRIVER_MODULE(vcxl, cxl, vcxl_driver, 0, 0);
MODULE_VERSION(vcxl, 1);

DRIVER_MODULE(vcc, cc, vcc_driver, 0, 0);
MODULE_VERSION(vcc, 1);
diff --git a/sys/dev/cxgbe/t4_tracer.c b/sys/dev/cxgbe/t4_tracer.c
index d9b336c4b64a..80689a543e83 100644
--- a/sys/dev/cxgbe/t4_tracer.c
+++ b/sys/dev/cxgbe/t4_tracer.c
@@ -1,523 +1,518 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2013 Chelsio Communications, Inc.
* All rights reserved.
* Written by: Navdeep Parhar <np@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sx.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "t4_ioctl.h"
/*
* Locking notes
* =============
*
* An interface cloner is registered during mod_load and it can be used to
* create or destroy the tracing ifnet for an adapter at any time. It is
* possible for the cloned interface to outlive the adapter (adapter disappears
* in t4_detach but the tracing ifnet may live till mod_unload when removal of
* the cloner finally destroys any remaining cloned interfaces). When tracing
* filters are active, this ifnet is also receiving data. There are potential
* bad races between ifnet create, ifnet destroy, ifnet rx, ifnet ioctl,
* cxgbe_detach/t4_detach, mod_unload.
*
* a) The driver selects an iq for tracing (sc->traceq) inside a synch op. The
* iq is destroyed inside a synch op too (and sc->traceq updated).
* b) The cloner looks for an adapter that matches the name of the ifnet it's
* been asked to create, starts a synch op on that adapter, and proceeds only
* if the adapter has a tracing iq.
* c) The cloned ifnet and the adapter are coupled to each other via
* ifp->if_softc and sc->ifp. These can be modified only with the global
* t4_trace_lock sx as well as the sc->ifp_lock mutex held. Holding either
* of these will prevent any change.
*
* The order in which all the locks involved should be acquired are:
* t4_list_lock
* adapter lock
* (begin synch op and let go of the above two)
* t4_trace_lock
* sc->ifp_lock
*/
static struct sx t4_trace_lock;
static const char *t4_cloner_name = "tXnex";
static struct if_clone *t4_cloner;
/* tracer ifnet routines. mostly no-ops. */
static void tracer_init(void *);
static int tracer_ioctl(if_t, unsigned long, caddr_t);
static int tracer_transmit(if_t, struct mbuf *);
static void tracer_qflush(if_t);
static int tracer_media_change(if_t);
static void tracer_media_status(if_t, struct ifmediareq *);
/* match name (request/response) */
struct match_rr {
    const char *name;       /* nameunit to look for */
    int lock;               /* set to 1 to have sc returned with a synch op begun. */
    struct adapter *sc;     /* the matching adapter, or NULL */
    int rc;                 /* result of begin_synchronized_op, or 0 */
};
/*
 * t4_iterate callback: record the adapter whose device nameunit matches
 * mrr->name.  If mrr->lock is set, a synchronized op is begun on the match
 * and its result is stored in mrr->rc.
 */
static void
match_name(struct adapter *sc, void *arg)
{
    struct match_rr *mrr = arg;

    if (strcmp(device_get_nameunit(sc->dev), mrr->name) != 0)
        return;
    KASSERT(mrr->sc == NULL, ("%s: multiple matches (%p, %p) for %s",
        __func__, mrr->sc, sc, mrr->name));

    mrr->sc = sc;
    if (mrr->lock)
        mrr->rc = begin_synchronized_op(mrr->sc, NULL, 0, "t4clon");
    else
        mrr->rc = 0;
}
/*
 * Cloner match routine: the tracing ifnet must be named exactly like a
 * nexus device - "t4nex", "t5nex", or "t6nex" followed by a decimal digit.
 */
static int
t4_cloner_match(struct if_clone *ifc, const char *name)
{

    if (name[0] != 't' ||
        (name[1] != '4' && name[1] != '5' && name[1] != '6'))
        return (0);
    if (strncmp(&name[2], "nex", 3) != 0)
        return (0);
    /* Prefix matched all 5 chars, so name[5] is within bounds. */
    return (name[5] >= '0' && name[5] <= '9');
}
static int
t4_cloner_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
struct match_rr mrr;
struct adapter *sc;
if_t ifp;
int rc;
const uint8_t lla[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
mrr.name = name;
mrr.lock = 1;
mrr.sc = NULL;
mrr.rc = ENOENT;
t4_iterate(match_name, &mrr);
if (mrr.rc != 0)
return (mrr.rc);
sc = mrr.sc;
KASSERT(sc != NULL, ("%s: name (%s) matched but softc is NULL",
__func__, name));
ASSERT_SYNCHRONIZED_OP(sc);
sx_xlock(&t4_trace_lock);
if (sc->ifp != NULL) {
rc = EEXIST;
goto done;
}
if (sc->traceq < 0) {
rc = EAGAIN;
goto done;
}
ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- rc = ENOMEM;
- goto done;
- }
-
/* Note that if_xname is identical to the nexus nameunit */
if_initname(ifp, name, -1);
if_setdname(ifp, t4_cloner_name);
if_setinitfn(ifp, tracer_init);
if_setflags(ifp, IFF_SIMPLEX | IFF_DRV_RUNNING);
if_setioctlfn(ifp, tracer_ioctl);
if_settransmitfn(ifp, tracer_transmit);
if_setqflushfn(ifp, tracer_qflush);
if_setcapabilities(ifp, IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU);
ifmedia_init(&sc->media, IFM_IMASK, tracer_media_change,
tracer_media_status);
ifmedia_add(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE, 0, NULL);
ifmedia_set(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE);
sx_xunlock(&t4_trace_lock);
ether_ifattach(ifp, lla);
sx_xlock(&t4_trace_lock);
mtx_lock(&sc->ifp_lock);
if_setsoftc(ifp, sc);
sc->ifp = ifp;
mtx_unlock(&sc->ifp_lock);
rc = 0;
done:
sx_xunlock(&t4_trace_lock);
end_synchronized_op(sc, 0);
return (rc);
}
/*
 * Destroy a cloned tracing ifnet.  The ifnet is first decoupled from its
 * adapter (if it is still attached to one) under t4_trace_lock and
 * sc->ifp_lock, then detached and freed.
 */
static int
t4_cloner_destroy(struct if_clone *ifc, if_t ifp)
{
    struct adapter *sc;

    sx_xlock(&t4_trace_lock);
    sc = if_getsoftc(ifp);
    if (sc != NULL) {
        mtx_lock(&sc->ifp_lock);
        sc->ifp = NULL;
        if_setsoftc(ifp, NULL);
        mtx_unlock(&sc->ifp_lock);
        ifmedia_removeall(&sc->media);
    }
    sx_xunlock(&t4_trace_lock);

    ether_ifdetach(ifp);
    if_free(ifp);

    return (0);
}
/* Called at driver mod_load: set up the tracer lock and the ifnet cloner. */
void
t4_tracer_modload(void)
{

    sx_init(&t4_trace_lock, "T4/T5 tracer lock");
    t4_cloner = if_clone_advanced(t4_cloner_name, 0, t4_cloner_match,
        t4_cloner_create, t4_cloner_destroy);
}
/* Called at driver mod_unload: tear down the cloner and the tracer lock. */
void
t4_tracer_modunload(void)
{

    if (t4_cloner != NULL) {
        /*
         * The module is being unloaded so the nexus drivers have
         * detached.  The tracing interfaces can not outlive the nexus
         * (ifp->if_softc is the nexus) and must have been destroyed
         * already.  XXX: but if_clone is opaque to us and we can't
         * assert LIST_EMPTY(&t4_cloner->ifc_iflist) at this time.
         */
        if_clone_detach(t4_cloner);
    }

    sx_destroy(&t4_trace_lock);
}
/*
 * Called while the adapter is being detached: break the coupling between
 * the adapter and the tracing ifnet, which may outlive it.
 */
void
t4_tracer_port_detach(struct adapter *sc)
{

    sx_xlock(&t4_trace_lock);
    if (sc->ifp != NULL) {
        mtx_lock(&sc->ifp_lock);
        if_setsoftc(sc->ifp, NULL);
        sc->ifp = NULL;
        mtx_unlock(&sc->ifp_lock);
    }
    ifmedia_removeall(&sc->media);
    sx_xunlock(&t4_trace_lock);
}
/*
 * Report the first valid tracing filter at index >= t->idx.  On a hit,
 * t->idx/t->enabled/t->valid and t->tp describe the filter; if no valid
 * filter remains, t->idx is set to 0xff and t->valid to 0.
 */
int
t4_get_tracer(struct adapter *sc, struct t4_tracer *t)
{
    int rc, i, enabled;
    struct trace_params tp;

    if (t->idx >= NTRACE) {
        t->idx = 0xff;
        t->enabled = 0;
        t->valid = 0;
        return (0);
    }

    rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
        "t4gett");
    if (rc)
        return (rc);
    if (hw_off_limits(sc)) {
        rc = ENXIO;
        goto done;
    }

    for (i = t->idx; i < NTRACE; i++) {
        /*
         * Test the tracer at the loop index, not at t->idx, so the
         * scan actually advances to the next valid filter.  (The old
         * code tested t->idx every iteration and could never find a
         * valid filter past an invalid starting index.)
         */
        if (isset(&sc->tracer_valid, i)) {
            t4_get_trace_filter(sc, &tp, i, &enabled);
            t->idx = i;
            t->enabled = enabled;
            t->valid = 1;
            memcpy(&t->tp.data[0], &tp.data[0], sizeof(t->tp.data));
            memcpy(&t->tp.mask[0], &tp.mask[0], sizeof(t->tp.mask));
            t->tp.snap_len = tp.snap_len;
            t->tp.min_len = tp.min_len;
            t->tp.skip_ofst = tp.skip_ofst;
            t->tp.skip_len = tp.skip_len;
            t->tp.invert = tp.invert;

            /* convert channel to port iff 0 <= port < 8. */
            if (tp.port < 4)
                t->tp.port = sc->chan_map[tp.port];
            else if (tp.port < 8)
                t->tp.port = sc->chan_map[tp.port - 4] + 4;
            else
                t->tp.port = tp.port;

            goto done;
        }
    }

    t->idx = 0xff;
    t->enabled = 0;
    t->valid = 0;
done:
    end_synchronized_op(sc, LOCK_HELD);
    return (rc);
}
/*
 * Install, enable, or disable the tracing filter at t->idx.  If t->valid
 * is 0 the filter parameters are left alone and only the enable state of a
 * previously-installed filter is changed.  The global MPS trace enable bit
 * is set while any tracer is enabled and cleared when the last one is
 * disabled.
 */
int
t4_set_tracer(struct adapter *sc, struct t4_tracer *t)
{
    int rc;
    struct trace_params tp, *tpp;

    if (t->idx >= NTRACE)
        return (EINVAL);

    rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
        "t4sett");
    if (rc)
        return (rc);

    if (hw_off_limits(sc)) {
        rc = ENXIO;
        goto done;
    }

    /*
     * If no tracing filter is specified this time then check if the filter
     * at the index is valid anyway because it was set previously.  If so
     * then this is a legitimate enable/disable operation.
     */
    if (t->valid == 0) {
        if (isset(&sc->tracer_valid, t->idx))
            tpp = NULL;     /* keep existing filter parameters */
        else
            rc = EINVAL;
        goto done;
    }

    if (t->tp.port > 19 || t->tp.snap_len > 9600 ||
        t->tp.min_len > M_TFMINPKTSIZE || t->tp.skip_len > M_TFLENGTH ||
        t->tp.skip_ofst > M_TFOFFSET) {
        rc = EINVAL;
        goto done;
    }

    memcpy(&tp.data[0], &t->tp.data[0], sizeof(tp.data));
    memcpy(&tp.mask[0], &t->tp.mask[0], sizeof(tp.mask));
    tp.snap_len = t->tp.snap_len;
    tp.min_len = t->tp.min_len;
    tp.skip_ofst = t->tp.skip_ofst;
    tp.skip_len = t->tp.skip_len;
    tp.invert = !!t->tp.invert;

    /* convert port to channel iff 0 <= port < 8. */
    if (t->tp.port < 4) {
        if (sc->port[t->tp.port] == NULL) {
            rc = EINVAL;
            goto done;
        }
        tp.port = sc->port[t->tp.port]->tx_chan;
    } else if (t->tp.port < 8) {
        if (sc->port[t->tp.port - 4] == NULL) {
            rc = EINVAL;
            goto done;
        }
        tp.port = sc->port[t->tp.port - 4]->tx_chan + 4;
    } else
        tp.port = t->tp.port;
    tpp = &tp;
done:
    if (rc == 0) {
        rc = -t4_set_trace_filter(sc, tpp, t->idx, t->enabled);
        if (rc == 0) {
            if (t->enabled) {
                setbit(&sc->tracer_valid, t->idx);
                /* First enabled tracer: turn tracing on globally. */
                if (sc->tracer_enabled == 0) {
                    t4_set_reg_field(sc, A_MPS_TRC_CFG,
                        F_TRCEN, F_TRCEN);
                }
                setbit(&sc->tracer_enabled, t->idx);
            } else {
                clrbit(&sc->tracer_enabled, t->idx);
                /* Last enabled tracer: turn tracing off globally. */
                if (sc->tracer_enabled == 0) {
                    t4_set_reg_field(sc, A_MPS_TRC_CFG,
                        F_TRCEN, 0);
                }
            }
        }
    }
    end_synchronized_op(sc, LOCK_HELD);

    return (rc);
}
/*
 * CPL_TRACE_PKT handler (T4 chips).  Strip the CPL header and hand the
 * traced frame to any BPF listeners on the tracing ifnet, then free it.
 */
int
t4_trace_pkt(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
    struct adapter *sc = iq->adapter;
    if_t ifp;

    KASSERT(m != NULL, ("%s: no payload with opcode %02x", __func__,
        rss->opcode));

    mtx_lock(&sc->ifp_lock);
    ifp = sc->ifp;
    /* Test the local snapshot, consistent with t5_trace_pkt. */
    if (ifp != NULL) {
        m_adj(m, sizeof(struct cpl_trace_pkt));
        m->m_pkthdr.rcvif = ifp;
        ETHER_BPF_MTAP(ifp, m);
    }
    mtx_unlock(&sc->ifp_lock);
    m_freem(m);

    return (0);
}
/*
 * CPL_T5_TRACE_PKT handler (T5 and later).  Same as t4_trace_pkt but the
 * CPL header that must be stripped is larger.
 */
int
t5_trace_pkt(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
    struct adapter *sc = iq->adapter;
    if_t ifp;

    KASSERT(m != NULL, ("%s: no payload with opcode %02x", __func__,
        rss->opcode));

    mtx_lock(&sc->ifp_lock);
    ifp = sc->ifp;
    if (ifp != NULL) {
        m_adj(m, sizeof(struct cpl_t5_trace_pkt));
        m->m_pkthdr.rcvif = ifp;
        ETHER_BPF_MTAP(ifp, m);
    }
    mtx_unlock(&sc->ifp_lock);
    m_freem(m);

    return (0);
}
/* The tracing ifnet is created in the running state; init is a no-op. */
static void
tracer_init(void *arg)
{

    return;
}
/*
 * ioctl handler for the tracing ifnet.  Most requests are accepted as
 * no-ops; media requests are forwarded to the adapter's ifmedia while the
 * ifnet is still coupled to an adapter.
 */
static int
tracer_ioctl(if_t ifp, unsigned long cmd, caddr_t data)
{
    int rc = 0;
    struct adapter *sc;
    struct ifreq *ifr = (struct ifreq *)data;

    switch (cmd) {
    case SIOCSIFMTU:
    case SIOCSIFFLAGS:
    case SIOCADDMULTI:
    case SIOCDELMULTI:
    case SIOCSIFCAP:
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
    case SIOCGIFXMEDIA:
        sx_xlock(&t4_trace_lock);
        sc = if_getsoftc(ifp);
        if (sc == NULL)
            rc = EIO;   /* the adapter is gone */
        else
            rc = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
        sx_xunlock(&t4_trace_lock);
        break;
    default:
        rc = ether_ioctl(ifp, cmd, data);
    }

    return (rc);
}
/*
 * The tracing ifnet never transmits; anything handed to it is
 * discarded.
 */
static int
tracer_transmit(if_t ifp, struct mbuf *m)
{

	m_freem(m);
	return (0);
}
/* Nothing is ever queued on the tracing ifnet, so there is nothing to flush. */
static void
tracer_qflush(if_t ifp)
{
}
/* Media cannot be changed on the tracing ifnet. */
static int
tracer_media_change(if_t ifp)
{

	return (EOPNOTSUPP);
}
/* The tracing ifnet always reports a valid, active link. */
static void
tracer_media_status(if_t ifp, struct ifmediareq *ifmr)
{

	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
}
diff --git a/sys/dev/dc/if_dc.c b/sys/dev/dc/if_dc.c
index ddb81d8a486c..809feed7855f 100644
--- a/sys/dev/dc/if_dc.c
+++ b/sys/dev/dc/if_dc.c
@@ -1,4138 +1,4133 @@
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) 1997, 1998, 1999
* Bill Paul <wpaul@ee.columbia.edu>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
/*
* DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143
* series chips and several workalikes including the following:
*
* Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com)
* Macronix/Lite-On 82c115 PNIC II (www.macronix.com)
* Lite-On 82c168/82c169 PNIC (www.litecom.com)
* ASIX Electronics AX88140A (www.asix.com.tw)
* ASIX Electronics AX88141 (www.asix.com.tw)
* ADMtek AL981 (www.admtek.com.tw)
* ADMtek AN983 (www.admtek.com.tw)
* ADMtek CardBus AN985 (www.admtek.com.tw)
* Netgear FA511 (www.netgear.com) Appears to be rebadged ADMTek CardBus AN985
* Davicom DM9100, DM9102, DM9102A (www.davicom8.com)
* Accton EN1217 (www.accton.com)
* Xircom X3201 (www.xircom.com)
* Abocom FE2500
* Conexant LANfinity (www.conexant.com)
* 3Com OfficeConnect 10/100B 3CSOHO100B (www.3com.com)
*
* Datasheets for the 21143 are available at developer.intel.com.
* Datasheets for the clone parts can be found at their respective sites.
* (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.)
* The PNIC II is essentially a Macronix 98715A chip; the only difference
* worth noting is that its multicast hash table is only 128 bits wide
* instead of 512.
*
* Written by Bill Paul <wpaul@ee.columbia.edu>
* Electrical Engineering Department
* Columbia University, New York City
*/
/*
* The Intel 21143 is the successor to the DEC 21140. It is basically
* the same as the 21140 but with a few new features. The 21143 supports
* three kinds of media attachments:
*
* o MII port, for 10Mbps and 100Mbps support and NWAY
* autonegotiation provided by an external PHY.
* o SYM port, for symbol mode 100Mbps support.
* o 10baseT port.
* o AUI/BNC port.
*
* The 100Mbps SYM port and 10baseT port can be used together in
* combination with the internal NWAY support to create a 10/100
* autosensing configuration.
*
* Note that not all tulip workalikes are handled in this driver: we only
* deal with those which are relatively well behaved. The Winbond is
* handled separately due to its different register offsets and the
* special handling needed for its various bugs. The PNIC is handled
* here, but I'm not thrilled about it.
*
* All of the workalike chips use some form of MII transceiver support
* with the exception of the Macronix chips, which also have a SYM port.
* The ASIX AX88140A is also documented to have a SYM port, but all
* the cards I've seen use an MII transceiver, probably because the
* AX88140A doesn't support internal NWAY.
*/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/bpf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#define DC_USEIOSPACE
#include <dev/dc/if_dcreg.h>
MODULE_DEPEND(dc, pci, 1, 1, 1);
MODULE_DEPEND(dc, ether, 1, 1, 1);
MODULE_DEPEND(dc, miibus, 1, 1, 1);
/*
* "device miibus" is required in kernel config. See GENERIC if you get
* errors here.
*/
#include "miibus_if.h"
/*
 * Various supported device vendors/types and their names.
 * NOTE(review): entries with a nonzero revision field are listed before
 * the generic (revision 0) entry for the same vendor/device pair,
 * presumably so the more specific match wins during probe — confirm
 * against dc_devtype() before reordering.  The table is terminated by
 * the all-zero sentinel entry.
 */
static const struct dc_type dc_devs[] = {
	{ DC_DEVID(DC_VENDORID_DEC, DC_DEVICEID_21143), 0,
	    "Intel 21143 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009), 0,
	    "Davicom DM9009 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100), 0,
	    "Davicom DM9100 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102), DC_REVISION_DM9102A,
	    "Davicom DM9102A 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102), 0,
	    "Davicom DM9102 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AL981), 0,
	    "ADMtek AL981 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN983), 0,
	    "ADMtek AN983 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN985), 0,
	    "ADMtek AN985 CardBus 10/100BaseTX or clone" },
	{ DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511), 0,
	    "ADMtek ADM9511 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513), 0,
	    "ADMtek ADM9513 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ASIX, DC_DEVICEID_AX88140A), DC_REVISION_88141,
	    "ASIX AX88141 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ASIX, DC_DEVICEID_AX88140A), 0,
	    "ASIX AX88140A 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98713), DC_REVISION_98713A,
	    "Macronix 98713A 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98713), 0,
	    "Macronix 98713 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_CP, DC_DEVICEID_98713_CP), DC_REVISION_98713A,
	    "Compex RL100-TX 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_CP, DC_DEVICEID_98713_CP), 0,
	    "Compex RL100-TX 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5), DC_REVISION_98725,
	    "Macronix 98725 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5), DC_REVISION_98715AEC_C,
	    "Macronix 98715AEC-C 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5), 0,
	    "Macronix 98715/98715A 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98727), 0,
	    "Macronix 98727/98732 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C115), 0,
	    "LC82C115 PNIC II 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168), DC_REVISION_82C169,
	    "82c169 PNIC 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168), 0,
	    "82c168 PNIC 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN1217), 0,
	    "Accton EN1217 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN2242), 0,
	    "Accton EN2242 MiniPCI 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_XIRCOM, DC_DEVICEID_X3201), 0,
	    "Xircom X3201 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_DLINK, DC_DEVICEID_DRP32TXD), 0,
	    "Neteasy DRP-32TXD Cardbus 10/100" },
	{ DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500), 0,
	    "Abocom FE2500 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500MX), 0,
	    "Abocom FE2500MX 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112), 0,
	    "Conexant LANfinity MiniPCI 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_HAWKING, DC_DEVICEID_HAWKING_PN672TX), 0,
	    "Hawking CB102 CardBus 10/100" },
	{ DC_DEVID(DC_VENDORID_PLANEX, DC_DEVICEID_FNW3602T), 0,
	    "PlaneX FNW-3602-T CardBus 10/100" },
	{ DC_DEVID(DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB), 0,
	    "3Com OfficeConnect 10/100B" },
	{ DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN120), 0,
	    "Microsoft MN-120 CardBus 10/100" },
	{ DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130), 0,
	    "Microsoft MN-130 10/100" },
	{ DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB08), 0,
	    "Linksys PCMPC200 CardBus 10/100" },
	{ DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB09), 0,
	    "Linksys PCMPC200 CardBus 10/100" },
	{ DC_DEVID(DC_VENDORID_ULI, DC_DEVICEID_M5261), 0,
	    "ULi M5261 FastEthernet" },
	{ DC_DEVID(DC_VENDORID_ULI, DC_DEVICEID_M5263), 0,
	    "ULi M5263 FastEthernet" },
	{ 0, 0, NULL }
};
static int dc_probe(device_t);
static int dc_attach(device_t);
static int dc_detach(device_t);
static int dc_suspend(device_t);
static int dc_resume(device_t);
static const struct dc_type *dc_devtype(device_t);
static void dc_discard_rxbuf(struct dc_softc *, int);
static int dc_newbuf(struct dc_softc *, int);
static int dc_encap(struct dc_softc *, struct mbuf **);
static void dc_pnic_rx_bug_war(struct dc_softc *, int);
static int dc_rx_resync(struct dc_softc *);
static int dc_rxeof(struct dc_softc *);
static void dc_txeof(struct dc_softc *);
static void dc_tick(void *);
static void dc_tx_underrun(struct dc_softc *);
static void dc_intr(void *);
static void dc_start(if_t);
static void dc_start_locked(if_t);
static int dc_ioctl(if_t, u_long, caddr_t);
static void dc_init(void *);
static void dc_init_locked(struct dc_softc *);
static void dc_stop(struct dc_softc *);
static void dc_watchdog(void *);
static int dc_shutdown(device_t);
static int dc_ifmedia_upd(if_t);
static int dc_ifmedia_upd_locked(struct dc_softc *);
static void dc_ifmedia_sts(if_t, struct ifmediareq *);
static int dc_dma_alloc(struct dc_softc *);
static void dc_dma_free(struct dc_softc *);
static void dc_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static void dc_delay(struct dc_softc *);
static void dc_eeprom_idle(struct dc_softc *);
static void dc_eeprom_putbyte(struct dc_softc *, int);
static void dc_eeprom_getword(struct dc_softc *, int, uint16_t *);
static void dc_eeprom_getword_pnic(struct dc_softc *, int, uint16_t *);
static void dc_eeprom_getword_xircom(struct dc_softc *, int, uint16_t *);
static void dc_eeprom_width(struct dc_softc *);
static void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int);
static int dc_miibus_readreg(device_t, int, int);
static int dc_miibus_writereg(device_t, int, int, int);
static void dc_miibus_statchg(device_t);
static void dc_miibus_mediainit(device_t);
static void dc_setcfg(struct dc_softc *, int);
static void dc_netcfg_wait(struct dc_softc *);
static uint32_t dc_mchash_le(struct dc_softc *, const uint8_t *);
static uint32_t dc_mchash_be(const uint8_t *);
static void dc_setfilt_21143(struct dc_softc *);
static void dc_setfilt_asix(struct dc_softc *);
static void dc_setfilt_admtek(struct dc_softc *);
static void dc_setfilt_uli(struct dc_softc *);
static void dc_setfilt_xircom(struct dc_softc *);
static void dc_setfilt(struct dc_softc *);
static void dc_reset(struct dc_softc *);
static int dc_list_rx_init(struct dc_softc *);
static int dc_list_tx_init(struct dc_softc *);
static int dc_read_srom(struct dc_softc *, int);
static int dc_parse_21143_srom(struct dc_softc *);
static int dc_decode_leaf_sia(struct dc_softc *, struct dc_eblock_sia *);
static int dc_decode_leaf_mii(struct dc_softc *, struct dc_eblock_mii *);
static int dc_decode_leaf_sym(struct dc_softc *, struct dc_eblock_sym *);
static void dc_apply_fixup(struct dc_softc *, int);
static int dc_check_multiport(struct dc_softc *);
/*
 * MII bit-bang glue
 */
static uint32_t dc_mii_bitbang_read(device_t);
static void dc_mii_bitbang_write(device_t, uint32_t);

/*
 * Operations table consumed by the generic mii_bitbang(4) code: the
 * two SIO accessors above plus the DC_SIO register bit assignment for
 * each MDIO signal.
 */
static const struct mii_bitbang_ops dc_mii_bitbang_ops = {
	dc_mii_bitbang_read,
	dc_mii_bitbang_write,
	{
		DC_SIO_MII_DATAOUT,	/* MII_BIT_MDO */
		DC_SIO_MII_DATAIN,	/* MII_BIT_MDI */
		DC_SIO_MII_CLK,		/* MII_BIT_MDC */
		0,			/* MII_BIT_DIR_HOST_PHY */
		DC_SIO_MII_DIR,		/* MII_BIT_DIR_PHY_HOST */
	}
};
/*
 * Select I/O-space vs. memory-space register access at compile time.
 * DC_USEIOSPACE is defined earlier in this file, so the I/O BAR is used.
 */
#ifdef DC_USEIOSPACE
#define DC_RES			SYS_RES_IOPORT
#define DC_RID			DC_PCI_CFBIO
#else
#define DC_RES			SYS_RES_MEMORY
#define DC_RID			DC_PCI_CFBMA
#endif
/* newbus method table: device lifecycle plus the MII bus callbacks. */
static device_method_t dc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		dc_probe),
	DEVMETHOD(device_attach,	dc_attach),
	DEVMETHOD(device_detach,	dc_detach),
	DEVMETHOD(device_suspend,	dc_suspend),
	DEVMETHOD(device_resume,	dc_resume),
	DEVMETHOD(device_shutdown,	dc_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	dc_miibus_readreg),
	DEVMETHOD(miibus_writereg,	dc_miibus_writereg),
	DEVMETHOD(miibus_statchg,	dc_miibus_statchg),
	DEVMETHOD(miibus_mediainit,	dc_miibus_mediainit),

	DEVMETHOD_END
};

static driver_t dc_driver = {
	"dc",
	dc_methods,
	sizeof(struct dc_softc)
};

/* Register the driver on the PCI bus and attach miibus beneath it. */
DRIVER_MODULE_ORDERED(dc, pci, dc_driver, NULL, NULL, SI_ORDER_ANY);
MODULE_PNP_INFO("W32:vendor/device;U8:revision;D:#", pci, dc, dc_devs,
    nitems(dc_devs) - 1);
DRIVER_MODULE(miibus, dc, miibus_driver, NULL, NULL);
/* Read-modify-write helpers for setting/clearing bits in a CSR. */
#define DC_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define DC_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

/* Shorthands for twiddling bits in the serial I/O (DC_SIO) register. */
#define SIO_SET(x)	DC_SETBIT(sc, DC_SIO, (x))
#define SIO_CLR(x)	DC_CLRBIT(sc, DC_SIO, (x))
/*
 * Brief bus-settling delay implemented by issuing dummy reads of the
 * bus control register (each read presumably costs ~33ns on PCI, so
 * ten reads give roughly 300ns).
 */
static void
dc_delay(struct dc_softc *sc)
{
	int n = (300 / 33) + 1;

	while (n-- > 0)
		CSR_READ_4(sc, DC_BUSCTL);
}
/*
 * Determine the address width of the attached serial EEPROM by
 * clocking out a read opcode and counting the address clocks until the
 * device pulls its data-out line low.  The result is stored in
 * sc->dc_romwidth; if detection fails the common 6-bit width is
 * assumed.
 */
static void
dc_eeprom_width(struct dc_softc *sc)
{
	int i;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Clock out the 3-bit READ opcode (110b), MSB first. */
	for (i = 3; i--;) {
		if (6 & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Clock address bits until data-out goes low; the clock count at
	 * that point is the EEPROM's address width.
	 */
	for (i = 1; i <= 12; i++) {
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) {
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
			dc_delay(sc);
			break;
		}
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	/* Out-of-range result: fall back to the common 6-bit width. */
	if (i < 4 || i > 12)
		sc->dc_romwidth = 6;
	else
		sc->dc_romwidth = i;

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);
}
/*
 * Force the EEPROM interface into its idle state: select the EEPROM,
 * issue 25 clock pulses with chip select asserted to flush any prior
 * command state, then deselect and clear the SIO register.
 */
static void
dc_eeprom_idle(struct dc_softc *sc)
{
	int i;

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* 25 low/high clock cycles drain any in-progress operation. */
	for (i = 0; i < 25; i++) {
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);
	CSR_WRITE_4(sc, DC_SIO, 0x00000000);
}
/*
 * Send a read command and address to the EEPROM, check for ACK.
 * 'addr' is the word offset to read; the number of address bits sent
 * is sc->dc_romwidth (detected by dc_eeprom_width()).
 */
static void
dc_eeprom_putbyte(struct dc_softc *sc, int addr)
{
	int d, i;

	/* Clock out the 3-bit opcode portion of DC_EECMD_READ, MSB first. */
	d = DC_EECMD_READ >> 6;
	for (i = 3; i--; ) {
		if (d & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = sc->dc_romwidth; i--;) {
		if (addr & (1 << i)) {
			SIO_SET(DC_SIO_EE_DATAIN);
		} else {
			SIO_CLR(DC_SIO_EE_DATAIN);
		}
		dc_delay(sc);
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}
}
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The PNIC 82c168/82c169 has its own non-standard way to read
 * the EEPROM.
 */
static void
dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, uint16_t *dest)
{
	int i;
	uint32_t r;

	CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ | addr);

	/*
	 * Poll until the busy bit clears, then latch the low 16 bits.
	 * NOTE(review): on timeout *dest is left untouched and no error
	 * is reported — the caller sees stale data; confirm this is the
	 * intended behavior.
	 */
	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(1);
		r = CSR_READ_4(sc, DC_SIO);
		if (!(r & DC_PN_SIOCTL_BUSY)) {
			*dest = (uint16_t)(r & 0xFFFF);
			return;
		}
	}
}
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The Xircom X3201 has its own non-standard way to read
 * the EEPROM, too.
 */
static void
dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, uint16_t *dest)
{

	SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);

	/*
	 * The X3201 addresses bytes, not words: fetch the low byte and
	 * then the high byte of the little-endian word.
	 */
	addr *= 2;
	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
	*dest = (uint16_t)CSR_READ_4(sc, DC_SIO) & 0xff;
	addr += 1;
	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
	*dest |= ((uint16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8;

	SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);
}
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * This is the standard bit-banged 93Cxx-style access path used by most
 * supported chips.
 */
static void
dc_eeprom_getword(struct dc_softc *sc, int addr, uint16_t *dest)
{
	int i;
	uint16_t word = 0;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	dc_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 * Shift in 16 bits MSB first, sampling data-out while the clock
	 * is high.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)
			word |= i;
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	*dest = word;
}
/*
 * Read 'cnt' consecutive 16-bit words from the EEPROM starting at word
 * offset 'off' into 'dest', using whichever access method this chip
 * variant requires.  Nonzero 'be' stores the words big-endian,
 * otherwise little-endian.
 */
static void
dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt, int be)
{
	uint16_t word = 0, *out;
	int n;

	for (n = 0; n < cnt; n++) {
		/* Dispatch on chip type; 'word' is only updated on success. */
		if (DC_IS_PNIC(sc))
			dc_eeprom_getword_pnic(sc, off + n, &word);
		else if (DC_IS_XIRCOM(sc))
			dc_eeprom_getword_xircom(sc, off + n, &word);
		else
			dc_eeprom_getword(sc, off + n, &word);
		out = (uint16_t *)(dest + (n * 2));
		*out = be ? be16toh(word) : le16toh(word);
	}
}
/*
 * Write the MII serial port for the MII bit-bang module.
 */
static void
dc_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct dc_softc *sc = device_get_softc(dev);

	CSR_WRITE_4(sc, DC_SIO, val);
	/* Make sure the write reaches the chip before the next bit toggle. */
	CSR_BARRIER_4(sc, DC_SIO,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
/*
 * Read the MII serial port for the MII bit-bang module.
 */
static uint32_t
dc_mii_bitbang_read(device_t dev)
{
	struct dc_softc *sc = device_get_softc(dev);
	uint32_t sio;

	sio = CSR_READ_4(sc, DC_SIO);
	/* Order this access against the surrounding bit-bang operations. */
	CSR_BARRIER_4(sc, DC_SIO,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	return (sio);
}
/*
 * miibus read-register method.  The access method depends on the chip
 * variant: PNIC and ULi parts have dedicated MII command registers,
 * Comet (ADMtek AL981) maps PHY registers directly into CSR space, and
 * everything else uses bit-banged MDIO.  Returns the register value,
 * or 0 on error/absence.
 */
static int
dc_miibus_readreg(device_t dev, int phy, int reg)
{
	struct dc_softc *sc;
	int i, rval, phy_reg = 0;

	sc = device_get_softc(dev);

	/*
	 * When not in MII mode, synthesize a single PHY at address
	 * MII_NPHY - 1 so the miibus probe finds something to attach.
	 */
	if (sc->dc_pmode != DC_PMODE_MII) {
		if (phy == (MII_NPHY - 1)) {
			switch (reg) {
			case MII_BMSR:
			/*
			 * Fake something to make the probe
			 * code think there's a PHY here.
			 */
				return (BMSR_MEDIAMASK);
			case MII_PHYIDR1:
				if (DC_IS_PNIC(sc))
					return (DC_VENDORID_LO);
				return (DC_VENDORID_DEC);
			case MII_PHYIDR2:
				if (DC_IS_PNIC(sc))
					return (DC_DEVICEID_82C168);
				return (DC_DEVICEID_21143);
			default:
				return (0);
			}
		} else
			return (0);
	}

	/* PNIC: issue the read via the dedicated MII command register. */
	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ |
		    (phy << 23) | (reg << 18));
		for (i = 0; i < DC_TIMEOUT; i++) {
			DELAY(1);
			rval = CSR_READ_4(sc, DC_PN_MII);
			if (!(rval & DC_PN_MII_BUSY)) {
				rval &= 0xFFFF;
				/* All-ones likely means no PHY answered. */
				return (rval == 0xFFFF ? 0 : rval);
			}
		}
		return (0);
	}

	/* ULi M5263: MII access goes through the ROM register. */
	if (sc->dc_type == DC_TYPE_ULI_M5263) {
		CSR_WRITE_4(sc, DC_ROM,
		    ((phy << DC_ULI_PHY_ADDR_SHIFT) & DC_ULI_PHY_ADDR_MASK) |
		    ((reg << DC_ULI_PHY_REG_SHIFT) & DC_ULI_PHY_REG_MASK) |
		    DC_ULI_PHY_OP_READ);
		for (i = 0; i < DC_TIMEOUT; i++) {
			DELAY(1);
			rval = CSR_READ_4(sc, DC_ROM);
			if ((rval & DC_ULI_PHY_OP_DONE) != 0) {
				return (rval & DC_ULI_PHY_DATA_MASK);
			}
		}
		if (i == DC_TIMEOUT)
			device_printf(dev, "phy read timed out\n");
		return (0);
	}

	/* Comet: translate the MII register number to its CSR alias. */
	if (DC_IS_COMET(sc)) {
		switch (reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			device_printf(dev, "phy_read: bad phy register %x\n",
			    reg);
			return (0);
		}

		rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF;
		if (rval == 0xFFFF)
			return (0);
		return (rval);
	}

	/*
	 * 98713: PORTSEL is cleared around the bit-bang access and the
	 * saved NETCFG restored afterwards — apparently MDIO is only
	 * usable with the port-select bit clear on this part.
	 */
	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	rval = mii_bitbang_readreg(dev, &dc_mii_bitbang_ops, phy, reg);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return (rval);
}
/*
 * miibus write-register method: write 'data' to PHY register 'reg' at
 * address 'phy' using whichever access method the chip variant
 * requires (mirrors dc_miibus_readreg()).  Always returns 0.
 */
static int
dc_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct dc_softc *sc;
	int i, phy_reg = 0;

	sc = device_get_softc(dev);

	/* PNIC: write via the dedicated MII command register. */
	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE |
		    (phy << 23) | (reg << 10) | data);
		for (i = 0; i < DC_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY))
				break;
		}
		return (0);
	}

	/* ULi M5263: write via the ROM register; completion isn't polled. */
	if (sc->dc_type == DC_TYPE_ULI_M5263) {
		CSR_WRITE_4(sc, DC_ROM,
		    ((phy << DC_ULI_PHY_ADDR_SHIFT) & DC_ULI_PHY_ADDR_MASK) |
		    ((reg << DC_ULI_PHY_REG_SHIFT) & DC_ULI_PHY_REG_MASK) |
		    ((data << DC_ULI_PHY_DATA_SHIFT) & DC_ULI_PHY_DATA_MASK) |
		    DC_ULI_PHY_OP_WRITE);
		DELAY(1);
		return (0);
	}

	/* Comet: translate the MII register number to its CSR alias. */
	if (DC_IS_COMET(sc)) {
		switch (reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			device_printf(dev, "phy_write: bad phy register %x\n",
			    reg);
			/* (dead 'break' after this return removed) */
			return (0);
		}

		CSR_WRITE_4(sc, phy_reg, data);
		return (0);
	}

	/*
	 * 98713: clear PORTSEL around the bit-bang access and restore
	 * the saved NETCFG afterwards, matching dc_miibus_readreg().
	 */
	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	mii_bitbang_writereg(dev, &dc_mii_bitbang_ops, phy, reg, data);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return (0);
}
/*
 * miibus status-change callback: reprogram the chip's media
 * configuration to match the PHY and record link state in sc->dc_link.
 */
static void
dc_miibus_statchg(device_t dev)
{
	struct dc_softc *sc;
	if_t ifp;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->dc_miibus);
	ifp = sc->dc_ifp;
	/* Ignore spurious callbacks before the interface is running. */
	if (mii == NULL || ifp == NULL ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	ifm = &mii->mii_media;
	/*
	 * Davicom HomePNA is configured from the *selected* media;
	 * ADMtek parts need no dc_setcfg() at all; everyone else is
	 * configured from the currently active media.
	 */
	if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
		dc_setcfg(sc, ifm->ifm_media);
		return;
	} else if (!DC_IS_ADMTEK(sc))
		dc_setcfg(sc, mii->mii_media_active);

	/* Link is up only for a valid, active 10baseT/100baseTX media. */
	sc->dc_link = 0;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->dc_link = 1;
			break;
		}
	}
}
/*
 * Special support for DM9102A cards with HomePNA PHYs.  The DM9801
 * PHY's management interface cannot be reached over MDIO (its MDIO pin
 * is not connected), so the driver simply registers the extra HomePNA
 * media type itself and handles it internally.
 */
static void
dc_miibus_mediainit(device_t dev)
{
	struct dc_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->dc_miibus);
	int rev = pci_get_revid(dev);

	/* Only DM9102A and later Davicom parts get the HomePNA entry. */
	if (DC_IS_DAVICOM(sc) && rev >= DC_REVISION_DM9102A)
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_HPNA_1, 0, NULL);
}
/* Multicast hash-table widths, expressed as log2 of the bit count. */
#define DC_BITS_512	9
#define DC_BITS_128	7
#define DC_BITS_64	6
/*
 * Compute the multicast filter bit position for 'addr' using the
 * little-endian CRC32, masked (or, for Xircom, remapped) to the
 * hash-table geometry of the particular chip variant.
 */
static uint32_t
dc_mchash_le(struct dc_softc *sc, const uint8_t *addr)
{
	uint32_t crc;

	/* Compute CRC for the address value. */
	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);

	/*
	 * The hash table on the PNIC II and the MX98715AEC-C/D/E
	 * chips is only 128 bits wide.
	 */
	if (sc->dc_flags & DC_128BIT_HASH)
		return (crc & ((1 << DC_BITS_128) - 1));

	/* The hash table on the MX98715BEC is only 64 bits wide. */
	if (sc->dc_flags & DC_64BIT_HASH)
		return (crc & ((1 << DC_BITS_64) - 1));

	/* Xircom's hash filtering table is different (read: weird) */
	/* Xircom uses the LEAST significant bits */
	if (DC_IS_XIRCOM(sc)) {
		if ((crc & 0x180) == 0x180)
			return ((crc & 0x0F) + (crc & 0x70) * 3 + (14 << 4));
		else
			return ((crc & 0x1F) + ((crc >> 1) & 0xF0) * 3 +
			    (12 << 4));
	}

	/* Default: full 512-bit hash table. */
	return (crc & ((1 << DC_BITS_512) - 1));
}
/*
 * Calculate CRC of a multicast group address and return the upper six
 * bits, which select the filter bit position on big-endian-hash chips.
 */
static uint32_t
dc_mchash_be(const uint8_t *addr)
{

	return ((ether_crc32_be(addr, ETHER_ADDR_LEN) >> 26) & 0x0000003F);
}
/*
 * 21143-style RX filter setup routine. Filter programming is done by
 * downloading a special setup frame into the TX engine. 21143, Macronix,
 * PNIC, PNIC II and Davicom chips are programmed this way.
 *
 * We always program the chip using 'hash perfect' mode, i.e. one perfect
 * address (our node address) and a 512-bit hash filter for multicast
 * frames. We also sneak the broadcast address into the hash filter since
 * we need that too.
 */

/*
 * if_foreach_llmaddr() callback: hash one multicast address into the
 * setup-frame buffer.  'arg' MUST be the struct dc_softc — the
 * callback dereferences sc->dc_cdata.dc_sbuf, not the buffer pointer
 * directly.
 */
static u_int
dc_hash_maddr_21143(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct dc_softc *sc = arg;
	uint32_t h;

	h = dc_mchash_le(sc, LLADDR(sdl));
	/* Each 32-bit word of the setup buffer holds 16 hash bits. */
	sc->dc_cdata.dc_sbuf[h >> 4] |= htole32(1 << (h & 0xF));

	return (1);
}
/*
 * Program the 21143-style RX filter by posting a setup frame on the TX
 * ring: one perfect station address plus a hash table that carries the
 * multicast and broadcast addresses ('hash perfect' mode).
 */
static void
dc_setfilt_21143(struct dc_softc *sc)
{
	uint16_t eaddr[(ETHER_ADDR_LEN+1)/2];
	struct dc_desc *sframe;
	uint32_t h, *sp;
	if_t ifp;
	int i;

	ifp = sc->dc_ifp;

	/* Claim the next TX descriptor for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata.dc_tx_list[i];
	sp = sc->dc_cdata.dc_sbuf;
	bzero(sp, DC_SFRAME_LEN);

	sframe->dc_data = htole32(DC_ADDR_LO(sc->dc_saddr));
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf;

	/* If we want promiscuous mode, set the allframes bit. */
	if (if_getflags(ifp) & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (if_getflags(ifp) & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/*
	 * Fix: dc_hash_maddr_21143() casts its argument to
	 * struct dc_softc * and writes through sc->dc_cdata.dc_sbuf,
	 * so it must be handed the softc, not the raw setup-buffer
	 * pointer 'sp'.
	 */
	if_foreach_llmaddr(ifp, dc_hash_maddr_21143, sc);

	/* Sneak the broadcast address into the hash filter as well. */
	if (if_getflags(ifp) & IFF_BROADCAST) {
		h = dc_mchash_le(sc, if_getbroadcastaddr(ifp));
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}

	/* Set our MAC address. */
	bcopy(if_getlladdr(sc->dc_ifp), eaddr, ETHER_ADDR_LEN);
	sp[39] = DC_SP_MAC(eaddr[0]);
	sp[40] = DC_SP_MAC(eaddr[1]);
	sp[41] = DC_SP_MAC(eaddr[2]);

	/* Hand the descriptor to the chip and kick the TX engine. */
	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dc_stag, sc->dc_smap, BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * The PNIC takes an exceedingly long time to process its
	 * setup frame; wait 10ms after posting the setup frame
	 * before proceeding, just so it has time to swallow its
	 * medicine.
	 */
	DELAY(10000);

	sc->dc_wdog_timer = 5;
}
/*
 * if_foreach_llmaddr() callback: set the big-endian-CRC hash bit for
 * one multicast address in the two 32-bit table halves at 'arg'.
 */
static u_int
dc_hash_maddr_admtek_be(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *table = arg;
	int bit;

	bit = dc_mchash_be(LLADDR(sdl));
	if (bit >= 32)
		table[1] |= 1 << (bit - 32);
	else
		table[0] |= 1 << bit;

	return (1);
}
/*
 * Callback context for dc_hash_maddr_admtek_le(): the softc (needed by
 * dc_mchash_le()) plus the two 32-bit halves of the multicast hash
 * table being accumulated.
 */
struct dc_hash_maddr_admtek_le_ctx {
	struct dc_softc *sc;
	uint32_t hashes[2];
};
/*
 * if_foreach_llmaddr() callback: set the little-endian-CRC hash bit
 * for one multicast address in the table carried in the context.
 */
static u_int
dc_hash_maddr_admtek_le(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct dc_hash_maddr_admtek_le_ctx *ctx = arg;
	int bit;

	bit = dc_mchash_le(ctx->sc, LLADDR(sdl));
	if (bit >= 32)
		ctx->hashes[1] |= 1 << (bit - 32);
	else
		ctx->hashes[0] |= 1 << bit;

	return (1);
}
/*
 * ADMtek-style RX filter programming: the station address and the
 * 64-bit multicast hash are written directly to dedicated CSRs rather
 * than through a setup frame.  Centaur parts use the little-endian
 * CRC, other ADMtek parts the big-endian one.
 */
static void
dc_setfilt_admtek(struct dc_softc *sc)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	if_t ifp;
	struct dc_hash_maddr_admtek_le_ctx ctx = { sc, { 0, 0 }};

	ifp = sc->dc_ifp;

	/* Init our MAC address. */
	bcopy(if_getlladdr(sc->dc_ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, DC_AL_PAR0, eaddr[3] << 24 | eaddr[2] << 16 |
	    eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_4(sc, DC_AL_PAR1, eaddr[5] << 8 | eaddr[4]);

	/* If we want promiscuous mode, set the allframes bit. */
	if (if_getflags(ifp) & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (if_getflags(ifp) & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* First, zot all the existing hash bits. */
	CSR_WRITE_4(sc, DC_AL_MAR0, 0);
	CSR_WRITE_4(sc, DC_AL_MAR1, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI))
		return;

	/* Now program new ones. */
	if (DC_IS_CENTAUR(sc))
		if_foreach_llmaddr(ifp, dc_hash_maddr_admtek_le, &ctx);
	else
		if_foreach_llmaddr(ifp, dc_hash_maddr_admtek_be, &ctx.hashes);
	CSR_WRITE_4(sc, DC_AL_MAR0, ctx.hashes[0]);
	CSR_WRITE_4(sc, DC_AL_MAR1, ctx.hashes[1]);
}
/*
 * ASIX-style RX filter programming: the station address and multicast
 * hash live behind an indexed filter register pair (write the index to
 * DC_AX_FILTIDX, then the value to DC_AX_FILTDATA).
 */
static void
dc_setfilt_asix(struct dc_softc *sc)
{
	uint32_t eaddr[(ETHER_ADDR_LEN+3)/4];
	if_t ifp;
	uint32_t hashes[2] = { 0, 0 };

	ifp = sc->dc_ifp;

	/* Init our MAC address. */
	bcopy(if_getlladdr(sc->dc_ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, eaddr[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, eaddr[1]);

	/* If we want promiscuous mode, set the allframes bit. */
	if (if_getflags(ifp) & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (if_getflags(ifp) & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/*
	 * The ASIX chip has a special bit to enable reception
	 * of broadcast frames.
	 */
	if (if_getflags(ifp) & IFF_BROADCAST)
		DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI))
		return;

	/* now program new ones */
	if_foreach_llmaddr(ifp, dc_hash_maddr_admtek_be, hashes);

	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);
}
/*
 * if_foreach_llmaddr() callback: append one multicast address to the
 * ULi perfect-filter list in the setup frame, as three 16-bit words.
 * Returns 0 (not counted) once all DC_ULI_FILTER_NPERF slots are used,
 * 1 otherwise.
 */
static u_int
dc_hash_maddr_uli(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
{
	uint32_t **fp = arg;
	uint8_t *lla;

	/* The perfect filter only has room for DC_ULI_FILTER_NPERF entries. */
	if (mcnt == DC_ULI_FILTER_NPERF)
		return (0);
	lla = LLADDR(sdl);
	(*fp)[0] = DC_SP_MAC(lla[1] << 8 | lla[0]);
	(*fp)[1] = DC_SP_MAC(lla[3] << 8 | lla[2]);
	(*fp)[2] = DC_SP_MAC(lla[5] << 8 | lla[4]);
	*fp += 3;
	return (1);
}
/*
 * Program the RX filter on ULi M5261/M5263 parts by queueing a
 * perfect-filter setup frame on the TX ring.  The frame holds the
 * station address, the broadcast address, and up to
 * DC_ULI_FILTER_NPERF multicast addresses; overflow falls back to
 * the allmulti bit in DC_NETCFG.
 */
static void
dc_setfilt_uli(struct dc_softc *sc)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	if_t ifp;
	struct dc_desc *sframe;
	uint32_t filter, *sp;
	int i, mcnt;

	ifp = sc->dc_ifp;

	/* Claim one TX descriptor for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata.dc_tx_list[i];
	sp = sc->dc_cdata.dc_sbuf;
	bzero(sp, DC_SFRAME_LEN);

	sframe->dc_data = htole32(DC_ADDR_LO(sc->dc_saddr));
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_PERFECT | DC_TXCTL_FINT);

	/* Tag the slot with the setup buffer so TX completion can tell
	 * it apart from a real mbuf chain. */
	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf;

	/* Set station address. */
	bcopy(if_getlladdr(sc->dc_ifp), eaddr, ETHER_ADDR_LEN);
	*sp++ = DC_SP_MAC(eaddr[1] << 8 | eaddr[0]);
	*sp++ = DC_SP_MAC(eaddr[3] << 8 | eaddr[2]);
	*sp++ = DC_SP_MAC(eaddr[5] << 8 | eaddr[4]);

	/* Set broadcast address. */
	*sp++ = DC_SP_MAC(0xFFFF);
	*sp++ = DC_SP_MAC(0xFFFF);
	*sp++ = DC_SP_MAC(0xFFFF);

	/* Extract current filter configuration. */
	filter = CSR_READ_4(sc, DC_NETCFG);
	filter &= ~(DC_NETCFG_RX_PROMISC | DC_NETCFG_RX_ALLMULTI);

	/* Now build perfect filters. */
	mcnt = if_foreach_llmaddr(ifp, dc_hash_maddr_uli, &sp);
	if (mcnt == DC_ULI_FILTER_NPERF)
		/* Too many groups for the perfect filter; accept all. */
		filter |= DC_NETCFG_RX_ALLMULTI;
	else
		/* Pad unused slots with the broadcast address. */
		for (; mcnt < DC_ULI_FILTER_NPERF; mcnt++) {
			*sp++ = DC_SP_MAC(0xFFFF);
			*sp++ = DC_SP_MAC(0xFFFF);
			*sp++ = DC_SP_MAC(0xFFFF);
		}

	/* Stop TX/RX before rewriting DC_NETCFG, then restore below. */
	if (filter & (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON))
		CSR_WRITE_4(sc, DC_NETCFG,
		    filter & ~(DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));
	if (if_getflags(ifp) & IFF_PROMISC)
		filter |= DC_NETCFG_RX_PROMISC | DC_NETCFG_RX_ALLMULTI;
	if (if_getflags(ifp) & IFF_ALLMULTI)
		filter |= DC_NETCFG_RX_ALLMULTI;
	CSR_WRITE_4(sc, DC_NETCFG,
	    filter & ~(DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));
	if (filter & (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON))
		CSR_WRITE_4(sc, DC_NETCFG, filter);

	/* Hand the descriptor to the chip and kick a TX poll. */
	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dc_stag, sc->dc_smap, BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * Wait some time...
	 */
	DELAY(1000);

	sc->dc_wdog_timer = 5;
}
/*
 * if_foreach_llmaddr() callback: set the hash-table bit for one
 * multicast address in the Xircom setup-frame buffer.  Always counts
 * the address (returns 1).
 */
static u_int
dc_hash_maddr_xircom(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct dc_softc *sc = arg;
	uint32_t hash;
	int word, bitpos;

	hash = dc_mchash_le(sc, LLADDR(sdl));
	word = hash >> 4;
	bitpos = hash & 0xF;
	sc->dc_cdata.dc_sbuf[word] |= htole32(1 << bitpos);
	return (1);
}
/*
 * Program the RX filter on Xircom X3201 parts by queueing a
 * hash-perfect setup frame on the TX ring: a multicast hash table
 * plus the station address.
 */
static void
dc_setfilt_xircom(struct dc_softc *sc)
{
	uint16_t eaddr[(ETHER_ADDR_LEN+1)/2];
	if_t ifp;
	struct dc_desc *sframe;
	uint32_t h, *sp;
	int i;

	ifp = sc->dc_ifp;
	/* Stop TX/RX while the setup frame is staged; re-enabled below. */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));

	/* Claim one TX descriptor for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata.dc_tx_list[i];
	sp = sc->dc_cdata.dc_sbuf;
	bzero(sp, DC_SFRAME_LEN);

	sframe->dc_data = htole32(DC_ADDR_LO(sc->dc_saddr));
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	/* Tag the slot with the setup buffer so TX completion can tell
	 * it apart from a real mbuf chain. */
	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf;

	/* If we want promiscuous mode, set the allframes bit. */
	if (if_getflags(ifp) & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (if_getflags(ifp) & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* Fill in the multicast hash bits, plus broadcast if requested. */
	if_foreach_llmaddr(ifp, dc_hash_maddr_xircom, &sp);

	if (if_getflags(ifp) & IFF_BROADCAST) {
		h = dc_mchash_le(sc, if_getbroadcastaddr(ifp));
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}

	/* Set our MAC address.  eaddr[] is read as 16-bit words, which
	 * assumes little-endian host byte order — the layout the Xircom
	 * setup frame expects on the platforms this chip ships in. */
	bcopy(if_getlladdr(sc->dc_ifp), eaddr, ETHER_ADDR_LEN);
	sp[0] = DC_SP_MAC(eaddr[0]);
	sp[1] = DC_SP_MAC(eaddr[1]);
	sp[2] = DC_SP_MAC(eaddr[2]);

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);

	/* Hand the descriptor to the chip and kick a TX poll. */
	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dc_stag, sc->dc_smap, BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * Wait some time...
	 */
	DELAY(1000);

	sc->dc_wdog_timer = 5;
}
/*
 * Program the RX filter using whichever method the attached chip
 * family requires.  The chip-class predicates below are mutually
 * exclusive, so at most one handler runs.
 */
static void
dc_setfilt(struct dc_softc *sc)
{

	if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc))
		dc_setfilt_21143(sc);
	else if (DC_IS_ASIX(sc))
		dc_setfilt_asix(sc);
	else if (DC_IS_ADMTEK(sc))
		dc_setfilt_admtek(sc);
	else if (DC_IS_ULI(sc))
		dc_setfilt_uli(sc);
	else if (DC_IS_XIRCOM(sc))
		dc_setfilt_xircom(sc);
}
/*
 * Poll the ISR until the transmitter reports idle and the receiver
 * reports stopped/waiting, or DC_TIMEOUT iterations elapse.  On
 * timeout, complain — unless the device is gone, or the chip is one
 * whose status bits are known to lie (ASIX TX idle, broken RX state).
 */
static void
dc_netcfg_wait(struct dc_softc *sc)
{
	uint32_t isr;
	int i;

	for (i = 0; i < DC_TIMEOUT; i++) {
		isr = CSR_READ_4(sc, DC_ISR);
		if (isr & DC_ISR_TX_IDLE &&
		    ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
		    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT))
			break;
		DELAY(10);
	}

	if (i == DC_TIMEOUT && bus_child_present(sc->dc_dev)) {
		if (!(isr & DC_ISR_TX_IDLE) && !DC_IS_ASIX(sc))
			device_printf(sc->dc_dev,
			    "%s: failed to force tx to idle state\n", __func__);
		if (!((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
		    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) &&
		    !DC_HAS_BROKEN_RXSTATE(sc))
			device_printf(sc->dc_dev,
			    "%s: failed to force rx to idle state\n", __func__);
	}
}
/*
* In order to fiddle with the 'full-duplex' and '100Mbps' bits in
* the netconfig register, we first have to put the transmit and/or
* receive logic in the idle state.
*/
static void
dc_setcfg(struct dc_softc *sc, int media)
{
	int restart = 0, watchdogreg;

	if (IFM_SUBTYPE(media) == IFM_NONE)
		return;

	/* TX/RX must be idle before DC_NETCFG mode bits may change;
	 * remember to restart them at the end if they were running. */
	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON)) {
		restart = 1;
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));
		dc_netcfg_wait(sc);
	}

	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			if (DC_IS_INTEL(sc)) {
				/* There's a write enable bit here that reads as 1. */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			/* MII port: disable the internal PCS/scrambler... */
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
			    DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER));
			/* ...except the 98713, which needs them even for MII. */
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
				    DC_NETCFG_SCRAMBLER));
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
		} else {
			/* Symbol (non-MII) 100Mbps path. */
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
		}
	}

	if (IFM_SUBTYPE(media) == IFM_10_T) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			/* There's a write enable bit here that reads as 1. */
			if (DC_IS_INTEL(sc)) {
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
			    DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
		} else {
			/* SIA (non-MII) 10Mbps path. */
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc)) {
				/* Re-program the 21143 SIA for forced 10Mbps;
				 * the magic values differ for FDX vs HDX. */
				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
				if ((media & IFM_GMASK) == IFM_FDX)
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
				else
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL,
				    DC_TCTL_AUTONEGENBL);
				DELAY(20000);
			}
		}
	}

	/*
	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
	 * PHY and we want HomePNA mode, set the portsel bit to turn
	 * on the external MII port.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			sc->dc_link = 1;
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
		}
	}

	if ((media & IFM_GMASK) == IFM_FDX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	}

	if (restart)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON | DC_NETCFG_RX_ON);
}
/*
 * Perform a full software reset of the controller, then clear the
 * interrupt mask, bus control and network config registers.
 */
static void
dc_reset(struct dc_softc *sc)
{
	int i;

	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

	/* Wait for the self-clearing reset bit to drop. */
	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
			break;
	}

	/* These parts don't self-clear reliably: clear the bit by hand,
	 * and zero i so the timeout warning below is suppressed. */
	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_CONEXANT(sc) ||
	    DC_IS_XIRCOM(sc) || DC_IS_INTEL(sc) || DC_IS_ULI(sc)) {
		DELAY(10000);
		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
		i = 0;
	}

	if (i == DC_TIMEOUT)
		device_printf(sc->dc_dev, "reset never completed!\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

	/*
	 * Bring the SIA out of reset. In some cases, it looks
	 * like failing to unreset the SIA soon enough gets it
	 * into a state where it will never come out of reset
	 * until we reset the whole chip again.
	 */
	if (DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
		CSR_WRITE_4(sc, DC_10BTCTRL, 0xFFFFFFFF);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}
}
/*
 * Look up this device's PCI vendor/device ID in the dc_devs table.
 * Entries also carry a minimum revision so that chips sharing an ID
 * but differing by revision map to the right table slot.  Returns
 * the matching entry, or NULL if the device is not ours.
 */
static const struct dc_type *
dc_devtype(device_t dev)
{
	const struct dc_type *entry;
	uint32_t devid;
	uint8_t rev;

	devid = pci_get_devid(dev);
	rev = pci_get_revid(dev);
	for (entry = dc_devs; entry->dc_name != NULL; entry++) {
		if (entry->dc_devid == devid && rev >= entry->dc_minrev)
			return (entry);
	}
	return (NULL);
}
/*
* Probe for a 21143 or clone chip. Check the PCI vendor and device
* IDs against our list and return a device name if we find a match.
* We do a little bit of extra work to identify the exact type of
* chip. The MX98713 and MX98713A have the same PCI vendor/device ID,
* but different revision IDs. The same is true for 98715/98715A
* chips and the 98725, as well as the ASIX and ADMtek chips. In some
* cases, the exact chip revision affects driver behavior.
*/
static int
dc_probe(device_t dev)
{
	const struct dc_type *t;

	/* Unknown hardware: decline the probe. */
	t = dc_devtype(dev);
	if (t == NULL)
		return (ENXIO);
	device_set_desc(dev, t->dc_name);
	return (BUS_PROBE_DEFAULT);
}
/*
 * Replay the SROM-supplied GPIO sequences (reset sequence, then
 * general-purpose sequence) for the given media type by writing each
 * 16-bit word into the upper half of the watchdog register.  A media
 * type with no recorded fixup is silently ignored.
 */
static void
dc_apply_fixup(struct dc_softc *sc, int media)
{
	struct dc_mediainfo *m;
	uint8_t *seq;
	int idx;
	uint32_t val;

	for (m = sc->dc_mi; m != NULL; m = m->dc_next) {
		if (m->dc_media == media)
			break;
	}
	if (m == NULL)
		return;

	for (idx = 0, seq = m->dc_reset_ptr; idx < m->dc_reset_len;
	    idx++, seq += 2) {
		val = (seq[0] | (seq[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, val);
	}

	for (idx = 0, seq = m->dc_gp_ptr; idx < m->dc_gp_len;
	    idx++, seq += 2) {
		val = (seq[0] | (seq[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, val);
	}
}
/*
 * Decode one SIA media block from the 21143 SROM into a freshly
 * allocated dc_mediainfo record, which is pushed onto sc->dc_mi.
 * Also forces the port mode to SIA.  Returns 0 or ENOMEM.
 */
static int
dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l)
{
	struct dc_mediainfo *m;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL) {
		device_printf(sc->dc_dev, "Could not allocate mediainfo\n");
		return (ENOMEM);
	}
	/* Map the SROM media code (extension flag masked off) to ifmedia.
	 * Unknown codes leave dc_media at 0 (zeroed allocation). */
	switch (l->dc_sia_code & ~DC_SIA_CODE_EXT) {
	case DC_SIA_CODE_10BT:
		m->dc_media = IFM_10_T;
		break;
	case DC_SIA_CODE_10BT_FDX:
		m->dc_media = IFM_10_T | IFM_FDX;
		break;
	case DC_SIA_CODE_10B2:
		m->dc_media = IFM_10_2;
		break;
	case DC_SIA_CODE_10B5:
		m->dc_media = IFM_10_5;
		break;
	default:
		break;
	}

	/*
	 * We need to ignore CSR13, CSR14, CSR15 for SIA mode.
	 * Things apparently already work for cards that do
	 * supply Media Specific Data.
	 */
	/* Only the GPIO control/data words are kept; their location in
	 * the block depends on whether the extended format is used. */
	if (l->dc_sia_code & DC_SIA_CODE_EXT) {
		m->dc_gp_len = 2;
		m->dc_gp_ptr =
		    (uint8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl;
	} else {
		m->dc_gp_len = 2;
		m->dc_gp_ptr =
		    (uint8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl;
	}

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SIA;
	return (0);
}
/*
 * Decode one SYM (symbol/100Mbps) media block from the 21143 SROM
 * into a freshly allocated dc_mediainfo record, pushed onto
 * sc->dc_mi.  Also forces the port mode to SYM.  Returns 0 or ENOMEM.
 */
static int
dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l)
{
	struct dc_mediainfo *m;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL) {
		device_printf(sc->dc_dev, "Could not allocate mediainfo\n");
		return (ENOMEM);
	}
	/* Unknown codes leave dc_media at 0 (zeroed allocation). */
	switch (l->dc_sym_code) {
	case DC_SYM_CODE_100BT:
		m->dc_media = IFM_100_TX;
		break;
	case DC_SYM_CODE_100BT_FDX:
		m->dc_media = IFM_100_TX | IFM_FDX;
		break;
	default:
		break;
	}

	/* Keep a pointer to the block's GPIO control/data words. */
	m->dc_gp_len = 2;
	m->dc_gp_ptr = (uint8_t *)&l->dc_sym_gpio_ctl;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SYM;
	return (0);
}
/*
 * Decode one MII media block from the 21143 SROM into a freshly
 * allocated dc_mediainfo record, pushed onto sc->dc_mi.  The block's
 * variable-length payload is laid out as: fixed header, then
 * dc_gpr_len 16-bit GP-port words, then a one-byte reset-sequence
 * length followed by the reset words.  Returns 0 or ENOMEM.
 */
static int
dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l)
{
	struct dc_mediainfo *m;
	uint8_t *p;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL) {
		device_printf(sc->dc_dev, "Could not allocate mediainfo\n");
		return (ENOMEM);
	}
	/* We abuse IFM_AUTO to represent MII. */
	m->dc_media = IFM_AUTO;
	m->dc_gp_len = l->dc_gpr_len;

	/* Walk past the fixed header to the GP-port word sequence... */
	p = (uint8_t *)l;
	p += sizeof(struct dc_eblock_mii);
	m->dc_gp_ptr = p;
	/* ...then past it (2 bytes per word) to the reset sequence. */
	p += 2 * l->dc_gpr_len;
	m->dc_reset_len = *p;
	p++;
	m->dc_reset_ptr = p;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	return (0);
}
/*
 * Allocate sc->dc_srom and fill it with the entire contents of the
 * serial EEPROM, whose address width is `bits`.  Returns 0 or ENOMEM.
 */
static int
dc_read_srom(struct dc_softc *sc, int bits)
{
	int len = DC_ROM_SIZE(bits);

	sc->dc_srom = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->dc_srom == NULL) {
		device_printf(sc->dc_dev, "Could not allocate SROM buffer\n");
		return (ENOMEM);
	}
	/* The EEPROM is addressed in 16-bit words, hence len / 2. */
	dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (len / 2), 0);
	return (0);
}
/*
 * Parse the 21143 SROM's media leaf (located via the offset stored at
 * SROM byte 27) and build the sc->dc_mi media list.  Two passes are
 * made over the media blocks: the first only detects whether any MII
 * block exists, the second decodes blocks — SIA/SYM blocks are used
 * only when no MII block was found.  Returns 0 or an error from the
 * leaf decoders.
 */
static int
dc_parse_21143_srom(struct dc_softc *sc)
{
	struct dc_leaf_hdr *lhdr;
	struct dc_eblock_hdr *hdr;
	int error, have_mii, i, loff;
	char *ptr;

	have_mii = 0;
	loff = sc->dc_srom[27];
	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);

	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	/*
	 * Look if we got a MII media block.
	 */
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		if (hdr->dc_type == DC_EBLOCK_MII)
			have_mii++;

		/* Block length is 7 bits; +1 for the header byte itself. */
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}

	/*
	 * Do the same thing again. Only use SIA and SYM media
	 * blocks if no MII media block is available.
	 */
	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	error = 0;
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		switch (hdr->dc_type) {
		case DC_EBLOCK_MII:
			error = dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
			break;
		case DC_EBLOCK_SIA:
			if (! have_mii)
				error = dc_decode_leaf_sia(sc,
				    (struct dc_eblock_sia *)hdr);
			break;
		case DC_EBLOCK_SYM:
			if (! have_mii)
				error = dc_decode_leaf_sym(sc,
				    (struct dc_eblock_sym *)hdr);
			break;
		default:
			/* Don't care. Yet. */
			break;
		}
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}
	return (error);
}
/*
 * bus_dmamap_load() callback: record the bus address of a
 * single-segment mapping into the bus_addr_t supplied via `arg`.
 * The `error` argument is deliberately ignored, matching the
 * driver's use of BUS_DMA_NOWAIT loads checked at the call site.
 */
static void
dc_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *addrp = arg;

	KASSERT(nseg == 1,
	    ("%s: wrong number of segments (%d)", __func__, nseg));
	*addrp = segs[0].ds_addr;
}
/*
 * Create all busdma tags, allocate and load the TX/RX descriptor
 * rings and the multicast setup frame, and create per-buffer DMA
 * maps.  On any failure the partially-created state is left in the
 * softc for dc_dma_free() to unwind (attach calls dc_detach on
 * error).  Returns 0 or a busdma error.
 */
static int
dc_dma_alloc(struct dc_softc *sc)
{
	int error, i;

	/* Parent tag: constrains everything below to 32-bit addresses. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
	    NULL, NULL, &sc->dc_ptag);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to allocate parent DMA tag\n");
		goto fail;
	}

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(sc->dc_ptag, DC_LIST_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, DC_RX_LIST_SZ, 1,
	    DC_RX_LIST_SZ, 0, NULL, NULL, &sc->dc_rx_ltag);
	if (error) {
		device_printf(sc->dc_dev, "failed to create RX list DMA tag\n");
		goto fail;
	}

	error = bus_dma_tag_create(sc->dc_ptag, DC_LIST_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, DC_TX_LIST_SZ, 1,
	    DC_TX_LIST_SZ, 0, NULL, NULL, &sc->dc_tx_ltag);
	if (error) {
		device_printf(sc->dc_dev, "failed to create TX list DMA tag\n");
		goto fail;
	}

	/* RX descriptor list. */
	error = bus_dmamem_alloc(sc->dc_rx_ltag,
	    (void **)&sc->dc_ldata.dc_rx_list, BUS_DMA_NOWAIT |
	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->dc_rx_lmap);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to allocate DMA'able memory for RX list\n");
		goto fail;
	}
	error = bus_dmamap_load(sc->dc_rx_ltag, sc->dc_rx_lmap,
	    sc->dc_ldata.dc_rx_list, DC_RX_LIST_SZ, dc_dma_map_addr,
	    &sc->dc_ldata.dc_rx_list_paddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to load DMA'able memory for RX list\n");
		goto fail;
	}
	/* TX descriptor list. */
	error = bus_dmamem_alloc(sc->dc_tx_ltag,
	    (void **)&sc->dc_ldata.dc_tx_list, BUS_DMA_NOWAIT |
	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->dc_tx_lmap);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to allocate DMA'able memory for TX list\n");
		goto fail;
	}
	error = bus_dmamap_load(sc->dc_tx_ltag, sc->dc_tx_lmap,
	    sc->dc_ldata.dc_tx_list, DC_TX_LIST_SZ, dc_dma_map_addr,
	    &sc->dc_ldata.dc_tx_list_paddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->dc_dev,
		    "cannot load DMA'able memory for TX list\n");
		goto fail;
	}

	/*
	 * Allocate a busdma tag and DMA safe memory for the multicast
	 * setup frame.
	 */
	error = bus_dma_tag_create(sc->dc_ptag, DC_LIST_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    DC_SFRAME_LEN + DC_MIN_FRAMELEN, 1, DC_SFRAME_LEN + DC_MIN_FRAMELEN,
	    0, NULL, NULL, &sc->dc_stag);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to create DMA tag for setup frame\n");
		goto fail;
	}
	error = bus_dmamem_alloc(sc->dc_stag, (void **)&sc->dc_cdata.dc_sbuf,
	    BUS_DMA_NOWAIT, &sc->dc_smap);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to allocate DMA'able memory for setup frame\n");
		goto fail;
	}
	error = bus_dmamap_load(sc->dc_stag, sc->dc_smap, sc->dc_cdata.dc_sbuf,
	    DC_SFRAME_LEN, dc_dma_map_addr, &sc->dc_saddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->dc_dev,
		    "cannot load DMA'able memory for setup frame\n");
		goto fail;
	}

	/* Allocate a busdma tag for RX mbufs. */
	error = bus_dma_tag_create(sc->dc_ptag, DC_RXBUF_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->dc_rx_mtag);
	if (error) {
		device_printf(sc->dc_dev, "failed to create RX mbuf tag\n");
		goto fail;
	}

	/* Allocate a busdma tag for TX mbufs. */
	error = bus_dma_tag_create(sc->dc_ptag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * DC_MAXFRAGS, DC_MAXFRAGS, MCLBYTES,
	    0, NULL, NULL, &sc->dc_tx_mtag);
	if (error) {
		device_printf(sc->dc_dev, "failed to create TX mbuf tag\n");
		goto fail;
	}

	/* Create the TX/RX busdma maps. */
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->dc_tx_mtag, 0,
		    &sc->dc_cdata.dc_tx_map[i]);
		if (error) {
			device_printf(sc->dc_dev,
			    "failed to create TX mbuf dmamap\n");
			goto fail;
		}
	}
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->dc_rx_mtag, 0,
		    &sc->dc_cdata.dc_rx_map[i]);
		if (error) {
			device_printf(sc->dc_dev,
			    "failed to create RX mbuf dmamap\n");
			goto fail;
		}
	}
	/* Spare map used when swapping a freshly-loaded RX buffer in. */
	error = bus_dmamap_create(sc->dc_rx_mtag, 0, &sc->dc_sparemap);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to create spare RX mbuf dmamap\n");
		goto fail;
	}

fail:
	return (error);
}
/*
 * Release everything dc_dma_alloc() created, in reverse order.  Safe
 * to call with partially-initialized state (attach error path): each
 * stage is guarded by its own tag/pointer checks.
 */
static void
dc_dma_free(struct dc_softc *sc)
{
	int i;

	/* RX buffers. */
	if (sc->dc_rx_mtag != NULL) {
		for (i = 0; i < DC_RX_LIST_CNT; i++) {
			if (sc->dc_cdata.dc_rx_map[i] != NULL)
				bus_dmamap_destroy(sc->dc_rx_mtag,
				    sc->dc_cdata.dc_rx_map[i]);
		}
		if (sc->dc_sparemap != NULL)
			bus_dmamap_destroy(sc->dc_rx_mtag, sc->dc_sparemap);
		bus_dma_tag_destroy(sc->dc_rx_mtag);
	}

	/*
	 * TX buffers.  Note: the guard must test dc_tx_mtag, not
	 * dc_rx_mtag (previous copy-paste bug) — otherwise a failure
	 * between the two tag creations in dc_dma_alloc() would lead
	 * us to destroy maps on a NULL TX tag here.
	 */
	if (sc->dc_tx_mtag != NULL) {
		for (i = 0; i < DC_TX_LIST_CNT; i++) {
			if (sc->dc_cdata.dc_tx_map[i] != NULL)
				bus_dmamap_destroy(sc->dc_tx_mtag,
				    sc->dc_cdata.dc_tx_map[i]);
		}
		bus_dma_tag_destroy(sc->dc_tx_mtag);
	}

	/* RX descriptor list. */
	if (sc->dc_rx_ltag) {
		if (sc->dc_ldata.dc_rx_list_paddr != 0)
			bus_dmamap_unload(sc->dc_rx_ltag, sc->dc_rx_lmap);
		if (sc->dc_ldata.dc_rx_list != NULL)
			bus_dmamem_free(sc->dc_rx_ltag, sc->dc_ldata.dc_rx_list,
			    sc->dc_rx_lmap);
		bus_dma_tag_destroy(sc->dc_rx_ltag);
	}

	/* TX descriptor list. */
	if (sc->dc_tx_ltag) {
		if (sc->dc_ldata.dc_tx_list_paddr != 0)
			bus_dmamap_unload(sc->dc_tx_ltag, sc->dc_tx_lmap);
		if (sc->dc_ldata.dc_tx_list != NULL)
			bus_dmamem_free(sc->dc_tx_ltag, sc->dc_ldata.dc_tx_list,
			    sc->dc_tx_lmap);
		bus_dma_tag_destroy(sc->dc_tx_ltag);
	}

	/* multicast setup frame. */
	if (sc->dc_stag) {
		if (sc->dc_saddr != 0)
			bus_dmamap_unload(sc->dc_stag, sc->dc_smap);
		if (sc->dc_cdata.dc_sbuf != NULL)
			bus_dmamem_free(sc->dc_stag, sc->dc_cdata.dc_sbuf,
			    sc->dc_smap);
		bus_dma_tag_destroy(sc->dc_stag);
	}
}
/*
* Attach the interface. Allocate softc structures, do ifmedia
* setup and ethernet/BPF attach.
*/
/*
 * Attach the interface.  Identify the exact chip variant, read the
 * station address from whichever store the variant uses (EEPROM,
 * registers, CIS, or ID table), set up DMA, attach the MII/dcphy
 * layer, and register with the network stack.  On any error the
 * partially-initialized state is torn down via dc_detach().
 *
 * NOTE(review): stray unified-diff marker lines ("-" prefixed) that
 * had leaked into this function around the if_alloc() call were
 * removed; if_alloc() cannot return NULL in current FreeBSD, so the
 * deleted NULL check is correctly gone.
 */
static int
dc_attach(device_t dev)
{
	uint32_t eaddr[(ETHER_ADDR_LEN+3)/4];
	uint32_t command;
	struct dc_softc *sc;
	if_t ifp;
	struct dc_mediainfo *m;
	uint32_t reg, revision;
	uint16_t *srom;
	int error, mac_offset, n, phy, rid, tmp;
	uint8_t *mac;

	sc = device_get_softc(dev);
	sc->dc_dev = dev;

	mtx_init(&sc->dc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = DC_RID;
	sc->dc_res = bus_alloc_resource_any(dev, DC_RES, &rid, RF_ACTIVE);

	if (sc->dc_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->dc_btag = rman_get_bustag(sc->dc_res);
	sc->dc_bhandle = rman_get_bushandle(sc->dc_res);

	/* Allocate interrupt. */
	rid = 0;
	sc->dc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->dc_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Need this info to decide on a chip type. */
	sc->dc_info = dc_devtype(dev);
	revision = pci_get_revid(dev);

	error = 0;
	/* Get the eeprom width, but PNIC and XIRCOM have diff eeprom */
	if (sc->dc_info->dc_devid !=
	    DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168) &&
	    sc->dc_info->dc_devid !=
	    DC_DEVID(DC_VENDORID_XIRCOM, DC_DEVICEID_X3201))
		dc_eeprom_width(sc);

	/* Per-variant type, feature flags, and port-mode selection. */
	switch (sc->dc_info->dc_devid) {
	case DC_DEVID(DC_VENDORID_DEC, DC_DEVICEID_21143):
		sc->dc_type = DC_TYPE_21143;
		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		/* Save EEPROM contents so we can parse them later. */
		error = dc_read_srom(sc, sc->dc_romwidth);
		if (error != 0)
			goto fail;
		break;
	case DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009):
	case DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100):
	case DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102):
		sc->dc_type = DC_TYPE_DM9102;
		sc->dc_flags |= DC_TX_COALESCE | DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_REDUCED_MII_POLL | DC_TX_STORENFWD;
		sc->dc_flags |= DC_TX_ALIGN;
		sc->dc_pmode = DC_PMODE_MII;

		/* Increase the latency timer value. */
		pci_write_config(dev, PCIR_LATTIMER, 0x80, 1);
		break;
	case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AL981):
		sc->dc_type = DC_TYPE_AL981;
		sc->dc_flags |= DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_TX_ADMTEK_WAR;
		sc->dc_pmode = DC_PMODE_MII;
		error = dc_read_srom(sc, sc->dc_romwidth);
		if (error != 0)
			goto fail;
		break;
	case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN983):
	case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN985):
	case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511):
	case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513):
	case DC_DEVID(DC_VENDORID_DLINK, DC_DEVICEID_DRP32TXD):
	case DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500):
	case DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500MX):
	case DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN2242):
	case DC_DEVID(DC_VENDORID_HAWKING, DC_DEVICEID_HAWKING_PN672TX):
	case DC_DEVID(DC_VENDORID_PLANEX, DC_DEVICEID_FNW3602T):
	case DC_DEVID(DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB):
	case DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN120):
	case DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130):
	case DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB08):
	case DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB09):
		sc->dc_type = DC_TYPE_AN983;
		sc->dc_flags |= DC_64BIT_HASH;
		sc->dc_flags |= DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_TX_ADMTEK_WAR;
		sc->dc_pmode = DC_PMODE_MII;
		/* Don't read SROM for - auto-loaded on reset */
		break;
	case DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98713):
	case DC_DEVID(DC_VENDORID_CP, DC_DEVICEID_98713_CP):
		if (revision < DC_REVISION_98713A) {
			sc->dc_type = DC_TYPE_98713;
		}
		if (revision >= DC_REVISION_98713A) {
			sc->dc_type = DC_TYPE_98713A;
			sc->dc_flags |= DC_21143_NWAY;
		}
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
		break;
	case DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5):
	case DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN1217):
		/*
		 * Macronix MX98715AEC-C/D/E parts have only a
		 * 128-bit hash table. We need to deal with these
		 * in the same manner as the PNIC II so that we
		 * get the right number of bits out of the
		 * CRC routine.
		 */
		if (revision >= DC_REVISION_98715AEC_C &&
		    revision < DC_REVISION_98725)
			sc->dc_flags |= DC_128BIT_HASH;
		sc->dc_type = DC_TYPE_987x5;
		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY;
		break;
	case DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98727):
		sc->dc_type = DC_TYPE_987x5;
		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY;
		break;
	case DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C115):
		sc->dc_type = DC_TYPE_PNICII;
		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR | DC_128BIT_HASH;
		sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY;
		break;
	case DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168):
		sc->dc_type = DC_TYPE_PNIC;
		sc->dc_flags |= DC_TX_STORENFWD | DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_PNIC_RX_BUG_WAR;
		sc->dc_pnic_rx_buf = malloc(DC_RXLEN * 5, M_DEVBUF, M_NOWAIT);
		if (sc->dc_pnic_rx_buf == NULL) {
			device_printf(sc->dc_dev,
			    "Could not allocate PNIC RX buffer\n");
			error = ENOMEM;
			goto fail;
		}
		if (revision < DC_REVISION_82C169)
			sc->dc_pmode = DC_PMODE_SYM;
		break;
	case DC_DEVID(DC_VENDORID_ASIX, DC_DEVICEID_AX88140A):
		sc->dc_type = DC_TYPE_ASIX;
		sc->dc_flags |= DC_TX_USE_TX_INTR | DC_TX_INTR_FIRSTFRAG;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_pmode = DC_PMODE_MII;
		break;
	case DC_DEVID(DC_VENDORID_XIRCOM, DC_DEVICEID_X3201):
		sc->dc_type = DC_TYPE_XIRCOM;
		sc->dc_flags |= DC_TX_INTR_ALWAYS | DC_TX_COALESCE |
				DC_TX_ALIGN;
		/*
		 * We don't actually need to coalesce, but we're doing
		 * it to obtain a double word aligned buffer.
		 * The DC_TX_COALESCE flag is required.
		 */
		sc->dc_pmode = DC_PMODE_MII;
		break;
	case DC_DEVID(DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112):
		sc->dc_type = DC_TYPE_CONEXANT;
		sc->dc_flags |= DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_pmode = DC_PMODE_MII;
		error = dc_read_srom(sc, sc->dc_romwidth);
		if (error != 0)
			goto fail;
		break;
	case DC_DEVID(DC_VENDORID_ULI, DC_DEVICEID_M5261):
	case DC_DEVID(DC_VENDORID_ULI, DC_DEVICEID_M5263):
		if (sc->dc_info->dc_devid ==
		    DC_DEVID(DC_VENDORID_ULI, DC_DEVICEID_M5261))
			sc->dc_type = DC_TYPE_ULI_M5261;
		else
			sc->dc_type = DC_TYPE_ULI_M5263;
		/* TX buffers should be aligned on 4 byte boundary. */
		sc->dc_flags |= DC_TX_INTR_ALWAYS | DC_TX_COALESCE |
		    DC_TX_ALIGN;
		sc->dc_pmode = DC_PMODE_MII;
		error = dc_read_srom(sc, sc->dc_romwidth);
		if (error != 0)
			goto fail;
		break;
	default:
		device_printf(dev, "unknown device: %x\n",
		    sc->dc_info->dc_devid);
		break;
	}

	/* Save the cache line size. */
	if (DC_IS_DAVICOM(sc))
		sc->dc_cachesize = 0;
	else
		sc->dc_cachesize = pci_get_cachelnsz(dev);

	/* Reset the adapter. */
	dc_reset(sc);

	/* Take 21143 out of snooze mode */
	if (DC_IS_INTEL(sc) || DC_IS_XIRCOM(sc)) {
		command = pci_read_config(dev, DC_PCI_CFDD, 4);
		command &= ~(DC_CFDD_SNOOZE_MODE | DC_CFDD_SLEEP_MODE);
		pci_write_config(dev, DC_PCI_CFDD, command, 4);
	}

	/*
	 * Try to learn something about the supported media.
	 * We know that ASIX and ADMtek and Davicom devices
	 * will *always* be using MII media, so that's a no-brainer.
	 * The tricky ones are the Macronix/PNIC II and the
	 * Intel 21143.
	 */
	if (DC_IS_INTEL(sc)) {
		error = dc_parse_21143_srom(sc);
		if (error != 0)
			goto fail;
	} else if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		if (sc->dc_type == DC_TYPE_98713)
			sc->dc_pmode = DC_PMODE_MII;
		else
			sc->dc_pmode = DC_PMODE_SYM;
	} else if (!sc->dc_pmode)
		sc->dc_pmode = DC_PMODE_MII;

	/*
	 * Get station address from the EEPROM.
	 */
	switch (sc->dc_type) {
	case DC_TYPE_98713:
	case DC_TYPE_98713A:
	case DC_TYPE_987x5:
	case DC_TYPE_PNICII:
		/* Macronix/PNIC II: the MAC lives at an offset stored
		 * in the EEPROM itself. */
		dc_read_eeprom(sc, (caddr_t)&mac_offset,
		    (DC_EE_NODEADDR_OFFSET / 2), 1, 0);
		dc_read_eeprom(sc, (caddr_t)&eaddr, (mac_offset / 2), 3, 0);
		break;
	case DC_TYPE_PNIC:
		dc_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 1);
		break;
	case DC_TYPE_DM9102:
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
		break;
	case DC_TYPE_21143:
	case DC_TYPE_ASIX:
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
		break;
	case DC_TYPE_AL981:
	case DC_TYPE_AN983:
		/* ADMtek parts auto-load the MAC into the PAR registers. */
		reg = CSR_READ_4(sc, DC_AL_PAR0);
		mac = (uint8_t *)&eaddr[0];
		mac[0] = (reg >> 0) & 0xff;
		mac[1] = (reg >> 8) & 0xff;
		mac[2] = (reg >> 16) & 0xff;
		mac[3] = (reg >> 24) & 0xff;
		reg = CSR_READ_4(sc, DC_AL_PAR1);
		mac[4] = (reg >> 0) & 0xff;
		mac[5] = (reg >> 8) & 0xff;
		break;
	case DC_TYPE_CONEXANT:
		bcopy(sc->dc_srom + DC_CONEXANT_EE_NODEADDR, &eaddr,
		    ETHER_ADDR_LEN);
		break;
	case DC_TYPE_XIRCOM:
		/* The MAC comes from the CIS. */
		mac = pci_get_ether(dev);
		if (!mac) {
			device_printf(dev, "No station address in CIS!\n");
			error = ENXIO;
			goto fail;
		}
		bcopy(mac, eaddr, ETHER_ADDR_LEN);
		break;
	case DC_TYPE_ULI_M5261:
	case DC_TYPE_ULI_M5263:
		srom = (uint16_t *)sc->dc_srom;
		if (srom == NULL || *srom == 0xFFFF || *srom == 0) {
			/*
			 * No valid SROM present, read station address
			 * from ID Table.
			 */
			device_printf(dev,
			    "Reading station address from ID Table.\n");
			CSR_WRITE_4(sc, DC_BUSCTL, 0x10000);
			CSR_WRITE_4(sc, DC_SIARESET, 0x01C0);
			CSR_WRITE_4(sc, DC_10BTCTRL, 0x0000);
			CSR_WRITE_4(sc, DC_10BTCTRL, 0x0010);
			CSR_WRITE_4(sc, DC_10BTCTRL, 0x0000);
			CSR_WRITE_4(sc, DC_SIARESET, 0x0000);
			CSR_WRITE_4(sc, DC_SIARESET, 0x01B0);
			mac = (uint8_t *)eaddr;
			for (n = 0; n < ETHER_ADDR_LEN; n++)
				mac[n] = (uint8_t)CSR_READ_4(sc, DC_10BTCTRL);
			CSR_WRITE_4(sc, DC_SIARESET, 0x0000);
			CSR_WRITE_4(sc, DC_BUSCTL, 0x0000);
			DELAY(10);
		} else
			dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3,
			    0);
		break;
	default:
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
		break;
	}

	bcopy(eaddr, sc->dc_eaddr, sizeof(eaddr));
	/*
	 * If we still have invalid station address, see whether we can
	 * find station address for chip 0. Some multi-port controllers
	 * just store station address for chip 0 if they have a shared
	 * SROM.
	 */
	if ((sc->dc_eaddr[0] == 0 && (sc->dc_eaddr[1] & ~0xffff) == 0) ||
	    (sc->dc_eaddr[0] == 0xffffffff &&
	    (sc->dc_eaddr[1] & 0xffff) == 0xffff)) {
		error = dc_check_multiport(sc);
		if (error == 0) {
			bcopy(sc->dc_eaddr, eaddr, sizeof(eaddr));
			/* Extract media information. */
			if (DC_IS_INTEL(sc) && sc->dc_srom != NULL) {
				while (sc->dc_mi != NULL) {
					m = sc->dc_mi->dc_next;
					free(sc->dc_mi, M_DEVBUF);
					sc->dc_mi = m;
				}
				error = dc_parse_21143_srom(sc);
				if (error != 0)
					goto fail;
			}
		} else if (error == ENOMEM)
			goto fail;
		else
			error = 0;
	}

	if ((error = dc_dma_alloc(sc)) != 0)
		goto fail;

	/* if_alloc(IFT_ETHER) never fails; no NULL check needed. */
	ifp = sc->dc_ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, dc_ioctl);
	if_setstartfn(ifp, dc_start);
	if_setinitfn(ifp, dc_init);
	if_setsendqlen(ifp, DC_TX_LIST_CNT - 1);
	if_setsendqready(ifp);

	/*
	 * Do MII setup. If this is a 21143, check for a PHY on the
	 * MII bus after applying any necessary fixups to twiddle the
	 * GPIO bits. If we don't end up finding a PHY, restore the
	 * old selection (SIA only or SIA/SYM) and attach the dcphy
	 * driver instead.
	 */
	tmp = 0;
	if (DC_IS_INTEL(sc)) {
		dc_apply_fixup(sc, IFM_AUTO);
		tmp = sc->dc_pmode;
		sc->dc_pmode = DC_PMODE_MII;
	}

	/*
	 * Setup General Purpose port mode and data so the tulip can talk
	 * to the MII. This needs to be done before mii_attach so that
	 * we can actually see them.
	 */
	if (DC_IS_XIRCOM(sc)) {
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	phy = MII_PHY_ANY;
	/*
	 * Note: both the AL981 and AN983 have internal PHYs, however the
	 * AL981 provides direct access to the PHY registers while the AN983
	 * uses a serial MII interface. The AN983's MII interface is also
	 * buggy in that you can read from any MII address (0 to 31), but
	 * only address 1 behaves normally. To deal with both cases, we
	 * pretend that the PHY is at MII address 1.
	 */
	if (DC_IS_ADMTEK(sc))
		phy = DC_ADMTEK_PHYADDR;

	/*
	 * Note: the ukphy probes of the RS7112 report a PHY at MII address
	 * 0 (possibly HomePNA?) and 1 (ethernet) so we only respond to the
	 * correct one.
	 */
	if (DC_IS_CONEXANT(sc))
		phy = DC_CONEXANT_PHYADDR;

	error = mii_attach(dev, &sc->dc_miibus, ifp, dc_ifmedia_upd,
	    dc_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);

	if (error && DC_IS_INTEL(sc)) {
		/* No MII PHY found: fall back to SIA/SYM with dcphy. */
		sc->dc_pmode = tmp;
		if (sc->dc_pmode != DC_PMODE_SIA)
			sc->dc_pmode = DC_PMODE_SYM;
		sc->dc_flags |= DC_21143_NWAY;
		/*
		 * For non-MII cards, we need to have the 21143
		 * drive the LEDs. Except there are some systems
		 * like the NEC VersaPro NoteBook PC which have no
		 * LEDs, and twiddling these bits has adverse effects
		 * on them. (I.e. you suddenly can't get a link.)
		 */
		if (!(pci_get_subvendor(dev) == 0x1033 &&
		    pci_get_subdevice(dev) == 0x8028))
			sc->dc_flags |= DC_TULIP_LEDS;
		error = mii_attach(dev, &sc->dc_miibus, ifp, dc_ifmedia_upd,
		    dc_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY,
		    MII_OFFSET_ANY, 0);
	}

	if (error) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	if (DC_IS_ADMTEK(sc)) {
		/*
		 * Set automatic TX underrun recovery for the ADMtek chips
		 */
		DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR);
	}

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif

	callout_init_mtx(&sc->dc_stat_ch, &sc->dc_mtx, 0);
	callout_init_mtx(&sc->dc_wdog_ch, &sc->dc_mtx, 0);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, (caddr_t)eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->dc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dc_intr, sc, &sc->dc_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		dc_detach(dev);
	return (error);
}
/*
* Shutdown hardware and free up resources. This can be called any
* time after the mutex has been initialized. It is called in both
* the error case in attach and the normal detach case so it needs
* to be careful about only freeing resources that have actually been
* allocated.
*/
static int
dc_detach(device_t dev)
{
	struct dc_softc *sc;
	if_t ifp;
	struct dc_mediainfo *m;

	sc = device_get_softc(dev);
	/* The mutex must exist: it is created early in attach. */
	KASSERT(mtx_initialized(&sc->dc_mtx), ("dc mutex not initialized"));

	ifp = sc->dc_ifp;

#ifdef DEVICE_POLLING
	/* Deregister from polling before the interface disappears. */
	if (ifp != NULL && if_getcapenable(ifp) & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		DC_LOCK(sc);
		dc_stop(sc);
		DC_UNLOCK(sc);
		/* Drain after dc_stop() so the callouts cannot rearm. */
		callout_drain(&sc->dc_stat_ch);
		callout_drain(&sc->dc_wdog_ch);
		ether_ifdetach(ifp);
	}
	if (sc->dc_miibus)
		device_delete_child(dev, sc->dc_miibus);
	bus_generic_detach(dev);

	/* Tear down the interrupt before releasing the IRQ resource. */
	if (sc->dc_intrhand)
		bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand);
	if (sc->dc_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq);
	if (sc->dc_res)
		bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res);

	if (ifp != NULL)
		if_free(ifp);

	dc_dma_free(sc);

	/* free(9) tolerates NULL, so no allocation check is needed here. */
	free(sc->dc_pnic_rx_buf, M_DEVBUF);

	/* Walk and free the SROM-derived media info list. */
	while (sc->dc_mi != NULL) {
		m = sc->dc_mi->dc_next;
		free(sc->dc_mi, M_DEVBUF);
		sc->dc_mi = m;
	}
	free(sc->dc_srom, M_DEVBUF);

	mtx_destroy(&sc->dc_mtx);

	return (0);
}
/*
* Initialize the transmit descriptors.
*/
/*
 * Reset the TX descriptor ring: clear every descriptor, chain each one to
 * its successor (wrapping at the end), and zero the software ring state.
 */
static int
dc_list_tx_init(struct dc_softc *sc)
{
	struct dc_chain_data *cd;
	struct dc_list_data *ld;
	int idx, next;

	cd = &sc->dc_cdata;
	ld = &sc->dc_ldata;
	for (idx = 0; idx < DC_TX_LIST_CNT; idx++) {
		/* Link to the following descriptor, closing the ring. */
		next = (idx + 1) % DC_TX_LIST_CNT;
		ld->dc_tx_list[idx].dc_status = 0;
		ld->dc_tx_list[idx].dc_ctl = 0;
		ld->dc_tx_list[idx].dc_data = 0;
		ld->dc_tx_list[idx].dc_next = htole32(DC_TXDESC(sc, next));
		cd->dc_tx_chain[idx] = NULL;
	}

	cd->dc_tx_prod = 0;
	cd->dc_tx_cons = 0;
	cd->dc_tx_cnt = 0;
	cd->dc_tx_pkts = 0;
	bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	return (0);
}
/*
* Initialize the RX descriptors and allocate mbufs for them. Note that
* we arrange the descriptors in a closed ring, so that the last descriptor
* points back to the first.
*/
/*
 * Build the RX descriptor ring: attach a fresh mbuf cluster to every
 * descriptor via dc_newbuf() and link the descriptors into a closed ring.
 * Returns ENOBUFS if any buffer allocation fails.
 */
static int
dc_list_rx_init(struct dc_softc *sc)
{
	struct dc_chain_data *cd;
	struct dc_list_data *ld;
	int idx, next;

	cd = &sc->dc_cdata;
	ld = &sc->dc_ldata;
	for (idx = 0; idx < DC_RX_LIST_CNT; idx++) {
		if (dc_newbuf(sc, idx) != 0)
			return (ENOBUFS);
		/* Close the ring by pointing the last entry at the first. */
		next = (idx + 1) % DC_RX_LIST_CNT;
		ld->dc_rx_list[idx].dc_next = htole32(DC_RXDESC(sc, next));
	}

	cd->dc_rx_prod = 0;
	bus_dmamap_sync(sc->dc_rx_ltag, sc->dc_rx_lmap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	return (0);
}
/*
* Initialize an RX descriptor and attach an MBUF cluster.
*/
static int
dc_newbuf(struct dc_softc *sc, int i)
{
	struct mbuf *m;
	bus_dmamap_t map;
	bus_dma_segment_t segs[1];
	int error, nseg;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/* Trim sizeof(u_int64_t) bytes from the front of the cluster. */
	m_adj(m, sizeof(u_int64_t));

	/*
	 * If this is a PNIC chip, zero the buffer. This is part
	 * of the workaround for the receive bug in the 82c168 and
	 * 82c169 chips.
	 */
	if (sc->dc_flags & DC_PNIC_RX_BUG_WAR)
		bzero(mtod(m, char *), m->m_len);

	/*
	 * Load into the spare map first: if the load fails, the old
	 * buffer in slot i is left untouched and still usable.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->dc_rx_mtag, sc->dc_sparemap,
	    m, segs, &nseg, 0);
	if (error) {
		m_freem(m);
		return (error);
	}
	KASSERT(nseg == 1, ("%s: wrong number of segments (%d)", __func__,
	    nseg));
	if (sc->dc_cdata.dc_rx_chain[i] != NULL)
		bus_dmamap_unload(sc->dc_rx_mtag, sc->dc_cdata.dc_rx_map[i]);

	/* Swap the loaded spare map into slot i; old map becomes the spare. */
	map = sc->dc_cdata.dc_rx_map[i];
	sc->dc_cdata.dc_rx_map[i] = sc->dc_sparemap;
	sc->dc_sparemap = map;
	sc->dc_cdata.dc_rx_chain[i] = m;
	bus_dmamap_sync(sc->dc_rx_mtag, sc->dc_cdata.dc_rx_map[i],
	    BUS_DMASYNC_PREREAD);

	/* Re-arm the descriptor and hand ownership back to the chip. */
	sc->dc_ldata.dc_rx_list[i].dc_ctl = htole32(DC_RXCTL_RLINK | DC_RXLEN);
	sc->dc_ldata.dc_rx_list[i].dc_data =
	    htole32(DC_ADDR_LO(segs[0].ds_addr));
	sc->dc_ldata.dc_rx_list[i].dc_status = htole32(DC_RXSTAT_OWN);
	bus_dmamap_sync(sc->dc_rx_ltag, sc->dc_rx_lmap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	return (0);
}
/*
* Grrrrr.
* The PNIC chip has a terrible bug in it that manifests itself during
* periods of heavy activity. The exact mode of failure is difficult to
* pinpoint: sometimes it only happens in promiscuous mode, sometimes it
* will happen on slow machines. The bug is that sometimes instead of
* uploading one complete frame during reception, it uploads what looks
* like the entire contents of its FIFO memory. The frame we want is at
* the end of the whole mess, but we never know exactly how much data has
* been uploaded, so salvaging the frame is hard.
*
* There is only one way to do it reliably, and it's disgusting.
* Here's what we know:
*
* - We know there will always be somewhere between one and three extra
* descriptors uploaded.
*
* - We know the desired received frame will always be at the end of the
* total data upload.
*
* - We know the size of the desired received frame because it will be
* provided in the length field of the status word in the last descriptor.
*
* Here's what we do:
*
* - When we allocate buffers for the receive ring, we bzero() them.
* This means that we know that the buffer contents should be all
* zeros, except for data uploaded by the chip.
*
* - We also force the PNIC chip to upload frames that include the
* ethernet CRC at the end.
*
* - We gather all of the bogus frame data into a single buffer.
*
* - We then position a pointer at the end of this buffer and scan
* backwards until we encounter the first non-zero byte of data.
* This is the end of the received frame. We know we will encounter
* some data at the end of the frame because the CRC will always be
* there, so even if the sender transmits a packet of all zeros,
* we won't be fooled.
*
* - We know the size of the actual received frame, so we subtract
* that value from the current pointer location. This brings us
* to the start of the actual received packet.
*
* - We copy this into an mbuf and pass it on, along with the actual
* frame length.
*
* The performance hit is tremendous, but it beats dropping frames all
* the time.
*/
#define DC_WHOLEFRAME (DC_RXSTAT_FIRSTFRAG | DC_RXSTAT_LASTFRAG)
static void
dc_pnic_rx_bug_war(struct dc_softc *sc, int idx)
{
	struct dc_desc *cur_rx;
	struct dc_desc *c = NULL;
	struct mbuf *m = NULL;
	unsigned char *ptr;
	int i, total_len;
	uint32_t rxstat = 0;

	/* Start at the descriptor where the bogus upload began. */
	i = sc->dc_pnic_rx_bug_save;
	cur_rx = &sc->dc_ldata.dc_rx_list[idx];
	ptr = sc->dc_pnic_rx_buf;
	bzero(ptr, DC_RXLEN * 5);

	/* Copy all the bytes from the bogus buffers. */
	while (1) {
		c = &sc->dc_ldata.dc_rx_list[i];
		rxstat = le32toh(c->dc_status);
		m = sc->dc_cdata.dc_rx_chain[i];
		bcopy(mtod(m, char *), ptr, DC_RXLEN);
		ptr += DC_RXLEN;
		/* If this is the last buffer, break out. */
		if (i == idx || rxstat & DC_RXSTAT_LASTFRAG)
			break;
		/* Recycle intermediate buffers; only the final mbuf is kept. */
		dc_discard_rxbuf(sc, i);
		DC_INC(i, DC_RX_LIST_CNT);
	}

	/* Find the length of the actual receive frame. */
	total_len = DC_RXBYTES(rxstat);

	/* Scan backwards until we hit a non-zero byte. */
	while (*ptr == 0x00)
		ptr--;

	/* Round off. */
	if ((uintptr_t)(ptr) & 0x3)
		ptr -= 1;

	/* Now find the start of the frame. */
	ptr -= total_len;

	/* Clamp in case the backscan undershot the staging buffer. */
	if (ptr < sc->dc_pnic_rx_buf)
		ptr = sc->dc_pnic_rx_buf;

	/*
	 * Now copy the salvaged frame to the last mbuf and fake up
	 * the status word to make it look like a successful
	 * frame reception.
	 */
	bcopy(ptr, mtod(m, char *), total_len);
	cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG);
}
/*
* This routine searches the RX ring for dirty descriptors in the
* event that the rxeof routine falls out of sync with the chip's
* current descriptor pointer. This may happen sometimes as a result
* of a "no RX buffer available" condition that happens when the chip
* consumes all of the RX buffers before the driver has a chance to
* process the RX ring. This routine may need to be called more than
* once to bring the driver back in sync with the chip, however we
* should still be getting RX DONE interrupts to drive the search
* for new packets in the RX ring, so we should catch up eventually.
*/
static int
dc_rx_resync(struct dc_softc *sc)
{
	struct dc_desc *cur_rx;
	int i, pos;

	pos = sc->dc_cdata.dc_rx_prod;

	/* Walk the ring looking for the first descriptor we still own. */
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		cur_rx = &sc->dc_ldata.dc_rx_list[pos];
		if (!(le32toh(cur_rx->dc_status) & DC_RXSTAT_OWN))
			break;
		DC_INC(pos, DC_RX_LIST_CNT);
	}

	/* If the ring really is empty, then just return. */
	if (i == DC_RX_LIST_CNT)
		return (0);

	/* We've fallen behind the chip: catch it. */
	sc->dc_cdata.dc_rx_prod = pos;

	/* Non-zero tells the caller to re-run dc_rxeof(). */
	return (EAGAIN);
}
static void
dc_discard_rxbuf(struct dc_softc *sc, int i)
{
struct mbuf *m;
if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
m = sc->dc_cdata.dc_rx_chain[i];
bzero(mtod(m, char *), m->m_len);
}
sc->dc_ldata.dc_rx_list[i].dc_ctl = htole32(DC_RXCTL_RLINK | DC_RXLEN);
sc->dc_ldata.dc_rx_list[i].dc_status = htole32(DC_RXSTAT_OWN);
bus_dmamap_sync(sc->dc_rx_ltag, sc->dc_rx_lmap, BUS_DMASYNC_PREREAD |
BUS_DMASYNC_PREWRITE);
}
/*
* A frame has been uploaded: pass the resulting mbuf chain up to
* the higher level protocols.
*/
static int
dc_rxeof(struct dc_softc *sc)
{
	struct mbuf *m;
	if_t ifp;
	struct dc_desc *cur_rx;
	int i, total_len, rx_npkts;
	uint32_t rxstat;

	DC_LOCK_ASSERT(sc);

	ifp = sc->dc_ifp;
	rx_npkts = 0;

	bus_dmamap_sync(sc->dc_rx_ltag, sc->dc_rx_lmap, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	/*
	 * Walk the ring from the producer index until we hit a descriptor
	 * still owned by the chip, or the interface is taken down (the lock
	 * is dropped around if_input() below, so that can happen mid-loop).
	 */
	for (i = sc->dc_cdata.dc_rx_prod;
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0;
	    DC_INC(i, DC_RX_LIST_CNT)) {
#ifdef DEVICE_POLLING
		/* In polling mode, honor the caller's packet budget. */
		if (if_getcapenable(ifp) & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		cur_rx = &sc->dc_ldata.dc_rx_list[i];
		rxstat = le32toh(cur_rx->dc_status);
		if ((rxstat & DC_RXSTAT_OWN) != 0)
			break;
		m = sc->dc_cdata.dc_rx_chain[i];
		bus_dmamap_sync(sc->dc_rx_mtag, sc->dc_cdata.dc_rx_map[i],
		    BUS_DMASYNC_POSTREAD);
		total_len = DC_RXBYTES(rxstat);
		rx_npkts++;

		/*
		 * PNIC RX bug: if the frame does not span exactly one
		 * descriptor, invoke the salvage routine (see the block
		 * comment above dc_pnic_rx_bug_war()).
		 */
		if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
			if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
				if (rxstat & DC_RXSTAT_FIRSTFRAG)
					sc->dc_pnic_rx_bug_save = i;
				if ((rxstat & DC_RXSTAT_LASTFRAG) == 0)
					continue;
				dc_pnic_rx_bug_war(sc, i);
				rxstat = le32toh(cur_rx->dc_status);
				total_len = DC_RXBYTES(rxstat);
			}
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring. However, don't report long
		 * frames as errors since they could be vlans.
		 */
		if ((rxstat & DC_RXSTAT_RXERR)) {
			if (!(rxstat & DC_RXSTAT_GIANT) ||
			    (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE |
			    DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN |
			    DC_RXSTAT_RUNT | DC_RXSTAT_DE))) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				if (rxstat & DC_RXSTAT_COLLSEEN)
					if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
				dc_discard_rxbuf(sc, i);
				if (rxstat & DC_RXSTAT_CRCERR)
					continue;
				else {
					/* Non-CRC errors: reinitialize the chip. */
					if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
					dc_init_locked(sc);
					return (rx_npkts);
				}
			}
		}

		/* No errors; receive the packet. */
		total_len -= ETHER_CRC_LEN;
#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * On architectures without alignment problems we try to
		 * allocate a new buffer for the receive ring, and pass up
		 * the one where the packet is already, saving the expensive
		 * copy done in m_devget().
		 * If we are on an architecture with alignment problems, or
		 * if the allocation fails, then use m_devget and leave the
		 * existing buffer in the receive ring.
		 */
		if (dc_newbuf(sc, i) != 0) {
			dc_discard_rxbuf(sc, i);
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			continue;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#else
		{
			struct mbuf *m0;

			/* Copy out with ETHER_ALIGN so the IP header is aligned. */
			m0 = m_devget(mtod(m, char *), total_len,
			    ETHER_ALIGN, ifp, NULL);
			dc_discard_rxbuf(sc, i);
			if (m0 == NULL) {
				if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
				continue;
			}
			m = m0;
		}
#endif

		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		/* Drop the lock across the upcall into the network stack. */
		DC_UNLOCK(sc);
		if_input(ifp, m);
		DC_LOCK(sc);
	}

	sc->dc_cdata.dc_rx_prod = i;
	return (rx_npkts);
}
/*
* A frame was downloaded to the chip. It's safe for us to clean up
* the list buffers.
*/
static void
dc_txeof(struct dc_softc *sc)
{
	struct dc_desc *cur_tx;
	if_t ifp;
	int idx, setup;
	uint32_t ctl, txstat;

	if (sc->dc_cdata.dc_tx_cnt == 0)
		return;

	ifp = sc->dc_ifp;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	setup = 0;
	for (idx = sc->dc_cdata.dc_tx_cons; idx != sc->dc_cdata.dc_tx_prod;
	    DC_INC(idx, DC_TX_LIST_CNT), sc->dc_cdata.dc_tx_cnt--) {
		cur_tx = &sc->dc_ldata.dc_tx_list[idx];
		txstat = le32toh(cur_tx->dc_status);
		ctl = le32toh(cur_tx->dc_ctl);

		/* Stop at the first descriptor the chip still owns. */
		if (txstat & DC_TXSTAT_OWN)
			break;

		/* Descriptors without an mbuf carry no completion work. */
		if (sc->dc_cdata.dc_tx_chain[idx] == NULL)
			continue;

		if (ctl & DC_TXCTL_SETUP) {
			/* This was an RX filter setup frame, not a packet. */
			cur_tx->dc_ctl = htole32(ctl & ~DC_TXCTL_SETUP);
			setup++;
			bus_dmamap_sync(sc->dc_stag, sc->dc_smap,
			    BUS_DMASYNC_POSTWRITE);
			/*
			 * Yes, the PNIC is so brain damaged
			 * that it will sometimes generate a TX
			 * underrun error while DMAing the RX
			 * filter setup frame. If we detect this,
			 * we have to send the setup frame again,
			 * or else the filter won't be programmed
			 * correctly.
			 */
			if (DC_IS_PNIC(sc)) {
				if (txstat & DC_TXSTAT_ERRSUM)
					dc_setfilt(sc);
			}
			sc->dc_cdata.dc_tx_chain[idx] = NULL;
			continue;
		}

		if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) {
			/*
			 * XXX: Why does my Xircom taunt me so?
			 * For some reason it likes setting the CARRLOST flag
			 * even when the carrier is there. wtf?!?
			 * Who knows, but Conexant chips have the
			 * same problem. Maybe they took lessons
			 * from Xircom.
			 */
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM |
			    DC_TXSTAT_NOCARRIER)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		} else {
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM |
			    DC_TXSTAT_NOCARRIER | DC_TXSTAT_CARRLOST)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		}

		if (txstat & DC_TXSTAT_ERRSUM) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			if (txstat & DC_TXSTAT_EXCESSCOLL)
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
			if (txstat & DC_TXSTAT_LATECOLL)
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
			if (!(txstat & DC_TXSTAT_UNDERRUN)) {
				/*
				 * Underruns are recovered elsewhere
				 * (dc_tx_underrun); anything else means
				 * the chip needs a full reinit.
				 */
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				dc_init_locked(sc);
				return;
			}
		} else
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & DC_TXSTAT_COLLCNT) >> 3);

		bus_dmamap_sync(sc->dc_tx_mtag, sc->dc_cdata.dc_tx_map[idx],
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->dc_tx_mtag, sc->dc_cdata.dc_tx_map[idx]);
		m_freem(sc->dc_cdata.dc_tx_chain[idx]);
		sc->dc_cdata.dc_tx_chain[idx] = NULL;
	}
	sc->dc_cdata.dc_tx_cons = idx;

	if (sc->dc_cdata.dc_tx_cnt <= DC_TX_LIST_CNT - DC_TX_LIST_RSVD) {
		/* Enough room freed up: clear the stall flag. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		if (sc->dc_cdata.dc_tx_cnt == 0)
			sc->dc_wdog_timer = 0;
	}
	if (setup > 0)
		/* We rewrote setup descriptors above; push the changes out. */
		bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * Periodic housekeeping: reclaim TX descriptors on chips that coalesce
 * TX interrupts, poll the PHY/link state, kick pending transmissions
 * once a link appears, and reschedule itself.
 */
static void
dc_tick(void *xsc)
{
	struct dc_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	uint32_t r;

	sc = xsc;
	DC_LOCK_ASSERT(sc);
	ifp = sc->dc_ifp;
	mii = device_get_softc(sc->dc_miibus);

	/*
	 * Reclaim transmitted frames for controllers that do
	 * not generate TX completion interrupt for every frame.
	 */
	if (sc->dc_flags & DC_TX_USE_TX_INTR)
		dc_txeof(sc);

	if (sc->dc_flags & DC_REDUCED_MII_POLL) {
		if (sc->dc_flags & DC_21143_NWAY) {
			/*
			 * 21143 NWAY: detect a speed mismatch between the
			 * negotiated media and the SIA status register and
			 * force a renegotiation if one is seen.
			 */
			r = CSR_READ_4(sc, DC_10BTSTAT);
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_100_TX && (r & DC_TSTAT_LS100)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_10_T && (r & DC_TSTAT_LS10)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (sc->dc_link == 0)
				mii_tick(mii);
		} else {
			/*
			 * For NICs which never report DC_RXSTATE_WAIT, we
			 * have to bite the bullet...
			 */
			if ((DC_HAS_BROKEN_RXSTATE(sc) || (CSR_READ_4(sc,
			    DC_ISR) & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) &&
			    sc->dc_cdata.dc_tx_cnt == 0)
				mii_tick(mii);
		}
	} else
		mii_tick(mii);

	/*
	 * When the init routine completes, we expect to be able to send
	 * packets right away, and in fact the network code will send a
	 * gratuitous ARP the moment the init routine marks the interface
	 * as running. However, even though the MAC may have been initialized,
	 * there may be a delay of a few seconds before the PHY completes
	 * autonegotiation and the link is brought up. Any transmissions
	 * made during that delay will be lost. Dealing with this is tricky:
	 * we can't just pause in the init routine while waiting for the
	 * PHY to come ready since that would bring the whole system to
	 * a screeching halt for several seconds.
	 *
	 * What we do here is prevent the TX start routine from sending
	 * any packets until a link has been established. After the
	 * interface has been initialized, the tick routine will poll
	 * the state of the PHY until the IFM_ACTIVE flag is set. Until
	 * that time, packets will stay in the send queue, and once the
	 * link comes up, they will be flushed out to the wire.
	 */
	if (sc->dc_link != 0 && !if_sendq_empty(ifp))
		dc_start_locked(ifp);

	/* Poll faster (10 Hz) while NWAY chips are still hunting for link. */
	if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link)
		callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc);
	else
		callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc);
}
/*
* A transmit underrun has occurred. Back off the transmit threshold,
* or switch to store and forward mode if we have to.
*/
static void
dc_tx_underrun(struct dc_softc *sc)
{
	uint32_t netcfg, isr;
	int i, reinit;

	reinit = 0;
	netcfg = CSR_READ_4(sc, DC_NETCFG);
	device_printf(sc->dc_dev, "TX underrun -- ");
	if ((sc->dc_flags & DC_TX_STORENFWD) == 0) {
		/*
		 * Raise the TX FIFO threshold one notch; once it maxes
		 * out, fall back to store-and-forward mode.
		 */
		if (sc->dc_txthresh + DC_TXTHRESH_INC > DC_TXTHRESH_MAX) {
			printf("using store and forward mode\n");
			netcfg |= DC_NETCFG_STORENFWD;
		} else {
			printf("increasing TX threshold\n");
			sc->dc_txthresh += DC_TXTHRESH_INC;
			netcfg &= ~DC_NETCFG_TX_THRESH;
			netcfg |= sc->dc_txthresh;
		}

		if (DC_IS_INTEL(sc)) {
			/*
			 * The real 21143 requires that the transmitter be idle
			 * in order to change the transmit threshold or store
			 * and forward state.
			 */
			CSR_WRITE_4(sc, DC_NETCFG, netcfg & ~DC_NETCFG_TX_ON);
			for (i = 0; i < DC_TIMEOUT; i++) {
				isr = CSR_READ_4(sc, DC_ISR);
				if (isr & DC_ISR_TX_IDLE)
					break;
				DELAY(10);
			}
			if (i == DC_TIMEOUT) {
				device_printf(sc->dc_dev,
				    "%s: failed to force tx to idle state\n",
				    __func__);
				reinit++;
			}
		}
	} else {
		/* Already in store-and-forward: nothing left but a reset. */
		printf("resetting\n");
		reinit++;
	}

	if (reinit == 0) {
		/* Apply the new config and restart the transmitter. */
		CSR_WRITE_4(sc, DC_NETCFG, netcfg);
		if (DC_IS_INTEL(sc))
			CSR_WRITE_4(sc, DC_NETCFG, netcfg | DC_NETCFG_TX_ON);
	} else {
		if_setdrvflagbits(sc->dc_ifp, 0, IFF_DRV_RUNNING);
		dc_init_locked(sc);
	}
}
#ifdef DEVICE_POLLING
static poll_handler_t dc_poll;

/*
 * DEVICE_POLLING entry point: service RX/TX with a packet budget of
 * 'count', and on POLL_AND_CHECK_STATUS also emulate the error-handling
 * half of dc_intr().  Returns the number of packets received.
 */
static int
dc_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct dc_softc *sc = if_getsoftc(ifp);
	int rx_npkts = 0;

	DC_LOCK(sc);

	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
		DC_UNLOCK(sc);
		return (rx_npkts);
	}

	/* rxcycles is consumed by dc_rxeof() as the RX budget. */
	sc->rxcycles = count;
	rx_npkts = dc_rxeof(sc);
	dc_txeof(sc);
	if (!if_sendq_empty(ifp) &&
	    !(if_getdrvflags(ifp) & IFF_DRV_OACTIVE))
		dc_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		uint32_t status;

		status = CSR_READ_4(sc, DC_ISR);
		status &= (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF |
		    DC_ISR_TX_NOBUF | DC_ISR_TX_IDLE | DC_ISR_TX_UNDERRUN |
		    DC_ISR_BUS_ERR);
		if (!status) {
			DC_UNLOCK(sc);
			return (rx_npkts);
		}
		/* ack what we have */
		CSR_WRITE_4(sc, DC_ISR, status);

		if (status & (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF)) {
			/* Fold the chip's missed-frame counters into ierrors. */
			uint32_t r = CSR_READ_4(sc, DC_FRAMESDISCARDED);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, (r & 0xffff) + ((r >> 17) & 0x7ff));

			if (dc_rx_resync(sc))
				dc_rxeof(sc);
		}
		/* restart transmit unit if necessary */
		if (status & DC_ISR_TX_IDLE && sc->dc_cdata.dc_tx_cnt)
			CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

		if (status & DC_ISR_TX_UNDERRUN)
			dc_tx_underrun(sc);

		if (status & DC_ISR_BUS_ERR) {
			if_printf(ifp, "%s: bus error\n", __func__);
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			dc_init_locked(sc);
		}
	}
	DC_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */
/*
 * Interrupt handler: dispatch RX, TX, underrun and bus-error events.
 * Interrupts are masked for the duration and the ISR is re-read in a
 * bounded loop (16 passes) to drain back-to-back events cheaply.
 */
static void
dc_intr(void *arg)
{
	struct dc_softc *sc;
	if_t ifp;
	uint32_t r, status;
	int n;

	sc = arg;

	if (sc->suspended)
		return;

	DC_LOCK(sc);
	status = CSR_READ_4(sc, DC_ISR);
	/* 0xFFFFFFFF means the hardware is gone (e.g. card ejected). */
	if (status == 0xFFFFFFFF || (status & DC_INTRS) == 0) {
		DC_UNLOCK(sc);
		return;
	}
	ifp = sc->dc_ifp;
#ifdef DEVICE_POLLING
	/* In polling mode the poll handler does all the work. */
	if (if_getcapenable(ifp) & IFCAP_POLLING) {
		DC_UNLOCK(sc);
		return;
	}
#endif
	/* Disable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);

	for (n = 16; n > 0; n--) {
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
			break;
		/* Ack interrupts. */
		CSR_WRITE_4(sc, DC_ISR, status);

		if (status & DC_ISR_RX_OK) {
			/* Zero packets reaped means we lost sync: resync. */
			if (dc_rxeof(sc) == 0) {
				while (dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		if (status & (DC_ISR_TX_OK | DC_ISR_TX_NOBUF))
			dc_txeof(sc);

		if (status & DC_ISR_TX_IDLE) {
			dc_txeof(sc);
			/* Restart the transmitter if work remains queued. */
			if (sc->dc_cdata.dc_tx_cnt) {
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
				CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & DC_ISR_TX_UNDERRUN)
			dc_tx_underrun(sc);

		if ((status & DC_ISR_RX_WATDOGTIMEO)
		    || (status & DC_ISR_RX_NOBUF)) {
			/* Fold the chip's missed-frame counters into ierrors. */
			r = CSR_READ_4(sc, DC_FRAMESDISCARDED);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, (r & 0xffff) + ((r >> 17) & 0x7ff));
			if (dc_rxeof(sc) == 0) {
				while (dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		if (!if_sendq_empty(ifp))
			dc_start_locked(ifp);

		if (status & DC_ISR_BUS_ERR) {
			/* Fatal: reinitialize the chip and bail out. */
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			dc_init_locked(sc);
			DC_UNLOCK(sc);
			return;
		}
		status = CSR_READ_4(sc, DC_ISR);
		if (status == 0xFFFFFFFF || (status & DC_INTRS) == 0)
			break;
	}

	/* Re-enable interrupts. */
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		CSR_WRITE_4(sc, DC_IMR, DC_INTRS);

	DC_UNLOCK(sc);
}
/*
* Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
* pointers to the fragment pointers.
*/
static int
dc_encap(struct dc_softc *sc, struct mbuf **m_head)
{
	bus_dma_segment_t segs[DC_MAXFRAGS];
	bus_dmamap_t map;
	struct dc_desc *f;
	struct mbuf *m;
	int cur, defragged, error, first, frag, i, idx, nseg;

	m = NULL;
	defragged = 0;
	if (sc->dc_flags & DC_TX_COALESCE &&
	    ((*m_head)->m_next != NULL || sc->dc_flags & DC_TX_ALIGN)) {
		/* Chip wants a single (possibly aligned) buffer: defrag. */
		m = m_defrag(*m_head, M_NOWAIT);
		defragged = 1;
	} else {
		/*
		 * Count the number of frags in this chain to see if we
		 * need to m_collapse. Since the descriptor list is shared
		 * by all packets, we'll m_collapse long chains so that they
		 * do not use up the entire list, even if they would fit.
		 */
		i = 0;
		for (m = *m_head; m != NULL; m = m->m_next)
			i++;
		if (i > DC_TX_LIST_CNT / 4 ||
		    DC_TX_LIST_CNT - i + sc->dc_cdata.dc_tx_cnt <=
		    DC_TX_LIST_RSVD) {
			m = m_collapse(*m_head, M_NOWAIT, DC_MAXFRAGS);
			defragged = 1;
		}
	}
	if (defragged != 0) {
		if (m == NULL) {
			/* Defrag/collapse failed; the chain is consumed. */
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
	}

	idx = sc->dc_cdata.dc_tx_prod;
	error = bus_dmamap_load_mbuf_sg(sc->dc_tx_mtag,
	    sc->dc_cdata.dc_tx_map[idx], *m_head, segs, &nseg, 0);
	if (error == EFBIG) {
		/* Too many segments: collapse once and retry the load. */
		if (defragged != 0 || (m = m_collapse(*m_head, M_NOWAIT,
		    DC_MAXFRAGS)) == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (defragged != 0 ? error : ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->dc_tx_mtag,
		    sc->dc_cdata.dc_tx_map[idx], *m_head, segs, &nseg, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	KASSERT(nseg <= DC_MAXFRAGS,
	    ("%s: wrong number of segments (%d)", __func__, nseg));
	if (nseg == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check descriptor overruns. */
	if (sc->dc_cdata.dc_tx_cnt + nseg > DC_TX_LIST_CNT - DC_TX_LIST_RSVD) {
		bus_dmamap_unload(sc->dc_tx_mtag, sc->dc_cdata.dc_tx_map[idx]);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->dc_tx_mtag, sc->dc_cdata.dc_tx_map[idx],
	    BUS_DMASYNC_PREWRITE);

	/* Fill one descriptor per DMA segment. */
	first = cur = frag = sc->dc_cdata.dc_tx_prod;
	for (i = 0; i < nseg; i++) {
		if ((sc->dc_flags & DC_TX_ADMTEK_WAR) &&
		    (frag == (DC_TX_LIST_CNT - 1)) &&
		    (first != sc->dc_cdata.dc_tx_first)) {
			/*
			 * ADMtek workaround: a packet must not wrap past
			 * the last ring slot mid-frame; give up instead.
			 */
			bus_dmamap_unload(sc->dc_tx_mtag,
			    sc->dc_cdata.dc_tx_map[first]);
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		f = &sc->dc_ldata.dc_tx_list[frag];
		f->dc_ctl = htole32(DC_TXCTL_TLINK | segs[i].ds_len);
		if (i == 0) {
			/*
			 * First descriptor keeps OWN clear for now; it is
			 * set last (below) so the chip never sees a
			 * half-built frame.
			 */
			f->dc_status = 0;
			f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG);
		} else
			f->dc_status = htole32(DC_TXSTAT_OWN);
		f->dc_data = htole32(DC_ADDR_LO(segs[i].ds_addr));
		cur = frag;
		DC_INC(frag, DC_TX_LIST_CNT);
	}

	sc->dc_cdata.dc_tx_prod = frag;
	sc->dc_cdata.dc_tx_cnt += nseg;
	sc->dc_cdata.dc_tx_chain[cur] = *m_head;
	sc->dc_ldata.dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG);
	if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG)
		sc->dc_ldata.dc_tx_list[first].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	if (sc->dc_flags & DC_TX_INTR_ALWAYS)
		sc->dc_ldata.dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT);
	/* On coalescing chips, request a TX interrupt every 8 packets. */
	if (sc->dc_flags & DC_TX_USE_TX_INTR &&
	    ++sc->dc_cdata.dc_tx_pkts >= 8) {
		sc->dc_cdata.dc_tx_pkts = 0;
		sc->dc_ldata.dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT);
	}
	/* Hand the whole frame to the chip in one shot. */
	sc->dc_ldata.dc_tx_list[first].dc_status = htole32(DC_TXSTAT_OWN);

	bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Swap the last and the first dmamaps to ensure the map for
	 * this transmission is placed at the last descriptor.
	 */
	map = sc->dc_cdata.dc_tx_map[cur];
	sc->dc_cdata.dc_tx_map[cur] = sc->dc_cdata.dc_tx_map[first];
	sc->dc_cdata.dc_tx_map[first] = map;

	return (0);
}
/* Locked wrapper around dc_start_locked(). */
static void
dc_start(if_t ifp)
{
	struct dc_softc *sc = if_getsoftc(ifp);

	DC_LOCK(sc);
	dc_start_locked(ifp);
	DC_UNLOCK(sc);
}
/*
* Main transmit routine
* To avoid having to do mbuf copies, we put pointers to the mbuf data
* regions directly in the transmit lists. We also save a copy of the
* pointers since the transmit list fragment pointers are physical
* addresses.
*/
/*
 * Main transmit routine (softc lock held).  Dequeues packets from the
 * interface send queue and maps them onto TX descriptors via dc_encap(),
 * then kicks the transmitter once if anything was queued.  Packets are
 * held back until a link is up (see the comment in dc_tick()).
 */
static void
dc_start_locked(if_t ifp)
{
	struct dc_softc *sc;
	struct mbuf *m;
	int enq;

	sc = if_getsoftc(ifp);
	DC_LOCK_ASSERT(sc);

	/* Bail unless we are running, not stalled, and have link. */
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->dc_link == 0)
		return;

	sc->dc_cdata.dc_tx_first = sc->dc_cdata.dc_tx_prod;

	enq = 0;
	while (!if_sendq_empty(ifp)) {
		/*
		 * If there's no way we can send any packets, return now.
		 */
		if (sc->dc_cdata.dc_tx_cnt > DC_TX_LIST_CNT - DC_TX_LIST_RSVD) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		m = if_dequeue(ifp);
		if (m == NULL)
			break;
		if (dc_encap(sc, &m) != 0) {
			/* NULL means dc_encap() consumed the mbuf. */
			if (m == NULL)
				break;
			/* Requeue and stall until descriptors free up. */
			if_sendq_prepend(ifp, m);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		enq++;

		/* Hand a copy of the frame to any BPF listeners. */
		BPF_MTAP(ifp, m);
	}

	if (enq > 0) {
		/* Kick the transmitter unless the chip polls on its own. */
		if (!(sc->dc_flags & DC_TX_POLL))
			CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->dc_wdog_timer = 5;
	}
}
/* Locked wrapper around dc_init_locked(); used as the if_init handler. */
static void
dc_init(void *xsc)
{
	struct dc_softc *sc;

	sc = xsc;
	DC_LOCK(sc);
	dc_init_locked(sc);
	DC_UNLOCK(sc);
}
/*
 * Bring the chip from reset to a running state: program bus/cache
 * settings, TX thresholds and chip-specific magic, build the RX/TX
 * rings, program the RX filter, enable the DMA engines and start the
 * link-polling callouts.  Softc lock must be held.
 */
static void
dc_init_locked(struct dc_softc *sc)
{
	if_t ifp = sc->dc_ifp;
	struct mii_data *mii;
	struct ifmedia *ifm;

	DC_LOCK_ASSERT(sc);

	/* Already running: nothing to do. */
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;

	mii = device_get_softc(sc->dc_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	dc_stop(sc);
	dc_reset(sc);
	if (DC_IS_INTEL(sc)) {
		/* Reapply SROM media fixups lost across the reset. */
		ifm = &mii->mii_media;
		dc_apply_fixup(sc, ifm->ifm_media);
	}

	/*
	 * Set cache alignment and burst length.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc) || DC_IS_ULI(sc))
		CSR_WRITE_4(sc, DC_BUSCTL, 0);
	else
		CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME | DC_BUSCTL_MRLE);
	/*
	 * Evenly share the bus between receive and transmit process.
	 */
	if (DC_IS_INTEL(sc))
		DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION);
	if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA);
	} else {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG);
	}
	if (sc->dc_flags & DC_TX_POLL)
		DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1);
	switch(sc->dc_cachesize) {
	case 32:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG);
		break;
	case 16:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG);
		break;
	case 8:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE);
		break;
	}

	/* Select store-and-forward or a cut-through TX threshold. */
	if (sc->dc_flags & DC_TX_STORENFWD)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
	else {
		if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
			DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
		}
	}

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC);
	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF);

	if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		/*
		 * The app notes for the 98713 and 98715A say that
		 * in order to have the chips operate properly, a magic
		 * number must be written to CSR16. Macronix does not
		 * document the meaning of these bits so there's no way
		 * to know exactly what they do. The 98713 has a magic
		 * number all its own; the rest all use a different one.
		 */
		DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000);
		if (sc->dc_type == DC_TYPE_98713)
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713);
		else
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715);
	}

	if (DC_IS_XIRCOM(sc)) {
		/*
		 * setup General Purpose Port mode and data so the tulip
		 * can talk to the MII.
		 */
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
	DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN);

	/* Init circular RX list. */
	if (dc_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->dc_dev,
		    "initialization failed: no memory for rx buffers\n");
		dc_stop(sc);
		return;
	}

	/*
	 * Init TX descriptors.
	 */
	dc_list_tx_init(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, DC_RXADDR, DC_RXDESC(sc, 0));
	CSR_WRITE_4(sc, DC_TXADDR, DC_TXDESC(sc, 0));

	/*
	 * Enable interrupts.
	 */
#ifdef DEVICE_POLLING
	/*
	 * ... but only if we are not polling, and make sure they are off in
	 * the case of polling. Some cards (e.g. fxp) turn interrupts on
	 * after a reset.
	 */
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	else
#endif
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
	CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF);

	/* Initialize TX jabber and RX watchdog timer. */
	if (DC_IS_ULI(sc))
		CSR_WRITE_4(sc, DC_WATCHDOG, DC_WDOG_JABBERCLK |
		    DC_WDOG_HOSTUNJAB);

	/* Enable transmitter. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);

	/*
	 * If this is an Intel 21143 and we're not using the
	 * MII port, program the LED control pins so we get
	 * link and activity indications.
	 */
	if (sc->dc_flags & DC_TULIP_LEDS) {
		CSR_WRITE_4(sc, DC_WATCHDOG,
		    DC_WDOG_CTLWREN | DC_WDOG_LINK | DC_WDOG_ACTIVITY);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	/*
	 * Load the RX/multicast filter. We do this sort of late
	 * because the filter programming scheme on the 21143 and
	 * some clones requires DMAing a setup frame via the TX
	 * engine, and we need the transmitter enabled for that.
	 */
	dc_setfilt(sc);

	/* Enable receiver. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	dc_ifmedia_upd_locked(sc);

	/* Clear missed frames and overflow counter. */
	CSR_READ_4(sc, DC_FRAMESDISCARDED);

	/* Don't start the ticker if this is a homePNA link. */
	if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1)
		sc->dc_link = 1;
	else {
		/* NWAY chips poll faster until the link is up. */
		if (sc->dc_flags & DC_21143_NWAY)
			callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc);
		else
			callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc);
	}

	sc->dc_wdog_timer = 0;
	callout_reset(&sc->dc_wdog_ch, hz, dc_watchdog, sc);
}
/*
* Set media options.
*/
/* Locked wrapper around dc_ifmedia_upd_locked(). */
static int
dc_ifmedia_upd(if_t ifp)
{
	struct dc_softc *sc = if_getsoftc(ifp);
	int rv;

	DC_LOCK(sc);
	rv = dc_ifmedia_upd_locked(sc);
	DC_UNLOCK(sc);

	return (rv);
}
/*
 * Set media options with the softc lock held: clear the cached link
 * state, renegotiate via the MII layer, and reprogram the chip config
 * on controllers that need it (Intel 21143; Davicom in HomePNA mode).
 */
static int
dc_ifmedia_upd_locked(struct dc_softc *sc)
{
	struct mii_data *mii;
	struct ifmedia *media;
	int rv;

	DC_LOCK_ASSERT(sc);

	/* Link state is rediscovered by dc_tick() after renegotiation. */
	sc->dc_link = 0;
	mii = device_get_softc(sc->dc_miibus);
	rv = mii_mediachg(mii);
	if (rv != 0)
		return (rv);

	media = &mii->mii_media;
	if (DC_IS_INTEL(sc) ||
	    (DC_IS_DAVICOM(sc) &&
	    IFM_SUBTYPE(media->ifm_media) == IFM_HPNA_1))
		dc_setcfg(sc, media->ifm_media);

	return (rv);
}
/*
* Report current media status.
*/
/*
 * Report current media status into *ifmr.
 */
static void
dc_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct dc_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->dc_miibus);
	DC_LOCK(sc);
	mii_pollstat(mii);
	ifm = &mii->mii_media;
	if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
		/* HomePNA link on Davicom parts: report the configured
		 * media directly, with no status bits. */
		ifmr->ifm_active = ifm->ifm_media;
		ifmr->ifm_status = 0;
	} else {
		ifmr->ifm_active = mii->mii_media_active;
		ifmr->ifm_status = mii->mii_media_status;
	}
	DC_UNLOCK(sc);
}
/*
 * ioctl handler for the dc(4) interface.
 *
 * Handles interface flag changes, multicast list updates, media
 * requests and (under DEVICE_POLLING) capability changes; anything
 * else is forwarded to ether_ioctl().  Returns 0 or an errno value.
 */
static int
dc_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct dc_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		DC_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			/* Reprogram the RX filter only if a filter-relevant
			 * flag (PROMISC/ALLMULTI) actually changed. */
			int need_setfilt = (if_getflags(ifp) ^ sc->dc_if_flags) &
				(IFF_PROMISC | IFF_ALLMULTI);

			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				if (need_setfilt)
					dc_setfilt(sc);
			} else {
				/* Not running yet: bring the chip up. */
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				dc_init_locked(sc);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				dc_stop(sc);
		}
		/* Remember flags so the next SIOCSIFFLAGS can diff them. */
		sc->dc_if_flags = if_getflags(ifp);
		DC_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		DC_LOCK(sc);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			dc_setfilt(sc);
		DC_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->dc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
#ifdef DEVICE_POLLING
		/* Entering polling mode: register the poll handler first,
		 * then mask chip interrupts. */
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(if_getcapenable(ifp) & IFCAP_POLLING)) {
			error = ether_poll_register(dc_poll, ifp);
			if (error)
				return(error);
			DC_LOCK(sc);
			/* Disable interrupts */
			CSR_WRITE_4(sc, DC_IMR, 0x00000000);
			if_setcapenablebit(ifp, IFCAP_POLLING, 0);
			DC_UNLOCK(sc);
			return (error);
		}
		/* Leaving polling mode: deregister and unmask interrupts. */
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    if_getcapenable(ifp) & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			DC_LOCK(sc);
			CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
			if_setcapenablebit(ifp, 0, IFCAP_POLLING);
			DC_UNLOCK(sc);
			return (error);
		}
#endif /* DEVICE_POLLING */
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*
 * Per-second TX watchdog callout.  Runs with the softc lock held
 * (asserted below).  When the armed dc_wdog_timer counts down to zero
 * the transmitter is assumed wedged: log it, count an output error,
 * reinitialize the chip and restart any queued transmission.
 */
static void
dc_watchdog(void *xsc)
{
	struct dc_softc *sc = xsc;
	if_t ifp;

	DC_LOCK_ASSERT(sc);

	/* Timer disarmed, or still ticking down: just reschedule.
	 * Note the decrement is a side effect of the condition. */
	if (sc->dc_wdog_timer == 0 || --sc->dc_wdog_timer != 0) {
		callout_reset(&sc->dc_wdog_ch, hz, dc_watchdog, sc);
		return;
	}

	ifp = sc->dc_ifp;
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	device_printf(sc->dc_dev, "watchdog timeout\n");

	/* Clear RUNNING so dc_init_locked() performs a full reinit. */
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	dc_init_locked(sc);

	if (!if_sendq_empty(ifp))
		dc_start_locked(ifp);
}
/*
* Stop the adapter and free any mbufs allocated to the
* RX and TX lists.
*/
static void
dc_stop(struct dc_softc *sc)
{
	if_t ifp;
	struct dc_list_data *ld;
	struct dc_chain_data *cd;
	int i;
	uint32_t ctl, netcfg;

	DC_LOCK_ASSERT(sc);

	ifp = sc->dc_ifp;
	ld = &sc->dc_ldata;
	cd = &sc->dc_cdata;

	/* Cancel the periodic tick and watchdog callouts. */
	callout_stop(&sc->dc_stat_ch);
	callout_stop(&sc->dc_wdog_ch);
	sc->dc_wdog_timer = 0;
	sc->dc_link = 0;

	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));

	/* Turn off the RX/TX engines (if on), then mask all interrupts. */
	netcfg = CSR_READ_4(sc, DC_NETCFG);
	if (netcfg & (DC_NETCFG_RX_ON | DC_NETCFG_TX_ON))
		CSR_WRITE_4(sc, DC_NETCFG,
		    netcfg & ~(DC_NETCFG_RX_ON | DC_NETCFG_TX_ON));
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	/* Wait the completion of TX/RX SM. */
	if (netcfg & (DC_NETCFG_RX_ON | DC_NETCFG_TX_ON))
		dc_netcfg_wait(sc);
	/* Clear the descriptor list base addresses. */
	CSR_WRITE_4(sc, DC_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, DC_RXADDR, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (cd->dc_rx_chain[i] != NULL) {
			bus_dmamap_sync(sc->dc_rx_mtag,
			    cd->dc_rx_map[i], BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->dc_rx_mtag,
			    cd->dc_rx_map[i]);
			m_freem(cd->dc_rx_chain[i]);
			cd->dc_rx_chain[i] = NULL;
		}
	}
	/* Zero the descriptor ring and push it back to the device. */
	bzero(ld->dc_rx_list, DC_RX_LIST_SZ);
	bus_dmamap_sync(sc->dc_rx_ltag, sc->dc_rx_lmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		if (cd->dc_tx_chain[i] != NULL) {
			ctl = le32toh(ld->dc_tx_list[i].dc_ctl);
			if (ctl & DC_TXCTL_SETUP) {
				/* Setup (filter) frames use the shared
				 * setup-frame map, not a per-mbuf map,
				 * and have no mbuf to free. */
				bus_dmamap_sync(sc->dc_stag, sc->dc_smap,
				    BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->dc_tx_mtag,
				    cd->dc_tx_map[i], BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->dc_tx_mtag,
				    cd->dc_tx_map[i]);
				m_freem(cd->dc_tx_chain[i]);
			}
			cd->dc_tx_chain[i] = NULL;
		}
	}
	bzero(ld->dc_tx_list, DC_TX_LIST_SZ);
	bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
* Device suspend routine. Stop the interface and save some PCI
* settings in case the BIOS doesn't restore them properly on
* resume.
*/
/*
 * Device suspend: stop the chip and mark the softc suspended.
 */
static int
dc_suspend(device_t dev)
{
	struct dc_softc *sc = device_get_softc(dev);

	DC_LOCK(sc);
	dc_stop(sc);
	sc->suspended = 1;
	DC_UNLOCK(sc);

	return (0);
}
/*
* Device resume routine. Restore some PCI settings in case the BIOS
* doesn't, re-enable busmastering, and restart the interface if
* appropriate.
*/
/*
 * Device resume: reinitialize the interface if it was administratively
 * up, and clear the suspended flag.
 */
static int
dc_resume(device_t dev)
{
	struct dc_softc *sc = device_get_softc(dev);
	if_t ifp = sc->dc_ifp;

	/* reinitialize interface if necessary */
	DC_LOCK(sc);
	if (if_getflags(ifp) & IFF_UP)
		dc_init_locked(sc);
	sc->suspended = 0;
	DC_UNLOCK(sc);

	return (0);
}
/*
* Stop all chip I/O so that the kernel's probe routines don't
* get confused by errant DMAs when rebooting.
*/
/*
 * Shutdown hook: quiesce the chip so no DMA is in flight at reboot.
 */
static int
dc_shutdown(device_t dev)
{
	struct dc_softc *sc = device_get_softc(dev);

	DC_LOCK(sc);
	dc_stop(sc);
	DC_UNLOCK(sc);

	return (0);
}
/*
 * Look for an earlier-attached dc(4) sibling on the same parent bus
 * (a multiport card).  If one is found, derive this port's station
 * address from the sibling's (incrementing the last octet) and, on
 * Intel parts with an SROM, clone the sibling's SROM image so it can
 * be parsed again for this port.
 *
 * Returns 0 when a sibling was found and used, ENOMEM on SROM
 * allocation failure, ENOENT when no suitable sibling exists.
 */
static int
dc_check_multiport(struct dc_softc *sc)
{
	struct dc_softc *dsc;
	devclass_t dc;
	device_t child;
	uint8_t *eaddr;
	int unit;

	dc = devclass_find("dc");
	for (unit = 0; unit < devclass_get_maxunit(dc); unit++) {
		child = devclass_get_device(dc, unit);
		if (child == NULL)
			continue;
		if (child == sc->dc_dev)
			continue;
		/* Must hang off the same parent bus device. */
		if (device_get_parent(child) != device_get_parent(sc->dc_dev))
			continue;
		/* Only lower-numbered units can serve as the base port. */
		if (unit > device_get_unit(sc->dc_dev))
			continue;
		if (device_is_attached(child) == 0)
			continue;
		dsc = device_get_softc(child);
		device_printf(sc->dc_dev,
		    "Using station address of %s as base\n",
		    device_get_nameunit(child));
		bcopy(dsc->dc_eaddr, sc->dc_eaddr, ETHER_ADDR_LEN);
		eaddr = (uint8_t *)sc->dc_eaddr;
		eaddr[5]++;
		/* Prepare SROM to parse again. */
		if (DC_IS_INTEL(sc) && dsc->dc_srom != NULL &&
		    sc->dc_romwidth != 0) {
			free(sc->dc_srom, M_DEVBUF);
			sc->dc_romwidth = dsc->dc_romwidth;
			sc->dc_srom = malloc(DC_ROM_SIZE(sc->dc_romwidth),
			    M_DEVBUF, M_NOWAIT);
			if (sc->dc_srom == NULL) {
				device_printf(sc->dc_dev,
				    "Could not allocate SROM buffer\n");
				return (ENOMEM);
			}
			bcopy(dsc->dc_srom, sc->dc_srom,
			    DC_ROM_SIZE(sc->dc_romwidth));
		}
		return (0);
	}
	return (ENOENT);
}
diff --git a/sys/dev/dpaa/if_dtsec.c b/sys/dev/dpaa/if_dtsec.c
index b5899a340f98..a5f9955061a4 100644
--- a/sys/dev/dpaa/if_dtsec.c
+++ b/sys/dev/dpaa/if_dtsec.c
@@ -1,884 +1,878 @@
/*-
* Copyright (c) 2011-2012 Semihalf.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/openfirm.h>
#include "miibus_if.h"
#include <contrib/ncsw/inc/integrations/dpaa_integration_ext.h>
#include <contrib/ncsw/inc/Peripherals/fm_mac_ext.h>
#include <contrib/ncsw/inc/Peripherals/fm_port_ext.h>
#include <contrib/ncsw/inc/flib/fsl_fman_dtsec.h>
#include <contrib/ncsw/inc/xx_ext.h>
#include "fman.h"
#include "if_dtsec.h"
#include "if_dtsec_im.h"
#include "if_dtsec_rm.h"
#define DTSEC_MIN_FRAME_SIZE 64
#define DTSEC_MAX_FRAME_SIZE 9600
#define DTSEC_REG_MAXFRM 0x110
#define DTSEC_REG_GADDR(i) (0x0a0 + 4*(i))
/**
* @group dTSEC private defines.
* @{
*/
/**
* dTSEC FMan MAC exceptions info struct.
*/
struct dtsec_fm_mac_ex_str {
const int num;
const char *str;
};
/** @} */
/**
* @group FMan MAC routines.
* @{
*/
#define DTSEC_MAC_EXCEPTIONS_END (-1)
/**
* FMan MAC exceptions.
*/
/*
 * Table mapping FMan MAC exception codes to human-readable strings,
 * terminated by a DTSEC_MAC_EXCEPTIONS_END sentinel.
 */
static const struct dtsec_fm_mac_ex_str dtsec_fm_mac_exceptions[] = {
	{ e_FM_MAC_EX_10G_MDIO_SCAN_EVENTMDIO, "MDIO scan event" },
	{ e_FM_MAC_EX_10G_MDIO_CMD_CMPL, "MDIO command completion" },
	{ e_FM_MAC_EX_10G_REM_FAULT, "Remote fault" },
	{ e_FM_MAC_EX_10G_LOC_FAULT, "Local fault" },
	{ e_FM_MAC_EX_10G_1TX_ECC_ER, "Transmit frame ECC error" },
	{ e_FM_MAC_EX_10G_TX_FIFO_UNFL, "Transmit FIFO underflow" },
	/* Fixed: was mislabeled "Receive FIFO overflow" (copy/paste from
	 * the RX entry below) — this is the transmit-side overflow. */
	{ e_FM_MAC_EX_10G_TX_FIFO_OVFL, "Transmit FIFO overflow" },
	{ e_FM_MAC_EX_10G_TX_ER, "Transmit frame error" },
	{ e_FM_MAC_EX_10G_RX_FIFO_OVFL, "Receive FIFO overflow" },
	{ e_FM_MAC_EX_10G_RX_ECC_ER, "Receive frame ECC error" },
	{ e_FM_MAC_EX_10G_RX_JAB_FRM, "Receive jabber frame" },
	{ e_FM_MAC_EX_10G_RX_OVRSZ_FRM, "Receive oversized frame" },
	{ e_FM_MAC_EX_10G_RX_RUNT_FRM, "Receive runt frame" },
	{ e_FM_MAC_EX_10G_RX_FRAG_FRM, "Receive fragment frame" },
	{ e_FM_MAC_EX_10G_RX_LEN_ER, "Receive payload length error" },
	{ e_FM_MAC_EX_10G_RX_CRC_ER, "Receive CRC error" },
	{ e_FM_MAC_EX_10G_RX_ALIGN_ER, "Receive alignment error" },
	{ e_FM_MAC_EX_1G_BAB_RX, "Babbling receive error" },
	{ e_FM_MAC_EX_1G_RX_CTL, "Receive control (pause frame) interrupt" },
	{ e_FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET, "Graceful transmit stop "
	    "complete" },
	{ e_FM_MAC_EX_1G_BAB_TX, "Babbling transmit error" },
	{ e_FM_MAC_EX_1G_TX_CTL, "Transmit control (pause frame) interrupt" },
	{ e_FM_MAC_EX_1G_TX_ERR, "Transmit error" },
	{ e_FM_MAC_EX_1G_LATE_COL, "Late collision" },
	{ e_FM_MAC_EX_1G_COL_RET_LMT, "Collision retry limit" },
	{ e_FM_MAC_EX_1G_TX_FIFO_UNDRN, "Transmit FIFO underrun" },
	{ e_FM_MAC_EX_1G_MAG_PCKT, "Magic Packet detected when dTSEC is in "
	    "Magic Packet detection mode" },
	{ e_FM_MAC_EX_1G_MII_MNG_RD_COMPLET, "MII management read completion" },
	{ e_FM_MAC_EX_1G_MII_MNG_WR_COMPLET, "MII management write completion" },
	{ e_FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET, "Graceful receive stop "
	    "complete" },
	{ e_FM_MAC_EX_1G_TX_DATA_ERR, "Internal data error on transmit" },
	{ e_FM_MAC_EX_1G_RX_DATA_ERR, "Internal data error on receive" },
	{ e_FM_MAC_EX_1G_1588_TS_RX_ERR, "Time-Stamp Receive Error" },
	{ e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL, "MIB counter overflow" },
	{ DTSEC_MAC_EXCEPTIONS_END, "" }
};
/*
 * Translate an FMan MAC exception code into its description string,
 * or "<Unknown Exception>" if the code is not in the table.
 */
static const char *
dtsec_fm_mac_ex_to_str(e_FmMacExceptions exception)
{
	const struct dtsec_fm_mac_ex_str *entry;

	for (entry = dtsec_fm_mac_exceptions;
	    entry->num != DTSEC_MAC_EXCEPTIONS_END; ++entry) {
		if (entry->num == exception)
			return (entry->str);
	}

	return ("<Unknown Exception>");
}
/*
 * FMan MAC MDIO event callback: log the event with its description.
 */
static void
dtsec_fm_mac_mdio_event_callback(t_Handle h_App,
    e_FmMacExceptions exception)
{
	struct dtsec_softc *sc = h_App;

	device_printf(sc->sc_dev, "MDIO event %i: %s.\n", exception,
	    dtsec_fm_mac_ex_to_str(exception));
}
/*
 * FMan MAC exception callback: log the exception with its description.
 */
static void
dtsec_fm_mac_exception_callback(t_Handle app, e_FmMacExceptions exception)
{
	struct dtsec_softc *sc = app;

	device_printf(sc->sc_dev, "MAC exception %i: %s.\n", exception,
	    dtsec_fm_mac_ex_to_str(exception));
}
/*
 * Disable and release the FMan MAC handle, if one exists.  Safe to
 * call repeatedly; the handle is cleared after freeing.
 */
static void
dtsec_fm_mac_free(struct dtsec_softc *sc)
{

	if (sc->sc_mach != NULL) {
		FM_MAC_Disable(sc->sc_mach, e_COMM_MODE_RX_AND_TX);
		FM_MAC_Free(sc->sc_mach);
		sc->sc_mach = NULL;
	}
}
/*
 * Configure and initialize the FMan MAC object for this interface.
 *
 * Fills t_FmMacParams from the softc (register base, ENET mode, MAC
 * id, MDIO IRQ, event/exception callbacks) and runs the
 * Config -> ConfigResetOnInit -> ConfigException -> Init sequence.
 * On any failure the partially-created MAC handle is released via
 * dtsec_fm_mac_free().  Returns 0 on success or ENXIO on failure.
 */
static int
dtsec_fm_mac_init(struct dtsec_softc *sc, uint8_t *mac)
{
	t_FmMacParams params;
	t_Error error;

	memset(&params, 0, sizeof(params));
	memcpy(&params.addr, mac, sizeof(params.addr));

	params.baseAddr = rman_get_bushandle(sc->sc_mem);
	params.enetMode = sc->sc_mac_enet_mode;
	params.macId = sc->sc_eth_id;
	params.mdioIrq = sc->sc_mac_mdio_irq;
	params.f_Event = dtsec_fm_mac_mdio_event_callback;
	params.f_Exception = dtsec_fm_mac_exception_callback;
	params.h_App = sc;
	params.h_Fm = sc->sc_fmh;

	sc->sc_mach = FM_MAC_Config(&params);
	if (sc->sc_mach == NULL) {
		device_printf(sc->sc_dev, "couldn't configure FM_MAC module.\n"
		    );
		return (ENXIO);
	}

	error = FM_MAC_ConfigResetOnInit(sc->sc_mach, TRUE);
	if (error != E_OK) {
		device_printf(sc->sc_dev, "couldn't enable reset on init "
		    "feature.\n");
		dtsec_fm_mac_free(sc);
		return (ENXIO);
	}

	/* Do not inform about pause frames */
	error = FM_MAC_ConfigException(sc->sc_mach, e_FM_MAC_EX_1G_RX_CTL,
	    FALSE);
	if (error != E_OK) {
		device_printf(sc->sc_dev, "couldn't disable pause frames "
		    "exception.\n");
		dtsec_fm_mac_free(sc);
		return (ENXIO);
	}

	error = FM_MAC_Init(sc->sc_mach);
	if (error != E_OK) {
		device_printf(sc->sc_dev, "couldn't initialize FM_MAC module."
		    "\n");
		dtsec_fm_mac_free(sc);
		return (ENXIO);
	}

	return (0);
}
/** @} */
/**
* @group FMan PORT routines.
* @{
*/
/*
 * Translate an FMan PORT exception code into its description string.
 */
static const char *
dtsec_fm_port_ex_to_str(e_FmPortExceptions exception)
{

	if (exception == e_FM_PORT_EXCEPTION_IM_BUSY)
		return ("IM: RX busy");
	return ("<Unknown Exception>");
}
/*
 * FMan RX port exception callback: log the exception.
 */
void
dtsec_fm_port_rx_exception_callback(t_Handle app,
    e_FmPortExceptions exception)
{
	struct dtsec_softc *sc = app;

	device_printf(sc->sc_dev, "RX exception: %i: %s.\n", exception,
	    dtsec_fm_port_ex_to_str(exception));
}
/*
 * FMan TX port exception callback: log the exception.
 */
void
dtsec_fm_port_tx_exception_callback(t_Handle app,
    e_FmPortExceptions exception)
{
	struct dtsec_softc *sc = app;

	device_printf(sc->sc_dev, "TX exception: %i: %s.\n", exception,
	    dtsec_fm_port_ex_to_str(exception));
}
/*
 * Map an Ethernet device type to the matching FMan RX port type;
 * unknown types map to the dummy port type.
 */
e_FmPortType
dtsec_fm_port_rx_type(enum eth_dev_type type)
{

	if (type == ETH_DTSEC)
		return (e_FM_PORT_TYPE_RX);
	if (type == ETH_10GSEC)
		return (e_FM_PORT_TYPE_RX_10G);
	return (e_FM_PORT_TYPE_DUMMY);
}
/*
 * Map an Ethernet device type to the matching FMan TX port type;
 * unknown types map to the dummy port type.
 */
e_FmPortType
dtsec_fm_port_tx_type(enum eth_dev_type type)
{

	if (type == ETH_DTSEC)
		return (e_FM_PORT_TYPE_TX);
	if (type == ETH_10GSEC)
		return (e_FM_PORT_TYPE_TX_10G);
	return (e_FM_PORT_TYPE_DUMMY);
}
/*
 * Release both FMan port handles (RX and TX), clearing each pointer
 * after freeing.  Either handle may already be NULL.
 */
static void
dtsec_fm_port_free_both(struct dtsec_softc *sc)
{

	if (sc->sc_rxph != NULL) {
		FM_PORT_Free(sc->sc_rxph);
		sc->sc_rxph = NULL;
	}

	if (sc->sc_txph != NULL) {
		FM_PORT_Free(sc->sc_txph);
		sc->sc_txph = NULL;
	}
}
/** @} */
/**
* @group IFnet routines.
* @{
*/
/*
 * Program the MAC maximum frame register for the requested MTU.
 * The MTU is grown by the Ethernet header, VLAN tag and CRC overhead
 * before the range check.  Returns the programmed frame size on
 * success, 0 if the resulting frame size is unsupported.
 */
static int
dtsec_set_mtu(struct dtsec_softc *sc, unsigned int mtu)
{
	unsigned int framesize;

	DTSEC_LOCK_ASSERT(sc);

	framesize = mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
	    ETHER_CRC_LEN;
	if (framesize < DTSEC_MIN_FRAME_SIZE ||
	    framesize > DTSEC_MAX_FRAME_SIZE)
		return (0);

	bus_write_4(sc->sc_mem, DTSEC_REG_MAXFRM, framesize);
	return (framesize);
}
/*
 * if_foreach_llmaddr() callback: add one link-level multicast address
 * to the MAC hash filter.  Always counts the address (returns 1).
 */
static u_int
dtsec_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct dtsec_softc *sc;

	sc = arg;
	FM_MAC_AddHashMacAddr(sc->sc_mach, (t_EnetAddr *)LLADDR(sdl));

	return (1);
}
/*
 * Program the multicast filter: with IFF_ALLMULTI, open every group
 * hash bucket; otherwise reset the hash table and re-add the current
 * link-level multicast list.
 */
static void
dtsec_setup_multicast(struct dtsec_softc *sc)
{
	int i;

	if ((if_getflags(sc->sc_ifnet) & IFF_ALLMULTI) != 0) {
		for (i = 0; i < 8; i++)
			bus_write_4(sc->sc_mem, DTSEC_REG_GADDR(i),
			    0xFFFFFFFF);
	} else {
		fman_dtsec_reset_filter_table(rman_get_virtual(sc->sc_mem),
		    true, false);
		if_foreach_llmaddr(sc->sc_ifnet, dtsec_hash_maddr, sc);
	}
}
/*
 * Locked enable path: bring up the MAC and both FMan ports, reload
 * the multicast filter, mark the interface running and refresh link
 * state.  Returns 0 on success, EIO if any FMan call fails.
 */
static int
dtsec_if_enable_locked(struct dtsec_softc *sc)
{

	DTSEC_LOCK_ASSERT(sc);

	if (FM_MAC_Enable(sc->sc_mach, e_COMM_MODE_RX_AND_TX) != E_OK)
		return (EIO);
	if (FM_PORT_Enable(sc->sc_rxph) != E_OK)
		return (EIO);
	if (FM_PORT_Enable(sc->sc_txph) != E_OK)
		return (EIO);

	dtsec_setup_multicast(sc);
	if_setdrvflagbits(sc->sc_ifnet, IFF_DRV_RUNNING, 0);

	/* Refresh link state */
	dtsec_miibus_statchg(sc->sc_dev);

	return (0);
}
/*
 * Locked disable path: shut down the MAC and both FMan ports and
 * clear the running flag.  Returns 0 on success, EIO on FMan failure.
 */
static int
dtsec_if_disable_locked(struct dtsec_softc *sc)
{

	DTSEC_LOCK_ASSERT(sc);

	if (FM_MAC_Disable(sc->sc_mach, e_COMM_MODE_RX_AND_TX) != E_OK)
		return (EIO);
	if (FM_PORT_Disable(sc->sc_rxph) != E_OK)
		return (EIO);
	if (FM_PORT_Disable(sc->sc_txph) != E_OK)
		return (EIO);

	if_setdrvflagbits(sc->sc_ifnet, 0, IFF_DRV_RUNNING);

	return (0);
}
/*
 * ioctl handler for the dTSEC interface: MTU changes, flag-driven
 * enable/disable, and media requests; everything else goes to
 * ether_ioctl().  Returns 0 or an errno value.
 */
static int
dtsec_if_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct dtsec_softc *sc;
	struct ifreq *ifr;
	int error;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;
	error = 0;

	/* Basic functionality to achieve media status reports */
	switch (command) {
	case SIOCSIFMTU:
		DTSEC_LOCK(sc);
		/* dtsec_set_mtu() returns 0 when the resulting frame size
		 * is outside the supported range. */
		if (dtsec_set_mtu(sc, ifr->ifr_mtu))
			if_setmtu(ifp, ifr->ifr_mtu);
		else
			error = EINVAL;
		DTSEC_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		DTSEC_LOCK(sc);
		if (if_getflags(sc->sc_ifnet) & IFF_UP)
			error = dtsec_if_enable_locked(sc);
		else
			error = dtsec_if_disable_locked(sc);
		DTSEC_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media,
		    command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}

	return (error);
}
/*
 * Periodic (1 Hz) MII housekeeping callout; reschedules itself.
 */
static void
dtsec_if_tick(void *arg)
{
	struct dtsec_softc *sc = arg;

	/* TODO */
	DTSEC_LOCK(sc);
	mii_tick(sc->sc_mii);
	callout_reset(&sc->sc_tick_callout, hz, dtsec_if_tick, sc);
	DTSEC_UNLOCK(sc);
}
/*
 * Tear down the periodic MII tick.  dtsec_if_tick() acquires
 * DTSEC_LOCK itself, so the lock must be dropped around
 * callout_drain() to avoid deadlocking against a running tick; it is
 * re-acquired before returning so the caller's locking state holds.
 */
static void
dtsec_if_deinit_locked(struct dtsec_softc *sc)
{

	DTSEC_LOCK_ASSERT(sc);

	DTSEC_UNLOCK(sc);
	callout_drain(&sc->sc_tick_callout);
	DTSEC_LOCK(sc);
}
/*
 * Locked worker for interface (re)initialization: program the current
 * link-level address into the MAC, start MII polling, and enable or
 * disable the MAC/ports according to IFF_UP.  On failure the tick
 * callout is drained again and the error is logged.
 */
static void
dtsec_if_init_locked(struct dtsec_softc *sc)
{
	int error;

	DTSEC_LOCK_ASSERT(sc);

	/* Set MAC address */
	error = FM_MAC_ModifyMacAddr(sc->sc_mach,
	    (t_EnetAddr *)if_getlladdr(sc->sc_ifnet));
	if (error != E_OK) {
		device_printf(sc->sc_dev, "couldn't set MAC address.\n");
		goto err;
	}

	/* Start MII polling */
	if (sc->sc_mii)
		callout_reset(&sc->sc_tick_callout, hz, dtsec_if_tick, sc);

	if (if_getflags(sc->sc_ifnet) & IFF_UP) {
		error = dtsec_if_enable_locked(sc);
		if (error != 0)
			goto err;
	} else {
		error = dtsec_if_disable_locked(sc);
		if (error != 0)
			goto err;
	}

	return;

err:
	/* Stop the tick started above before reporting failure. */
	dtsec_if_deinit_locked(sc);
	device_printf(sc->sc_dev, "initialization error.\n");
	return;
}
/*
 * ifnet init entry point: takes the softc lock and runs the worker.
 */
static void
dtsec_if_init(void *data)
{
	struct dtsec_softc *sc = data;

	DTSEC_LOCK(sc);
	dtsec_if_init_locked(sc);
	DTSEC_UNLOCK(sc);
}
/*
 * ifnet start entry point: dispatch to the mode-specific (regular or
 * independent) locked start routine installed at attach time.
 */
static void
dtsec_if_start(if_t ifp)
{
	struct dtsec_softc *sc = if_getsoftc(ifp);

	DTSEC_LOCK(sc);
	sc->sc_start_locked(sc);
	DTSEC_UNLOCK(sc);
}
/*
 * Watchdog entry point; intentionally a no-op for now.
 */
static void
dtsec_if_watchdog(if_t ifp)
{
	/* TODO */
}
/** @} */
/**
* @group IFmedia routines.
* @{
*/
/*
 * ifmedia change hook: run the PHY media change under the softc lock.
 * Always reports success.
 */
static int
dtsec_ifmedia_upd(if_t ifp)
{
	struct dtsec_softc *sc;

	sc = if_getsoftc(ifp);
	DTSEC_LOCK(sc);
	mii_mediachg(sc->sc_mii);
	DTSEC_UNLOCK(sc);

	return (0);
}
/*
 * ifmedia status hook: poll the PHY and copy out active media and
 * status under the softc lock.
 */
static void
dtsec_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct dtsec_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	DTSEC_LOCK(sc);
	mii = sc->sc_mii;
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	DTSEC_UNLOCK(sc);
}
/** @} */
/**
* @group dTSEC bus interface.
* @{
*/
/*
 * Select regular (QMan/BMan) or independent operating mode, honoring
 * the per-device "<nameunit>.independent_mode" tunable, and install
 * the matching port-init and start routines.
 */
static void
dtsec_configure_mode(struct dtsec_softc *sc)
{
	char tunable[64];

	snprintf(tunable, sizeof(tunable), "%s.independent_mode",
	    device_get_nameunit(sc->sc_dev));

	sc->sc_mode = DTSEC_MODE_REGULAR;
	TUNABLE_INT_FETCH(tunable, &sc->sc_mode);

	if (sc->sc_mode != DTSEC_MODE_REGULAR) {
		sc->sc_port_rx_init = dtsec_im_fm_port_rx_init;
		sc->sc_port_tx_init = dtsec_im_fm_port_tx_init;
		sc->sc_start_locked = dtsec_im_if_start_locked;
	} else {
		sc->sc_port_rx_init = dtsec_rm_fm_port_rx_init;
		sc->sc_port_tx_init = dtsec_rm_fm_port_tx_init;
		sc->sc_start_locked = dtsec_rm_if_start_locked;
	}

	device_printf(sc->sc_dev, "Configured for %s mode.\n",
	    (sc->sc_mode == DTSEC_MODE_REGULAR) ? "regular" : "independent");
}
int
dtsec_attach(device_t dev)
{
struct dtsec_softc *sc;
device_t parent;
int error;
if_t ifp;
sc = device_get_softc(dev);
parent = device_get_parent(dev);
sc->sc_dev = dev;
sc->sc_mac_mdio_irq = NO_IRQ;
/* Check if MallocSmart allocator is ready */
if (XX_MallocSmartInit() != E_OK)
return (ENXIO);
/* Init locks */
mtx_init(&sc->sc_lock, device_get_nameunit(dev),
"DTSEC Global Lock", MTX_DEF);
mtx_init(&sc->sc_mii_lock, device_get_nameunit(dev),
"DTSEC MII Lock", MTX_DEF);
/* Init callouts */
callout_init(&sc->sc_tick_callout, CALLOUT_MPSAFE);
/* Read configuration */
if ((error = fman_get_handle(parent, &sc->sc_fmh)) != 0)
return (error);
if ((error = fman_get_muram_handle(parent, &sc->sc_muramh)) != 0)
return (error);
if ((error = fman_get_bushandle(parent, &sc->sc_fm_base)) != 0)
return (error);
/* Configure working mode */
dtsec_configure_mode(sc);
/* If we are working in regular mode configure BMAN and QMAN */
if (sc->sc_mode == DTSEC_MODE_REGULAR) {
/* Create RX buffer pool */
error = dtsec_rm_pool_rx_init(sc);
if (error != 0)
return (EIO);
/* Create RX frame queue range */
error = dtsec_rm_fqr_rx_init(sc);
if (error != 0)
return (EIO);
/* Create frame info pool */
error = dtsec_rm_fi_pool_init(sc);
if (error != 0)
return (EIO);
/* Create TX frame queue range */
error = dtsec_rm_fqr_tx_init(sc);
if (error != 0)
return (EIO);
}
/* Init FMan MAC module. */
error = dtsec_fm_mac_init(sc, sc->sc_mac_addr);
if (error != 0) {
dtsec_detach(dev);
return (ENXIO);
}
/* Init FMan TX port */
error = sc->sc_port_tx_init(sc, device_get_unit(sc->sc_dev));
if (error != 0) {
dtsec_detach(dev);
return (ENXIO);
}
/* Init FMan RX port */
error = sc->sc_port_rx_init(sc, device_get_unit(sc->sc_dev));
if (error != 0) {
dtsec_detach(dev);
return (ENXIO);
}
/* Create network interface for upper layers */
ifp = sc->sc_ifnet = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(sc->sc_dev, "if_alloc() failed.\n");
- dtsec_detach(dev);
- return (ENOMEM);
- }
-
if_setsoftc(ifp, sc);
if_setflags(ifp, IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST);
if_setinitfn(ifp, dtsec_if_init);
if_setstartfn(ifp, dtsec_if_start);
if_setioctlfn(ifp, dtsec_if_ioctl);
if_setsendqlen(ifp, IFQ_MAXLEN);
if (sc->sc_phy_addr >= 0)
if_initname(ifp, device_get_name(sc->sc_dev),
device_get_unit(sc->sc_dev));
else
if_initname(ifp, "dtsec_phy", device_get_unit(sc->sc_dev));
/* TODO */
#if 0
if_setsendqlen(ifp, TSEC_TX_NUM_DESC - 1);
if_setsendqready(ifp);
#endif
if_setcapabilities(ifp, IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU);
if_setcapenable(ifp, if_getcapabilities(ifp));
/* Attach PHY(s) */
error = mii_attach(sc->sc_dev, &sc->sc_mii_dev, ifp, dtsec_ifmedia_upd,
dtsec_ifmedia_sts, BMSR_DEFCAPMASK, sc->sc_phy_addr,
MII_OFFSET_ANY, 0);
if (error) {
device_printf(sc->sc_dev, "attaching PHYs failed: %d\n", error);
dtsec_detach(sc->sc_dev);
return (error);
}
sc->sc_mii = device_get_softc(sc->sc_mii_dev);
/* Attach to stack */
ether_ifattach(ifp, sc->sc_mac_addr);
return (0);
}
/*
 * Detach the dTSEC device: detach from the network stack, stop the
 * MII tick, release the ifnet, free QMan/BMan resources (regular mode
 * only) and the FMan MAC/port handles, then destroy the global lock.
 * Always returns 0.
 */
int
dtsec_detach(device_t dev)
{
	struct dtsec_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	ifp = sc->sc_ifnet;

	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);

		/* Shutdown interface */
		DTSEC_LOCK(sc);
		dtsec_if_deinit_locked(sc);
		DTSEC_UNLOCK(sc);
	}

	if (sc->sc_ifnet) {
		if_free(sc->sc_ifnet);
		sc->sc_ifnet = NULL;
	}

	if (sc->sc_mode == DTSEC_MODE_REGULAR) {
		/* Free RX/TX FQRs */
		dtsec_rm_fqr_rx_free(sc);
		dtsec_rm_fqr_tx_free(sc);

		/* Free frame info pool */
		dtsec_rm_fi_pool_free(sc);

		/* Free RX buffer pool */
		dtsec_rm_pool_rx_free(sc);
	}

	dtsec_fm_mac_free(sc);
	dtsec_fm_port_free_both(sc);

	/* Destroy lock */
	/* NOTE(review): only sc_lock is destroyed here; sc_mii_lock is
	 * initialized in dtsec_attach() but not destroyed in this
	 * function — confirm whether it is torn down elsewhere. */
	mtx_destroy(&sc->sc_lock);

	return (0);
}
/*
 * Suspend hook; no state to save, always succeeds.
 */
int
dtsec_suspend(device_t dev)
{

	return (0);
}
/*
 * Resume hook; no state to restore, always succeeds.
 */
int
dtsec_resume(device_t dev)
{

	return (0);
}
/*
 * Shutdown hook; nothing to quiesce here, always succeeds.
 */
int
dtsec_shutdown(device_t dev)
{

	return (0);
}
/** @} */
/**
* @group MII bus interface.
* @{
*/
/*
 * MII read: forward to the attached MDIO controller.
 */
int
dtsec_miibus_readreg(device_t dev, int phy, int reg)
{
	struct dtsec_softc *sc = device_get_softc(dev);

	return (MIIBUS_READREG(sc->sc_mdio, phy, reg));
}
/*
 * MII write: forward to the attached MDIO controller.
 */
int
dtsec_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct dtsec_softc *sc = device_get_softc(dev);

	return (MIIBUS_WRITEREG(sc->sc_mdio, phy, reg, value));
}
/*
 * MII status-change callback: translate the negotiated media word
 * into an FMan speed/duplex pair and push it to the MAC.  Called with
 * the softc lock held (asserted below).
 */
void
dtsec_miibus_statchg(device_t dev)
{
	struct dtsec_softc *sc = device_get_softc(dev);
	e_EnetSpeed speed;
	bool duplex;

	DTSEC_LOCK_ASSERT(sc);

	duplex = ((sc->sc_mii->mii_media_active & IFM_GMASK) == IFM_FDX);

	switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
	case IFM_1000_T:	/* FALLTHROUGH */
	case IFM_1000_SX:
		speed = e_ENET_SPEED_1000;
		break;
	case IFM_100_TX:
		speed = e_ENET_SPEED_100;
		break;
	case IFM_10_T:
	default:
		/* 10 Mb/s, and the conservative fallback. */
		speed = e_ENET_SPEED_10;
		break;
	}

	if (FM_MAC_AdjustLink(sc->sc_mach, speed, duplex) != E_OK)
		device_printf(sc->sc_dev, "error while adjusting MAC speed.\n");
}
/** @} */
diff --git a/sys/dev/dpaa2/dpaa2_ni.c b/sys/dev/dpaa2/dpaa2_ni.c
index a9e6aa120549..a21351a20b49 100644
--- a/sys/dev/dpaa2/dpaa2_ni.c
+++ b/sys/dev/dpaa2/dpaa2_ni.c
@@ -1,3723 +1,3718 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright © 2021-2023 Dmitry Salychev
* Copyright © 2022 Mathew McBride
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
/*
* The DPAA2 Network Interface (DPNI) driver.
*
* The DPNI object is a network interface that is configurable to support a wide
* range of features from a very basic Ethernet interface up to a
* high-functioning network interface. The DPNI supports features that are
* expected by standard network stacks, from basic features to offloads.
*
* DPNIs work with Ethernet traffic, starting with the L2 header. Additional
* functions are provided for standard network protocols (L2, L3, L4, etc.).
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/taskqueue.h>
#include <sys/sysctl.h>
#include <sys/buf_ring.h>
#include <sys/smp.h>
#include <sys/proc.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/atomic.h>
#include <machine/vmparam.h>
#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <dev/pci/pcivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mdio/mdio.h>
#include "opt_acpi.h"
#include "opt_platform.h"
#include "pcib_if.h"
#include "pci_if.h"
#include "miibus_if.h"
#include "memac_mdio_if.h"
#include "dpaa2_types.h"
#include "dpaa2_mc.h"
#include "dpaa2_mc_if.h"
#include "dpaa2_mcp.h"
#include "dpaa2_swp.h"
#include "dpaa2_swp_if.h"
#include "dpaa2_cmd_if.h"
#include "dpaa2_ni.h"
#include "dpaa2_channel.h"
#include "dpaa2_buf.h"
#define BIT(x) (1ul << (x))
#define WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
/* Frame Dequeue Response status bits. */
#define IS_NULL_RESPONSE(stat) ((((stat) >> 4) & 1) == 0)
#define ALIGN_UP(x, y) roundup2((x), (y))
#define ALIGN_DOWN(x, y) rounddown2((x), (y))
#define CACHE_LINE_ALIGN(x) ALIGN_UP((x), CACHE_LINE_SIZE)
#define DPNI_LOCK(__sc) do { \
mtx_assert(&(__sc)->lock, MA_NOTOWNED); \
mtx_lock(&(__sc)->lock); \
} while (0)
#define DPNI_UNLOCK(__sc) do { \
mtx_assert(&(__sc)->lock, MA_OWNED); \
mtx_unlock(&(__sc)->lock); \
} while (0)
#define DPNI_LOCK_ASSERT(__sc) do { \
mtx_assert(&(__sc)->lock, MA_OWNED); \
} while (0)
#define DPAA2_TX_RING(sc, chan, tc) \
(&(sc)->channels[(chan)]->txc_queue.tx_rings[(tc)])
MALLOC_DEFINE(M_DPAA2_TXB, "dpaa2_txb", "DPAA2 DMA-mapped buffer (Tx)");
/*
* How many times channel cleanup routine will be repeated if the RX or TX
* budget was depleted.
*/
#define DPAA2_CLEAN_BUDGET 64 /* sysctl(9)? */
/* TX/RX budget for the channel cleanup task */
#define DPAA2_TX_BUDGET 128 /* sysctl(9)? */
#define DPAA2_RX_BUDGET 256 /* sysctl(9)? */
#define DPNI_IRQ_INDEX 0 /* Index of the only DPNI IRQ. */
#define DPNI_IRQ_LINK_CHANGED 1 /* Link state changed */
#define DPNI_IRQ_EP_CHANGED 2 /* DPAA2 endpoint dis/connected */
/* Default maximum frame length. */
#define DPAA2_ETH_MFL (ETHER_MAX_LEN - ETHER_CRC_LEN)
/* Minimally supported version of the DPNI API. */
#define DPNI_VER_MAJOR 7
#define DPNI_VER_MINOR 0
/* Rx/Tx buffers configuration. */
#define BUF_ALIGN_V1 256 /* WRIOP v1.0.0 limitation */
#define BUF_ALIGN 64
#define BUF_SWA_SIZE 64 /* SW annotation size */
#define BUF_RX_HWA_SIZE 64 /* HW annotation size */
#define BUF_TX_HWA_SIZE 128 /* HW annotation size */
#define DPAA2_RX_BUFRING_SZ (4096u)
#define DPAA2_RXE_BUFRING_SZ (1024u)
#define DPAA2_TXC_BUFRING_SZ (4096u)
#define DPAA2_TX_SEGLIMIT (16u) /* arbitrary number */
#define DPAA2_TX_SEG_SZ (PAGE_SIZE)
#define DPAA2_TX_SEGS_MAXSZ (DPAA2_TX_SEGLIMIT * DPAA2_TX_SEG_SZ)
#define DPAA2_TX_SGT_SZ (PAGE_SIZE) /* bytes */
/* Size of a buffer to keep a QoS table key configuration. */
#define ETH_QOS_KCFG_BUF_SIZE (PAGE_SIZE)
/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
#define DPAA2_CLASSIFIER_DMA_SIZE (PAGE_SIZE)
/* Buffers layout options. */
#define BUF_LOPT_TIMESTAMP 0x1
#define BUF_LOPT_PARSER_RESULT 0x2
#define BUF_LOPT_FRAME_STATUS 0x4
#define BUF_LOPT_PRIV_DATA_SZ 0x8
#define BUF_LOPT_DATA_ALIGN 0x10
#define BUF_LOPT_DATA_HEAD_ROOM 0x20
#define BUF_LOPT_DATA_TAIL_ROOM 0x40
#define DPAA2_NI_BUF_ADDR_MASK (0x1FFFFFFFFFFFFul) /* 49-bit addresses max. */
#define DPAA2_NI_BUF_CHAN_MASK (0xFu)
#define DPAA2_NI_BUF_CHAN_SHIFT (60)
#define DPAA2_NI_BUF_IDX_MASK (0x7FFFu)
#define DPAA2_NI_BUF_IDX_SHIFT (49)
#define DPAA2_NI_TX_IDX_MASK (0x7u)
#define DPAA2_NI_TX_IDX_SHIFT (57)
#define DPAA2_NI_TXBUF_IDX_MASK (0xFFu)
#define DPAA2_NI_TXBUF_IDX_SHIFT (49)
#define DPAA2_NI_FD_FMT_MASK (0x3u)
#define DPAA2_NI_FD_FMT_SHIFT (12)
#define DPAA2_NI_FD_ERR_MASK (0xFFu)
#define DPAA2_NI_FD_ERR_SHIFT (0)
#define DPAA2_NI_FD_SL_MASK (0x1u)
#define DPAA2_NI_FD_SL_SHIFT (14)
#define DPAA2_NI_FD_LEN_MASK (0x3FFFFu)
#define DPAA2_NI_FD_OFFSET_MASK (0x0FFFu)
/* Enables TCAM for Flow Steering and QoS look-ups. */
#define DPNI_OPT_HAS_KEY_MASKING 0x10
/* Unique IDs for the supported Rx classification header fields. */
#define DPAA2_ETH_DIST_ETHDST BIT(0)
#define DPAA2_ETH_DIST_ETHSRC BIT(1)
#define DPAA2_ETH_DIST_ETHTYPE BIT(2)
#define DPAA2_ETH_DIST_VLAN BIT(3)
#define DPAA2_ETH_DIST_IPSRC BIT(4)
#define DPAA2_ETH_DIST_IPDST BIT(5)
#define DPAA2_ETH_DIST_IPPROTO BIT(6)
#define DPAA2_ETH_DIST_L4SRC BIT(7)
#define DPAA2_ETH_DIST_L4DST BIT(8)
#define DPAA2_ETH_DIST_ALL (~0ULL)
/* L3-L4 network traffic flow hash options. */
#define RXH_L2DA (1 << 1)
#define RXH_VLAN (1 << 2)
#define RXH_L3_PROTO (1 << 3)
#define RXH_IP_SRC (1 << 4)
#define RXH_IP_DST (1 << 5)
#define RXH_L4_B_0_1 (1 << 6) /* src port in case of TCP/UDP/SCTP */
#define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */
#define RXH_DISCARD (1 << 31)
/* Default Rx hash options, set during attaching. */
#define DPAA2_RXH_DEFAULT (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
MALLOC_DEFINE(M_DPAA2_NI, "dpaa2_ni", "DPAA2 Network Interface");
/*
 * DPAA2 Network Interface resource specification.
 *
 * Lists the DPAA2 objects a DPNI requires from its parent resource container.
 * Entries marked RF_OPTIONAL may be absent; dpaa2_ni_setup_channels() counts
 * how many DPIO/DPCON resources were actually allocated.
 *
 * NOTE: Don't forget to update macros in dpaa2_ni.h in case of any changes in
 * the specification!
 */
struct resource_spec dpaa2_ni_spec[] = {
	/*
	 * DPMCP resources.
	 *
	 * NOTE: MC command portals (MCPs) are used to send commands to, and
	 * receive responses from, the MC firmware. One portal per DPNI.
	 */
	{ DPAA2_DEV_MCP, DPAA2_NI_MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPIO resources (software portals).
	 *
	 * NOTE: One per running core. While DPIOs are the source of data
	 * availability interrupts, the DPCONs are used to identify the
	 * network interface that has produced ingress data to that core.
	 */
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(0), RF_ACTIVE | RF_SHAREABLE },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(1), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(2), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(3), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(4), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(5), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(6), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(7), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(8), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(9), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(10), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(11), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(12), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(13), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(14), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(15), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPBP resources (buffer pools).
	 *
	 * NOTE: One per network interface.
	 */
	{ DPAA2_DEV_BP, DPAA2_NI_BP_RID(0), RF_ACTIVE },
	/*
	 * DPCON resources (channels).
	 *
	 * NOTE: One DPCON per core where Rx or Tx confirmation traffic to be
	 * distributed to.
	 * NOTE: Since it is necessary to distinguish between traffic from
	 * different network interfaces arriving on the same core, the
	 * DPCONs must be private to the DPNIs.
	 */
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(0), RF_ACTIVE },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(1), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(2), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(3), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(4), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(5), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(6), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(7), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(8), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(9), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(10), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(11), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(12), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(13), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(14), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(15), RF_ACTIVE | RF_OPTIONAL },

	RESOURCE_SPEC_END
};
/* Supported header fields for Rx hash distribution key */
/*
 * Supported header fields for the Rx hash distribution key.
 *
 * Each entry maps an ethtool-style RXH_* flag (rxnfc_field) to the MC
 * firmware's protocol/field identifiers used when building the key
 * generation profile.  Entries with rxnfc_field left at 0 (ETHSRC,
 * ETHTYPE) have no RXH_* counterpart and are selectable only by .id.
 */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
	{
		/* L2 header */
		.rxnfc_field = RXH_L2DA,
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_DA,
		.id = DPAA2_ETH_DIST_ETHDST,
		.size = 6,
	}, {
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_SA,
		.id = DPAA2_ETH_DIST_ETHSRC,
		.size = 6,
	}, {
		/* This is the last ethertype field parsed:
		 * depending on frame format, it can be the MAC ethertype
		 * or the VLAN etype.
		 */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_TYPE,
		.id = DPAA2_ETH_DIST_ETHTYPE,
		.size = 2,
	}, {
		/* VLAN header */
		.rxnfc_field = RXH_VLAN,
		.cls_prot = NET_PROT_VLAN,
		.cls_field = NH_FLD_VLAN_TCI,
		.id = DPAA2_ETH_DIST_VLAN,
		.size = 2,
	}, {
		/* IP header */
		.rxnfc_field = RXH_IP_SRC,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_SRC,
		.id = DPAA2_ETH_DIST_IPSRC,
		.size = 4,
	}, {
		.rxnfc_field = RXH_IP_DST,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_DST,
		.id = DPAA2_ETH_DIST_IPDST,
		.size = 4,
	}, {
		.rxnfc_field = RXH_L3_PROTO,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_PROTO,
		.id = DPAA2_ETH_DIST_IPPROTO,
		.size = 1,
	}, {
		/* Using UDP ports, this is functionally equivalent to raw
		 * byte pairs from L4 header.
		 */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.id = DPAA2_ETH_DIST_L4SRC,
		.size = 2,
	}, {
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.id = DPAA2_ETH_DIST_L4DST,
		.size = 2,
	},
};
/*
 * DPNI statistics exported via sysctl(9).
 *
 * "page" and "cnt" select the counter in the MC firmware's statistics
 * pages (see dpaa2_ni_collect_stats()); "name"/"desc" become the sysctl
 * node name and description.  The array length must stay equal to
 * DPAA2_NI_STAT_SYSCTLS.
 */
static struct dpni_stat {
	int	 page;	/* statistics page index in MC firmware */
	int	 cnt;	/* counter index within the page */
	char	*name;	/* sysctl node name */
	char	*desc;	/* sysctl node description */
} dpni_stat_sysctls[DPAA2_NI_STAT_SYSCTLS] = {
	/* PAGE, COUNTER, NAME, DESCRIPTION */
	{ 0, 0, "in_all_frames", "All accepted ingress frames" },
	{ 0, 1, "in_all_bytes", "Bytes in all accepted ingress frames" },
	{ 0, 2, "in_multi_frames", "Multicast accepted ingress frames" },
	{ 1, 0, "eg_all_frames", "All egress frames transmitted" },
	{ 1, 1, "eg_all_bytes", "Bytes in all frames transmitted" },
	{ 1, 2, "eg_multi_frames", "Multicast egress frames transmitted" },
	{ 2, 0, "in_filtered_frames", "All ingress frames discarded due to "
	    "filtering" },
	{ 2, 1, "in_discarded_frames", "All frames discarded due to errors" },
	{ 2, 2, "in_nobuf_discards", "Discards on ingress side due to buffer "
	    "depletion in DPNI buffer pools" },
};
/*
 * Per-dequeue Rx context passed between dpaa2_ni_consume_frames() and
 * dpaa2_ni_rx() while an mbuf chain is being assembled.
 *
 * NOTE(review): field semantics inferred from usage outside this chunk —
 * confirm against dpaa2_ni_rx()/dpaa2_ni_consume_frames().
 */
struct dpaa2_ni_rx_ctx {
	struct mbuf	*head;	/* first mbuf of the chain being built */
	struct mbuf	*tail;	/* presumably last mbuf; fragments appended here */
	int		 cnt;	/* number of frames consumed so far */
	bool		 last;	/* true once the final frame has been seen */
};
/* Device interface */
static int dpaa2_ni_probe(device_t);
static int dpaa2_ni_attach(device_t);
static int dpaa2_ni_detach(device_t);
/* DPAA2 network interface setup and configuration */
static int dpaa2_ni_setup(device_t);
static int dpaa2_ni_setup_channels(device_t);
static int dpaa2_ni_bind(device_t);
static int dpaa2_ni_setup_rx_dist(device_t);
static int dpaa2_ni_setup_irqs(device_t);
static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *);
/* Tx/Rx flow configuration */
static int dpaa2_ni_setup_rx_flow(device_t, struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_tx_flow(device_t, struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_rx_err_flow(device_t, struct dpaa2_ni_fq *);
/* Configuration subroutines */
static int dpaa2_ni_set_buf_layout(device_t);
static int dpaa2_ni_set_pause_frame(device_t);
static int dpaa2_ni_set_qos_table(device_t);
static int dpaa2_ni_set_mac_addr(device_t);
static int dpaa2_ni_set_hash(device_t, uint64_t);
static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t);
/* Frame descriptor routines */
static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *,
struct dpaa2_buf *, bus_dma_segment_t *, int, struct dpaa2_fd *);
static int dpaa2_ni_fd_err(struct dpaa2_fd *);
static uint32_t dpaa2_ni_fd_data_len(struct dpaa2_fd *);
static int dpaa2_ni_fd_format(struct dpaa2_fd *);
static bool dpaa2_ni_fd_short_len(struct dpaa2_fd *);
static int dpaa2_ni_fd_offset(struct dpaa2_fd *);
/* Various subroutines */
static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t);
static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *);
/* Network interface routines */
static void dpaa2_ni_init(void *);
static int dpaa2_ni_transmit(if_t , struct mbuf *);
static void dpaa2_ni_qflush(if_t );
static int dpaa2_ni_ioctl(if_t , u_long, caddr_t);
static int dpaa2_ni_update_mac_filters(if_t );
static u_int dpaa2_ni_add_maddr(void *, struct sockaddr_dl *, u_int);
/* Interrupt handlers */
static void dpaa2_ni_intr(void *);
/* MII handlers */
static void dpaa2_ni_miibus_statchg(device_t);
static int dpaa2_ni_media_change(if_t );
static void dpaa2_ni_media_status(if_t , struct ifmediareq *);
static void dpaa2_ni_media_tick(void *);
/* Tx/Rx routines. */
static int dpaa2_ni_rx_cleanup(struct dpaa2_channel *);
static int dpaa2_ni_tx_cleanup(struct dpaa2_channel *);
static void dpaa2_ni_tx(struct dpaa2_ni_softc *, struct dpaa2_channel *,
struct dpaa2_ni_tx_ring *, struct mbuf *);
static void dpaa2_ni_cleanup_task(void *, int);
/* Tx/Rx subroutines */
static int dpaa2_ni_consume_frames(struct dpaa2_channel *, struct dpaa2_ni_fq **,
uint32_t *);
static int dpaa2_ni_rx(struct dpaa2_channel *, struct dpaa2_ni_fq *,
struct dpaa2_fd *, struct dpaa2_ni_rx_ctx *);
static int dpaa2_ni_rx_err(struct dpaa2_channel *, struct dpaa2_ni_fq *,
struct dpaa2_fd *);
static int dpaa2_ni_tx_conf(struct dpaa2_channel *, struct dpaa2_ni_fq *,
struct dpaa2_fd *);
/* sysctl(9) */
static int dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS);
/**
 * @brief Probe method for the DPNI driver.
 *
 * Always succeeds with BUS_PROBE_DEFAULT: the device is enumerated by the
 * parent DPAA2 resource container, so there is nothing to match here.
 */
static int
dpaa2_ni_probe(device_t dev)
{
	/* DPNI device will be added by a parent resource container itself. */
	device_set_desc(dev, "DPAA2 Network Interface");
	return (BUS_PROBE_DEFAULT);
}
/**
 * @brief Attach method for the DPNI driver.
 *
 * Allocates bus resources, obtains the MC command portal, creates the
 * network interface, opens the resource container and the DPNI object,
 * spins up the buffer-pool taskqueue, and performs the full DPNI setup
 * (channels, bindings, IRQs, sysctls) before attaching the ethernet layer.
 *
 * Returns 0 on success and ENXIO on any failure.
 */
static int
dpaa2_ni_attach(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t mcp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *mcp_dinfo;
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	if_t ifp;
	char tq_name[32];
	int error;

	sc->dev = dev;
	sc->ifp = NULL;
	sc->miibus = NULL;
	sc->mii = NULL;
	sc->media_status = 0;
	sc->if_flags = 0;
	sc->link_state = LINK_STATE_UNKNOWN;
	sc->buf_align = 0;

	/* For debug purposes only! */
	sc->rx_anomaly_frames = 0;
	sc->rx_single_buf_frames = 0;
	sc->rx_sg_buf_frames = 0;
	sc->rx_enq_rej_frames = 0;
	sc->rx_ieoi_err_frames = 0;
	sc->tx_single_buf_frames = 0;
	sc->tx_sg_frames = 0;

	DPAA2_ATOMIC_XCHG(&sc->buf_num, 0);
	DPAA2_ATOMIC_XCHG(&sc->buf_free, 0);

	sc->rxd_dmat = NULL;
	sc->qos_dmat = NULL;

	sc->qos_kcfg.dmap = NULL;
	sc->qos_kcfg.paddr = 0;
	sc->qos_kcfg.vaddr = NULL;

	sc->rxd_kcfg.dmap = NULL;
	sc->rxd_kcfg.paddr = 0;
	sc->rxd_kcfg.vaddr = NULL;

	sc->mac.dpmac_id = 0;
	sc->mac.phy_dev = NULL;
	memset(sc->mac.addr, 0, ETHER_ADDR_LEN);

	error = bus_alloc_resources(sc->dev, dpaa2_ni_spec, sc->res);
	if (error) {
		device_printf(dev, "%s: failed to allocate resources: "
		    "error=%d\n", __func__, error);
		goto err_exit;
	}

	/* Obtain MC portal. */
	mcp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_MCP_RID(0)]);
	mcp_dinfo = device_get_ivars(mcp_dev);
	dinfo->portal = mcp_dinfo->portal;

	mtx_init(&sc->lock, device_get_nameunit(dev), "dpaa2_ni", MTX_DEF);

	/* Allocate network interface. if_alloc(9) cannot fail. */
	ifp = if_alloc(IFT_ETHER);
	sc->ifp = ifp;
	if_initname(ifp, DPAA2_NI_IFNAME, device_get_unit(sc->dev));

	if_setsoftc(ifp, sc);
	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
	if_setinitfn(ifp, dpaa2_ni_init);
	if_setioctlfn(ifp, dpaa2_ni_ioctl);
	if_settransmitfn(ifp, dpaa2_ni_transmit);
	if_setqflushfn(ifp, dpaa2_ni_qflush);

	if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	DPAA2_CMD_INIT(&cmd);

	/* Open resource container and network interface object. */
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	bzero(tq_name, sizeof(tq_name));
	snprintf(tq_name, sizeof(tq_name), "%s_tqbp", device_get_nameunit(dev));

	/*
	 * XXX-DSL: Release new buffers on Buffer Pool State Change Notification
	 *          (BPSCN) returned as a result to the VDQ command instead.
	 *          It is similar to CDAN processed in dpaa2_io_intr().
	 */
	/* Create a taskqueue thread to release new buffers to the pool. */
	sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK,
	    taskqueue_thread_enqueue, &sc->bp_taskq);
	taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name);

	/* sc->cleanup_taskq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK, */
	/*     taskqueue_thread_enqueue, &sc->cleanup_taskq); */
	/* taskqueue_start_threads(&sc->cleanup_taskq, 1, PI_NET, */
	/*     "dpaa2_ch cleanup"); */

	error = dpaa2_ni_setup(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_channels(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup QBMan channels: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	error = dpaa2_ni_bind(dev);
	if (error) {
		device_printf(dev, "%s: failed to bind DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_irqs(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup IRQs: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_sysctls(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup sysctls: error=%d\n",
		    __func__, error);
		goto close_ni;
	}

	ether_ifattach(sc->ifp, sc->mac.addr);
	callout_init(&sc->mii_callout, 0);

	return (0);

close_ni:
	DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	/*
	 * Release the interface allocated above; ether_ifattach() has not
	 * been called on any failure path, so if_free(9) is safe here.
	 * NOTE(review): bus resources, the mutex and the taskqueue still
	 * leak on failure since dpaa2_ni_detach() is not implemented yet.
	 */
	if (sc->ifp != NULL) {
		if_free(sc->ifp);
		sc->ifp = NULL;
	}
	return (ENXIO);
}
/**
 * @brief Report media status for a DPNI in fixed-link mode.
 *
 * Always reports the currently selected fixed media as valid and active,
 * and forces the link state to UP the first time it is queried.
 */
static void
dpaa2_ni_fixed_media_status(if_t ifp, struct ifmediareq* ifmr)
{
	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
	int media;

	DPNI_LOCK(sc);
	media = sc->fixed_ifmedia.ifm_cur->ifm_media;
	ifmr->ifm_count = 0;
	ifmr->ifm_mask = 0;
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_current = media;
	ifmr->ifm_active = media;

	/*
	 * In non-PHY usecases, we need to signal link state up, otherwise
	 * certain things requiring a link event (e.g async DHCP client) from
	 * devd do not happen.
	 */
	if (if_getlinkstate(ifp) == LINK_STATE_UNKNOWN) {
		if_link_state_change(ifp, LINK_STATE_UP);
	}

	/*
	 * TODO: Check the status of the link partner (DPMAC, DPNI or other) and
	 * reset if down. This is different to the DPAA2_MAC_LINK_TYPE_PHY as
	 * the MC firmware sets the status, instead of us telling the MC what
	 * it is.
	 */
	DPNI_UNLOCK(sc);
}
/**
 * @brief Put the DPNI into fixed-link mode with a single 1000baseT medium.
 *
 * Used when the link partner (SFP/SerDes DPMAC, another DPNI, DPSW or
 * DPDMUX) manages the link itself, so no PHY/miibus is attached.
 */
static void
dpaa2_ni_setup_fixed_link(struct dpaa2_ni_softc *sc)
{
	struct ifmedia *ifm = &sc->fixed_ifmedia;

	/*
	 * FIXME: When the DPNI is connected to a DPMAC, we can get the
	 * 'apparent' speed from it.
	 */
	sc->fixed_link = true;

	ifmedia_init(ifm, 0, dpaa2_ni_media_change,
	    dpaa2_ni_fixed_media_status);
	ifmedia_add(ifm, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_set(ifm, IFM_ETHER | IFM_1000_T);
}
/**
 * @brief Detach method for the DPNI driver.
 *
 * NOTE(review): stub — bus resources, the softc mutex, the taskqueue,
 * channels and the ifnet allocated in dpaa2_ni_attach() are NOT released
 * here yet, so unloading the driver leaks them.
 */
static int
dpaa2_ni_detach(device_t dev)
{
	/* TBD */
	return (0);
}
/**
 * @brief Configure DPAA2 network interface object.
 *
 * Opens the DPRC and DPNI, checks the DPNI API version, resets the object,
 * reads its attributes, configures buffer layout and DMA, discovers the
 * connected endpoint (DPMAC/DPNI/DPSW/DPDMUX) to set up either a PHY via
 * miibus or a fixed link, enables pause frames, programs the QoS table,
 * installs the broadcast MAC filter and sets the maximum frame length.
 */
static int
dpaa2_ni_setup(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_ep_desc ep1_desc, ep2_desc; /* endpoint descriptors */
	struct dpaa2_cmd cmd;
	uint8_t eth_bca[ETHER_ADDR_LEN]; /* broadcast physical address */
	uint16_t rc_token, ni_token, mac_token;
	struct dpaa2_mac_attr attr;
	enum dpaa2_mac_link_type link_type;
	uint32_t link;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Check if we can work with this DPNI object. */
	error = DPAA2_CMD_NI_GET_API_VERSION(dev, child, &cmd, &sc->api_major,
	    &sc->api_minor);
	if (error) {
		device_printf(dev, "%s: failed to get DPNI API version\n",
		    __func__);
		goto close_ni;
	}
	if (dpaa2_ni_cmp_api_version(sc, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
		device_printf(dev, "%s: DPNI API version %u.%u not supported, "
		    "need >= %u.%u\n", __func__, sc->api_major, sc->api_minor,
		    DPNI_VER_MAJOR, DPNI_VER_MINOR);
		error = ENODEV;
		goto close_ni;
	}

	/* Reset the DPNI object. */
	error = DPAA2_CMD_NI_RESET(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to reset DPNI: id=%d\n",
		    __func__, dinfo->id);
		goto close_ni;
	}

	/* Obtain attributes of the DPNI object. */
	error = DPAA2_CMD_NI_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr);
	if (error) {
		device_printf(dev, "%s: failed to obtain DPNI attributes: "
		    "id=%d\n", __func__, dinfo->id);
		goto close_ni;
	}
	if (bootverbose) {
		/* "%#x" already prints the "0x" prefix; don't add it twice. */
		device_printf(dev, "\toptions=%#x queues=%d tx_channels=%d "
		    "wriop_version=%#x\n", sc->attr.options,
		    sc->attr.num.queues, sc->attr.num.channels,
		    sc->attr.wriop_ver);
		device_printf(dev, "\ttraffic classes: rx=%d tx=%d "
		    "cgs_groups=%d\n", sc->attr.num.rx_tcs, sc->attr.num.tx_tcs,
		    sc->attr.num.cgs);
		device_printf(dev, "\ttable entries: mac=%d vlan=%d qos=%d "
		    "fs=%d\n", sc->attr.entries.mac, sc->attr.entries.vlan,
		    sc->attr.entries.qos, sc->attr.entries.fs);
		device_printf(dev, "\tkey sizes: qos=%d fs=%d\n",
		    sc->attr.key_size.qos, sc->attr.key_size.fs);
	}

	/* Configure buffer layouts of the DPNI queues. */
	error = dpaa2_ni_set_buf_layout(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure buffer layout\n",
		    __func__);
		goto close_ni;
	}

	/* Configure DMA resources. */
	error = dpaa2_ni_setup_dma(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup DMA\n", __func__);
		goto close_ni;
	}

	/* Setup link between DPNI and an object it's connected to. */
	ep1_desc.obj_id = dinfo->id;
	ep1_desc.if_id = 0; /* DPNI has the only endpoint */
	ep1_desc.type = dinfo->dtype;

	error = DPAA2_CMD_RC_GET_CONN(dev, child, DPAA2_CMD_TK(&cmd, rc_token),
	    &ep1_desc, &ep2_desc, &link);
	if (error) {
		/* Non-fatal: continue without endpoint information. */
		device_printf(dev, "%s: failed to obtain an object DPNI is "
		    "connected to: error=%d\n", __func__, error);
	} else {
		device_printf(dev, "connected to %s (id=%d)\n",
		    dpaa2_ttos(ep2_desc.type), ep2_desc.obj_id);

		error = dpaa2_ni_set_mac_addr(dev);
		if (error) {
			device_printf(dev, "%s: failed to set MAC address: "
			    "error=%d\n", __func__, error);
		}

		if (ep2_desc.type == DPAA2_DEV_MAC) {
			/*
			 * This is the simplest case when DPNI is connected to
			 * DPMAC directly.
			 */
			sc->mac.dpmac_id = ep2_desc.obj_id;

			link_type = DPAA2_MAC_LINK_TYPE_NONE;

			/*
			 * Need to determine if DPMAC type is PHY (attached to
			 * conventional MII PHY) or FIXED (usually SFP/SerDes,
			 * link state managed by MC firmware).
			 */
			error = DPAA2_CMD_MAC_OPEN(sc->dev, child,
			    DPAA2_CMD_TK(&cmd, rc_token), sc->mac.dpmac_id,
			    &mac_token);
			/*
			 * Under VFIO, the DPMAC might be sitting in another
			 * container (DPRC) we don't have access to.
			 * Assume DPAA2_MAC_LINK_TYPE_FIXED if this is
			 * the case.
			 */
			if (error) {
				device_printf(dev, "%s: failed to open "
				    "connected DPMAC: %d (assuming in other "
				    "DPRC)\n", __func__, sc->mac.dpmac_id);
				link_type = DPAA2_MAC_LINK_TYPE_FIXED;
			} else {
				error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child,
				    &cmd, &attr);
				if (error) {
					device_printf(dev, "%s: failed to get "
					    "DPMAC attributes: id=%d, "
					    "error=%d\n", __func__, dinfo->id,
					    error);
				} else {
					link_type = attr.link_type;
				}
			}
			DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);

			if (link_type == DPAA2_MAC_LINK_TYPE_FIXED) {
				device_printf(dev, "connected DPMAC is in FIXED "
				    "mode\n");
				dpaa2_ni_setup_fixed_link(sc);
			} else if (link_type == DPAA2_MAC_LINK_TYPE_PHY) {
				device_printf(dev, "connected DPMAC is in PHY "
				    "mode\n");
				error = DPAA2_MC_GET_PHY_DEV(dev,
				    &sc->mac.phy_dev, sc->mac.dpmac_id);
				if (error == 0) {
					error = MEMAC_MDIO_SET_NI_DEV(
					    sc->mac.phy_dev, dev);
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to set dpni dev on memac "
						    "mdio dev %s: error=%d\n",
						    __func__,
						    device_get_nameunit(
						    sc->mac.phy_dev), error);
					}
				}
				if (error == 0) {
					error = MEMAC_MDIO_GET_PHY_LOC(
					    sc->mac.phy_dev, &sc->mac.phy_loc);
					/* ENODEV: no PHY at that location. */
					if (error == ENODEV) {
						error = 0;
					}
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to get phy location from "
						    "memac mdio dev %s: error=%d\n",
						    __func__, device_get_nameunit(
						    sc->mac.phy_dev), error);
					}
				}
				if (error == 0) {
					error = mii_attach(sc->mac.phy_dev,
					    &sc->miibus, sc->ifp,
					    dpaa2_ni_media_change,
					    dpaa2_ni_media_status,
					    BMSR_DEFCAPMASK, sc->mac.phy_loc,
					    MII_OFFSET_ANY, 0);
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to attach to miibus: "
						    "error=%d\n",
						    __func__, error);
					}
				}
				if (error == 0) {
					sc->mii = device_get_softc(sc->miibus);
				}
			} else {
				device_printf(dev, "%s: DPMAC link type is not "
				    "supported\n", __func__);
			}
		} else if (ep2_desc.type == DPAA2_DEV_NI ||
		    ep2_desc.type == DPAA2_DEV_MUX ||
		    ep2_desc.type == DPAA2_DEV_SW) {
			dpaa2_ni_setup_fixed_link(sc);
		}
	}

	/* Select mode to enqueue frames. */
	/* ... TBD ... */

	/*
	 * Update link configuration to enable Rx/Tx pause frames support.
	 *
	 * NOTE: MC may generate an interrupt to the DPMAC and request changes
	 *       in link configuration. It might be necessary to attach miibus
	 *       and PHY before this point.
	 */
	error = dpaa2_ni_set_pause_frame(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure Rx/Tx pause "
		    "frames\n", __func__);
		goto close_ni;
	}

	/* Configure ingress traffic classification. */
	error = dpaa2_ni_set_qos_table(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure QoS table: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/* Add broadcast physical address to the MAC filtering table. */
	memset(eth_bca, 0xff, ETHER_ADDR_LEN);
	error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, DPAA2_CMD_TK(&cmd,
	    ni_token), eth_bca);
	if (error) {
		device_printf(dev, "%s: failed to add broadcast physical "
		    "address to the MAC filtering table\n", __func__);
		goto close_ni;
	}

	/* Set the maximum allowed length for received frames. */
	error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, DPAA2_ETH_MFL);
	if (error) {
		device_printf(dev, "%s: failed to set maximum length for "
		    "received frames\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/**
 * @brief Configure QBMan channels and register data availability
 * notifications.
 *
 * The number of channels is the smallest of: allocated DPIO resources,
 * allocated DPCON resources, DPAA2_MAX_CHANNELS and the number of DPNI
 * queues. One RxError queue is set up on the first channel.
 */
static int
dpaa2_ni_setup_channels(device_t dev)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	device_t iodev, condev, bpdev;
	uint32_t io_n, con_n;
	int error;

	/* Count contiguously allocated DPIO and DPCON resources. */
	for (io_n = 0; io_n < DPAA2_NI_IO_RES_NUM; io_n++) {
		if (sc->res[DPAA2_NI_IO_RID(io_n)] == NULL) {
			break;
		}
	}
	for (con_n = 0; con_n < DPAA2_NI_CON_RES_NUM; con_n++) {
		if (sc->res[DPAA2_NI_CON_RID(con_n)] == NULL) {
			break;
		}
	}

	/* Clamp to the limiting factor. */
	sc->chan_n = io_n < con_n ? io_n : con_n;
	if (sc->chan_n > DPAA2_MAX_CHANNELS) {
		sc->chan_n = DPAA2_MAX_CHANNELS;
	}
	if (sc->chan_n > sc->attr.num.queues) {
		sc->chan_n = sc->attr.num.queues;
	}

	KASSERT(sc->chan_n > 0u, ("%s: positive number of channels expected: "
	    "chan_n=%d", __func__, sc->chan_n));

	device_printf(dev, "channels=%d\n", sc->chan_n);

	for (uint32_t i = 0; i < sc->chan_n; i++) {
		iodev = (device_t)rman_get_start(sc->res[DPAA2_NI_IO_RID(i)]);
		condev = (device_t)rman_get_start(sc->res[DPAA2_NI_CON_RID(i)]);
		/* Only one buffer pool available at the moment */
		bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);

		error = dpaa2_chan_setup(dev, iodev, condev, bpdev,
		    &sc->channels[i], i, dpaa2_ni_cleanup_task);
		if (error != 0) {
			device_printf(dev, "%s: dpaa2_chan_setup() failed: "
			    "error=%d, chan_id=%d\n", __func__, error, i);
			return (error);
		}
	}

	/* There is exactly one Rx error queue per network interface */
	error = dpaa2_chan_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR);
	if (error != 0) {
		device_printf(dev, "%s: failed to prepare RxError queue: "
		    "error=%d\n", __func__, error);
		return (error);
	}

	return (0);
}
/**
 * @brief Bind DPNI to DPBPs, DPIOs, frame queues and channels.
 *
 * Assigns the buffer pool to the DPNI, sets up ingress traffic
 * distribution (optional), configures error-frame handling, wires every
 * Rx/Tx flow of every channel to its DPCON so CDANs are generated, and
 * finally fetches the QDID used for Tx enqueue operations.
 */
static int
dpaa2_ni_bind(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t bp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *bp_info;
	struct dpaa2_cmd cmd;
	struct dpaa2_ni_pools_cfg pools_cfg;
	struct dpaa2_ni_err_cfg err_cfg;
	struct dpaa2_channel *chan;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Select buffer pool (only one available at the moment). */
	bp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
	bp_info = device_get_ivars(bp_dev);

	/* Configure buffers pool. */
	pools_cfg.pools_num = 1;
	pools_cfg.pools[0].bp_obj_id = bp_info->id;
	pools_cfg.pools[0].backup_flag = 0;
	pools_cfg.pools[0].buf_sz = sc->buf_sz;
	error = DPAA2_CMD_NI_SET_POOLS(dev, child, &cmd, &pools_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set buffer pools\n", __func__);
		goto close_ni;
	}

	/* Setup ingress traffic distribution. */
	/* EOPNOTSUPP is tolerated: hashing is simply unavailable then. */
	error = dpaa2_ni_setup_rx_dist(dev);
	if (error && error != EOPNOTSUPP) {
		device_printf(dev, "%s: failed to setup ingress traffic "
		    "distribution\n", __func__);
		goto close_ni;
	}
	if (bootverbose && error == EOPNOTSUPP) {
		device_printf(dev, "Ingress traffic distribution not "
		    "supported\n");
	}

	/* Configure handling of error frames. */
	err_cfg.err_mask = DPAA2_NI_FAS_RX_ERR_MASK;
	err_cfg.set_err_fas = false;
	err_cfg.action = DPAA2_NI_ERR_DISCARD;
	error = DPAA2_CMD_NI_SET_ERR_BEHAVIOR(dev, child, &cmd, &err_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set errors behavior\n",
		    __func__);
		goto close_ni;
	}

	/* Configure channel queues to generate CDANs. */
	for (uint32_t i = 0; i < sc->chan_n; i++) {
		chan = sc->channels[i];

		/* Setup Rx flows. */
		for (uint32_t j = 0; j < chan->rxq_n; j++) {
			error = dpaa2_ni_setup_rx_flow(dev, &chan->rx_queues[j]);
			if (error) {
				device_printf(dev, "%s: failed to setup Rx "
				    "flow: error=%d\n", __func__, error);
				goto close_ni;
			}
		}

		/* Setup Tx flow. */
		error = dpaa2_ni_setup_tx_flow(dev, &chan->txc_queue);
		if (error) {
			device_printf(dev, "%s: failed to setup Tx "
			    "flow: error=%d\n", __func__, error);
			goto close_ni;
		}
	}

	/* Configure RxError queue to generate CDAN. */
	error = dpaa2_ni_setup_rx_err_flow(dev, &sc->rxe_queue);
	if (error) {
		device_printf(dev, "%s: failed to setup RxError flow: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/*
	 * Get the Queuing Destination ID (QDID) that should be used for frame
	 * enqueue operations.
	 */
	error = DPAA2_CMD_NI_GET_QDID(dev, child, &cmd, DPAA2_NI_QUEUE_TX,
	    &sc->tx_qdid);
	if (error) {
		device_printf(dev, "%s: failed to get Tx queuing destination "
		    "ID\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/**
 * @brief Setup ingress traffic distribution.
 *
 * NOTE: Ingress traffic distribution is valid only when DPNI_OPT_NO_FS option
 *       hasn't been set for DPNI and a number of DPNI queues > 1.
 *
 * Returns whatever dpaa2_ni_set_hash() returns; EOPNOTSUPP is treated as
 * non-fatal by the caller (dpaa2_ni_bind()).
 */
static int
dpaa2_ni_setup_rx_dist(device_t dev)
{
	/*
	 * Have the interface implicitly distribute traffic based on the default
	 * hash key.
	 */
	return (dpaa2_ni_set_hash(dev, DPAA2_RXH_DEFAULT));
}
/**
 * @brief Configure one Rx frame queue to deliver CDANs via its channel's
 * DPCON.
 *
 * Reads the queue configuration from the MC to learn the FQID, then
 * updates it so the queue notifies the DPCON associated with the FQ's
 * channel and carries the FQ pointer back as user context.
 */
static int
dpaa2_ni_setup_rx_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_cmd cmd;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	queue_cfg.type = DPAA2_NI_QUEUE_RX;
	queue_cfg.tc = fq->tc;
	queue_cfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 1;
	/* Stash the FQ pointer so the dequeue path can recover it. */
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	if (bootverbose) {
		/* "%jx" requires uintmax_t, not uint64_t (C99 fprintf). */
		device_printf(dev, "RX queue idx=%d, tc=%d, chan=%d, fqid=%d, "
		    "user_ctx=%#jx\n", fq->flowid, fq->tc, fq->chan->id,
		    fq->fqid, (uintmax_t) fq);
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/**
 * @brief Configure Tx rings and the Tx confirmation queue of a frame queue.
 *
 * For each Tx traffic class the corresponding DPNI queue is queried, a
 * software buf_ring is allocated and pre-populated with Tx buffers, and the
 * TxConf queue is routed to the DPCON of the channel this frame queue
 * belongs to.
 *
 * Returns 0 on success or an errno-style error; RC/NI tokens opened here are
 * closed again on every exit path.
 */
static int
dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_channel *ch = fq->chan;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	struct dpaa2_ni_tx_ring *tx;
	struct dpaa2_buf *buf;
	struct dpaa2_cmd cmd;
	uint32_t tx_rings_n = 0;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	KASSERT(sc->attr.num.tx_tcs <= DPAA2_MAX_TCS,
	    ("%s: too many Tx traffic classes: tx_tcs=%d\n", __func__,
	    sc->attr.num.tx_tcs));
	KASSERT(DPAA2_NI_BUFS_PER_TX <= DPAA2_NI_MAX_BPTX,
	    ("%s: too many Tx buffers (%d): max=%d\n", __func__,
	    DPAA2_NI_BUFS_PER_TX, DPAA2_NI_MAX_BPTX));

	/* Setup Tx rings. */
	for (int i = 0; i < sc->attr.num.tx_tcs; i++) {
		queue_cfg.type = DPAA2_NI_QUEUE_TX;
		queue_cfg.tc = i;
		queue_cfg.idx = fq->flowid;
		queue_cfg.chan_id = fq->chan->id;

		error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
		if (error) {
			device_printf(dev, "%s: failed to obtain Tx queue "
			    "configuration: tc=%d, flowid=%d\n", __func__,
			    queue_cfg.tc, queue_cfg.idx);
			goto close_ni;
		}

		tx = &fq->tx_rings[i];
		tx->fq = fq;
		tx->fqid = queue_cfg.fqid;
		tx->txid = tx_rings_n;

		if (bootverbose) {
			device_printf(dev, "TX queue idx=%d, tc=%d, chan=%d, "
			    "fqid=%d\n", fq->flowid, i, fq->chan->id,
			    queue_cfg.fqid);
		}

		mtx_init(&tx->lock, "dpaa2_tx_ring", NULL, MTX_DEF);

		/* Allocate Tx ring buffer. */
		tx->br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT,
		    &tx->lock);
		if (tx->br == NULL) {
			device_printf(dev, "%s: failed to setup Tx ring buffer"
			    " (2) fqid=%d\n", __func__, tx->fqid);
			/*
			 * Fix: error was still 0 here, so this failure used to
			 * be reported to the caller as success.
			 */
			error = ENOMEM;
			goto close_ni;
		}

		/* Configure Tx buffers */
		for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) {
			/* malloc(9) with M_WAITOK never returns NULL. */
			buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
			    M_WAITOK);
			/* Keep DMA tag and Tx ring linked to the buffer */
			DPAA2_BUF_INIT_TAGOPT(buf, ch->tx_dmat, tx);

			buf->sgt = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
			    M_WAITOK);
			/* Link SGT to DMA tag and back to its Tx buffer */
			DPAA2_BUF_INIT_TAGOPT(buf->sgt, ch->sgt_dmat, buf);

			/*
			 * NOTE(review): a seeding failure is deliberately not
			 * fatal here — the buffer is enqueued regardless and
			 * presumably (re-)seeded on use; confirm with the Tx
			 * path.
			 */
			(void)dpaa2_buf_seed_txb(dev, buf);

			/* Add Tx buffer to the ring */
			buf_ring_enqueue(tx->br, buf);
		}

		tx_rings_n++;
	}

	/* All Tx queues which belong to the same flowid have the same qdbin. */
	fq->tx_qdbin = queue_cfg.qdbin;

	queue_cfg.type = DPAA2_NI_QUEUE_TX_CONF;
	queue_cfg.tc = 0; /* ignored for TxConf queue */
	queue_cfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain TxConf queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	/* Route TxConf frames to the channel's DPCON. */
	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 0;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update TxConf queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/**
 * @brief Route the Rx error frame queue to the channel's DPCON.
 *
 * Looks up the DPNI RxErr queue and redirects it to the DPCON backing the
 * channel of the given frame queue, stashing the fq pointer as user context.
 */
static int
dpaa2_ni_setup_rx_err_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rc_info = device_get_ivars(pdev);
	struct dpaa2_devinfo *ni_info = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg qcfg = {0};
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rc_info->id, &rc_token);
	if (error != 0) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rc_info->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, ni_info->id, &ni_token);
	if (error != 0) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, ni_info->id, error);
		goto close_rc;
	}

	/* The DPCON backing the channel of this frame queue. */
	con_info = device_get_ivars(fq->chan->con_dev);

	/* Look the RxErr queue up; tc and idx are ignored for this type. */
	qcfg.type = DPAA2_NI_QUEUE_RX_ERR;
	qcfg.tc = fq->tc;
	qcfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &qcfg);
	if (error != 0) {
		device_printf(dev, "%s: failed to obtain RxErr queue "
		    "configuration\n", __func__);
		goto close_ni;
	}
	fq->fqid = qcfg.fqid;

	/* Point the queue at the DPCON with this fq as its user context. */
	qcfg.dest_id = con_info->id;
	qcfg.dest_type = DPAA2_NI_DEST_DPCON;
	qcfg.priority = 1;
	qcfg.user_ctx = (uint64_t)(uintmax_t) fq;
	qcfg.options = DPAA2_NI_QUEUE_OPT_USER_CTX | DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &qcfg);
	if (error != 0) {
		device_printf(dev, "%s: failed to update RxErr queue "
		    "configuration\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/**
 * @brief Configure DPNI object to generate interrupts.
 *
 * Allocates MSI vectors, hooks up the interrupt handler and enables the
 * DPNI link-changed/endpoint-changed interrupt sources.
 *
 * Returns 0 on success or an errno-style error; tokens are closed on all
 * exit paths.
 */
static int
dpaa2_ni_setup_irqs(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Configure IRQs. */
	error = dpaa2_ni_setup_msi(sc);
	if (error) {
		device_printf(dev, "%s: failed to allocate MSI\n", __func__);
		goto close_ni;
	}
	if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) {
		device_printf(dev, "%s: failed to allocate IRQ resource\n",
		    __func__);
		/* Fix: error was still 0 here — propagate a real error. */
		error = ENXIO;
		goto close_ni;
	}
	if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dpaa2_ni_intr, sc, &sc->intr)) {
		device_printf(dev, "%s: failed to setup IRQ resource\n",
		    __func__);
		/* Fix: error was still 0 here — propagate a real error. */
		error = ENXIO;
		goto close_ni;
	}

	error = DPAA2_CMD_NI_SET_IRQ_MASK(dev, child, &cmd, DPNI_IRQ_INDEX,
	    DPNI_IRQ_LINK_CHANGED | DPNI_IRQ_EP_CHANGED);
	if (error) {
		device_printf(dev, "%s: failed to set DPNI IRQ mask\n",
		    __func__);
		goto close_ni;
	}
	error = DPAA2_CMD_NI_SET_IRQ_ENABLE(dev, child, &cmd, DPNI_IRQ_INDEX,
	    true);
	if (error) {
		device_printf(dev, "%s: failed to enable DPNI IRQ\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/**
 * @brief Allocate MSI interrupts for DPNI.
 *
 * Fills sc->irq_rid[] with the resource IDs of the allocated vectors.
 * Returns 0 on success or EINVAL if MSI allocation failed.
 */
static int
dpaa2_ni_setup_msi(struct dpaa2_ni_softc *sc)
{
	int val;

	val = pci_msi_count(sc->dev);
	/* Fix: the message used to print DPAA2_IO_MSI_COUNT (copy/paste). */
	if (val < DPAA2_NI_MSI_COUNT)
		device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val,
		    DPAA2_NI_MSI_COUNT);
	val = MIN(val, DPAA2_NI_MSI_COUNT);

	if (pci_alloc_msi(sc->dev, &val) != 0)
		return (EINVAL);

	/* MSI resource IDs start at 1. */
	for (int i = 0; i < val; i++)
		sc->irq_rid[i] = i + 1;

	return (0);
}
/**
 * @brief Update DPNI according to the updated interface capabilities.
 *
 * Mirrors the IFCAP_RXCSUM/IFCAP_TXCSUM state of the ifnet into the DPNI
 * L3/L4 checksum validation and generation offloads.
 */
static int
dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *sc)
{
	const bool en_rxcsum = if_getcapenable(sc->ifp) & IFCAP_RXCSUM;
	const bool en_txcsum = if_getcapenable(sc->ifp) & IFCAP_TXCSUM;
	device_t pdev = device_get_parent(sc->dev);
	device_t dev = sc->dev;
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Setup checksums validation. */
	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
	    DPAA2_NI_OFL_RX_L3_CSUM, en_rxcsum);
	if (error) {
		device_printf(dev, "%s: failed to %s L3 checksum validation\n",
		    __func__, en_rxcsum ? "enable" : "disable");
		goto close_ni;
	}
	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
	    DPAA2_NI_OFL_RX_L4_CSUM, en_rxcsum);
	if (error) {
		device_printf(dev, "%s: failed to %s L4 checksum validation\n",
		    __func__, en_rxcsum ? "enable" : "disable");
		goto close_ni;
	}

	/* Setup checksums generation. */
	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
	    DPAA2_NI_OFL_TX_L3_CSUM, en_txcsum);
	if (error) {
		device_printf(dev, "%s: failed to %s L3 checksum generation\n",
		    __func__, en_txcsum ? "enable" : "disable");
		goto close_ni;
	}
	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
	    DPAA2_NI_OFL_TX_L4_CSUM, en_txcsum);
	if (error) {
		device_printf(dev, "%s: failed to %s L4 checksum generation\n",
		    __func__, en_txcsum ? "enable" : "disable");
		goto close_ni;
	}

	/* Consistency: close with an explicit token like sibling functions. */
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/**
 * @brief Update DPNI according to the updated interface flags.
 *
 * Mirrors IFF_PROMISC/IFF_ALLMULTI into the DPNI unicast/multicast
 * promiscuous modes. IFF_PROMISC implies multicast promiscuity as well.
 */
static int
dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *sc)
{
	const bool en_promisc = if_getflags(sc->ifp) & IFF_PROMISC;
	const bool en_allmulti = if_getflags(sc->ifp) & IFF_ALLMULTI;
	device_t pdev = device_get_parent(sc->dev);
	device_t dev = sc->dev;
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	error = DPAA2_CMD_NI_SET_MULTI_PROMISC(dev, child, &cmd,
	    en_promisc ? true : en_allmulti);
	if (error) {
		/*
		 * Fix: report the value actually applied; the message used to
		 * pick enable/disable from en_allmulti alone.
		 */
		device_printf(dev, "%s: failed to %s multicast promiscuous "
		    "mode\n", __func__,
		    (en_promisc || en_allmulti) ? "enable" : "disable");
		goto close_ni;
	}

	error = DPAA2_CMD_NI_SET_UNI_PROMISC(dev, child, &cmd, en_promisc);
	if (error) {
		device_printf(dev, "%s: failed to %s unicast promiscuous mode\n",
		    __func__, en_promisc ? "enable" : "disable");
		goto close_ni;
	}

	/* Consistency: close with an explicit token like sibling functions. */
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/**
 * @brief Attach DPNI and per-channel statistics sysctls.
 *
 * Creates the "stats" node (firmware-backed counters via
 * dpaa2_ni_collect_stats() plus software counters from the softc) and a
 * "channels" node with one sub-node per configured channel.
 *
 * Always returns 0.
 */
static int
dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *node, *node2;
	struct sysctl_oid_list *parent, *parent2;
	char cbuf[128];
	int i;

	ctx = device_get_sysctl_ctx(sc->dev);
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	/* Add DPNI statistics. */
	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Statistics");
	parent = SYSCTL_CHILDREN(node);
	/* The OID number doubles as the index into dpni_stat_sysctls[]. */
	for (i = 0; i < DPAA2_NI_STAT_SYSCTLS; ++i) {
		SYSCTL_ADD_PROC(ctx, parent, i, dpni_stat_sysctls[i].name,
		    CTLTYPE_U64 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_stats,
		    "IU", dpni_stat_sysctls[i].desc);
	}
	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_anomaly_frames",
	    CTLFLAG_RD, &sc->rx_anomaly_frames,
	    "Rx frames in the buffers outside of the buffer pools");
	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_single_buf_frames",
	    CTLFLAG_RD, &sc->rx_single_buf_frames,
	    "Rx frames in single buffers");
	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_sg_buf_frames",
	    CTLFLAG_RD, &sc->rx_sg_buf_frames,
	    "Rx frames in scatter/gather list");
	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_enq_rej_frames",
	    CTLFLAG_RD, &sc->rx_enq_rej_frames,
	    "Enqueue rejected by QMan");
	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_ieoi_err_frames",
	    CTLFLAG_RD, &sc->rx_ieoi_err_frames,
	    "QMan IEOI error");
	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_single_buf_frames",
	    CTLFLAG_RD, &sc->tx_single_buf_frames,
	    "Tx single buffer frames");
	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_sg_frames",
	    CTLFLAG_RD, &sc->tx_sg_frames,
	    "Tx S/G frames");
	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_num",
	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_num,
	    "IU", "number of Rx buffers in the buffer pool");
	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_free",
	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_free,
	    "IU", "number of free Rx buffers in the buffer pool");

	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	/* Add channels statistics. */
	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "channels",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
	parent = SYSCTL_CHILDREN(node);
	/* Fix: reuse the outer loop variable instead of shadowing it. */
	for (i = 0; i < sc->chan_n; i++) {
		snprintf(cbuf, sizeof(cbuf), "%d", i);
		node2 = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, cbuf,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channel");
		parent2 = SYSCTL_CHILDREN(node2);
		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_frames",
		    CTLFLAG_RD, &sc->channels[i]->tx_frames,
		    "Tx frames counter");
		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_dropped",
		    CTLFLAG_RD, &sc->channels[i]->tx_dropped,
		    "Tx dropped counter");
	}

	return (0);
}
/**
 * @brief Create DMA tags for the Rx distribution key and the QoS key config.
 *
 * Both tags describe single-segment, page-aligned buffers; the created tags
 * are stored in sc->rxd_dmat and sc->qos_dmat.
 */
static int
dpaa2_ni_setup_dma(struct dpaa2_ni_softc *sc)
{
	device_t dev = sc->dev;
	int err;

	KASSERT((sc->buf_align == BUF_ALIGN) || (sc->buf_align == BUF_ALIGN_V1),
	    ("unexpected buffer alignment: %d\n", sc->buf_align));

	/* DMA tag for Rx distribution key. */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(dev),
	    PAGE_SIZE, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* low restricted addr */
	    BUS_SPACE_MAXADDR,		/* high restricted addr */
	    NULL, NULL,			/* filter, filterarg */
	    DPAA2_CLASSIFIER_DMA_SIZE, 1, /* maxsize, nsegments */
	    DPAA2_CLASSIFIER_DMA_SIZE, 0, /* maxsegsize, flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rxd_dmat);
	if (err != 0) {
		device_printf(dev, "%s: failed to create DMA tag for Rx "
		    "distribution key\n", __func__);
		return (err);
	}

	/* DMA tag for the QoS table key configuration buffer. */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(dev),
	    PAGE_SIZE, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* low restricted addr */
	    BUS_SPACE_MAXADDR,		/* high restricted addr */
	    NULL, NULL,			/* filter, filterarg */
	    ETH_QOS_KCFG_BUF_SIZE, 1,	/* maxsize, nsegments */
	    ETH_QOS_KCFG_BUF_SIZE, 0,	/* maxsegsize, flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->qos_dmat);
	if (err != 0) {
		device_printf(dev, "%s: failed to create DMA tag for QoS key\n",
		    __func__);
		return (err);
	}

	return (0);
}
/**
 * @brief Configure buffer layouts of the different DPNI queues.
 *
 * Selects the Rx/Tx buffer alignment and size according to the WRIOP
 * revision, programs the Tx, TxConf and Rx buffer layouts and caches the
 * Tx data offset required by the hardware in sc->tx_data_off.
 */
static int
dpaa2_ni_set_buf_layout(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_ni_buf_layout buf_layout = {0};
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	/* Fix: the message claimed DPMAC, but this opens the DPNI. */
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/*
	 * Select Rx/Tx buffer alignment. It's necessary to ensure that the
	 * buffer size seen by WRIOP is a multiple of 64 or 256 bytes depending
	 * on the WRIOP version.
	 */
	sc->buf_align = (sc->attr.wriop_ver == WRIOP_VERSION(0, 0, 0) ||
	    sc->attr.wriop_ver == WRIOP_VERSION(1, 0, 0))
	    ? BUF_ALIGN_V1 : BUF_ALIGN;

	/*
	 * We need to ensure that the buffer size seen by WRIOP is a multiple
	 * of 64 or 256 bytes depending on the WRIOP version.
	 */
	sc->buf_sz = ALIGN_DOWN(DPAA2_RX_BUF_SIZE, sc->buf_align);

	if (bootverbose) {
		device_printf(dev, "Rx/Tx buffers: size=%d, alignment=%d\n",
		    sc->buf_sz, sc->buf_align);
	}

	/*
	 *    Frame Descriptor       Tx buffer layout
	 *
	 *                ADDR -> |---------------------|
	 *                        | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
	 *                        |---------------------|
	 *                        | HW FRAME ANNOTATION | BUF_TX_HWA_SIZE bytes
	 *                        |---------------------|
	 *                        |    DATA HEADROOM    |
	 *       ADDR + OFFSET -> |---------------------|
	 *                        |                     |
	 *                        |                     |
	 *                        |     FRAME DATA      |
	 *                        |                     |
	 *                        |                     |
	 *                        |---------------------|
	 *                        |    DATA TAILROOM    |
	 *                        |---------------------|
	 *
	 * NOTE: It's for a single buffer frame only.
	 */
	buf_layout.queue_type = DPAA2_NI_QUEUE_TX;
	buf_layout.pd_size = BUF_SWA_SIZE;
	buf_layout.pass_timestamp = true;
	buf_layout.pass_frame_status = true;
	buf_layout.options =
	    BUF_LOPT_PRIV_DATA_SZ |
	    BUF_LOPT_TIMESTAMP | /* requires 128 bytes in HWA */
	    BUF_LOPT_FRAME_STATUS;
	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
	if (error) {
		device_printf(dev, "%s: failed to set Tx buffer layout\n",
		    __func__);
		goto close_ni;
	}

	/* Tx-confirmation buffer layout */
	buf_layout.queue_type = DPAA2_NI_QUEUE_TX_CONF;
	buf_layout.options =
	    BUF_LOPT_TIMESTAMP |
	    BUF_LOPT_FRAME_STATUS;
	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
	if (error) {
		device_printf(dev, "%s: failed to set TxConf buffer layout\n",
		    __func__);
		goto close_ni;
	}

	/*
	 * Driver should reserve the amount of space indicated by this command
	 * as headroom in all Tx frames.
	 */
	error = DPAA2_CMD_NI_GET_TX_DATA_OFF(dev, child, &cmd, &sc->tx_data_off);
	if (error) {
		device_printf(dev, "%s: failed to obtain Tx data offset\n",
		    __func__);
		goto close_ni;
	}

	if (bootverbose) {
		device_printf(dev, "Tx data offset=%d\n", sc->tx_data_off);
	}
	if ((sc->tx_data_off % 64) != 0) {
		/* Fix: "multiplication of" -> "multiple of". */
		device_printf(dev, "Tx data offset (%d) is not a multiple "
		    "of 64 bytes\n", sc->tx_data_off);
	}

	/*
	 *    Frame Descriptor       Rx buffer layout
	 *
	 *                ADDR -> |---------------------|
	 *                        | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
	 *                        |---------------------|
	 *                        | HW FRAME ANNOTATION | BUF_RX_HWA_SIZE bytes
	 *                        |---------------------|
	 *                        |    DATA HEADROOM    | OFFSET-BUF_RX_HWA_SIZE
	 *       ADDR + OFFSET -> |---------------------|
	 *                        |                     |
	 *                        |                     |
	 *                        |     FRAME DATA      |
	 *                        |                     |
	 *                        |                     |
	 *                        |---------------------|
	 *                        |    DATA TAILROOM    | 0 bytes
	 *                        |---------------------|
	 *
	 * NOTE: It's for a single buffer frame only.
	 */
	buf_layout.queue_type = DPAA2_NI_QUEUE_RX;
	buf_layout.pd_size = BUF_SWA_SIZE;
	buf_layout.fd_align = sc->buf_align;
	buf_layout.head_size = sc->tx_data_off - BUF_RX_HWA_SIZE - BUF_SWA_SIZE;
	buf_layout.tail_size = 0;
	buf_layout.pass_frame_status = true;
	buf_layout.pass_parser_result = true;
	buf_layout.pass_timestamp = true;
	buf_layout.options =
	    BUF_LOPT_PRIV_DATA_SZ |
	    BUF_LOPT_DATA_ALIGN |
	    BUF_LOPT_DATA_HEAD_ROOM |
	    BUF_LOPT_DATA_TAIL_ROOM |
	    BUF_LOPT_FRAME_STATUS |
	    BUF_LOPT_PARSER_RESULT |
	    BUF_LOPT_TIMESTAMP;
	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
	if (error) {
		device_printf(dev, "%s: failed to set Rx buffer layout\n",
		    __func__);
		goto close_ni;
	}

	error = 0;
close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/**
 * @brief Enable Rx/Tx pause frames.
 *
 * NOTE: DPNI stops sending when a pause frame is received (Rx frame) or DPNI
 * itself generates pause frames (Tx frame).
 *
 * The resulting link options are cached in sc->link_options.
 */
static int
dpaa2_ni_set_pause_frame(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_ni_link_cfg link_cfg = {0};
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	/* Fix: the message claimed DPMAC, but this opens the DPNI. */
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	error = DPAA2_CMD_NI_GET_LINK_CFG(dev, child, &cmd, &link_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain link configuration: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/* Enable both Rx and Tx pause frames by default. */
	link_cfg.options |= DPAA2_NI_LINK_OPT_PAUSE;
	link_cfg.options &= ~DPAA2_NI_LINK_OPT_ASYM_PAUSE;

	error = DPAA2_CMD_NI_SET_LINK_CFG(dev, child, &cmd, &link_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set link configuration: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	sc->link_options = link_cfg.options;
	error = 0;
close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/**
 * @brief Configure QoS table to determine the traffic class for the received
 * frame.
 *
 * Skipped (returning 0) when only one Rx traffic class exists or the DPNI
 * lacks key-masking support. Otherwise a DMA-visible buffer for the key
 * configuration is allocated (kept in sc->qos_kcfg) and the QoS table is
 * programmed and cleared.
 */
static int
dpaa2_ni_set_qos_table(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_ni_qos_table tbl;
	struct dpaa2_buf *buf = &sc->qos_kcfg;
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	if (sc->attr.num.rx_tcs == 1 ||
	    !(sc->attr.options & DPNI_OPT_HAS_KEY_MASKING)) {
		if (bootverbose) {
			device_printf(dev, "Ingress traffic classification is "
			    "not supported\n");
		}
		return (0);
	}

	/*
	 * Allocate a buffer visible to the device to hold the QoS table key
	 * configuration.
	 */
	if (__predict_true(buf->dmat == NULL)) {
		buf->dmat = sc->qos_dmat;
	}
	error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
	if (error) {
		device_printf(dev, "%s: failed to allocate a buffer for QoS key "
		    "configuration\n", __func__);
		goto err_exit;
	}
	error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr,
	    ETH_QOS_KCFG_BUF_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr,
	    BUS_DMA_NOWAIT);
	if (error) {
		device_printf(dev, "%s: failed to map QoS key configuration "
		    "buffer into bus space\n", __func__);
		goto err_exit;
	}

	/*
	 * NOTE(review): error paths below do not unload/free the DMA buffer
	 * allocated above — presumably released at detach via sc->qos_kcfg;
	 * confirm.
	 */
	DPAA2_CMD_INIT(&cmd);
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	/* Fix: the message claimed DPMAC, but this opens the DPNI. */
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	tbl.default_tc = 0;
	tbl.discard_on_miss = false;
	tbl.keep_entries = false;
	tbl.kcfg_busaddr = buf->paddr;
	error = DPAA2_CMD_NI_SET_QOS_TABLE(dev, child, &cmd, &tbl);
	if (error) {
		device_printf(dev, "%s: failed to set QoS table\n", __func__);
		goto close_ni;
	}
	error = DPAA2_CMD_NI_CLEAR_QOS_TABLE(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to clear QoS table\n", __func__);
		goto close_ni;
	}

	error = 0;
close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/**
 * @brief Determine and program the primary MAC address of the DPNI.
 *
 * Preference order: (1) the MAC address of the physical port the DPNI is
 * connected to, (2) the primary MAC address already set in the DPNI
 * attributes, (3) a randomly generated address. The chosen address is
 * stored in sc->mac.addr.
 */
static int
dpaa2_ni_set_mac_addr(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	if_t ifp = sc->ifp;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	struct ether_addr rnd_mac_addr;
	uint16_t rc_token, ni_token;
	uint8_t mac_addr[ETHER_ADDR_LEN];
	uint8_t dpni_mac_addr[ETHER_ADDR_LEN];
	int error;

	DPAA2_CMD_INIT(&cmd);
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	/* Fix: the message claimed DPMAC, but this opens the DPNI. */
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/*
	 * Get the MAC address associated with the physical port, if the DPNI is
	 * connected to a DPMAC directly associated with one of the physical
	 * ports.
	 */
	error = DPAA2_CMD_NI_GET_PORT_MAC_ADDR(dev, child, &cmd, mac_addr);
	if (error) {
		device_printf(dev, "%s: failed to obtain the MAC address "
		    "associated with the physical port\n", __func__);
		goto close_ni;
	}

	/* Get primary MAC address from the DPNI attributes. */
	error = DPAA2_CMD_NI_GET_PRIM_MAC_ADDR(dev, child, &cmd, dpni_mac_addr);
	if (error) {
		device_printf(dev, "%s: failed to obtain primary MAC address\n",
		    __func__);
		goto close_ni;
	}

	if (!ETHER_IS_ZERO(mac_addr)) {
		/* Set MAC address of the physical port as DPNI's primary one. */
		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
		    mac_addr);
		if (error) {
			device_printf(dev, "%s: failed to set primary MAC "
			    "address\n", __func__);
			goto close_ni;
		}
		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
			sc->mac.addr[i] = mac_addr[i];
		}
	} else if (ETHER_IS_ZERO(dpni_mac_addr)) {
		/* Generate random MAC address as DPNI's primary one. */
		ether_gen_addr(ifp, &rnd_mac_addr);
		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
			mac_addr[i] = rnd_mac_addr.octet[i];
		}
		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
		    mac_addr);
		if (error) {
			device_printf(dev, "%s: failed to set random primary "
			    "MAC address\n", __func__);
			goto close_ni;
		}
		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
			sc->mac.addr[i] = mac_addr[i];
		}
	} else {
		/* Keep the primary MAC address already set in the DPNI. */
		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
			sc->mac.addr[i] = dpni_mac_addr[i];
		}
	}

	error = 0;
close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/**
 * @brief miibus(4) status-change callback: mirror PHY link state into DPMAC.
 *
 * Translates the MII media status into a LINK_STATE_* value and, if it
 * differs from the cached sc->link_state, informs the DPMAC about the new
 * link state over the management command interface. No-op for fixed links,
 * when no PHY is attached, or before the interface is marked running.
 */
static void
dpaa2_ni_miibus_statchg(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_mac_link_state mac_link = { 0 };
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, mac_token;
	int error, link_state;

	/* Nothing to mirror without a PHY or with a fixed link. */
	if (sc->fixed_link || sc->mii == NULL) {
		return;
	}
	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) {
		/*
		 * We will receive calls and adjust the changes but
		 * not have setup everything (called before dpaa2_ni_init()
		 * really). This will then setup the link and internal
		 * sc->link_state and not trigger the update once needed,
		 * so basically dpmac never knows about it.
		 */
		return;
	}

	/*
	 * Note: ifp link state will only be changed AFTER we are called so we
	 * cannot rely on ifp->if_linkstate here.
	 */
	if (sc->mii->mii_media_status & IFM_AVALID) {
		if (sc->mii->mii_media_status & IFM_ACTIVE) {
			link_state = LINK_STATE_UP;
		} else {
			link_state = LINK_STATE_DOWN;
		}
	} else {
		link_state = LINK_STATE_UNKNOWN;
	}

	if (link_state != sc->link_state) {
		sc->link_state = link_state;

		DPAA2_CMD_INIT(&cmd);
		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
		    &rc_token);
		if (error) {
			device_printf(dev, "%s: failed to open resource "
			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
			    error);
			goto err_exit;
		}
		error = DPAA2_CMD_MAC_OPEN(dev, child, &cmd, sc->mac.dpmac_id,
		    &mac_token);
		if (error) {
			device_printf(sc->dev, "%s: failed to open DPMAC: "
			    "id=%d, error=%d\n", __func__, sc->mac.dpmac_id,
			    error);
			goto close_rc;
		}

		/* Only a definite up/down state is forwarded to the DPMAC. */
		if (link_state == LINK_STATE_UP ||
		    link_state == LINK_STATE_DOWN) {
			/* Update DPMAC link state. */
			mac_link.supported = sc->mii->mii_media.ifm_media;
			mac_link.advert = sc->mii->mii_media.ifm_media;
			/* TODO: Where to get the rate from? ifmedia_baudrate? */
			mac_link.rate = 1000;
			mac_link.options =
			    DPAA2_MAC_LINK_OPT_AUTONEG |
			    DPAA2_MAC_LINK_OPT_PAUSE;
			mac_link.up = (link_state == LINK_STATE_UP) ? true : false;
			mac_link.state_valid = true;

			/* Inform DPMAC about link state. */
			error = DPAA2_CMD_MAC_SET_LINK_STATE(dev, child, &cmd,
			    &mac_link);
			if (error) {
				device_printf(sc->dev, "%s: failed to set DPMAC "
				    "link state: id=%d, error=%d\n", __func__,
				    sc->mac.dpmac_id, error);
			}
		}
		(void)DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);
		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
		    rc_token));
	}
	return;

close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return;
}
/**
* @brief Callback function to process media change request.
*/
static int
dpaa2_ni_media_change_locked(struct dpaa2_ni_softc *sc)
{
DPNI_LOCK_ASSERT(sc);
if (sc->mii) {
mii_mediachg(sc->mii);
sc->media_status = sc->mii->mii_media.ifm_media;
} else if (sc->fixed_link) {
if_printf(sc->ifp, "%s: can't change media in fixed mode\n",
__func__);
}
return (0);
}
/**
 * @brief ifmedia change handler: take the DPNI lock and delegate.
 */
static int
dpaa2_ni_media_change(if_t ifp)
{
	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
	int err;

	DPNI_LOCK(sc);
	err = dpaa2_ni_media_change_locked(sc);
	DPNI_UNLOCK(sc);

	return (err);
}
/**
 * @brief Callback function to process media status request.
 *
 * Polls the PHY (when present) and copies the active media and status into
 * the ifmediareq under the DPNI lock.
 */
static void
dpaa2_ni_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);

	DPNI_LOCK(sc);
	if (sc->mii != NULL) {
		mii_pollstat(sc->mii);
		ifmr->ifm_active = sc->mii->mii_media_active;
		ifmr->ifm_status = sc->mii->mii_media_status;
	}
	DPNI_UNLOCK(sc);
}
/**
* @brief Callout function to check and update media status.
*/
static void
dpaa2_ni_media_tick(void *arg)
{
struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
/* Check for media type change */
if (sc->mii) {
mii_tick(sc->mii);
if (sc->media_status != sc->mii->mii_media.ifm_media) {
printf("%s: media type changed (ifm_media=%x)\n",
__func__, sc->mii->mii_media.ifm_media);
dpaa2_ni_media_change(sc->ifp);
}
}
/* Schedule another timeout one second from now */
callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
}
/**
 * @brief if_init callback: enable the DPNI and mark the interface running.
 *
 * Opens the resource container and the network interface, issues the DPNI
 * enable command, sets IFF_DRV_RUNNING, triggers a media change (which in
 * turn updates the link state via dpaa2_ni_miibus_statchg()) and arms the
 * media-polling callout.
 *
 * NOTE(review): a failing DPAA2_CMD_NI_ENABLE() is only logged — the
 * interface is still marked running afterwards; confirm this is intended.
 */
static void
dpaa2_ni_init(void *arg)
{
	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
	if_t ifp = sc->ifp;
	device_t pdev = device_get_parent(sc->dev);
	device_t dev = sc->dev;
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	/* Already running: nothing to do. */
	DPNI_LOCK(sc);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		DPNI_UNLOCK(sc);
		return;
	}
	DPNI_UNLOCK(sc);

	DPAA2_CMD_INIT(&cmd);
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Enable frame reception/transmission on the DPNI (failure logged only). */
	error = DPAA2_CMD_NI_ENABLE(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to enable DPNI: error=%d\n",
		    __func__, error);
	}

	DPNI_LOCK(sc);
	/* Announce we are up and running and can queue packets. */
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	if (sc->mii) {
		/*
		 * mii_mediachg() will trigger a call into
		 * dpaa2_ni_miibus_statchg() to setup link state.
		 */
		dpaa2_ni_media_change_locked(sc);
	}
	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
	DPNI_UNLOCK(sc);

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return;

close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return;
}
/*
 * if_transmit callback: select a channel for the mbuf and enqueue it on
 * that channel's software transmit ring.
 *
 * When the mbuf carries a flow id, the channel owning the Rx queue with the
 * matching FQID is preferred so the packet stays on the same flow/CPU;
 * otherwise channel 0 is used. Per if_transmit(9) convention the mbuf is
 * always consumed: it is freed here on any failure path.
 */
static int
dpaa2_ni_transmit(if_t ifp, struct mbuf *m)
{
    struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
    struct dpaa2_channel *ch;
    uint32_t fqid;
    int chidx = 0, error;

    if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
        /*
         * Returning 0 tells the stack the mbuf was consumed, so it
         * must be freed here — otherwise it is leaked while the
         * interface is down.
         */
        m_freem(m);
        return (0);
    }

    if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
        fqid = m->m_pkthdr.flowid;
        for (int i = 0; i < sc->chan_n; i++) {
            ch = sc->channels[i];
            for (int j = 0; j < ch->rxq_n; j++) {
                if (fqid == ch->rx_queues[j].fqid) {
                    chidx = ch->flowid;
                    goto found;
                }
            }
        }
    }
found:
    ch = sc->channels[chidx];
    error = buf_ring_enqueue(ch->xmit_br, m);
    if (__predict_false(error != 0)) {
        m_freem(m);
    } else {
        /* The cleanup task drains xmit_br and pushes frames to QBMan. */
        taskqueue_enqueue(ch->cleanup_tq, &ch->cleanup_task);
    }

    return (error);
}
/*
 * if_qflush callback: only the generic ifnet queue is flushed; frames
 * already handed to QBMan cannot be drained yet.
 */
static void
dpaa2_ni_qflush(if_t ifp)
{
    /* TODO: Find a way to drain Tx queues in QBMan. */
    if_qflush(ifp);
}
/*
 * ioctl handler for the DPAA2 network interface.
 *
 * Opens the resource container and DPNI once up front because several of
 * the commands (MTU, capabilities, flags) must talk to the MC firmware.
 * Unhandled commands fall through to ether_ioctl().
 *
 * Returns 0 or an errno; note the open/close failures return `error`
 * while command-specific failures are returned via `rc`.
 */
static int
dpaa2_ni_ioctl(if_t ifp, u_long c, caddr_t data)
{
    struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
    struct ifreq *ifr = (struct ifreq *) data;
    device_t pdev = device_get_parent(sc->dev);
    device_t dev = sc->dev;
    device_t child = dev;
    struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
    struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
    struct dpaa2_cmd cmd;
    uint32_t changed = 0;
    uint16_t rc_token, ni_token;
    int mtu, error, rc = 0;

    DPAA2_CMD_INIT(&cmd);

    error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
    if (error) {
        device_printf(dev, "%s: failed to open resource container: "
            "id=%d, error=%d\n", __func__, rcinfo->id, error);
        goto err_exit;
    }
    error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
    if (error) {
        device_printf(dev, "%s: failed to open network interface: "
            "id=%d, error=%d\n", __func__, dinfo->id, error);
        goto close_rc;
    }

    switch (c) {
    case SIOCSIFMTU:
        DPNI_LOCK(sc);
        mtu = ifr->ifr_mtu;
        if (mtu < ETHERMIN || mtu > ETHERMTU_JUMBO) {
            DPNI_UNLOCK(sc);
            error = EINVAL;
            goto close_ni;
        }
        if_setmtu(ifp, mtu);
        DPNI_UNLOCK(sc);

        /* Update maximum frame length. */
        error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd,
            mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
        if (error) {
            device_printf(dev, "%s: failed to update maximum frame "
                "length: error=%d\n", __func__, error);
            goto close_ni;
        }
        break;
    case SIOCSIFCAP:
        /* Toggle only the capability bits that actually changed. */
        changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
        if (changed & IFCAP_HWCSUM) {
            if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM) {
                if_setcapenablebit(ifp, IFCAP_HWCSUM, 0);
            } else {
                if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
            }
        }
        rc = dpaa2_ni_setup_if_caps(sc);
        if (rc) {
            printf("%s: failed to update iface capabilities: "
                "error=%d\n", __func__, rc);
            rc = ENXIO;
        }
        break;
    case SIOCSIFFLAGS:
        DPNI_LOCK(sc);
        if (if_getflags(ifp) & IFF_UP) {
            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
                /* Re-program promisc/allmulti on change only. */
                changed = if_getflags(ifp) ^ sc->if_flags;
                if (changed & IFF_PROMISC ||
                    changed & IFF_ALLMULTI) {
                    rc = dpaa2_ni_setup_if_flags(sc);
                }
            } else {
                /* dpaa2_ni_init() takes the DPNI lock itself. */
                DPNI_UNLOCK(sc);
                dpaa2_ni_init(sc);
                DPNI_LOCK(sc);
            }
        } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
            /* FIXME: Disable DPNI. See dpaa2_ni_init(). */
        }

        sc->if_flags = if_getflags(ifp);
        DPNI_UNLOCK(sc);
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        DPNI_LOCK(sc);
        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
            /* Drop the lock: the update opens its own MC session. */
            DPNI_UNLOCK(sc);
            rc = dpaa2_ni_update_mac_filters(ifp);
            if (rc) {
                device_printf(dev, "%s: failed to update MAC "
                    "filters: error=%d\n", __func__, rc);
            }
            DPNI_LOCK(sc);
        }
        DPNI_UNLOCK(sc);
        break;
    case SIOCGIFMEDIA:
    case SIOCSIFMEDIA:
        if (sc->mii)
            rc = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, c);
        else if(sc->fixed_link) {
            rc = ifmedia_ioctl(ifp, ifr, &sc->fixed_ifmedia, c);
        }
        break;
    default:
        rc = ether_ioctl(ifp, c, data);
        break;
    }

    (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
    (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
    return (rc);

close_ni:
    (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
    (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
    return (error);
}
/*
 * Rebuild the DPNI multicast MAC filter table from the ifnet's current
 * link-level multicast address list: clears all multicast filters in the
 * firmware, then re-adds each address via the dpaa2_ni_add_maddr()
 * callback (which opens its own MC session per address).
 *
 * Returns 0 or the first errno encountered.
 */
static int
dpaa2_ni_update_mac_filters(if_t ifp)
{
    struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
    struct dpaa2_ni_mcaddr_ctx ctx;
    device_t pdev = device_get_parent(sc->dev);
    device_t dev = sc->dev;
    device_t child = dev;
    struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
    struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
    struct dpaa2_cmd cmd;
    uint16_t rc_token, ni_token;
    int error;

    DPAA2_CMD_INIT(&cmd);

    error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
    if (error) {
        device_printf(dev, "%s: failed to open resource container: "
            "id=%d, error=%d\n", __func__, rcinfo->id, error);
        goto err_exit;
    }
    error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
    if (error) {
        device_printf(dev, "%s: failed to open network interface: "
            "id=%d, error=%d\n", __func__, dinfo->id, error);
        goto close_rc;
    }

    /* Remove all multicast MAC filters. */
    error = DPAA2_CMD_NI_CLEAR_MAC_FILTERS(dev, child, &cmd, false, true);
    if (error) {
        device_printf(dev, "%s: failed to clear multicast MAC filters: "
            "error=%d\n", __func__, error);
        goto close_ni;
    }

    /* Re-add every multicast address; the callback records the first error. */
    ctx.ifp = ifp;
    ctx.error = 0;
    ctx.nent = 0;

    if_foreach_llmaddr(ifp, dpaa2_ni_add_maddr, &ctx);

    error = ctx.error;
close_ni:
    (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
    (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
    return (error);
}
/*
 * if_foreach_llmaddr() callback: program one multicast MAC address into
 * the DPNI filter table.
 *
 * Opens a fresh MC session per address. If the firmware refuses the
 * address (table full), the interface falls back to multicast promiscuous
 * mode and iteration effectively stops (ctx->error short-circuits
 * subsequent calls).
 *
 * Returns 1 when the address was counted, 0 otherwise.
 */
static u_int
dpaa2_ni_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
    struct dpaa2_ni_mcaddr_ctx *ctx = arg;
    struct dpaa2_ni_softc *sc = if_getsoftc(ctx->ifp);
    device_t pdev = device_get_parent(sc->dev);
    device_t dev = sc->dev;
    device_t child = dev;
    struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
    struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
    struct dpaa2_cmd cmd;
    uint16_t rc_token, ni_token;
    int error;

    /* A previous iteration failed — skip the rest of the list. */
    if (ctx->error != 0) {
        return (0);
    }

    if (ETHER_IS_MULTICAST(LLADDR(sdl))) {
        DPAA2_CMD_INIT(&cmd);

        error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
            &rc_token);
        if (error) {
            device_printf(dev, "%s: failed to open resource "
                "container: id=%d, error=%d\n", __func__, rcinfo->id,
                error);
            return (0);
        }
        error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
            &ni_token);
        if (error) {
            device_printf(dev, "%s: failed to open network interface: "
                "id=%d, error=%d\n", __func__, dinfo->id, error);
            (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
                rc_token));
            return (0);
        }

        ctx->error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, &cmd,
            LLADDR(sdl));

        (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
            ni_token));
        (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
            rc_token));

        if (ctx->error != 0) {
            device_printf(dev, "%s: can't add more then %d MAC "
                "addresses, switching to the multicast promiscuous "
                "mode\n", __func__, ctx->nent);

            /* Enable multicast promiscuous mode. */
            DPNI_LOCK(sc);
            if_setflagbits(ctx->ifp, IFF_ALLMULTI, 0);
            sc->if_flags |= IFF_ALLMULTI;
            ctx->error = dpaa2_ni_setup_if_flags(sc);
            DPNI_UNLOCK(sc);

            return (0);
        }
        ctx->nent++;
    }

    return (1);
}
/*
 * DPNI interrupt handler: acknowledges the interrupt by reading (and
 * thereby clearing) the IRQ status bits from the MC firmware. The status
 * value itself is not acted upon here.
 */
static void
dpaa2_ni_intr(void *arg)
{
    struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
    device_t pdev = device_get_parent(sc->dev);
    device_t dev = sc->dev;
    device_t child = dev;
    struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
    struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
    struct dpaa2_cmd cmd;
    uint32_t status = ~0u; /* clear all IRQ status bits */
    uint16_t rc_token, ni_token;
    int error;

    DPAA2_CMD_INIT(&cmd);

    error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
    if (error) {
        device_printf(dev, "%s: failed to open resource container: "
            "id=%d, error=%d\n", __func__, rcinfo->id, error);
        goto err_exit;
    }
    error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
    if (error) {
        device_printf(dev, "%s: failed to open network interface: "
            "id=%d, error=%d\n", __func__, dinfo->id, error);
        goto close_rc;
    }

    error = DPAA2_CMD_NI_GET_IRQ_STATUS(dev, child, &cmd, DPNI_IRQ_INDEX,
        &status);
    if (error) {
        device_printf(sc->dev, "%s: failed to obtain IRQ status: "
            "error=%d\n", __func__, error);
    }

    (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
    (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
    return;
}
/**
 * @brief Execute channel's Rx/Tx routines.
 *
 * NOTE: Should not be re-entrant for the same channel. It is achieved by
 * enqueuing the cleanup routine on a single-threaded taskqueue.
 */
static void
dpaa2_ni_cleanup_task(void *arg, int count)
{
    struct dpaa2_channel *ch = arg;
    struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
    int error, rx_done, tx_done;

    for (int pass = 0; pass < DPAA2_CLEAN_BUDGET; pass++) {
        rx_done = dpaa2_ni_rx_cleanup(ch);
        tx_done = dpaa2_ni_tx_cleanup(ch);

        /* Bail out entirely if the interface went down meanwhile. */
        if (__predict_false((if_getdrvflags(sc->ifp) &
            IFF_DRV_RUNNING) == 0)) {
            return;
        }

        /* Stop early once neither direction exhausted its budget. */
        if ((tx_done != DPAA2_TX_BUDGET) && (rx_done != DPAA2_RX_BUDGET)) {
            break;
        }
    }

    /* Re-arm channel to generate CDAN */
    error = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, &ch->ctx);
    if (error != 0) {
        panic("%s: failed to rearm channel: chan_id=%d, error=%d\n",
            __func__, ch->id, error);
    }
}
/**
 * @brief Poll frames from a specific channel when CDAN is received.
 *
 * Issues up to DPAA2_RX_BUDGET volatile dequeue (VDQ) pulls into the
 * channel's store and consumes the resulting frames. EALREADY/ENOENT from
 * dpaa2_ni_consume_frames() means the pull is exhausted and polling stops.
 *
 * Returns the number of pull iterations actually performed.
 */
static int
dpaa2_ni_rx_cleanup(struct dpaa2_channel *ch)
{
    struct dpaa2_io_softc *iosc = device_get_softc(ch->io_dev);
    struct dpaa2_swp *swp = iosc->swp;
    struct dpaa2_ni_fq *fq;
    struct dpaa2_buf *buf = &ch->store;
    int budget = DPAA2_RX_BUDGET;
    int error, consumed = 0;

    do {
        error = dpaa2_swp_pull(swp, ch->id, buf, DPAA2_ETH_STORE_FRAMES);
        if (error) {
            device_printf(ch->ni_dev, "%s: failed to pull frames: "
                "chan_id=%d, error=%d\n", __func__, ch->id, error);
            break;
        }
        error = dpaa2_ni_consume_frames(ch, &fq, &consumed);
        if (error == ENOENT || error == EALREADY) {
            break;
        }
        if (error == ETIMEDOUT) {
            device_printf(ch->ni_dev, "%s: timeout to consume "
                "frames: chan_id=%d\n", __func__, ch->id);
        }
    } while (--budget);

    return (DPAA2_RX_BUDGET - budget);
}
/*
 * Drain up to DPAA2_TX_BUDGET mbufs from the channel's software transmit
 * ring and hand each one to dpaa2_ni_tx(). Returns the number of mbufs
 * actually transmitted this pass.
 */
static int
dpaa2_ni_tx_cleanup(struct dpaa2_channel *ch)
{
    struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
    struct dpaa2_ni_tx_ring *tx = &ch->txc_queue.tx_rings[0];
    struct mbuf *m;
    int sent;

    for (sent = 0; sent < DPAA2_TX_BUDGET; sent++) {
        /* The ring itself is protected by xmit_mtx, not by tx->lock. */
        mtx_assert(&ch->xmit_mtx, MA_NOTOWNED);
        mtx_lock(&ch->xmit_mtx);
        m = buf_ring_dequeue_sc(ch->xmit_br);
        mtx_unlock(&ch->xmit_mtx);

        if (__predict_false(m == NULL)) {
            /* TODO: Do not give up easily */
            break;
        }
        dpaa2_ni_tx(sc, ch, tx, m);
    }

    return (sent);
}
static void
dpaa2_ni_tx(struct dpaa2_ni_softc *sc, struct dpaa2_channel *ch,
struct dpaa2_ni_tx_ring *tx, struct mbuf *m)
{
device_t dev = sc->dev;
struct dpaa2_ni_fq *fq = tx->fq;
struct dpaa2_buf *buf, *sgt;
struct dpaa2_fd fd;
struct mbuf *md;
bus_dma_segment_t segs[DPAA2_TX_SEGLIMIT];
int rc, nsegs;
int error;
mtx_assert(&tx->lock, MA_NOTOWNED);
mtx_lock(&tx->lock);
buf = buf_ring_dequeue_sc(tx->br);
mtx_unlock(&tx->lock);
if (__predict_false(buf == NULL)) {
/* TODO: Do not give up easily */
m_freem(m);
return;
} else {
DPAA2_BUF_ASSERT_TXREADY(buf);
buf->m = m;
sgt = buf->sgt;
}
#if defined(INVARIANTS)
struct dpaa2_ni_tx_ring *btx = (struct dpaa2_ni_tx_ring *)buf->opt;
KASSERT(buf->opt == tx, ("%s: unexpected Tx ring", __func__));
KASSERT(btx->fq->chan == ch, ("%s: unexpected channel", __func__));
#endif /* INVARIANTS */
error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs, &nsegs,
BUS_DMA_NOWAIT);
if (__predict_false(error != 0)) {
/* Too many fragments, trying to defragment... */
md = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT);
if (md == NULL) {
device_printf(dev, "%s: m_collapse() failed\n", __func__);
fq->chan->tx_dropped++;
goto err;
}
buf->m = m = md;
error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs,
&nsegs, BUS_DMA_NOWAIT);
if (__predict_false(error != 0)) {
device_printf(dev, "%s: bus_dmamap_load_mbuf_sg() "
"failed: error=%d\n", __func__, error);
fq->chan->tx_dropped++;
goto err;
}
}
error = dpaa2_ni_build_fd(sc, tx, buf, segs, nsegs, &fd);
if (__predict_false(error != 0)) {
device_printf(dev, "%s: failed to build frame descriptor: "
"error=%d\n", __func__, error);
fq->chan->tx_dropped++;
goto err_unload;
}
/* TODO: Enqueue several frames in a single command */
for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) {
/* TODO: Return error codes instead of # of frames */
rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid, &fd, 1);
if (rc == 1) {
break;
}
}
bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_PREWRITE);
if (rc != 1) {
fq->chan->tx_dropped++;
goto err_unload;
} else {
fq->chan->tx_frames++;
}
return;
err_unload:
bus_dmamap_unload(buf->dmat, buf->dmap);
if (sgt->paddr != 0) {
bus_dmamap_unload(sgt->dmat, sgt->dmap);
}
err:
m_freem(buf->m);
buf_ring_enqueue(tx->br, buf);
}
/*
 * Consume every dequeue response currently available from the channel's
 * store and dispatch each frame to the handler matching its queue type.
 *
 * EINPROGRESS means more responses follow; EALREADY/ENOENT marks the last
 * response of the volatile dequeue, in which case ctx.last is raised so
 * the Rx path can push the accumulated mbuf chain up the stack.
 *
 * NOTE(review): the two dispatch switches are intentionally near-identical;
 * only the terminal path sets ctx.last before handing the frame to
 * dpaa2_ni_rx().
 *
 * src/consumed: optional out-params for the source queue and frame count.
 * Returns the last dpaa2_chan_next_frame() status.
 */
static int
dpaa2_ni_consume_frames(struct dpaa2_channel *chan, struct dpaa2_ni_fq **src,
    uint32_t *consumed)
{
    struct dpaa2_ni_fq *fq = NULL;
    struct dpaa2_dq *dq;
    struct dpaa2_fd *fd;
    struct dpaa2_ni_rx_ctx ctx = {
        .head = NULL,
        .tail = NULL,
        .cnt = 0,
        .last = false
    };
    int rc, frames = 0;

    do {
        rc = dpaa2_chan_next_frame(chan, &dq);
        if (rc == EINPROGRESS) {
            if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
                fd = &dq->fdr.fd;
                fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;

                switch (fq->type) {
                case DPAA2_NI_QUEUE_RX:
                    (void)dpaa2_ni_rx(chan, fq, fd, &ctx);
                    break;
                case DPAA2_NI_QUEUE_RX_ERR:
                    (void)dpaa2_ni_rx_err(chan, fq, fd);
                    break;
                case DPAA2_NI_QUEUE_TX_CONF:
                    (void)dpaa2_ni_tx_conf(chan, fq, fd);
                    break;
                default:
                    panic("%s: unknown queue type (1)",
                        __func__);
                }
                frames++;
            }
        } else if (rc == EALREADY || rc == ENOENT) {
            if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
                fd = &dq->fdr.fd;
                fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;

                switch (fq->type) {
                case DPAA2_NI_QUEUE_RX:
                    /*
                     * Last VDQ response (mbuf) in a chain
                     * obtained from the Rx queue.
                     */
                    ctx.last = true;
                    (void)dpaa2_ni_rx(chan, fq, fd, &ctx);
                    break;
                case DPAA2_NI_QUEUE_RX_ERR:
                    (void)dpaa2_ni_rx_err(chan, fq, fd);
                    break;
                case DPAA2_NI_QUEUE_TX_CONF:
                    (void)dpaa2_ni_tx_conf(chan, fq, fd);
                    break;
                default:
                    panic("%s: unknown queue type (2)",
                        __func__);
                }
                frames++;
            }
            break;
        } else {
            panic("%s: should not reach here: rc=%d", __func__, rc);
        }
    } while (true);

    KASSERT(chan->store_idx < chan->store_sz, ("%s: store_idx(%d) >= "
        "store_sz(%d)", __func__, chan->store_idx, chan->store_sz));

    /*
     * VDQ operation pulls frames from a single queue into the store.
     * Return the frame queue and a number of consumed frames as an output.
     */
    if (src != NULL) {
        *src = fq;
    }
    if (consumed != NULL) {
        *consumed = frames;
    }

    return (rc);
}
/**
 * @brief Receive frames.
 *
 * Recovers the driver buffer backing the frame descriptor, wraps the
 * received data in the buffer's mbuf and links it into the per-pull chain
 * in @p ctx; when ctx->last is set the whole chain is pushed up the stack
 * via if_input(). The drained buffer is recycled and, once a full batch
 * of DPAA2_SWP_BUFS_PER_CMD has accumulated, the batch is re-seeded and
 * released back to the QBMan buffer pool in one command.
 */
static int
dpaa2_ni_rx(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq, struct dpaa2_fd *fd,
    struct dpaa2_ni_rx_ctx *ctx)
{
    bus_addr_t paddr = (bus_addr_t)fd->addr;
    /* The frame annotation at the buffer start points back to our buf. */
    struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
    struct dpaa2_buf *buf = fa->buf;
    struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt;
    struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev);
    struct dpaa2_bp_softc *bpsc;
    struct mbuf *m;
    device_t bpdev;
    bus_addr_t released[DPAA2_SWP_BUFS_PER_CMD];
    void *buf_data;
    int buf_len, error, released_n = 0;

    KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
    /*
     * NOTE: Current channel might not be the same as the "buffer" channel
     * and it's fine. It must not be NULL though.
     */
    KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));

    if (__predict_false(paddr != buf->paddr)) {
        panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
            __func__, paddr, buf->paddr);
    }

    /* Per-frame error and format accounting for sysctl(9) counters. */
    switch (dpaa2_ni_fd_err(fd)) {
    case 1: /* Enqueue rejected by QMan */
        sc->rx_enq_rej_frames++;
        break;
    case 2: /* QMan IEOI error */
        sc->rx_ieoi_err_frames++;
        break;
    default:
        break;
    }
    switch (dpaa2_ni_fd_format(fd)) {
    case DPAA2_FD_SINGLE:
        sc->rx_single_buf_frames++;
        break;
    case DPAA2_FD_SG:
        sc->rx_sg_buf_frames++;
        break;
    default:
        break;
    }

    mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
    mtx_lock(&bch->dma_mtx);

    bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTREAD);
    bus_dmamap_unload(buf->dmat, buf->dmap);
    m = buf->m;
    buf_len = dpaa2_ni_fd_data_len(fd);
    buf_data = (uint8_t *)buf->vaddr + dpaa2_ni_fd_offset(fd);
    /* Prepare buffer to be re-cycled */
    buf->m = NULL;
    buf->paddr = 0;
    buf->vaddr = NULL;
    buf->seg.ds_addr = 0;
    buf->seg.ds_len = 0;
    buf->nseg = 0;

    mtx_unlock(&bch->dma_mtx);

    /* Hand ownership of the data to the mbuf taken from the buffer. */
    m->m_flags |= M_PKTHDR;
    m->m_data = buf_data;
    m->m_len = buf_len;
    m->m_pkthdr.len = buf_len;
    m->m_pkthdr.rcvif = sc->ifp;
    m->m_pkthdr.flowid = fq->fqid;
    M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);

    /* Chain the mbuf; the chain is flushed on the last VDQ response. */
    if (ctx->head == NULL) {
        KASSERT(ctx->tail == NULL, ("%s: tail already given?", __func__));
        ctx->head = m;
        ctx->tail = m;
    } else {
        KASSERT(ctx->head != NULL, ("%s: head is NULL", __func__));
        ctx->tail->m_nextpkt = m;
        ctx->tail = m;
    }
    ctx->cnt++;

    if (ctx->last) {
        ctx->tail->m_nextpkt = NULL;
        if_input(sc->ifp, ctx->head);
    }

    /* Keep the buffer to be recycled */
    ch->recycled[ch->recycled_n++] = buf;

    /* Re-seed and release recycled buffers back to the pool */
    if (ch->recycled_n == DPAA2_SWP_BUFS_PER_CMD) {
        /* Release new buffers to the pool if needed */
        taskqueue_enqueue(sc->bp_taskq, &ch->bp_task);

        for (int i = 0; i < ch->recycled_n; i++) {
            buf = ch->recycled[i];
            bch = (struct dpaa2_channel *)buf->opt;

            mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
            mtx_lock(&bch->dma_mtx);
            error = dpaa2_buf_seed_rxb(sc->dev, buf,
                DPAA2_RX_BUF_SIZE, &bch->dma_mtx);
            mtx_unlock(&bch->dma_mtx);

            if (__predict_false(error != 0)) {
                /* TODO: What else to do with the buffer? */
                panic("%s: failed to recycle buffer: error=%d",
                    __func__, error);
            }

            /* Prepare buffer to be released in a single command */
            released[released_n++] = buf->paddr;
        }

        /* There's only one buffer pool for now */
        bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
        bpsc = device_get_softc(bpdev);

        error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid,
            released, released_n);
        if (__predict_false(error != 0)) {
            device_printf(sc->dev, "%s: failed to release buffers "
                "to the pool: error=%d\n", __func__, error);
            return (error);
        }
        ch->recycled_n = 0;
    }

    return (0);
}
/**
 * @brief Receive Rx error frames.
 *
 * The frame's payload is dropped; the backing buffer is handed straight
 * back to the QBMan buffer pool without being re-seeded.
 */
static int
dpaa2_ni_rx_err(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
    struct dpaa2_fd *fd)
{
    bus_addr_t paddr = (bus_addr_t)fd->addr;
    struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
    struct dpaa2_buf *buf = fa->buf;
    struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt;
    struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev);
    device_t bpdev;
    struct dpaa2_bp_softc *bpsc;
    int error;

    KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
    /*
     * NOTE: Current channel might not be the same as the "buffer" channel
     * and it's fine. It must not be NULL though.
     */
    KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));

    if (__predict_false(paddr != buf->paddr)) {
        panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
            __func__, paddr, buf->paddr);
    }

    /* There's only one buffer pool for now */
    bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
    bpsc = device_get_softc(bpdev);

    /* Release buffer to QBMan buffer pool */
    error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid, &paddr, 1);
    if (error != 0) {
        device_printf(sc->dev, "%s: failed to release frame buffer to "
            "the pool: error=%d\n", __func__, error);
        return (error);
    }

    return (0);
}
/**
 * @brief Receive Tx confirmation frames.
 *
 * Completes a previously transmitted frame: unmaps the data buffer and its
 * S/G table, frees the transmitted mbuf and returns the Tx buffer to its
 * ring for reuse.
 */
static int
dpaa2_ni_tx_conf(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
    struct dpaa2_fd *fd)
{
    bus_addr_t paddr = (bus_addr_t)fd->addr;
    struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
    struct dpaa2_buf *buf = fa->buf;
    struct dpaa2_buf *sgt = buf->sgt;
    struct dpaa2_ni_tx_ring *tx = (struct dpaa2_ni_tx_ring *)buf->opt;
    struct dpaa2_channel *bch = tx->fq->chan;

    KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
    KASSERT(tx != NULL, ("%s: Tx ring is NULL", __func__));
    KASSERT(sgt != NULL, ("%s: S/G table is NULL", __func__));
    /*
     * NOTE: Current channel might not be the same as the "buffer" channel
     * and it's fine. It must not be NULL though.
     */
    KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));

    if (paddr != buf->paddr) {
        panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
            __func__, paddr, buf->paddr);
    }

    mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
    mtx_lock(&bch->dma_mtx);

    bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTWRITE);
    bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_POSTWRITE);
    bus_dmamap_unload(buf->dmat, buf->dmap);
    bus_dmamap_unload(sgt->dmat, sgt->dmap);
    m_freem(buf->m);
    /* Reset the buffer so dpaa2_ni_tx() can reuse it. */
    buf->m = NULL;
    buf->paddr = 0;
    buf->vaddr = NULL;
    sgt->paddr = 0;

    mtx_unlock(&bch->dma_mtx);

    /* Return Tx buffer back to the ring */
    buf_ring_enqueue(tx->br, buf);

    return (0);
}
/**
 * @brief Compare versions of the DPAA2 network interface API.
 *
 * Returns negative/zero/positive when the firmware API version in the
 * softc is older than/equal to/newer than major.minor (strcmp-like).
 */
static int
dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *sc, uint16_t major,
    uint16_t minor)
{
    if (sc->api_major != major) {
        return (sc->api_major - major);
    }
    return (sc->api_minor - minor);
}
/**
 * @brief Build a DPAA2 frame descriptor.
 *
 * Writes the mbuf's DMA segments into the buffer's S/G table, maps the
 * table, stores a frame annotation (magic + back-pointer to buf) at its
 * start and fills in @p fd to describe an S/G-format frame.
 *
 * Assumes nsegs >= 1 (a zero-segment load would index sge[-1] below).
 */
static int
dpaa2_ni_build_fd(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
    struct dpaa2_buf *buf, bus_dma_segment_t *segs, int nsegs, struct dpaa2_fd *fd)
{
    struct dpaa2_buf *sgt = buf->sgt;
    struct dpaa2_sg_entry *sge;
    struct dpaa2_fa *fa;
    int i, error;

    KASSERT(nsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments", __func__));
    KASSERT(buf->opt != NULL, ("%s: no Tx ring?", __func__));
    KASSERT(sgt != NULL, ("%s: no S/G table?", __func__));
    KASSERT(sgt->vaddr != NULL, ("%s: no S/G vaddr?", __func__));

    memset(fd, 0, sizeof(*fd));

    /* Populate and map S/G table */
    if (__predict_true(nsegs <= DPAA2_TX_SEGLIMIT)) {
        /*
         * NOTE(review): this pointer arithmetic advances by
         * tx_data_off * sizeof(struct dpaa2_sg_entry) bytes, not by
         * tx_data_off bytes — confirm whether a byte offset
         * (cast via uint8_t *) was intended.
         */
        sge = (struct dpaa2_sg_entry *)sgt->vaddr + sc->tx_data_off;
        for (i = 0; i < nsegs; i++) {
            sge[i].addr = (uint64_t)segs[i].ds_addr;
            sge[i].len = (uint32_t)segs[i].ds_len;
            sge[i].offset_fmt = 0u;
        }
        sge[i-1].offset_fmt |= 0x8000u; /* set final entry flag */

        KASSERT(sgt->paddr == 0, ("%s: paddr(%#jx) != 0", __func__,
            sgt->paddr));

        error = bus_dmamap_load(sgt->dmat, sgt->dmap, sgt->vaddr,
            DPAA2_TX_SGT_SZ, dpaa2_dmamap_oneseg_cb, &sgt->paddr,
            BUS_DMA_NOWAIT);
        if (__predict_false(error != 0)) {
            device_printf(sc->dev, "%s: bus_dmamap_load() failed: "
                "error=%d\n", __func__, error);
            return (error);
        }

        buf->paddr = sgt->paddr;
        buf->vaddr = sgt->vaddr;
        sc->tx_sg_frames++; /* for sysctl(9) */
    } else {
        return (EINVAL);
    }

    /* Frame annotation lets the Tx-confirmation path find buf again. */
    fa = (struct dpaa2_fa *)sgt->vaddr;
    fa->magic = DPAA2_MAGIC;
    fa->buf = buf;

    fd->addr = buf->paddr;
    fd->data_length = (uint32_t)buf->m->m_pkthdr.len;
    fd->bpid_ivp_bmt = 0;
    /* 0x2000 = S/G format; data starts tx_data_off into the buffer. */
    fd->offset_fmt_sl = 0x2000u | sc->tx_data_off;
    fd->ctrl = 0x00800000u;

    return (0);
}
/* Extract the frame-level error code from the FD control word. */
static int
dpaa2_ni_fd_err(struct dpaa2_fd *fd)
{
    return ((fd->ctrl >> DPAA2_NI_FD_ERR_SHIFT) & DPAA2_NI_FD_ERR_MASK);
}
/*
 * Data length carried by the frame descriptor; when the short-length flag
 * is set only the low bits of the field are valid.
 */
static uint32_t
dpaa2_ni_fd_data_len(struct dpaa2_fd *fd)
{
    uint32_t len = fd->data_length;

    return (dpaa2_ni_fd_short_len(fd) ? (len & DPAA2_NI_FD_LEN_MASK) : len);
}
/* Frame format (single buffer / S/G / ...) encoded in the FD. */
static int
dpaa2_ni_fd_format(struct dpaa2_fd *fd)
{
    return ((enum dpaa2_fd_format)((fd->offset_fmt_sl >>
        DPAA2_NI_FD_FMT_SHIFT) & DPAA2_NI_FD_FMT_MASK));
}
/* True when the FD's short-length flag is set (see dpaa2_ni_fd_data_len). */
static bool
dpaa2_ni_fd_short_len(struct dpaa2_fd *fd)
{
    return (((fd->offset_fmt_sl >> DPAA2_NI_FD_SL_SHIFT)
        & DPAA2_NI_FD_SL_MASK) == 1);
}
/* Offset of the frame data within the buffer, from the FD. */
static int
dpaa2_ni_fd_offset(struct dpaa2_fd *fd)
{
    return (fd->offset_fmt_sl & DPAA2_NI_FD_OFFSET_MASK);
}
/**
 * @brief Collect statistics of the network interface.
 *
 * sysctl(9) handler: oidp->oid_number selects an entry of
 * dpni_stat_sysctls, whose page/counter indices are fetched from the MC
 * firmware. On any firmware error the handler reports 0 rather than
 * failing the sysctl.
 */
static int
dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS)
{
    struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
    struct dpni_stat *stat = &dpni_stat_sysctls[oidp->oid_number];
    device_t pdev = device_get_parent(sc->dev);
    device_t dev = sc->dev;
    device_t child = dev;
    struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
    struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
    struct dpaa2_cmd cmd;
    uint64_t cnt[DPAA2_NI_STAT_COUNTERS];
    uint64_t result = 0;
    uint16_t rc_token, ni_token;
    int error;

    DPAA2_CMD_INIT(&cmd);

    error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
    if (error) {
        device_printf(dev, "%s: failed to open resource container: "
            "id=%d, error=%d\n", __func__, rcinfo->id, error);
        goto exit;
    }
    error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
    if (error) {
        device_printf(dev, "%s: failed to open network interface: "
            "id=%d, error=%d\n", __func__, dinfo->id, error);
        goto close_rc;
    }

    error = DPAA2_CMD_NI_GET_STATISTICS(dev, child, &cmd, stat->page, 0, cnt);
    if (!error) {
        result = cnt[stat->cnt];
    }

    (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
    (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
exit:
    return (sysctl_handle_64(oidp, &result, 0, req));
}
/* sysctl(9) handler: report the current number of allocated Rx buffers. */
static int
dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS)
{
    struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
    uint32_t buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);

    return (sysctl_handle_32(oidp, &buf_num, 0, req));
}
/* sysctl(9) handler: report the current number of free Rx buffers. */
static int
dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS)
{
    struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
    uint32_t buf_free = DPAA2_ATOMIC_READ(&sc->buf_free);

    return (sysctl_handle_32(oidp, &buf_free, 0, req));
}
/*
 * Translate RXH_* flags into the driver's distribution-field key and
 * program hash-based Rx traffic distribution. Requires more than one Rx
 * queue; returns EOPNOTSUPP otherwise.
 */
static int
dpaa2_ni_set_hash(device_t dev, uint64_t flags)
{
    struct dpaa2_ni_softc *sc = device_get_softc(dev);
    uint64_t key = 0;

    if (sc->attr.num.queues <= 1) {
        return (EOPNOTSUPP);
    }

    for (int i = 0; i < ARRAY_SIZE(dist_fields); i++) {
        if ((dist_fields[i].rxnfc_field & flags) != 0) {
            key |= dist_fields[i].id;
        }
    }

    return (dpaa2_ni_set_dist_key(dev, DPAA2_NI_DIST_MODE_HASH, key));
}
/**
 * @brief Set Rx distribution (hash or flow classification) key flags is a
 * combination of RXH_ bits.
 *
 * Builds a key-generation profile from the requested fields, serializes it
 * into a DMA buffer (sc->rxd_kcfg) and, for hash mode, programs the
 * traffic-class distribution via the MC firmware.
 *
 * NOTE(review): the DMA memory allocated below is not freed on the error
 * returns that follow, and re-invocation allocates again — confirm
 * rxd_kcfg lifetime/cleanup is handled at detach.
 */
static int
dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags)
{
    device_t pdev = device_get_parent(dev);
    device_t child = dev;
    struct dpaa2_ni_softc *sc = device_get_softc(dev);
    struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
    struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
    struct dpkg_profile_cfg cls_cfg;
    struct dpkg_extract *key;
    struct dpaa2_buf *buf = &sc->rxd_kcfg;
    struct dpaa2_cmd cmd;
    uint16_t rc_token, ni_token;
    int i, error = 0;

    if (__predict_true(buf->dmat == NULL)) {
        buf->dmat = sc->rxd_dmat;
    }

    memset(&cls_cfg, 0, sizeof(cls_cfg));

    /* Configure extracts according to the given flags. */
    for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
        key = &cls_cfg.extracts[cls_cfg.num_extracts];

        if (!(flags & dist_fields[i].id)) {
            continue;
        }

        if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
            device_printf(dev, "%s: failed to add key extraction "
                "rule\n", __func__);
            return (E2BIG);
        }

        key->type = DPKG_EXTRACT_FROM_HDR;
        key->extract.from_hdr.prot = dist_fields[i].cls_prot;
        key->extract.from_hdr.type = DPKG_FULL_FIELD;
        key->extract.from_hdr.field = dist_fields[i].cls_field;
        cls_cfg.num_extracts++;
    }

    error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
        BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
    if (error != 0) {
        device_printf(dev, "%s: failed to allocate a buffer for Rx "
            "traffic distribution key configuration\n", __func__);
        return (error);
    }

    /* Serialize the profile into the firmware's wire format. */
    error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *)buf->vaddr);
    if (error != 0) {
        device_printf(dev, "%s: failed to prepare key configuration: "
            "error=%d\n", __func__, error);
        return (error);
    }

    /* Prepare for setting the Rx dist. */
    error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr,
        DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr,
        BUS_DMA_NOWAIT);
    if (error != 0) {
        device_printf(sc->dev, "%s: failed to map a buffer for Rx "
            "traffic distribution key configuration\n", __func__);
        return (error);
    }

    if (type == DPAA2_NI_DIST_MODE_HASH) {
        DPAA2_CMD_INIT(&cmd);

        error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
            &rc_token);
        if (error) {
            device_printf(dev, "%s: failed to open resource "
                "container: id=%d, error=%d\n", __func__, rcinfo->id,
                error);
            goto err_exit;
        }
        error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
            &ni_token);
        if (error) {
            device_printf(dev, "%s: failed to open network "
                "interface: id=%d, error=%d\n", __func__, dinfo->id,
                error);
            goto close_rc;
        }

        error = DPAA2_CMD_NI_SET_RX_TC_DIST(dev, child, &cmd,
            sc->attr.num.queues, 0, DPAA2_NI_DIST_MODE_HASH, buf->paddr);
        if (error != 0) {
            device_printf(dev, "%s: failed to set distribution mode "
                "and size for the traffic class\n", __func__);
        }

        (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
            ni_token));
close_rc:
        (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
            rc_token));
    }

err_exit:
    return (error);
}
/**
 * @brief Prepares extract parameters.
 *
 * Serializes a key-generation profile into the MC firmware's wire layout.
 *
 * cfg: Defining a full Key Generation profile.
 * key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA.
 *
 * Returns 0, or EINVAL for too many extracts or an unknown extract type.
 */
static int
dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
{
    struct dpni_ext_set_rx_tc_dist *ext;
    struct dpni_dist_extract *dst;
    struct dpkg_extract *src;
    int i, j;

    if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS) {
        return (EINVAL);
    }

    ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf;
    ext->num_extracts = cfg->num_extracts;

    for (i = 0; i < cfg->num_extracts; i++) {
        src = &cfg->extracts[i];
        dst = &ext->extracts[i];

        switch (src->type) {
        case DPKG_EXTRACT_FROM_HDR:
            dst->prot = src->extract.from_hdr.prot;
            /* Only the low nibble of the header-extract type is used. */
            dst->efh_type = src->extract.from_hdr.type & 0x0Fu;
            dst->size = src->extract.from_hdr.size;
            dst->offset = src->extract.from_hdr.offset;
            dst->field = src->extract.from_hdr.field;
            dst->hdr_index = src->extract.from_hdr.hdr_index;
            break;
        case DPKG_EXTRACT_FROM_DATA:
            dst->size = src->extract.from_data.size;
            dst->offset = src->extract.from_data.offset;
            break;
        case DPKG_EXTRACT_FROM_PARSE:
            dst->size = src->extract.from_parse.size;
            dst->offset = src->extract.from_parse.offset;
            break;
        default:
            return (EINVAL);
        }

        dst->num_of_byte_masks = src->num_of_byte_masks;
        dst->extract_type = src->type & 0x0Fu;

        for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
            dst->masks[j].mask = src->masks[j].mask;
            dst->masks[j].offset = src->masks[j].offset;
        }
    }

    return (0);
}
/* newbus glue: method table for the DPAA2 network interface driver. */
static device_method_t dpaa2_ni_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, dpaa2_ni_probe),
    DEVMETHOD(device_attach, dpaa2_ni_attach),
    DEVMETHOD(device_detach, dpaa2_ni_detach),

    /* mii via memac_mdio */
    DEVMETHOD(miibus_statchg, dpaa2_ni_miibus_statchg),

    DEVMETHOD_END
};

static driver_t dpaa2_ni_driver = {
    "dpaa2_ni",
    dpaa2_ni_methods,
    sizeof(struct dpaa2_ni_softc),
};

/* miibus attaches under dpaa2_ni; dpaa2_ni under the resource container. */
DRIVER_MODULE(miibus, dpaa2_ni, miibus_driver, 0, 0);
DRIVER_MODULE(dpaa2_ni, dpaa2_rc, dpaa2_ni_driver, 0, 0);

MODULE_DEPEND(dpaa2_ni, miibus, 1, 1, 1);
#ifdef DEV_ACPI
MODULE_DEPEND(dpaa2_ni, memac_mdio_acpi, 1, 1, 1);
#endif
#ifdef FDT
MODULE_DEPEND(dpaa2_ni, memac_mdio_fdt, 1, 1, 1);
#endif
diff --git a/sys/dev/ena/ena.c b/sys/dev/ena/ena.c
index 3ff32cc9966c..47d066a2e4f1 100644
--- a/sys/dev/ena/ena.c
+++ b/sys/dev/ena/ena.c
@@ -1,4087 +1,4076 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include "opt_rss.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include "ena.h"
#include "ena_datapath.h"
#include "ena_rss.h"
#include "ena_sysctl.h"
#ifdef DEV_NETMAP
#include "ena_netmap.h"
#endif /* DEV_NETMAP */
/*********************************************************
* Function prototypes
*********************************************************/
static int ena_probe(device_t);
static void ena_intr_msix_mgmnt(void *);
static void ena_free_pci_resources(struct ena_adapter *);
static int ena_change_mtu(if_t, int);
static inline void ena_alloc_counters(counter_u64_t *, int);
static inline void ena_free_counters(counter_u64_t *, int);
static inline void ena_reset_counters(counter_u64_t *, int);
static void ena_init_io_rings_common(struct ena_adapter *, struct ena_ring *,
uint16_t);
static void ena_init_io_rings_basic(struct ena_adapter *);
static void ena_init_io_rings_advanced(struct ena_adapter *);
static void ena_init_io_rings(struct ena_adapter *);
static void ena_free_io_ring_resources(struct ena_adapter *, unsigned int);
static void ena_free_all_io_rings_resources(struct ena_adapter *);
static int ena_setup_tx_dma_tag(struct ena_adapter *);
static int ena_free_tx_dma_tag(struct ena_adapter *);
static int ena_setup_rx_dma_tag(struct ena_adapter *);
static int ena_free_rx_dma_tag(struct ena_adapter *);
static void ena_release_all_tx_dmamap(struct ena_ring *);
static int ena_setup_tx_resources(struct ena_adapter *, int);
static void ena_free_tx_resources(struct ena_adapter *, int);
static int ena_setup_all_tx_resources(struct ena_adapter *);
static void ena_free_all_tx_resources(struct ena_adapter *);
static int ena_setup_rx_resources(struct ena_adapter *, unsigned int);
static void ena_free_rx_resources(struct ena_adapter *, unsigned int);
static int ena_setup_all_rx_resources(struct ena_adapter *);
static void ena_free_all_rx_resources(struct ena_adapter *);
static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *,
struct ena_rx_buffer *);
static void ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *,
struct ena_rx_buffer *);
static void ena_free_rx_bufs(struct ena_adapter *, unsigned int);
static void ena_refill_all_rx_bufs(struct ena_adapter *);
static void ena_free_all_rx_bufs(struct ena_adapter *);
static void ena_free_tx_bufs(struct ena_adapter *, unsigned int);
static void ena_free_all_tx_bufs(struct ena_adapter *);
static void ena_destroy_all_tx_queues(struct ena_adapter *);
static void ena_destroy_all_rx_queues(struct ena_adapter *);
static void ena_destroy_all_io_queues(struct ena_adapter *);
static int ena_create_io_queues(struct ena_adapter *);
static int ena_handle_msix(void *);
static int ena_enable_msix(struct ena_adapter *);
static void ena_setup_mgmnt_intr(struct ena_adapter *);
static int ena_setup_io_intr(struct ena_adapter *);
static int ena_request_mgmnt_irq(struct ena_adapter *);
static int ena_request_io_irq(struct ena_adapter *);
static void ena_free_mgmnt_irq(struct ena_adapter *);
static void ena_free_io_irq(struct ena_adapter *);
static void ena_free_irqs(struct ena_adapter *);
static void ena_disable_msix(struct ena_adapter *);
static void ena_unmask_all_io_irqs(struct ena_adapter *);
static int ena_up_complete(struct ena_adapter *);
static uint64_t ena_get_counter(if_t, ift_counter);
static int ena_media_change(if_t);
static void ena_media_status(if_t, struct ifmediareq *);
static void ena_init(void *);
static int ena_ioctl(if_t, u_long, caddr_t);
static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
static void ena_update_host_info(struct ena_admin_host_info *, if_t);
static void ena_update_hwassist(struct ena_adapter *);
-static int ena_setup_ifnet(device_t, struct ena_adapter *,
+static void ena_setup_ifnet(device_t, struct ena_adapter *,
struct ena_com_dev_get_features_ctx *);
static int ena_enable_wc(device_t, struct resource *);
static int ena_set_queues_placement_policy(device_t, struct ena_com_dev *,
struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *);
static int ena_map_llq_mem_bar(device_t, struct ena_com_dev *);
static uint32_t ena_calc_max_io_queue_num(device_t, struct ena_com_dev *,
struct ena_com_dev_get_features_ctx *);
static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *);
static void ena_config_host_info(struct ena_com_dev *, device_t);
static int ena_attach(device_t);
static int ena_detach(device_t);
static int ena_device_init(struct ena_adapter *, device_t,
struct ena_com_dev_get_features_ctx *, int *);
static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *);
static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *);
static void unimplemented_aenq_handler(void *, struct ena_admin_aenq_entry *);
static int ena_copy_eni_metrics(struct ena_adapter *);
static int ena_copy_srd_metrics(struct ena_adapter *);
static int ena_copy_customer_metrics(struct ena_adapter *);
static void ena_timer_service(void *);
/* Driver identification/version string. */
static char ena_version[] = ENA_DEVICE_NAME ENA_DRV_MODULE_NAME
    " v" ENA_DRV_MODULE_VERSION;

/* PCI IDs this driver attaches to; the table is terminated by a zero entry. */
static ena_vendor_info_t ena_vendor_info_array[] = {
    { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0 },
    { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF_RSERV0, 0 },
    { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0 },
    { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF_RSERV0, 0 },
    /* Last entry */
    { 0, 0, 0 }
};

/*
 * Shared exclusive lock; presumably serializes driver-wide (cross-adapter)
 * operations -- its users are outside this chunk, confirm against callers.
 */
struct sx ena_global_lock;

/*
 * Contains pointers to event handlers, e.g. link state change.
 */
static struct ena_aenq_handlers aenq_handlers;
/*
 * bus_dmamap_load() completion callback: on success, hand the (single)
 * segment's bus address back through the caller-supplied bus_addr_t pointer.
 * On error the destination is left untouched.
 */
void
ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    bus_addr_t *paddr = arg;

    if (error == 0)
        *paddr = segs[0].ds_addr;
}
/*
 * Allocate a DMA-coherent, zeroed buffer of at least `size` bytes (rounded
 * up to whole pages) and record the tag, map, kernel virtual address and bus
 * address in `dma`.  On failure every partially acquired resource is released
 * via the goto-cleanup chain and `dma` is reset to a safe empty state.
 *
 * Returns 0 on success, or the bus_dma error code on failure.
 */
int
ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma,
    int mapflags, bus_size_t alignment, int domain)
{
    struct ena_adapter *adapter = device_get_softc(dmadev);
    device_t pdev = adapter->pdev;
    uint32_t maxsize;
    uint64_t dma_space_addr;
    int error;

    /* Round the request up to a multiple of PAGE_SIZE. */
    maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;

    /* Constrain addresses to the device's DMA width; 0 means "no limit". */
    dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
    if (unlikely(dma_space_addr == 0))
        dma_space_addr = BUS_SPACE_MAXADDR;

    error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */
        alignment, 0, /* alignment, bounds */
        dma_space_addr, /* lowaddr of exclusion window */
        BUS_SPACE_MAXADDR, /* highaddr of exclusion window */
        NULL, NULL, /* filter, filterarg */
        maxsize, /* maxsize */
        1, /* nsegments */
        maxsize, /* maxsegsize */
        BUS_DMA_ALLOCNOW, /* flags */
        NULL, /* lockfunc */
        NULL, /* lockarg */
        &dma->tag);
    if (unlikely(error != 0)) {
        ena_log(pdev, ERR, "bus_dma_tag_create failed: %d\n", error);
        goto fail_tag;
    }

    /* Keep the allocation on the requested NUMA domain. */
    error = bus_dma_tag_set_domain(dma->tag, domain);
    if (unlikely(error != 0)) {
        ena_log(pdev, ERR, "bus_dma_tag_set_domain failed: %d\n",
            error);
        goto fail_map_create;
    }

    error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
        BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
    if (unlikely(error != 0)) {
        ena_log(pdev, ERR, "bus_dmamem_alloc(%ju) failed: %d\n",
            (uintmax_t)size, error);
        goto fail_map_create;
    }

    /* paddr is filled in by the load callback; 0 doubles as "not loaded". */
    dma->paddr = 0;
    error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
        ena_dmamap_callback, &dma->paddr, mapflags);
    if (unlikely((error != 0) || (dma->paddr == 0))) {
        ena_log(pdev, ERR, "bus_dmamap_load failed: %d\n", error);
        goto fail_map_load;
    }

    bus_dmamap_sync(dma->tag, dma->map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    return (0);

fail_map_load:
    bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
fail_map_create:
    bus_dma_tag_destroy(dma->tag);
fail_tag:
    dma->tag = NULL;
    dma->vaddr = NULL;
    dma->paddr = 0;

    return (error);
}
/*
 * Release the PCI resources (memory BAR, register BAR and MSI-X table)
 * acquired during attach.  Each one is released only if it was actually
 * allocated, so this is safe to call on a partially attached adapter.
 */
static void
ena_free_pci_resources(struct ena_adapter *adapter)
{
    device_t pdev = adapter->pdev;

    if (adapter->memory != NULL)
        bus_release_resource(pdev, SYS_RES_MEMORY,
            PCIR_BAR(ENA_MEM_BAR), adapter->memory);

    if (adapter->registers != NULL)
        bus_release_resource(pdev, SYS_RES_MEMORY,
            PCIR_BAR(ENA_REG_BAR), adapter->registers);

    if (adapter->msix != NULL)
        bus_release_resource(pdev, SYS_RES_MEMORY, adapter->msix_rid,
            adapter->msix);
}
/*
 * Device probe: walk the supported-IDs table and claim the device when the
 * PCI vendor/device pair matches.  Returns BUS_PROBE_DEFAULT on a match,
 * ENXIO otherwise.
 */
static int
ena_probe(device_t dev)
{
    ena_vendor_info_t *ent;
    uint16_t vendor_id, device_id;

    vendor_id = pci_get_vendor(dev);
    device_id = pci_get_device(dev);

    for (ent = ena_vendor_info_array; ent->vendor_id != 0; ent++) {
        if (vendor_id == ent->vendor_id &&
            device_id == ent->device_id) {
            ena_log_raw(DBG, "vendor=%x device=%x\n", vendor_id,
                device_id);
            device_set_desc(dev, ENA_DEVICE_DESC);
            return (BUS_PROBE_DEFAULT);
        }
    }

    return (ENXIO);
}
/*
 * Validate and apply a new MTU.  The range is bounded by the device-reported
 * maximum and ENA_MIN_MTU; the hardware is told first and the ifnet MTU is
 * only updated once the device accepts the change.
 *
 * Returns 0 on success, EINVAL for an out-of-range MTU, or the device error.
 */
static int
ena_change_mtu(if_t ifp, int new_mtu)
{
    struct ena_adapter *adapter = if_getsoftc(ifp);
    device_t pdev = adapter->pdev;
    int rc;

    if (new_mtu > adapter->max_mtu || new_mtu < ENA_MIN_MTU) {
        ena_log(pdev, ERR, "Invalid MTU setting. new_mtu: %d max mtu: %d min mtu: %d\n",
            new_mtu, adapter->max_mtu, ENA_MIN_MTU);
        return (EINVAL);
    }

    rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
    if (unlikely(rc != 0)) {
        ena_log(pdev, ERR, "Failed to set MTU to %d\n", new_mtu);
        return (rc);
    }

    ena_log(pdev, DBG, "set MTU to %d\n", new_mtu);
    if_setmtu(ifp, new_mtu);

    return (rc);
}
/*
 * Allocate every counter(9) in a statistics structure.  `size` is the byte
 * size of the structure, which is treated as a dense array of counter_u64_t.
 */
static inline void
ena_alloc_counters(counter_u64_t *begin, int size)
{
    size_t i, count = size / sizeof(counter_u64_t);

    for (i = 0; i < count; i++)
        begin[i] = counter_u64_alloc(M_WAITOK);
}
/*
 * Free every counter(9) in a statistics structure previously populated by
 * ena_alloc_counters().  `size` is the byte size of the structure.
 */
static inline void
ena_free_counters(counter_u64_t *begin, int size)
{
    size_t i, count = size / sizeof(counter_u64_t);

    for (i = 0; i < count; i++)
        counter_u64_free(begin[i]);
}
/*
 * Zero every counter(9) in a statistics structure.  `size` is the byte size
 * of the structure, treated as a dense array of counter_u64_t.
 */
static inline void
ena_reset_counters(counter_u64_t *begin, int size)
{
    size_t i, count = size / sizeof(counter_u64_t);

    for (i = 0; i < count; i++)
        counter_u64_zero(begin[i]);
}
/*
 * Initialize the fields shared by Tx and Rx rings: queue id, back-pointers
 * to the adapter/ena_com device, and the per-ring interrupt bookkeeping.
 */
static void
ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring,
    uint16_t qid)
{
    ring->qid = qid;
    ring->adapter = adapter;
    ring->ena_dev = adapter->ena_dev;
    /* Cleared atomically; the interrupt path reads this flag concurrently. */
    atomic_store_8(&ring->first_interrupt, 0);
    ring->no_interrupt_event_cnt = 0;
}
/*
 * Step 1 of ring initialization: copy initial values from the adapter and
 * ena_com device into every Tx/Rx ring and wire up the per-queue `ena_que`
 * structure.  No allocation happens here (see ena_init_io_rings_advanced()).
 */
static void
ena_init_io_rings_basic(struct ena_adapter *adapter)
{
    struct ena_com_dev *ena_dev;
    struct ena_ring *txr, *rxr;
    struct ena_que *que;
    int i;

    ena_dev = adapter->ena_dev;

    for (i = 0; i < adapter->num_io_queues; i++) {
        txr = &adapter->tx_ring[i];
        rxr = &adapter->rx_ring[i];

        /* TX/RX common ring state */
        ena_init_io_rings_common(adapter, txr, i);
        ena_init_io_rings_common(adapter, rxr, i);

        /* TX specific ring state */
        txr->tx_max_header_size = ena_dev->tx_max_header_size;
        txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;

        /* Cross-link queue container and its two rings. */
        que = &adapter->que[i];
        que->adapter = adapter;
        que->id = i;
        que->tx_ring = txr;
        que->rx_ring = rxr;

        txr->que = que;
        rxr->que = que;

        rxr->empty_rx_queue = 0;
        rxr->rx_mbuf_sz = ena_mbuf_sz;
    }
}
/*
 * Step 2 of ring initialization: allocate per-ring resources -- the Tx
 * buf_ring, Tx/Rx statistics counters, and the Tx ring mutex.  Counterpart
 * of ena_free_io_ring_resources().
 */
static void
ena_init_io_rings_advanced(struct ena_adapter *adapter)
{
    struct ena_ring *txr, *rxr;
    int i;

    for (i = 0; i < adapter->num_io_queues; i++) {
        txr = &adapter->tx_ring[i];
        rxr = &adapter->rx_ring[i];

        /* Allocate a buf ring */
        txr->buf_ring_size = adapter->buf_ring_size;
        txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF, M_WAITOK,
            &txr->ring_mtx);

        /* Allocate Tx statistics. */
        ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
            sizeof(txr->tx_stats));
        txr->tx_last_cleanup_ticks = ticks;

        /* Allocate Rx statistics. */
        ena_alloc_counters((counter_u64_t *)&rxr->rx_stats,
            sizeof(rxr->rx_stats));

        /* Initialize locks */
        snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)",
            device_get_nameunit(adapter->pdev), i);
        snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)",
            device_get_nameunit(adapter->pdev), i);

        mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF);
    }
}
/*
 * Full IO-ring initialization.  Performed in two phases:
 *   1. "basic"    -- copy initial values from adapter/ena_dev into the rings;
 *   2. "advanced" -- allocate mutexes, statistics counters and buf_rings.
 */
static void
ena_init_io_rings(struct ena_adapter *adapter)
{
    ena_init_io_rings_basic(adapter);
    ena_init_io_rings_advanced(adapter);
}
/*
 * Release the per-queue resources allocated by ena_init_io_rings_advanced():
 * Tx/Rx statistics counters, the Tx buf_ring, and the Tx ring mutex.  The
 * buf_ring is freed under the ring lock before the mutex itself is destroyed.
 */
static void
ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid)
{
    struct ena_ring *txr = &adapter->tx_ring[qid];
    struct ena_ring *rxr = &adapter->rx_ring[qid];

    ena_free_counters((counter_u64_t *)&txr->tx_stats,
        sizeof(txr->tx_stats));
    ena_free_counters((counter_u64_t *)&rxr->rx_stats,
        sizeof(rxr->rx_stats));

    ENA_RING_MTX_LOCK(txr);
    drbr_free(txr->br, M_DEVBUF);
    ENA_RING_MTX_UNLOCK(txr);

    mtx_destroy(&txr->ring_mtx);
}
/* Tear down the per-queue ring resources for every active IO queue. */
static void
ena_free_all_io_rings_resources(struct ena_adapter *adapter)
{
    int qid;

    for (qid = 0; qid < adapter->num_io_queues; qid++)
        ena_free_io_ring_resources(adapter, qid);
}
/*
 * Create the DMA tag used for all Tx packet buffers.  Segment limits match
 * the device's scatter-gather capability (one segment is reserved, hence
 * max_tx_sgl_size - 1) and TSO sizing.  Returns the bus_dma error code.
 */
static int
ena_setup_tx_dma_tag(struct ena_adapter *adapter)
{
    int ret;

    /* Create DMA tag for Tx buffers */
    ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
        1, 0, /* alignment, bounds */
        ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
        BUS_SPACE_MAXADDR, /* highaddr of excl window */
        NULL, NULL, /* filter, filterarg */
        ENA_TSO_MAXSIZE, /* maxsize */
        adapter->max_tx_sgl_size - 1, /* nsegments */
        ENA_TSO_MAXSIZE, /* maxsegsize */
        0, /* flags */
        NULL, /* lockfunc */
        NULL, /* lockfuncarg */
        &adapter->tx_buf_tag);

    return (ret);
}
/*
 * Destroy the Tx buffer DMA tag.  The softc pointer is cleared only when the
 * destroy succeeds, so a busy tag stays referenced.  Returns the bus_dma
 * error code.
 */
static int
ena_free_tx_dma_tag(struct ena_adapter *adapter)
{
    int rc = bus_dma_tag_destroy(adapter->tx_buf_tag);

    if (rc == 0)
        adapter->tx_buf_tag = NULL;

    return (rc);
}
/*
 * Create the DMA tag used for all Rx packet buffers, sized for the current
 * receive mbuf size and the device's Rx scatter-gather limit.  Returns the
 * bus_dma error code.
 */
static int
ena_setup_rx_dma_tag(struct ena_adapter *adapter)
{
    int ret;

    /* Create DMA tag for Rx buffers*/
    ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */
        1, 0, /* alignment, bounds */
        ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
        BUS_SPACE_MAXADDR, /* highaddr of excl window */
        NULL, NULL, /* filter, filterarg */
        ena_mbuf_sz, /* maxsize */
        adapter->max_rx_sgl_size, /* nsegments */
        ena_mbuf_sz, /* maxsegsize */
        0, /* flags */
        NULL, /* lockfunc */
        NULL, /* lockarg */
        &adapter->rx_buf_tag);

    return (ret);
}
/*
 * Destroy the Rx buffer DMA tag.  The softc pointer is cleared only when the
 * destroy succeeds, so a busy tag stays referenced.  Returns the bus_dma
 * error code.
 */
static int
ena_free_rx_dma_tag(struct ena_adapter *adapter)
{
    int rc = bus_dma_tag_destroy(adapter->rx_buf_tag);

    if (rc == 0)
        adapter->rx_buf_tag = NULL;

    return (rc);
}
/*
 * Destroy every DMA map attached to a Tx ring's buffer-info array, including
 * the per-segment netmap maps when netmap is enabled.  Used on the error
 * path of ena_setup_tx_resources(); maps are NULLed so the unwind is
 * idempotent for entries that were never created.
 */
static void
ena_release_all_tx_dmamap(struct ena_ring *tx_ring)
{
    struct ena_adapter *adapter = tx_ring->adapter;
    struct ena_tx_buffer *tx_info;
    bus_dma_tag_t tx_tag = adapter->tx_buf_tag;
    int i;
#ifdef DEV_NETMAP
    struct ena_netmap_tx_info *nm_info;
    int j;
#endif /* DEV_NETMAP */

    for (i = 0; i < tx_ring->ring_size; ++i) {
        tx_info = &tx_ring->tx_buffer_info[i];
#ifdef DEV_NETMAP
        if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
            nm_info = &tx_info->nm_info;
            for (j = 0; j < ENA_PKT_MAX_BUFS; ++j) {
                if (nm_info->map_seg[j] != NULL) {
                    bus_dmamap_destroy(tx_tag,
                        nm_info->map_seg[j]);
                    nm_info->map_seg[j] = NULL;
                }
            }
        }
#endif /* DEV_NETMAP */
        if (tx_info->dmamap != NULL) {
            bus_dmamap_destroy(tx_tag, tx_info->dmamap);
            tx_info->dmamap = NULL;
        }
    }
}
/**
 * ena_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Allocates the buffer-info array, the free-id stack, the push-buffer
 * scratch area and per-buffer DMA maps, then creates the enqueue taskqueue.
 * All partially acquired resources are released through the error labels.
 *
 * Returns 0 on success, ENOMEM on any allocation failure.
 **/
static int
ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
    device_t pdev = adapter->pdev;
    char thread_name[MAXCOMLEN + 1];
    struct ena_que *que = &adapter->que[qid];
    struct ena_ring *tx_ring = que->tx_ring;
    cpuset_t *cpu_mask = NULL;
    int size, i, err;
#ifdef DEV_NETMAP
    bus_dmamap_t *map;
    int j;

    ena_netmap_reset_tx_ring(adapter, qid);
#endif /* DEV_NETMAP */

    size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;

    tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
    if (unlikely(tx_ring->tx_buffer_info == NULL))
        return (ENOMEM);

    size = sizeof(uint16_t) * tx_ring->ring_size;
    tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
    if (unlikely(tx_ring->free_tx_ids == NULL))
        goto err_buf_info_free;

    /* Scratch buffer for LLQ push-mode header copies. */
    size = tx_ring->tx_max_header_size;
    tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF,
        M_NOWAIT | M_ZERO);
    if (unlikely(tx_ring->push_buf_intermediate_buf == NULL))
        goto err_tx_ids_free;

    /* Req id stack for TX OOO completions */
    for (i = 0; i < tx_ring->ring_size; i++)
        tx_ring->free_tx_ids[i] = i;

    /* Reset TX statistics. */
    ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
        sizeof(tx_ring->tx_stats));

    tx_ring->next_to_use = 0;
    tx_ring->next_to_clean = 0;
    tx_ring->acum_pkts = 0;

    /* Make sure that drbr is empty */
    ENA_RING_MTX_LOCK(tx_ring);
    drbr_flush(adapter->ifp, tx_ring->br);
    ENA_RING_MTX_UNLOCK(tx_ring);

    /* ... and create the buffer DMA maps */
    for (i = 0; i < tx_ring->ring_size; i++) {
        err = bus_dmamap_create(adapter->tx_buf_tag, 0,
            &tx_ring->tx_buffer_info[i].dmamap);
        if (unlikely(err != 0)) {
            ena_log(pdev, ERR,
                "Unable to create Tx DMA map for buffer %d\n", i);
            goto err_map_release;
        }

#ifdef DEV_NETMAP
        if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
            map = tx_ring->tx_buffer_info[i].nm_info.map_seg;
            for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
                err = bus_dmamap_create(adapter->tx_buf_tag, 0,
                    &map[j]);
                if (unlikely(err != 0)) {
                    ena_log(pdev, ERR,
                        "Unable to create Tx DMA for buffer %d %d\n",
                        i, j);
                    goto err_map_release;
                }
            }
        }
#endif /* DEV_NETMAP */
    }

    /* Allocate taskqueues */
    TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
    tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
        taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
    if (unlikely(tx_ring->enqueue_tq == NULL)) {
        ena_log(pdev, ERR,
            "Unable to create taskqueue for enqueue task\n");
        /* All maps were created; release the whole ring's worth. */
        i = tx_ring->ring_size;
        goto err_map_release;
    }

    tx_ring->running = true;

#ifdef RSS
    cpu_mask = &que->cpu_mask;
    snprintf(thread_name, sizeof(thread_name), "%s txeq %d",
        device_get_nameunit(adapter->pdev), que->cpu);
#else
    snprintf(thread_name, sizeof(thread_name), "%s txeq %d",
        device_get_nameunit(adapter->pdev), que->id);
#endif
    taskqueue_start_threads_cpuset(&tx_ring->enqueue_tq, 1, PI_NET,
        cpu_mask, "%s", thread_name);

    return (0);

err_map_release:
    ena_release_all_tx_dmamap(tx_ring);
err_tx_ids_free:
    free(tx_ring->free_tx_ids, M_DEVBUF);
    tx_ring->free_tx_ids = NULL;
err_buf_info_free:
    free(tx_ring->tx_buffer_info, M_DEVBUF);
    tx_ring->tx_buffer_info = NULL;

    return (ENOMEM);
}
/**
 * ena_free_tx_resources - Free Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources: the enqueue taskqueue, any queued
 * packets in the drbr, every per-buffer DMA map (sync/unload/destroy, in
 * that order), outstanding mbufs, and the backing arrays.
 **/
static void
ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
    struct ena_ring *tx_ring = &adapter->tx_ring[qid];
#ifdef DEV_NETMAP
    struct ena_netmap_tx_info *nm_info;
    int j;
#endif /* DEV_NETMAP */

    /* Cancel the task repeatedly in case it re-enqueues itself. */
    while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, NULL))
        taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);

    taskqueue_free(tx_ring->enqueue_tq);

    ENA_RING_MTX_LOCK(tx_ring);
    /* Flush buffer ring, */
    drbr_flush(adapter->ifp, tx_ring->br);

    /* Free buffer DMA maps, */
    for (int i = 0; i < tx_ring->ring_size; i++) {
        bus_dmamap_sync(adapter->tx_buf_tag,
            tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(adapter->tx_buf_tag,
            tx_ring->tx_buffer_info[i].dmamap);
        bus_dmamap_destroy(adapter->tx_buf_tag,
            tx_ring->tx_buffer_info[i].dmamap);

#ifdef DEV_NETMAP
        if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
            nm_info = &tx_ring->tx_buffer_info[i].nm_info;
            for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
                if (nm_info->socket_buf_idx[j] != 0) {
                    bus_dmamap_sync(adapter->tx_buf_tag,
                        nm_info->map_seg[j],
                        BUS_DMASYNC_POSTWRITE);
                    ena_netmap_unload(adapter,
                        nm_info->map_seg[j]);
                }
                bus_dmamap_destroy(adapter->tx_buf_tag,
                    nm_info->map_seg[j]);
                nm_info->socket_buf_idx[j] = 0;
            }
        }
#endif /* DEV_NETMAP */

        m_freem(tx_ring->tx_buffer_info[i].mbuf);
        tx_ring->tx_buffer_info[i].mbuf = NULL;
    }
    ENA_RING_MTX_UNLOCK(tx_ring);

    /* And free allocated memory. */
    free(tx_ring->tx_buffer_info, M_DEVBUF);
    tx_ring->tx_buffer_info = NULL;

    free(tx_ring->free_tx_ids, M_DEVBUF);
    tx_ring->free_tx_ids = NULL;

    free(tx_ring->push_buf_intermediate_buf, M_DEVBUF);
    tx_ring->push_buf_intermediate_buf = NULL;
}
/**
 * ena_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: network interface device structure
 *
 * Sets up every Tx queue in order; on the first failure, unwinds the queues
 * that were already set up.
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
    int qid, rc = 0;

    for (qid = 0; qid < adapter->num_io_queues; qid++) {
        rc = ena_setup_tx_resources(adapter, qid);
        if (rc == 0)
            continue;
        ena_log(adapter->pdev, ERR,
            "Allocation for Tx Queue %u failed\n", qid);
        /* Rewind, freeing the rings set up before the failure. */
        while (qid--)
            ena_free_tx_resources(adapter, qid);
        break;
    }

    return (rc);
}
/**
 * ena_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: network interface device structure
 *
 * Free all transmit software resources
 **/
static void
ena_free_all_tx_resources(struct ena_adapter *adapter)
{
    int qid;

    for (qid = 0; qid < adapter->num_io_queues; qid++)
        ena_free_tx_resources(adapter, qid);
}
/**
 * ena_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Allocates the buffer-info array (with one extra element for prefetch),
 * the free-id array and per-buffer DMA maps, and initializes LRO when it
 * is enabled on the interface.  LRO setup failure is logged but not fatal.
 *
 * Returns 0 on success, ENOMEM when a DMA map cannot be created.
 **/
static int
ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
    device_t pdev = adapter->pdev;
    struct ena_que *que = &adapter->que[qid];
    struct ena_ring *rx_ring = que->rx_ring;
    int size, err, i;

    size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;

#ifdef DEV_NETMAP
    ena_netmap_reset_rx_ring(adapter, qid);
    rx_ring->initialized = false;
#endif /* DEV_NETMAP */

    /*
     * Alloc extra element so in rx path
     * we can always prefetch rx_info + 1
     */
    size += sizeof(struct ena_rx_buffer);

    rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);

    size = sizeof(uint16_t) * rx_ring->ring_size;
    rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);

    for (i = 0; i < rx_ring->ring_size; i++)
        rx_ring->free_rx_ids[i] = i;

    /* Reset RX statistics. */
    ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
        sizeof(rx_ring->rx_stats));

    rx_ring->next_to_clean = 0;
    rx_ring->next_to_use = 0;

    /* ... and create the buffer DMA maps */
    for (i = 0; i < rx_ring->ring_size; i++) {
        err = bus_dmamap_create(adapter->rx_buf_tag, 0,
            &(rx_ring->rx_buffer_info[i].map));
        if (err != 0) {
            ena_log(pdev, ERR,
                "Unable to create Rx DMA map for buffer %d\n", i);
            goto err_buf_info_unmap;
        }
    }

    /* Create LRO for the ring */
    if ((if_getcapenable(adapter->ifp) & IFCAP_LRO) != 0) {
        /*
         * Reuse the outer `err` instead of declaring a shadowing
         * local; LRO init failure is non-fatal by design.
         */
        err = tcp_lro_init(&rx_ring->lro);
        if (err != 0) {
            ena_log(pdev, ERR, "LRO[%d] Initialization failed!\n",
                qid);
        } else {
            ena_log(pdev, DBG, "RX Soft LRO[%d] Initialized\n",
                qid);
            rx_ring->lro.ifp = adapter->ifp;
        }
    }

    return (0);

err_buf_info_unmap:
    /* Destroy only the maps created before the failing index. */
    while (i--) {
        bus_dmamap_destroy(adapter->rx_buf_tag,
            rx_ring->rx_buffer_info[i].map);
    }

    free(rx_ring->free_rx_ids, M_DEVBUF);
    rx_ring->free_rx_ids = NULL;
    free(rx_ring->rx_buffer_info, M_DEVBUF);
    rx_ring->rx_buffer_info = NULL;

    return (ENOMEM);
}
/**
 * ena_free_rx_resources - Free Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources: per-buffer DMA maps and mbufs
 * (sync, free, unload, destroy), the LRO context, and the backing arrays.
 **/
static void
ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
    struct ena_ring *rx_ring = &adapter->rx_ring[qid];

    /* Free buffer DMA maps, */
    for (int i = 0; i < rx_ring->ring_size; i++) {
        bus_dmamap_sync(adapter->rx_buf_tag,
            rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD);
        m_freem(rx_ring->rx_buffer_info[i].mbuf);
        rx_ring->rx_buffer_info[i].mbuf = NULL;
        bus_dmamap_unload(adapter->rx_buf_tag,
            rx_ring->rx_buffer_info[i].map);
        bus_dmamap_destroy(adapter->rx_buf_tag,
            rx_ring->rx_buffer_info[i].map);
    }

    /* free LRO resources, */
    tcp_lro_free(&rx_ring->lro);

    /* free allocated memory */
    free(rx_ring->rx_buffer_info, M_DEVBUF);
    rx_ring->rx_buffer_info = NULL;

    free(rx_ring->free_rx_ids, M_DEVBUF);
    rx_ring->free_rx_ids = NULL;
}
/**
 * ena_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: network interface device structure
 *
 * Sets up every Rx queue in order; on the first failure, unwinds the queues
 * that were already set up.
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
    int qid, rc = 0;

    for (qid = 0; qid < adapter->num_io_queues; qid++) {
        rc = ena_setup_rx_resources(adapter, qid);
        if (rc == 0)
            continue;
        ena_log(adapter->pdev, ERR,
            "Allocation for Rx Queue %u failed\n", qid);
        /* Rewind, freeing the rings set up before the failure. */
        while (qid--)
            ena_free_rx_resources(adapter, qid);
        break;
    }

    return (rc);
}
/**
 * ena_free_all_rx_resources - Free Rx resources for all queues
 * @adapter: network interface device structure
 *
 * Free all receive software resources
 **/
static void
ena_free_all_rx_resources(struct ena_adapter *adapter)
{
    int qid;

    for (qid = 0; qid < adapter->num_io_queues; qid++)
        ena_free_rx_resources(adapter, qid);
}
/*
 * Allocate and DMA-map one receive mbuf for the given rx_info slot.  Tries a
 * jumbo cluster of rx_mbuf_sz first and falls back to a standard cluster on
 * failure.  A slot that already holds an mbuf is left alone (returns 0).
 *
 * Returns 0 on success, ENOMEM if no mbuf could be allocated, or EFAULT if
 * the DMA mapping failed (the mbuf is freed in that case).
 */
static inline int
ena_alloc_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{
    device_t pdev = adapter->pdev;
    struct ena_com_buf *ena_buf;
    bus_dma_segment_t segs[1];
    int nsegs, error;
    int mlen;

    /* if previous allocated frag is not used */
    if (unlikely(rx_info->mbuf != NULL))
        return (0);

    /* Get mbuf using UMA allocator */
    rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
        rx_ring->rx_mbuf_sz);

    if (unlikely(rx_info->mbuf == NULL)) {
        counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
        /* Fall back to a regular 2KB cluster. */
        rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
        if (unlikely(rx_info->mbuf == NULL)) {
            counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
            return (ENOMEM);
        }
        mlen = MCLBYTES;
    } else {
        mlen = rx_ring->rx_mbuf_sz;
    }
    /* Set mbuf length*/
    rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen;

    /* Map packets for DMA */
    ena_log(pdev, DBG,
        "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n",
        adapter->rx_buf_tag, rx_info->mbuf, rx_info->mbuf->m_len);
    error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
        rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
    /* The tag guarantees one segment; more than one is an error. */
    if (unlikely((error != 0) || (nsegs != 1))) {
        ena_log(pdev, WARN,
            "failed to map mbuf, error: %d, nsegs: %d\n", error, nsegs);
        counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
        goto exit;
    }

    bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);

    ena_buf = &rx_info->ena_buf;
    ena_buf->paddr = segs[0].ds_addr;
    ena_buf->len = mlen;

    ena_log(pdev, DBG,
        "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n",
        rx_info->mbuf, rx_info, ena_buf->len, (uintmax_t)ena_buf->paddr);

    return (0);

exit:
    m_freem(rx_info->mbuf);
    rx_info->mbuf = NULL;
    return (EFAULT);
}
/*
 * Release one receive buffer: sync for CPU access, unload the DMA map and
 * free the mbuf.  The map itself stays allocated for reuse.  Calling this on
 * an empty slot only logs a warning.
 */
static void
ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{
    if (rx_info->mbuf == NULL) {
        ena_log(adapter->pdev, WARN,
            "Trying to free unallocated buffer\n");
        return;
    }

    bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
        BUS_DMASYNC_POSTREAD);
    bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map);
    m_freem(rx_info->mbuf);
    rx_info->mbuf = NULL;
}
/**
 * ena_refill_rx_bufs - Refills ring with descriptors
 * @rx_ring: the ring which we want to feed with free descriptors
 * @num: number of descriptors to refill
 * Refills the ring with newly allocated DMA-mapped mbufs for receiving.
 * Stops early on allocation or submission failure; rings the SQ doorbell
 * if at least one descriptor was added.  Returns the number of descriptors
 * actually refilled.
 **/
int
ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
{
    struct ena_adapter *adapter = rx_ring->adapter;
    device_t pdev = adapter->pdev;
    uint16_t next_to_use, req_id;
    uint32_t i;
    int rc;

    ena_log_io(adapter->pdev, DBG, "refill qid: %d\n", rx_ring->qid);

    next_to_use = rx_ring->next_to_use;

    for (i = 0; i < num; i++) {
        struct ena_rx_buffer *rx_info;

        ena_log_io(pdev, DBG, "RX buffer - next to use: %d\n",
            next_to_use);

        req_id = rx_ring->free_rx_ids[next_to_use];
        rx_info = &rx_ring->rx_buffer_info[req_id];
#ifdef DEV_NETMAP
        if (ena_rx_ring_in_netmap(adapter, rx_ring->qid))
            rc = ena_netmap_alloc_rx_slot(adapter, rx_ring,
                rx_info);
        else
#endif /* DEV_NETMAP */
            rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
        if (unlikely(rc != 0)) {
            ena_log_io(pdev, WARN,
                "failed to alloc buffer for rx queue %d\n",
                rx_ring->qid);
            break;
        }
        rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
            &rx_info->ena_buf, req_id);
        if (unlikely(rc != 0)) {
            ena_log_io(pdev, WARN,
                "failed to add buffer for rx queue %d\n",
                rx_ring->qid);
            break;
        }
        next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
            rx_ring->ring_size);
    }

    if (unlikely(i < num)) {
        counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
        ena_log_io(pdev, WARN,
            "refilled rx qid %d with only %d mbufs (from %d)\n",
            rx_ring->qid, i, num);
    }

    /* Notify the device about the newly posted descriptors. */
    if (likely(i != 0))
        ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

    rx_ring->next_to_use = next_to_use;
    return (i);
}
/*
 * Change the Tx buf_ring (drbr) size at runtime.  The interface is brought
 * down, the per-ring resources are reallocated with the new size, and the
 * interface is brought back up if it was up before.  If bringing it up
 * fails, the old size is restored and a device reset is triggered, since
 * the failure cannot have been caused by the size change itself.
 *
 * Returns 0 on success or the error from ena_up().
 */
int
ena_update_buf_ring_size(struct ena_adapter *adapter,
    uint32_t new_buf_ring_size)
{
    uint32_t old_buf_ring_size;
    int rc = 0;
    bool dev_was_up;

    old_buf_ring_size = adapter->buf_ring_size;
    adapter->buf_ring_size = new_buf_ring_size;

    dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
    ena_down(adapter);

    /* Reconfigure buf ring for all Tx rings. */
    ena_free_all_io_rings_resources(adapter);
    ena_init_io_rings_advanced(adapter);
    if (dev_was_up) {
        /*
         * If ena_up() fails, it's not because of recent buf_ring size
         * changes. Because of that, we just want to revert old drbr
         * value and trigger the reset because something else had to
         * go wrong.
         */
        rc = ena_up(adapter);
        if (unlikely(rc != 0)) {
            ena_log(adapter->pdev, ERR,
                "Failed to configure device after setting new drbr size: %u. Reverting old value: %u and triggering the reset\n",
                new_buf_ring_size, old_buf_ring_size);

            /* Revert old size and trigger the reset */
            adapter->buf_ring_size = old_buf_ring_size;
            ena_free_all_io_rings_resources(adapter);
            ena_init_io_rings_advanced(adapter);

            ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET,
                adapter);
            ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER);
        }
    }

    return (rc);
}
/*
 * Change the requested Tx/Rx ring sizes at runtime.  The interface is
 * brought down, the rings are reinitialized with the new sizes, and the
 * interface is brought back up if it was up before.  On failure the old
 * sizes are restored and ena_up() is retried; if that also fails, a device
 * reset is triggered.
 *
 * Returns 0 on success or the error from ena_up().
 */
int
ena_update_queue_size(struct ena_adapter *adapter, uint32_t new_tx_size,
    uint32_t new_rx_size)
{
    uint32_t old_tx_size, old_rx_size;
    int rc = 0;
    bool dev_was_up;

    old_tx_size = adapter->requested_tx_ring_size;
    old_rx_size = adapter->requested_rx_ring_size;
    adapter->requested_tx_ring_size = new_tx_size;
    adapter->requested_rx_ring_size = new_rx_size;

    dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
    ena_down(adapter);

    /* Configure queues with new size. */
    ena_init_io_rings_basic(adapter);
    if (dev_was_up) {
        rc = ena_up(adapter);
        if (unlikely(rc != 0)) {
            ena_log(adapter->pdev, ERR,
                "Failed to configure device with the new sizes - Tx: %u Rx: %u. Reverting old values - Tx: %u Rx: %u\n",
                new_tx_size, new_rx_size, old_tx_size, old_rx_size);

            /* Revert old size. */
            adapter->requested_tx_ring_size = old_tx_size;
            adapter->requested_rx_ring_size = old_rx_size;
            ena_init_io_rings_basic(adapter);

            /* And try again. */
            rc = ena_up(adapter);
            if (unlikely(rc != 0)) {
                ena_log(adapter->pdev, ERR,
                    "Failed to revert old queue sizes. Triggering device reset.\n");
                /*
                 * If we've failed again, something had to go
                 * wrong. After reset, the device should try to
                 * go up
                 */
                ENA_FLAG_SET_ATOMIC(
                    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
                ena_trigger_reset(adapter,
                    ENA_REGS_RESET_OS_TRIGGER);
            }
        }
    }

    return (rc);
}
/*
 * Re-create all IO rings with a new queue count.  The RSS state is torn
 * down so the indirection table is rebuilt for the new number of queues
 * on the next RSS configuration.
 */
static void
ena_update_io_rings(struct ena_adapter *adapter, uint32_t num)
{
	ena_free_all_io_rings_resources(adapter);
	/* Force indirection table to be reinitialized */
	ena_com_rss_destroy(adapter->ena_dev);
	adapter->num_io_queues = num;
	ena_init_io_rings(adapter);
}
/*
 * Change the base CPU used for IRQ affinity of the IO queues.
 *
 * Brings the interface down, applies the new base CPU and, if the device
 * was previously up, brings it back up.  On failure the old value is
 * restored and a second ena_up() is attempted; if that also fails a device
 * reset is triggered.
 *
 * Returns 0 on success or the error code returned by ena_up().
 */
int
ena_update_base_cpu(struct ena_adapter *adapter, int new_num)
{
	int old_num;
	int rc = 0;
	bool dev_was_up;

	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
	old_num = adapter->irq_cpu_base;
	ena_down(adapter);
	adapter->irq_cpu_base = new_num;
	if (dev_was_up) {
		rc = ena_up(adapter);
		if (unlikely(rc != 0)) {
			ena_log(adapter->pdev, ERR,
			    "Failed to configure device %d IRQ base CPU. "
			    "Reverting to previous value: %d\n",
			    new_num, old_num);
			adapter->irq_cpu_base = old_num;
			rc = ena_up(adapter);
			if (unlikely(rc != 0)) {
				/*
				 * Note the trailing space before the literal
				 * concatenation; without it the message read
				 * "setup.Triggering".
				 */
				ena_log(adapter->pdev, ERR,
				    "Failed to revert to previous setup. "
				    "Triggering device reset.\n");
				ENA_FLAG_SET_ATOMIC(
				    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
				ena_trigger_reset(adapter,
				    ENA_REGS_RESET_OS_TRIGGER);
			}
		}
	}
	return (rc);
}
/*
 * Change the CPU stride used for IRQ affinity of the IO queues.
 *
 * Brings the interface down, applies the new stride and, if the device was
 * previously up, brings it back up.  On failure the old value is restored
 * and a second ena_up() is attempted; if that also fails a device reset is
 * triggered.
 *
 * Returns 0 on success or the error code returned by ena_up().
 */
int
ena_update_cpu_stride(struct ena_adapter *adapter, uint32_t new_num)
{
	uint32_t old_num;
	int rc = 0;
	bool dev_was_up;

	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
	old_num = adapter->irq_cpu_stride;
	ena_down(adapter);
	adapter->irq_cpu_stride = new_num;
	if (dev_was_up) {
		rc = ena_up(adapter);
		if (unlikely(rc != 0)) {
			/* %u: new_num and old_num are uint32_t. */
			ena_log(adapter->pdev, ERR,
			    "Failed to configure device %u IRQ CPU stride. "
			    "Reverting to previous value: %u\n",
			    new_num, old_num);
			adapter->irq_cpu_stride = old_num;
			rc = ena_up(adapter);
			if (unlikely(rc != 0)) {
				/*
				 * Trailing space before concatenation keeps
				 * the message from reading "setup.Triggering".
				 */
				ena_log(adapter->pdev, ERR,
				    "Failed to revert to previous setup. "
				    "Triggering device reset.\n");
				ENA_FLAG_SET_ATOMIC(
				    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
				ena_trigger_reset(adapter,
				    ENA_REGS_RESET_OS_TRIGGER);
			}
		}
	}
	return (rc);
}
/* Caller should sanitize new_num */
/*
 * Change the number of IO queues.
 *
 * Brings the interface down, re-creates the IO rings for the new queue
 * count and, if the device was previously up, brings it back up.  On
 * failure the old count is restored and a second ena_up() is attempted;
 * if that also fails a device reset is triggered.
 *
 * Returns 0 on success or the error code returned by ena_up().
 */
int
ena_update_io_queue_nb(struct ena_adapter *adapter, uint32_t new_num)
{
	uint32_t old_num;
	int rc = 0;
	bool dev_was_up;

	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
	old_num = adapter->num_io_queues;
	ena_down(adapter);
	ena_update_io_rings(adapter, new_num);
	if (dev_was_up) {
		rc = ena_up(adapter);
		if (unlikely(rc != 0)) {
			ena_log(adapter->pdev, ERR,
			    "Failed to configure device with %u IO queues. "
			    "Reverting to previous value: %u\n",
			    new_num, old_num);
			ena_update_io_rings(adapter, old_num);
			rc = ena_up(adapter);
			if (unlikely(rc != 0)) {
				ena_log(adapter->pdev, ERR,
				    "Failed to revert to previous setup IO "
				    "queues. Triggering device reset.\n");
				ENA_FLAG_SET_ATOMIC(
				    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
				ena_trigger_reset(adapter,
				    ENA_REGS_RESET_OS_TRIGGER);
			}
		}
	}
	return (rc);
}
/*
 * Free every mbuf (and, under netmap, every netmap buffer slot) attached
 * to the Rx ring of queue @qid.
 */
static void
ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	unsigned int i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->mbuf != NULL)
			ena_free_rx_mbuf(adapter, rx_ring, rx_info);
#ifdef DEV_NETMAP
		/* Only release netmap slots while netmap is still active. */
		if (((if_getflags(adapter->ifp) & IFF_DYING) == 0) &&
		    (if_getcapenable(adapter->ifp) & IFCAP_NETMAP)) {
			if (rx_info->netmap_buf_idx != 0)
				ena_netmap_free_rx_slot(adapter, rx_ring,
				    rx_info);
		}
#endif /* DEV_NETMAP */
	}
}
/**
 * ena_refill_all_rx_bufs - allocate Rx buffers for all queues
 * @adapter: network interface device structure
 *
 * Fills each Rx ring up to ring_size - 1 buffers (one descriptor is kept
 * free to distinguish a full ring from an empty one).  A partial refill is
 * only logged; it is not treated as fatal.
 */
static void
ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);
		if (unlikely(rc != bufs_num))
			ena_log_io(adapter->pdev, WARN,
			    "refilling Queue %d failed. "
			    "Allocated %d buffers from: %d\n",
			    i, rc, bufs_num);
#ifdef DEV_NETMAP
		rx_ring->initialized = true;
#endif /* DEV_NETMAP */
	}
}
/* Release the Rx buffers of every IO queue. */
static void
ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int qid;

	for (qid = 0; qid < adapter->num_io_queues; qid++)
		ena_free_rx_bufs(adapter, qid);
}
/**
 * ena_free_tx_bufs - Free Tx Buffers per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Walks the Tx ring under the ring lock and frees any mbuf that was never
 * completed by the hardware.  The first leaked mbuf is logged at WARN,
 * subsequent ones only at DBG to avoid log flooding.
 **/
static void
ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid)
{
	bool print_once = true;
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	ENA_RING_MTX_LOCK(tx_ring);
	for (int i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];

		if (tx_info->mbuf == NULL)
			continue;
		if (print_once) {
			ena_log(adapter->pdev, WARN,
			    "free uncompleted tx mbuf qid %d idx 0x%x\n", qid,
			    i);
			print_once = false;
		} else {
			ena_log(adapter->pdev, DBG,
			    "free uncompleted tx mbuf qid %d idx 0x%x\n", qid,
			    i);
		}
		/* Sync and unload the DMA map before freeing the mbuf. */
		bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap);
		m_free(tx_info->mbuf);
		tx_info->mbuf = NULL;
	}
	ENA_RING_MTX_UNLOCK(tx_ring);
}
/* Release the uncompleted Tx buffers of every IO queue. */
static void
ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	int qid;

	for (qid = 0; qid < adapter->num_io_queues; qid++)
		ena_free_tx_bufs(adapter, qid);
}
/* Destroy every Tx IO queue on the device. */
static void
ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	int qid;

	for (qid = 0; qid < adapter->num_io_queues; qid++) {
		uint16_t ena_qid = ENA_IO_TXQ_IDX(qid);

		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}
/* Destroy every Rx IO queue on the device. */
static void
ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	int qid;

	for (qid = 0; qid < adapter->num_io_queues; qid++) {
		uint16_t ena_qid = ENA_IO_RXQ_IDX(qid);

		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}
/*
 * Stop the per-queue cleanup taskqueues and destroy all Tx and Rx IO
 * queues.  The cancel/drain loop guarantees no cleanup task is running or
 * pending before its taskqueue is freed.
 */
static void
ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	struct ena_que *queue;
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		queue = &adapter->que[i];
		/* Retry until the task is neither queued nor running. */
		while (taskqueue_cancel(queue->cleanup_tq, &queue->cleanup_task, NULL))
			taskqueue_drain(queue->cleanup_tq, &queue->cleanup_task);
		taskqueue_free(queue->cleanup_tq);
	}
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}
/*
 * Create the Tx and Rx IO queues on the device and start the per-queue
 * cleanup taskqueues.
 *
 * Error handling unwinds in reverse: a failure while creating RX queue i
 * destroys RX queues [0, i), then falls through to destroy all TX queues;
 * a failure while creating TX queue i destroys TX queues [0, i).
 *
 * Returns 0 on success or ENXIO on any failure.
 */
static int
ena_create_io_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct ena_com_create_io_ctx ctx;
	struct ena_ring *ring;
	struct ena_que *queue;
	uint16_t ena_qid;
	uint32_t msix_vector;
	cpuset_t *cpu_mask = NULL;
	int rc, i;

	/* Create TX queues */
	for (i = 0; i < adapter->num_io_queues; i++) {
		msix_vector = ENA_IO_IRQ_IDX(i);
		ena_qid = ENA_IO_TXQ_IDX(i);
		ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
		ctx.queue_size = adapter->requested_tx_ring_size;
		ctx.msix_vector = msix_vector;
		ctx.qid = ena_qid;
		ctx.numa_node = adapter->que[i].domain;
		rc = ena_com_create_io_queue(ena_dev, &ctx);
		if (rc != 0) {
			ena_log(adapter->pdev, ERR,
			    "Failed to create io TX queue #%d rc: %d\n", i, rc);
			goto err_tx;
		}
		ring = &adapter->tx_ring[i];
		rc = ena_com_get_io_handlers(ena_dev, ena_qid,
		    &ring->ena_com_io_sq, &ring->ena_com_io_cq);
		if (rc != 0) {
			ena_log(adapter->pdev, ERR,
			    "Failed to get TX queue handlers. TX queue num"
			    " %d rc: %d\n",
			    i, rc);
			/* This queue was created; destroy it before unwinding. */
			ena_com_destroy_io_queue(ena_dev, ena_qid);
			goto err_tx;
		}
		if (ctx.numa_node >= 0) {
			ena_com_update_numa_node(ring->ena_com_io_cq,
			    ctx.numa_node);
		}
	}
	/* Create RX queues */
	for (i = 0; i < adapter->num_io_queues; i++) {
		msix_vector = ENA_IO_IRQ_IDX(i);
		ena_qid = ENA_IO_RXQ_IDX(i);
		ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
		ctx.queue_size = adapter->requested_rx_ring_size;
		ctx.msix_vector = msix_vector;
		ctx.qid = ena_qid;
		ctx.numa_node = adapter->que[i].domain;
		rc = ena_com_create_io_queue(ena_dev, &ctx);
		if (unlikely(rc != 0)) {
			ena_log(adapter->pdev, ERR,
			    "Failed to create io RX queue[%d] rc: %d\n", i, rc);
			goto err_rx;
		}
		ring = &adapter->rx_ring[i];
		rc = ena_com_get_io_handlers(ena_dev, ena_qid,
		    &ring->ena_com_io_sq, &ring->ena_com_io_cq);
		if (unlikely(rc != 0)) {
			ena_log(adapter->pdev, ERR,
			    "Failed to get RX queue handlers. RX queue num"
			    " %d rc: %d\n",
			    i, rc);
			ena_com_destroy_io_queue(ena_dev, ena_qid);
			goto err_rx;
		}
		if (ctx.numa_node >= 0) {
			ena_com_update_numa_node(ring->ena_com_io_cq,
			    ctx.numa_node);
		}
	}
	/* Start one cleanup taskqueue per queue pair. */
	for (i = 0; i < adapter->num_io_queues; i++) {
		queue = &adapter->que[i];
		NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue);
		queue->cleanup_tq = taskqueue_create_fast("ena cleanup",
		    M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq);
#ifdef RSS
		cpu_mask = &queue->cpu_mask;
#endif
		taskqueue_start_threads_cpuset(&queue->cleanup_tq, 1, PI_NET,
		    cpu_mask, "%s queue %d cleanup",
		    device_get_nameunit(adapter->pdev), i);
	}
	return (0);
err_rx:
	/* Destroy the RX queues created so far ([0, i)). */
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
	/* All TX queues exist at this point; destroy them all. */
	i = adapter->num_io_queues;
err_tx:
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
	return (ENXIO);
}
/*********************************************************************
*
* MSIX & Interrupt Service routine
*
**********************************************************************/
/**
 * ena_intr_msix_mgmnt - MSI-X Interrupt Handler for the admin/async queue
 * @arg: pointer to the ena_adapter
 *
 * Processes admin queue completions; AENQ events are only handled while
 * the device is marked as running.
 **/
static void
ena_intr_msix_mgmnt(void *arg)
{
	struct ena_adapter *adapter = (struct ena_adapter *)arg;

	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
	if (likely(ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)))
		ena_com_aenq_intr_handler(adapter->ena_dev, arg);
}
/**
 * ena_handle_msix - MSIX Interrupt Handler for Tx/Rx
 * @arg: pointer to the ena_que this vector serves
 *
 * Fast interrupt filter: defers the actual work to the queue's cleanup
 * task.  Interrupts arriving while the interface is not running are
 * reported as stray.
 **/
static int
ena_handle_msix(void *arg)
{
	struct ena_que *queue = arg;
	struct ena_adapter *adapter = queue->adapter;
	if_t ifp = adapter->ifp;

	if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
		return (FILTER_STRAY);
	taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task);
	return (FILTER_HANDLED);
}
/*
 * Allocate and enable MSI-X vectors: one for management plus one per
 * possible IO queue.  Fewer vectors than requested is tolerated (the
 * queue count is reduced later) unless only the admin vector could be
 * allocated, which is treated as failure.
 *
 * Returns 0 on success, EINVAL if MSI-X is already enabled, or ENOSPC if
 * not enough vectors could be allocated.
 */
static int
ena_enable_msix(struct ena_adapter *adapter)
{
	device_t dev = adapter->pdev;
	int msix_vecs, msix_req;
	int i, rc = 0;

	if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
		ena_log(dev, ERR, "Error, MSI-X is already enabled\n");
		return (EINVAL);
	}
	/* Reserved the max msix vectors we might need */
	msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
	adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	ena_log(dev, DBG, "trying to enable MSI-X, vectors: %d\n", msix_vecs);
	for (i = 0; i < msix_vecs; i++) {
		adapter->msix_entries[i].entry = i;
		/* Vectors must start from 1 */
		adapter->msix_entries[i].vector = i + 1;
	}
	msix_req = msix_vecs;
	/* pci_alloc_msix() may lower msix_vecs to what it could allocate. */
	rc = pci_alloc_msix(dev, &msix_vecs);
	if (unlikely(rc != 0)) {
		ena_log(dev, ERR, "Failed to enable MSIX, vectors %d rc %d\n",
		    msix_vecs, rc);
		rc = ENOSPC;
		goto err_msix_free;
	}
	if (msix_vecs != msix_req) {
		if (msix_vecs == ENA_ADMIN_MSIX_VEC) {
			/* Only the admin vector fits: no room for IO queues. */
			ena_log(dev, ERR,
			    "Not enough number of MSI-x allocated: %d\n",
			    msix_vecs);
			pci_release_msi(dev);
			rc = ENOSPC;
			goto err_msix_free;
		}
		ena_log(dev, ERR,
		    "Enable only %d MSI-x (out of %d), reduce "
		    "the number of queues\n",
		    msix_vecs, msix_req);
	}
	adapter->msix_vecs = msix_vecs;
	ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);
	return (0);
err_msix_free:
	free(adapter->msix_entries, M_DEVBUF);
	adapter->msix_entries = NULL;
	return (rc);
}
static void
ena_setup_mgmnt_intr(struct ena_adapter *adapter)
{
snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, ENA_IRQNAME_SIZE,
"ena-mgmnt@pci:%s", device_get_nameunit(adapter->pdev));
/*
* Handler is NULL on purpose, it will be set
* when mgmnt interrupt is acquired
*/
adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL;
adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
}
/*
 * Fill in the irq table entries for the IO queue interrupts and pick the
 * CPU / NUMA domain each queue should run on.  With RSS compiled in, the
 * CPUs come from the RSS bucket-to-CPU mapping; otherwise an explicit
 * base/stride from the tunables is honored when set.
 *
 * Returns 0 on success or EINVAL if MSI-X entries were never allocated.
 */
static int
ena_setup_io_intr(struct ena_adapter *adapter)
{
#ifdef RSS
	int num_buckets = rss_getnumbuckets();
	static int last_bind = 0;
	int cur_bind;
	int idx;
#endif
	int irq_idx;

	if (adapter->msix_entries == NULL)
		return (EINVAL);
#ifdef RSS
	/* Rotate the starting bucket across driver instances. */
	if (adapter->first_bind < 0) {
		adapter->first_bind = last_bind;
		last_bind = (last_bind + adapter->num_io_queues) % num_buckets;
	}
	cur_bind = adapter->first_bind;
#endif
	for (int i = 0; i < adapter->num_io_queues; i++) {
		irq_idx = ENA_IO_IRQ_IDX(i);
		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
		    "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i);
		adapter->irq_tbl[irq_idx].handler = ena_handle_msix;
		adapter->irq_tbl[irq_idx].data = &adapter->que[i];
		adapter->irq_tbl[irq_idx].vector =
		    adapter->msix_entries[irq_idx].vector;
		ena_log(adapter->pdev, DBG, "ena_setup_io_intr vector: %d\n",
		    adapter->msix_entries[irq_idx].vector);
		if (adapter->irq_cpu_base > ENA_BASE_CPU_UNSPECIFIED) {
			/* Explicit base + stride affinity, wrapped to ncpus. */
			adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
			    (unsigned)(adapter->irq_cpu_base +
			    i * adapter->irq_cpu_stride) % (unsigned)mp_ncpus;
			CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask);
		}
#ifdef RSS
		/* NOTE: with RSS this overrides the base/stride choice above. */
		adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
		    rss_getcpu(cur_bind);
		cur_bind = (cur_bind + 1) % num_buckets;
		CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask);
		/* Find the memory domain containing the chosen CPU. */
		for (idx = 0; idx < MAXMEMDOM; ++idx) {
			if (CPU_ISSET(adapter->que[i].cpu, &cpuset_domain[idx]))
				break;
		}
		adapter->que[i].domain = idx;
#else
		adapter->que[i].domain = -1;
#endif
	}
	return (0);
}
/*
 * Allocate the management interrupt resource and attach the admin/AENQ
 * handler to it.  On handler-setup failure the resource is released again.
 *
 * Returns 0 on success, ENXIO if the IRQ resource could not be allocated,
 * or the bus_setup_intr() error code.
 */
static int
ena_request_mgmnt_irq(struct ena_adapter *adapter)
{
	device_t pdev = adapter->pdev;
	struct ena_irq *irq;
	unsigned long flags;
	int rc, rcc;

	flags = RF_ACTIVE | RF_SHAREABLE;
	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
	    &irq->vector, flags);
	if (unlikely(irq->res == NULL)) {
		ena_log(pdev, ERR, "could not allocate irq vector: %d\n",
		    irq->vector);
		return (ENXIO);
	}
	rc = bus_setup_intr(adapter->pdev, irq->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt, irq->data,
	    &irq->cookie);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR,
		    "failed to register interrupt handler for irq %ju: %d\n",
		    rman_get_start(irq->res), rc);
		goto err_res_free;
	}
	irq->requested = true;
	return (rc);
err_res_free:
	ena_log(pdev, INFO, "releasing resource for irq %d\n", irq->vector);
	rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector,
	    irq->res);
	if (unlikely(rcc != 0))
		ena_log(pdev, ERR,
		    "dev has no parent while releasing res for irq: %d\n",
		    irq->vector);
	irq->res = NULL;
	return (rc);
}
/*
 * Allocate IRQ resources and attach handlers for every IO queue vector,
 * optionally binding each vector to its chosen CPU.  On any failure, all
 * vectors processed so far (including the failing one) are torn down.
 *
 * Returns 0 on success, EINVAL if MSI-X is not enabled, ENOMEM if a
 * resource could not be allocated, or a bus_setup_intr()/bus_bind_intr()
 * error code.
 */
static int
ena_request_io_irq(struct ena_adapter *adapter)
{
	device_t pdev = adapter->pdev;
	struct ena_irq *irq;
	unsigned long flags = 0;
	int rc = 0, i, rcc;

	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter))) {
		ena_log(pdev, ERR,
		    "failed to request I/O IRQ: MSI-X is not enabled\n");
		return (EINVAL);
	} else {
		flags = RF_ACTIVE | RF_SHAREABLE;
	}
	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		/* Skip vectors that are already set up. */
		if (unlikely(irq->requested))
			continue;
		irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
		    &irq->vector, flags);
		if (unlikely(irq->res == NULL)) {
			rc = ENOMEM;
			ena_log(pdev, ERR,
			    "could not allocate irq vector: %d\n", irq->vector);
			goto err;
		}
		rc = bus_setup_intr(adapter->pdev, irq->res,
		    INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL, irq->data,
		    &irq->cookie);
		if (unlikely(rc != 0)) {
			ena_log(pdev, ERR,
			    "failed to register interrupt handler for irq %ju: %d\n",
			    rman_get_start(irq->res), rc);
			goto err;
		}
		irq->requested = true;
		/* Bind only when RSS or an explicit CPU base asked for it. */
		if (adapter->rss_enabled || adapter->irq_cpu_base > ENA_BASE_CPU_UNSPECIFIED) {
			rc = bus_bind_intr(adapter->pdev, irq->res, irq->cpu);
			if (unlikely(rc != 0)) {
				ena_log(pdev, ERR,
				    "failed to bind interrupt handler for irq %ju to cpu %d: %d\n",
				    rman_get_start(irq->res), irq->cpu, rc);
				goto err;
			}
			ena_log(pdev, INFO, "queue %d - cpu %d\n",
			    i - ENA_IO_IRQ_FIRST_IDX, irq->cpu);
		}
	}
	return (rc);
err:
	/* Walk backwards from the failing vector, undoing each one. */
	for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) {
		irq = &adapter->irq_tbl[i];
		rcc = 0;
		/* Once we entered err: section and irq->requested is true we
		   free both intr and resources */
		if (irq->requested) {
			rcc = bus_teardown_intr(adapter->pdev, irq->res,
			    irq->cookie);
			if (unlikely(rcc != 0))
				ena_log(pdev, ERR,
				    "could not release irq: %d, error: %d\n",
				    irq->vector, rcc);
		}
		/* If we entered err: section without irq->requested set we know
		   it was bus_alloc_resource_any() that needs cleanup, provided
		   res is not NULL. In case res is NULL no work in needed in
		   this iteration */
		rcc = 0;
		if (irq->res != NULL) {
			rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
			    irq->vector, irq->res);
		}
		if (unlikely(rcc != 0))
			ena_log(pdev, ERR,
			    "dev has no parent while releasing res for irq: %d\n",
			    irq->vector);
		irq->requested = false;
		irq->res = NULL;
	}
	return (rc);
}
/*
 * Tear down the management interrupt handler (if installed) and release
 * its IRQ resource (if allocated).  Safe to call when neither exists.
 */
static void
ena_free_mgmnt_irq(struct ena_adapter *adapter)
{
	device_t pdev = adapter->pdev;
	struct ena_irq *irq;
	int rc;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	if (irq->requested) {
		ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector);
		rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
		if (unlikely(rc != 0))
			ena_log(pdev, ERR, "failed to tear down irq: %d\n",
			    irq->vector);
		irq->requested = 0;
	}
	if (irq->res != NULL) {
		ena_log(pdev, DBG, "release resource irq: %d\n", irq->vector);
		rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
		    irq->vector, irq->res);
		irq->res = NULL;
		if (unlikely(rc != 0))
			ena_log(pdev, ERR,
			    "dev has no parent while releasing res for irq: %d\n",
			    irq->vector);
	}
}
/*
 * Tear down all IO queue interrupt handlers and release their IRQ
 * resources.  Each entry is handled independently, so partially set up
 * tables are handled correctly.
 */
static void
ena_free_io_irq(struct ena_adapter *adapter)
{
	device_t pdev = adapter->pdev;
	struct ena_irq *irq;
	int rc;

	for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		if (irq->requested) {
			ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector);
			rc = bus_teardown_intr(adapter->pdev, irq->res,
			    irq->cookie);
			if (unlikely(rc != 0)) {
				ena_log(pdev, ERR,
				    "failed to tear down irq: %d\n",
				    irq->vector);
			}
			irq->requested = 0;
		}
		if (irq->res != NULL) {
			ena_log(pdev, DBG, "release resource irq: %d\n",
			    irq->vector);
			rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
			    irq->vector, irq->res);
			irq->res = NULL;
			if (unlikely(rc != 0)) {
				ena_log(pdev, ERR,
				    "dev has no parent while releasing res for irq: %d\n",
				    irq->vector);
			}
		}
	}
}
/*
 * Release every interrupt the driver owns: IO vectors first, then the
 * management vector, and finally the MSI-X allocation itself.  The order
 * matters - vectors must be freed before MSI-X is disabled.
 */
static void
ena_free_irqs(struct ena_adapter *adapter)
{
	ena_free_io_irq(adapter);
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
}
/*
 * Release the MSI-X allocation and the msix_entries array.  Safe to call
 * when MSI-X was never enabled (free(NULL) is a no-op).
 */
static void
ena_disable_msix(struct ena_adapter *adapter)
{
	if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
		ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);
		pci_release_msi(adapter->pdev);
	}
	adapter->msix_vecs = 0;
	free(adapter->msix_entries, M_DEVBUF);
	adapter->msix_entries = NULL;
}
/*
 * Unmask the completion interrupt of every IO queue pair so the device
 * can start raising Tx/Rx interrupts, and count each unmask in the Tx
 * ring statistics.
 */
static void
ena_unmask_all_io_irqs(struct ena_adapter *adapter)
{
	struct ena_com_io_cq *io_cq;
	struct ena_eth_io_intr_reg intr_reg;
	struct ena_ring *tx_ring;
	uint16_t ena_qid;
	int i;

	/* Unmask interrupts for all queues */
	for (i = 0; i < adapter->num_io_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
		/* Zero delay values, unmasked, no no-moderation update. */
		ena_com_update_intr_reg(&intr_reg, 0, 0, true, false);
		tx_ring = &adapter->tx_ring[i];
		counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1);
		ena_com_unmask_intr(io_cq, &intr_reg);
	}
}
/*
 * Final stage of bringing the interface up: configure RSS (when active),
 * re-apply the MTU, refill all Rx rings and reset the hardware statistics.
 *
 * Returns 0 on success or the error code of the failing step.
 */
static int
ena_up_complete(struct ena_adapter *adapter)
{
	int rc;

	if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) {
		rc = ena_rss_configure(adapter);
		if (rc != 0) {
			ena_log(adapter->pdev, ERR,
			    "Failed to configure RSS\n");
			return (rc);
		}
	}
	rc = ena_change_mtu(adapter->ifp, if_getmtu(adapter->ifp));
	if (unlikely(rc != 0))
		return (rc);
	ena_refill_all_rx_bufs(adapter);
	ena_reset_counters((counter_u64_t *)&adapter->hw_stats,
	    sizeof(adapter->hw_stats));
	return (0);
}
/* Apply the given Tx/Rx ring sizes to every IO queue pair. */
static void
set_io_rings_size(struct ena_adapter *adapter, int new_tx_size, int new_rx_size)
{
	int qid;

	for (qid = 0; qid < adapter->num_io_queues; qid++) {
		adapter->tx_ring[qid].ring_size = new_tx_size;
		adapter->rx_ring[qid].ring_size = new_rx_size;
	}
}
/*
 * Allocate Tx/Rx descriptor resources and create the device IO queues,
 * halving the ring sizes and retrying whenever an allocation fails with
 * ENOMEM.  Gives up when either ring would shrink below ENA_MIN_RING_SIZE
 * or when a non-ENOMEM error occurs.
 *
 * Returns 0 on success or the last error code otherwise.
 */
static int
create_queues_with_size_backoff(struct ena_adapter *adapter)
{
	device_t pdev = adapter->pdev;
	int rc;
	uint32_t cur_rx_ring_size, cur_tx_ring_size;
	uint32_t new_rx_ring_size, new_tx_ring_size;

	/*
	 * Current queue sizes might be set to smaller than the requested
	 * ones due to past queue allocation failures.
	 */
	set_io_rings_size(adapter, adapter->requested_tx_ring_size,
	    adapter->requested_rx_ring_size);
	while (1) {
		/* Allocate transmit descriptors */
		rc = ena_setup_all_tx_resources(adapter);
		if (unlikely(rc != 0)) {
			ena_log(pdev, ERR, "err_setup_tx\n");
			goto err_setup_tx;
		}
		/* Allocate receive descriptors */
		rc = ena_setup_all_rx_resources(adapter);
		if (unlikely(rc != 0)) {
			ena_log(pdev, ERR, "err_setup_rx\n");
			goto err_setup_rx;
		}
		/* Create IO queues for Rx & Tx */
		rc = ena_create_io_queues(adapter);
		if (unlikely(rc != 0)) {
			ena_log(pdev, ERR, "create IO queues failed\n");
			goto err_io_que;
		}
		return (0);
err_io_que:
		ena_free_all_rx_resources(adapter);
err_setup_rx:
		ena_free_all_tx_resources(adapter);
err_setup_tx:
		/*
		 * Lower the ring size if ENOMEM. Otherwise, return the
		 * error straightaway.
		 */
		if (unlikely(rc != ENOMEM)) {
			ena_log(pdev, ERR,
			    "Queue creation failed with error code: %d\n", rc);
			return (rc);
		}
		cur_tx_ring_size = adapter->tx_ring[0].ring_size;
		cur_rx_ring_size = adapter->rx_ring[0].ring_size;
		ena_log(pdev, ERR,
		    "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
		    cur_tx_ring_size, cur_rx_ring_size);
		new_tx_ring_size = cur_tx_ring_size;
		new_rx_ring_size = cur_rx_ring_size;
		/*
		 * Decrease the size of a larger queue, or decrease both if they
		 * are the same size.
		 */
		if (cur_rx_ring_size <= cur_tx_ring_size)
			new_tx_ring_size = cur_tx_ring_size / 2;
		if (cur_rx_ring_size >= cur_tx_ring_size)
			new_rx_ring_size = cur_rx_ring_size / 2;
		if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
		    new_rx_ring_size < ENA_MIN_RING_SIZE) {
			/*
			 * Trailing space added: the literals previously
			 * concatenated into "queue sizeof %d".
			 */
			ena_log(pdev, ERR,
			    "Queue creation failed with the smallest possible queue size "
			    "of %d for both queues. Not retrying with smaller queues\n",
			    ENA_MIN_RING_SIZE);
			return (rc);
		}
		ena_log(pdev, INFO,
		    "Retrying queue creation with sizes TX=%d, RX=%d\n",
		    new_tx_ring_size, new_rx_ring_size);
		set_io_rings_size(adapter, new_tx_ring_size, new_rx_ring_size);
	}
}
/*
 * Bring the interface up: set up and request the IO interrupts, create the
 * queues (with size backoff), complete RSS/MTU/refill configuration, mark
 * the interface running and unmask the IO interrupts.  Idempotent when the
 * device is already up.  Must be called with the ENA lock held.
 *
 * Returns 0 on success, ENXIO if the device is not attached, or the error
 * code of the failing step (with everything created so far torn down).
 */
int
ena_up(struct ena_adapter *adapter)
{
	int rc = 0;

	ENA_LOCK_ASSERT();
	if (unlikely(device_is_attached(adapter->pdev) == 0)) {
		ena_log(adapter->pdev, ERR, "device is not attached!\n");
		return (ENXIO);
	}
	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
		return (0);
	ena_log(adapter->pdev, INFO, "device is going UP\n");
	/* setup interrupts for IO queues */
	rc = ena_setup_io_intr(adapter);
	if (unlikely(rc != 0)) {
		ena_log(adapter->pdev, ERR, "error setting up IO interrupt\n");
		goto error;
	}
	rc = ena_request_io_irq(adapter);
	if (unlikely(rc != 0)) {
		ena_log(adapter->pdev, ERR, "err_req_irq\n");
		goto error;
	}
	ena_log(adapter->pdev, INFO,
	    "Creating %u IO queues. Rx queue size: %d, Tx queue size: %d, LLQ is %s\n",
	    adapter->num_io_queues,
	    adapter->requested_rx_ring_size,
	    adapter->requested_tx_ring_size,
	    (adapter->ena_dev->tx_mem_queue_type ==
	        ENA_ADMIN_PLACEMENT_POLICY_DEV) ? "ENABLED" : "DISABLED");
	rc = create_queues_with_size_backoff(adapter);
	if (unlikely(rc != 0)) {
		ena_log(adapter->pdev, ERR,
		    "error creating queues with size backoff\n");
		goto err_create_queues_with_backoff;
	}
	if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
		if_link_state_change(adapter->ifp, LINK_STATE_UP);
	rc = ena_up_complete(adapter);
	if (unlikely(rc != 0))
		goto err_up_complete;
	counter_u64_add(adapter->dev_stats.interface_up, 1);
	ena_update_hwassist(adapter);
	if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter);
	/* Only unmask once everything is ready to receive interrupts. */
	ena_unmask_all_io_irqs(adapter);
	return (0);
err_up_complete:
	ena_destroy_all_io_queues(adapter);
	ena_free_all_rx_resources(adapter);
	ena_free_all_tx_resources(adapter);
err_create_queues_with_backoff:
	ena_free_io_irq(adapter);
error:
	return (rc);
}
/*
 * if_get_counter callback: report interface statistics from the
 * driver-maintained hardware counters; anything we do not track is
 * delegated to the stack's default accounting.
 */
static uint64_t
ena_get_counter(if_t ifp, ift_counter cnt)
{
	struct ena_adapter *adapter = if_getsoftc(ifp);
	struct ena_hw_stats *hw_stats = &adapter->hw_stats;

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (counter_u64_fetch(hw_stats->rx_packets));
	case IFCOUNTER_OPACKETS:
		return (counter_u64_fetch(hw_stats->tx_packets));
	case IFCOUNTER_IBYTES:
		return (counter_u64_fetch(hw_stats->rx_bytes));
	case IFCOUNTER_OBYTES:
		return (counter_u64_fetch(hw_stats->tx_bytes));
	case IFCOUNTER_IQDROPS:
		return (counter_u64_fetch(hw_stats->rx_drops));
	case IFCOUNTER_OQDROPS:
		return (counter_u64_fetch(hw_stats->tx_drops));
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
/*
 * ifmedia change callback.  Always succeeds without doing anything
 * because the ENA firmware does not support media changes.
 */
static int
ena_media_change(if_t ifp)
{
	/* Media Change is not supported by firmware */
	return (0);
}
/*
 * ifmedia status callback: report link state under the ENA lock.  The
 * device does not expose real media details, so an active link is shown
 * as unknown full-duplex Ethernet.
 */
static void
ena_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct ena_adapter *adapter = if_getsoftc(ifp);

	ena_log(adapter->pdev, DBG, "Media status update\n");
	ENA_LOCK_LOCK();
	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;
	if (!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) {
		ENA_LOCK_UNLOCK();
		ena_log(adapter->pdev, INFO, "Link is down\n");
		return;
	}
	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX;
	ENA_LOCK_UNLOCK();
}
/*
 * ifnet init callback: bring the interface up under the ENA lock unless
 * it is already running.
 */
static void
ena_init(void *arg)
{
	struct ena_adapter *adapter = arg;

	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
		return;

	ENA_LOCK_LOCK();
	ena_up(adapter);
	ENA_LOCK_UNLOCK();
}
/*
 * ifnet ioctl handler.  Requests that toggle the running state (MTU,
 * flags, capabilities) are serialized against each other by taking the
 * ENA lock around the down/up sequence; everything else is delegated to
 * ifmedia_ioctl() or ether_ioctl().
 *
 * Returns 0 on success or an errno from the delegated handler / ena_up().
 */
static int
ena_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct ena_adapter *adapter;
	struct ifreq *ifr;
	int rc;

	adapter = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;

	/*
	 * Acquiring lock to prevent from running up and down routines parallel.
	 */
	rc = 0;
	switch (command) {
	case SIOCSIFMTU:
		if (if_getmtu(ifp) == ifr->ifr_mtu)
			break;
		/* MTU change requires a full restart of the interface. */
		ENA_LOCK_LOCK();
		ena_down(adapter);
		ena_change_mtu(ifp, ifr->ifr_mtu);
		rc = ena_up(adapter);
		ENA_LOCK_UNLOCK();
		break;
	case SIOCSIFFLAGS:
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				/* Promisc/allmulti are not programmable; just log. */
				if ((if_getflags(ifp) & (IFF_PROMISC |
				    IFF_ALLMULTI)) != 0) {
					ena_log(adapter->pdev, INFO,
					    "ioctl promisc/allmulti\n");
				}
			} else {
				ENA_LOCK_LOCK();
				rc = ena_up(adapter);
				ENA_LOCK_UNLOCK();
			}
		} else {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				ENA_LOCK_LOCK();
				ena_down(adapter);
				ENA_LOCK_UNLOCK();
			}
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		int reinit = 0;

		if (ifr->ifr_reqcap != if_getcapenable(ifp)) {
			if_setcapenable(ifp, ifr->ifr_reqcap);
			reinit = 1;
		}
		/* Capability changes take effect on the next up. */
		if ((reinit != 0) &&
		    ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) {
			ENA_LOCK_LOCK();
			ena_down(adapter);
			rc = ena_up(adapter);
			ENA_LOCK_UNLOCK();
		}
	}
	break;
	default:
		rc = ether_ioctl(ifp, command, data);
		break;
	}
	return (rc);
}
static int
ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat)
{
int caps = 0;
if ((feat->offload.tx &
(ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK |
ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0)
caps |= IFCAP_TXCSUM;
if ((feat->offload.tx &
(ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK |
ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0)
caps |= IFCAP_TXCSUM_IPV6;
if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0)
caps |= IFCAP_TSO4;
if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0)
caps |= IFCAP_TSO6;
if ((feat->offload.rx_supported &
(ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK |
ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0)
caps |= IFCAP_RXCSUM;
if ((feat->offload.rx_supported &
ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0)
caps |= IFCAP_RXCSUM_IPV6;
caps |= IFCAP_LRO | IFCAP_JUMBO_MTU;
return (caps);
}
/*
 * Advertise the interface's capability mask to the device through the
 * host info structure.
 */
static void
ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp)
{
	host_info->supported_network_features[0] = (uint32_t)if_getcapabilities(ifp);
}
/*
 * Rebuild the ifnet hwassist (CSUM_*) flags from the currently enabled
 * interface capabilities and the Tx offloads the device supports.
 */
static void
ena_update_hwassist(struct ena_adapter *adapter)
{
	if_t ifp = adapter->ifp;
	uint32_t tx_feat = adapter->tx_offload_cap;
	int enabled = if_getcapenable(ifp);
	int hwassist = 0;

	if_clearhwassist(ifp);

	if ((enabled & IFCAP_TXCSUM) != 0) {
		if ((tx_feat &
		    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0)
			hwassist |= CSUM_IP;
		if ((tx_feat &
		    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
		    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0)
			hwassist |= CSUM_IP_UDP | CSUM_IP_TCP;
	}
	if ((enabled & IFCAP_TXCSUM_IPV6) != 0)
		hwassist |= CSUM_IP6_UDP | CSUM_IP6_TCP;
	if ((enabled & IFCAP_TSO4) != 0)
		hwassist |= CSUM_IP_TSO;
	if ((enabled & IFCAP_TSO6) != 0)
		hwassist |= CSUM_IP6_TSO;

	if_sethwassistbits(ifp, hwassist, 0);
}
-static int
+static void
ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter,
struct ena_com_dev_get_features_ctx *feat)
{
if_t ifp;
int caps = 0;
ifp = adapter->ifp = if_gethandle(IFT_ETHER);
- if (unlikely(ifp == NULL)) {
- ena_log(pdev, ERR, "can not allocate ifnet structure\n");
- return (ENXIO);
- }
if_initname(ifp, device_get_name(pdev), device_get_unit(pdev));
if_setdev(ifp, pdev);
if_setsoftc(ifp, adapter);
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setinitfn(ifp, ena_init);
if_settransmitfn(ifp, ena_mq_start);
if_setqflushfn(ifp, ena_qflush);
if_setioctlfn(ifp, ena_ioctl);
if_setgetcounterfn(ifp, ena_get_counter);
if_setsendqlen(ifp, adapter->requested_tx_ring_size);
if_setsendqready(ifp);
if_setmtu(ifp, ETHERMTU);
if_setbaudrate(ifp, 0);
/* Zeroize capabilities... */
if_setcapabilities(ifp, 0);
if_setcapenable(ifp, 0);
/* check hardware support */
caps = ena_get_dev_offloads(feat);
/* ... and set them */
if_setcapabilitiesbit(ifp, caps, 0);
/* TSO parameters */
if_sethwtsomax(ifp, ENA_TSO_MAXSIZE -
(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
if_sethwtsomaxsegcount(ifp, adapter->max_tx_sgl_size - 1);
if_sethwtsomaxsegsize(ifp, ENA_TSO_MAXSIZE);
if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
if_setcapenable(ifp, if_getcapabilities(ifp));
/*
* Specify the media types supported by this adapter and register
* callbacks to update media and link information
*/
ifmedia_init(&adapter->media, IFM_IMASK, ena_media_change,
ena_media_status);
ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
ether_ifattach(ifp, adapter->mac_addr);
-
- return (0);
}
/*
 * Bring the interface down: clear the running state, release the IO
 * interrupts, optionally reset the device (when a reset was requested),
 * destroy the IO queues and free every buffer and descriptor resource.
 * Idempotent when the device is already down.  Must be called with the
 * ENA lock held.
 */
void
ena_down(struct ena_adapter *adapter)
{
	int rc;

	ENA_LOCK_ASSERT();
	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
		return;
	ena_log(adapter->pdev, INFO, "device is going DOWN\n");
	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter);
	if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
	ena_free_io_irq(adapter);
	if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) {
		rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
		if (unlikely(rc != 0))
			ena_log(adapter->pdev, ERR, "Device reset failed\n");
	}
	ena_destroy_all_io_queues(adapter);
	ena_free_all_tx_bufs(adapter);
	ena_free_all_rx_bufs(adapter);
	ena_free_all_tx_resources(adapter);
	ena_free_all_rx_resources(adapter);
	counter_u64_add(adapter->dev_stats.interface_down, 1);
}
/*
 * Compute the maximum number of usable IO queue pairs as the minimum of:
 * device SQ/CQ limits (from either the extended or legacy queue feature),
 * the CPU count, the driver limit, the available MSI-X vectors (minus the
 * management vector) and, with RSS, the number of RSS buckets.
 */
static uint32_t
ena_calc_max_io_queue_num(device_t pdev, struct ena_com_dev *ena_dev,
    struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;

	/* Regular queues capabilities */
	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
		    &get_feat_ctx->max_queue_ext.max_queue_ext;
		io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
		    max_queue_ext->max_rx_cq_num);
		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
		    &get_feat_ctx->max_queues;
		io_tx_sq_num = max_queues->max_sq_num;
		io_tx_cq_num = max_queues->max_cq_num;
		io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
	}
	/* In case of LLQ use the llq fields for the tx SQ/CQ */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
	max_num_io_queues = min_t(uint32_t, mp_ncpus, ENA_MAX_NUM_IO_QUEUES);
	max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_rx_num);
	max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_sq_num);
	max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_cq_num);
	/* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */
	max_num_io_queues = min_t(uint32_t, max_num_io_queues,
	    pci_msix_count(pdev) - 1);
#ifdef RSS
	max_num_io_queues = min_t(uint32_t, max_num_io_queues,
	    rss_getnumbuckets());
#endif
	return (max_num_io_queues);
}
/*
 * Map a resource as write-combining to speed up burst writes into the
 * LLQ memory BAR.  Only implemented for x86 and arm64.
 *
 * Returns 0 on success, the pmap_change_attr() error on failure, or
 * EOPNOTSUPP on architectures without WC support.
 *
 * Restructured with #if/#else so that the EOPNOTSUPP return is no
 * longer unreachable dead code on the supported architectures.
 */
static int
ena_enable_wc(device_t pdev, struct resource *res)
{
#if defined(__i386) || defined(__amd64) || defined(__aarch64__)
	vm_offset_t va;
	vm_size_t len;
	int rc;

	va = (vm_offset_t)rman_get_virtual(res);
	len = rman_get_size(res);
	/* Enable write combining */
	rc = pmap_change_attr(va, len, VM_MEMATTR_WRITE_COMBINING);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "pmap_change_attr failed, %d\n", rc);
		return (rc);
	}

	return (0);
#else
	/* Write combining is not supported on this architecture. */
	return (EOPNOTSUPP);
#endif
}
/*
 * Choose whether Tx descriptors live in host memory or in the device's
 * LLQ memory.  Falls back to host placement when LLQ is unsupported,
 * when the memory BAR is missing, or when device-mode configuration
 * fails.  Always returns 0.
 */
static int
ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev,
    struct ena_admin_feature_llq_desc *llq,
    struct ena_llq_configurations *llq_default_configurations)
{
	int rc;

	if ((ena_dev->supported_features & (1U << ENA_ADMIN_LLQ)) == 0) {
		ena_log(pdev, WARN,
		    "LLQ is not supported. Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return (0);
	}

	if (ena_dev->mem_bar == NULL) {
		ena_log(pdev, WARN,
		    "LLQ is advertised as supported but device doesn't expose mem bar.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return (0);
	}

	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
	if (unlikely(rc != 0)) {
		ena_log(pdev, WARN,
		    "Failed to configure the device mode. "
		    "Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	}

	return (0);
}
/*
 * Allocate and map the LLQ memory BAR (with write combining) and hand
 * its virtual address to the ena_com layer.  A missing BAR is not
 * fatal: the driver simply runs without LLQ.
 */
static int
ena_map_llq_mem_bar(device_t pdev, struct ena_com_dev *ena_dev)
{
	struct ena_adapter *adapter = device_get_softc(pdev);
	int error, bar_rid;

	/* Try to allocate resources for LLQ bar */
	bar_rid = PCIR_BAR(ENA_MEM_BAR);
	adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
	    &bar_rid, RF_ACTIVE);
	if (unlikely(adapter->memory == NULL)) {
		ena_log(pdev, WARN,
		    "Unable to allocate LLQ bar resource. LLQ mode won't be used.\n");
		return (0);
	}

	/* Enable write combining for better LLQ performance */
	error = ena_enable_wc(adapter->pdev, adapter->memory);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "failed to enable write combining.\n");
		return (error);
	}

	/*
	 * Save virtual address of the device's memory region
	 * for the ena_com layer.
	 */
	ena_dev->mem_bar = rman_get_virtual(adapter->memory);

	return (0);
}
static inline void
set_default_llq_configurations(struct ena_llq_configurations *llq_config,
struct ena_admin_feature_llq_desc *llq)
{
llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
llq_config->llq_num_decs_before_header =
ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) !=
0 && ena_force_large_llq_header) {
llq_config->llq_ring_entry_size =
ENA_ADMIN_LIST_ENTRY_SIZE_256B;
llq_config->llq_ring_entry_size_value = 256;
} else {
llq_config->llq_ring_entry_size =
ENA_ADMIN_LIST_ENTRY_SIZE_128B;
llq_config->llq_ring_entry_size_value = 128;
}
}
/*
 * Compute the initial and maximum Tx/Rx ring sizes and the maximum
 * scatter-gather list lengths from the device feature descriptors in
 * ctx->get_feat_ctx.  Results are returned through ctx.  Always
 * returns 0.
 */
static int
ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
{
	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
	struct ena_com_dev *ena_dev = ctx->ena_dev;
	uint32_t tx_queue_size = ENA_DEFAULT_RING_SIZE;
	uint32_t rx_queue_size = ENA_DEFAULT_RING_SIZE;
	uint32_t max_tx_queue_size;
	uint32_t max_rx_queue_size;

	/*
	 * Read the depth limits from whichever feature descriptor the device
	 * supports: the extended (MAX_QUEUES_EXT) or the legacy one.
	 */
	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
		    &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
		max_rx_queue_size = min_t(uint32_t,
		    max_queue_ext->max_rx_cq_depth,
		    max_queue_ext->max_rx_sq_depth);
		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;

		/* With LLQ, the Tx SQ depth is bounded by the LLQ depth. */
		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV)
			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
			    llq->max_llq_depth);
		else
			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
			    max_queue_ext->max_tx_sq_depth);

		ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
		    max_queue_ext->max_per_packet_tx_descs);
		ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
		    max_queue_ext->max_per_packet_rx_descs);
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
		    &ctx->get_feat_ctx->max_queues;
		max_rx_queue_size = min_t(uint32_t, max_queues->max_cq_depth,
		    max_queues->max_sq_depth);
		max_tx_queue_size = max_queues->max_cq_depth;

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV)
			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
			    llq->max_llq_depth);
		else
			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
			    max_queues->max_sq_depth);

		ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
		    max_queues->max_packet_tx_descs);
		ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
		    max_queues->max_packet_rx_descs);
	}

	/* round down to the nearest power of 2 */
	max_tx_queue_size = 1 << (flsl(max_tx_queue_size) - 1);
	max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1);

	/*
	 * When forcing large headers, we multiply the entry size by 2,
	 * and therefore divide the queue size by 2, leaving the amount
	 * of memory used by the queues unchanged.
	 */
	if (ena_force_large_llq_header) {
		if ((llq->entry_size_ctrl_supported &
		    ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0 &&
		    ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size /= 2;
			ena_log(ctx->pdev, INFO,
			    "Forcing large headers and decreasing maximum Tx queue size to %d\n",
			    max_tx_queue_size);
		} else {
			ena_log(ctx->pdev, WARN,
			    "Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
		}
	}

	/* Clamp the default sizes to HW limits, then round to powers of 2. */
	tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
	    max_tx_queue_size);
	rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
	    max_rx_queue_size);

	tx_queue_size = 1 << (flsl(tx_queue_size) - 1);
	rx_queue_size = 1 << (flsl(rx_queue_size) - 1);

	ctx->max_tx_queue_size = max_tx_queue_size;
	ctx->max_rx_queue_size = max_rx_queue_size;
	ctx->tx_queue_size = tx_queue_size;
	ctx->rx_queue_size = rx_queue_size;

	return (0);
}
/*
 * Allocate and populate the host info structure and push it to the
 * device.  Best effort: on any failure the host info is deleted and the
 * driver continues without it.
 *
 * Fix: use bounded snprintf() instead of sprintf()/strncpy() so the
 * fixed-size kernel_ver_str/os_dist_str fields can neither overflow nor
 * be left without NUL-termination.
 */
static void
ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev)
{
	struct ena_admin_host_info *host_info;
	uintptr_t rid;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (unlikely(rc != 0)) {
		ena_log(dev, ERR, "Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	if (pci_get_id(dev, PCI_ID_RID, &rid) == 0)
		host_info->bdf = rid;
	host_info->os_type = ENA_ADMIN_OS_FREEBSD;
	host_info->kernel_ver = osreldate;

	/* Bounded formatting: guaranteed to fit and NUL-terminate. */
	snprintf(host_info->kernel_ver_str, sizeof(host_info->kernel_ver_str),
	    "%d", osreldate);
	host_info->os_dist = 0;
	snprintf(host_info->os_dist_str, sizeof(host_info->os_dist_str),
	    "%s", osrelease);

	host_info->driver_version = (ENA_DRV_MODULE_VER_MAJOR) |
	    (ENA_DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
	    (ENA_DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
	host_info->num_cpus = mp_ncpus;
	host_info->driver_supported_features =
	    ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
	    ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;

	rc = ena_com_set_host_attributes(ena_dev);
	if (unlikely(rc != 0)) {
		/* Unsupported on some devices: warn instead of erroring. */
		if (rc == EOPNOTSUPP)
			ena_log(dev, WARN, "Cannot set host attributes\n");
		else
			ena_log(dev, ERR, "Cannot set host attributes\n");
		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}
/*
 * Bring up the ena_com (HW abstraction) layer: reset the device,
 * initialize the admin queue in polling mode, read the device features
 * into *get_feat_ctx, configure the AENQ groups and the Tx placement
 * policy.  *wd_active reports whether the keep-alive watchdog should
 * run.  Returns 0 or an errno; on failure all partially initialized
 * ena_com state is torn down via the error labels.
 */
static int
ena_device_init(struct ena_adapter *adapter, device_t pdev,
    struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active)
{
	struct ena_llq_configurations llq_config;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	bool readless_supported;
	uint32_t aenq_groups;
	int dma_width;
	int rc;

	rc = ena_com_mmio_reg_read_request_init(ena_dev);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "failed to init mmio read less\n");
		return (rc);
	}

	/*
	 * The PCIe configuration space revision id indicate if mmio reg
	 * read is disabled
	 */
	readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ);
	ena_com_set_mmio_read_mode(ena_dev, readless_supported);

	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Can not reset device\n");
		goto err_mmio_read_less;
	}

	rc = ena_com_validate_version(ena_dev);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "device version is too low\n");
		goto err_mmio_read_less;
	}

	/* Negative value doubles as an error code here. */
	dma_width = ena_com_get_dma_width(ena_dev);
	if (unlikely(dma_width < 0)) {
		ena_log(pdev, ERR, "Invalid dma width value %d", dma_width);
		rc = dma_width;
		goto err_mmio_read_less;
	}
	adapter->dma_width = dma_width;

	/* ENA admin level init */
	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR,
		    "Can not initialize ena admin queue with device\n");
		goto err_mmio_read_less;
	}

	/*
	 * To enable the msix interrupts the driver needs to know the number
	 * of queues. So the driver uses polling mode to retrieve this
	 * information
	 */
	ena_com_set_admin_polling_mode(ena_dev, true);

	ena_config_host_info(ena_dev, pdev);

	/* Get Device Attributes */
	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR,
		    "Cannot get attribute for ena device rc: %d\n", rc);
		goto err_admin_init;
	}

	/* Subscribe only to the AENQ events both the driver and HW support. */
	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
	    BIT(ENA_ADMIN_FATAL_ERROR) |
	    BIT(ENA_ADMIN_WARNING) |
	    BIT(ENA_ADMIN_NOTIFICATION) |
	    BIT(ENA_ADMIN_KEEP_ALIVE);

	aenq_groups &= get_feat_ctx->aenq.supported_groups;
	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Cannot configure aenq groups rc: %d\n", rc);
		goto err_admin_init;
	}

	/* The watchdog runs only when the device sends keep-alive events. */
	*wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));

	set_default_llq_configurations(&llq_config, &get_feat_ctx->llq);

	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
	    &llq_config);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Failed to set placement policy\n");
		goto err_admin_init;
	}

	return (0);

err_admin_init:
	ena_com_delete_host_info(ena_dev);
	ena_com_admin_destroy(ena_dev);
err_mmio_read_less:
	ena_com_mmio_reg_read_request_destroy(ena_dev);

	return (rc);
}
static int
ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
int rc;
rc = ena_enable_msix(adapter);
if (unlikely(rc != 0)) {
ena_log(adapter->pdev, ERR, "Error with MSI-X enablement\n");
return (rc);
}
ena_setup_mgmnt_intr(adapter);
rc = ena_request_mgmnt_irq(adapter);
if (unlikely(rc != 0)) {
ena_log(adapter->pdev, ERR, "Cannot setup mgmnt queue intr\n");
goto err_disable_msix;
}
ena_com_set_admin_polling_mode(ena_dev, false);
ena_com_admin_aenq_enable(ena_dev);
return (0);
err_disable_msix:
ena_disable_msix(adapter);
return (rc);
}
/* Function called on ENA_ADMIN_KEEP_ALIVE event */
static void
ena_keep_alive_wd(void *adapter_data, struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_keep_alive_desc *desc =
	    (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
	uint64_t drops;

	/* The 64-bit drop counters arrive split into two 32-bit halves. */
	drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
	counter_u64_zero(adapter->hw_stats.rx_drops);
	counter_u64_add(adapter->hw_stats.rx_drops, drops);

	drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
	counter_u64_zero(adapter->hw_stats.tx_drops);
	counter_u64_add(adapter->hw_stats.tx_drops, drops);

	/* Refresh the watchdog timestamp with release semantics. */
	atomic_store_rel_64(&adapter->keep_alive_timestamp, getsbinuptime());
}
/* Check for keep alive expiration */
static void
check_for_missing_keep_alive(struct ena_adapter *adapter)
{
	sbintime_t last, elapsed;

	/* Skip when the watchdog is disabled or has no timeout configured. */
	if (adapter->wd_active == 0)
		return;
	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
		return;

	last = atomic_load_acq_64(&adapter->keep_alive_timestamp);
	elapsed = getsbinuptime() - last;
	if (unlikely(elapsed > adapter->keep_alive_timeout)) {
		ena_log(adapter->pdev, ERR, "Keep alive watchdog timeout.\n");
		counter_u64_add(adapter->dev_stats.wd_expired, 1);
		ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
	}
}
/* Check if admin queue is enabled */
static void
check_for_admin_com_state(struct ena_adapter *adapter)
{
if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) == false)) {
ena_log(adapter->pdev, ERR,
"ENA admin queue is not in running state!\n");
counter_u64_add(adapter->dev_stats.admin_q_pause, 1);
ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO);
}
}
/*
 * Detect a stuck Rx interrupt: the completion queue has entries but no
 * interrupt was ever delivered for this ring.  After
 * ENA_MAX_NO_INTERRUPT_ITERATIONS consecutive detections a device reset
 * is triggered.  Returns EIO when the reset was scheduled, 0 otherwise.
 */
static int
check_for_rx_interrupt_queue(struct ena_adapter *adapter,
    struct ena_ring *rx_ring)
{
	/* An interrupt has been seen at least once: nothing to check. */
	if (likely(atomic_load_8(&rx_ring->first_interrupt)))
		return (0);

	/* Empty CQ means there is simply nothing to be notified about. */
	if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
		return (0);

	rx_ring->no_interrupt_event_cnt++;

	if (rx_ring->no_interrupt_event_cnt ==
	    ENA_MAX_NO_INTERRUPT_ITERATIONS) {
		ena_log(adapter->pdev, ERR,
		    "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
		    rx_ring->qid);
		ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
		return (EIO);
	}

	return (0);
}
/*
 * Scan one Tx ring for packets whose completion did not arrive within
 * adapter->missing_tx_timeout.  Triggers a device reset and returns EIO
 * either when an MSI-X delivery problem is suspected (no interrupt ever
 * seen after twice the timeout) or when the number of missed
 * completions exceeds adapter->missing_tx_threshold; returns 0
 * otherwise.
 */
static int
check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
    struct ena_ring *tx_ring)
{
	device_t pdev = adapter->pdev;
	struct bintime curtime, time;
	struct ena_tx_buffer *tx_buf;
	int time_since_last_cleanup;
	int missing_tx_comp_to;
	sbintime_t time_offset;
	uint32_t missed_tx = 0;
	int i, rc = 0;

	getbinuptime(&curtime);

	for (i = 0; i < tx_ring->ring_size; i++) {
		tx_buf = &tx_ring->tx_buffer_info[i];

		/* Slots without a timestamp have no packet in flight. */
		if (bintime_isset(&tx_buf->timestamp) == 0)
			continue;

		/* Age of this in-flight packet. */
		time = curtime;
		bintime_sub(&time, &tx_buf->timestamp);
		time_offset = bttosbt(time);

		if (unlikely(!atomic_load_8(&tx_ring->first_interrupt) &&
		    time_offset > 2 * adapter->missing_tx_timeout)) {
			/*
			 * If after graceful period interrupt is still not
			 * received, we schedule a reset.
			 */
			ena_log(pdev, ERR,
			    "Potential MSIX issue on Tx side Queue = %d. "
			    "Reset the device\n",
			    tx_ring->qid);
			ena_trigger_reset(adapter,
			    ENA_REGS_RESET_MISS_INTERRUPT);
			return (EIO);
		}

		/* Check again if packet is still waiting */
		if (unlikely(time_offset > adapter->missing_tx_timeout)) {
			/* Warn only once per buffer to avoid log flooding. */
			if (tx_buf->print_once) {
				time_since_last_cleanup = TICKS_2_MSEC(ticks -
				    tx_ring->tx_last_cleanup_ticks);
				missing_tx_comp_to = sbttoms(
				    adapter->missing_tx_timeout);
				ena_log(pdev, WARN,
				    "Found a Tx that wasn't completed on time, qid %d, index %d. "
				    "%d msecs have passed since last cleanup. Missing Tx timeout value %d msecs.\n",
				    tx_ring->qid, i, time_since_last_cleanup,
				    missing_tx_comp_to);
			}

			tx_buf->print_once = false;
			missed_tx++;
		}
	}

	/* Too many missed completions: ask for a device reset. */
	if (unlikely(missed_tx > adapter->missing_tx_threshold)) {
		ena_log(pdev, ERR,
		    "The number of lost tx completion is above the threshold "
		    "(%d > %d). Reset the device\n",
		    missed_tx, adapter->missing_tx_threshold);
		ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
		rc = EIO;
	}

	counter_u64_add(tx_ring->tx_stats.missing_tx_comp, missed_tx);

	return (rc);
}
/*
 * Check for Tx packets which were not completed on time.
 * The timeout is defined by "missing_tx_timeout".
 * A device reset will be performed if the number of incomplete
 * transactions exceeds "missing_tx_threshold".
 */
static void
check_for_missing_completions(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	struct ena_ring *rx_ring;
	int i, budget, rc;

	/* Make sure the driver doesn't turn the device in other process */
	rmb();

	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
		return;
	if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
		return;
	if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT)
		return;

	/* Examine at most missing_tx_max_queues queue pairs per invocation. */
	budget = adapter->missing_tx_max_queues;

	/* Resume the scan where the previous invocation left off. */
	for (i = adapter->next_monitored_tx_qid; i < adapter->num_io_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		rx_ring = &adapter->rx_ring[i];

		rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
		if (unlikely(rc != 0))
			return;

		rc = check_for_rx_interrupt_queue(adapter, rx_ring);
		if (unlikely(rc != 0))
			return;

		budget--;
		if (budget == 0) {
			/* Out of budget: remember the next queue to scan. */
			i++;
			break;
		}
	}

	adapter->next_monitored_tx_qid = i % adapter->num_io_queues;
}
/* trigger rx cleanup after 2 consecutive detections */
#define EMPTY_RX_REFILL 2
/* Handle the rare case where the device runs out of Rx descriptors and the
 * msix handler fails to refill new ones (for example, due to a lack of
 * memory).
 * This case leads to a deadlock:
 * the device won't raise interrupts because every new Rx packet is dropped,
 * and the msix handler won't run to post new Rx descriptors, so the device
 * can never deliver packets to the host again.
 *
 * When such a situation is detected, the rx cleanup task is executed in
 * another thread to break the cycle.
 */
static void
check_for_empty_rx_ring(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, refill_required;

	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
		return;
	if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
		return;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rx_ring = &adapter->rx_ring[i];

		refill_required = ena_com_free_q_entries(
		    rx_ring->ena_com_io_sq);
		/* Ring is completely out of posted Rx descriptors. */
		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
			rx_ring->empty_rx_queue++;

			/* React only after EMPTY_RX_REFILL consecutive hits. */
			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
				counter_u64_add(rx_ring->rx_stats.empty_rx_ring,
				    1);

				ena_log(adapter->pdev, WARN,
				    "Rx ring %d is stalled. Triggering the refill function\n",
				    i);

				taskqueue_enqueue(rx_ring->que->cleanup_tq,
				    &rx_ring->que->cleanup_task);
				rx_ring->empty_rx_queue = 0;
			}
		} else {
			rx_ring->empty_rx_queue = 0;
		}
	}
}
/*
 * Apply device-provided tuning hints (delivered via an AENQ
 * notification) to the driver and ena_com timeouts.  A hint value of 0
 * means "leave unchanged"; ENA_HW_HINTS_NO_TIMEOUT disables the given
 * watchdog.
 */
static void
ena_update_hints(struct ena_adapter *adapter,
    struct ena_admin_ena_hw_hints *hints)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;

	if (hints->admin_completion_tx_timeout != 0) {
		ena_dev->admin_queue.completion_timeout =
		    hints->admin_completion_tx_timeout * 1000;
	}

	if (hints->mmio_read_timeout != 0) {
		/* convert to usec */
		ena_dev->mmio_read.reg_read_to =
		    hints->mmio_read_timeout * 1000;
	}

	if (hints->missed_tx_completion_count_threshold_to_reset != 0) {
		adapter->missing_tx_threshold =
		    hints->missed_tx_completion_count_threshold_to_reset;
	}

	if (hints->missing_tx_completion_timeout != 0) {
		adapter->missing_tx_timeout =
		    (hints->missing_tx_completion_timeout ==
		    ENA_HW_HINTS_NO_TIMEOUT) ? ENA_HW_HINTS_NO_TIMEOUT :
		    SBT_1MS * hints->missing_tx_completion_timeout;
	}

	if (hints->driver_watchdog_timeout != 0) {
		adapter->keep_alive_timeout =
		    (hints->driver_watchdog_timeout ==
		    ENA_HW_HINTS_NO_TIMEOUT) ? ENA_HW_HINTS_NO_TIMEOUT :
		    SBT_1MS * hints->driver_watchdog_timeout;
	}
}
/**
 * ena_copy_eni_metrics - Get and copy ENI metrics from the HW.
 * @adapter: ENA device adapter
 *
 * Returns 0 on success, EOPNOTSUPP if current HW doesn't support those metrics
 * and other error codes on failure.
 *
 * This function can possibly cause a race with other calls to the admin queue.
 * Because of that, the caller should either lock this function or make sure
 * that there is no race in the current context.
 */
static int
ena_copy_eni_metrics(struct ena_adapter *adapter)
{
	static bool print_once = true;
	int rc;

	rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_metrics);
	if (rc == 0)
		return (rc);

	if (rc == ENA_COM_UNSUPPORTED) {
		/* Warn loudly only the first time; use DBG level afterwards. */
		if (print_once) {
			ena_log(adapter->pdev, WARN,
			    "Retrieving ENI metrics is not supported.\n");
			print_once = false;
		} else {
			ena_log(adapter->pdev, DBG,
			    "Retrieving ENI metrics is not supported.\n");
		}
	} else {
		ena_log(adapter->pdev, ERR,
		    "Failed to get ENI metrics: %d\n", rc);
	}

	return (rc);
}
/*
 * Fetch the ENA SRD info snapshot from the device into
 * adapter->ena_srd_info.  Returns the ena_com status code.
 */
static int
ena_copy_srd_metrics(struct ena_adapter *adapter)
{
	/* style(9): parenthesize return values, matching the rest of file. */
	return (ena_com_get_ena_srd_info(adapter->ena_dev,
	    &adapter->ena_srd_info));
}
/*
 * Fetch all device-supported customer metrics into the adapter's
 * preallocated customer_metrics_array.  Returns the ena_com status.
 */
static int
ena_copy_customer_metrics(struct ena_adapter *adapter)
{
	struct ena_com_dev *dev = adapter->ena_dev;
	int buf_len;

	/* Each supported metric is reported as a 64-bit value. */
	buf_len = ena_com_get_customer_metric_count(dev) * sizeof(u64);

	/* Fill the data buffer */
	return (ena_com_get_customer_metrics(dev,
	    (char *)(adapter->customer_metrics_array), buf_len));
}
/*
 * Periodic (once per second) watchdog callout: checks keep-alive, admin
 * queue health, missing Tx/Rx completions and stalled Rx rings,
 * periodically schedules the metrics refresh task, updates host info,
 * and kicks off the reset task when a reset was requested.
 */
static void
ena_timer_service(void *data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;
	struct ena_admin_host_info *host_info =
	    adapter->ena_dev->host_attr.host_info;

	check_for_missing_keep_alive(adapter);

	check_for_admin_com_state(adapter);

	check_for_missing_completions(adapter);

	check_for_empty_rx_ring(adapter);

	/*
	 * User controller update of the ENA metrics.
	 * If the delay was set to 0, then the stats shouldn't be updated at
	 * all.
	 * Otherwise, wait 'metrics_sample_interval' seconds, before
	 * updating stats.
	 * As timer service is executed every second, it's enough to increment
	 * appropriate counter each time the timer service is executed.
	 */
	if ((adapter->metrics_sample_interval != 0) &&
	    (++adapter->metrics_sample_interval_cnt >=
	    adapter->metrics_sample_interval)) {
		taskqueue_enqueue(adapter->metrics_tq, &adapter->metrics_task);
		adapter->metrics_sample_interval_cnt = 0;
	}

	if (host_info != NULL)
		ena_update_host_info(host_info, adapter->ifp);

	if (unlikely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
		/*
		 * Timeout when validating version indicates that the device
		 * became unresponsive. If that happens skip the reset and
		 * reschedule timer service, so the reset can be retried later.
		 */
		if (ena_com_validate_version(adapter->ena_dev) ==
		    ENA_COM_TIMER_EXPIRED) {
			ena_log(adapter->pdev, WARN,
			    "FW unresponsive, skipping reset\n");
			ENA_TIMER_RESET(adapter);
			return;
		}
		ena_log(adapter->pdev, WARN, "Trigger reset is on\n");
		taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task);
		return;
	}

	/*
	 * Schedule another timeout one second from now.
	 */
	ENA_TIMER_RESET(adapter);
}
/*
 * Tear down the device in preparation for a reset (or detach); the
 * inverse of ena_restore_device().  When "graceful" is false the link
 * is forced down and the admin queue is marked not-running first.
 * Safe to call when the device is already destroyed.
 */
void
ena_destroy_device(struct ena_adapter *adapter, bool graceful)
{
	if_t ifp = adapter->ifp;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	bool dev_up;

	if (!ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter))
		return;

	if (!graceful)
		if_link_state_change(ifp, LINK_STATE_DOWN);

	ENA_TIMER_DRAIN(adapter);

	/* Remember whether to bring the interface back up after restore. */
	dev_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
	if (dev_up)
		ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);

	if (!graceful)
		ena_com_set_admin_running_state(ena_dev, false);

	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
		ena_down(adapter);

	/*
	 * Stop the device from sending AENQ events (if the device was up, and
	 * the trigger reset was on, ena_down already performs device reset)
	 */
	if (!(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter) && dev_up))
		ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);

	ena_free_mgmnt_irq(adapter);

	ena_disable_msix(adapter);

	/*
	 * IO rings resources should be freed because `ena_restore_device()`
	 * calls (not directly) `ena_enable_msix()`, which re-allocates MSIX
	 * vectors. The amount of MSIX vectors after destroy-restore may be
	 * different than before. Therefore, IO rings resources should be
	 * established from scratch each time.
	 */
	ena_free_all_io_rings_resources(adapter);

	ena_com_abort_admin_commands(ena_dev);

	ena_com_wait_for_abort_completion(ena_dev);

	ena_com_admin_destroy(ena_dev);

	ena_com_mmio_reg_read_request_destroy(ena_dev);

	adapter->reset_reason = ENA_REGS_RESET_NORMAL;

	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
}
/*
 * After a device reset, verify that the re-read device attributes are
 * still compatible with the configuration the driver is using: the MAC
 * address must be unchanged and the device max MTU must still cover the
 * configured interface MTU.
 *
 * Returns 0 when the parameters match, EINVAL otherwise.
 */
static int
ena_device_validate_params(struct ena_adapter *adapter,
    struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr,
	    ETHER_ADDR_LEN) != 0) {
		ena_log(adapter->pdev, ERR, "Error, mac addresses differ\n");
		return (EINVAL);
	}

	if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) {
		ena_log(adapter->pdev, ERR,
		    "Error, device max mtu is smaller than ifp MTU\n");
		return (EINVAL);
	}

	/* style(9): parenthesize the return value like the rest of the file. */
	return (0);
}
/*
 * Re-initialize the device after ena_destroy_device(): redo the ena_com
 * init, validate that the device parameters are still compatible,
 * re-enable MSI-X/admin interrupts and, if the interface was up before
 * the reset, bring it up again.  Returns 0 on success or an errno.
 */
int
ena_restore_device(struct ena_adapter *adapter)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	if_t ifp = adapter->ifp;
	device_t dev = adapter->pdev;
	int wd_active;
	int rc;

	ENA_FLAG_SET_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);

	rc = ena_device_init(adapter, dev, &get_feat_ctx, &wd_active);
	if (rc != 0) {
		ena_log(dev, ERR, "Cannot initialize device\n");
		goto err;
	}

	/*
	 * Only enable WD if it was enabled before reset, so it won't override
	 * value set by the user by the sysctl.
	 */
	if (adapter->wd_active != 0)
		adapter->wd_active = wd_active;

	rc = ena_device_validate_params(adapter, &get_feat_ctx);
	if (rc != 0) {
		ena_log(dev, ERR, "Validation of device parameters failed\n");
		goto err_device_destroy;
	}

	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
	/* Make sure we don't have a race with AENQ Links state handler */
	if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
		if_link_state_change(ifp, LINK_STATE_UP);

	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
	if (rc != 0) {
		ena_log(dev, ERR, "Enable MSI-X failed\n");
		goto err_device_destroy;
	}

	/*
	 * Effective value of used MSIX vectors should be the same as before
	 * `ena_destroy_device()`, if possible, or closest to it if less vectors
	 * are available.
	 */
	if ((adapter->msix_vecs - ENA_ADMIN_MSIX_VEC) < adapter->num_io_queues)
		adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;

	/* Re-initialize rings basic information */
	ena_init_io_rings(adapter);

	/* If the interface was up before the reset bring it up */
	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) {
		rc = ena_up(adapter);
		if (rc != 0) {
			ena_log(dev, ERR, "Failed to create I/O queues\n");
			goto err_disable_msix;
		}
	}

	/* Indicate that device is running again and ready to work */
	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);

	/*
	 * As the AENQ handlers weren't executed during reset because
	 * the flag ENA_FLAG_DEVICE_RUNNING was turned off, the
	 * timestamp must be updated again That will prevent next reset
	 * caused by missing keep alive.
	 */
	adapter->keep_alive_timestamp = getsbinuptime();
	ENA_TIMER_RESET(adapter);

	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);

	return (rc);

err_disable_msix:
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_device_destroy:
	ena_com_abort_admin_commands(ena_dev);
	ena_com_wait_for_abort_completion(ena_dev);
	ena_com_admin_destroy(ena_dev);
	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
	ena_com_mmio_reg_read_request_destroy(ena_dev);
err:
	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
	ena_log(dev, ERR, "Reset attempt failed. Can not reset the device\n");

	return (rc);
}
/*
 * Taskqueue handler refreshing the hardware metrics snapshots under the
 * global ENA lock.  Customer metrics take precedence over the legacy
 * ENI stats; SRD info is gathered independently of both.
 */
static void
ena_metrics_task(void *arg, int pending)
{
	struct ena_adapter *adapter = (struct ena_adapter *)arg;
	struct ena_com_dev *ena_dev = adapter->ena_dev;

	ENA_LOCK_LOCK();

	if (ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS))
		(void)ena_copy_customer_metrics(adapter);
	else if (ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS))
		(void)ena_copy_eni_metrics(adapter);

	if (ena_com_get_cap(ena_dev, ENA_ADMIN_ENA_SRD_INFO))
		(void)ena_copy_srd_metrics(adapter);

	ENA_LOCK_UNLOCK();
}
/*
 * Taskqueue handler performing a full device reset (destroy + restore)
 * under the global ENA lock, but only while the trigger-reset flag is
 * still set.
 */
static void
ena_reset_task(void *arg, int pending)
{
	struct ena_adapter *adapter = (struct ena_adapter *)arg;

	ENA_LOCK_LOCK();
	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
		/* The request was withdrawn before the task ran. */
		ENA_LOCK_UNLOCK();
		return;
	}

	ena_destroy_device(adapter, false);
	ena_restore_device(adapter);
	ena_log(adapter->pdev, INFO,
	    "Device reset completed successfully, Driver info: %s\n",
	    ena_version);
	ENA_LOCK_UNLOCK();
}
/* Release the adapter's counter(9)-based statistics sets. */
static void
ena_free_stats(struct ena_adapter *adapter)
{
	ena_free_counters((counter_u64_t *)&adapter->dev_stats,
	    sizeof(struct ena_stats_dev));
	ena_free_counters((counter_u64_t *)&adapter->hw_stats,
	    sizeof(struct ena_hw_stats));
}
/**
* ena_attach - Device Initialization Routine
* @pdev: device information struct
*
* Returns 0 on success, otherwise on failure.
*
* ena_attach initializes an adapter identified by a device structure.
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
**/
static int
ena_attach(device_t pdev)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
	static int version_printed;
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev = NULL;
	uint32_t max_num_io_queues;
	int msix_rid;
	int rid, rc;

	adapter = device_get_softc(pdev);
	adapter->pdev = pdev;
	adapter->first_bind = -1;

	/*
	 * Set up the timer service - driver is responsible for avoiding
	 * concurrency, as the callout won't be using any locking inside.
	 */
	ENA_TIMER_INIT(adapter);
	adapter->keep_alive_timeout = ENA_DEFAULT_KEEP_ALIVE_TO;
	adapter->missing_tx_timeout = ENA_DEFAULT_TX_CMP_TO;
	adapter->missing_tx_max_queues = ENA_DEFAULT_TX_MONITORED_QUEUES;
	adapter->missing_tx_threshold = ENA_DEFAULT_TX_CMP_THRESHOLD;

	adapter->irq_cpu_base = ENA_BASE_CPU_UNSPECIFIED;
	adapter->irq_cpu_stride = 0;

#ifdef RSS
	adapter->rss_enabled = 1;
#endif

	if (version_printed++ == 0)
		ena_log(pdev, INFO, "%s\n", ena_version);

	/* Allocate memory for ena_dev structure */
	ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF,
	    M_WAITOK | M_ZERO);

	adapter->ena_dev = ena_dev;
	ena_dev->dmadev = pdev;

	rid = PCIR_BAR(ENA_REG_BAR);
	adapter->memory = NULL;
	adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (unlikely(adapter->registers == NULL)) {
		ena_log(pdev, ERR,
		    "unable to allocate bus resource: registers!\n");
		rc = ENOMEM;
		goto err_dev_free;
	}

	/* MSIx vector table may reside on BAR0 with registers or on BAR1. */
	msix_rid = pci_msix_table_bar(pdev);
	if (msix_rid != rid) {
		adapter->msix = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
		    &msix_rid, RF_ACTIVE);
		if (unlikely(adapter->msix == NULL)) {
			ena_log(pdev, ERR,
			    "unable to allocate bus resource: msix!\n");
			rc = ENOMEM;
			goto err_pci_free;
		}
		adapter->msix_rid = msix_rid;
	}

	ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF,
	    M_WAITOK | M_ZERO);

	/* Store register resources */
	((struct ena_bus *)(ena_dev->bus))->reg_bar_t = rman_get_bustag(
	    adapter->registers);
	((struct ena_bus *)(ena_dev->bus))->reg_bar_h = rman_get_bushandle(
	    adapter->registers);

	if (unlikely(((struct ena_bus *)(ena_dev->bus))->reg_bar_h == 0)) {
		ena_log(pdev, ERR, "failed to pmap registers bar\n");
		rc = ENXIO;
		goto err_bus_free;
	}

	/* Optional LLQ memory BAR; missing BAR just disables LLQ. */
	rc = ena_map_llq_mem_bar(pdev, ena_dev);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Failed to map ENA mem bar");
		goto err_bus_free;
	}

	/* Initially clear all the flags */
	ENA_FLAG_ZERO(adapter);

	/* Device initialization */
	rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "ENA device init failed! (err: %d)\n", rc);
		rc = ENXIO;
		goto err_bus_free;
	}

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		adapter->disable_meta_caching = !!(
		    get_feat_ctx.llq.accel_mode.u.get.supported_flags &
		    BIT(ENA_ADMIN_DISABLE_META_CACHING));

	adapter->keep_alive_timestamp = getsbinuptime();

	adapter->tx_offload_cap = get_feat_ctx.offload.tx;

	memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr,
	    ETHER_ADDR_LEN);

	calc_queue_ctx.pdev = pdev;
	calc_queue_ctx.ena_dev = ena_dev;
	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;

	/* Calculate initial and maximum IO queue number and size */
	max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev,
	    &get_feat_ctx);
	rc = ena_calc_io_queue_size(&calc_queue_ctx);
	/* NOTE(review): max_num_io_queues is unsigned, so "<= 0" is "== 0". */
	if (unlikely((rc != 0) || (max_num_io_queues <= 0))) {
		rc = EFAULT;
		goto err_com_free;
	}

	adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
	adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;

	adapter->max_num_io_queues = max_num_io_queues;

	adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE;

	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;

	adapter->reset_reason = ENA_REGS_RESET_NORMAL;

	/* set up dma tags for rx and tx buffers */
	rc = ena_setup_tx_dma_tag(adapter);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Failed to create TX DMA tag\n");
		goto err_com_free;
	}

	rc = ena_setup_rx_dma_tag(adapter);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Failed to create RX DMA tag\n");
		goto err_tx_tag_free;
	}

	/*
	 * The amount of requested MSIX vectors is equal to
	 * adapter::max_num_io_queues (see `ena_enable_msix()`), plus a constant
	 * number of admin queue interrupts. The former is initially determined
	 * by HW capabilities (see `ena_calc_max_io_queue_num())` but may not be
	 * achieved if there are not enough system resources. By default, the
	 * number of effectively used IO queues is the same but later on it can
	 * be limited by the user using sysctl interface.
	 */
	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR,
		    "Failed to enable and set the admin interrupts\n");
		goto err_io_free;
	}
	/* By default all of allocated MSIX vectors are actively used */
	adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;

	/* initialize rings basic information */
	ena_init_io_rings(adapter);

	rc = ena_com_allocate_customer_metrics_buffer(ena_dev);
	if (rc) {
		ena_log(pdev, ERR, "Failed to allocate customer metrics buffer.\n");
		goto err_msix_free;
	}

	rc = ena_sysctl_allocate_customer_metrics_buffer(adapter);
	if (unlikely(rc)){
		ena_log(pdev, ERR, "Failed to allocate sysctl customer metrics buffer.\n");
		goto err_metrics_buffer_destroy;
	}

	/* Initialize statistics */
	ena_alloc_counters((counter_u64_t *)&adapter->dev_stats,
	    sizeof(struct ena_stats_dev));
	ena_alloc_counters((counter_u64_t *)&adapter->hw_stats,
	    sizeof(struct ena_hw_stats));
	ena_sysctl_add_nodes(adapter);

	/* setup network interface */
	ena_setup_ifnet(pdev, adapter, &get_feat_ctx);

	/* Initialize reset task queue */
	TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter);
	adapter->reset_tq = taskqueue_create("ena_reset_enqueue",
	    M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq);
	taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET, "%s rstq",
	    device_get_nameunit(adapter->pdev));

	/* Initialize metrics task queue */
	TASK_INIT(&adapter->metrics_task, 0, ena_metrics_task, adapter);
	adapter->metrics_tq = taskqueue_create("ena_metrics_enqueue",
	    M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->metrics_tq);
	taskqueue_start_threads(&adapter->metrics_tq, 1, PI_NET, "%s metricsq",
	    device_get_nameunit(adapter->pdev));

#ifdef DEV_NETMAP
	rc = ena_netmap_attach(adapter);
	if (rc != 0) {
		ena_log(pdev, ERR, "netmap attach failed: %d\n", rc);
		goto err_detach;
	}
#endif /* DEV_NETMAP */

	/* Tell the stack that the interface is not active */
	if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);

	/* Run the timer service */
	ENA_TIMER_RESET(adapter);

	return (0);

	/* Error labels unwind in strict reverse order of acquisition. */
#ifdef DEV_NETMAP
err_detach:
	ether_ifdetach(adapter->ifp);
#endif /* DEV_NETMAP */
	free(adapter->customer_metrics_array, M_DEVBUF);
err_metrics_buffer_destroy:
	ena_com_delete_customer_metrics_buffer(ena_dev);
err_msix_free:
	ena_free_stats(adapter);
	ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR);
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_io_free:
	ena_free_all_io_rings_resources(adapter);
	ena_free_rx_dma_tag(adapter);
err_tx_tag_free:
	ena_free_tx_dma_tag(adapter);
err_com_free:
	ena_com_admin_destroy(ena_dev);
	ena_com_delete_host_info(ena_dev);
	ena_com_mmio_reg_read_request_destroy(ena_dev);
err_bus_free:
	free(ena_dev->bus, M_DEVBUF);
err_pci_free:
	ena_free_pci_resources(adapter);
err_dev_free:
	free(ena_dev, M_DEVBUF);

	return (rc);
}
/**
 * ena_detach - Device Removal Routine
 * @pdev: device information struct
 *
 * ena_detach is called by the device subsystem to alert the driver
 * that it should release a PCI device.
 *
 * Teardown is the reverse of ena_attach: unhook from the network stack,
 * stop deferred work, bring the device down, then release DMA/IRQ/PCI
 * resources and finally the ifnet and ena_com device itself.
 **/
static int
ena_detach(device_t pdev)
{
    struct ena_adapter *adapter = device_get_softc(pdev);
    struct ena_com_dev *ena_dev = adapter->ena_dev;
    int rc;

    /* Make sure VLANS are not using driver */
    if (if_vlantrunkinuse(adapter->ifp)) {
        ena_log(adapter->pdev, ERR, "VLAN is in use, detach first\n");
        return (EBUSY);
    }

    /* Detach from the stack before tearing anything else down. */
    ether_ifdetach(adapter->ifp);

    /* Stop timer service */
    ENA_LOCK_LOCK();
    ENA_TIMER_DRAIN(adapter);
    ENA_LOCK_UNLOCK();

    /*
     * Release metrics task.  The cancel-or-drain loop guarantees the
     * task is neither queued nor running when the queue is freed.
     */
    while (taskqueue_cancel(adapter->metrics_tq, &adapter->metrics_task, NULL))
        taskqueue_drain(adapter->metrics_tq, &adapter->metrics_task);
    taskqueue_free(adapter->metrics_tq);

    /* Release reset task (same cancel-or-drain pattern). */
    while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL))
        taskqueue_drain(adapter->reset_tq, &adapter->reset_task);
    taskqueue_free(adapter->reset_tq);

    /* Quiesce the hardware under the global driver lock. */
    ENA_LOCK_LOCK();
    ena_down(adapter);
    ena_destroy_device(adapter, true);
    ENA_LOCK_UNLOCK();

    /* Restore unregistered sysctl queue nodes. */
    ena_sysctl_update_queue_node_nb(adapter, adapter->num_io_queues,
        adapter->max_num_io_queues);

#ifdef DEV_NETMAP
    netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */

    ena_free_stats(adapter);

    /* DMA tag teardown can fail if mappings are still outstanding. */
    rc = ena_free_rx_dma_tag(adapter);
    if (unlikely(rc != 0))
        ena_log(adapter->pdev, WARN,
            "Unmapped RX DMA tag associations\n");
    rc = ena_free_tx_dma_tag(adapter);
    if (unlikely(rc != 0))
        ena_log(adapter->pdev, WARN,
            "Unmapped TX DMA tag associations\n");

    ena_free_irqs(adapter);
    ena_free_pci_resources(adapter);

    if (adapter->rss_indir != NULL)
        free(adapter->rss_indir, M_DEVBUF);
    if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter)))
        ena_com_rss_destroy(ena_dev);

    ena_com_delete_host_info(ena_dev);
    free(adapter->customer_metrics_array, M_DEVBUF);
    ena_com_delete_customer_metrics_buffer(ena_dev);

    if_free(adapter->ifp);
    free(ena_dev->bus, M_DEVBUF);
    free(ena_dev, M_DEVBUF);

    return (bus_generic_detach(pdev));
}
/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
/**
 * ena_update_on_link_change:
 * Notify the network interface about the change in link status
 **/
static void
ena_update_on_link_change(void *adapter_data,
    struct ena_admin_aenq_entry *aenq_e)
{
    struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
    struct ena_admin_aenq_link_change_desc *aenq_desc;
    int status;
    if_t ifp;

    aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
    ifp = adapter->ifp;
    status = aenq_desc->flags &
        ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;

    if (status != 0) {
        ena_log(adapter->pdev, INFO, "link is UP\n");
        ENA_FLAG_SET_ATOMIC(ENA_FLAG_LINK_UP, adapter);
        /*
         * Do not report link-up to the stack while a reset is in
         * flight; the reset path re-evaluates link state itself.
         */
        if (!ENA_FLAG_ISSET(ENA_FLAG_ONGOING_RESET, adapter))
            if_link_state_change(ifp, LINK_STATE_UP);
    } else {
        ena_log(adapter->pdev, INFO, "link is DOWN\n");
        /* Link-down is always propagated, even during a reset. */
        if_link_state_change(ifp, LINK_STATE_DOWN);
        ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter);
    }
}
static void
ena_notification(void *adapter_data, struct ena_admin_aenq_entry *aenq_e)
{
struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
struct ena_admin_ena_hw_hints *hints;
ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
adapter->ena_dev, "Invalid group(%x) expected %x\n",
aenq_e->aenq_common_desc.group, ENA_ADMIN_NOTIFICATION);
switch (aenq_e->aenq_common_desc.syndrome) {
case ENA_ADMIN_UPDATE_HINTS:
hints =
(struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4);
ena_update_hints(adapter, hints);
break;
default:
ena_log(adapter->pdev, ERR,
"Invalid aenq notification link state %d\n",
aenq_e->aenq_common_desc.syndrome);
}
}
/* Initialize the global driver lock early in boot (SI_SUB_LOCK). */
static void
ena_lock_init(void *arg)
{
    ENA_LOCK_INIT();
}
SYSINIT(ena_lock_init, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_init, NULL);
/* Destroy the global driver lock at shutdown, mirroring ena_lock_init(). */
static void
ena_lock_uninit(void *arg)
{
    ENA_LOCK_DESTROY();
}
SYSUNINIT(ena_lock_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_uninit, NULL);
/**
 * This handler will called for unknown event group or unimplemented handlers
 **/
static void
unimplemented_aenq_handler(void *adapter_data,
    struct ena_admin_aenq_entry *aenq_e)
{
    struct ena_adapter *sc = (struct ena_adapter *)adapter_data;

    ena_log(sc->pdev, ERR,
        "Unknown event was received or event with unimplemented handler\n");
}
/*
 * Dispatch table mapping AENQ event groups to their handlers; events with
 * no entry fall through to unimplemented_aenq_handler().
 */
static struct ena_aenq_handlers aenq_handlers = {
    .handlers = {
        [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
        [ENA_ADMIN_NOTIFICATION] = ena_notification,
        [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
    },
    .unimplemented_handler = unimplemented_aenq_handler
};
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

/* Device interface */
static device_method_t ena_methods[] = {
    DEVMETHOD(device_probe, ena_probe),
    DEVMETHOD(device_attach, ena_attach),
    DEVMETHOD(device_detach, ena_detach),

    DEVMETHOD_END
};
/* newbus driver description: name, method table, per-device softc size. */
static driver_t ena_driver = {
    "ena",
    ena_methods,
    sizeof(struct ena_adapter),
};
/* Register the driver with the PCI bus and publish PNP match data. */
DRIVER_MODULE(ena, pci, ena_driver, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array,
    nitems(ena_vendor_info_array) - 1);
/* Module dependencies. */
MODULE_DEPEND(ena, pci, 1, 1, 1);
MODULE_DEPEND(ena, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ena, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
/*********************************************************************/
diff --git a/sys/dev/et/if_et.c b/sys/dev/et/if_et.c
index e6d73ab3c860..dedd2218b4a8 100644
--- a/sys/dev/et/if_et.c
+++ b/sys/dev/et/if_et.c
@@ -1,2736 +1,2731 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2007 Sepherosa Ziehau. All rights reserved.
*
* This code is derived from software contributed to The DragonFly Project
* by Sepherosa Ziehau <sepherosa@gmail.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name of The DragonFly Project nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific, prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>
#include <machine/bus.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/et/if_etreg.h>
#include <dev/et/if_etvar.h>
#include "miibus_if.h"
MODULE_DEPEND(et, pci, 1, 1, 1);
MODULE_DEPEND(et, ether, 1, 1, 1);
MODULE_DEPEND(et, miibus, 1, 1, 1);
/* Tunables. */
/* Set hw.et.msi_disable=1 in loader.conf to force legacy INTx. */
static int msi_disable = 0;
TUNABLE_INT("hw.et.msi_disable", &msi_disable);
/* Checksum-offload kinds this controller can compute on TX. */
#define ET_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
/* Forward declarations for all file-local (static) functions. */
static int et_probe(device_t);
static int et_attach(device_t);
static int et_detach(device_t);
static int et_shutdown(device_t);
static int et_suspend(device_t);
static int et_resume(device_t);
static int et_miibus_readreg(device_t, int, int);
static int et_miibus_writereg(device_t, int, int, int);
static void et_miibus_statchg(device_t);
static void et_init_locked(struct et_softc *);
static void et_init(void *);
static int et_ioctl(if_t, u_long, caddr_t);
static void et_start_locked(if_t);
static void et_start(if_t);
static int et_watchdog(struct et_softc *);
static int et_ifmedia_upd_locked(if_t);
static int et_ifmedia_upd(if_t);
static void et_ifmedia_sts(if_t, struct ifmediareq *);
static uint64_t et_get_counter(if_t, ift_counter);
static void et_add_sysctls(struct et_softc *);
static int et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
static int et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);
static void et_intr(void *);
static void et_rxeof(struct et_softc *);
static void et_txeof(struct et_softc *);
static int et_dma_alloc(struct et_softc *);
static void et_dma_free(struct et_softc *);
static void et_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int et_dma_ring_alloc(struct et_softc *, bus_size_t, bus_size_t,
    bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *,
    const char *);
static void et_dma_ring_free(struct et_softc *, bus_dma_tag_t *, uint8_t **,
    bus_dmamap_t, bus_addr_t *);
static void et_init_tx_ring(struct et_softc *);
static int et_init_rx_ring(struct et_softc *);
static void et_free_tx_ring(struct et_softc *);
static void et_free_rx_ring(struct et_softc *);
static int et_encap(struct et_softc *, struct mbuf **);
static int et_newbuf_cluster(struct et_rxbuf_data *, int);
static int et_newbuf_hdr(struct et_rxbuf_data *, int);
static void et_rxbuf_discard(struct et_rxbuf_data *, int);
static void et_stop(struct et_softc *);
static int et_chip_init(struct et_softc *);
static void et_chip_attach(struct et_softc *);
static void et_init_mac(struct et_softc *);
static void et_init_rxmac(struct et_softc *);
static void et_init_txmac(struct et_softc *);
static int et_init_rxdma(struct et_softc *);
static int et_init_txdma(struct et_softc *);
static int et_start_rxdma(struct et_softc *);
static int et_start_txdma(struct et_softc *);
static int et_stop_rxdma(struct et_softc *);
static int et_stop_txdma(struct et_softc *);
static void et_reset(struct et_softc *);
static int et_bus_config(struct et_softc *);
static void et_get_eaddr(device_t, uint8_t[]);
static void et_setmulti(struct et_softc *);
static void et_tick(void *);
static void et_stats_update(struct et_softc *);
/* PCI ID table: supported Agere ET1310 variants; NULL desc terminates. */
static const struct et_dev {
    uint16_t vid;
    uint16_t did;
    const char *desc;
} et_devices[] = {
    { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
      "Agere ET1310 Gigabit Ethernet" },
    { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
      "Agere ET1310 Fast Ethernet" },
    { 0, 0, NULL }
};
/* newbus method table: device lifecycle plus MII bus accessors. */
static device_method_t et_methods[] = {
    DEVMETHOD(device_probe, et_probe),
    DEVMETHOD(device_attach, et_attach),
    DEVMETHOD(device_detach, et_detach),
    DEVMETHOD(device_shutdown, et_shutdown),
    DEVMETHOD(device_suspend, et_suspend),
    DEVMETHOD(device_resume, et_resume),
    DEVMETHOD(miibus_readreg, et_miibus_readreg),
    DEVMETHOD(miibus_writereg, et_miibus_writereg),
    DEVMETHOD(miibus_statchg, et_miibus_statchg),
    DEVMETHOD_END
};
/* newbus driver description for the et(4) driver. */
static driver_t et_driver = {
    "et",
    et_methods,
    sizeof(struct et_softc)
};
DRIVER_MODULE(et, pci, et_driver, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, et, et_devices,
    nitems(et_devices) - 1);
DRIVER_MODULE(miibus, et, miibus_driver, 0, 0);
/* Interrupt-moderation defaults, overridable via loader tunables below. */
static int et_rx_intr_npkts = 32;
static int et_rx_intr_delay = 20; /* x10 usec */
static int et_tx_intr_nsegs = 126;
static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */
TUNABLE_INT("hw.et.timer", &et_timer);
TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
/*
 * Match the PCI vendor/device pair against the et_devices table and,
 * on a hit, set the device description.
 */
static int
et_probe(device_t dev)
{
    const struct et_dev *ent;
    uint16_t device, vendor;

    vendor = pci_get_vendor(dev);
    device = pci_get_device(dev);
    for (ent = et_devices; ent->desc != NULL; ent++) {
        if (ent->vid == vendor && ent->did == device) {
            device_set_desc(dev, ent->desc);
            return (BUS_PROBE_DEFAULT);
        }
    }
    return (ENXIO);
}
/*
 * Attach routine: allocate PCI/IRQ resources, read the station address,
 * reset the chip, allocate DMA rings/maps, attach MII PHYs, then hook
 * the interface into the network stack and wire the interrupt handler.
 * On any failure the shared fail: path calls et_detach() to unwind.
 */
static int
et_attach(device_t dev)
{
    struct et_softc *sc;
    if_t ifp;
    uint8_t eaddr[ETHER_ADDR_LEN];
    uint32_t pmcfg;
    int cap, error, msic;

    sc = device_get_softc(dev);
    sc->dev = dev;
    mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
        MTX_DEF);
    callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
    /* if_alloc() cannot fail, hence the removed NULL check below. */
    ifp = sc->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
- error = ENOSPC;
- goto fail;
- }

    /*
     * Initialize tunables
     */
    sc->sc_rx_intr_npkts = et_rx_intr_npkts;
    sc->sc_rx_intr_delay = et_rx_intr_delay;
    sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
    sc->sc_timer = et_timer;

    /* Enable bus mastering */
    pci_enable_busmaster(dev);

    /*
     * Allocate IO memory
     */
    sc->sc_mem_rid = PCIR_BAR(0);
    sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &sc->sc_mem_rid, RF_ACTIVE);
    if (sc->sc_mem_res == NULL) {
        device_printf(dev, "can't allocate IO memory\n");
        /*
         * NOTE(review): returning here bypasses the fail: path, so
         * the ifnet and mutex allocated above are not released —
         * consider goto fail instead; verify against upstream.
         */
        return (ENXIO);
    }

    /* Probe PCIe capability and MSI support. */
    msic = 0;
    if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) {
        sc->sc_expcap = cap;
        sc->sc_flags |= ET_FLAG_PCIE;
        msic = pci_msi_count(dev);
        if (bootverbose)
            device_printf(dev, "MSI count: %d\n", msic);
    }
    if (msic > 0 && msi_disable == 0) {
        msic = 1;
        if (pci_alloc_msi(dev, &msic) == 0) {
            if (msic == 1) {
                device_printf(dev, "Using %d MSI message\n",
                    msic);
                sc->sc_flags |= ET_FLAG_MSI;
            } else
                pci_release_msi(dev);
        }
    }

    /*
     * Allocate IRQ
     */
    if ((sc->sc_flags & ET_FLAG_MSI) == 0) {
        /* Legacy INTx: rid 0, shareable. */
        sc->sc_irq_rid = 0;
        sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE);
    } else {
        /* MSI vectors start at rid 1. */
        sc->sc_irq_rid = 1;
        sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &sc->sc_irq_rid, RF_ACTIVE);
    }
    if (sc->sc_irq_res == NULL) {
        device_printf(dev, "can't allocate irq\n");
        error = ENXIO;
        goto fail;
    }

    if (pci_get_device(dev) == PCI_PRODUCT_LUCENT_ET1310_FAST)
        sc->sc_flags |= ET_FLAG_FASTETHER;

    error = et_bus_config(sc);
    if (error)
        goto fail;

    et_get_eaddr(dev, eaddr);

    /* Take PHY out of COMA and enable clocks. */
    pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
    if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
        pmcfg |= EM_PM_GIGEPHY_ENB;
    CSR_WRITE_4(sc, ET_PM, pmcfg);

    et_reset(sc);

    error = et_dma_alloc(sc);
    if (error)
        goto fail;

    /* Populate ifnet callbacks and capabilities. */
    if_setsoftc(ifp, sc);
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
    if_setinitfn(ifp, et_init);
    if_setioctlfn(ifp, et_ioctl);
    if_setstartfn(ifp, et_start);
    if_setgetcounterfn(ifp, et_get_counter);
    if_setcapabilities(ifp, IFCAP_TXCSUM | IFCAP_VLAN_MTU);
    if_setcapenable(ifp, if_getcapabilities(ifp));
    if_setsendqlen(ifp, ET_TX_NDESC - 1);
    if_setsendqready(ifp);

    et_chip_attach(sc);

    error = mii_attach(dev, &sc->sc_miibus, ifp, et_ifmedia_upd,
        et_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
        MIIF_DOPAUSE);
    if (error) {
        device_printf(dev, "attaching PHYs failed\n");
        goto fail;
    }

    ether_ifattach(ifp, eaddr);

    /* Tell the upper layer(s) we support long frames. */
    if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

    error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
        NULL, et_intr, sc, &sc->sc_irq_handle);
    if (error) {
        /* Must undo ether_ifattach() before the generic unwind. */
        ether_ifdetach(ifp);
        device_printf(dev, "can't setup intr\n");
        goto fail;
    }

    et_add_sysctls(sc);

    return (0);
fail:
    et_detach(dev);
    return (error);
}
/*
 * Detach routine; also used as the unwind path from et_attach(), so every
 * release below is guarded by a NULL/flag check for partially-attached state.
 */
static int
et_detach(device_t dev)
{
    struct et_softc *sc;

    sc = device_get_softc(dev);
    if (device_is_attached(dev)) {
        ether_ifdetach(sc->ifp);
        ET_LOCK(sc);
        et_stop(sc);
        ET_UNLOCK(sc);
        /* Wait for the tick callout to finish before freeing state. */
        callout_drain(&sc->sc_tick);
    }

    if (sc->sc_miibus != NULL)
        device_delete_child(dev, sc->sc_miibus);
    bus_generic_detach(dev);

    /* Tear down the interrupt before releasing its resource. */
    if (sc->sc_irq_handle != NULL)
        bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
    if (sc->sc_irq_res != NULL)
        bus_release_resource(dev, SYS_RES_IRQ,
            rman_get_rid(sc->sc_irq_res), sc->sc_irq_res);
    if ((sc->sc_flags & ET_FLAG_MSI) != 0)
        pci_release_msi(dev);
    if (sc->sc_mem_res != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            rman_get_rid(sc->sc_mem_res), sc->sc_mem_res);

    if (sc->ifp != NULL)
        if_free(sc->ifp);

    et_dma_free(sc);
    mtx_destroy(&sc->sc_mtx);

    return (0);
}
/* Quiesce the controller at system shutdown. */
static int
et_shutdown(device_t dev)
{
    struct et_softc *sc = device_get_softc(dev);

    ET_LOCK(sc);
    et_stop(sc);
    ET_UNLOCK(sc);
    return (0);
}
/*
 * miibus read method: read PHY register @reg of PHY @phy via the MII
 * management interface.  Returns the register value, or 0 on timeout
 * (miibus has no way to report errors from this method).
 */
static int
et_miibus_readreg(device_t dev, int phy, int reg)
{
    struct et_softc *sc;
    uint32_t val;
    int i, ret;

    sc = device_get_softc(dev);
    /* Stop any pending operations */
    CSR_WRITE_4(sc, ET_MII_CMD, 0);

    val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
    val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
    CSR_WRITE_4(sc, ET_MII_ADDR, val);

    /* Start reading */
    CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);
#define NRETRY 50
    /* Poll until the management interface is idle and the value valid. */
    for (i = 0; i < NRETRY; ++i) {
        val = CSR_READ_4(sc, ET_MII_IND);
        if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
            break;
        DELAY(50);
    }
    if (i == NRETRY) {
        if_printf(sc->ifp,
            "read phy %d, reg %d timed out\n", phy, reg);
        ret = 0;
        goto back;
    }
#undef NRETRY

    val = CSR_READ_4(sc, ET_MII_STAT);
    ret = val & ET_MII_STAT_VALUE_MASK;

back:
    /* Make sure that the current operation is stopped */
    CSR_WRITE_4(sc, ET_MII_CMD, 0);
    return (ret);
}
/*
 * miibus write method: write @val0 to PHY register @reg of PHY @phy.
 * Always returns 0; a timeout is logged and followed by a dummy read
 * to resynchronize the management interface.
 */
static int
et_miibus_writereg(device_t dev, int phy, int reg, int val0)
{
    struct et_softc *sc;
    uint32_t val;
    int i;

    sc = device_get_softc(dev);
    /* Stop any pending operations */
    CSR_WRITE_4(sc, ET_MII_CMD, 0);

    val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
    val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
    CSR_WRITE_4(sc, ET_MII_ADDR, val);

    /* Start writing */
    CSR_WRITE_4(sc, ET_MII_CTRL,
        (val0 << ET_MII_CTRL_VALUE_SHIFT) & ET_MII_CTRL_VALUE_MASK);

#define NRETRY 100
    /* Poll until the write completes. */
    for (i = 0; i < NRETRY; ++i) {
        val = CSR_READ_4(sc, ET_MII_IND);
        if ((val & ET_MII_IND_BUSY) == 0)
            break;
        DELAY(50);
    }
    if (i == NRETRY) {
        if_printf(sc->ifp,
            "write phy %d, reg %d timed out\n", phy, reg);
        /* Dummy read to flush the stuck operation. */
        et_miibus_readreg(dev, phy, reg);
    }
#undef NRETRY

    /* Make sure that the current operation is stopped */
    CSR_WRITE_4(sc, ET_MII_CMD, 0);
    return (0);
}
/*
 * miibus status-change method: reprogram the MACs whenever the PHY
 * reports a new link state, speed, or duplex, then re-enable TX/RX.
 */
static void
et_miibus_statchg(device_t dev)
{
    struct et_softc *sc;
    struct mii_data *mii;
    if_t ifp;
    uint32_t cfg1, cfg2, ctrl;
    int i;

    sc = device_get_softc(dev);
    mii = device_get_softc(sc->sc_miibus);
    ifp = sc->ifp;
    if (mii == NULL || ifp == NULL ||
        (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
        return;

    /* Re-derive link state; 1000baseT only counts on gigabit parts. */
    sc->sc_flags &= ~ET_FLAG_LINK;
    if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
        (IFM_ACTIVE | IFM_AVALID)) {
        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        case IFM_10_T:
        case IFM_100_TX:
            sc->sc_flags |= ET_FLAG_LINK;
            break;
        case IFM_1000_T:
            if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
                sc->sc_flags |= ET_FLAG_LINK;
            break;
        }
    }

    /* XXX Stop TX/RX MAC? */
    if ((sc->sc_flags & ET_FLAG_LINK) == 0)
        return;

    /* Program MACs with resolved speed/duplex/flow-control. */
    ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
    ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
    cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
    cfg1 &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
        ET_MAC_CFG1_LOOPBACK);
    cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
    cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
        ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
    cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
        ((7 << ET_MAC_CFG2_PREAMBLE_LEN_SHIFT) &
        ET_MAC_CFG2_PREAMBLE_LEN_MASK);

    /* GMII for gigabit, MII otherwise. */
    if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
        cfg2 |= ET_MAC_CFG2_MODE_GMII;
    else {
        cfg2 |= ET_MAC_CFG2_MODE_MII;
        ctrl |= ET_MAC_CTRL_MODE_MII;
    }

    if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
        cfg2 |= ET_MAC_CFG2_FDX;
        /*
         * Controller lacks automatic TX pause frame
         * generation so it should be handled by driver.
         * Even though driver can send pause frame with
         * arbitrary pause time, controller does not
         * provide a way that tells how many free RX
         * buffers are available in controller.  This
         * limitation makes it hard to generate XON frame
         * in time on driver side so don't enable TX flow
         * control.
         */
#ifdef notyet
        if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE)
            cfg1 |= ET_MAC_CFG1_TXFLOW;
#endif
        if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE)
            cfg1 |= ET_MAC_CFG1_RXFLOW;
    } else
        ctrl |= ET_MAC_CTRL_GHDX;

    CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
    CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
    cfg1 |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
    CSR_WRITE_4(sc, ET_MAC_CFG1, cfg1);

#define NRETRY 50
    /* Wait for the MAC to acknowledge TX/RX enable. */
    for (i = 0; i < NRETRY; ++i) {
        cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
        if ((cfg1 & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
            (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
            break;
        DELAY(100);
    }
    if (i == NRETRY)
        if_printf(ifp, "can't enable RX/TX\n");
    sc->sc_flags |= ET_FLAG_TXRX_ENABLED;
#undef NRETRY
}
/* Change media, with the softc lock held: reset all PHYs, then renegotiate. */
static int
et_ifmedia_upd_locked(if_t ifp)
{
    struct et_softc *sc = if_getsoftc(ifp);
    struct mii_data *mii = device_get_softc(sc->sc_miibus);
    struct mii_softc *phy;

    LIST_FOREACH(phy, &mii->mii_phys, mii_list)
        PHY_RESET(phy);
    return (mii_mediachg(mii));
}
/* Locked wrapper around et_ifmedia_upd_locked(). */
static int
et_ifmedia_upd(if_t ifp)
{
    struct et_softc *sc = if_getsoftc(ifp);
    int error;

    ET_LOCK(sc);
    error = et_ifmedia_upd_locked(ifp);
    ET_UNLOCK(sc);
    return (error);
}
/*
 * Report current media status to ifconfig(8).  Skips the PHY poll when
 * the interface is administratively down.
 */
static void
et_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
    struct et_softc *sc;
    struct mii_data *mii;

    sc = if_getsoftc(ifp);
    ET_LOCK(sc);
    if ((if_getflags(ifp) & IFF_UP) == 0) {
        ET_UNLOCK(sc);
        return;
    }

    mii = device_get_softc(sc->sc_miibus);
    mii_pollstat(mii);
    ifmr->ifm_active = mii->mii_media_active;
    ifmr->ifm_status = mii->mii_media_status;
    ET_UNLOCK(sc);
}
/*
 * Stop the interface: disable interrupts and the MACs, halt both DMA
 * engines, harvest final statistics, and release ring buffers.
 * Caller must hold the softc lock.
 */
static void
et_stop(struct et_softc *sc)
{
    if_t ifp;

    ET_LOCK_ASSERT(sc);

    ifp = sc->ifp;
    callout_stop(&sc->sc_tick);
    /* Disable interrupts. */
    CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);

    /* Disable TX/RX in the MAC and give in-flight frames time to drain. */
    CSR_WRITE_4(sc, ET_MAC_CFG1, CSR_READ_4(sc, ET_MAC_CFG1) & ~(
        ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN));
    DELAY(100);

    et_stop_rxdma(sc);
    et_stop_txdma(sc);
    et_stats_update(sc);

    et_free_tx_ring(sc);
    et_free_rx_ring(sc);

    sc->sc_tx = 0;
    sc->sc_tx_intr = 0;
    sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;

    sc->watchdog_timer = 0;

    if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
}
/*
 * Validate the EEPROM and, on PCIe parts, tune link-layer timing
 * (ACK latency, replay timer, L0s/L1 exit latency) and the maximum
 * read request size.  Returns 0 on success or ENXIO on EEPROM error.
 */
static int
et_bus_config(struct et_softc *sc)
{
    uint32_t val, max_plsz;
    uint16_t ack_latency, replay_timer;

    /*
     * Test whether EEPROM is valid
     * NOTE: Read twice to get the correct value
     */
    pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
    val = pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
    if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
        device_printf(sc->dev, "EEPROM status error 0x%02x\n", val);
        return (ENXIO);
    }

    /* TODO: LED */

    /* The remaining tuning only applies to PCIe variants. */
    if ((sc->sc_flags & ET_FLAG_PCIE) == 0)
        return (0);

    /*
     * Configure ACK latency and replay timer according to
     * max payload size
     */
    val = pci_read_config(sc->dev,
        sc->sc_expcap + PCIER_DEVICE_CAP, 4);
    max_plsz = val & PCIEM_CAP_MAX_PAYLOAD;

    switch (max_plsz) {
    case ET_PCIV_DEVICE_CAPS_PLSZ_128:
        ack_latency = ET_PCIV_ACK_LATENCY_128;
        replay_timer = ET_PCIV_REPLAY_TIMER_128;
        break;

    case ET_PCIV_DEVICE_CAPS_PLSZ_256:
        ack_latency = ET_PCIV_ACK_LATENCY_256;
        replay_timer = ET_PCIV_REPLAY_TIMER_256;
        break;

    default:
        /* Unknown payload size: keep whatever firmware programmed. */
        ack_latency = pci_read_config(sc->dev, ET_PCIR_ACK_LATENCY, 2);
        replay_timer = pci_read_config(sc->dev,
            ET_PCIR_REPLAY_TIMER, 2);
        device_printf(sc->dev, "ack latency %u, replay timer %u\n",
            ack_latency, replay_timer);
        break;
    }
    if (ack_latency != 0) {
        pci_write_config(sc->dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
        pci_write_config(sc->dev, ET_PCIR_REPLAY_TIMER, replay_timer,
            2);
    }

    /*
     * Set L0s and L1 latency timer to 2us
     */
    val = pci_read_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, 4);
    val &= ~(PCIEM_LINK_CAP_L0S_EXIT | PCIEM_LINK_CAP_L1_EXIT);
    /* L0s exit latency : 2us */
    val |= 0x00005000;
    /* L1 exit latency : 2us */
    val |= 0x00028000;
    pci_write_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, val, 4);

    /*
     * Set max read request size to 2048 bytes
     */
    pci_set_max_read_req(sc->dev, 2048);

    return (0);
}
/*
 * Read the factory station address out of PCI config space into @eaddr.
 * The 6 bytes are split across a 32-bit and a 16-bit config register,
 * stored least-significant byte first.
 */
static void
et_get_eaddr(device_t dev, uint8_t eaddr[])
{
    uint32_t reg;
    int i;

    /* Low four bytes of the MAC address. */
    reg = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
    for (i = 0; i < 4; i++)
        eaddr[i] = reg >> (8 * i);
    /* Remaining two bytes. */
    reg = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
    for (; i < ETHER_ADDR_LEN; i++)
        eaddr[i] = reg >> (8 * (i - 4));
}
/*
 * Hard-reset the controller: assert every MAC/function reset bit, soft
 * reset all DMA and MAC sub-blocks, then release the resets and mask
 * all interrupts.  Exact write sequence follows the hardware's required
 * reset protocol.
 */
static void
et_reset(struct et_softc *sc)
{
    CSR_WRITE_4(sc, ET_MAC_CFG1,
        ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
        ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
        ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

    CSR_WRITE_4(sc, ET_SWRST,
        ET_SWRST_TXDMA | ET_SWRST_RXDMA |
        ET_SWRST_TXMAC | ET_SWRST_RXMAC |
        ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

    CSR_WRITE_4(sc, ET_MAC_CFG1,
        ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
        ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
    /* Release the resets. */
    CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
    /* Disable interrupts. */
    CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}
/* Callback argument for et_dma_map_addr(): receives the segment address. */
struct et_dmamap_arg {
    bus_addr_t et_busaddr;
};
/*
 * bus_dmamap_load() callback: stash the single segment's bus address in
 * the caller-supplied et_dmamap_arg.  Nothing is recorded on error.
 */
static void
et_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct et_dmamap_arg *dmap_arg = arg;

    if (error != 0)
        return;
    KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
    dmap_arg->et_busaddr = segs->ds_addr;
}
/*
 * Create a DMA tag, allocate zeroed coherent memory for one descriptor
 * ring, and load its bus address into *paddr.  @msg names the ring in
 * diagnostics.  On failure the partially-created resources are left for
 * et_dma_ring_free()/et_dma_free() to reclaim.
 */
static int
et_dma_ring_alloc(struct et_softc *sc, bus_size_t alignment, bus_size_t maxsize,
    bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr,
    const char *msg)
{
    struct et_dmamap_arg ctx;
    int error;

    error = bus_dma_tag_create(sc->sc_dtag, alignment, 0, BUS_SPACE_MAXADDR,
        BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize, 0, NULL, NULL,
        tag);
    if (error != 0) {
        device_printf(sc->dev, "could not create %s dma tag\n", msg);
        return (error);
    }
    /* Allocate DMA'able memory for ring. */
    error = bus_dmamem_alloc(*tag, (void **)ring,
        BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
    if (error != 0) {
        device_printf(sc->dev,
            "could not allocate DMA'able memory for %s\n", msg);
        return (error);
    }
    /* Load the address of the ring. */
    ctx.et_busaddr = 0;
    error = bus_dmamap_load(*tag, *map, *ring, maxsize, et_dma_map_addr,
        &ctx, BUS_DMA_NOWAIT);
    if (error != 0) {
        device_printf(sc->dev,
            "could not load DMA'able memory for %s\n", msg);
        return (error);
    }
    *paddr = ctx.et_busaddr;
    return (0);
}
/*
 * Release one descriptor ring: unload the map (if loaded), free the DMA
 * memory (if allocated), and destroy the tag.  Safe on partially-created
 * rings; freed handles are cleared so repeated calls are harmless.
 */
static void
et_dma_ring_free(struct et_softc *sc, bus_dma_tag_t *tag, uint8_t **ring,
    bus_dmamap_t map, bus_addr_t *paddr)
{
    /* A nonzero bus address means the map is loaded. */
    if (*paddr != 0) {
        bus_dmamap_unload(*tag, map);
        *paddr = 0;
    }
    if (*ring != NULL) {
        bus_dmamem_free(*tag, *ring, map);
        *ring = NULL;
    }
    if (*tag) {
        bus_dma_tag_destroy(*tag);
        *tag = NULL;
    }
}
/*
 * Allocate every DMA resource the driver needs: the parent tags, the TX
 * ring and status block, both RX rings (ring 0 for small frames, ring 1
 * for cluster-sized frames), the RX stat ring and status block, and the
 * per-buffer mbuf DMA maps (plus one spare map per RX ring).
 *
 * Returns 0 on success or a bus_dma error.  On failure the caller is
 * expected to unwind via et_dma_free(), so nothing is released here.
 */
static int
et_dma_alloc(struct et_softc *sc)
{
    struct et_txdesc_ring *tx_ring;
    struct et_rxdesc_ring *rx_ring;
    struct et_rxstat_ring *rxst_ring;
    struct et_rxstatus_data *rxsd;
    struct et_rxbuf_data *rbd;
    struct et_txbuf_data *tbd;
    struct et_txstatus_data *txsd;
    int i, error;

    /* Parent tag for all descriptor rings and status blocks. */
    error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
        BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
        &sc->sc_dtag);
    if (error != 0) {
        device_printf(sc->dev, "could not allocate parent dma tag\n");
        return (error);
    }

    /* TX ring. */
    tx_ring = &sc->sc_tx_ring;
    error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_TX_RING_SIZE,
        &tx_ring->tr_dtag, (uint8_t **)&tx_ring->tr_desc, &tx_ring->tr_dmap,
        &tx_ring->tr_paddr, "TX ring");
    if (error)
        return (error);

    /* TX status block. */
    txsd = &sc->sc_tx_status;
    error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN, sizeof(uint32_t),
        &txsd->txsd_dtag, (uint8_t **)&txsd->txsd_status, &txsd->txsd_dmap,
        &txsd->txsd_paddr, "TX status block");
    if (error)
        return (error);

    /* RX ring 0, used to receive small sized frames. */
    rx_ring = &sc->sc_rx_ring[0];
    error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
        &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
        &rx_ring->rr_paddr, "RX ring 0");
    rx_ring->rr_posreg = ET_RX_RING0_POS;
    if (error)
        return (error);

    /* RX ring 1, used to store normal sized frames. */
    rx_ring = &sc->sc_rx_ring[1];
    error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
        &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
        &rx_ring->rr_paddr, "RX ring 1");
    rx_ring->rr_posreg = ET_RX_RING1_POS;
    if (error)
        return (error);

    /* RX stat ring. */
    rxst_ring = &sc->sc_rxstat_ring;
    error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RXSTAT_RING_SIZE,
        &rxst_ring->rsr_dtag, (uint8_t **)&rxst_ring->rsr_stat,
        &rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr, "RX stat ring");
    if (error)
        return (error);

    /* RX status block. */
    rxsd = &sc->sc_rx_status;
    error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN,
        sizeof(struct et_rxstatus), &rxsd->rxsd_dtag,
        (uint8_t **)&rxsd->rxsd_status, &rxsd->rxsd_dmap,
        &rxsd->rxsd_paddr, "RX status block");
    if (error)
        return (error);

    /* Create parent DMA tag for mbufs. */
    error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
        BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
        &sc->sc_mbuf_dtag);
    if (error != 0) {
        device_printf(sc->dev,
            "could not allocate parent dma tag for mbuf\n");
        return (error);
    }

    /* Create DMA tag for mini RX mbufs to use RX ring 0. */
    error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MHLEN, 1,
        MHLEN, 0, NULL, NULL, &sc->sc_rx_mini_tag);
    if (error) {
        device_printf(sc->dev, "could not create mini RX dma tag\n");
        return (error);
    }

    /* Create DMA tag for standard RX mbufs to use RX ring 1. */
    error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
        MCLBYTES, 0, NULL, NULL, &sc->sc_rx_tag);
    if (error) {
        device_printf(sc->dev, "could not create RX dma tag\n");
        return (error);
    }

    /* Create DMA tag for TX mbufs. */
    error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
        MCLBYTES * ET_NSEG_MAX, ET_NSEG_MAX, MCLBYTES, 0, NULL, NULL,
        &sc->sc_tx_tag);
    if (error) {
        device_printf(sc->dev, "could not create TX dma tag\n");
        return (error);
    }

    /* Initialize RX ring 0. */
    rbd = &sc->sc_rx_data[0];
    rbd->rbd_bufsize = ET_RXDMA_CTRL_RING0_128;
    rbd->rbd_newbuf = et_newbuf_hdr;
    rbd->rbd_discard = et_rxbuf_discard;
    rbd->rbd_softc = sc;
    rbd->rbd_ring = &sc->sc_rx_ring[0];
    /* Create DMA maps for mini RX buffers, ring 0. */
    for (i = 0; i < ET_RX_NDESC; i++) {
        error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
            &rbd->rbd_buf[i].rb_dmap);
        if (error) {
            device_printf(sc->dev,
                "could not create DMA map for mini RX mbufs\n");
            return (error);
        }
    }

    /* Create a spare DMA map for mini RX buffers, ring 0. */
    error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
        &sc->sc_rx_mini_sparemap);
    if (error) {
        device_printf(sc->dev,
            "could not create spare DMA map for mini RX mbuf\n");
        return (error);
    }

    /* Initialize RX ring 1. */
    rbd = &sc->sc_rx_data[1];
    rbd->rbd_bufsize = ET_RXDMA_CTRL_RING1_2048;
    rbd->rbd_newbuf = et_newbuf_cluster;
    rbd->rbd_discard = et_rxbuf_discard;
    rbd->rbd_softc = sc;
    rbd->rbd_ring = &sc->sc_rx_ring[1];
    /* Create DMA maps for standard RX buffers, ring 1. */
    for (i = 0; i < ET_RX_NDESC; i++) {
        error = bus_dmamap_create(sc->sc_rx_tag, 0,
            &rbd->rbd_buf[i].rb_dmap);
        if (error) {
            /* Fixed copy-paste: this loop covers the standard ring. */
            device_printf(sc->dev,
                "could not create DMA map for standard RX mbufs\n");
            return (error);
        }
    }

    /* Create a spare DMA map for standard RX buffers, ring 1. */
    error = bus_dmamap_create(sc->sc_rx_tag, 0, &sc->sc_rx_sparemap);
    if (error) {
        device_printf(sc->dev,
            "could not create spare DMA map for RX mbuf\n");
        return (error);
    }

    /* Create DMA maps for TX buffers. */
    tbd = &sc->sc_tx_data;
    for (i = 0; i < ET_TX_NDESC; i++) {
        error = bus_dmamap_create(sc->sc_tx_tag, 0,
            &tbd->tbd_buf[i].tb_dmap);
        if (error) {
            device_printf(sc->dev,
                "could not create DMA map for TX mbufs\n");
            return (error);
        }
    }

    return (0);
}
static void
et_dma_free(struct et_softc *sc)
{
struct et_txdesc_ring *tx_ring;
struct et_rxdesc_ring *rx_ring;
struct et_txstatus_data *txsd;
struct et_rxstat_ring *rxst_ring;
struct et_rxbuf_data *rbd;
struct et_txbuf_data *tbd;
int i;
/* Destroy DMA maps for mini RX buffers, ring 0. */
rbd = &sc->sc_rx_data[0];
for (i = 0; i < ET_RX_NDESC; i++) {
if (rbd->rbd_buf[i].rb_dmap) {
bus_dmamap_destroy(sc->sc_rx_mini_tag,
rbd->rbd_buf[i].rb_dmap);
rbd->rbd_buf[i].rb_dmap = NULL;
}
}
if (sc->sc_rx_mini_sparemap) {
bus_dmamap_destroy(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap);
sc->sc_rx_mini_sparemap = NULL;
}
if (sc->sc_rx_mini_tag) {
bus_dma_tag_destroy(sc->sc_rx_mini_tag);
sc->sc_rx_mini_tag = NULL;
}
/* Destroy DMA maps for standard RX buffers, ring 1. */
rbd = &sc->sc_rx_data[1];
for (i = 0; i < ET_RX_NDESC; i++) {
if (rbd->rbd_buf[i].rb_dmap) {
bus_dmamap_destroy(sc->sc_rx_tag,
rbd->rbd_buf[i].rb_dmap);
rbd->rbd_buf[i].rb_dmap = NULL;
}
}
if (sc->sc_rx_sparemap) {
bus_dmamap_destroy(sc->sc_rx_tag, sc->sc_rx_sparemap);
sc->sc_rx_sparemap = NULL;
}
if (sc->sc_rx_tag) {
bus_dma_tag_destroy(sc->sc_rx_tag);
sc->sc_rx_tag = NULL;
}
/* Destroy DMA maps for TX buffers. */
tbd = &sc->sc_tx_data;
for (i = 0; i < ET_TX_NDESC; i++) {
if (tbd->tbd_buf[i].tb_dmap) {
bus_dmamap_destroy(sc->sc_tx_tag,
tbd->tbd_buf[i].tb_dmap);
tbd->tbd_buf[i].tb_dmap = NULL;
}
}
if (sc->sc_tx_tag) {
bus_dma_tag_destroy(sc->sc_tx_tag);
sc->sc_tx_tag = NULL;
}
/* Destroy mini RX ring, ring 0. */
rx_ring = &sc->sc_rx_ring[0];
et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
rx_ring->rr_dmap, &rx_ring->rr_paddr);
/* Destroy standard RX ring, ring 1. */
rx_ring = &sc->sc_rx_ring[1];
et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
rx_ring->rr_dmap, &rx_ring->rr_paddr);
/* Destroy RX stat ring. */
rxst_ring = &sc->sc_rxstat_ring;
et_dma_ring_free(sc, &rxst_ring->rsr_dtag, (void *)&rxst_ring->rsr_stat,
rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr);
/* Destroy RX status block. */
et_dma_ring_free(sc, &rxst_ring->rsr_dtag, (void *)&rxst_ring->rsr_stat,
rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr);
/* Destroy TX ring. */
tx_ring = &sc->sc_tx_ring;
et_dma_ring_free(sc, &tx_ring->tr_dtag, (void *)&tx_ring->tr_desc,
tx_ring->tr_dmap, &tx_ring->tr_paddr);
/* Destroy TX status block. */
txsd = &sc->sc_tx_status;
et_dma_ring_free(sc, &txsd->txsd_dtag, (void *)&txsd->txsd_status,
txsd->txsd_dmap, &txsd->txsd_paddr);
/* Destroy the parent tag. */
if (sc->sc_dtag) {
bus_dma_tag_destroy(sc->sc_dtag);
sc->sc_dtag = NULL;
}
}
/*
 * Minimal hardware bring-up performed once at attach time: quiesce
 * loopback, reset the MAC blocks, program half-duplex parameters and
 * leave the memory controllers enabled.  The order of the register
 * writes follows the chip's required reset sequence.
 */
static void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
	    (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
	    (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}
/*
 * Interrupt handler.  Services RX/TX DMA completion and timer
 * interrupts; a RX/TX DMA error triggers a full chip reinitialization.
 * Interrupts are masked for the duration of the handler and re-enabled
 * only if the interface is still running.
 */
static void
et_intr(void *xsc)
{
	struct et_softc *sc;
	if_t ifp;
	uint32_t status;

	sc = xsc;
	ET_LOCK(sc);
	ifp = sc->ifp;
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		goto done;

	status = CSR_READ_4(sc, ET_INTR_STATUS);
	if ((status & ET_INTRS) == 0)
		goto done;

	/* Disable further interrupts. */
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);

	if (status & (ET_INTR_RXDMA_ERROR | ET_INTR_TXDMA_ERROR)) {
		device_printf(sc->dev, "DMA error(0x%08x) -- resetting\n",
		    status);
		/* Clear RUNNING so et_init_locked() performs a full reset. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		et_init_locked(sc);
		ET_UNLOCK(sc);
		return;
	}
	if (status & ET_INTR_RXDMA)
		et_rxeof(sc);
	/* TX reclaim also runs on the periodic timer interrupt. */
	if (status & (ET_INTR_TXDMA | ET_INTR_TIMER))
		et_txeof(sc);
	if (status & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
		/* Re-enable interrupts and restart transmission if queued. */
		CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS);
		if (!if_sendq_empty(ifp))
			et_start_locked(ifp);
	}
done:
	ET_UNLOCK(sc);
}
/*
 * Bring the interface up: reset the chip, rebuild the TX/RX rings,
 * program the hardware and start both DMA engines.  Must be called
 * with the driver lock held; a no-op if already running.
 */
static void
et_init_locked(struct et_softc *sc)
{
	if_t ifp;
	int error;

	ET_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	et_stop(sc);
	et_reset(sc);

	et_init_tx_ring(sc);
	error = et_init_rx_ring(sc);
	if (error)
		return;

	error = et_chip_init(sc);
	if (error)
		goto fail;

	/*
	 * Start TX/RX DMA engine
	 */
	/*
	 * NOTE(review): these two error paths "return" without running
	 * the "fail" cleanup (et_stop()) that the et_chip_init() error
	 * path takes; confirm the asymmetry is intentional.
	 */
	error = et_start_rxdma(sc);
	if (error)
		return;
	error = et_start_txdma(sc);
	if (error)
		return;

	/* Enable interrupts. */
	CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS);
	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	/* Force link renegotiation before allowing transmission. */
	sc->sc_flags &= ~ET_FLAG_LINK;
	et_ifmedia_upd_locked(ifp);

	callout_reset(&sc->sc_tick, hz, et_tick, sc);
fail:
	if (error)
		et_stop(sc);
}
/*
 * ifnet init entry point: serialize on the driver lock and let
 * et_init_locked() do the real work.
 */
static void
et_init(void *xsc)
{
	struct et_softc *softc;

	softc = xsc;
	ET_LOCK(softc);
	et_init_locked(softc);
	ET_UNLOCK(softc);
}
/*
 * ifnet ioctl entry point.  Handles interface flags, media, multicast
 * membership, MTU and capability changes; everything else is passed on
 * to ether_ioctl().
 */
static int
et_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct et_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int error, mask, max_framelen;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;
	error = 0;

	/* XXX LOCKSUSED */
	switch (cmd) {
	case SIOCSIFFLAGS:
		ET_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/*
				 * Already running: only reprogram the RX
				 * filter when a filter-related flag changed.
				 */
				if ((if_getflags(ifp) ^ sc->sc_if_flags) &
				(IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST))
					et_setmulti(sc);
			} else {
				et_init_locked(sc);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				et_stop(sc);
		}
		/* Remember flags for the XOR comparison above. */
		sc->sc_if_flags = if_getflags(ifp);
		ET_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			ET_LOCK(sc);
			et_setmulti(sc);
			ET_UNLOCK(sc);
		}
		break;

	case SIOCSIFMTU:
		ET_LOCK(sc);
#if 0
		if (sc->sc_flags & ET_FLAG_JUMBO)
			max_framelen = ET_JUMBO_FRAMELEN;
		else
#endif
		max_framelen = MCLBYTES - 1;

		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
			error = EOPNOTSUPP;
			ET_UNLOCK(sc);
			break;
		}

		if (if_getmtu(ifp) != ifr->ifr_mtu) {
			if_setmtu(ifp, ifr->ifr_mtu);
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/* An MTU change requires reinitialization. */
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				et_init_locked(sc);
			}
		}
		ET_UNLOCK(sc);
		break;

	case SIOCSIFCAP:
		ET_LOCK(sc);
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & if_getcapabilities(ifp)) != 0) {
			if_togglecapenable(ifp, IFCAP_TXCSUM);
			/* Keep hwassist bits in sync with the capability. */
			if ((IFCAP_TXCSUM & if_getcapenable(ifp)) != 0)
				if_sethwassistbits(ifp, ET_CSUM_FEATURES, 0);
			else
				if_sethwassistbits(ifp, 0, ET_CSUM_FEATURES);
		}
		ET_UNLOCK(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
/*
 * Dequeue frames from the interface send queue, encapsulate them into
 * TX descriptors and notify the chip.  Must be called with the driver
 * lock held; bails out unless the interface is running with an
 * established link and enabled TX/RX.
 */
static void
et_start_locked(if_t ifp)
{
	struct et_softc *sc;
	struct mbuf *m_head = NULL;
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	uint32_t tx_ready_pos;
	int enq;

	sc = if_getsoftc(ifp);
	ET_LOCK_ASSERT(sc);

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING ||
	    (sc->sc_flags & (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) !=
	    (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED))
		return;

	/*
	 * Driver does not request TX completion interrupt for every
	 * queued frames to prevent generating excessive interrupts.
	 * This means driver may wait for TX completion interrupt even
	 * though some frames were successfully transmitted.  Reclaiming
	 * transmitted frames will ensure driver see all available
	 * descriptors.
	 */
	tbd = &sc->sc_tx_data;
	if (tbd->tbd_used > (ET_TX_NDESC * 2) / 3)
		et_txeof(sc);

	for (enq = 0; !if_sendq_empty(ifp); ) {
		/* Leave ET_NSEG_SPARE descriptors free for the next frame. */
		if (tbd->tbd_used + ET_NSEG_SPARE >= ET_TX_NDESC) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;

		if (et_encap(sc, &m_head)) {
			/* et_encap() freed the mbuf on fatal errors. */
			if (m_head == NULL) {
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
				break;
			}
			/* Requeue and retry later when descriptors free up. */
			if_sendq_prepend(ifp, m_head);
			if (tbd->tbd_used > 0)
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		enq++;
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		tx_ring = &sc->sc_tx_ring;
		bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
		    BUS_DMASYNC_PREWRITE);
		/* Tell the chip how far the ready index advanced. */
		tx_ready_pos = tx_ring->tr_ready_index &
		    ET_TX_READY_POS_INDEX_MASK;
		if (tx_ring->tr_ready_wrap)
			tx_ready_pos |= ET_TX_READY_POS_WRAP;
		CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);
		/* Arm the watchdog (seconds, decremented by et_tick()). */
		sc->watchdog_timer = 5;
	}
}
/*
 * ifnet transmit start entry point; take the driver lock and hand
 * off to et_start_locked().
 */
static void
et_start(if_t ifp)
{
	struct et_softc *softc;

	softc = if_getsoftc(ifp);
	ET_LOCK(softc);
	et_start_locked(ifp);
	ET_UNLOCK(softc);
}
/*
 * TX watchdog, called once a second from et_tick().  Returns 0 while
 * the timer is disarmed or still counting down; on expiry it logs the
 * TX status word, reinitializes the chip and returns EJUSTRETURN so
 * the caller stops rescheduling itself.
 */
static int
et_watchdog(struct et_softc *sc)
{
	uint32_t status;

	ET_LOCK_ASSERT(sc);

	/* Disarmed, or still counting down. */
	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
		return (0);

	bus_dmamap_sync(sc->sc_tx_status.txsd_dtag, sc->sc_tx_status.txsd_dmap,
	    BUS_DMASYNC_POSTREAD);
	status = le32toh(*(sc->sc_tx_status.txsd_status));
	if_printf(sc->ifp, "watchdog timed out (0x%08x) -- resetting\n",
	    status);

	if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
	et_init_locked(sc);
	return (EJUSTRETURN);
}
static int
et_stop_rxdma(struct et_softc *sc)
{
CSR_WRITE_4(sc, ET_RXDMA_CTRL,
ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);
DELAY(5);
if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
if_printf(sc->ifp, "can't stop RX DMA engine\n");
return (ETIMEDOUT);
}
return (0);
}
/*
 * Halt the TX DMA engine.  The halt request is assumed to take effect
 * immediately, so no completion polling is done and 0 is always
 * returned.
 */
static int
et_stop_txdma(struct et_softc *sc)
{

	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
	    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return (0);
}
/*
 * Release every mbuf still attached to the TX ring.  The DMA maps and
 * the tag itself are torn down later by et_dma_free().
 */
static void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txbuf_data *tbd;
	struct et_txbuf *tb;
	int i;

	tbd = &sc->sc_tx_data;
	for (i = 0; i < ET_TX_NDESC; ++i) {
		tb = &tbd->tbd_buf[i];
		if (tb->tb_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
			    BUS_DMASYNC_POSTWRITE);
			/*
			 * Unload with sc_tx_tag: these maps were created
			 * from sc_tx_tag (see et_dma_alloc/et_txeof); the
			 * old code wrongly passed sc_mbuf_dtag here.
			 */
			bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}
}
/*
 * Release every mbuf attached to the two RX rings (mini ring 0 and
 * standard ring 1).  DMA maps and tags are torn down later by
 * et_dma_free().
 */
static void
et_free_rx_ring(struct et_softc *sc)
{
	struct et_rxbuf_data *rbd;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxbuf *rb;
	int i;

	/* Ring 0 */
	rx_ring = &sc->sc_rx_ring[0];
	rbd = &sc->sc_rx_data[0];
	for (i = 0; i < ET_RX_NDESC; ++i) {
		rb = &rbd->rbd_buf[i];
		if (rb->rb_mbuf != NULL) {
			/*
			 * NOTE(review): the sync passes the descriptor
			 * ring map (rr_dmap) with the mbuf tag while the
			 * unload uses the per-buffer map (rb_dmap);
			 * confirm rb_dmap isn't the map that should be
			 * synced here.
			 */
			bus_dmamap_sync(sc->sc_rx_mini_tag, rx_ring->rr_dmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
			m_freem(rb->rb_mbuf);
			rb->rb_mbuf = NULL;
		}
	}

	/* Ring 1 */
	rx_ring = &sc->sc_rx_ring[1];
	rbd = &sc->sc_rx_data[1];
	for (i = 0; i < ET_RX_NDESC; ++i) {
		rb = &rbd->rbd_buf[i];
		if (rb->rb_mbuf != NULL) {
			/* Same rr_dmap/rb_dmap question as ring 0 above. */
			bus_dmamap_sync(sc->sc_rx_tag, rx_ring->rr_dmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
			m_freem(rb->rb_mbuf);
			rb->rb_mbuf = NULL;
		}
	}
}
/*
 * if_foreach_llmaddr() callback: fold one link-level multicast address
 * into the 4 x 32-bit hash table.  Bits 29-23 of the big-endian CRC
 * select one of the 128 hash bits.  Always counts the address (returns 1).
 */
static u_int
et_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t crc, bit, *hash = arg;

	crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
	bit = (crc & 0x3f800000) >> 23;		/* 7-bit hash: 0..127 */
	hash[bit / 32] |= 1 << (bit % 32);
	return (1);
}
/*
 * Program the RX packet filter: either bypass filtering entirely for
 * promiscuous/allmulti mode, or build the 128-bit multicast hash table
 * from the interface's multicast list.  Driver lock must be held.
 */
static void
et_setmulti(struct et_softc *sc)
{
	if_t ifp;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	int i, count;

	ET_LOCK_ASSERT(sc);
	ifp = sc->ifp;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) {
		/* Promiscuous/allmulti: accept everything, skip the hash. */
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	count = if_foreach_llmaddr(ifp, et_hash_maddr, &hash);

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

	if (count > 0)
		pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}
/*
 * Full chip programming performed on every (re)initialization:
 * partition the internal packet memory between RX and TX queues
 * based on the current MTU, then initialize the MAC, RX/TX MAC and
 * the RX/TX DMA engines.  Returns 0 or a DMA engine init error.
 */
static int
et_chip_init(struct et_softc *sc)
{
	if_t ifp;
	uint32_t rxq_end;
	int error, frame_len, rxmem_size;

	ifp = sc->ifp;
	/*
	 * Split 16Kbytes internal memory between TX and RX
	 * according to frame length.
	 */
	frame_len = ET_FRAMELEN(if_getmtu(ifp));
	if (frame_len < 2048) {
		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
		rxmem_size = ET_MEM_SIZE / 2;
	} else {
		rxmem_size = ET_MEM_SIZE -
		    roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
	}
	rxq_end = ET_QUEUE_ADDR(rxmem_size);

	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);

	/* No loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure */
	if ((sc->sc_flags & ET_FLAG_MSI) == 0)
		CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	/* Disable timer */
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return (error);

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return (error);

	return (0);
}
/*
 * Reset the TX descriptor ring to its pristine state: clear all
 * descriptors, reset the software bookkeeping and zero the TX status
 * word that the chip writes back.
 */
static void
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *ring;
	struct et_txbuf_data *bufs;
	struct et_txstatus_data *status;

	/* Clear every TX descriptor and flush them to the device. */
	ring = &sc->sc_tx_ring;
	bzero(ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(ring->tr_dtag, ring->tr_dmap,
	    BUS_DMASYNC_PREWRITE);

	/* Reset software TX buffer accounting. */
	bufs = &sc->sc_tx_data;
	bufs->tbd_start_index = 0;
	bufs->tbd_start_wrap = 0;
	bufs->tbd_used = 0;

	/* Zero the chip-written TX status word. */
	status = &sc->sc_tx_status;
	bzero(status->txsd_status, sizeof(uint32_t));
	bus_dmamap_sync(status->txsd_dtag, status->txsd_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
static int
et_init_rx_ring(struct et_softc *sc)
{
struct et_rxstatus_data *rxsd;
struct et_rxstat_ring *rxst_ring;
struct et_rxbuf_data *rbd;
int i, error, n;
for (n = 0; n < ET_RX_NRING; ++n) {
rbd = &sc->sc_rx_data[n];
for (i = 0; i < ET_RX_NDESC; ++i) {
error = rbd->rbd_newbuf(rbd, i);
if (error) {
if_printf(sc->ifp, "%d ring %d buf, "
"newbuf failed: %d\n", n, i, error);
return (error);
}
}
}
rxsd = &sc->sc_rx_status;
bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
rxst_ring = &sc->sc_rxstat_ring;
bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
return (0);
}
/*
 * Program the RX DMA engine: install the RX status block, the RX stat
 * ring and both RX descriptor rings, then set the interrupt moderation
 * parameters.  The software index/wrap state is reset to match the
 * position registers written here.
 */
static int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd;
	struct et_rxstat_ring *rxst_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		if_printf(sc->ifp, "can't init RX DMA engine\n");
		return (error);
	}

	/*
	 * Install RX status
	 */
	rxsd = &sc->sc_rx_status;
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	rxst_ring = &sc->sc_rxstat_ring;
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	/* Interrupt when 15% of the stat ring entries are pending. */
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

	return (0);
}
/*
 * Program the TX DMA engine: install the TX descriptor ring and the
 * TX status block, and reset the software ready index/wrap state to
 * match the cleared ET_TX_READY_POS register.
 */
static int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring;
	struct et_txstatus_data *txsd;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		if_printf(sc->ifp, "can't init TX DMA engine\n");
		return (error);
	}

	/*
	 * Install TX descriptor ring
	 */
	tx_ring = &sc->sc_tx_ring;
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	txsd = &sc->sc_tx_status;
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return (0);
}
/*
 * Initialize the MAC block: reset it, program inter-packet gap and
 * half-duplex parameters, install the station address and maximum
 * frame length, then release the reset.
 */
static void
et_init_mac(struct et_softc *sc)
{
	if_t ifp;
	const uint8_t *eaddr;
	uint32_t val;

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = (56 << ET_IPG_NONB2B_1_SHIFT) |
	    (88 << ET_IPG_NONB2B_2_SHIFT) |
	    (80 << ET_IPG_MINIFG_SHIFT) |
	    (96 << ET_IPG_B2B_SHIFT);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
	    (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
	    (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/*
	 * Set MAC address
	 */
	ifp = sc->ifp;
	eaddr = if_getlladdr(ifp);
	/* ADDR1 holds bytes 2-5, ADDR2 holds bytes 0-1 of the address. */
	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(if_getmtu(ifp)));

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}
/*
 * Initialize the RX MAC: clear WOL state, program the WOL source
 * address, set up packet filtering and (for jumbo frames) cut-through
 * segmentation, then enable the RX MAC and program the multicast
 * filter.
 */
static void
et_init_rxmac(struct et_softc *sc)
{
	if_t ifp;
	const uint8_t *eaddr;
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	ifp = sc->ifp;
	eaddr = if_getlladdr(ifp);
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ET_FRAMELEN(if_getmtu(ifp)) > ET_RXMAC_CUT_THRU_FRMLEN) {
		/*
		 * In order to transmit jumbo packets greater than
		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
		 * RX MAC and RX DMA needs to be reduced in size to
		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
		 * order to implement this, we must use "cut through"
		 * mode in the RX MAC, which chops packets down into
		 * segments.  In this case we selected 256 bytes,
		 * since this is the size of the PCI-Express TLP's
		 * that the ET1310 uses.
		 */
		val = (ET_RXMAC_SEGSZ(256) & ET_RXMAC_MC_SEGSZ_MAX_MASK) |
		    ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
	    ET_RXMAC_MGT_PASS_ECRC |
	    ET_RXMAC_MGT_PASS_ELEN |
	    ET_RXMAC_MGT_PASS_ETRUNC |
	    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generation)
	 */
	val = (ETHER_MIN_LEN << ET_PKTFILT_MINLEN_SHIFT) &
	    ET_PKTFILT_MINLEN_MASK;
	val |= ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
	    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}
/*
 * Initialize the TX MAC: program the flow-control pause time before
 * enabling the MAC, and leave flow control disabled.
 */
static void
et_init_txmac(struct et_softc *sc)
{

	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/*
	 * Initialize pause time.
	 * This register should be set before XON/XOFF frame is
	 * sent by driver.
	 */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0 << ET_TXMAC_FLOWCTRL_CFPT_SHIFT);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
	    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}
/*
 * Enable both RX descriptor rings (with their configured buffer sizes)
 * and verify the RX DMA engine left the halted state.  Returns
 * ETIMEDOUT if the engine still reports halted.
 */
static int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t cfg;

	cfg = ET_RXDMA_CTRL_RING0_ENABLE | ET_RXDMA_CTRL_RING1_ENABLE;
	cfg |= sc->sc_rx_data[0].rbd_bufsize & ET_RXDMA_CTRL_RING0_SIZE_MASK;
	cfg |= sc->sc_rx_data[1].rbd_bufsize & ET_RXDMA_CTRL_RING1_SIZE_MASK;
	CSR_WRITE_4(sc, ET_RXDMA_CTRL, cfg);
	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		if_printf(sc->ifp, "can't start RX DMA engine\n");
		return (ETIMEDOUT);
	}
	return (0);
}
/*
 * Enable the TX DMA engine in single-packet mode.  Always succeeds.
 */
static int
et_start_txdma(struct et_softc *sc)
{

	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return (0);
}
/*
 * RX completion handler.  Walks the chip-written RX stat ring between
 * the software index and the index the chip reported in the RX status
 * block, passing good frames up the stack and recycling descriptors.
 * Index/wrap pairs are mirrored back to the chip's position registers
 * as the rings are consumed.  Driver lock must be held.
 */
static void
et_rxeof(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd;
	struct et_rxstat_ring *rxst_ring;
	struct et_rxbuf_data *rbd;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxstat *st;
	if_t ifp;
	struct mbuf *m;
	uint32_t rxstat_pos, rxring_pos;
	uint32_t rxst_info1, rxst_info2, rxs_stat_ring;
	int buflen, buf_idx, npost[2], ring_idx;
	int rxst_index, rxst_wrap;

	ET_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	rxsd = &sc->sc_rx_status;
	rxst_ring = &sc->sc_rxstat_ring;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
	    BUS_DMASYNC_POSTREAD);

	npost[0] = npost[1] = 0;
	/* The status block tells us how far the chip has advanced. */
	rxs_stat_ring = le32toh(rxsd->rxsd_status->rxs_stat_ring);
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = (rxs_stat_ring & ET_RXS_STATRING_INDEX_MASK) >>
	    ET_RXS_STATRING_INDEX_SHIFT;

	while (rxst_index != rxst_ring->rsr_index ||
	    rxst_wrap != rxst_ring->rsr_wrap) {
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
			break;

		MPASS(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
		rxst_info1 = le32toh(st->rxst_info1);
		rxst_info2 = le32toh(st->rxst_info2);
		buflen = (rxst_info2 & ET_RXST_INFO2_LEN_MASK) >>
		    ET_RXST_INFO2_LEN_SHIFT;
		buf_idx = (rxst_info2 & ET_RXST_INFO2_BUFIDX_MASK) >>
		    ET_RXST_INFO2_BUFIDX_SHIFT;
		ring_idx = (rxst_info2 & ET_RXST_INFO2_RINGIDX_MASK) >>
		    ET_RXST_INFO2_RINGIDX_SHIFT;

		/* Advance and acknowledge the stat ring position. */
		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = rxst_ring->rsr_index & ET_RXSTAT_POS_INDEX_MASK;
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		/* Sanity-check the chip-supplied indices. */
		if (ring_idx >= ET_RX_NRING) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			if_printf(ifp, "invalid ring index %d\n", ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			if_printf(ifp, "invalid buf index %d\n", buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		m = rbd->rbd_buf[buf_idx].rb_mbuf;
		if ((rxst_info1 & ET_RXST_INFO1_OK) == 0){
			/* Discard errored frame. */
			rbd->rbd_discard(rbd, buf_idx);
		} else if (rbd->rbd_newbuf(rbd, buf_idx) != 0) {
			/* No available mbufs, discard it. */
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			rbd->rbd_discard(rbd, buf_idx);
		} else {
			buflen -= ETHER_CRC_LEN;
			if (buflen < ETHER_HDR_LEN) {
				m_freem(m);
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			} else {
				m->m_pkthdr.len = m->m_len = buflen;
				m->m_pkthdr.rcvif = ifp;
				/* Drop the lock around the stack call. */
				ET_UNLOCK(sc);
				if_input(ifp, m);
				ET_LOCK(sc);
			}
		}

		rx_ring = &sc->sc_rx_ring[ring_idx];
		if (buf_idx != rx_ring->rr_index) {
			if_printf(ifp,
			    "WARNING!! ring %d, buf_idx %d, rr_idx %d\n",
			    ring_idx, buf_idx, rx_ring->rr_index);
		}

		/* Return the descriptor to the chip. */
		MPASS(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = rx_ring->rr_index & ET_RX_RING_POS_INDEX_MASK;
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}

	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
	    BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
	    BUS_DMASYNC_PREREAD);
}
/*
 * Map an outgoing mbuf chain into TX descriptors.  On EFBIG the chain
 * is collapsed to at most ET_NSEG_MAX segments and retried.  On fatal
 * errors the mbuf is freed and *m0 set to NULL; on descriptor shortage
 * ENOBUFS is returned with *m0 left intact so the caller can requeue.
 * The DMA map of the first descriptor is swapped to the last so that
 * et_txeof() finds the loaded map alongside the stored mbuf.
 */
static int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	struct et_txdesc *td;
	struct mbuf *m;
	bus_dma_segment_t segs[ET_NSEG_MAX];
	bus_dmamap_t map;
	uint32_t csum_flags, last_td_ctrl2;
	int error, i, idx, first_idx, last_idx, nsegs;

	tx_ring = &sc->sc_tx_ring;
	MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
	tbd = &sc->sc_tx_data;
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs, &nsegs,
	    0);
	if (error == EFBIG) {
		/* Too many segments: defragment and retry once. */
		m = m_collapse(*m0, M_NOWAIT, ET_NSEG_MAX);
		if (m == NULL) {
			m_freem(*m0);
			*m0 = NULL;
			return (ENOMEM);
		}
		*m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs,
		    &nsegs, 0);
		if (error != 0) {
			m_freem(*m0);
			*m0 = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);

	/* Check for descriptor overruns. */
	if (tbd->tbd_used + nsegs > ET_TX_NDESC - 1) {
		bus_dmamap_unload(sc->sc_tx_tag, map);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->sc_tx_tag, map, BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	/* Request a completion interrupt only every sc_tx_intr_nsegs. */
	sc->sc_tx += nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	m = *m0;
	csum_flags = 0;
	if ((m->m_pkthdr.csum_flags & ET_CSUM_FEATURES) != 0) {
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			csum_flags |= ET_TDCTRL2_CSUM_IP;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			csum_flags |= ET_TDCTRL2_CSUM_UDP;
		else if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			csum_flags |= ET_TDCTRL2_CSUM_TCP;
	}
	last_idx = -1;
	for (i = 0; i < nsegs; ++i) {
		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = htole32(ET_ADDR_HI(segs[i].ds_addr));
		td->td_addr_lo = htole32(ET_ADDR_LO(segs[i].ds_addr));
		td->td_ctrl1 = htole32(segs[i].ds_len & ET_TDCTRL1_LEN_MASK);
		if (i == nsegs - 1) {
			/* Last frag */
			td->td_ctrl2 = htole32(last_td_ctrl2 | csum_flags);
			last_idx = idx;
		} else
			td->td_ctrl2 = htole32(csum_flags);

		MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	/* First frag */
	td->td_ctrl2 |= htole32(ET_TDCTRL2_FIRST_FRAG);

	/* Keep the loaded map and the mbuf on the last descriptor. */
	MPASS(last_idx >= 0);
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = m;

	tbd->tbd_used += nsegs;
	MPASS(tbd->tbd_used <= ET_TX_NDESC);

	return (0);
}
/*
 * Reclaim completed TX descriptors: walk from the software start index
 * up to the chip-reported done position, unloading maps and freeing
 * transmitted mbufs.  Clears the watchdog when the ring drains and
 * lifts OACTIVE when enough descriptors are free.  Lock must be held.
 */
static void
et_txeof(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	struct et_txbuf *tb;
	if_t ifp;
	uint32_t tx_done;
	int end, wrap;

	ET_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	tx_ring = &sc->sc_tx_ring;
	tbd = &sc->sc_tx_data;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if (tbd->tbd_used == 0)
		return;

	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
	    BUS_DMASYNC_POSTWRITE);

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = tx_done & ET_TX_DONE_POS_INDEX_MASK;
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		MPASS(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];
		/* Only the last descriptor of a frame carries the mbuf. */
		if (tb->tb_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		MPASS(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0)
		sc->watchdog_timer = 0;
	if (tbd->tbd_used + ET_NSEG_SPARE < ET_TX_NDESC)
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
}
/*
 * Per-second callout: drive the MII state machine, refresh the
 * statistics counters and run the TX watchdog.  Reschedules itself
 * unless the watchdog fired and reinitialized the chip.
 */
static void
et_tick(void *xsc)
{
	struct et_softc *sc = xsc;
	struct mii_data *mii;

	ET_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->sc_miibus);
	mii_tick(mii);
	et_stats_update(sc);
	if (et_watchdog(sc) == EJUSTRETURN)
		return;
	callout_reset(&sc->sc_tick, hz, et_tick, sc);
}
/*
 * Attach a fresh 2KB cluster mbuf to RX ring 1 descriptor buf_idx.
 * The spare DMA map is loaded first so the old mbuf stays intact if
 * allocation or mapping fails; on success the maps are swapped and
 * the descriptor is refreshed.  Returns 0 or ENOBUFS.
 */
static int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx)
{
	struct et_softc *sc;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t dmap;
	int nsegs;

	MPASS(buf_idx < ET_RX_NDESC);
	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/* Align the IP header on a 32-bit boundary. */
	m_adj(m, ETHER_ALIGN);

	sc = rbd->rbd_softc;
	rb = &rbd->rbd_buf[buf_idx];

	/* Load into the spare map so the old buffer survives failure. */
	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_tag, sc->sc_rx_sparemap, m,
	    segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rb->rb_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
	}
	/* Swap the loaded spare map with the descriptor's map. */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_rx_sparemap;
	sc->sc_rx_sparemap = dmap;
	bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);
	rb->rb_mbuf = m;

	desc = &rbd->rbd_ring->rr_desc[buf_idx];
	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
	    BUS_DMASYNC_PREWRITE);
	return (0);
}
static void
et_rxbuf_discard(struct et_rxbuf_data *rbd, int buf_idx)
{
struct et_rxdesc *desc;
desc = &rbd->rbd_ring->rr_desc[buf_idx];
desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
BUS_DMASYNC_PREWRITE);
}
/*
 * Attach a fresh header-sized mbuf (MHLEN) to mini RX ring 0
 * descriptor buf_idx.  Mirrors et_newbuf_cluster(): load into the
 * mini spare map first so the old mbuf survives failure, then swap
 * maps and refresh the descriptor.  Returns 0 or ENOBUFS.
 */
static int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx)
{
	struct et_softc *sc;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t dmap;
	int nsegs;

	MPASS(buf_idx < ET_RX_NDESC);
	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MHLEN;
	/* Align the IP header on a 32-bit boundary. */
	m_adj(m, ETHER_ALIGN);

	sc = rbd->rbd_softc;
	rb = &rbd->rbd_buf[buf_idx];

	/* Load into the spare map so the old buffer survives failure. */
	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap,
	    m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rb->rb_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
	}
	/* Swap the loaded spare map with the descriptor's map. */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_rx_mini_sparemap;
	sc->sc_rx_mini_sparemap = dmap;
	bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);
	rb->rb_mbuf = m;

	desc = &rbd->rbd_ring->rr_desc[buf_idx];
	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
	    BUS_DMASYNC_PREWRITE);
	return (0);
}
#define ET_SYSCTL_STAT_ADD32(c, h, n, p, d) \
SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define ET_SYSCTL_STAT_ADD64(c, h, n, p, d) \
SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
/*
 * Create sysctl tree: interrupt-moderation tunables plus the full set
 * of read-only hardware MAC statistics nodes (fed by et_stats_update()).
 */
static void
et_add_sysctls(struct et_softc * sc)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children, *parent;
    struct sysctl_oid *tree;
    struct et_hw_stats *stats;

    ctx = device_get_sysctl_ctx(sc->dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

    /* Interrupt moderation knobs (validated by their handlers). */
    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts",
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
        et_sysctl_rx_intr_npkts, "I", "RX IM, # packets per RX interrupt");
    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay",
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
        et_sysctl_rx_intr_delay, "I",
        "RX IM, RX interrupt delay (x10 usec)");
    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs",
        CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
        "TX IM, # segments per TX interrupt");
    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer",
        CTLFLAG_RW, &sc->sc_timer, 0, "TX timer");

    tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ET statistics");
    parent = SYSCTL_CHILDREN(tree);

    /* TX/RX statistics. */
    stats = &sc->sc_stats;
    ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_64", &stats->pkts_64,
        "0 to 64 bytes frames");
    ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_65_127", &stats->pkts_65,
        "65 to 127 bytes frames");
    ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_128_255", &stats->pkts_128,
        "128 to 255 bytes frames");
    ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_256_511", &stats->pkts_256,
        "256 to 511 bytes frames");
    ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_512_1023", &stats->pkts_512,
        "512 to 1023 bytes frames");
    ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1024_1518", &stats->pkts_1024,
        "1024 to 1518 bytes frames");
    ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1519_1522", &stats->pkts_1519,
        "1519 to 1522 bytes frames");

    /* RX statistics. */
    tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX MAC statistics");
    children = SYSCTL_CHILDREN(tree);
    ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
        &stats->rx_bytes, "Good bytes");
    ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
        &stats->rx_frames, "Good frames");
    ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
        &stats->rx_crcerrs, "CRC errors");
    ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
        &stats->rx_mcast, "Multicast frames");
    ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
        &stats->rx_bcast, "Broadcast frames");
    ET_SYSCTL_STAT_ADD32(ctx, children, "control",
        &stats->rx_control, "Control frames");
    ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
        &stats->rx_pause, "Pause frames");
    ET_SYSCTL_STAT_ADD32(ctx, children, "unknown_control",
        &stats->rx_unknown_control, "Unknown control frames");
    ET_SYSCTL_STAT_ADD32(ctx, children, "align_errs",
        &stats->rx_alignerrs, "Alignment errors");
    ET_SYSCTL_STAT_ADD32(ctx, children, "len_errs",
        &stats->rx_lenerrs, "Frames with length mismatched");
    ET_SYSCTL_STAT_ADD32(ctx, children, "code_errs",
        &stats->rx_codeerrs, "Frames with code error");
    ET_SYSCTL_STAT_ADD32(ctx, children, "cs_errs",
        &stats->rx_cserrs, "Frames with carrier sense error");
    ET_SYSCTL_STAT_ADD32(ctx, children, "runts",
        &stats->rx_runts, "Too short frames");
    ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
        &stats->rx_oversize, "Oversized frames");
    ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
        &stats->rx_fragments, "Fragmented frames");
    ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
        &stats->rx_jabbers, "Frames with jabber error");
    ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
        &stats->rx_drop, "Dropped frames");

    /* TX statistics. */
    tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX MAC statistics");
    children = SYSCTL_CHILDREN(tree);
    ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
        &stats->tx_bytes, "Good bytes");
    ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
        &stats->tx_frames, "Good frames");
    ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
        &stats->tx_mcast, "Multicast frames");
    ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
        &stats->tx_bcast, "Broadcast frames");
    ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
        &stats->tx_pause, "Pause frames");
    ET_SYSCTL_STAT_ADD32(ctx, children, "deferred",
        &stats->tx_deferred, "Deferred frames");
    ET_SYSCTL_STAT_ADD32(ctx, children, "excess_deferred",
        &stats->tx_excess_deferred, "Excessively deferred frames");
    ET_SYSCTL_STAT_ADD32(ctx, children, "single_colls",
        &stats->tx_single_colls, "Single collisions");
    ET_SYSCTL_STAT_ADD32(ctx, children, "multi_colls",
        &stats->tx_multi_colls, "Multiple collisions");
    ET_SYSCTL_STAT_ADD32(ctx, children, "late_colls",
        &stats->tx_late_colls, "Late collisions");
    ET_SYSCTL_STAT_ADD32(ctx, children, "excess_colls",
        &stats->tx_excess_colls, "Excess collisions");
    ET_SYSCTL_STAT_ADD32(ctx, children, "total_colls",
        &stats->tx_total_colls, "Total collisions");
    ET_SYSCTL_STAT_ADD32(ctx, children, "pause_honored",
        &stats->tx_pause_honored, "Honored pause frames");
    ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
        &stats->tx_drop, "Dropped frames");
    ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
        &stats->tx_jabbers, "Frames with jabber errors");
    ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
        &stats->tx_crcerrs, "Frames with CRC errors");
    ET_SYSCTL_STAT_ADD32(ctx, children, "control",
        &stats->tx_control, "Control frames");
    ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
        &stats->tx_oversize, "Oversized frames");
    ET_SYSCTL_STAT_ADD32(ctx, children, "undersize",
        &stats->tx_undersize, "Undersized frames");
    ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
        &stats->tx_fragments, "Fragmented frames");
}
#undef ET_SYSCTL_STAT_ADD32
#undef ET_SYSCTL_STAT_ADD64
static int
et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
{
struct et_softc *sc;
if_t ifp;
int error, v;
sc = arg1;
ifp = sc->ifp;
v = sc->sc_rx_intr_npkts;
error = sysctl_handle_int(oidp, &v, 0, req);
if (error || req->newptr == NULL)
goto back;
if (v <= 0) {
error = EINVAL;
goto back;
}
if (sc->sc_rx_intr_npkts != v) {
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
sc->sc_rx_intr_npkts = v;
}
back:
return (error);
}
static int
et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
{
struct et_softc *sc;
if_t ifp;
int error, v;
sc = arg1;
ifp = sc->ifp;
v = sc->sc_rx_intr_delay;
error = sysctl_handle_int(oidp, &v, 0, req);
if (error || req->newptr == NULL)
goto back;
if (v <= 0) {
error = EINVAL;
goto back;
}
if (sc->sc_rx_intr_delay != v) {
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
sc->sc_rx_intr_delay = v;
}
back:
return (error);
}
/*
 * Accumulate the hardware MAC statistics registers into the softc's
 * running counters.  NOTE(review): values are added rather than
 * assigned, which presumes the registers are clear-on-read — confirm
 * against the ET130x datasheet.
 */
static void
et_stats_update(struct et_softc *sc)
{
    struct et_hw_stats *stats;

    stats = &sc->sc_stats;
    /* Frame size histogram (TX+RX combined). */
    stats->pkts_64 += CSR_READ_4(sc, ET_STAT_PKTS_64);
    stats->pkts_65 += CSR_READ_4(sc, ET_STAT_PKTS_65_127);
    stats->pkts_128 += CSR_READ_4(sc, ET_STAT_PKTS_128_255);
    stats->pkts_256 += CSR_READ_4(sc, ET_STAT_PKTS_256_511);
    stats->pkts_512 += CSR_READ_4(sc, ET_STAT_PKTS_512_1023);
    stats->pkts_1024 += CSR_READ_4(sc, ET_STAT_PKTS_1024_1518);
    stats->pkts_1519 += CSR_READ_4(sc, ET_STAT_PKTS_1519_1522);

    /* RX MAC counters. */
    stats->rx_bytes += CSR_READ_4(sc, ET_STAT_RX_BYTES);
    stats->rx_frames += CSR_READ_4(sc, ET_STAT_RX_FRAMES);
    stats->rx_crcerrs += CSR_READ_4(sc, ET_STAT_RX_CRC_ERR);
    stats->rx_mcast += CSR_READ_4(sc, ET_STAT_RX_MCAST);
    stats->rx_bcast += CSR_READ_4(sc, ET_STAT_RX_BCAST);
    stats->rx_control += CSR_READ_4(sc, ET_STAT_RX_CTL);
    stats->rx_pause += CSR_READ_4(sc, ET_STAT_RX_PAUSE);
    stats->rx_unknown_control += CSR_READ_4(sc, ET_STAT_RX_UNKNOWN_CTL);
    stats->rx_alignerrs += CSR_READ_4(sc, ET_STAT_RX_ALIGN_ERR);
    stats->rx_lenerrs += CSR_READ_4(sc, ET_STAT_RX_LEN_ERR);
    stats->rx_codeerrs += CSR_READ_4(sc, ET_STAT_RX_CODE_ERR);
    stats->rx_cserrs += CSR_READ_4(sc, ET_STAT_RX_CS_ERR);
    stats->rx_runts += CSR_READ_4(sc, ET_STAT_RX_RUNT);
    stats->rx_oversize += CSR_READ_4(sc, ET_STAT_RX_OVERSIZE);
    stats->rx_fragments += CSR_READ_4(sc, ET_STAT_RX_FRAG);
    stats->rx_jabbers += CSR_READ_4(sc, ET_STAT_RX_JABBER);
    stats->rx_drop += CSR_READ_4(sc, ET_STAT_RX_DROP);

    /* TX MAC counters. */
    stats->tx_bytes += CSR_READ_4(sc, ET_STAT_TX_BYTES);
    stats->tx_frames += CSR_READ_4(sc, ET_STAT_TX_FRAMES);
    stats->tx_mcast += CSR_READ_4(sc, ET_STAT_TX_MCAST);
    stats->tx_bcast += CSR_READ_4(sc, ET_STAT_TX_BCAST);
    stats->tx_pause += CSR_READ_4(sc, ET_STAT_TX_PAUSE);
    stats->tx_deferred += CSR_READ_4(sc, ET_STAT_TX_DEFER);
    stats->tx_excess_deferred += CSR_READ_4(sc, ET_STAT_TX_EXCESS_DEFER);
    stats->tx_single_colls += CSR_READ_4(sc, ET_STAT_TX_SINGLE_COL);
    stats->tx_multi_colls += CSR_READ_4(sc, ET_STAT_TX_MULTI_COL);
    stats->tx_late_colls += CSR_READ_4(sc, ET_STAT_TX_LATE_COL);
    stats->tx_excess_colls += CSR_READ_4(sc, ET_STAT_TX_EXCESS_COL);
    stats->tx_total_colls += CSR_READ_4(sc, ET_STAT_TX_TOTAL_COL);
    stats->tx_pause_honored += CSR_READ_4(sc, ET_STAT_TX_PAUSE_HONOR);
    stats->tx_drop += CSR_READ_4(sc, ET_STAT_TX_DROP);
    stats->tx_jabbers += CSR_READ_4(sc, ET_STAT_TX_JABBER);
    stats->tx_crcerrs += CSR_READ_4(sc, ET_STAT_TX_CRC_ERR);
    stats->tx_control += CSR_READ_4(sc, ET_STAT_TX_CTL);
    stats->tx_oversize += CSR_READ_4(sc, ET_STAT_TX_OVERSIZE);
    stats->tx_undersize += CSR_READ_4(sc, ET_STAT_TX_UNDERSIZE);
    stats->tx_fragments += CSR_READ_4(sc, ET_STAT_TX_FRAG);
}
/*
 * if_get_counter method: report interface counters out of the softc's
 * accumulated hardware statistics; anything we don't track explicitly
 * falls back to the generic implementation.
 */
static uint64_t
et_get_counter(if_t ifp, ift_counter cnt)
{
    struct et_softc *sc = if_getsoftc(ifp);
    struct et_hw_stats *st = &sc->sc_stats;

    switch (cnt) {
    case IFCOUNTER_IPACKETS:
        return (st->rx_frames);
    case IFCOUNTER_IERRORS:
        return (st->rx_crcerrs + st->rx_alignerrs + st->rx_lenerrs +
            st->rx_codeerrs + st->rx_cserrs + st->rx_runts +
            st->rx_jabbers + st->rx_drop);
    case IFCOUNTER_OPACKETS:
        return (st->tx_frames);
    case IFCOUNTER_OERRORS:
        return (st->tx_drop + st->tx_jabbers + st->tx_crcerrs +
            st->tx_excess_deferred + st->tx_late_colls);
    case IFCOUNTER_COLLISIONS:
        return (st->tx_total_colls);
    default:
        return (if_get_counter_default(ifp, cnt));
    }
}
/*
 * Bus suspend method: stop the interface if it is running, then gate
 * the MAC clocks and put the PHY into COMA (low-power) mode.
 */
static int
et_suspend(device_t dev)
{
    struct et_softc *sc;
    uint32_t pmcfg;

    sc = device_get_softc(dev);
    ET_LOCK(sc);
    if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0)
        et_stop(sc);
    /* Disable all clocks and put PHY into COMA. */
    pmcfg = CSR_READ_4(sc, ET_PM);
    pmcfg &= ~(EM_PM_GIGEPHY_ENB | ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE |
        ET_PM_RXCLK_GATE);
    pmcfg |= ET_PM_PHY_SW_COMA;
    CSR_WRITE_4(sc, ET_PM, pmcfg);
    ET_UNLOCK(sc);
    return (0);
}
/*
 * Bus resume method: take the PHY out of COMA, re-enable the MAC
 * clocks and re-initialize the interface if it was configured up.
 */
static int
et_resume(device_t dev)
{
    struct et_softc *sc;
    uint32_t pmcfg;

    sc = device_get_softc(dev);
    ET_LOCK(sc);
    /* Take PHY out of COMA and enable clocks. */
    pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
    /* The gigabit PHY clock is only enabled on non-fast-ethernet parts. */
    if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
        pmcfg |= EM_PM_GIGEPHY_ENB;
    CSR_WRITE_4(sc, ET_PM, pmcfg);
    if ((if_getflags(sc->ifp) & IFF_UP) != 0)
        et_init_locked(sc);
    ET_UNLOCK(sc);
    return (0);
}
diff --git a/sys/dev/etherswitch/ar40xx/ar40xx_phy.c b/sys/dev/etherswitch/ar40xx/ar40xx_phy.c
index b9a308e3620d..f27e7503eb28 100644
--- a/sys/dev/etherswitch/ar40xx/ar40xx_phy.c
+++ b/sys/dev/etherswitch/ar40xx/ar40xx_phy.c
@@ -1,252 +1,244 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2022 Adrian Chadd <adrian@FreeBSD.org>.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <machine/bus.h>
#include <dev/iicbus/iic.h>
#include <dev/iicbus/iiconf.h>
#include <dev/iicbus/iicbus.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mdio/mdio.h>
#include <dev/clk/clk.h>
#include <dev/hwreset/hwreset.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/etherswitch/etherswitch.h>
#include <dev/etherswitch/ar40xx/ar40xx_var.h>
#include <dev/etherswitch/ar40xx/ar40xx_reg.h>
#include <dev/etherswitch/ar40xx/ar40xx_hw.h>
#include <dev/etherswitch/ar40xx/ar40xx_hw_mdio.h>
#include <dev/etherswitch/ar40xx/ar40xx_hw_port.h>
#include <dev/etherswitch/ar40xx/ar40xx_hw_atu.h>
#include <dev/etherswitch/ar40xx/ar40xx_phy.h>
#include <dev/etherswitch/ar40xx/ar40xx_debug.h>
#include "mdio_if.h"
#include "miibus_if.h"
#include "etherswitch_if.h"
/*
 * Periodic PHY tick: poll each switch PHY's hardware port status and
 * reconcile it with the mii(4) layer's view of the link.  On a link
 * transition the port hardware is reprogrammed and the ATU entries for
 * that port are flushed.  Always returns 0.  Caller must hold the
 * softc lock.
 */
int
ar40xx_phy_tick(struct ar40xx_softc *sc)
{
    struct mii_softc *miisc;
    struct mii_data *mii;
    int phy;
    uint32_t reg;

    AR40XX_LOCK_ASSERT(sc);
    AR40XX_REG_BARRIER_READ(sc);
    /*
     * Loop over; update phy port status here
     */
    for (phy = 0; phy < AR40XX_NUM_PHYS; phy++) {
        /*
         * Port here is PHY, not port!  (Switch port number is phy + 1.)
         */
        reg = AR40XX_REG_READ(sc, AR40XX_REG_PORT_STATUS(phy + 1));
        mii = device_get_softc(sc->sc_phys.miibus[phy]);
        /*
         * Compare the current link status to the previous link
         * status. We may need to clear ATU / change phy config.
         */
        /* Transition: hardware says up, mii still thinks down. */
        if (((reg & AR40XX_PORT_STATUS_LINK_UP) != 0) &&
            (mii->mii_media_status & IFM_ACTIVE) == 0) {
            AR40XX_DPRINTF(sc, AR40XX_DBG_PORT_STATUS,
                "%s: PHY %d: down -> up\n", __func__, phy);
            ar40xx_hw_port_link_up(sc, phy + 1);
            ar40xx_hw_atu_flush_port(sc, phy + 1);
        }
        /* Transition: hardware says down, mii still thinks up. */
        if (((reg & AR40XX_PORT_STATUS_LINK_UP) == 0) &&
            (mii->mii_media_status & IFM_ACTIVE) != 0) {
            AR40XX_DPRINTF(sc, AR40XX_DBG_PORT_STATUS,
                "%s: PHY %d: up -> down\n", __func__, phy);
            ar40xx_hw_port_link_down(sc, phy + 1);
            ar40xx_hw_atu_flush_port(sc, phy + 1);
        }
        mii_tick(mii);
        /* Refresh the status of the currently selected PHY instance. */
        LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
            if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) !=
                miisc->mii_inst)
                continue;
            ukphy_status(miisc);
            mii_phy_update(miisc, MII_POLLSTAT);
        }
    }
    return (0);
}
/*
 * Map a PHY index to its switch port number; ports are numbered one
 * above their PHY.
 */
static inline int
ar40xx_portforphy(int phy)
{
    return (phy + 1);
}
/*
 * Return the mii_data for the given switch port, or NULL if the port
 * does not map to a valid PHY.
 */
struct mii_data *
ar40xx_phy_miiforport(struct ar40xx_softc *sc, int port)
{
    const int phy = port - 1;

    if (phy < 0 || phy >= AR40XX_NUM_PHYS)
        return (NULL);
    return (device_get_softc(sc->sc_phys.miibus[phy]));
}
/*
 * Return the dummy ifnet for the given switch port, or NULL if the
 * port does not map to a valid PHY.
 */
if_t
ar40xx_phy_ifpforport(struct ar40xx_softc *sc, int port)
{
    const int phy = port - 1;

    if (phy < 0 || phy >= AR40XX_NUM_PHYS)
        return (NULL);
    return (sc->sc_phys.ifp[phy]);
}
/*
 * ifmedia change callback for the per-PHY dummy interfaces: forward
 * the request to the mii layer.  Returns ENXIO if the unit doesn't
 * map to a PHY.
 */
static int
ar40xx_ifmedia_upd(if_t ifp)
{
    struct ar40xx_softc *sc;
    struct mii_data *mii;

    sc = if_getsoftc(ifp);
    mii = ar40xx_phy_miiforport(sc, if_getdunit(ifp));
    AR40XX_DPRINTF(sc, AR40XX_DBG_PORT_STATUS, "%s: called, PHY %d\n",
        __func__, if_getdunit(ifp));
    if (mii == NULL)
        return (ENXIO);
    mii_mediachg(mii);
    return (0);
}
/*
 * ifmedia status callback for the per-PHY dummy interfaces: poll the
 * mii layer and report its active media and status.  Silently does
 * nothing if the unit doesn't map to a PHY.
 */
static void
ar40xx_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
    struct ar40xx_softc *sc;
    struct mii_data *mii;

    sc = if_getsoftc(ifp);
    mii = ar40xx_phy_miiforport(sc, if_getdunit(ifp));
    AR40XX_DPRINTF(sc, AR40XX_DBG_PORT_STATUS, "%s: called, PHY %d\n",
        __func__, if_getdunit(ifp));
    if (mii == NULL)
        return;
    mii_pollstat(mii);
    ifmr->ifm_active = mii->mii_media_active;
    ifmr->ifm_status = mii->mii_media_status;
}
/*
 * Attach a miibus instance (behind a dummy ifnet) for each switch PHY.
 * NOTE(review): this span still carries unified-diff '-' markers from
 * the upstream change that removed the if_alloc() NULL check
 * (if_alloc() can no longer fail); the markers are preserved verbatim.
 * NOTE(review): on mii_attach() failure the ifnets/names allocated so
 * far are not freed — presumed intentional/known leak, confirm.
 */
int
ar40xx_attach_phys(struct ar40xx_softc *sc)
{
int phy, err = 0;
char name[IFNAMSIZ];
/* PHYs need an interface, so we generate a dummy one */
snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->sc_dev));
for (phy = 0; phy < AR40XX_NUM_PHYS; phy++) {
sc->sc_phys.ifp[phy] = if_alloc(IFT_ETHER);
- if (sc->sc_phys.ifp[phy] == NULL) {
- device_printf(sc->sc_dev,
- "PHY %d: couldn't allocate ifnet structure\n",
- phy);
- err = ENOMEM;
- break;
- }
-
sc->sc_phys.ifp[phy]->if_softc = sc;
sc->sc_phys.ifp[phy]->if_flags |= IFF_UP | IFF_BROADCAST |
IFF_DRV_RUNNING | IFF_SIMPLEX;
sc->sc_phys.ifname[phy] = malloc(strlen(name)+1, M_DEVBUF,
M_WAITOK);
bcopy(name, sc->sc_phys.ifname[phy], strlen(name)+1);
if_initname(sc->sc_phys.ifp[phy], sc->sc_phys.ifname[phy],
ar40xx_portforphy(phy));
err = mii_attach(sc->sc_dev, &sc->sc_phys.miibus[phy],
sc->sc_phys.ifp[phy], ar40xx_ifmedia_upd,
ar40xx_ifmedia_sts, BMSR_DEFCAPMASK,
phy, MII_OFFSET_ANY, 0);
device_printf(sc->sc_dev,
"%s attached to pseudo interface %s\n",
device_get_nameunit(sc->sc_phys.miibus[phy]),
sc->sc_phys.ifp[phy]->if_xname);
if (err != 0) {
device_printf(sc->sc_dev,
"attaching PHY %d failed\n",
phy);
return (err);
}
}
return (0);
}
/*
 * Probe and log the ID registers (MII registers 2 and 3) of every
 * switch PHY.  Purely diagnostic; always returns 0.
 */
int
ar40xx_hw_phy_get_ids(struct ar40xx_softc *sc)
{
    uint32_t id1, id2;
    int phy;

    for (phy = 0; phy < AR40XX_NUM_PHYS; phy++) {
        id1 = MDIO_READREG(sc->sc_mdio_dev, phy, 2);
        id2 = MDIO_READREG(sc->sc_mdio_dev, phy, 3);
        device_printf(sc->sc_dev,
            "%s: PHY %d: ID1=0x%04x, ID2=0x%04x\n",
            __func__, phy, id1, id2);
    }
    return (0);
}
diff --git a/sys/dev/etherswitch/arswitch/arswitch.c b/sys/dev/etherswitch/arswitch/arswitch.c
index 70f3ad501d5d..f1e6d1944038 100644
--- a/sys/dev/etherswitch/arswitch/arswitch.c
+++ b/sys/dev/etherswitch/arswitch/arswitch.c
@@ -1,1323 +1,1317 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2011-2012 Stefan Bethke.
* Copyright (c) 2012 Adrian Chadd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <machine/bus.h>
#include <dev/iicbus/iic.h>
#include <dev/iicbus/iiconf.h>
#include <dev/iicbus/iicbus.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mdio/mdio.h>
#include <dev/etherswitch/etherswitch.h>
#include <dev/etherswitch/arswitch/arswitchreg.h>
#include <dev/etherswitch/arswitch/arswitchvar.h>
#include <dev/etherswitch/arswitch/arswitch_reg.h>
#include <dev/etherswitch/arswitch/arswitch_phy.h>
#include <dev/etherswitch/arswitch/arswitch_vlans.h>
#include <dev/etherswitch/arswitch/arswitch_8216.h>
#include <dev/etherswitch/arswitch/arswitch_8226.h>
#include <dev/etherswitch/arswitch/arswitch_8316.h>
#include <dev/etherswitch/arswitch/arswitch_8327.h>
#include "mdio_if.h"
#include "miibus_if.h"
#include "etherswitch_if.h"
/*
 * Map ETHERSWITCH_PORT_LED_* to Atheros pattern codes — the raw
 * values programmed into the switch's LED pattern field (presumably
 * consumed by arswitch_setled(); confirm against the register layout).
 */
static int led_pattern_table[] = {
    [ETHERSWITCH_PORT_LED_DEFAULT] = 0x3,
    [ETHERSWITCH_PORT_LED_ON] = 0x2,
    [ETHERSWITCH_PORT_LED_OFF] = 0x0,
    [ETHERSWITCH_PORT_LED_BLINK] = 0x1
};
static inline int arswitch_portforphy(int phy);
static void arswitch_tick(void *arg);
static int arswitch_ifmedia_upd(if_t);
static void arswitch_ifmedia_sts(if_t, struct ifmediareq *);
static int ar8xxx_port_vlan_setup(struct arswitch_softc *sc,
etherswitch_port_t *p);
static int ar8xxx_port_vlan_get(struct arswitch_softc *sc,
etherswitch_port_t *p);
static int arswitch_setled(struct arswitch_softc *sc, int phy, int led,
int style);
/*
 * Device probe: read the MASK_CTRL register and identify the switch
 * by its (version, revision) pair.  Returns BUS_PROBE_DEFAULT for a
 * recognized chip, ENXIO otherwise.
 */
static int
arswitch_probe(device_t dev)
{
    struct arswitch_softc *sc;
    uint32_t id;
    char *chipname;

    sc = device_get_softc(dev);
    bzero(sc, sizeof(*sc));
    sc->page = -1;  /* invalidate the cached register page (also done in attach) */

    /* AR8xxx probe */
    id = arswitch_readreg(dev, AR8X16_REG_MASK_CTRL);
    sc->chip_rev = (id & AR8X16_MASK_CTRL_REV_MASK);
    sc->chip_ver = (id & AR8X16_MASK_CTRL_VER_MASK) >> AR8X16_MASK_CTRL_VER_SHIFT;
    switch (id & (AR8X16_MASK_CTRL_VER_MASK | AR8X16_MASK_CTRL_REV_MASK)) {
    case 0x0101:
        chipname = "AR8216";
        sc->sc_switchtype = AR8X16_SWITCH_AR8216;
        break;
    case 0x0201:
        chipname = "AR8226";
        sc->sc_switchtype = AR8X16_SWITCH_AR8226;
        break;
    /* 0x0301 - AR8236 */
    case 0x1000:
    case 0x1001:
        chipname = "AR8316";
        sc->sc_switchtype = AR8X16_SWITCH_AR8316;
        break;
    case 0x1202:
    case 0x1204:
        chipname = "AR8327";
        sc->sc_switchtype = AR8X16_SWITCH_AR8327;
        sc->mii_lo_first = 1;   /* this chip wants the low MII word written first */
        break;
    default:
        chipname = NULL;
    }
    /* NOTE(review): chipname may be NULL here for unknown chips; the
     * kernel printf renders it as "(null)", so harmless — but confirm. */
    DPRINTF(sc, ARSWITCH_DBG_ANY, "chipname=%s, id=%08x\n", chipname, id);
    if (chipname != NULL) {
        device_set_descf(dev,
            "Atheros %s Ethernet Switch (ver %d rev %d)",
            chipname, sc->chip_ver, sc->chip_rev);
        return (BUS_PROBE_DEFAULT);
    }
    return (ENXIO);
}
/*
 * Attach a miibus instance (behind a dummy ifnet) for each switch PHY,
 * and on the AR8327 record per-port LED bookkeeping.
 * NOTE(review): this span still carries unified-diff '-' markers from
 * the upstream change that removed the if_alloc() NULL check
 * (if_alloc() can no longer fail); the markers are preserved verbatim.
 * NOTE(review): 'ledname' is formatted but never used in the visible
 * code — possibly dead, or used by code removed from this hunk; verify.
 */
static int
arswitch_attach_phys(struct arswitch_softc *sc)
{
int phy, err = 0;
char name[IFNAMSIZ];
/* PHYs need an interface, so we generate a dummy one */
snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->sc_dev));
for (phy = 0; phy < sc->numphys; phy++) {
sc->ifp[phy] = if_alloc(IFT_ETHER);
- if (sc->ifp[phy] == NULL) {
- device_printf(sc->sc_dev, "couldn't allocate ifnet structure\n");
- err = ENOMEM;
- break;
- }
-
if_setsoftc(sc->ifp[phy], sc);
if_setflagbits(sc->ifp[phy], IFF_UP | IFF_BROADCAST |
IFF_DRV_RUNNING | IFF_SIMPLEX, 0);
sc->ifname[phy] = malloc(strlen(name)+1, M_DEVBUF, M_WAITOK);
bcopy(name, sc->ifname[phy], strlen(name)+1);
if_initname(sc->ifp[phy], sc->ifname[phy],
arswitch_portforphy(phy));
err = mii_attach(sc->sc_dev, &sc->miibus[phy], sc->ifp[phy],
arswitch_ifmedia_upd, arswitch_ifmedia_sts, \
BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
#if 0
DPRINTF(sc->sc_dev, "%s attached to pseudo interface %s\n",
device_get_nameunit(sc->miibus[phy]),
sc->ifp[phy]->if_xname);
#endif
if (err != 0) {
device_printf(sc->sc_dev,
"attaching PHY %d failed\n",
phy);
return (err);
}
if (AR8X16_IS_SWITCH(sc, AR8327)) {
int led;
char ledname[IFNAMSIZ+4];
for (led = 0; led < 3; led++) {
sprintf(ledname, "%s%dled%d", name,
arswitch_portforphy(phy), led+1);
sc->dev_led[phy][led].sc = sc;
sc->dev_led[phy][led].phy = phy;
sc->dev_led[phy][led].lednum = led;
}
}
}
return (0);
}
/*
 * Issue a soft reset to the switch and verify it completed; the reset
 * bit self-clears when the hardware is done.  Returns 0 on success,
 * -1 if the reset bit is still set after the delay.
 */
static int
arswitch_reset(device_t dev)
{
    /* Request the soft reset and give the hardware time to act on it. */
    arswitch_writereg(dev, AR8X16_REG_MASK_CTRL,
        AR8X16_MASK_CTRL_SOFT_RESET);
    DELAY(1000);

    if ((arswitch_readreg(dev, AR8X16_REG_MASK_CTRL) &
        AR8X16_MASK_CTRL_SOFT_RESET) != 0) {
        device_printf(dev, "unable to reset switch\n");
        return (-1);
    }
    return (0);
}
/*
 * Select the switch VLAN mode (dot1q, port-based, or none) and
 * reprogram the hardware VLAN tables for it.  Returns EINVAL for a
 * mode the hardware does not advertise in its capabilities.
 */
static int
arswitch_set_vlan_mode(struct arswitch_softc *sc, uint32_t mode)
{
    /* Check for invalid modes. */
    if ((mode & sc->info.es_vlan_caps) != mode)
        return (EINVAL);

    if (mode == ETHERSWITCH_VLAN_DOT1Q || mode == ETHERSWITCH_VLAN_PORT)
        sc->vlan_mode = mode;
    else
        sc->vlan_mode = 0;

    /* Reset VLANs. */
    sc->hal.arswitch_vlan_init_hw(sc);
    return (0);
}
static void
ar8xxx_port_init(struct arswitch_softc *sc, int port)
{
/* Port0 - CPU */
if (port == AR8X16_PORT_CPU) {
arswitch_writereg(sc->sc_dev, AR8X16_REG_PORT_STS(0),
(AR8X16_IS_SWITCH(sc, AR8216) ?
AR8X16_PORT_STS_SPEED_100 : AR8X16_PORT_STS_SPEED_1000) |
(AR8X16_IS_SWITCH(sc, AR8216) ? 0 : AR8X16_PORT_STS_RXFLOW) |
(AR8X16_IS_SWITCH(sc, AR8216) ? 0 : AR8X16_PORT_STS_TXFLOW) |
AR8X16_PORT_STS_RXMAC |
AR8X16_PORT_STS_TXMAC |
AR8X16_PORT_STS_DUPLEX);
arswitch_writereg(sc->sc_dev, AR8X16_REG_PORT_CTRL(0),
arswitch_readreg(sc->sc_dev, AR8X16_REG_PORT_CTRL(0)) &
~AR8X16_PORT_CTRL_HEADER);
} else {
/* Set ports to auto negotiation. */
arswitch_writereg(sc->sc_dev, AR8X16_REG_PORT_STS(port),
AR8X16_PORT_STS_LINK_AUTO);
arswitch_writereg(sc->sc_dev, AR8X16_REG_PORT_CTRL(port),
arswitch_readreg(sc->sc_dev, AR8X16_REG_PORT_CTRL(port)) &
~AR8X16_PORT_CTRL_HEADER);
}
}
/*
 * Spin until the ATU's ACTIVE bit clears, i.e. the previous address
 * table operation has finished.  Returns the arswitch_waitreg()
 * result (non-zero on timeout).  Caller must hold the switch lock.
 */
static int
ar8xxx_atu_wait_ready(struct arswitch_softc *sc)
{
    ARSWITCH_LOCK_ASSERT(sc, MA_OWNED);

    return (arswitch_waitreg(sc->sc_dev, AR8216_REG_ATU,
        AR8216_ATU_ACTIVE, 0, 1000));
}
/*
 * Flush all ATU entries.
 *
 * Waits for the ATU to go idle, then kicks off a full-table flush.
 * Returns non-zero (and skips the flush) if the ATU never went idle.
 */
static int
ar8xxx_atu_flush(struct arswitch_softc *sc)
{
    int err;

    ARSWITCH_LOCK_ASSERT(sc, MA_OWNED);

    DPRINTF(sc, ARSWITCH_DBG_ATU, "%s: flushing all ports\n", __func__);

    err = ar8xxx_atu_wait_ready(sc);
    if (err)
        device_printf(sc->sc_dev, "%s: waitreg failed\n", __func__);
    else
        arswitch_writereg(sc->sc_dev, AR8216_REG_ATU,
            AR8216_ATU_OP_FLUSH | AR8216_ATU_ACTIVE);
    return (err);
}
/*
* Flush ATU entries for a single port.
*/
static int
ar8xxx_atu_flush_port(struct arswitch_softc *sc, int port)
{
int ret, val;
DPRINTF(sc, ARSWITCH_DBG_ATU, "%s: flushing port %d\n", __func__,
port);
ARSWITCH_LOCK_ASSERT(sc, MA_OWNED);
/* Flush unicast entries on port */
val = AR8216_ATU_OP_FLUSH_UNICAST;
/* TODO: bit 4 indicates whether to flush dynamic (0) or static (1) */
/* Which port */
val |= SM(port, AR8216_ATU_PORT_NUM);
ret = ar8xxx_atu_wait_ready(sc);
if (ret)
device_printf(sc->sc_dev, "%s: waitreg failed\n", __func__);
if (!ret)
arswitch_writereg(sc->sc_dev,
AR8216_REG_ATU,
val | AR8216_ATU_ACTIVE);
return (ret);
}
/*
* XXX TODO: flush a single MAC address.
*/
/*
 * Fetch a single entry from the ATU.
 *
 * atu_fetch_op selects the phase: 0 primes the hardware GET_NEXT
 * cursor; 1 steps the cursor and copies the next entry into 'e'.
 * Returns 0 on success, -1 when iteration is exhausted or the op is
 * unknown.  Caller must hold the switch lock.
 */
static int
ar8xxx_atu_fetch_table(struct arswitch_softc *sc, etherswitch_atu_entry_t *e,
    int atu_fetch_op)
{
    uint32_t ret0, ret1, ret2, val;

    ARSWITCH_LOCK_ASSERT(sc, MA_OWNED);
    switch (atu_fetch_op) {
    case 0:
        /* Initialise things for the first fetch */
        DPRINTF(sc, ARSWITCH_DBG_ATU, "%s: initializing\n", __func__);
        (void) ar8xxx_atu_wait_ready(sc);
        /* Arm GET_NEXT and zero the cursor registers. */
        arswitch_writereg(sc->sc_dev,
            AR8216_REG_ATU, AR8216_ATU_OP_GET_NEXT);
        arswitch_writereg(sc->sc_dev,
            AR8216_REG_ATU_DATA, 0);
        arswitch_writereg(sc->sc_dev,
            AR8216_REG_ATU_CTRL2, 0);
        return (0);
    case 1:
        DPRINTF(sc, ARSWITCH_DBG_ATU, "%s: reading next\n", __func__);
        /*
         * Attempt to read the next address entry; don't modify what
         * is there in AT_ADDR{4,5} as its used for the next fetch
         */
        (void) ar8xxx_atu_wait_ready(sc);
        /* Begin the next read event; not modifying anything */
        val = arswitch_readreg(sc->sc_dev, AR8216_REG_ATU);
        val |= AR8216_ATU_ACTIVE;
        arswitch_writereg(sc->sc_dev, AR8216_REG_ATU, val);
        /* Wait for it to complete */
        (void) ar8xxx_atu_wait_ready(sc);
        /* Fetch the ethernet address and ATU status */
        ret0 = arswitch_readreg(sc->sc_dev, AR8216_REG_ATU);
        ret1 = arswitch_readreg(sc->sc_dev, AR8216_REG_ATU_DATA);
        ret2 = arswitch_readreg(sc->sc_dev, AR8216_REG_ATU_CTRL2);
        /* If the status is zero, then we're done */
        if (MS(ret2, AR8216_ATU_CTRL2_AT_STATUS) == 0)
            return (-1);
        /* MAC address (bytes 4-5 in ATU, 0-3 in ATU_DATA). */
        e->es_macaddr[5] = MS(ret0, AR8216_ATU_ADDR5);
        e->es_macaddr[4] = MS(ret0, AR8216_ATU_ADDR4);
        e->es_macaddr[3] = MS(ret1, AR8216_ATU_ADDR3);
        e->es_macaddr[2] = MS(ret1, AR8216_ATU_ADDR2);
        e->es_macaddr[1] = MS(ret1, AR8216_ATU_ADDR1);
        e->es_macaddr[0] = MS(ret1, AR8216_ATU_ADDR0);
        /* Bitmask of ports this entry is for */
        e->es_portmask = MS(ret2, AR8216_ATU_CTRL2_DESPORT);
        /* TODO: other flags that are interesting */
        DPRINTF(sc, ARSWITCH_DBG_ATU, "%s: MAC %6D portmask 0x%08x\n",
            __func__,
            e->es_macaddr, ":", e->es_portmask);
        return (0);
    default:
        return (-1);
    }
    return (-1);
}
/*
 * Configure aging register defaults.
 *
 * Programs the ATU control register with the default learning/aging
 * policy:
 *
 * + ARP_EN - enable "acknowledgement" of ARP frames - they are
 *   forwarded to the CPU port
 * + LEARN_CHANGE_EN - hash table violations when learning MAC
 *   addresses will force an entry to be expired/updated and a new
 *   one to be programmed in.
 * + AGE_EN - enable address table aging
 * + AGE_TIME - set to 5 minutes
 */
static int
ar8xxx_atu_learn_default(struct arswitch_softc *sc)
{
    uint32_t ctrl;
    int err;

    DPRINTF(sc, ARSWITCH_DBG_ATU, "%s: resetting learning\n", __func__);

    ctrl = AR8216_ATU_CTRL_ARP_EN | AR8216_ATU_CTRL_LEARN_CHANGE |
        AR8216_ATU_CTRL_AGE_EN |
        0x2b;   /* 5 minutes; bits 15:0 */
    err = arswitch_writereg(sc->sc_dev, AR8216_REG_ATU_CTRL, ctrl);
    if (err)
        device_printf(sc->sc_dev, "%s: writereg failed\n", __func__);
    return (err);
}
/*
* XXX TODO: add another routine to configure the leaky behaviour
* when unknown frames are received. These must be consistent
* between ethernet switches.
*/
/*
 * Fetch the configured switch MAC address.
 *
 * The address is spread over two registers: ADDR0 carries bytes 4-5
 * and ADDR1 carries bytes 0-3.  Always returns 0.
 */
static int
ar8xxx_hw_get_switch_macaddr(struct arswitch_softc *sc, struct ether_addr *ea)
{
    uint32_t reg0, reg1;
    char *out;

    out = (void *) ea;
    reg0 = arswitch_readreg(sc->sc_dev, AR8X16_REG_SW_MAC_ADDR0);
    reg1 = arswitch_readreg(sc->sc_dev, AR8X16_REG_SW_MAC_ADDR1);

    out[5] = MS(reg0, AR8X16_REG_SW_MAC_ADDR0_BYTE5);
    out[4] = MS(reg0, AR8X16_REG_SW_MAC_ADDR0_BYTE4);
    out[3] = MS(reg1, AR8X16_REG_SW_MAC_ADDR1_BYTE3);
    out[2] = MS(reg1, AR8X16_REG_SW_MAC_ADDR1_BYTE2);
    out[1] = MS(reg1, AR8X16_REG_SW_MAC_ADDR1_BYTE1);
    out[0] = MS(reg1, AR8X16_REG_SW_MAC_ADDR1_BYTE0);
    return (0);
}
/*
 * Set the switch mac address.
 *
 * Not implemented for the generic ar8xxx HAL; always reports ENXIO.
 */
static int
ar8xxx_hw_set_switch_macaddr(struct arswitch_softc *sc,
    const struct ether_addr *ea)
{
    return (ENXIO);
}
/*
 * Attach the switch: allocate the ATU shadow table, install the
 * default HAL methods, hook in chip-specific methods, read hints,
 * reset and configure the hardware, attach the PHYs and start the
 * periodic link-status tick.
 *
 * Error paths now release the mutex and ATU table allocated here
 * (previously both were leaked on every failure return).
 * XXX TODO: children created by the later bus/PHY attach stages are
 * still not torn down on failure.
 */
static int
arswitch_attach(device_t dev)
{
	struct arswitch_softc *sc = device_get_softc(dev);
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	int err = 0;
	int port;

	/* sc->sc_switchtype is already decided in arswitch_probe() */
	sc->sc_dev = dev;
	mtx_init(&sc->sc_mtx, "arswitch", NULL, MTX_DEF);
	sc->page = -1;
	strlcpy(sc->info.es_name, device_get_desc(dev),
	    sizeof(sc->info.es_name));

	/* Debugging */
	ctx = device_get_sysctl_ctx(sc->sc_dev);
	tree = device_get_sysctl_tree(sc->sc_dev);
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "debug", CTLFLAG_RW, &sc->sc_debug, 0,
	    "control debugging printfs");

	/* Allocate a 128 entry ATU table; hopefully its big enough! */
	/* XXX TODO: make this per chip */
	sc->atu.entries = malloc(sizeof(etherswitch_atu_entry_t) * 128,
	    M_DEVBUF, M_NOWAIT);
	if (sc->atu.entries == NULL) {
		device_printf(sc->sc_dev, "%s: failed to allocate ATU table\n",
		    __func__);
		mtx_destroy(&sc->sc_mtx);
		return (ENXIO);
	}
	sc->atu.count = 0;
	sc->atu.size = 128;

	/* Default HAL methods */
	sc->hal.arswitch_port_init = ar8xxx_port_init;
	sc->hal.arswitch_port_vlan_setup = ar8xxx_port_vlan_setup;
	sc->hal.arswitch_port_vlan_get = ar8xxx_port_vlan_get;
	sc->hal.arswitch_vlan_init_hw = ar8xxx_reset_vlans;
	sc->hal.arswitch_hw_get_switch_macaddr = ar8xxx_hw_get_switch_macaddr;
	sc->hal.arswitch_hw_set_switch_macaddr = ar8xxx_hw_set_switch_macaddr;

	sc->hal.arswitch_vlan_getvgroup = ar8xxx_getvgroup;
	sc->hal.arswitch_vlan_setvgroup = ar8xxx_setvgroup;

	sc->hal.arswitch_vlan_get_pvid = ar8xxx_get_pvid;
	sc->hal.arswitch_vlan_set_pvid = ar8xxx_set_pvid;

	sc->hal.arswitch_get_dot1q_vlan = ar8xxx_get_dot1q_vlan;
	sc->hal.arswitch_set_dot1q_vlan = ar8xxx_set_dot1q_vlan;
	sc->hal.arswitch_flush_dot1q_vlan = ar8xxx_flush_dot1q_vlan;
	sc->hal.arswitch_purge_dot1q_vlan = ar8xxx_purge_dot1q_vlan;
	sc->hal.arswitch_get_port_vlan = ar8xxx_get_port_vlan;
	sc->hal.arswitch_set_port_vlan = ar8xxx_set_port_vlan;

	sc->hal.arswitch_atu_flush = ar8xxx_atu_flush;
	sc->hal.arswitch_atu_flush_port = ar8xxx_atu_flush_port;
	sc->hal.arswitch_atu_learn_default = ar8xxx_atu_learn_default;
	sc->hal.arswitch_atu_fetch_table = ar8xxx_atu_fetch_table;

	sc->hal.arswitch_phy_read = arswitch_readphy_internal;
	sc->hal.arswitch_phy_write = arswitch_writephy_internal;

	/*
	 * Attach switch related functions
	 */
	if (AR8X16_IS_SWITCH(sc, AR8216))
		ar8216_attach(sc);
	else if (AR8X16_IS_SWITCH(sc, AR8226))
		ar8226_attach(sc);
	else if (AR8X16_IS_SWITCH(sc, AR8316))
		ar8316_attach(sc);
	else if (AR8X16_IS_SWITCH(sc, AR8327))
		ar8327_attach(sc);
	else {
		DPRINTF(sc, ARSWITCH_DBG_ANY,
		    "%s: unknown switch (%d)?\n", __func__, sc->sc_switchtype);
		err = ENXIO;
		goto fail;
	}

	/* Common defaults. */
	sc->info.es_nports = 5; /* XXX technically 6, but 6th not used */

	/* XXX Defaults for externally connected AR8316 */
	sc->numphys = 4;
	sc->phy4cpu = 1;
	sc->is_rgmii = 1;
	sc->is_gmii = 0;
	sc->is_mii = 0;

	/* Hints may override the defaults above. */
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "numphys", &sc->numphys);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "phy4cpu", &sc->phy4cpu);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "is_rgmii", &sc->is_rgmii);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "is_gmii", &sc->is_gmii);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "is_mii", &sc->is_mii);

	if (sc->numphys > AR8X16_NUM_PHYS)
		sc->numphys = AR8X16_NUM_PHYS;

	/* Reset the switch. */
	if (arswitch_reset(dev)) {
		DPRINTF(sc, ARSWITCH_DBG_ANY,
		    "%s: arswitch_reset: failed\n", __func__);
		err = ENXIO;
		goto fail;
	}

	err = sc->hal.arswitch_hw_setup(sc);
	if (err != 0) {
		DPRINTF(sc, ARSWITCH_DBG_ANY,
		    "%s: hw_setup: err=%d\n", __func__, err);
		goto fail;
	}

	err = sc->hal.arswitch_hw_global_setup(sc);
	if (err != 0) {
		DPRINTF(sc, ARSWITCH_DBG_ANY,
		    "%s: hw_global_setup: err=%d\n", __func__, err);
		goto fail;
	}

	/*
	 * Configure the default address table learning parameters for this
	 * switch.
	 */
	err = sc->hal.arswitch_atu_learn_default(sc);
	if (err != 0) {
		DPRINTF(sc, ARSWITCH_DBG_ANY,
		    "%s: atu_learn_default: err=%d\n", __func__, err);
		goto fail;
	}

	/* Initialize the switch ports. */
	for (port = 0; port <= sc->numphys; port++) {
		sc->hal.arswitch_port_init(sc, port);
	}

	/*
	 * Attach the PHYs and complete the bus enumeration.
	 */
	err = arswitch_attach_phys(sc);
	if (err != 0) {
		DPRINTF(sc, ARSWITCH_DBG_ANY,
		    "%s: attach_phys: err=%d\n", __func__, err);
		goto fail;
	}

	/* Default to ingress filters off. */
	err = arswitch_set_vlan_mode(sc, 0);
	if (err != 0) {
		DPRINTF(sc, ARSWITCH_DBG_ANY,
		    "%s: set_vlan_mode: err=%d\n", __func__, err);
		goto fail;
	}

	bus_generic_probe(dev);
	bus_enumerate_hinted_children(dev);
	err = bus_generic_attach(dev);
	if (err != 0) {
		DPRINTF(sc, ARSWITCH_DBG_ANY,
		    "%s: bus_generic_attach: err=%d\n", __func__, err);
		goto fail;
	}

	callout_init_mtx(&sc->callout_tick, &sc->sc_mtx, 0);

	ARSWITCH_LOCK(sc);
	arswitch_tick(sc);
	ARSWITCH_UNLOCK(sc);

	return (0);

fail:
	free(sc->atu.entries, M_DEVBUF);
	sc->atu.entries = NULL;
	mtx_destroy(&sc->sc_mtx);
	return (err);
}
static int
arswitch_detach(device_t dev)
{
	struct arswitch_softc *sc = device_get_softc(dev);
	int phy;

	/* Stop the periodic poll before tearing anything down. */
	callout_drain(&sc->callout_tick);

	for (phy = 0; phy < sc->numphys; phy++) {
		if (sc->miibus[phy] != NULL)
			device_delete_child(dev, sc->miibus[phy]);
		if (sc->ifp[phy] != NULL)
			if_free(sc->ifp[phy]);
		free(sc->ifname[phy], M_DEVBUF);
	}

	free(sc->atu.entries, M_DEVBUF);

	bus_generic_detach(dev);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}
/*
* Convert PHY number to port number. PHY0 is connected to port 1, PHY1 to
* port 2, etc.
*/
static inline int
arswitch_portforphy(int phy)
{

	return (1 + phy);
}
static inline struct mii_data *
arswitch_miiforport(struct arswitch_softc *sc, int port)
{
	const int phy = port - 1;

	/* Port 0 (CPU) and out-of-range ports have no PHY. */
	if (phy >= 0 && phy < sc->numphys)
		return (device_get_softc(sc->miibus[phy]));
	return (NULL);
}
static inline if_t
arswitch_ifpforport(struct arswitch_softc *sc, int port)
{
	const int phy = port - 1;

	/* Port 0 (CPU) and out-of-range ports have no ifnet. */
	if (phy >= 0 && phy < sc->numphys)
		return (sc->ifp[phy]);
	return (NULL);
}
/*
* Convert port status to ifmedia.
*/
static void
arswitch_update_ifmedia(int portstatus, u_int *media_status, u_int *media_active)
{
	u_int active, status;

	active = IFM_ETHER;
	status = IFM_AVALID;

	/* No link: report media "none" and return early. */
	if ((portstatus & AR8X16_PORT_STS_LINK_UP) == 0) {
		*media_active = active | IFM_NONE;
		*media_status = status;
		return;
	}
	status |= IFM_ACTIVE;

	switch (portstatus & AR8X16_PORT_STS_SPEED_MASK) {
	case AR8X16_PORT_STS_SPEED_10:
		active |= IFM_10_T;
		break;
	case AR8X16_PORT_STS_SPEED_100:
		active |= IFM_100_TX;
		break;
	case AR8X16_PORT_STS_SPEED_1000:
		active |= IFM_1000_T;
		break;
	}

	/* NOTE(review): DUPLEX bit clear maps to full duplex here. */
	active |= (portstatus & AR8X16_PORT_STS_DUPLEX) ? IFM_HDX : IFM_FDX;

	if (portstatus & AR8X16_PORT_STS_TXFLOW)
		active |= IFM_ETH_TXPAUSE;
	if (portstatus & AR8X16_PORT_STS_RXFLOW)
		active |= IFM_ETH_RXPAUSE;

	*media_status = status;
	*media_active = active;
}
/*
* Poll the status for all PHYs. We're using the switch port status because
* thats a lot quicker to read than talking to all the PHYs. Care must be
* taken that the resulting ifmedia_active is identical to what the PHY will
* compute, or gratuitous link status changes will occur whenever the PHYs
* update function is called.
*/
static void
arswitch_miipollstat(struct arswitch_softc *sc)
{
	int i;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int portstatus;
	int port_flap = 0;	/* set if any port changed link state */

	ARSWITCH_LOCK_ASSERT(sc, MA_OWNED);

	for (i = 0; i < sc->numphys; i++) {
		if (sc->miibus[i] == NULL)
			continue;
		mii = device_get_softc(sc->miibus[i]);
		/* XXX This would be nice to have abstracted out to be per-chip */
		/* AR8327/AR8337 has a different register base */
		if (AR8X16_IS_SWITCH(sc, AR8327))
			portstatus = arswitch_readreg(sc->sc_dev,
			    AR8327_REG_PORT_STATUS(arswitch_portforphy(i)));
		else
			portstatus = arswitch_readreg(sc->sc_dev,
			    AR8X16_REG_PORT_STS(arswitch_portforphy(i)));
#if 1
		DPRINTF(sc, ARSWITCH_DBG_POLL, "p[%d]=0x%08x (%b)\n",
		    i,
		    portstatus,
		    portstatus,
		    "\20\3TXMAC\4RXMAC\5TXFLOW\6RXFLOW\7"
		    "DUPLEX\11LINK_UP\12LINK_AUTO\13LINK_PAUSE");
#endif
		/*
		 * If the current status is down, but we have a link
		 * status showing up, we need to do an ATU flush.
		 */
		if ((mii->mii_media_status & IFM_ACTIVE) == 0 &&
		    (portstatus & AR8X16_PORT_STS_LINK_UP) != 0) {
			device_printf(sc->sc_dev, "%s: port %d: port -> UP\n",
			    __func__,
			    i);
			port_flap = 1;
		}
		/*
		 * and maybe if a port goes up->down?
		 */
		if ((mii->mii_media_status & IFM_ACTIVE) != 0 &&
		    (portstatus & AR8X16_PORT_STS_LINK_UP) == 0) {
			device_printf(sc->sc_dev, "%s: port %d: port -> DOWN\n",
			    __func__,
			    i);
			port_flap = 1;
		}
		/* Push the new state into the mii layer's view. */
		arswitch_update_ifmedia(portstatus, &mii->mii_media_status,
		    &mii->mii_media_active);
		/* Only notify the PHY instance currently selected for this media. */
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
			if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) !=
			    miisc->mii_inst)
				continue;
			mii_phy_update(miisc, MII_POLLSTAT);
		}
	}

	/* If a port went from down->up, flush the ATU */
	if (port_flap)
		sc->hal.arswitch_atu_flush(sc);
}
/* Periodic (1 Hz) link-status poll, self-rescheduled via the callout. */
static void
arswitch_tick(void *arg)
{
	struct arswitch_softc *sc;

	sc = arg;
	arswitch_miipollstat(sc);
	callout_reset(&sc->callout_tick, hz, arswitch_tick, sc);
}
static void
arswitch_lock(device_t dev)
{
	struct arswitch_softc *sc;

	sc = device_get_softc(dev);
	ARSWITCH_LOCK_ASSERT(sc, MA_NOTOWNED);
	ARSWITCH_LOCK(sc);
}
static void
arswitch_unlock(device_t dev)
{
	struct arswitch_softc *sc;

	sc = device_get_softc(dev);
	ARSWITCH_LOCK_ASSERT(sc, MA_OWNED);
	ARSWITCH_UNLOCK(sc);
}
static etherswitch_info_t *
arswitch_getinfo(device_t dev)
{
	struct arswitch_softc *sc;

	sc = device_get_softc(dev);
	return (&sc->info);
}
static int
ar8xxx_port_vlan_get(struct arswitch_softc *sc, etherswitch_port_t *p)
{
	uint32_t ctrl;

	ARSWITCH_LOCK(sc);

	/* Retrieve the PVID. */
	sc->hal.arswitch_vlan_get_pvid(sc, p->es_port, &p->es_pvid);

	/* Port flags. */
	ctrl = arswitch_readreg(sc->sc_dev, AR8X16_REG_PORT_CTRL(p->es_port));
	if (ctrl & AR8X16_PORT_CTRL_DOUBLE_TAG)
		p->es_flags |= ETHERSWITCH_PORT_DOUBLE_TAG;

	/* The egress VLAN mode occupies a two-bit field. */
	switch ((ctrl >> AR8X16_PORT_CTRL_EGRESS_VLAN_MODE_SHIFT) & 0x3) {
	case AR8X16_PORT_CTRL_EGRESS_VLAN_MODE_ADD:
		p->es_flags |= ETHERSWITCH_PORT_ADDTAG;
		break;
	case AR8X16_PORT_CTRL_EGRESS_VLAN_MODE_STRIP:
		p->es_flags |= ETHERSWITCH_PORT_STRIPTAG;
		break;
	}

	ARSWITCH_UNLOCK(sc);
	return (0);
}
static int
arswitch_is_cpuport(struct arswitch_softc *sc, int port)
{

	if (port == AR8X16_PORT_CPU)
		return (1);
	/* The AR8327 exposes a second CPU-facing MAC (GMAC6). */
	if (AR8X16_IS_SWITCH(sc, AR8327) && port == AR8327_PORT_GMAC6)
		return (1);
	return (0);
}
static int
arswitch_getport(device_t dev, etherswitch_port_t *p)
{
	struct arswitch_softc *sc;
	struct mii_data *mii;
	struct ifmediareq *ifmr;
	int err;

	sc = device_get_softc(dev);
	/* XXX +1 is for AR8327; should make this configurable! */
	if (p->es_port < 0 || p->es_port > sc->info.es_nports)
		return (ENXIO);

	err = sc->hal.arswitch_port_vlan_get(sc, p);
	if (err != 0)
		return (err);

	mii = arswitch_miiforport(sc, p->es_port);
	if (arswitch_is_cpuport(sc, p->es_port)) {
		/* fill in fixed values for CPU port */
		/* XXX is this valid in all cases? */
		p->es_flags |= ETHERSWITCH_PORT_CPU;
		ifmr = &p->es_ifmr;
		ifmr->ifm_count = 0;
		ifmr->ifm_current = ifmr->ifm_active =
		    IFM_ETHER | IFM_1000_T | IFM_FDX;
		ifmr->ifm_mask = 0;
		ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
	} else if (mii != NULL) {
		err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr,
		    &mii->mii_media, SIOCGIFMEDIA);
		if (err)
			return (err);
	} else {
		return (ENXIO);
	}

	/* LED status is only exposed for non-CPU AR8327 ports. */
	if (AR8X16_IS_SWITCH(sc, AR8327) &&
	    !arswitch_is_cpuport(sc, p->es_port)) {
		int led;

		p->es_nleds = 3;
		for (led = 0; led < p->es_nleds; led++) {
			uint32_t val;
			int style;

			/* Map the hardware blink pattern back to a style. */
			val = arswitch_readreg(dev,
			    ar8327_led_mapping[p->es_port - 1][led].reg);
			val >>= ar8327_led_mapping[p->es_port - 1][led].shift;
			val &= 0x03;
			for (style = 0; style < ETHERSWITCH_PORT_LED_MAX;
			    style++) {
				if (led_pattern_table[style] == val)
					break;
			}

			/* can't happen */
			if (style == ETHERSWITCH_PORT_LED_MAX)
				style = ETHERSWITCH_PORT_LED_DEFAULT;

			p->es_led[led] = style;
		}
	} else {
		p->es_nleds = 0;
	}

	return (0);
}
static int
ar8xxx_port_vlan_setup(struct arswitch_softc *sc, etherswitch_port_t *p)
{
	uint32_t ctrl;
	int err;

	ARSWITCH_LOCK(sc);

	/* Set the PVID. */
	if (p->es_pvid != 0)
		sc->hal.arswitch_vlan_set_pvid(sc, p->es_port, p->es_pvid);

	/* ADDTAG and STRIPTAG are mutually exclusive. */
	if ((p->es_flags & ETHERSWITCH_PORT_ADDTAG) &&
	    (p->es_flags & ETHERSWITCH_PORT_STRIPTAG)) {
		ARSWITCH_UNLOCK(sc);
		return (EINVAL);
	}

	ctrl = 0;
	if (p->es_flags & ETHERSWITCH_PORT_DOUBLE_TAG)
		ctrl |= AR8X16_PORT_CTRL_DOUBLE_TAG;
	if (p->es_flags & ETHERSWITCH_PORT_ADDTAG)
		ctrl |= AR8X16_PORT_CTRL_EGRESS_VLAN_MODE_ADD <<
		    AR8X16_PORT_CTRL_EGRESS_VLAN_MODE_SHIFT;
	if (p->es_flags & ETHERSWITCH_PORT_STRIPTAG)
		ctrl |= AR8X16_PORT_CTRL_EGRESS_VLAN_MODE_STRIP <<
		    AR8X16_PORT_CTRL_EGRESS_VLAN_MODE_SHIFT;

	/* Only touch the double-tag and egress-mode bits. */
	err = arswitch_modifyreg(sc->sc_dev,
	    AR8X16_REG_PORT_CTRL(p->es_port),
	    0x3 << AR8X16_PORT_CTRL_EGRESS_VLAN_MODE_SHIFT |
	    AR8X16_PORT_CTRL_DOUBLE_TAG, ctrl);

	ARSWITCH_UNLOCK(sc);
	return (err);
}
/*
 * Apply VLAN flags, LED styles (AR8327 only) and media settings to a
 * port.  Media and LED changes are rejected for CPU-facing ports.
 *
 * Fix: the LED loop previously declared a second "int err" that
 * shadowed the function-scope one; the inner declaration is gone.
 */
static int
arswitch_setport(device_t dev, etherswitch_port_t *p)
{
	int err, i;
	struct arswitch_softc *sc;
	struct ifmedia *ifm;
	struct mii_data *mii;
	if_t ifp;

	sc = device_get_softc(dev);
	if (p->es_port < 0 || p->es_port > sc->info.es_nports)
		return (ENXIO);

	/* Port flags. */
	if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {
		err = sc->hal.arswitch_port_vlan_setup(sc, p);
		if (err)
			return (err);
	}

	/* Do not allow media or led changes on CPU port. */
	if (arswitch_is_cpuport(sc, p->es_port))
		return (0);

	if (AR8X16_IS_SWITCH(sc, AR8327)) {
		for (i = 0; i < 3; i++) {
			err = arswitch_setled(sc, p->es_port - 1, i,
			    p->es_led[i]);
			if (err)
				return (err);
		}
	}

	mii = arswitch_miiforport(sc, p->es_port);
	if (mii == NULL)
		return (ENXIO);

	ifp = arswitch_ifpforport(sc, p->es_port);

	ifm = &mii->mii_media;
	return (ifmedia_ioctl(ifp, &p->es_ifr, ifm, SIOCSIFMEDIA));
}
/*
 * Program one of the three per-PHY LED blink patterns on the AR8327.
 *
 * Fixes: the old bounds checks were off by one -- "phy > numphys"
 * admitted phy == numphys and "style > ETHERSWITCH_PORT_LED_MAX"
 * admitted style == MAX, both indexing one past their tables; the
 * led index was not validated at all (valid range 0..2, matching the
 * three LEDs arswitch_getport()/arswitch_setport() iterate over).
 */
static int
arswitch_setled(struct arswitch_softc *sc, int phy, int led, int style)
{
	int shift;
	int err;

	if (phy < 0 || phy >= sc->numphys)
		return (EINVAL);

	if (led < 0 || led >= 3)
		return (EINVAL);

	if (style < 0 || style >= ETHERSWITCH_PORT_LED_MAX)
		return (EINVAL);

	ARSWITCH_LOCK(sc);

	shift = ar8327_led_mapping[phy][led].shift;
	err = (arswitch_modifyreg(sc->sc_dev,
	    ar8327_led_mapping[phy][led].reg,
	    0x03 << shift, led_pattern_table[style] << shift));
	ARSWITCH_UNLOCK(sc);
	return (err);
}
static void
arswitch_statchg(device_t dev)
{
	struct arswitch_softc *sc;

	/* Nothing to do beyond logging; the tick routine tracks state. */
	sc = device_get_softc(dev);
	DPRINTF(sc, ARSWITCH_DBG_POLL, "%s\n", __func__);
}
static int
arswitch_ifmedia_upd(if_t ifp)
{
	struct arswitch_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = arswitch_miiforport(sc, if_getdunit(ifp));
	if (mii == NULL)
		return (ENXIO);
	mii_mediachg(mii);
	return (0);
}
static void
arswitch_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct arswitch_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = arswitch_miiforport(sc, if_getdunit(ifp));

	DPRINTF(sc, ARSWITCH_DBG_POLL, "%s\n", __func__);

	if (mii == NULL)
		return;
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}
static int
arswitch_getconf(device_t dev, etherswitch_conf_t *conf)
{
	struct arswitch_softc *sc;

	sc = device_get_softc(dev);

	/* Return the VLAN mode. */
	conf->cmd = ETHERSWITCH_CONF_VLAN_MODE;
	conf->vlan_mode = sc->vlan_mode;

	/* Advertise the MAC address only if the HAL could fetch it. */
	if (sc->hal.arswitch_hw_get_switch_macaddr(sc,
	    &conf->switch_macaddr) == 0)
		conf->cmd |= ETHERSWITCH_CONF_SWITCH_MACADDR;

	return (0);
}
static int
arswitch_setconf(device_t dev, etherswitch_conf_t *conf)
{
	struct arswitch_softc *sc;
	int rv;

	sc = device_get_softc(dev);

	/* Set the VLAN mode. */
	if ((conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) != 0) {
		rv = arswitch_set_vlan_mode(sc, conf->vlan_mode);
		if (rv != 0)
			return (rv);
	}

	/* TODO: Set the switch ethernet address. */
	return (0);
}
static int
arswitch_atu_flush_all(device_t dev)
{
	struct arswitch_softc *sc;
	int rv;

	sc = device_get_softc(dev);
	ARSWITCH_LOCK(sc);
	rv = sc->hal.arswitch_atu_flush(sc);
	/* The cached shadow table is now stale. */
	sc->atu.count = 0;
	ARSWITCH_UNLOCK(sc);
	return (rv);
}
static int
arswitch_atu_flush_port(device_t dev, int port)
{
	struct arswitch_softc *sc;
	int rv;

	sc = device_get_softc(dev);
	ARSWITCH_LOCK(sc);
	rv = sc->hal.arswitch_atu_flush_port(sc, port);
	/* The cached shadow table is now stale. */
	sc->atu.count = 0;
	ARSWITCH_UNLOCK(sc);
	return (rv);
}
/*
 * Snapshot the hardware address table into the softc's cached copy
 * and report the number of entries read.  The HAL fetch method is
 * first called with a NULL entry to (re)arm the hardware iterator,
 * then repeatedly to pull one entry at a time until it fails or the
 * cache (sc->atu.size entries) fills up.  Entries are tagged with
 * sequential ids so arswitch_atu_fetch_table_entry() can index them.
 */
static int
arswitch_atu_fetch_table(device_t dev, etherswitch_atu_table_t *table)
{
	struct arswitch_softc *sc;
	int err, nitems;

	sc = device_get_softc(dev);

	ARSWITCH_LOCK(sc);
	/* Initial setup */
	nitems = 0;
	err = sc->hal.arswitch_atu_fetch_table(sc, NULL, 0);

	/* fetch - ideally yes we'd fetch into a separate table then switch */
	while (err == 0 && nitems < sc->atu.size) {
		err = sc->hal.arswitch_atu_fetch_table(sc,
		    &sc->atu.entries[nitems], 1);
		if (err == 0) {
			sc->atu.entries[nitems].id = nitems;
			nitems++;
		}
	}
	sc->atu.count = nitems;
	ARSWITCH_UNLOCK(sc);

	table->es_nitems = nitems;

	return (0);
}
/*
 * Copy a single cached ATU entry, selected by its snapshot id, out
 * to the caller.  The cache is only valid after a preceding
 * arswitch_atu_fetch_table() call.
 *
 * Fix: valid ids are 0 .. count-1; the old "id > count" test allowed
 * a request for id == count to read one entry past the cached data
 * (and negative ids were never rejected).
 */
static int
arswitch_atu_fetch_table_entry(device_t dev, etherswitch_atu_entry_t *e)
{
	struct arswitch_softc *sc;
	int id;

	sc = device_get_softc(dev);
	id = e->id;

	ARSWITCH_LOCK(sc);
	if (id < 0 || id >= sc->atu.count) {
		ARSWITCH_UNLOCK(sc);
		return (ENOENT);
	}

	memcpy(e, &sc->atu.entries[id], sizeof(*e));
	ARSWITCH_UNLOCK(sc);
	return (0);
}
static int
arswitch_getvgroup(device_t dev, etherswitch_vlangroup_t *e)
{
	struct arswitch_softc *sc;

	/* Dispatch to the chip-specific HAL implementation. */
	sc = device_get_softc(dev);
	return (sc->hal.arswitch_vlan_getvgroup(sc, e));
}
static int
arswitch_setvgroup(device_t dev, etherswitch_vlangroup_t *e)
{
	struct arswitch_softc *sc;

	/* Dispatch to the chip-specific HAL implementation. */
	sc = device_get_softc(dev);
	return (sc->hal.arswitch_vlan_setvgroup(sc, e));
}
static int
arswitch_readphy(device_t dev, int phy, int reg)
{
	struct arswitch_softc *sc;

	/* Dispatch to the chip-specific PHY access method. */
	sc = device_get_softc(dev);
	return (sc->hal.arswitch_phy_read(dev, phy, reg));
}
static int
arswitch_writephy(device_t dev, int phy, int reg, int val)
{
	struct arswitch_softc *sc;

	/* Dispatch to the chip-specific PHY access method. */
	sc = device_get_softc(dev);
	return (sc->hal.arswitch_phy_write(dev, phy, reg, val));
}
/*
 * Newbus glue: wire this driver into the device, bus, MII, MDIO and
 * etherswitch interfaces, then register the driver (and its child
 * miibus/mdio/etherswitch buses) with the module system.
 */
static device_method_t arswitch_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, arswitch_probe),
	DEVMETHOD(device_attach, arswitch_attach),
	DEVMETHOD(device_detach, arswitch_detach),

	/* bus interface */
	DEVMETHOD(bus_add_child, device_add_child_ordered),

	/* MII interface */
	DEVMETHOD(miibus_readreg, arswitch_readphy),
	DEVMETHOD(miibus_writereg, arswitch_writephy),
	DEVMETHOD(miibus_statchg, arswitch_statchg),

	/* MDIO interface */
	DEVMETHOD(mdio_readreg, arswitch_readphy),
	DEVMETHOD(mdio_writereg, arswitch_writephy),

	/* etherswitch interface */
	DEVMETHOD(etherswitch_lock, arswitch_lock),
	DEVMETHOD(etherswitch_unlock, arswitch_unlock),
	DEVMETHOD(etherswitch_getinfo, arswitch_getinfo),
	DEVMETHOD(etherswitch_readreg, arswitch_readreg),
	DEVMETHOD(etherswitch_writereg, arswitch_writereg),
	DEVMETHOD(etherswitch_readphyreg, arswitch_readphy),
	DEVMETHOD(etherswitch_writephyreg, arswitch_writephy),
	DEVMETHOD(etherswitch_getport, arswitch_getport),
	DEVMETHOD(etherswitch_setport, arswitch_setport),
	DEVMETHOD(etherswitch_getvgroup, arswitch_getvgroup),
	DEVMETHOD(etherswitch_setvgroup, arswitch_setvgroup),
	DEVMETHOD(etherswitch_getconf, arswitch_getconf),
	DEVMETHOD(etherswitch_setconf, arswitch_setconf),
	DEVMETHOD(etherswitch_flush_all, arswitch_atu_flush_all),
	DEVMETHOD(etherswitch_flush_port, arswitch_atu_flush_port),
	DEVMETHOD(etherswitch_fetch_table, arswitch_atu_fetch_table),
	DEVMETHOD(etherswitch_fetch_table_entry, arswitch_atu_fetch_table_entry),
	DEVMETHOD_END
};

DEFINE_CLASS_0(arswitch, arswitch_driver, arswitch_methods,
    sizeof(struct arswitch_softc));

DRIVER_MODULE(arswitch, mdio, arswitch_driver, 0, 0);
DRIVER_MODULE(miibus, arswitch, miibus_driver, 0, 0);
DRIVER_MODULE(mdio, arswitch, mdio_driver, 0, 0);
DRIVER_MODULE(etherswitch, arswitch, etherswitch_driver, 0, 0);
MODULE_VERSION(arswitch, 1);
MODULE_DEPEND(arswitch, miibus, 1, 1, 1); /* XXX which versions? */
MODULE_DEPEND(arswitch, etherswitch, 1, 1, 1); /* XXX which versions? */
diff --git a/sys/dev/etherswitch/e6000sw/e6000sw.c b/sys/dev/etherswitch/e6000sw/e6000sw.c
index 95f1a2e96db6..3b309f7f36f8 100644
--- a/sys/dev/etherswitch/e6000sw/e6000sw.c
+++ b/sys/dev/etherswitch/e6000sw/e6000sw.c
@@ -1,1784 +1,1782 @@
/*-
* Copyright (c) 2015 Semihalf
* Copyright (c) 2015 Stormshield
* Copyright (c) 2018-2019, Rubicon Communications, LLC (Netgate)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include "opt_platform.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/taskqueue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <dev/etherswitch/etherswitch.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#ifdef FDT
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#else
#include <machine/stdarg.h>
#endif
#include "e6000swreg.h"
#include "etherswitch_if.h"
#include "miibus_if.h"
#include "mdio_if.h"
MALLOC_DECLARE(M_E6000SW);
MALLOC_DEFINE(M_E6000SW, "e6000sw", "e6000sw switch");
#define E6000SW_LOCK(_sc) sx_xlock(&(_sc)->sx)
#define E6000SW_UNLOCK(_sc) sx_unlock(&(_sc)->sx)
#define E6000SW_LOCK_ASSERT(_sc, _what) sx_assert(&(_sc)->sx, (_what))
#define E6000SW_TRYLOCK(_sc) sx_tryxlock(&(_sc)->sx)
#define E6000SW_LOCKED(_sc) sx_xlocked(&(_sc)->sx)
#define E6000SW_WAITREADY(_sc, _reg, _bit) \
e6000sw_waitready((_sc), REG_GLOBAL, (_reg), (_bit))
#define E6000SW_WAITREADY2(_sc, _reg, _bit) \
e6000sw_waitready((_sc), REG_GLOBAL2, (_reg), (_bit))
#define MDIO_READ(dev, addr, reg) \
MDIO_READREG(device_get_parent(dev), (addr), (reg))
#define MDIO_WRITE(dev, addr, reg, val) \
MDIO_WRITEREG(device_get_parent(dev), (addr), (reg), (val))
/* Per-instance state for the Marvell 88E6xxx (e6000sw) switch driver. */
typedef struct e6000sw_softc {
	device_t dev;
#ifdef FDT
	phandle_t node;		/* OFW node of the switch */
#endif

	struct sx sx;		/* serializes switch register access */
	if_t ifp[E6000SW_MAX_PORTS];		/* per-port ifnets for miibus */
	char *ifname[E6000SW_MAX_PORTS];	/* storage for if_initname() */
	device_t miibus[E6000SW_MAX_PORTS];
	struct taskqueue *sc_tq;	/* runs the periodic tick task */
	struct timeout_task sc_tt;

	int vlans[E6000SW_NUM_VLANS];
	uint32_t swid;		/* product id from the SWITCH_ID register */
	uint32_t vlan_mode;
	uint32_t cpuports_mask;	/* ports wired to a host MAC */
	uint32_t fixed_mask;	/* ports with fixed (PHY-less) links */
	uint32_t fixed25_mask;	/* fixed ports running at 2500Mb/s */
	uint32_t ports_mask;	/* mask of enabled ports */
	int phy_base;		/* first PHY address on the bus (chip-dependent) */
	int sw_addr;		/* switch SMI/MDIO address */
	int num_ports;
} e6000sw_softc_t;
static etherswitch_info_t etherswitch_info = {
.es_nports = 0,
.es_nvlangroups = 0,
.es_vlan_caps = ETHERSWITCH_VLAN_PORT | ETHERSWITCH_VLAN_DOT1Q,
.es_name = "Marvell 6000 series switch"
};
static void e6000sw_identify(driver_t *, device_t);
static int e6000sw_probe(device_t);
#ifdef FDT
static int e6000sw_parse_fixed_link(e6000sw_softc_t *, phandle_t, uint32_t);
static int e6000sw_parse_ethernet(e6000sw_softc_t *, phandle_t, uint32_t);
#endif
static int e6000sw_attach(device_t);
static int e6000sw_detach(device_t);
static int e6000sw_read_xmdio(device_t, int, int, int);
static int e6000sw_write_xmdio(device_t, int, int, int, int);
static int e6000sw_readphy(device_t, int, int);
static int e6000sw_writephy(device_t, int, int, int);
static int e6000sw_readphy_locked(device_t, int, int);
static int e6000sw_writephy_locked(device_t, int, int, int);
static etherswitch_info_t* e6000sw_getinfo(device_t);
static int e6000sw_getconf(device_t, etherswitch_conf_t *);
static int e6000sw_setconf(device_t, etherswitch_conf_t *);
static void e6000sw_lock(device_t);
static void e6000sw_unlock(device_t);
static int e6000sw_getport(device_t, etherswitch_port_t *);
static int e6000sw_setport(device_t, etherswitch_port_t *);
static int e6000sw_set_vlan_mode(e6000sw_softc_t *, uint32_t);
static int e6000sw_readreg_wrapper(device_t, int);
static int e6000sw_writereg_wrapper(device_t, int, int);
static int e6000sw_getvgroup_wrapper(device_t, etherswitch_vlangroup_t *);
static int e6000sw_setvgroup_wrapper(device_t, etherswitch_vlangroup_t *);
static int e6000sw_setvgroup(device_t, etherswitch_vlangroup_t *);
static int e6000sw_getvgroup(device_t, etherswitch_vlangroup_t *);
static void e6000sw_setup(device_t, e6000sw_softc_t *);
static void e6000sw_tick(void *, int);
static void e6000sw_set_atustat(device_t, e6000sw_softc_t *, int, int);
static int e6000sw_atu_flush(device_t, e6000sw_softc_t *, int);
static int e6000sw_vtu_flush(e6000sw_softc_t *);
static int e6000sw_vtu_update(e6000sw_softc_t *, int, int, int, int, int);
static __inline void e6000sw_writereg(e6000sw_softc_t *, int, int, int);
static __inline uint32_t e6000sw_readreg(e6000sw_softc_t *, int, int);
static int e6000sw_ifmedia_upd(if_t);
static void e6000sw_ifmedia_sts(if_t, struct ifmediareq *);
static int e6000sw_atu_mac_table(device_t, e6000sw_softc_t *, struct atu_opt *,
int);
static int e6000sw_get_pvid(e6000sw_softc_t *, int, int *);
static void e6000sw_set_pvid(e6000sw_softc_t *, int, int);
static __inline bool e6000sw_is_cpuport(e6000sw_softc_t *, int);
static __inline bool e6000sw_is_fixedport(e6000sw_softc_t *, int);
static __inline bool e6000sw_is_fixed25port(e6000sw_softc_t *, int);
static __inline bool e6000sw_is_phyport(e6000sw_softc_t *, int);
static __inline bool e6000sw_is_portenabled(e6000sw_softc_t *, int);
static __inline struct mii_data *e6000sw_miiforphy(e6000sw_softc_t *,
unsigned int);
static device_method_t e6000sw_methods[] = {
/* device interface */
DEVMETHOD(device_identify, e6000sw_identify),
DEVMETHOD(device_probe, e6000sw_probe),
DEVMETHOD(device_attach, e6000sw_attach),
DEVMETHOD(device_detach, e6000sw_detach),
/* bus interface */
DEVMETHOD(bus_add_child, device_add_child_ordered),
/* mii interface */
DEVMETHOD(miibus_readreg, e6000sw_readphy),
DEVMETHOD(miibus_writereg, e6000sw_writephy),
/* etherswitch interface */
DEVMETHOD(etherswitch_getinfo, e6000sw_getinfo),
DEVMETHOD(etherswitch_getconf, e6000sw_getconf),
DEVMETHOD(etherswitch_setconf, e6000sw_setconf),
DEVMETHOD(etherswitch_lock, e6000sw_lock),
DEVMETHOD(etherswitch_unlock, e6000sw_unlock),
DEVMETHOD(etherswitch_getport, e6000sw_getport),
DEVMETHOD(etherswitch_setport, e6000sw_setport),
DEVMETHOD(etherswitch_readreg, e6000sw_readreg_wrapper),
DEVMETHOD(etherswitch_writereg, e6000sw_writereg_wrapper),
DEVMETHOD(etherswitch_readphyreg, e6000sw_readphy),
DEVMETHOD(etherswitch_writephyreg, e6000sw_writephy),
DEVMETHOD(etherswitch_setvgroup, e6000sw_setvgroup_wrapper),
DEVMETHOD(etherswitch_getvgroup, e6000sw_getvgroup_wrapper),
DEVMETHOD_END
};
DEFINE_CLASS_0(e6000sw, e6000sw_driver, e6000sw_methods,
sizeof(e6000sw_softc_t));
DRIVER_MODULE(e6000sw, mdio, e6000sw_driver, 0, 0);
DRIVER_MODULE(etherswitch, e6000sw, etherswitch_driver, 0, 0);
DRIVER_MODULE(miibus, e6000sw, miibus_driver, 0, 0);
MODULE_DEPEND(e6000sw, mdio, 1, 1, 1);
static void
e6000sw_identify(driver_t *driver, device_t parent)
{

	/* Add at most one child instance under the MDIO bus. */
	if (device_find_child(parent, "e6000sw", -1) != NULL)
		return;
	BUS_ADD_CHILD(parent, 0, "e6000sw", -1);
}
/*
 * Probe: determine the switch's SMI address (from FDT or from device
 * hints), read its product id and set the per-model PHY base and
 * port count.
 *
 * Fix: in the non-FDT path, "is_6190" was read even when neither the
 * "is6190" hint nor the legacy "is8190" hint existed, i.e. it was
 * used uninitialized (undefined behavior).  It now defaults to 0.
 */
static int
e6000sw_probe(device_t dev)
{
	e6000sw_softc_t *sc;
	const char *description;
#ifdef FDT
	phandle_t switch_node;
#else
	int is_6190 = 0;
#endif

	sc = device_get_softc(dev);
	sc->dev = dev;
#ifdef FDT
	switch_node = ofw_bus_find_compatible(OF_finddevice("/"),
	    "marvell,mv88e6085");
	if (switch_node == 0) {
		switch_node = ofw_bus_find_compatible(OF_finddevice("/"),
		    "marvell,mv88e6190");

		if (switch_node == 0)
			return (ENXIO);

		/*
		 * Trust DTS and fix the port register offset for the MV88E6190
		 * detection bellow.
		 */
		sc->swid = MV88E6190;
	}

	if (bootverbose)
		device_printf(dev, "Found switch_node: 0x%x\n", switch_node);

	sc->node = switch_node;

	if (OF_getencprop(sc->node, "reg", &sc->sw_addr,
	    sizeof(sc->sw_addr)) < 0)
		return (ENXIO);
#else
	if (resource_int_value(device_get_name(sc->dev),
	    device_get_unit(sc->dev), "addr", &sc->sw_addr) != 0)
		return (ENXIO);
	if (resource_int_value(device_get_name(sc->dev),
	    device_get_unit(sc->dev), "is6190", &is_6190) != 0)
		/*
		 * Check "is8190" to keep backward compatibility with
		 * older setups.
		 */
		resource_int_value(device_get_name(sc->dev),
		    device_get_unit(sc->dev), "is8190", &is_6190);
	if (is_6190 != 0)
		sc->swid = MV88E6190;
#endif
	if (sc->sw_addr < 0 || sc->sw_addr > 32)
		return (ENXIO);

	/*
	 * Create temporary lock, just to satisfy assertions,
	 * when obtaining the switch ID. Destroy immediately afterwards.
	 */
	sx_init(&sc->sx, "e6000sw_tmp");
	E6000SW_LOCK(sc);
	sc->swid = e6000sw_readreg(sc, REG_PORT(sc, 0), SWITCH_ID) & 0xfff0;
	E6000SW_UNLOCK(sc);
	sx_destroy(&sc->sx);

	switch (sc->swid) {
	case MV88E6141:
		description = "Marvell 88E6141";
		sc->phy_base = 0x10;
		sc->num_ports = 6;
		break;
	case MV88E6341:
		description = "Marvell 88E6341";
		sc->phy_base = 0x10;
		sc->num_ports = 6;
		break;
	case MV88E6352:
		description = "Marvell 88E6352";
		sc->num_ports = 7;
		break;
	case MV88E6172:
		description = "Marvell 88E6172";
		sc->num_ports = 7;
		break;
	case MV88E6176:
		description = "Marvell 88E6176";
		sc->num_ports = 7;
		break;
	case MV88E6190:
		description = "Marvell 88E6190";
		sc->num_ports = 11;
		break;
	default:
		device_printf(dev, "Unrecognized device, id 0x%x.\n", sc->swid);
		return (ENXIO);
	}

	device_set_desc(dev, description);

	return (BUS_PROBE_DEFAULT);
}
#ifdef FDT
static int
e6000sw_parse_fixed_link(e6000sw_softc_t *sc, phandle_t node, uint32_t port)
{
	phandle_t fixed_link;
	int speed;

	fixed_link = ofw_bus_find_child(node, "fixed-link");
	if (fixed_link == 0)
		return (0);

	sc->fixed_mask |= (1 << port);

	if (OF_getencprop(fixed_link,
	    "speed", &speed, sizeof(speed)) < 0) {
		device_printf(sc->dev,
		    "Port %d has a fixed-link node without a speed "
		    "property\n", port);
		return (ENXIO);
	}

	/* Only these models support a 2500Mb/s fixed link. */
	if (speed == 2500 && (MVSWITCH(sc, MV88E6141) ||
	    MVSWITCH(sc, MV88E6341) || MVSWITCH(sc, MV88E6190)))
		sc->fixed25_mask |= (1 << port);

	return (0);
}
static int
e6000sw_parse_ethernet(e6000sw_softc_t *sc, phandle_t port_handle, uint32_t port)
{
	phandle_t switch_eth, switch_eth_handle;

	/* No "ethernet" property: nothing to do, not an error. */
	if (OF_getencprop(port_handle, "ethernet", (void*)&switch_eth_handle,
	    sizeof(switch_eth_handle)) <= 0)
		return (0);

	if (switch_eth_handle <= 0) {
		device_printf(sc->dev,
		    "Port %d has ethernet property but it points "
		    "to an invalid location\n", port);
		return (0);
	}

	switch_eth = OF_node_from_xref(switch_eth_handle);

	device_printf(sc->dev, "CPU port at %d\n", port);
	sc->cpuports_mask |= (1 << port);
	return (e6000sw_parse_fixed_link(sc, switch_eth, port));
}
static int
e6000sw_parse_child_fdt(e6000sw_softc_t *sc, phandle_t child, int *pport)
{
	uint32_t port;

	if (pport == NULL)
		return (ENXIO);

	if (OF_getencprop(child, "reg", (void *)&port, sizeof(port)) < 0)
		return (ENXIO);
	if (port >= sc->num_ports)
		return (ENXIO);
	*pport = port;

	/* fixed-link first, then the "ethernet" (CPU port) property. */
	if (e6000sw_parse_fixed_link(sc, child, port) != 0 ||
	    e6000sw_parse_ethernet(sc, child, port) != 0)
		return (ENXIO);

	if ((sc->fixed_mask & (1 << port)) != 0)
		device_printf(sc->dev, "fixed port at %d\n", port);
	else
		device_printf(sc->dev, "PHY at port %d\n", port);

	return (0);
}
#else
static int
e6000sw_check_hint_val(device_t dev, int *val, char *fmt, ...)
{
	va_list ap;
	char *resname;
	int rv, len;

	/* Leave headroom for the expanded port number. */
	len = min(strlen(fmt) * 2, 128);
	if (len == 0)
		return (-1);
	resname = malloc(len, M_E6000SW, M_WAITOK | M_ZERO);

	va_start(ap, fmt);
	vsnprintf(resname, len - 1, fmt, ap);
	va_end(ap);

	rv = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    resname, val);

	free(resname, M_E6000SW);
	return (rv);
}
static int
e6000sw_parse_hinted_port(e6000sw_softc_t *sc, int port)
{
	int rv, hint;

	/* A "port%ddisabled" hint skips the port entirely. */
	rv = e6000sw_check_hint_val(sc->dev, &hint, "port%ddisabled", port);
	if (rv == 0 && hint != 0)
		return (1);

	/* CPU ports are also treated as fixed links. */
	rv = e6000sw_check_hint_val(sc->dev, &hint, "port%dcpu", port);
	if (rv == 0 && hint != 0) {
		sc->cpuports_mask |= (1 << port);
		sc->fixed_mask |= (1 << port);
		if (bootverbose)
			device_printf(sc->dev, "CPU port at %d\n", port);
	}

	/* A speed hint marks the port as a fixed link. */
	rv = e6000sw_check_hint_val(sc->dev, &hint, "port%dspeed", port);
	if (rv == 0 && hint != 0) {
		sc->fixed_mask |= (1 << port);
		if (hint == 2500)
			sc->fixed25_mask |= (1 << port);
	}

	if (bootverbose) {
		if ((sc->fixed_mask & (1 << port)) != 0)
			device_printf(sc->dev, "fixed port at %d\n", port);
		else
			device_printf(sc->dev, "PHY at port %d\n", port);
	}

	return (0);
}
#endif
/*
 * Allocate and name the per-port ifnet used to anchor the miibus.
 * Interface name is "<nameunit>port" with the port number as the unit.
 * NOTE(review): the '-' lines below are removals carried by this patch
 * (if_alloc() can no longer fail), kept verbatim.
 */
static int
e6000sw_init_interface(e6000sw_softc_t *sc, int port)
{
char name[IFNAMSIZ];
snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->dev));
sc->ifp[port] = if_alloc(IFT_ETHER);
- if (sc->ifp[port] == NULL)
- return (ENOMEM);
if_setsoftc(sc->ifp[port], sc);
if_setflagbits(sc->ifp[port], IFF_UP | IFF_BROADCAST |
IFF_DRV_RUNNING | IFF_SIMPLEX, 0);
/* Name storage must outlive the ifnet; free it on failure. */
sc->ifname[port] = malloc(strlen(name) + 1, M_E6000SW, M_NOWAIT);
if (sc->ifname[port] == NULL) {
if_free(sc->ifp[port]);
return (ENOMEM);
}
memcpy(sc->ifname[port], name, strlen(name) + 1);
if_initname(sc->ifp[port], sc->ifname[port], port);
return (0);
}
/*
 * Attach the MII framework to the PHY behind the given port.
 * Returns whatever mii_attach() reports.
 */
static int
e6000sw_attach_miibus(e6000sw_softc_t *sc, int port)
{

        return (mii_attach(sc->dev, &sc->miibus[port], sc->ifp[port],
            e6000sw_ifmedia_upd, e6000sw_ifmedia_sts, BMSR_DEFCAPMASK,
            port + sc->phy_base, MII_OFFSET_ANY, 0));
}
/*
 * Select which SERDES personality is powered at a port: SGMII on and
 * 10GBASE-R/X off when sgmii is true, the opposite otherwise.  Accessed
 * via Clause 45 (XMDIO) registers.
 */
static void
e6000sw_serdes_power(device_t dev, int port, bool sgmii)
{
uint32_t reg;
/* SGMII */
reg = e6000sw_read_xmdio(dev, port, E6000SW_SERDES_DEV,
E6000SW_SERDES_SGMII_CTL);
if (sgmii)
reg &= ~E6000SW_SERDES_PDOWN;
else
reg |= E6000SW_SERDES_PDOWN;
e6000sw_write_xmdio(dev, port, E6000SW_SERDES_DEV,
E6000SW_SERDES_SGMII_CTL, reg);
/* 10GBASE-R/10GBASE-X4/X2 */
reg = e6000sw_read_xmdio(dev, port, E6000SW_SERDES_DEV,
E6000SW_SERDES_PCS_CTL1);
if (sgmii)
reg |= E6000SW_SERDES_PDOWN;
else
reg &= ~E6000SW_SERDES_PDOWN;
e6000sw_write_xmdio(dev, port, E6000SW_SERDES_DEV,
E6000SW_SERDES_PCS_CTL1, reg);
}
/*
 * Device attach: set up the switch core, enumerate ports (from FDT or
 * hints), create per-port ifnets, force link parameters on fixed ports,
 * attach miibus on PHY ports, and start the status-polling taskqueue.
 * NOTE(review): the port loop header differs between the FDT and hinted
 * builds; the loop body below is shared by both via #ifdef.
 */
static int
e6000sw_attach(device_t dev)
{
bool sgmii;
e6000sw_softc_t *sc;
#ifdef FDT
phandle_t child, ports;
#endif
int err, port;
uint32_t reg;
err = 0;
sc = device_get_softc(dev);
/*
* According to the Linux source code, all of the Switch IDs we support
* are multi_chip capable, and should go into multi-chip mode if the
* sw_addr != 0.
*/
if (MVSWITCH_MULTICHIP(sc))
device_printf(dev, "multi-chip addressing mode (%#x)\n",
sc->sw_addr);
else
device_printf(dev, "single-chip addressing mode\n");
sx_init(&sc->sx, "e6000sw");
E6000SW_LOCK(sc);
e6000sw_setup(dev, sc);
/* Periodic link-state polling runs from this taskqueue. */
sc->sc_tq = taskqueue_create("e6000sw_taskq", M_NOWAIT,
taskqueue_thread_enqueue, &sc->sc_tq);
TIMEOUT_TASK_INIT(sc->sc_tq, &sc->sc_tt, 0, e6000sw_tick, sc);
taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
device_get_nameunit(dev));
#ifdef FDT
ports = ofw_bus_find_child(sc->node, "ports");
if (ports == 0) {
device_printf(dev, "failed to parse DTS: no ports found for "
"switch\n");
E6000SW_UNLOCK(sc);
return (ENXIO);
}
for (child = OF_child(ports); child != 0; child = OF_peer(child)) {
err = e6000sw_parse_child_fdt(sc, child, &port);
if (err != 0) {
device_printf(sc->dev, "failed to parse DTS\n");
goto out_fail;
}
#else
for (port = 0; port < sc->num_ports; port++) {
err = e6000sw_parse_hinted_port(sc, port);
if (err != 0)
continue;
#endif
/* Port is in use. */
sc->ports_mask |= (1 << port);
err = e6000sw_init_interface(sc, port);
if (err != 0) {
device_printf(sc->dev, "failed to init interface\n");
goto out_fail;
}
if (e6000sw_is_fixedport(sc, port)) {
/* Link must be down to change speed force value. */
reg = e6000sw_readreg(sc, REG_PORT(sc, port),
PSC_CONTROL);
reg &= ~PSC_CONTROL_LINK_UP;
reg |= PSC_CONTROL_FORCED_LINK;
e6000sw_writereg(sc, REG_PORT(sc, port), PSC_CONTROL,
reg);
/*
* Force speed, full-duplex, EEE off and flow-control
* on.
*/
reg &= ~(PSC_CONTROL_SPD2500 | PSC_CONTROL_ALT_SPD |
PSC_CONTROL_FORCED_FC | PSC_CONTROL_FC_ON |
PSC_CONTROL_FORCED_EEE);
if (e6000sw_is_fixed25port(sc, port))
reg |= PSC_CONTROL_SPD2500;
else
reg |= PSC_CONTROL_SPD1000;
/* 88E6190 encodes 2.5G via the ALT_SPD bit. */
if (MVSWITCH(sc, MV88E6190) &&
e6000sw_is_fixed25port(sc, port))
reg |= PSC_CONTROL_ALT_SPD;
reg |= PSC_CONTROL_FORCED_DPX | PSC_CONTROL_FULLDPX |
PSC_CONTROL_FORCED_LINK | PSC_CONTROL_LINK_UP |
PSC_CONTROL_FORCED_SPD;
if (!MVSWITCH(sc, MV88E6190))
reg |= PSC_CONTROL_FORCED_FC | PSC_CONTROL_FC_ON;
if (MVSWITCH(sc, MV88E6141) ||
MVSWITCH(sc, MV88E6341) ||
MVSWITCH(sc, MV88E6190))
reg |= PSC_CONTROL_FORCED_EEE;
e6000sw_writereg(sc, REG_PORT(sc, port), PSC_CONTROL,
reg);
/* Power on the SERDES interfaces. */
if (MVSWITCH(sc, MV88E6190) &&
(port == 9 || port == 10)) {
if (e6000sw_is_fixed25port(sc, port))
sgmii = false;
else
sgmii = true;
e6000sw_serdes_power(sc->dev, port, sgmii);
}
}
/* Don't attach miibus at CPU/fixed ports */
if (!e6000sw_is_phyport(sc, port))
continue;
err = e6000sw_attach_miibus(sc, port);
if (err != 0) {
device_printf(sc->dev, "failed to attach miibus\n");
goto out_fail;
}
}
etherswitch_info.es_nports = sc->num_ports;
/* Default to port vlan. */
e6000sw_set_vlan_mode(sc, ETHERSWITCH_VLAN_PORT);
reg = e6000sw_readreg(sc, REG_GLOBAL, SWITCH_GLOBAL_STATUS);
if (reg & SWITCH_GLOBAL_STATUS_IR)
device_printf(dev, "switch is ready.\n");
E6000SW_UNLOCK(sc);
bus_generic_probe(dev);
bus_generic_attach(dev);
taskqueue_enqueue_timeout(sc->sc_tq, &sc->sc_tt, hz);
return (0);
out_fail:
/* detach() tears down everything allocated so far. */
e6000sw_detach(dev);
return (err);
}
/*
 * Busy-wait until "busybit" clears in the given register.
 * Returns 0 on success, 1 when E6000SW_RETRIES polls expire.
 */
static int
e6000sw_waitready(e6000sw_softc_t *sc, uint32_t phy, uint32_t reg,
    uint32_t busybit)
{
        int tries;

        for (tries = E6000SW_RETRIES; tries > 0; tries--) {
                if ((e6000sw_readreg(sc, phy, reg) & busybit) == 0)
                        return (0);
                DELAY(1);
        }
        return (1);
}
/* XMDIO/Clause 45 access. */
/*
 * Indirect Clause 45 read through the switch's SMI PHY command/data
 * registers: load the register address, issue the read op, then fetch
 * the data.  Returns the 16-bit value or ETIMEDOUT.
 */
static int
e6000sw_read_xmdio(device_t dev, int phy, int devaddr, int devreg)
{
e6000sw_softc_t *sc;
uint32_t reg;
sc = device_get_softc(dev);
E6000SW_LOCK_ASSERT(sc, SA_XLOCKED);
if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) {
device_printf(dev, "Timeout while waiting for switch\n");
return (ETIMEDOUT);
}
reg = devaddr & SMI_CMD_REG_ADDR_MASK;
reg |= (phy << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK;
/* Load C45 register address. */
e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG, devreg);
e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG,
reg | SMI_CMD_OP_C45_ADDR);
if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) {
device_printf(dev, "Timeout while waiting for switch\n");
return (ETIMEDOUT);
}
/* Start C45 read operation. */
e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG,
reg | SMI_CMD_OP_C45_READ);
if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) {
device_printf(dev, "Timeout while waiting for switch\n");
return (ETIMEDOUT);
}
/* Read C45 data. */
reg = e6000sw_readreg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG);
return (reg & PHY_DATA_MASK);
}
/*
 * Indirect Clause 45 write through the switch's SMI PHY command/data
 * registers: load the register address, then load the data and issue
 * the write op.  Returns 0 or ETIMEDOUT.
 *
 * Fix: the data phase previously re-wrote "devreg" into the data
 * register instead of "val", so every C45 write stored the register
 * address rather than the caller's value.
 */
static int
e6000sw_write_xmdio(device_t dev, int phy, int devaddr, int devreg, int val)
{
        e6000sw_softc_t *sc;
        uint32_t reg;

        sc = device_get_softc(dev);
        E6000SW_LOCK_ASSERT(sc, SA_XLOCKED);
        if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) {
                device_printf(dev, "Timeout while waiting for switch\n");
                return (ETIMEDOUT);
        }
        reg = devaddr & SMI_CMD_REG_ADDR_MASK;
        reg |= (phy << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK;
        /* Load C45 register address. */
        e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG, devreg);
        e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG,
            reg | SMI_CMD_OP_C45_ADDR);
        if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) {
                device_printf(dev, "Timeout while waiting for switch\n");
                return (ETIMEDOUT);
        }
        /* Load data and start the C45 write operation. */
        e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG, val);
        e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG,
            reg | SMI_CMD_OP_C45_WRITE);
        return (0);
}
/*
 * Locked wrapper around e6000sw_readphy_locked(): acquire the softc lock
 * only when the caller does not already hold it.
 */
static int
e6000sw_readphy(device_t dev, int phy, int reg)
{
        e6000sw_softc_t *sc;
        int result;
        bool need_lock;

        sc = device_get_softc(dev);
        need_lock = !E6000SW_LOCKED(sc);
        if (need_lock)
                E6000SW_LOCK(sc);
        result = e6000sw_readphy_locked(dev, phy, reg);
        if (need_lock)
                E6000SW_UNLOCK(sc);
        return (result);
}
/*
* PHY registers are paged. Put page index in reg 22 (accessible from every
* page), then access specific register.
*/
/*
 * Read a paged PHY register via the indirect Clause 22 SMI mechanism.
 * Caller must hold the softc lock.  Returns the 16-bit value, or
 * EINVAL/ETIMEDOUT (indistinguishable from data by the caller).
 */
static int
e6000sw_readphy_locked(device_t dev, int phy, int reg)
{
e6000sw_softc_t *sc;
uint32_t val;
sc = device_get_softc(dev);
E6000SW_LOCK_ASSERT(sc, SA_XLOCKED);
if (!e6000sw_is_phyport(sc, phy) || reg >= E6000SW_NUM_PHY_REGS) {
device_printf(dev, "Wrong register address.\n");
return (EINVAL);
}
if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) {
device_printf(dev, "Timeout while waiting for switch\n");
return (ETIMEDOUT);
}
/* Issue the C22 read and poll for completion. */
e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG,
SMI_CMD_OP_C22_READ | (reg & SMI_CMD_REG_ADDR_MASK) |
((phy << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK));
if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) {
device_printf(dev, "Timeout while waiting for switch\n");
return (ETIMEDOUT);
}
val = e6000sw_readreg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG);
return (val & PHY_DATA_MASK);
}
/*
 * Locked wrapper around e6000sw_writephy_locked(): acquire the softc lock
 * only when the caller does not already hold it.
 */
static int
e6000sw_writephy(device_t dev, int phy, int reg, int data)
{
        e6000sw_softc_t *sc;
        int result;
        bool need_lock;

        sc = device_get_softc(dev);
        need_lock = !E6000SW_LOCKED(sc);
        if (need_lock)
                E6000SW_LOCK(sc);
        result = e6000sw_writephy_locked(dev, phy, reg, data);
        if (need_lock)
                E6000SW_UNLOCK(sc);
        return (result);
}
/*
 * Write a paged PHY register via the indirect Clause 22 SMI mechanism.
 * Caller must hold the softc lock.  The completion of the write itself
 * is not polled here; the next access waits for BUSY to clear.
 */
static int
e6000sw_writephy_locked(device_t dev, int phy, int reg, int data)
{
e6000sw_softc_t *sc;
sc = device_get_softc(dev);
E6000SW_LOCK_ASSERT(sc, SA_XLOCKED);
if (!e6000sw_is_phyport(sc, phy) || reg >= E6000SW_NUM_PHY_REGS) {
device_printf(dev, "Wrong register address.\n");
return (EINVAL);
}
if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) {
device_printf(dev, "Timeout while waiting for switch\n");
return (ETIMEDOUT);
}
/* Data first, then the command that latches it. */
e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG,
data & PHY_DATA_MASK);
e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG,
SMI_CMD_OP_C22_WRITE | (reg & SMI_CMD_REG_ADDR_MASK) |
((phy << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK));
return (0);
}
/*
 * Device detach; also used as the attach error path, so every resource
 * is checked for NULL before release.
 */
static int
e6000sw_detach(device_t dev)
{
int phy;
e6000sw_softc_t *sc;
sc = device_get_softc(dev);
/* Only drain the tick task if attach completed and armed it. */
if (device_is_attached(dev))
taskqueue_drain_timeout(sc->sc_tq, &sc->sc_tt);
if (sc->sc_tq != NULL)
taskqueue_free(sc->sc_tq);
device_delete_children(dev);
sx_destroy(&sc->sx);
for (phy = 0; phy < sc->num_ports; phy++) {
if (sc->ifp[phy] != NULL)
if_free(sc->ifp[phy]);
if (sc->ifname[phy] != NULL)
free(sc->ifname[phy], M_E6000SW);
}
return (0);
}
/* Return the driver-global etherswitch capability descriptor. */
static etherswitch_info_t*
e6000sw_getinfo(device_t dev)
{
return (&etherswitch_info);
}
static int
e6000sw_getconf(device_t dev, etherswitch_conf_t *conf)
{
struct e6000sw_softc *sc;
/* Return the VLAN mode. */
sc = device_get_softc(dev);
conf->cmd = ETHERSWITCH_CONF_VLAN_MODE;
conf->vlan_mode = sc->vlan_mode;
return (0);
}
static int
e6000sw_setconf(device_t dev, etherswitch_conf_t *conf)
{
struct e6000sw_softc *sc;
/* Set the VLAN mode. */
sc = device_get_softc(dev);
if (conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) {
E6000SW_LOCK(sc);
/* Invalid modes are silently rejected by set_vlan_mode. */
e6000sw_set_vlan_mode(sc, conf->vlan_mode);
E6000SW_UNLOCK(sc);
}
return (0);
}
/* etherswitch framework lock hook: take the softc sx lock. */
static void
e6000sw_lock(device_t dev)
{
struct e6000sw_softc *sc;
sc = device_get_softc(dev);
E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED);
E6000SW_LOCK(sc);
}
/* etherswitch framework unlock hook: release the softc sx lock. */
static void
e6000sw_unlock(device_t dev)
{
struct e6000sw_softc *sc;
sc = device_get_softc(dev);
E6000SW_LOCK_ASSERT(sc, SA_XLOCKED);
E6000SW_UNLOCK(sc);
}
/*
 * Report a port's PVID, drop-tagged/untagged flags and media status.
 * Fixed ports synthesize an always-up full-duplex media report; PHY
 * ports defer to the MII layer via SIOCGIFMEDIA.
 */
static int
e6000sw_getport(device_t dev, etherswitch_port_t *p)
{
struct mii_data *mii;
int err;
struct ifmediareq *ifmr;
uint32_t reg;
e6000sw_softc_t *sc = device_get_softc(dev);
E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED);
if (p->es_port >= sc->num_ports || p->es_port < 0)
return (EINVAL);
/* Disabled ports report nothing but do not fail. */
if (!e6000sw_is_portenabled(sc, p->es_port))
return (0);
E6000SW_LOCK(sc);
e6000sw_get_pvid(sc, p->es_port, &p->es_pvid);
/* Port flags. */
reg = e6000sw_readreg(sc, REG_PORT(sc, p->es_port), PORT_CONTROL2);
if (reg & PORT_CONTROL2_DISC_TAGGED)
p->es_flags |= ETHERSWITCH_PORT_DROPTAGGED;
if (reg & PORT_CONTROL2_DISC_UNTAGGED)
p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED;
err = 0;
if (e6000sw_is_fixedport(sc, p->es_port)) {
if (e6000sw_is_cpuport(sc, p->es_port))
p->es_flags |= ETHERSWITCH_PORT_CPU;
ifmr = &p->es_ifmr;
ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
ifmr->ifm_count = 0;
if (e6000sw_is_fixed25port(sc, p->es_port))
ifmr->ifm_active = IFM_2500_T;
else
ifmr->ifm_active = IFM_1000_T;
ifmr->ifm_active |= IFM_ETHER | IFM_FDX;
ifmr->ifm_current = ifmr->ifm_active;
ifmr->ifm_mask = 0;
} else {
mii = e6000sw_miiforphy(sc, p->es_port);
err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr,
&mii->mii_media, SIOCGIFMEDIA);
}
E6000SW_UNLOCK(sc);
return (err);
}
/*
 * Apply port settings: drop-tagged/untagged flags, PVID (when non-zero),
 * and — on PHY ports — the requested media via SIOCSIFMEDIA.
 */
static int
e6000sw_setport(device_t dev, etherswitch_port_t *p)
{
e6000sw_softc_t *sc;
int err;
struct mii_data *mii;
uint32_t reg;
sc = device_get_softc(dev);
E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED);
if (p->es_port >= sc->num_ports || p->es_port < 0)
return (EINVAL);
if (!e6000sw_is_portenabled(sc, p->es_port))
return (0);
E6000SW_LOCK(sc);
/* Port flags. */
reg = e6000sw_readreg(sc, REG_PORT(sc, p->es_port), PORT_CONTROL2);
if (p->es_flags & ETHERSWITCH_PORT_DROPTAGGED)
reg |= PORT_CONTROL2_DISC_TAGGED;
else
reg &= ~PORT_CONTROL2_DISC_TAGGED;
if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED)
reg |= PORT_CONTROL2_DISC_UNTAGGED;
else
reg &= ~PORT_CONTROL2_DISC_UNTAGGED;
e6000sw_writereg(sc, REG_PORT(sc, p->es_port), PORT_CONTROL2, reg);
err = 0;
/* PVID 0 means "leave unchanged". */
if (p->es_pvid != 0)
e6000sw_set_pvid(sc, p->es_port, p->es_pvid);
if (e6000sw_is_phyport(sc, p->es_port)) {
mii = e6000sw_miiforphy(sc, p->es_port);
err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media,
SIOCSIFMEDIA);
}
E6000SW_UNLOCK(sc);
return (err);
}
/*
 * Program a port's VLAN map (member ports, minus itself) and its FID,
 * which is split across PORT_VLAN_MAP (low bits) and PORT_CONTROL1
 * (high bits).
 */
static __inline void
e6000sw_port_vlan_assign(e6000sw_softc_t *sc, int port, uint32_t fid,
uint32_t members)
{
uint32_t reg;
reg = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_VLAN_MAP);
reg &= ~(PORT_MASK(sc) | PORT_VLAN_MAP_FID_MASK);
/* A port never forwards to itself. */
reg |= members & PORT_MASK(sc) & ~(1 << port);
reg |= (fid << PORT_VLAN_MAP_FID) & PORT_VLAN_MAP_FID_MASK;
e6000sw_writereg(sc, REG_PORT(sc, port), PORT_VLAN_MAP, reg);
reg = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL1);
reg &= ~PORT_CONTROL1_FID_MASK;
reg |= (fid >> 4) & PORT_CONTROL1_FID_MASK;
e6000sw_writereg(sc, REG_PORT(sc, port), PORT_CONTROL1, reg);
}
/*
 * (Re)initialize VLAN state for the current vlan_mode: quiesce all
 * ports, flush the VTU, program 802.1q/port-vlan registers and default
 * VIDs, rebuild the per-port member maps, then re-enable the ports.
 */
static int
e6000sw_init_vlan(struct e6000sw_softc *sc)
{
int i, port, ret;
uint32_t members;
/* Disable all ports */
for (port = 0; port < sc->num_ports; port++) {
ret = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL);
e6000sw_writereg(sc, REG_PORT(sc, port), PORT_CONTROL,
(ret & ~PORT_CONTROL_ENABLE));
}
/* Flush VTU. */
e6000sw_vtu_flush(sc);
for (port = 0; port < sc->num_ports; port++) {
/* Reset the egress and frame mode. */
ret = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL);
ret &= ~(PORT_CONTROL_EGRESS | PORT_CONTROL_FRAME);
e6000sw_writereg(sc, REG_PORT(sc, port), PORT_CONTROL, ret);
/* Set the 802.1q mode. */
ret = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL2);
ret &= ~PORT_CONTROL2_DOT1Q;
if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q)
ret |= PORT_CONTROL2_DOT1Q;
e6000sw_writereg(sc, REG_PORT(sc, port), PORT_CONTROL2, ret);
}
for (port = 0; port < sc->num_ports; port++) {
if (!e6000sw_is_portenabled(sc, port))
continue;
ret = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_VID);
/* Set port priority */
ret &= ~PORT_VID_PRIORITY_MASK;
/* Set VID map */
ret &= ~PORT_VID_DEF_VID_MASK;
/* dot1q: everyone defaults to VID 1; port mode: VID = port+1. */
if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q)
ret |= 1;
else
ret |= (port + 1);
e6000sw_writereg(sc, REG_PORT(sc, port), PORT_VID, ret);
}
/* Assign the member ports to each origin port. */
for (port = 0; port < sc->num_ports; port++) {
members = 0;
if (e6000sw_is_portenabled(sc, port)) {
for (i = 0; i < sc->num_ports; i++) {
if (i == port || !e6000sw_is_portenabled(sc, i))
continue;
members |= (1 << i);
}
}
/* Default to FID 0. */
e6000sw_port_vlan_assign(sc, port, 0, members);
}
/* Reset internal VLAN table. */
for (i = 0; i < nitems(sc->vlans); i++)
sc->vlans[i] = 0;
/* Create default VLAN (1). */
if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {
sc->vlans[0] = 1;
e6000sw_vtu_update(sc, 0, sc->vlans[0], 1, 0, sc->ports_mask);
}
/* Enable all ports */
for (port = 0; port < sc->num_ports; port++) {
if (!e6000sw_is_portenabled(sc, port))
continue;
ret = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL);
e6000sw_writereg(sc, REG_PORT(sc, port), PORT_CONTROL,
(ret | PORT_CONTROL_ENABLE));
}
return (0);
}
/*
 * Switch between port-based and 802.1q VLAN operation, resizing the
 * advertised vlangroup count and reinitializing the VLAN tables.
 * Returns EINVAL for unknown modes.
 *
 * Fix: removed the unreachable "break" statements that followed the
 * "return" in each case.
 */
static int
e6000sw_set_vlan_mode(struct e6000sw_softc *sc, uint32_t mode)
{

        E6000SW_LOCK_ASSERT(sc, SA_XLOCKED);
        switch (mode) {
        case ETHERSWITCH_VLAN_PORT:
                sc->vlan_mode = ETHERSWITCH_VLAN_PORT;
                etherswitch_info.es_nvlangroups = sc->num_ports;
                return (e6000sw_init_vlan(sc));
        case ETHERSWITCH_VLAN_DOT1Q:
                sc->vlan_mode = ETHERSWITCH_VLAN_DOT1Q;
                etherswitch_info.es_nvlangroups = E6000SW_NUM_VLANS;
                return (e6000sw_init_vlan(sc));
        default:
                return (EINVAL);
        }
}
/*
* Registers in this switch are divided into sections, specified in
* documentation. So as to access any of them, section index and reg index
* is necessary. etherswitchcfg uses only one variable, so indexes were
* compressed into addr_reg: 32 * section_index + reg_index.
*/
/*
 * etherswitch register-read hook.  addr_reg encodes
 * 32 * section_index + reg_index (see the comment above).
 */
static int
e6000sw_readreg_wrapper(device_t dev, int addr_reg)
{
e6000sw_softc_t *sc;
sc = device_get_softc(dev);
if ((addr_reg > (REG_GLOBAL2 * 32 + REG_NUM_MAX)) ||
(addr_reg < (REG_PORT(sc, 0) * 32))) {
device_printf(dev, "Wrong register address.\n");
return (EINVAL);
}
return (e6000sw_readreg(device_get_softc(dev), addr_reg / 32,
addr_reg % 32));
}
/*
 * etherswitch register-write hook; same addr_reg encoding as the read
 * wrapper above.
 */
static int
e6000sw_writereg_wrapper(device_t dev, int addr_reg, int val)
{
e6000sw_softc_t *sc;
sc = device_get_softc(dev);
if ((addr_reg > (REG_GLOBAL2 * 32 + REG_NUM_MAX)) ||
(addr_reg < (REG_PORT(sc, 0) * 32))) {
device_printf(dev, "Wrong register address.\n");
return (EINVAL);
}
e6000sw_writereg(device_get_softc(dev), addr_reg / 32,
addr_reg % 32, val);
return (0);
}
/*
* setvgroup/getvgroup called from etherswitchfcg need to be locked,
* while internal calls do not.
*/
/* Locked entry point for setvgroup, used by the etherswitch framework. */
static int
e6000sw_setvgroup_wrapper(device_t dev, etherswitch_vlangroup_t *vg)
{
        e6000sw_softc_t *sc;
        int error;

        sc = device_get_softc(dev);
        E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED);

        E6000SW_LOCK(sc);
        error = e6000sw_setvgroup(dev, vg);
        E6000SW_UNLOCK(sc);

        return (error);
}
/* Locked entry point for getvgroup, used by the etherswitch framework. */
static int
e6000sw_getvgroup_wrapper(device_t dev, etherswitch_vlangroup_t *vg)
{
        e6000sw_softc_t *sc;
        int error;

        sc = device_get_softc(dev);
        E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED);

        E6000SW_LOCK(sc);
        error = e6000sw_getvgroup(dev, vg);
        E6000SW_UNLOCK(sc);

        return (error);
}
/*
 * Port-based VLAN: vlangroup index doubles as the port number.  Tagged
 * members are unsupported in this mode.
 *
 * Fix: bounds check was off by one ("port > num_ports" let
 * port == num_ports through to e6000sw_port_vlan_assign(), indexing one
 * past the last port register).
 */
static int
e6000sw_set_port_vlan(e6000sw_softc_t *sc, etherswitch_vlangroup_t *vg)
{
        uint32_t port;

        port = vg->es_vlangroup;
        if (port >= sc->num_ports)
                return (EINVAL);
        if (vg->es_member_ports != vg->es_untagged_ports) {
                device_printf(sc->dev, "Tagged ports not supported.\n");
                return (EINVAL);
        }
        e6000sw_port_vlan_assign(sc, port, 0, vg->es_untagged_ports);
        vg->es_vid = port | ETHERSWITCH_VID_VALID;
        return (0);
}
/*
 * 802.1q VLAN: map the vlangroup slot to a VID.  VID 0 purges the
 * group's current VLAN from the VTU; otherwise the VTU entry is
 * (re)loaded with FID = vlangroup + 1.
 */
static int
e6000sw_set_dot1q_vlan(e6000sw_softc_t *sc, etherswitch_vlangroup_t *vg)
{
int i, vlan;
vlan = vg->es_vid & ETHERSWITCH_VID_MASK;
/* Set VLAN to '0' removes it from table. */
if (vlan == 0) {
e6000sw_vtu_update(sc, VTU_PURGE,
sc->vlans[vg->es_vlangroup], 0, 0, 0);
sc->vlans[vg->es_vlangroup] = 0;
return (0);
}
/* Is this VLAN already in table ? */
for (i = 0; i < etherswitch_info.es_nvlangroups; i++)
if (i != vg->es_vlangroup && vlan == sc->vlans[i])
return (EINVAL);
sc->vlans[vg->es_vlangroup] = vlan;
/* Member/untagged masks are clipped to ports that actually exist. */
e6000sw_vtu_update(sc, 0, vlan, vg->es_vlangroup + 1,
vg->es_member_ports & sc->ports_mask,
vg->es_untagged_ports & sc->ports_mask);
return (0);
}
/* Dispatch a vlangroup update to the handler for the active VLAN mode. */
static int
e6000sw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
        e6000sw_softc_t *sc;

        sc = device_get_softc(dev);
        E6000SW_LOCK_ASSERT(sc, SA_XLOCKED);

        switch (sc->vlan_mode) {
        case ETHERSWITCH_VLAN_PORT:
                return (e6000sw_set_port_vlan(sc, vg));
        case ETHERSWITCH_VLAN_DOT1Q:
                return (e6000sw_set_dot1q_vlan(sc, vg));
        default:
                return (EINVAL);
        }
}
/*
 * Port-based VLAN: read back a port's member map and FID (split across
 * PORT_VLAN_MAP and PORT_CONTROL1).
 *
 * Fix: bounds check was off by one ("port > num_ports" let
 * port == num_ports read one past the last port register block).
 */
static int
e6000sw_get_port_vlan(e6000sw_softc_t *sc, etherswitch_vlangroup_t *vg)
{
        uint32_t port, reg;

        port = vg->es_vlangroup;
        if (port >= sc->num_ports)
                return (EINVAL);
        if (!e6000sw_is_portenabled(sc, port)) {
                vg->es_vid = port;
                return (0);
        }
        reg = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_VLAN_MAP);
        vg->es_untagged_ports = vg->es_member_ports = reg & PORT_MASK(sc);
        vg->es_vid = port | ETHERSWITCH_VID_VALID;
        vg->es_fid = (reg & PORT_VLAN_MAP_FID_MASK) >> PORT_VLAN_MAP_FID;
        reg = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL1);
        vg->es_fid |= (reg & PORT_CONTROL1_FID_MASK) << 4;
        return (0);
}
/*
 * 802.1q VLAN: look up the group's VID in the VTU via a GET_NEXT
 * starting at VID-1, then decode the per-port tag state into the
 * member/untagged masks.
 */
static int
e6000sw_get_dot1q_vlan(e6000sw_softc_t *sc, etherswitch_vlangroup_t *vg)
{
int i, port;
uint32_t reg;
vg->es_fid = 0;
vg->es_vid = sc->vlans[vg->es_vlangroup];
vg->es_untagged_ports = vg->es_member_ports = 0;
if (vg->es_vid == 0)
return (0);
if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) {
device_printf(sc->dev, "VTU unit is busy, cannot access\n");
return (EBUSY);
}
/* GET_NEXT from VID-1 lands exactly on VID when it exists. */
e6000sw_writereg(sc, REG_GLOBAL, VTU_VID, vg->es_vid - 1);
reg = e6000sw_readreg(sc, REG_GLOBAL, VTU_OPERATION);
reg &= ~VTU_OP_MASK;
reg |= VTU_GET_NEXT | VTU_BUSY;
e6000sw_writereg(sc, REG_GLOBAL, VTU_OPERATION, reg);
if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) {
device_printf(sc->dev, "Timeout while reading\n");
return (EBUSY);
}
reg = e6000sw_readreg(sc, REG_GLOBAL, VTU_VID);
if (reg == VTU_VID_MASK || (reg & VTU_VID_VALID) == 0)
return (EINVAL);
if ((reg & VTU_VID_MASK) != vg->es_vid)
return (EINVAL);
vg->es_vid |= ETHERSWITCH_VID_VALID;
reg = e6000sw_readreg(sc, REG_GLOBAL, VTU_DATA);
for (i = 0; i < sc->num_ports; i++) {
/* Higher-numbered ports live in the second data register. */
if (i == VTU_PPREG(sc))
reg = e6000sw_readreg(sc, REG_GLOBAL, VTU_DATA2);
port = (reg >> VTU_PORT(sc, i)) & VTU_PORT_MASK;
if (port == VTU_PORT_UNTAGGED) {
vg->es_untagged_ports |= (1 << i);
vg->es_member_ports |= (1 << i);
} else if (port == VTU_PORT_TAGGED)
vg->es_member_ports |= (1 << i);
}
return (0);
}
/* Dispatch a vlangroup query to the handler for the active VLAN mode. */
static int
e6000sw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
        e6000sw_softc_t *sc;

        sc = device_get_softc(dev);
        E6000SW_LOCK_ASSERT(sc, SA_XLOCKED);

        switch (sc->vlan_mode) {
        case ETHERSWITCH_VLAN_PORT:
                return (e6000sw_get_port_vlan(sc, vg));
        case ETHERSWITCH_VLAN_DOT1Q:
                return (e6000sw_get_dot1q_vlan(sc, vg));
        default:
                return (EINVAL);
        }
}
/* Map a PHY port to its miibus softc; NULL for fixed/CPU ports. */
static __inline struct mii_data*
e6000sw_miiforphy(e6000sw_softc_t *sc, unsigned int phy)
{
if (!e6000sw_is_phyport(sc, phy))
return (NULL);
return (device_get_softc(sc->miibus[phy]));
}
/* ifmedia change callback: push the new media selection to the PHY. */
static int
e6000sw_ifmedia_upd(if_t ifp)
{
e6000sw_softc_t *sc;
struct mii_data *mii;
sc = if_getsoftc(ifp);
/* The interface unit number is the port number. */
mii = e6000sw_miiforphy(sc, if_getdunit(ifp));
if (mii == NULL)
return (ENXIO);
mii_mediachg(mii);
return (0);
}
/* ifmedia status callback: poll the PHY and report active/status. */
static void
e6000sw_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
e6000sw_softc_t *sc;
struct mii_data *mii;
sc = if_getsoftc(ifp);
mii = e6000sw_miiforphy(sc, if_getdunit(ifp));
if (mii == NULL)
return;
mii_pollstat(mii);
ifmr->ifm_active = mii->mii_media_active;
ifmr->ifm_status = mii->mii_media_status;
}
/*
 * Poll the multi-chip SMI command register until BUSY clears.
 * Returns 0 on success, 1 after E6000SW_SMI_TIMEOUT attempts.
 */
static int
e6000sw_smi_waitready(e6000sw_softc_t *sc, int phy)
{
        int attempts;

        attempts = 0;
        while (attempts++ < E6000SW_SMI_TIMEOUT) {
                if ((MDIO_READ(sc->dev, phy, SMI_CMD) & SMI_CMD_BUSY) == 0)
                        return (0);
                DELAY(1);
        }
        return (1);
}
/*
 * Read a 16-bit switch register.  Single-chip mode maps straight onto
 * MDIO; multi-chip mode goes through the indirect SMI command/data
 * registers at sw_addr.  Returns 0xffff on SMI timeout.
 */
static __inline uint32_t
e6000sw_readreg(e6000sw_softc_t *sc, int addr, int reg)
{
E6000SW_LOCK_ASSERT(sc, SA_XLOCKED);
if (!MVSWITCH_MULTICHIP(sc))
return (MDIO_READ(sc->dev, addr, reg) & 0xffff);
if (e6000sw_smi_waitready(sc, sc->sw_addr)) {
printf("e6000sw: readreg timeout\n");
return (0xffff);
}
MDIO_WRITE(sc->dev, sc->sw_addr, SMI_CMD,
SMI_CMD_OP_C22_READ | (reg & SMI_CMD_REG_ADDR_MASK) |
((addr << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK));
if (e6000sw_smi_waitready(sc, sc->sw_addr)) {
printf("e6000sw: readreg timeout\n");
return (0xffff);
}
return (MDIO_READ(sc->dev, sc->sw_addr, SMI_DATA) & 0xffff);
}
/*
 * Write a 16-bit switch register.  Single-chip mode maps straight onto
 * MDIO; multi-chip mode loads SMI_DATA then issues the indirect command
 * at sw_addr.  A timeout drops the write silently (after logging).
 *
 * Fix: the timeout message said "readreg timeout" in this write path.
 */
static __inline void
e6000sw_writereg(e6000sw_softc_t *sc, int addr, int reg, int val)
{

        E6000SW_LOCK_ASSERT(sc, SA_XLOCKED);
        if (!MVSWITCH_MULTICHIP(sc)) {
                MDIO_WRITE(sc->dev, addr, reg, val);
                return;
        }
        if (e6000sw_smi_waitready(sc, sc->sw_addr)) {
                printf("e6000sw: writereg timeout\n");
                return;
        }
        MDIO_WRITE(sc->dev, sc->sw_addr, SMI_DATA, val);
        MDIO_WRITE(sc->dev, sc->sw_addr, SMI_CMD,
            SMI_CMD_OP_C22_WRITE | (reg & SMI_CMD_REG_ADDR_MASK) |
            ((addr << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK));
}
/* True when the port is configured as a CPU port. */
static __inline bool
e6000sw_is_cpuport(e6000sw_softc_t *sc, int port)
{

        return ((sc->cpuports_mask & (1 << port)) != 0);
}
/* True when the port has a fixed link (no attached PHY). */
static __inline bool
e6000sw_is_fixedport(e6000sw_softc_t *sc, int port)
{

        return ((sc->fixed_mask & (1 << port)) != 0);
}
/* True when the port's fixed link runs at 2.5 Gb/s. */
static __inline bool
e6000sw_is_fixed25port(e6000sw_softc_t *sc, int port)
{

        return ((sc->fixed25_mask & (1 << port)) != 0);
}
/* True when the port has a real PHY (neither fixed nor CPU). */
static __inline bool
e6000sw_is_phyport(e6000sw_softc_t *sc, int port)
{
        uint32_t nonphy_mask;

        nonphy_mask = sc->fixed_mask | sc->cpuports_mask;
        return ((~nonphy_mask & (1 << port)) != 0);
}
/* True when the port was discovered/configured at attach time. */
static __inline bool
e6000sw_is_portenabled(e6000sw_softc_t *sc, int port)
{

        return ((sc->ports_mask & (1 << port)) != 0);
}
/* Program a port's default (ingress) VID, preserving the other bits. */
static __inline void
e6000sw_set_pvid(e6000sw_softc_t *sc, int port, int pvid)
{
uint32_t reg;
reg = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_VID);
reg &= ~PORT_VID_DEF_VID_MASK;
reg |= (pvid & PORT_VID_DEF_VID_MASK);
e6000sw_writereg(sc, REG_PORT(sc, port), PORT_VID, reg);
}
/* Fetch a port's default (ingress) VID into *pvid. */
static __inline int
e6000sw_get_pvid(e6000sw_softc_t *sc, int port, int *pvid)
{
if (pvid == NULL)
return (ENXIO);
*pvid = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_VID) &
PORT_VID_DEF_VID_MASK;
return (0);
}
/*
* Convert port status to ifmedia.
*/
static void
e6000sw_update_ifmedia(uint16_t portstatus, u_int *media_status, u_int *media_active)
{
*media_active = IFM_ETHER;
*media_status = IFM_AVALID;
if ((portstatus & PORT_STATUS_LINK_MASK) != 0)
*media_status |= IFM_ACTIVE;
else {
*media_active |= IFM_NONE;
return;
}
switch (portstatus & PORT_STATUS_SPEED_MASK) {
case PORT_STATUS_SPEED_10:
*media_active |= IFM_10_T;
break;
case PORT_STATUS_SPEED_100:
*media_active |= IFM_100_TX;
break;
case PORT_STATUS_SPEED_1000:
*media_active |= IFM_1000_T;
break;
}
/*
* NOTE(review): a cleared duplex bit is treated as full duplex here —
* confirm the PORT_STATUS_DUPLEX_MASK polarity against the 88E6xxx
* datasheet, as the usual convention is bit-set == full duplex.
*/
if ((portstatus & PORT_STATUS_DUPLEX_MASK) == 0)
*media_active |= IFM_FDX;
else
*media_active |= IFM_HDX;
}
/*
 * Periodic taskqueue callback: refresh media state of every enabled PHY
 * port from the hardware PORT_STATUS register and push the result into
 * the MII layer.
 */
static void
e6000sw_tick(void *arg, int p __unused)
{
e6000sw_softc_t *sc;
struct mii_data *mii;
struct mii_softc *miisc;
uint16_t portstatus;
int port;
sc = arg;
E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED);
E6000SW_LOCK(sc);
for (port = 0; port < sc->num_ports; port++) {
/* Tick only on PHY ports */
if (!e6000sw_is_portenabled(sc, port) ||
!e6000sw_is_phyport(sc, port))
continue;
mii = e6000sw_miiforphy(sc, port);
if (mii == NULL)
continue;
portstatus = e6000sw_readreg(sc, REG_PORT(sc, port),
PORT_STATUS);
e6000sw_update_ifmedia(portstatus,
&mii->mii_media_status, &mii->mii_media_active);
LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
/* Only update the PHY instance currently selected. */
if (IFM_INST(mii->mii_media.ifm_cur->ifm_media)
!= miisc->mii_inst)
continue;
mii_phy_update(miisc, MII_POLLSTAT);
}
}
E6000SW_UNLOCK(sc);
}
/*
 * One-time switch-core configuration at attach: ATU aging time,
 * management-frame trapping to the CPU, remote management off, and
 * loopback-filter/flow-control message handling.
 */
static void
e6000sw_setup(device_t dev, e6000sw_softc_t *sc)
{
uint32_t atu_ctrl;
/* Set aging time. */
atu_ctrl = e6000sw_readreg(sc, REG_GLOBAL, ATU_CONTROL);
atu_ctrl &= ~ATU_CONTROL_AGETIME_MASK;
atu_ctrl |= E6000SW_DEFAULT_AGETIME << ATU_CONTROL_AGETIME;
e6000sw_writereg(sc, REG_GLOBAL, ATU_CONTROL, atu_ctrl);
/* Send all with specific mac address to cpu port */
e6000sw_writereg(sc, REG_GLOBAL2, MGMT_EN_2x, MGMT_EN_ALL);
e6000sw_writereg(sc, REG_GLOBAL2, MGMT_EN_0x, MGMT_EN_ALL);
/* Disable Remote Management */
e6000sw_writereg(sc, REG_GLOBAL, SWITCH_GLOBAL_CONTROL2, 0);
/* Disable loopback filter and flow control messages */
e6000sw_writereg(sc, REG_GLOBAL2, SWITCH_MGMT,
SWITCH_MGMT_PRI_MASK |
(1 << SWITCH_MGMT_RSVD2CPU) |
SWITCH_MGMT_FC_PRI_MASK |
(1 << SWITCH_MGMT_FORCEFLOW));
/* NO_OPERATION calls just prime the helpers' early-return paths. */
e6000sw_atu_flush(dev, sc, NO_OPERATION);
e6000sw_atu_mac_table(dev, sc, NULL, NO_OPERATION);
e6000sw_set_atustat(dev, sc, 0, COUNT_ALL);
}
/* Select which bin/flag the ATU statistics counter tracks. */
static void
e6000sw_set_atustat(device_t dev, e6000sw_softc_t *sc, int bin, int flag)
{
/* Read clears/latches the counter before reprogramming it. */
e6000sw_readreg(sc, REG_GLOBAL2, ATU_STATS);
e6000sw_writereg(sc, REG_GLOBAL2, ATU_STATS, (bin << ATU_STATS_BIN ) |
(flag << ATU_STATS_FLAG));
}
/*
 * Issue an ATU (address translation unit) operation on a MAC entry:
 * load/purge/get-next or violation handling, per "flag".
 * NOTE(review): "atu" is dereferenced unconditionally below — callers
 * passing NULL must use NO_OPERATION (as e6000sw_setup() does).
 */
static int
e6000sw_atu_mac_table(device_t dev, e6000sw_softc_t *sc, struct atu_opt *atu,
int flag)
{
uint16_t ret_opt;
uint16_t ret_data;
if (flag == NO_OPERATION)
return (0);
else if ((flag & (LOAD_FROM_FIB | PURGE_FROM_FIB | GET_NEXT_IN_FIB |
GET_VIOLATION_DATA | CLEAR_VIOLATION_DATA)) == 0) {
device_printf(dev, "Wrong Opcode for ATU operation\n");
return (EINVAL);
}
if (E6000SW_WAITREADY(sc, ATU_OPERATION, ATU_UNIT_BUSY)) {
device_printf(dev, "ATU unit is busy, cannot access\n");
return (EBUSY);
}
ret_opt = e6000sw_readreg(sc, REG_GLOBAL, ATU_OPERATION);
if (flag & LOAD_FROM_FIB) {
/*
* NOTE(review): ATU_DATA is read from REG_GLOBAL but written
* via REG_GLOBAL2 — verify against the 88E6xxx register map.
*/
ret_data = e6000sw_readreg(sc, REG_GLOBAL, ATU_DATA);
e6000sw_writereg(sc, REG_GLOBAL2, ATU_DATA, (ret_data &
~ENTRY_STATE));
}
e6000sw_writereg(sc, REG_GLOBAL, ATU_MAC_ADDR01, atu->mac_01);
e6000sw_writereg(sc, REG_GLOBAL, ATU_MAC_ADDR23, atu->mac_23);
e6000sw_writereg(sc, REG_GLOBAL, ATU_MAC_ADDR45, atu->mac_45);
e6000sw_writereg(sc, REG_GLOBAL, ATU_FID, atu->fid);
e6000sw_writereg(sc, REG_GLOBAL, ATU_OPERATION,
(ret_opt | ATU_UNIT_BUSY | flag));
if (E6000SW_WAITREADY(sc, ATU_OPERATION, ATU_UNIT_BUSY))
device_printf(dev, "Timeout while waiting ATU\n");
else if (flag & GET_NEXT_IN_FIB) {
/* Return the MAC of the entry the hardware walked to. */
atu->mac_01 = e6000sw_readreg(sc, REG_GLOBAL,
ATU_MAC_ADDR01);
atu->mac_23 = e6000sw_readreg(sc, REG_GLOBAL,
ATU_MAC_ADDR23);
atu->mac_45 = e6000sw_readreg(sc, REG_GLOBAL,
ATU_MAC_ADDR45);
}
return (0);
}
/* Kick an ATU flush operation ("flag" selects the flush variant). */
static int
e6000sw_atu_flush(device_t dev, e6000sw_softc_t *sc, int flag)
{
uint32_t reg;
if (flag == NO_OPERATION)
return (0);
if (E6000SW_WAITREADY(sc, ATU_OPERATION, ATU_UNIT_BUSY)) {
device_printf(dev, "ATU unit is busy, cannot access\n");
return (EBUSY);
}
reg = e6000sw_readreg(sc, REG_GLOBAL, ATU_OPERATION);
e6000sw_writereg(sc, REG_GLOBAL, ATU_OPERATION,
(reg | ATU_UNIT_BUSY | flag));
if (E6000SW_WAITREADY(sc, ATU_OPERATION, ATU_UNIT_BUSY))
device_printf(dev, "Timeout while flushing ATU\n");
return (0);
}
/* Flush all entries from the VLAN translation unit. */
static int
e6000sw_vtu_flush(e6000sw_softc_t *sc)
{
if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) {
device_printf(sc->dev, "VTU unit is busy, cannot access\n");
return (EBUSY);
}
e6000sw_writereg(sc, REG_GLOBAL, VTU_OPERATION, VTU_FLUSH | VTU_BUSY);
if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) {
device_printf(sc->dev, "Timeout while flushing VTU\n");
return (ETIMEDOUT);
}
return (0);
}
/*
 * Load or purge a VTU entry for "vid".  On load, each port's tag state
 * (untagged / tagged / discard) is packed into VTU_DATA/VTU_DATA2 —
 * VTU_PPREG(sc) ports fit per data register.
 */
static int
e6000sw_vtu_update(e6000sw_softc_t *sc, int purge, int vid, int fid,
int members, int untagged)
{
int i, op;
uint32_t data[2];
if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) {
device_printf(sc->dev, "VTU unit is busy, cannot access\n");
return (EBUSY);
}
/* data[0] doubles as scratch for the VID word here. */
*data = (vid & VTU_VID_MASK);
if (purge == 0)
*data |= VTU_VID_VALID;
e6000sw_writereg(sc, REG_GLOBAL, VTU_VID, *data);
if (purge == 0) {
data[0] = 0;
data[1] = 0;
for (i = 0; i < sc->num_ports; i++) {
if ((untagged & (1 << i)) != 0)
data[i / VTU_PPREG(sc)] |=
VTU_PORT_UNTAGGED << VTU_PORT(sc, i);
else if ((members & (1 << i)) != 0)
data[i / VTU_PPREG(sc)] |=
VTU_PORT_TAGGED << VTU_PORT(sc, i);
else
data[i / VTU_PPREG(sc)] |=
VTU_PORT_DISCARD << VTU_PORT(sc, i);
}
e6000sw_writereg(sc, REG_GLOBAL, VTU_DATA, data[0]);
e6000sw_writereg(sc, REG_GLOBAL, VTU_DATA2, data[1]);
e6000sw_writereg(sc, REG_GLOBAL, VTU_FID,
fid & VTU_FID_MASK(sc));
op = VTU_LOAD;
} else
op = VTU_PURGE;
e6000sw_writereg(sc, REG_GLOBAL, VTU_OPERATION, op | VTU_BUSY);
if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) {
device_printf(sc->dev, "Timeout while flushing VTU\n");
return (ETIMEDOUT);
}
return (0);
}
diff --git a/sys/dev/etherswitch/e6000sw/e6060sw.c b/sys/dev/etherswitch/e6000sw/e6060sw.c
index 8bc482af24e8..3ff38abb69a0 100644
--- a/sys/dev/etherswitch/e6000sw/e6060sw.c
+++ b/sys/dev/etherswitch/e6000sw/e6060sw.c
@@ -1,1028 +1,1022 @@
/*-
* Copyright (c) 2016-2017 Hiroki Mori
* Copyright (c) 2013 Luiz Otavio O Souza.
* Copyright (c) 2011-2012 Stefan Bethke.
* Copyright (c) 2012 Adrian Chadd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* This code is Marvell 88E6060 ethernet switch support code on etherswitch
* framework.
* 88E6060 support is only port vlan support. Not support ingress/egress
* trailer.
* 88E6065 support is port and dot1q vlan. Also group base tag support.
*/
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <machine/bus.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mdio/mdio.h>
#include <dev/etherswitch/etherswitch.h>
#include "mdio_if.h"
#include "miibus_if.h"
#include "etherswitch_if.h"
#define CORE_REGISTER 0x8
#define SWITCH_ID 3
#define PORT_CONTROL 4
#define ENGRESSFSHIFT 2
#define ENGRESSFMASK 3
#define ENGRESSTAGSHIFT 12
#define ENGRESSTAGMASK 3
#define PORT_VLAN_MAP 6
#define FORCEMAPSHIFT 8
#define FORCEMAPMASK 1
#define PORT_DEFVLAN 7
#define DEFVIDMASK 0xfff
#define DEFPRIMASK 7
#define PORT_CONTROL2 8
#define DOT1QMODESHIFT 10
#define DOT1QMODEMASK 3
#define DOT1QNONE 0
#define DOT1QFALLBACK 1
#define DOT1QCHECK 2
#define DOT1QSECURE 3
#define GLOBAL_REGISTER 0xf
#define VTU_OPERATION 5
#define VTU_VID_REG 6
#define VTU_DATA1_REG 7
#define VTU_DATA2_REG 8
#define VTU_DATA3_REG 9
#define VTU_BUSY 0x8000
#define VTU_FLASH 1
#define VTU_LOAD_PURGE 3
#define VTU_GET_NEXT 4
#define VTU_VIOLATION 7
MALLOC_DECLARE(M_E6060SW);
MALLOC_DEFINE(M_E6060SW, "e6060sw", "e6060sw data structures");
struct e6060sw_softc {
    struct mtx sc_mtx;      /* serialize access to softc */
    device_t sc_dev;
    int vlan_mode;          /* active etherswitch VLAN mode */
    int media;              /* cpu port media */
    int cpuport;            /* which PHY is connected to the CPU */
    int phymask;            /* PHYs we manage */
    int numports;           /* number of ports */
    int ifpport[MII_NPHY];  /* PHY number -> port number map */
    int *portphy;           /* port number -> PHY number map */
    char **ifname;          /* names of the per-port dummy ifnets */
    device_t **miibus;      /* per-port miibus instances */
    if_t *ifp;              /* per-port dummy ifnets for the PHYs */
    struct callout callout_tick;    /* periodic PHY status poll */
    etherswitch_info_t info;
    int smi_offset;         /* SMI address offset of this switch */
    int sw_model;           /* detected device ID (E6060/E6063/E6065) */
};
/* Switch Identifier DeviceID */
#define E6060 0x60
#define E6063 0x63
#define E6065 0x65
#define E6060SW_LOCK(_sc) \
mtx_lock(&(_sc)->sc_mtx)
#define E6060SW_UNLOCK(_sc) \
mtx_unlock(&(_sc)->sc_mtx)
#define E6060SW_LOCK_ASSERT(_sc, _what) \
mtx_assert(&(_sc)->sc_mtx, (_what))
#define E6060SW_TRYLOCK(_sc) \
mtx_trylock(&(_sc)->sc_mtx)
#if defined(DEBUG)
#define DPRINTF(dev, args...) device_printf(dev, args)
#else
#define DPRINTF(dev, args...)
#endif
static inline int e6060sw_portforphy(struct e6060sw_softc *, int);
static void e6060sw_tick(void *);
static int e6060sw_ifmedia_upd(if_t);
static void e6060sw_ifmedia_sts(if_t, struct ifmediareq *);
static void e6060sw_setup(device_t dev);
static int e6060sw_read_vtu(device_t dev, int num, int *data1, int *data2);
static void e6060sw_set_vtu(device_t dev, int num, int data1, int data2);
/*
 * Probe for a supported Marvell switch by reading the Switch Identifier
 * register at both possible SMI offsets.
 */
static int
e6060sw_probe(device_t dev)
{
    struct e6060sw_softc *sc;
    char *devname;
    int devid, idx, reg;

    sc = device_get_softc(dev);
    bzero(sc, sizeof(*sc));

    /* The switch may sit at SMI offset 0x00 or 0x10; try both. */
    devid = 0;
    for (idx = 0; idx < 2; ++idx) {
        reg = MDIO_READREG(device_get_parent(dev),
            CORE_REGISTER + idx * 0x10, SWITCH_ID);
        if (bootverbose)
            device_printf(dev,"Switch Identifier Register %x\n",
                reg);

        devid = reg >> 4;
        if (devid == E6060 || devid == E6063 || devid == E6065) {
            sc->sw_model = devid;
            sc->smi_offset = idx * 0x10;
            break;
        }
    }

    switch (devid) {
    case E6060:
        devname = "88E6060";
        break;
    case E6063:
        devname = "88E6063";
        break;
    case E6065:
        devname = "88E6065";
        break;
    default:
        return (ENXIO);
    }

    device_set_descf(dev, "Marvell %s MDIO switch driver at 0x%02x",
        devname, sc->smi_offset);

    return (BUS_PROBE_DEFAULT);
}
/*
 * Attach a miibus instance (and the dummy ifnet it requires) for each
 * PHY enabled in sc->phymask, building the PHY<->port maps as we go.
 * Returns 0 on success or the mii_attach() error.
 */
static int
e6060sw_attach_phys(struct e6060sw_softc *sc)
{
    int phy, port, err;
    char name[IFNAMSIZ];

    port = 0;
    err = 0;
    /* PHYs need an interface, so we generate a dummy one */
    snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->sc_dev));
    for (phy = 0; phy < sc->numports; phy++) {
        if (((1 << phy) & sc->phymask) == 0)
            continue;
        /* Record the mapping in both directions. */
        sc->ifpport[phy] = port;
        sc->portphy[port] = phy;
        sc->ifp[port] = if_alloc(IFT_ETHER);
-	if (sc->ifp[port] == NULL) {
-		device_printf(sc->sc_dev, "couldn't allocate ifnet structure\n");
-		err = ENOMEM;
-		break;
-	}
-
        sc->ifp[port]->if_softc = sc;
        sc->ifp[port]->if_flags |= IFF_UP | IFF_BROADCAST |
            IFF_DRV_RUNNING | IFF_SIMPLEX;
        if_initname(sc->ifp[port], name, port);
        sc->miibus[port] = malloc(sizeof(device_t), M_E6060SW,
            M_WAITOK | M_ZERO);
        /* The PHY address on the bus includes the switch SMI offset. */
        err = mii_attach(sc->sc_dev, sc->miibus[port], sc->ifp[port],
            e6060sw_ifmedia_upd, e6060sw_ifmedia_sts, \
            BMSR_DEFCAPMASK, phy + sc->smi_offset, MII_OFFSET_ANY, 0);
        DPRINTF(sc->sc_dev, "%s attached to pseudo interface %s\n",
            device_get_nameunit(*sc->miibus[port]),
            sc->ifp[port]->if_xname);
        if (err != 0) {
            device_printf(sc->sc_dev,
                "attaching PHY %d failed\n",
                phy);
            break;
        }
        ++port;
    }
    sc->info.es_nports = port;
    if (sc->cpuport != -1) {
        /* assume cpuport is last one */
        sc->ifpport[sc->cpuport] = port;
        sc->portphy[port] = sc->cpuport;
        ++sc->info.es_nports;
    }
    return (err);
}
/*
 * Attach the switch: apply per-model defaults (overridable via device
 * hints), program the port hardware, allocate the per-port arrays and
 * attach the PHYs and hinted children.
 */
static int
e6060sw_attach(device_t dev)
{
    struct e6060sw_softc *sc;
    int err;

    sc = device_get_softc(dev);
    err = 0;

    sc->sc_dev = dev;
    mtx_init(&sc->sc_mtx, "e6060sw", NULL, MTX_DEF);
    strlcpy(sc->info.es_name, device_get_desc(dev),
        sizeof(sc->info.es_name));

    /* XXX Defaults */
    if (sc->sw_model == E6063) {
        /* 88E6063: three ports, port 2 wired to the CPU. */
        sc->numports = 3;
        sc->phymask = 0x07;
        sc->cpuport = 2;
    } else {
        sc->numports = 6;
        sc->phymask = 0x1f;
        sc->cpuport = 5;
    }
    sc->media = 100;

    /* Device hints may override any of the defaults above. */
    (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
        "numports", &sc->numports);
    (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
        "phymask", &sc->phymask);
    (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
        "cpuport", &sc->cpuport);
    (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
        "media", &sc->media);

    /* Only the 88E6063/88E6065 get dot1q (VTU-backed) VLAN support. */
    if (sc->sw_model == E6060) {
        sc->info.es_nvlangroups = sc->numports;
        sc->info.es_vlan_caps = ETHERSWITCH_VLAN_PORT;
    } else {
        sc->info.es_nvlangroups = 64;
        sc->info.es_vlan_caps = ETHERSWITCH_VLAN_PORT |
            ETHERSWITCH_VLAN_DOT1Q;
    }

    e6060sw_setup(dev);

    /* Per-port bookkeeping arrays, indexed 0 .. numports - 1. */
    sc->ifp = malloc(sizeof(if_t) * sc->numports, M_E6060SW,
        M_WAITOK | M_ZERO);
    sc->ifname = malloc(sizeof(char *) * sc->numports, M_E6060SW,
        M_WAITOK | M_ZERO);
    sc->miibus = malloc(sizeof(device_t *) * sc->numports, M_E6060SW,
        M_WAITOK | M_ZERO);
    sc->portphy = malloc(sizeof(int) * sc->numports, M_E6060SW,
        M_WAITOK | M_ZERO);

    /*
     * Attach the PHYs and complete the bus enumeration.
     */
    err = e6060sw_attach_phys(sc);
    if (err != 0)
        return (err);

    bus_generic_probe(dev);
    bus_enumerate_hinted_children(dev);
    err = bus_generic_attach(dev);
    if (err != 0)
        return (err);

    callout_init(&sc->callout_tick, 0);

    /* Start the periodic PHY poll; it reschedules itself. */
    e6060sw_tick(sc);

    return (err);
}
/*
 * Detach: stop the poll callout, tear down the miibus/ifnet pair that
 * was created for each managed PHY and release the per-port arrays.
 */
static int
e6060sw_detach(device_t dev)
{
    struct e6060sw_softc *sc;
    int i, port;

    sc = device_get_softc(dev);

    callout_drain(&sc->callout_tick);

    for (i = 0; i < MII_NPHY; i++) {
        if (((1 << i) & sc->phymask) == 0)
            continue;
        port = e6060sw_portforphy(sc, i);
        if (sc->miibus[port] != NULL)
            device_delete_child(dev, (*sc->miibus[port]));
        if (sc->ifp[port] != NULL)
            if_free(sc->ifp[port]);
        free(sc->ifname[port], M_E6060SW);
        free(sc->miibus[port], M_E6060SW);
    }

    free(sc->portphy, M_E6060SW);
    free(sc->miibus, M_E6060SW);
    free(sc->ifname, M_E6060SW);
    free(sc->ifp, M_E6060SW);

    bus_generic_detach(dev);
    mtx_destroy(&sc->sc_mtx);

    return (0);
}
/*
* Convert PHY number to port number.
*/
static inline int
e6060sw_portforphy(struct e6060sw_softc *sc, int phy)
{

    /* Map built by e6060sw_attach_phys(); phy must be < MII_NPHY. */
    return (sc->ifpport[phy]);
}
/*
 * Return the mii_data for the given port, or NULL if the port index is
 * out of range or is the (PHY-less) CPU port.
 */
static inline struct mii_data *
e6060sw_miiforport(struct e6060sw_softc *sc, int port)
{

    /*
     * Valid indices are 0 .. numports - 1; the previous check used
     * "port > sc->numports", which let port == numports through and
     * read one element past the numports-sized miibus array.
     */
    if (port < 0 || port >= sc->numports)
        return (NULL);
    /* The CPU port has no attached PHY, hence no mii_data. */
    if (port == sc->cpuport)
        return (NULL);
    return (device_get_softc(*sc->miibus[port]));
}
/*
 * Return the dummy ifnet for the given port, or NULL if the port index
 * is out of range.
 */
static inline if_t
e6060sw_ifpforport(struct e6060sw_softc *sc, int port)
{

    /*
     * sc->ifp has exactly numports entries; the previous
     * "port > sc->numports" check allowed an off-by-one read
     * at port == numports.
     */
    if (port < 0 || port >= sc->numports)
        return (NULL);
    return (sc->ifp[port]);
}
/*
* Poll the status for all PHYs.
*/
static void
e6060sw_miipollstat(struct e6060sw_softc *sc)
{
    int i, port;
    struct mii_data *mii;
    struct mii_softc *miisc;

    E6060SW_LOCK_ASSERT(sc, MA_NOTOWNED);

    /* Walk every managed PHY and refresh its media status. */
    for (i = 0; i < MII_NPHY; i++) {
        if (((1 << i) & sc->phymask) == 0)
            continue;
        port = e6060sw_portforphy(sc, i);
        if ((*sc->miibus[port]) == NULL)
            continue;
        mii = device_get_softc(*sc->miibus[port]);
        LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
            /* Only poll the PHY selected by the current media. */
            if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) !=
                miisc->mii_inst)
                continue;
            ukphy_status(miisc);
            mii_phy_update(miisc, MII_POLLSTAT);
        }
    }
}
/* Periodic callout: poll all PHYs once per second. */
static void
e6060sw_tick(void *arg)
{
    struct e6060sw_softc *sc;

    sc = arg;

    e6060sw_miipollstat(sc);
    callout_reset(&sc->callout_tick, hz, e6060sw_tick, sc);
}
/* etherswitch framework lock hook: take the softc mutex. */
static void
e6060sw_lock(device_t dev)
{
    struct e6060sw_softc *sc;

    sc = device_get_softc(dev);

    E6060SW_LOCK_ASSERT(sc, MA_NOTOWNED);
    E6060SW_LOCK(sc);
}
/* etherswitch framework unlock hook: release the softc mutex. */
static void
e6060sw_unlock(device_t dev)
{
    struct e6060sw_softc *sc;

    sc = device_get_softc(dev);

    E6060SW_LOCK_ASSERT(sc, MA_OWNED);
    E6060SW_UNLOCK(sc);
}
/* Return the static switch capability/identification record. */
static etherswitch_info_t *
e6060sw_getinfo(device_t dev)
{
    struct e6060sw_softc *sc = device_get_softc(dev);

    return (&sc->info);
}
/*
 * etherswitch getport hook: report PVID (dot1q mode only) and media
 * status for one port.
 */
static int
e6060sw_getport(device_t dev, etherswitch_port_t *p)
{
    struct e6060sw_softc *sc;
    struct mii_data *mii;
    struct ifmediareq *ifmr;
    int err, phy;

    sc = device_get_softc(dev);
    ifmr = &p->es_ifmr;

    if (p->es_port < 0 || p->es_port >= sc->numports)
        return (ENXIO);

    p->es_pvid = 0;
    /* The per-port default VID register only matters in dot1q mode. */
    if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {
        p->es_pvid = MDIO_READREG(device_get_parent(dev),
            CORE_REGISTER + sc->smi_offset + p->es_port,
            PORT_DEFVLAN) & 0xfff;
    }

    phy = sc->portphy[p->es_port];
    mii = e6060sw_miiforport(sc, p->es_port);
    if (sc->cpuport != -1 && phy == sc->cpuport) {
        /* fill in fixed values for CPU port */
        p->es_flags |= ETHERSWITCH_PORT_CPU;
        ifmr->ifm_count = 0;
        /* The "media" hint selects 100 or 1000 Mb/s for the CPU link. */
        if (sc->media == 100)
            ifmr->ifm_current = ifmr->ifm_active =
                IFM_ETHER | IFM_100_TX | IFM_FDX;
        else
            ifmr->ifm_current = ifmr->ifm_active =
                IFM_ETHER | IFM_1000_T | IFM_FDX;
        ifmr->ifm_mask = 0;
        ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
    } else if (mii != NULL) {
        /* Regular port: let the attached PHY report its media. */
        err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr,
            &mii->mii_media, SIOCGIFMEDIA);
        if (err)
            return (err);
    } else {
        return (ENXIO);
    }
    return (0);
}
/*
 * etherswitch setport hook: program the port's default VID (dot1q mode
 * only) and forward the media request to the attached PHY.
 */
static int
e6060sw_setport(device_t dev, etherswitch_port_t *p)
{
    struct e6060sw_softc *sc;
    struct ifmedia *ifm;
    struct mii_data *mii;
    if_t ifp;
    int err;
    int data;

    sc = device_get_softc(dev);

    if (p->es_port < 0 || p->es_port >= sc->numports)
        return (ENXIO);

    if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {
        /* Update only the VID field of the default-VLAN register. */
        data = MDIO_READREG(device_get_parent(dev),
            CORE_REGISTER + sc->smi_offset + p->es_port,
            PORT_DEFVLAN);
        data &= ~0xfff;
        data |= p->es_pvid;
        /* NOTE(review): bit 12 is also set here — confirm its meaning
         * against the 88E6065 PORT_DEFVLAN register description. */
        data |= 1 << 12;
        MDIO_WRITEREG(device_get_parent(dev),
            CORE_REGISTER + sc->smi_offset + p->es_port,
            PORT_DEFVLAN, data);
    }

    /* The CPU port has no PHY, so there is no media to set. */
    if (sc->portphy[p->es_port] == sc->cpuport)
        return(0);

    mii = e6060sw_miiforport(sc, p->es_port);
    if (mii == NULL)
        return (ENXIO);

    ifp = e6060sw_ifpforport(sc, p->es_port);

    ifm = &mii->mii_media;
    err = ifmedia_ioctl(ifp, &p->es_ifr, ifm, SIOCSIFMEDIA);
    return (err);
}
/*
 * etherswitch getvgroup hook.  In port-VLAN mode the group number is
 * the port number and membership comes straight from PORT_VLAN_MAP.
 * In dot1q mode the entry is fetched from the VTU.
 */
static int
e6060sw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
    struct e6060sw_softc *sc;
    int data1, data2;
    int vid;
    int i, tag;

    sc = device_get_softc(dev);

    if (sc->vlan_mode == ETHERSWITCH_VLAN_PORT) {
        vg->es_vid = ETHERSWITCH_VID_VALID;
        vg->es_vid |= vg->es_vlangroup;
        data1 = MDIO_READREG(device_get_parent(dev),
            CORE_REGISTER + sc->smi_offset + vg->es_vlangroup,
            PORT_VLAN_MAP);
        vg->es_member_ports = data1 & 0x3f;
        vg->es_untagged_ports = vg->es_member_ports;
        vg->es_fid = 0;
    } else if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {
        if (vg->es_vlangroup == 0)
            return (0);
        vid = e6060sw_read_vtu(dev, vg->es_vlangroup, &data1, &data2);
        if (vid > 0) {
            vg->es_vid = ETHERSWITCH_VID_VALID;
            vg->es_vid |= vid;
            vg->es_member_ports = 0;
            vg->es_untagged_ports = 0;
            /*
             * Decode the per-port 4-bit member-tag fields: data1
             * holds ports 0-3, data2 ports 4-5.  Values 0/1 are
             * treated as untagged members, 2 as a tagged member
             * and 3 as a non-member (presumably the 88E6065 VTU
             * member-tag encoding — verify against the datasheet).
             */
            for (i = 0; i < 4; ++i) {
                tag = data1 >> (i * 4) & 3;
                if (tag == 0 || tag == 1) {
                    vg->es_member_ports |= 1 << i;
                    vg->es_untagged_ports |= 1 << i;
                } else if (tag == 2) {
                    vg->es_member_ports |= 1 << i;
                }
            }
            for (i = 0; i < 2; ++i) {
                tag = data2 >> (i * 4) & 3;
                if (tag == 0 || tag == 1) {
                    vg->es_member_ports |= 1 << (i + 4);
                    vg->es_untagged_ports |= 1 << (i + 4);
                } else if (tag == 2) {
                    vg->es_member_ports |= 1 << (i + 4);
                }
            }
        }
    } else {
        /* VLANs disabled: report no valid VID. */
        vg->es_vid = 0;
    }
    return (0);
}
/*
 * etherswitch setvgroup hook.  In port-VLAN mode write the membership
 * mask into PORT_VLAN_MAP; in dot1q mode build the VTU data words and
 * load the entry.
 */
static int
e6060sw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
    struct e6060sw_softc *sc;
    int data1, data2;
    int i;

    sc = device_get_softc(dev);

    if (sc->vlan_mode == ETHERSWITCH_VLAN_PORT) {
        data1 = MDIO_READREG(device_get_parent(dev),
            CORE_REGISTER + sc->smi_offset + vg->es_vlangroup,
            PORT_VLAN_MAP);
        data1 &= ~0x3f;
        data1 |= vg->es_member_ports;
        MDIO_WRITEREG(device_get_parent(dev),
            CORE_REGISTER + sc->smi_offset + vg->es_vlangroup,
            PORT_VLAN_MAP, data1);
    } else if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {
        if (vg->es_vlangroup == 0)
            return (0);
        data1 = 0;
        data2 = 0;
        /*
         * Build the per-port 4-bit fields (ports 0-3 in data1,
         * 4-5 in data2): 0xd for untagged members, 0xe for tagged
         * members, 0x3 for non-members — presumably member-tag
         * plus extra per-port bits; confirm against the 88E6065
         * VTU data register layout.
         */
        for (i = 0; i < 6; ++i) {
            if (vg->es_member_ports &
                vg->es_untagged_ports & (1 << i)) {
                if (i < 4) {
                    data1 |= (0xd << i * 4);
                } else {
                    data2 |= (0xd << (i - 4) * 4);
                }
            } else if (vg->es_member_ports & (1 << i)) {
                if (i < 4) {
                    data1 |= (0xe << i * 4);
                } else {
                    data2 |= (0xe << (i - 4) * 4);
                }
            } else {
                if (i < 4) {
                    data1 |= (0x3 << i * 4);
                } else {
                    data2 |= (0x3 << (i - 4) * 4);
                }
            }
        }
        e6060sw_set_vtu(dev, vg->es_vlangroup, data1, data2);
    }
    return (0);
}
/*
 * Reset every port's VLAN map so each port may forward to all other
 * ports except itself.
 */
static void
e6060sw_reset_vlans(device_t dev)
{
    struct e6060sw_softc *sc;
    uint32_t ports;
    int i;
    int data;

    sc = device_get_softc(dev);

    /*
     * NOTE(review): the loop runs numports + 1 times (i <= numports),
     * writing one more port register than sc->numports — confirm the
     * extra register is intentional for this hardware.
     */
    for (i = 0; i <= sc->numports; i++) {
        /* All ports except this one. */
        ports = (1 << (sc->numports + 1)) - 1;
        ports &= ~(1 << i);
        if (sc->vlan_mode == ETHERSWITCH_VLAN_PORT) {
            /* Bits 12+ hold a per-port database number here —
             * TODO confirm field meaning in PORT_VLAN_MAP. */
            data = i << 12;
        } else if (sc->vlan_mode == 0) {
            /* VLANs disabled: set the ForceMap bit (bit 8). */
            data = 1 << 8;
        } else {
            data = 0;
        }
        data |= ports;
        MDIO_WRITEREG(device_get_parent(dev),
            CORE_REGISTER + sc->smi_offset + i, PORT_VLAN_MAP, data);
    }
}
/*
 * One-time port initialization.  Only the 88E6063/88E6065 need it:
 * clear the ForceMap bit in each port's VLAN map and set the egress
 * mode bits in the port control register.
 */
static void
e6060sw_setup(device_t dev)
{
    struct e6060sw_softc *sc;
    int i;
    int data;

    sc = device_get_softc(dev);

    for (i = 0; i <= sc->numports; i++) {
        if (sc->sw_model == E6063 || sc->sw_model == E6065) {
            data = MDIO_READREG(device_get_parent(dev),
                CORE_REGISTER + sc->smi_offset + i, PORT_VLAN_MAP);
            data &= ~(FORCEMAPMASK << FORCEMAPSHIFT);
            MDIO_WRITEREG(device_get_parent(dev),
                CORE_REGISTER + sc->smi_offset + i,
                PORT_VLAN_MAP, data);

            /* Egress mode 3 — TODO confirm meaning in the
             * 88E6065 PORT_CONTROL register description. */
            data = MDIO_READREG(device_get_parent(dev),
                CORE_REGISTER + sc->smi_offset + i, PORT_CONTROL);
            data |= 3 << ENGRESSFSHIFT;
            MDIO_WRITEREG(device_get_parent(dev),
                CORE_REGISTER + sc->smi_offset + i,
                PORT_CONTROL, data);
        }
    }
}
/*
 * Program the 802.1Q mode (DOT1QNONE/FALLBACK/CHECK/SECURE) on every
 * port and reset each port's default VID to 1.
 */
static void
e6060sw_dot1q_mode(device_t dev, int mode)
{
    struct e6060sw_softc *sc;
    int i;
    int data;

    sc = device_get_softc(dev);

    for (i = 0; i <= sc->numports; i++) {
        data = MDIO_READREG(device_get_parent(dev),
            CORE_REGISTER + sc->smi_offset + i, PORT_CONTROL2);
        data &= ~(DOT1QMODEMASK << DOT1QMODESHIFT);
        data |= mode << DOT1QMODESHIFT;
        MDIO_WRITEREG(device_get_parent(dev),
            CORE_REGISTER + sc->smi_offset + i, PORT_CONTROL2, data);

        /* Default VID back to 1 whenever the mode changes. */
        data = MDIO_READREG(device_get_parent(dev),
            CORE_REGISTER + sc->smi_offset + i,
            PORT_DEFVLAN);
        data &= ~0xfff;
        data |= 1;
        MDIO_WRITEREG(device_get_parent(dev),
            CORE_REGISTER + sc->smi_offset + i,
            PORT_DEFVLAN, data);
    }
}
/* etherswitch getconf hook: report the currently active VLAN mode. */
static int
e6060sw_getconf(device_t dev, etherswitch_conf_t *conf)
{
    struct e6060sw_softc *sc = device_get_softc(dev);

    /* Return the VLAN mode. */
    conf->cmd = ETHERSWITCH_CONF_VLAN_MODE;
    conf->vlan_mode = sc->vlan_mode;

    return (0);
}
/*
 * Initialize the VTU: flush all entries, then load VLAN 1 with every
 * port as a member.  Note the busy-wait loops have no timeout, so a
 * wedged VTU would hang here.
 */
static void
e6060sw_init_vtu(device_t dev)
{
    struct e6060sw_softc *sc;
    int busy;

    sc = device_get_softc(dev);

    /* Flush the whole table (VTU_FLASH is the flush opcode). */
    MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset,
        VTU_OPERATION, VTU_BUSY | (VTU_FLASH << 12));
    while (1) {
        busy = MDIO_READREG(device_get_parent(dev),
            GLOBAL_REGISTER + sc->smi_offset, VTU_OPERATION);
        if ((busy & VTU_BUSY) == 0)
            break;
    }

    /* initial member set at vlan 1*/
    MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset,
        VTU_DATA1_REG, 0xcccc);
    MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset,
        VTU_DATA2_REG, 0x00cc);
    /* 0x1000 is the entry-valid bit, low bits hold the VID. */
    MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset,
        VTU_VID_REG, 0x1000 | 1);
    MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset,
        VTU_OPERATION, VTU_BUSY | (VTU_LOAD_PURGE << 12) | 1);
    while (1) {
        busy = MDIO_READREG(device_get_parent(dev),
            GLOBAL_REGISTER + sc->smi_offset, VTU_OPERATION);
        if ((busy & VTU_BUSY) == 0)
            break;
    }
}
/*
 * Load one VTU entry: write the two member-tag data words, mark the
 * entry valid for VID "num" and issue a load/purge operation, then
 * busy-wait (untimed) for completion.
 */
static void
e6060sw_set_vtu(device_t dev, int num, int data1, int data2)
{
    struct e6060sw_softc *sc;
    int busy;

    sc = device_get_softc(dev);

    MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset,
        VTU_DATA1_REG, data1);
    MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset,
        VTU_DATA2_REG, data2);
    /* 0x1000 is the entry-valid bit. */
    MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset,
        VTU_VID_REG, 0x1000 | num);
    MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset,
        VTU_OPERATION, VTU_BUSY | (VTU_LOAD_PURGE << 12) | num);
    while (1) {
        busy = MDIO_READREG(device_get_parent(dev),
            GLOBAL_REGISTER + sc->smi_offset, VTU_OPERATION);
        if ((busy & VTU_BUSY) == 0)
            break;
    }
}
/*
 * Fetch the first valid VTU entry with VID >= num using the GetNext
 * operation (the search is seeded at num - 1 so an entry for num
 * itself is returned first).  On success stores the two data words and
 * returns the entry's VID; returns -1 if no valid entry was found.
 */
static int
e6060sw_read_vtu(device_t dev, int num, int *data1, int *data2)
{
    struct e6060sw_softc *sc;
    int busy;

    sc = device_get_softc(dev);

    num = num - 1;

    MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset,
        VTU_VID_REG, num & 0xfff);
    /* Get Next */
    MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset,
        VTU_OPERATION, VTU_BUSY | (VTU_GET_NEXT << 12));
    /* Untimed busy-wait for the self-clearing busy bit. */
    while (1) {
        busy = MDIO_READREG(device_get_parent(dev),
            GLOBAL_REGISTER + sc->smi_offset, VTU_OPERATION);
        if ((busy & VTU_BUSY) == 0)
            break;
    }

    int vid = MDIO_READREG(device_get_parent(dev),
        GLOBAL_REGISTER + sc->smi_offset, VTU_VID_REG);
    /* 0x1000 is the entry-valid bit. */
    if (vid & 0x1000) {
        *data1 = MDIO_READREG(device_get_parent(dev),
            GLOBAL_REGISTER + sc->smi_offset, VTU_DATA1_REG);
        *data2 = MDIO_READREG(device_get_parent(dev),
            GLOBAL_REGISTER + sc->smi_offset, VTU_DATA2_REG);

        return (vid & 0xfff);
    }

    return (-1);
}
/*
 * etherswitch setconf hook: switch between port-based VLANs, dot1q
 * VLANs (88E6063/65 only) and no VLANs.
 */
static int
e6060sw_setconf(device_t dev, etherswitch_conf_t *conf)
{
    struct e6060sw_softc *sc;

    sc = device_get_softc(dev);

    /* Set the VLAN mode. */
    if (conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) {
        if (conf->vlan_mode == ETHERSWITCH_VLAN_PORT) {
            sc->vlan_mode = ETHERSWITCH_VLAN_PORT;
            e6060sw_dot1q_mode(dev, DOT1QNONE);
            e6060sw_reset_vlans(dev);
        } else if ((sc->sw_model == E6063 || sc->sw_model == E6065) &&
            conf->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {
            /* dot1q requires the VTU, absent on the 88E6060. */
            sc->vlan_mode = ETHERSWITCH_VLAN_DOT1Q;
            e6060sw_dot1q_mode(dev, DOT1QSECURE);
            e6060sw_init_vtu(dev);
        } else {
            sc->vlan_mode = 0;
            /* Reset VLANs. */
            e6060sw_dot1q_mode(dev, DOT1QNONE);
            e6060sw_reset_vlans(dev);
        }
    }

    return (0);
}
/* MII status-change callback; nothing to do beyond a debug trace. */
static void
e6060sw_statchg(device_t dev)
{

    DPRINTF(dev, "%s\n", __func__);
}
/* ifmedia change hook for the per-port dummy ifnets. */
static int
e6060sw_ifmedia_upd(if_t ifp)
{
    struct e6060sw_softc *sc;
    struct mii_data *mii;

    sc = if_getsoftc(ifp);
    /* The ifnet unit number is the port number. */
    mii = e6060sw_miiforport(sc, if_getdunit(ifp));

    DPRINTF(sc->sc_dev, "%s\n", __func__);
    if (mii == NULL)
        return (ENXIO);
    mii_mediachg(mii);
    return (0);
}
/* ifmedia status hook for the per-port dummy ifnets. */
static void
e6060sw_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
    struct e6060sw_softc *sc;
    struct mii_data *mii;

    sc = if_getsoftc(ifp);
    /* The ifnet unit number is the port number. */
    mii = e6060sw_miiforport(sc, if_getdunit(ifp));

    DPRINTF(sc->sc_dev, "%s\n", __func__);

    if (mii == NULL)
        return;
    mii_pollstat(mii);
    ifmr->ifm_active = mii->mii_media_active;
    ifmr->ifm_status = mii->mii_media_status;
}
/*
 * Read a PHY register over MDIO, serialized by the softc mutex.
 * Returns the register value, or ENXIO for out-of-range arguments.
 */
static int
e6060sw_readphy(device_t dev, int phy, int reg)
{
    struct e6060sw_softc *sc;
    int val;

    sc = device_get_softc(dev);
    E6060SW_LOCK_ASSERT(sc, MA_NOTOWNED);

    if (phy < 0 || phy >= 32 || reg < 0 || reg >= 32)
        return (ENXIO);

    E6060SW_LOCK(sc);
    val = MDIO_READREG(device_get_parent(dev), phy, reg);
    E6060SW_UNLOCK(sc);

    return (val);
}
/*
 * Write a PHY register over MDIO, serialized by the softc mutex.
 * Returns the MDIO result, or ENXIO for out-of-range arguments.
 */
static int
e6060sw_writephy(device_t dev, int phy, int reg, int data)
{
    struct e6060sw_softc *sc;
    int rv;

    sc = device_get_softc(dev);
    E6060SW_LOCK_ASSERT(sc, MA_NOTOWNED);

    if (phy < 0 || phy >= 32 || reg < 0 || reg >= 32)
        return (ENXIO);

    E6060SW_LOCK(sc);
    rv = MDIO_WRITEREG(device_get_parent(dev), phy, reg, data);
    E6060SW_UNLOCK(sc);

    return (rv);
}
/* addr encodes the SMI device address in bits 5-8 and the SMI register address in bits 0-4 */
/* Read a raw switch register; addr packs device (bits 5-8) and register
 * (bits 0-4) addresses. */
static int
e6060sw_readreg(device_t dev, int addr)
{

    return (MDIO_READREG(device_get_parent(dev), (addr >> 5) & 0x1f,
        addr & 0x1f));
}
/* addr encodes the SMI device address in bits 5-8 and the SMI register address in bits 0-4 */
/* Write a raw switch register; addr packs device (bits 5-8) and register
 * (bits 0-4) addresses. */
static int
e6060sw_writereg(device_t dev, int addr, int value)
{

    return (MDIO_WRITEREG(device_get_parent(dev), (addr >> 5) & 0x1f,
        addr & 0x1f, value));
}
static device_method_t e6060sw_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, e6060sw_probe),
DEVMETHOD(device_attach, e6060sw_attach),
DEVMETHOD(device_detach, e6060sw_detach),
/* bus interface */
DEVMETHOD(bus_add_child, device_add_child_ordered),
/* MII interface */
DEVMETHOD(miibus_readreg, e6060sw_readphy),
DEVMETHOD(miibus_writereg, e6060sw_writephy),
DEVMETHOD(miibus_statchg, e6060sw_statchg),
/* MDIO interface */
DEVMETHOD(mdio_readreg, e6060sw_readphy),
DEVMETHOD(mdio_writereg, e6060sw_writephy),
/* etherswitch interface */
DEVMETHOD(etherswitch_lock, e6060sw_lock),
DEVMETHOD(etherswitch_unlock, e6060sw_unlock),
DEVMETHOD(etherswitch_getinfo, e6060sw_getinfo),
DEVMETHOD(etherswitch_readreg, e6060sw_readreg),
DEVMETHOD(etherswitch_writereg, e6060sw_writereg),
DEVMETHOD(etherswitch_readphyreg, e6060sw_readphy),
DEVMETHOD(etherswitch_writephyreg, e6060sw_writephy),
DEVMETHOD(etherswitch_getport, e6060sw_getport),
DEVMETHOD(etherswitch_setport, e6060sw_setport),
DEVMETHOD(etherswitch_getvgroup, e6060sw_getvgroup),
DEVMETHOD(etherswitch_setvgroup, e6060sw_setvgroup),
DEVMETHOD(etherswitch_setconf, e6060sw_setconf),
DEVMETHOD(etherswitch_getconf, e6060sw_getconf),
DEVMETHOD_END
};
DEFINE_CLASS_0(e6060sw, e6060sw_driver, e6060sw_methods,
sizeof(struct e6060sw_softc));
DRIVER_MODULE(e6060sw, mdio, e6060sw_driver, 0, 0);
DRIVER_MODULE(miibus, e6060sw, miibus_driver, 0, 0);
DRIVER_MODULE(mdio, e6060sw, mdio_driver, 0, 0);
DRIVER_MODULE(etherswitch, e6060sw, etherswitch_driver, 0, 0);
MODULE_VERSION(e6060sw, 1);
MODULE_DEPEND(e6060sw, miibus, 1, 1, 1); /* XXX which versions? */
MODULE_DEPEND(e6060sw, etherswitch, 1, 1, 1); /* XXX which versions? */
diff --git a/sys/dev/etherswitch/felix/felix.c b/sys/dev/etherswitch/felix/felix.c
index 92d654961f50..6e9bc29e5757 100644
--- a/sys/dev/etherswitch/felix/felix.c
+++ b/sys/dev/etherswitch/felix/felix.c
@@ -1,1009 +1,1006 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2021 Alstom Group.
* Copyright (c) 2021 Semihalf.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/rman.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <dev/enetc/enetc_mdio.h>
#include <dev/etherswitch/etherswitch.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/etherswitch/felix/felix_var.h>
#include <dev/etherswitch/felix/felix_reg.h>
#include "etherswitch_if.h"
#include "miibus_if.h"
MALLOC_DECLARE(M_FELIX);
MALLOC_DEFINE(M_FELIX, "felix", "felix switch");
static device_probe_t felix_probe;
static device_attach_t felix_attach;
static device_detach_t felix_detach;
static etherswitch_info_t* felix_getinfo(device_t);
static int felix_getconf(device_t, etherswitch_conf_t *);
static int felix_setconf(device_t, etherswitch_conf_t *);
static void felix_lock(device_t);
static void felix_unlock(device_t);
static int felix_getport(device_t, etherswitch_port_t *);
static int felix_setport(device_t, etherswitch_port_t *);
static int felix_readreg_wrapper(device_t, int);
static int felix_writereg_wrapper(device_t, int, int);
static int felix_readphy(device_t, int, int);
static int felix_writephy(device_t, int, int, int);
static int felix_setvgroup(device_t, etherswitch_vlangroup_t *);
static int felix_getvgroup(device_t, etherswitch_vlangroup_t *);
static int felix_parse_port_fdt(felix_softc_t, phandle_t, int *);
static int felix_setup(felix_softc_t);
static void felix_setup_port(felix_softc_t, int);
static void felix_tick(void *);
static int felix_ifmedia_upd(if_t);
static void felix_ifmedia_sts(if_t, struct ifmediareq *);
static void felix_get_port_cfg(felix_softc_t, etherswitch_port_t *);
static void felix_set_port_cfg(felix_softc_t, etherswitch_port_t *);
static bool felix_is_phyport(felix_softc_t, int);
static struct mii_data *felix_miiforport(felix_softc_t, unsigned int);
static struct felix_pci_id felix_pci_ids[] = {
{PCI_VENDOR_FREESCALE, FELIX_DEV_ID, FELIX_DEV_NAME},
{0, 0, NULL}
};
static device_method_t felix_methods[] = {
/* device interface */
DEVMETHOD(device_probe, felix_probe),
DEVMETHOD(device_attach, felix_attach),
DEVMETHOD(device_detach, felix_detach),
/* bus interface */
DEVMETHOD(bus_add_child, device_add_child_ordered),
DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
DEVMETHOD(bus_release_resource, bus_generic_release_resource),
DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource),
/* etherswitch interface */
DEVMETHOD(etherswitch_getinfo, felix_getinfo),
DEVMETHOD(etherswitch_getconf, felix_getconf),
DEVMETHOD(etherswitch_setconf, felix_setconf),
DEVMETHOD(etherswitch_lock, felix_lock),
DEVMETHOD(etherswitch_unlock, felix_unlock),
DEVMETHOD(etherswitch_getport, felix_getport),
DEVMETHOD(etherswitch_setport, felix_setport),
DEVMETHOD(etherswitch_readreg, felix_readreg_wrapper),
DEVMETHOD(etherswitch_writereg, felix_writereg_wrapper),
DEVMETHOD(etherswitch_readphyreg, felix_readphy),
DEVMETHOD(etherswitch_writephyreg, felix_writephy),
DEVMETHOD(etherswitch_setvgroup, felix_setvgroup),
DEVMETHOD(etherswitch_getvgroup, felix_getvgroup),
/* miibus interface */
DEVMETHOD(miibus_readreg, felix_readphy),
DEVMETHOD(miibus_writereg, felix_writephy),
DEVMETHOD_END
};
DEFINE_CLASS_0(felix, felix_driver, felix_methods,
sizeof(struct felix_softc));
DRIVER_MODULE_ORDERED(felix, pci, felix_driver, NULL, NULL, SI_ORDER_ANY);
DRIVER_MODULE(miibus, felix, miibus_fdt_driver, NULL, NULL);
DRIVER_MODULE(etherswitch, felix, etherswitch_driver, NULL, NULL);
MODULE_VERSION(felix, 1);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, felix,
felix_pci_ids, nitems(felix_pci_ids) - 1);
/* Match the device against the PCI vendor/device ID table. */
static int
felix_probe(device_t dev)
{
    struct felix_pci_id *id;
    felix_softc_t sc;

    sc = device_get_softc(dev);
    /* Stashed here already so later stages can use it. */
    sc->dev = dev;

    for (id = felix_pci_ids; id->vendor != 0; ++id) {
        if (pci_get_device(dev) != id->device ||
            pci_get_vendor(dev) != id->vendor)
            continue;

        device_set_desc(dev, id->desc);
        return (BUS_PROBE_DEFAULT);
    }

    return (ENXIO);
}
/*
 * Parse one FDT port node: read the port index from "reg", flag CPU
 * ports (those carrying an "ethernet" property) and, for "fixed-link"
 * ports, translate the fixed link parameters into an ifmedia word.
 * Returns 0, or ENXIO for a malformed node.
 */
static int
felix_parse_port_fdt(felix_softc_t sc, phandle_t child, int *pport)
{
    uint32_t port, status;
    phandle_t node;

    if (OF_getencprop(child, "reg", (void *)&port, sizeof(port)) < 0) {
        device_printf(sc->dev, "Port node doesn't have reg property\n");
        return (ENXIO);
    }

    *pport = port;

    /* Presence of an "ethernet" property marks the CPU-facing port
     * (the property length is used purely as an existence check). */
    node = OF_getproplen(child, "ethernet");
    if (node <= 0)
        sc->ports[port].cpu_port = false;
    else
        sc->ports[port].cpu_port = true;

    node = ofw_bus_find_child(child, "fixed-link");
    if (node <= 0) {
        sc->ports[port].fixed_port = false;
        return (0);
    }

    sc->ports[port].fixed_port = true;

    if (OF_getencprop(node, "speed", &status, sizeof(status)) <= 0) {
        device_printf(sc->dev,
            "Port has fixed-link node without link speed specified\n");
        return (ENXIO);
    }

    /* Map the numeric speed onto an ifmedia subtype. */
    switch (status) {
    case 2500:
        status = IFM_2500_T;
        break;
    case 1000:
        status = IFM_1000_T;
        break;
    case 100:
        status = IFM_100_T;
        break;
    case 10:
        status = IFM_10_T;
        break;
    default:
        device_printf(sc->dev,
            "Unsupported link speed value of %d\n",
            status);
        return (ENXIO);
    }

    if (OF_hasprop(node, "full-duplex"))
        status |= IFM_FDX;
    else
        status |= IFM_HDX;

    status |= IFM_ETHER;
    sc->ports[port].fixed_link_status = status;
    return (0);
}
/*
 * Allocate the dummy ifnet (and its name) that the miibus code needs
 * for the given port.  Returns 0 or ENOMEM.
 */
static int
felix_init_interface(felix_softc_t sc, int port)
{
    char name[IFNAMSIZ];

    snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->dev));

    sc->ports[port].ifp = if_alloc(IFT_ETHER);
-	if (sc->ports[port].ifp == NULL)
-		return (ENOMEM);
-
    if_setsoftc(sc->ports[port].ifp, sc);
    if_setflags(sc->ports[port].ifp, IFF_UP | IFF_BROADCAST | IFF_MULTICAST |
        IFF_DRV_RUNNING | IFF_SIMPLEX);

    /* Name storage must outlive the ifnet; free the ifnet on failure. */
    sc->ports[port].ifname = malloc(strlen(name) + 1, M_FELIX, M_NOWAIT);
    if (sc->ports[port].ifname == NULL) {
        if_free(sc->ports[port].ifp);
        return (ENOMEM);
    }

    memcpy(sc->ports[port].ifname, name, strlen(name) + 1);
    if_initname(sc->ports[port].ifp, sc->ports[port].ifname, port);
    return (0);
}
/*
 * Program the per-port MAC/clock/queue-system registers so that the
 * port forwards traffic: fixed 1000 clock setting, TX/RX MAC enable,
 * QSYS port enable and VLAN length-aware frame acceptance.
 */
static void
felix_setup_port(felix_softc_t sc, int port)
{
/* Link speed has to be always set to 1000 in the clock register. */
FELIX_DEVGMII_PORT_WR4(sc, port, FELIX_DEVGMII_CLK_CFG,
FELIX_DEVGMII_CLK_CFG_SPEED_1000);
FELIX_DEVGMII_PORT_WR4(sc, port, FELIX_DEVGMII_MAC_CFG,
FELIX_DEVGMII_MAC_CFG_TX_ENA | FELIX_DEVGMII_MAC_CFG_RX_ENA);
FELIX_WR4(sc, FELIX_QSYS_PORT_MODE(port),
FELIX_QSYS_PORT_MODE_PORT_ENA);
/*
* Enable "VLANMTU". Each port has a configurable MTU.
* Accept frames that are 8 and 4 bytes longer than it
* for double and single tagged frames respectively.
* Since etherswitch API doesn't provide an option to change
* MTU don't touch it for now.
*/
FELIX_DEVGMII_PORT_WR4(sc, port, FELIX_DEVGMII_VLAN_CFG,
FELIX_DEVGMII_VLAN_CFG_ENA |
FELIX_DEVGMII_VLAN_CFG_LEN_ENA |
FELIX_DEVGMII_VLAN_CFG_DOUBLE_ENA);
}
/*
 * Soft-reset the switch core, wait for internal RAM initialization,
 * enable the core, and program every port via felix_setup_port().
 * Returns 0 on success or ETIMEDOUT if the hardware did not make
 * progress within FELIX_INIT_TIMEOUT milliseconds.
 */
static int
felix_setup(felix_softc_t sc)
{
	int timeout, i;
	uint32_t reg;

	/* Trigger soft reset, bit is self-clearing, with 5s timeout. */
	FELIX_WR4(sc, FELIX_DEVCPU_GCB_RST, FELIX_DEVCPU_GCB_RST_EN);
	timeout = FELIX_INIT_TIMEOUT;
	do {
		DELAY(1000);
		reg = FELIX_RD4(sc, FELIX_DEVCPU_GCB_RST);
		if ((reg & FELIX_DEVCPU_GCB_RST_EN) == 0)
			break;
	} while (timeout-- > 0);
	/*
	 * The post-decrement in the loop condition leaves timeout at -1
	 * when the loop runs to exhaustion (and a break on the very last
	 * pass leaves it at 0), so expiry must be detected with "< 0".
	 * The previous "== 0" test both missed real timeouts and
	 * misreported success on the final iteration.
	 */
	if (timeout < 0) {
		device_printf(sc->dev,
		    "Timeout while waiting for switch to reset\n");
		return (ETIMEDOUT);
	}

	FELIX_WR4(sc, FELIX_SYS_RAM_CTRL, FELIX_SYS_RAM_CTRL_INIT);
	timeout = FELIX_INIT_TIMEOUT;
	do {
		DELAY(1000);
		reg = FELIX_RD4(sc, FELIX_SYS_RAM_CTRL);
		if ((reg & FELIX_SYS_RAM_CTRL_INIT) == 0)
			break;
	} while (timeout-- > 0);
	if (timeout < 0) {
		device_printf(sc->dev,
		    "Timeout while waiting for switch RAM init.\n");
		return (ETIMEDOUT);
	}

	FELIX_WR4(sc, FELIX_SYS_CFG, FELIX_SYS_CFG_CORE_EN);
	for (i = 0; i < sc->info.es_nports; i++)
		felix_setup_port(sc, i);
	return (0);
}
/*
 * Sysctl handler for the "timer_ticks" knob: validates the new tick
 * interval and reschedules the PHY-poll callout under the softc lock.
 */
static int
felix_timer_rate(SYSCTL_HANDLER_ARGS)
{
	felix_softc_t sc;
	int err, ticks, prev;

	sc = arg1;
	prev = ticks = sc->timer_ticks;
	err = sysctl_handle_int(oidp, &ticks, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);

	/* Negative intervals make no sense; unchanged values are a no-op. */
	if (ticks < 0)
		return (EINVAL);
	if (ticks == prev)
		return (0);

	FELIX_LOCK(sc);
	sc->timer_ticks = ticks;
	callout_reset(&sc->tick_callout, sc->timer_ticks, felix_tick, sc);
	FELIX_UNLOCK(sc);
	return (0);
}
/*
 * Device attach: map the MDIO and switch-register PCI BARs, walk the
 * FDT "ports" container creating a pseudo ifnet (and, for PHY ports,
 * an MII attachment) per enabled port, reset/program the switch, and
 * start the periodic PHY-poll callout.  On any failure falls through
 * to felix_detach() for cleanup.
 */
static int
felix_attach(device_t dev)
{
phandle_t child, ports, node;
int error, port, rid;
felix_softc_t sc;
uint32_t phy_addr;
ssize_t size;
sc = device_get_softc(dev);
sc->info.es_nports = 0;
sc->info.es_vlan_caps = ETHERSWITCH_VLAN_DOT1Q;
strlcpy(sc->info.es_name, "Felix TSN Switch", sizeof(sc->info.es_name));
rid = PCIR_BAR(FELIX_BAR_MDIO);
sc->mdio = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (sc->mdio == NULL) {
device_printf(dev, "Failed to allocate MDIO registers.\n");
return (ENXIO);
}
rid = PCIR_BAR(FELIX_BAR_REGS);
sc->regs = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (sc->regs == NULL) {
device_printf(dev, "Failed to allocate registers BAR.\n");
error = ENXIO;
goto out_fail;
}
mtx_init(&sc->mtx, "felix lock", NULL, MTX_DEF);
callout_init_mtx(&sc->tick_callout, &sc->mtx, 0);
node = ofw_bus_get_node(dev);
if (node <= 0) {
error = ENXIO;
goto out_fail;
}
ports = ofw_bus_find_child(node, "ports");
if (ports == 0) {
device_printf(dev,
"Failed to find \"ports\" property in DTS.\n");
error = ENXIO;
goto out_fail;
}
for (child = OF_child(ports); child != 0; child = OF_peer(child)) {
/* Do not parse disabled ports. */
if (ofw_bus_node_status_okay(child) == 0)
continue;
error = felix_parse_port_fdt(sc, child, &port);
if (error != 0)
goto out_fail;
error = felix_init_interface(sc, port);
if (error != 0) {
device_printf(sc->dev,
"Failed to initialize interface.\n");
goto out_fail;
}
/* Fixed-link ports have no PHY; skip the MII attachment. */
if (sc->ports[port].fixed_port) {
sc->info.es_nports++;
continue;
}
size = OF_getencprop(child, "phy-handle", &node, sizeof(node));
if (size <= 0) {
device_printf(sc->dev,
"Failed to acquire PHY handle from FDT.\n");
error = ENXIO;
goto out_fail;
}
node = OF_node_from_xref(node);
size = OF_getencprop(node, "reg", &phy_addr, sizeof(phy_addr));
if (size <= 0) {
device_printf(sc->dev,
"Failed to obtain PHY address.\n");
error = ENXIO;
goto out_fail;
}
sc->ports[port].phyaddr = phy_addr;
sc->ports[port].miibus = NULL;
error = mii_attach(dev, &sc->ports[port].miibus, sc->ports[port].ifp,
felix_ifmedia_upd, felix_ifmedia_sts, BMSR_DEFCAPMASK,
phy_addr, MII_OFFSET_ANY, 0);
if (error != 0)
goto out_fail;
sc->info.es_nports++;
}
error = felix_setup(sc);
if (error != 0)
goto out_fail;
sc->timer_ticks = hz; /* Default to 1s. */
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "timer_ticks", CTLTYPE_INT | CTLFLAG_RW,
sc, 0, felix_timer_rate, "I",
"Number of ticks between timer invocations");
/* The tick routine has to be called with the lock held. */
FELIX_LOCK(sc);
felix_tick(sc);
FELIX_UNLOCK(sc);
/* Allow etherswitch to attach as our child. */
bus_generic_probe(dev);
bus_generic_attach(dev);
return (0);
out_fail:
felix_detach(dev);
return (error);
}
/*
 * Device detach: stop the callout, optionally soft-reset the switch so
 * it is left in unmanaged mode, free per-port resources, and release
 * both PCI BARs.  Also used as the error-unwind path of felix_attach().
 */
static int
felix_detach(device_t dev)
{
	felix_softc_t sc;
	int error, err2;
	int i;

	error = 0;
	sc = device_get_softc(dev);
	bus_generic_detach(dev);

	mtx_lock(&sc->mtx);
	callout_stop(&sc->tick_callout);
	mtx_unlock(&sc->mtx);
	mtx_destroy(&sc->mtx);

	/*
	 * If we have been fully attached do a soft reset.
	 * This way after when driver is unloaded switch is left in unmanaged mode.
	 */
	if (device_is_attached(dev))
		felix_setup(sc);

	for (i = 0; i < sc->info.es_nports; i++) {
		if (sc->ports[i].miibus != NULL)
			device_delete_child(dev, sc->ports[i].miibus);
		if (sc->ports[i].ifp != NULL)
			if_free(sc->ports[i].ifp);
		if (sc->ports[i].ifname != NULL)
			free(sc->ports[i].ifname, M_FELIX);
	}

	if (sc->regs != NULL)
		error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->regs), sc->regs);
	if (sc->mdio != NULL) {
		/*
		 * Release the MDIO BAR unconditionally but don't let a
		 * successful release overwrite an earlier failure; the
		 * first error encountered is the one reported.
		 */
		err2 = bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mdio), sc->mdio);
		if (error == 0)
			error = err2;
	}

	return (error);
}
/* Return the static switch-capability description for etherswitch(4). */
static etherswitch_info_t*
felix_getinfo(device_t dev)
{
	felix_softc_t sc = device_get_softc(dev);

	return (&sc->info);
}
/* Report the currently configured VLAN mode to etherswitch(4). */
static int
felix_getconf(device_t dev, etherswitch_conf_t *conf)
{
	felix_softc_t sc = device_get_softc(dev);

	conf->cmd = ETHERSWITCH_CONF_VLAN_MODE;
	conf->vlan_mode = sc->vlan_mode;
	return (0);
}
/*
 * Reset the hardware VLAN table and the software shadow (sc->vlans),
 * then make every port without a PVID classify frames by their tag VID.
 * Returns 0 on success or ETIMEDOUT if the table reset does not finish.
 */
static int
felix_init_vlan(felix_softc_t sc)
{
	int timeout = FELIX_INIT_TIMEOUT;
	uint32_t reg;
	int i;

	/* Flush VLAN table in hardware. */
	FELIX_WR4(sc, FELIX_ANA_VT, FELIX_ANA_VT_RESET);
	do {
		DELAY(1000);
		reg = FELIX_RD4(sc, FELIX_ANA_VT);
		if ((reg & FELIX_ANA_VT_STS) == FELIX_ANA_VT_IDLE)
			break;
	} while (timeout-- > 0);
	/*
	 * The post-decrement leaves timeout at -1 when the loop expires
	 * (and 0 after a break on the final pass), so "< 0" is the
	 * correct expiry test; "== 0" could never trigger on a real
	 * timeout.
	 */
	if (timeout < 0) {
		device_printf(sc->dev,
		    "Timeout during VLAN table reset.\n");
		return (ETIMEDOUT);
	}

	/* Flush VLAN table in sc. */
	for (i = 0; i < sc->info.es_nvlangroups; i++)
		sc->vlans[i] = 0;

	/*
	 * Make all ports VLAN aware.
	 * Read VID from incoming frames and use it for port grouping
	 * purposes.
	 * Don't set this if pvid is set.
	 */
	for (i = 0; i < sc->info.es_nports; i++) {
		reg = FELIX_ANA_PORT_RD4(sc, i, FELIX_ANA_PORT_VLAN_CFG);
		if ((reg & FELIX_ANA_PORT_VLAN_CFG_VID_MASK) != 0)
			continue;

		reg |= FELIX_ANA_PORT_VLAN_CFG_VID_AWARE;
		FELIX_ANA_PORT_WR4(sc, i, FELIX_ANA_PORT_VLAN_CFG, reg);
	}
	return (0);
}
/*
 * Set the switch configuration.  Only 802.1Q VLAN mode is supported;
 * selecting it (re)initializes the VLAN table.
 */
static int
felix_setconf(device_t dev, etherswitch_conf_t *conf)
{
	felix_softc_t sc = device_get_softc(dev);
	int err = 0;

	FELIX_LOCK(sc);
	if ((conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) != 0) {
		if (conf->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {
			sc->vlan_mode = ETHERSWITCH_VLAN_DOT1Q;
			sc->info.es_nvlangroups = FELIX_NUM_VLANS;
			err = felix_init_vlan(sc);
		} else {
			err = EINVAL;
		}
	}
	FELIX_UNLOCK(sc);
	return (err);
}
/* etherswitch(4) lock hook: take the softc mutex. */
static void
felix_lock(device_t dev)
{
	felix_softc_t sc = device_get_softc(dev);

	FELIX_LOCK_ASSERT(sc, MA_NOTOWNED);
	FELIX_LOCK(sc);
}
/* etherswitch(4) unlock hook: drop the softc mutex. */
static void
felix_unlock(device_t dev)
{
	felix_softc_t sc = device_get_softc(dev);

	FELIX_LOCK_ASSERT(sc, MA_OWNED);
	FELIX_UNLOCK(sc);
}
/*
 * Translate the port's hardware drop/tag/VLAN registers into
 * ETHERSWITCH_PORT_* flags and the PVID in *p.
 */
static void
felix_get_port_cfg(felix_softc_t sc, etherswitch_port_t *p)
{
uint32_t reg;
p->es_flags = 0;
reg = FELIX_ANA_PORT_RD4(sc, p->es_port, FELIX_ANA_PORT_DROP_CFG);
if (reg & FELIX_ANA_PORT_DROP_CFG_TAGGED)
p->es_flags |= ETHERSWITCH_PORT_DROPTAGGED;
if (reg & FELIX_ANA_PORT_DROP_CFG_UNTAGGED)
p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED;
reg = FELIX_DEVGMII_PORT_RD4(sc, p->es_port, FELIX_DEVGMII_VLAN_CFG);
if (reg & FELIX_DEVGMII_VLAN_CFG_DOUBLE_ENA)
p->es_flags |= ETHERSWITCH_PORT_DOUBLE_TAG;
reg = FELIX_REW_PORT_RD4(sc, p->es_port, FELIX_REW_PORT_TAG_CFG);
if (reg & FELIX_REW_PORT_TAG_CFG_ALL)
p->es_flags |= ETHERSWITCH_PORT_ADDTAG;
reg = FELIX_ANA_PORT_RD4(sc, p->es_port, FELIX_ANA_PORT_VLAN_CFG);
if (reg & FELIX_ANA_PORT_VLAN_CFG_POP)
p->es_flags |= ETHERSWITCH_PORT_STRIPTAGINGRESS;
/* Low bits of VLAN_CFG hold the port VID. */
p->es_pvid = reg & FELIX_ANA_PORT_VLAN_CFG_VID_MASK;
}
/*
 * etherswitch(4) getport: fill *p with the port's flags/PVID and its
 * media status.  Fixed-link ports report the ifmedia word synthesized
 * at attach time; PHY ports go through ifmedia_ioctl(SIOCGIFMEDIA).
 */
static int
felix_getport(device_t dev, etherswitch_port_t *p)
{
struct ifmediareq *ifmr;
struct mii_data *mii;
felix_softc_t sc;
int error;
error = 0;
sc = device_get_softc(dev);
FELIX_LOCK_ASSERT(sc, MA_NOTOWNED);
if (p->es_port >= sc->info.es_nports || p->es_port < 0)
return (EINVAL);
FELIX_LOCK(sc);
felix_get_port_cfg(sc, p);
if (sc->ports[p->es_port].fixed_port) {
/* Fixed links are always up at their configured media. */
ifmr = &p->es_ifmr;
ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
ifmr->ifm_count = 0;
ifmr->ifm_active = sc->ports[p->es_port].fixed_link_status;
ifmr->ifm_current = ifmr->ifm_active;
ifmr->ifm_mask = 0;
} else {
mii = felix_miiforport(sc, p->es_port);
error = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr,
&mii->mii_media, SIOCGIFMEDIA);
}
FELIX_UNLOCK(sc);
return (error);
}
/*
 * Program the port's drop/tag/VLAN registers from the
 * ETHERSWITCH_PORT_* flags and PVID in *p.
 */
static void
felix_set_port_cfg(felix_softc_t sc, etherswitch_port_t *p)
{
uint32_t reg;
reg = FELIX_ANA_PORT_RD4(sc, p->es_port, FELIX_ANA_PORT_DROP_CFG);
if (p->es_flags & ETHERSWITCH_PORT_DROPTAGGED)
reg |= FELIX_ANA_PORT_DROP_CFG_TAGGED;
else
reg &= ~FELIX_ANA_PORT_DROP_CFG_TAGGED;
if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED)
reg |= FELIX_ANA_PORT_DROP_CFG_UNTAGGED;
else
reg &= ~FELIX_ANA_PORT_DROP_CFG_UNTAGGED;
FELIX_ANA_PORT_WR4(sc, p->es_port, FELIX_ANA_PORT_DROP_CFG, reg);
reg = FELIX_REW_PORT_RD4(sc, p->es_port, FELIX_REW_PORT_TAG_CFG);
if (p->es_flags & ETHERSWITCH_PORT_ADDTAG)
reg |= FELIX_REW_PORT_TAG_CFG_ALL;
else
reg &= ~FELIX_REW_PORT_TAG_CFG_ALL;
FELIX_REW_PORT_WR4(sc, p->es_port, FELIX_REW_PORT_TAG_CFG, reg);
reg = FELIX_ANA_PORT_RD4(sc, p->es_port, FELIX_ANA_PORT_VLAN_CFG);
if (p->es_flags & ETHERSWITCH_PORT_STRIPTAGINGRESS)
reg |= FELIX_ANA_PORT_VLAN_CFG_POP;
else
reg &= ~FELIX_ANA_PORT_VLAN_CFG_POP;
/* Replace the old PVID with the requested one. */
reg &= ~FELIX_ANA_PORT_VLAN_CFG_VID_MASK;
reg |= p->es_pvid & FELIX_ANA_PORT_VLAN_CFG_VID_MASK;
/*
* If port VID is set use it for VLAN classification,
* instead of frame VID.
* By default the frame tag takes precedence.
* Force the switch to ignore it.
*/
if (p->es_pvid != 0)
reg &= ~FELIX_ANA_PORT_VLAN_CFG_VID_AWARE;
else
reg |= FELIX_ANA_PORT_VLAN_CFG_VID_AWARE;
FELIX_ANA_PORT_WR4(sc, p->es_port, FELIX_ANA_PORT_VLAN_CFG, reg);
}
/*
 * etherswitch(4) setport: apply the flags/PVID in *p to the hardware
 * and, for PHY-backed ports, forward the media request to MII.
 */
static int
felix_setport(device_t dev, etherswitch_port_t *p)
{
	struct mii_data *mii;
	felix_softc_t sc;
	int err = 0;

	sc = device_get_softc(dev);
	FELIX_LOCK_ASSERT(sc, MA_NOTOWNED);

	if (p->es_port < 0 || p->es_port >= sc->info.es_nports)
		return (EINVAL);

	FELIX_LOCK(sc);
	felix_set_port_cfg(sc, p);
	if (felix_is_phyport(sc, p->es_port)) {
		mii = felix_miiforport(sc, p->es_port);
		err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media,
		    SIOCSIFMEDIA);
	}
	FELIX_UNLOCK(sc);
	return (err);
}
/*
 * Debug register-read hook for etherswitch(4).  Returns the 32-bit
 * register value, or UINT32_MAX for an out-of-range offset (the
 * interface has no way to report errors).
 */
static int
felix_readreg_wrapper(device_t dev, int addr_reg)
{
	felix_softc_t sc;

	sc = device_get_softc(dev);
	/*
	 * Reject negative offsets and require the whole 4-byte access to
	 * fit inside the register window.  The old "addr_reg > size" test
	 * admitted addr_reg == size and offsets within 3 bytes of the end,
	 * letting the read run past the mapped BAR.
	 */
	if (addr_reg < 0 ||
	    (rman_res_t)addr_reg + sizeof(uint32_t) > rman_get_size(sc->regs))
		return (UINT32_MAX); /* Can't return errors here. */

	return (FELIX_RD4(sc, addr_reg));
}
/*
 * Debug register-write hook for etherswitch(4).
 * Returns 0 on success or EINVAL for an out-of-range offset.
 */
static int
felix_writereg_wrapper(device_t dev, int addr_reg, int val)
{
	felix_softc_t sc;

	sc = device_get_softc(dev);
	/*
	 * As in felix_readreg_wrapper(): disallow negative offsets and
	 * make sure the full 4-byte store lies inside the register window
	 * (the previous "addr_reg > size" check was off by up to 4 bytes).
	 */
	if (addr_reg < 0 ||
	    (rman_res_t)addr_reg + sizeof(uint32_t) > rman_get_size(sc->regs))
		return (EINVAL);

	FELIX_WR4(sc, addr_reg, val);
	return (0);
}
/* MII read hook: forward to the ENETC central MDIO controller. */
static int
felix_readphy(device_t dev, int phy, int reg)
{
	felix_softc_t sc = device_get_softc(dev);

	return (enetc_mdio_read(sc->mdio, FELIX_MDIO_BASE, phy, reg));
}
/* MII write hook: forward to the ENETC central MDIO controller. */
static int
felix_writephy(device_t dev, int phy, int reg, int data)
{
	felix_softc_t sc = device_get_softc(dev);

	return (enetc_mdio_write(sc->mdio, FELIX_MDIO_BASE, phy, reg, data));
}
/*
 * Program one 802.1Q VLAN group into the hardware VLAN table.
 * The hardware table is indexed by VID, so the software shadow
 * sc->vlans[] maps etherswitch group number -> VID.  A VID of 0
 * deletes the group.  Tagged-member configurations are rejected.
 * Called with the softc lock held.
 */
static int
felix_set_dot1q_vlan(felix_softc_t sc, etherswitch_vlangroup_t *vg)
{
uint32_t reg;
int i, vid;
vid = vg->es_vid & ETHERSWITCH_VID_MASK;
/* Tagged mode is not supported. */
if (vg->es_member_ports != vg->es_untagged_ports)
return (EINVAL);
/*
* Hardware support 4096 groups, but we can't do group_id == vid.
* Note that hw_group_id == vid.
*/
if (vid == 0) {
/* Clear VLAN table entry using old VID. */
FELIX_WR4(sc, FELIX_ANA_VTIDX, sc->vlans[vg->es_vlangroup]);
FELIX_WR4(sc, FELIX_ANA_VT, FELIX_ANA_VT_WRITE);
sc->vlans[vg->es_vlangroup] = 0;
return (0);
}
/* The VID is already used in a different group. */
for (i = 0; i < sc->info.es_nvlangroups; i++)
if (i != vg->es_vlangroup && vid == sc->vlans[i])
return (EINVAL);
/* This group already uses a different VID. */
if (sc->vlans[vg->es_vlangroup] != 0 &&
sc->vlans[vg->es_vlangroup] != vid)
return (EINVAL);
sc->vlans[vg->es_vlangroup] = vid;
/* Assign members to the given group. */
reg = vg->es_member_ports & FELIX_ANA_VT_PORTMASK_MASK;
reg <<= FELIX_ANA_VT_PORTMASK_SHIFT;
reg |= FELIX_ANA_VT_WRITE;
FELIX_WR4(sc, FELIX_ANA_VTIDX, vid);
FELIX_WR4(sc, FELIX_ANA_VT, reg);
/*
* According to documentation read and write commands
* are instant.
* Add a small delay just to be safe.
*/
mb();
DELAY(100);
reg = FELIX_RD4(sc, FELIX_ANA_VT);
if ((reg & FELIX_ANA_VT_STS) != FELIX_ANA_VT_IDLE)
return (ENXIO);
return (0);
}
/* etherswitch(4) setvgroup: only valid in 802.1Q mode. */
static int
felix_setvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
	felix_softc_t sc = device_get_softc(dev);
	int err;

	FELIX_LOCK(sc);
	err = (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) ?
	    felix_set_dot1q_vlan(sc, vg) : EINVAL;
	FELIX_UNLOCK(sc);
	return (err);
}
/*
 * Read one 802.1Q VLAN group back from the hardware VLAN table.
 * A group whose shadow VID is 0 is unused and left untouched in *vg.
 * Called with the softc lock held.
 */
static int
felix_get_dot1q_vlan(felix_softc_t sc, etherswitch_vlangroup_t *vg)
{
uint32_t reg;
int vid;
vid = sc->vlans[vg->es_vlangroup];
if (vid == 0)
return (0);
FELIX_WR4(sc, FELIX_ANA_VTIDX, vid);
FELIX_WR4(sc, FELIX_ANA_VT, FELIX_ANA_VT_READ);
/*
* According to documentation read and write commands
* are instant.
* Add a small delay just to be safe.
*/
mb();
DELAY(100);
reg = FELIX_RD4(sc, FELIX_ANA_VT);
if ((reg & FELIX_ANA_VT_STS) != FELIX_ANA_VT_IDLE)
return (ENXIO);
/* Extract the member port mask from the table word. */
reg >>= FELIX_ANA_VT_PORTMASK_SHIFT;
reg &= FELIX_ANA_VT_PORTMASK_MASK;
vg->es_untagged_ports = vg->es_member_ports = reg;
vg->es_fid = 0;
vg->es_vid = vid | ETHERSWITCH_VID_VALID;
return (0);
}
/* etherswitch(4) getvgroup: only valid in 802.1Q mode. */
static int
felix_getvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
	felix_softc_t sc = device_get_softc(dev);
	int err;

	FELIX_LOCK(sc);
	err = (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) ?
	    felix_get_dot1q_vlan(sc, vg) : EINVAL;
	FELIX_UNLOCK(sc);
	return (err);
}
/*
 * Periodic callout: poll the MII state of every PHY-backed port and
 * reschedule itself unless timer_ticks has been set to 0 via sysctl.
 * Must be called with the softc lock held (callout is mtx-backed).
 */
static void
felix_tick(void *arg)
{
struct mii_data *mii;
felix_softc_t sc;
int port;
sc = arg;
FELIX_LOCK_ASSERT(sc, MA_OWNED);
for (port = 0; port < sc->info.es_nports; port++) {
if (!felix_is_phyport(sc, port))
continue;
mii = felix_miiforport(sc, port);
MPASS(mii != NULL);
mii_tick(mii);
}
/* timer_ticks == 0 disables further polling. */
if (sc->timer_ticks != 0)
callout_reset(&sc->tick_callout, sc->timer_ticks, felix_tick, sc);
}
/* ifmedia change callback for the per-port pseudo ifnets. */
static int
felix_ifmedia_upd(if_t ifp)
{
	felix_softc_t sc = if_getsoftc(ifp);
	struct mii_data *mii;

	mii = felix_miiforport(sc, if_getdunit(ifp));
	if (mii == NULL)
		return (ENXIO);

	mii_mediachg(mii);
	return (0);
}
/* ifmedia status callback for the per-port pseudo ifnets. */
static void
felix_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct mii_data *mii;
	felix_softc_t sc = if_getsoftc(ifp);

	mii = felix_miiforport(sc, if_getdunit(ifp));
	if (mii == NULL)
		return;

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}
/* A port without a fixed link has a PHY attached. */
static bool
felix_is_phyport(felix_softc_t sc, int port)
{

	return (sc->ports[port].fixed_port == false);
}
/* Return the MII softc for a PHY port, or NULL for fixed-link ports. */
static struct mii_data*
felix_miiforport(felix_softc_t sc, unsigned int port)
{

	if (sc->ports[port].fixed_port)
		return (NULL);
	return (device_get_softc(sc->ports[port].miibus));
}
diff --git a/sys/dev/etherswitch/infineon/adm6996fc.c b/sys/dev/etherswitch/infineon/adm6996fc.c
index 95a24a2fb37a..2c6c83a4388d 100644
--- a/sys/dev/etherswitch/infineon/adm6996fc.c
+++ b/sys/dev/etherswitch/infineon/adm6996fc.c
@@ -1,860 +1,854 @@
/*-
* Copyright (c) 2016 Hiroki Mori
* Copyright (c) 2013 Luiz Otavio O Souza.
* Copyright (c) 2011-2012 Stefan Bethke.
* Copyright (c) 2012 Adrian Chadd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* This is Infineon ADM6996FC/M/MX driver code on etherswitch framework.
* Support PORT and DOT1Q VLAN.
* This code suppose ADM6996FC SDC/SDIO connect to SOC network interface
* MDC/MDIO.
* This code development on Netgear WGR614Cv7.
* etherswitchcfg command port option support addtag.
*/
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <machine/bus.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mdio/mdio.h>
#include <dev/etherswitch/etherswitch.h>
#include "mdio_if.h"
#include "miibus_if.h"
#include "etherswitch_if.h"
#define ADM6996FC_PRODUCT_CODE 0x7102
#define ADM6996FC_SC3 0x11
#define ADM6996FC_VF0L 0x40
#define ADM6996FC_VF0H 0x41
#define ADM6996FC_CI0 0xa0
#define ADM6996FC_CI1 0xa1
#define ADM6996FC_PHY_C0 0x200
#define ADM6996FC_PC_SHIFT 4
#define ADM6996FC_TBV_SHIFT 5
#define ADM6996FC_PVID_SHIFT 10
#define ADM6996FC_OPTE_SHIFT 4
#define ADM6996FC_VV_SHIFT 15
#define ADM6996FC_PHY_SIZE 0x20
MALLOC_DECLARE(M_ADM6996FC);
MALLOC_DEFINE(M_ADM6996FC, "adm6996fc", "adm6996fc data structures");
/* Per-device state for the ADM6996FC MDIO switch. */
struct adm6996fc_softc {
struct mtx sc_mtx; /* serialize access to softc */
device_t sc_dev; /* switch device handle */
int vlan_mode; /* current ETHERSWITCH_VLAN_* mode */
int media; /* cpu port media */
int cpuport; /* which PHY is connected to the CPU */
int phymask; /* PHYs we manage */
int numports; /* number of ports */
int ifpport[MII_NPHY]; /* PHY number -> port number */
int *portphy; /* port number -> PHY number */
char **ifname; /* per-port pseudo ifnet names */
device_t **miibus; /* per-port miibus device pointers */
if_t *ifp; /* per-port pseudo ifnets */
struct callout callout_tick; /* periodic PHY status poll */
etherswitch_info_t info; /* capabilities exported to etherswitch(4) */
};
#define ADM6996FC_LOCK(_sc) \
mtx_lock(&(_sc)->sc_mtx)
#define ADM6996FC_UNLOCK(_sc) \
mtx_unlock(&(_sc)->sc_mtx)
#define ADM6996FC_LOCK_ASSERT(_sc, _what) \
mtx_assert(&(_sc)->sc_mtx, (_what))
#define ADM6996FC_TRYLOCK(_sc) \
mtx_trylock(&(_sc)->sc_mtx)
#if defined(DEBUG)
#define DPRINTF(dev, args...) device_printf(dev, args)
#else
#define DPRINTF(dev, args...)
#endif
static inline int adm6996fc_portforphy(struct adm6996fc_softc *, int);
static void adm6996fc_tick(void *);
static int adm6996fc_ifmedia_upd(if_t);
static void adm6996fc_ifmedia_sts(if_t, struct ifmediareq *);
#define ADM6996FC_READREG(dev, x) \
MDIO_READREG(dev, ((x) >> 5), ((x) & 0x1f));
#define ADM6996FC_WRITEREG(dev, x, v) \
MDIO_WRITEREG(dev, ((x) >> 5), ((x) & 0x1f), v);
#define ADM6996FC_PVIDBYDATA(data1, data2) \
((((data1) >> ADM6996FC_PVID_SHIFT) & 0x0f) | ((data2) << 4))
/*
 * Probe: read the chip identifier registers over MDIO and match the
 * ADM6996FC product code.  Also zeroes the softc ahead of attach.
 */
static int
adm6996fc_probe(device_t dev)
{
int data1, data2;
int pc;
struct adm6996fc_softc *sc;
sc = device_get_softc(dev);
bzero(sc, sizeof(*sc));
data1 = ADM6996FC_READREG(device_get_parent(dev), ADM6996FC_CI0);
data2 = ADM6996FC_READREG(device_get_parent(dev), ADM6996FC_CI1);
/* Product code spans both identifier registers. */
pc = ((data2 << 16) | data1) >> ADM6996FC_PC_SHIFT;
if (bootverbose)
device_printf(dev,"Chip Identifier Register %x %x\n", data1,
data2);
/* check Product Code */
if (pc != ADM6996FC_PRODUCT_CODE) {
return (ENXIO);
}
device_set_desc(dev, "Infineon ADM6996FC/M/MX MDIO switch driver");
return (BUS_PROBE_DEFAULT);
}
/*
 * Create a pseudo ifnet per managed PHY and attach MII to each,
 * building the phy<->port mappings as it goes.  The CPU port (if any)
 * is appended as the last port without an MII attachment.  On failure
 * tears down everything created so far and returns the error.
 */
static int
adm6996fc_attach_phys(struct adm6996fc_softc *sc)
{
int phy, port, err;
char name[IFNAMSIZ];
port = 0;
err = 0;
/* PHYs need an interface, so we generate a dummy one */
snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->sc_dev));
for (phy = 0; phy < sc->numports; phy++) {
if (((1 << phy) & sc->phymask) == 0)
continue;
sc->ifpport[phy] = port;
sc->portphy[port] = phy;
sc->ifp[port] = if_alloc(IFT_ETHER);
- if (sc->ifp[port] == NULL) {
- device_printf(sc->sc_dev, "couldn't allocate ifnet structure\n");
- err = ENOMEM;
- break;
- }
-
sc->ifp[port]->if_softc = sc;
sc->ifp[port]->if_flags |= IFF_UP | IFF_BROADCAST |
IFF_DRV_RUNNING | IFF_SIMPLEX;
if_initname(sc->ifp[port], name, port);
/* NOTE(review): M_WAITOK allocations do not fail, so the
   NULL check below is defensive only. */
sc->miibus[port] = malloc(sizeof(device_t), M_ADM6996FC,
M_WAITOK | M_ZERO);
if (sc->miibus[port] == NULL) {
err = ENOMEM;
goto failed;
}
err = mii_attach(sc->sc_dev, sc->miibus[port], sc->ifp[port],
adm6996fc_ifmedia_upd, adm6996fc_ifmedia_sts, \
BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
DPRINTF(sc->sc_dev, "%s attached to pseudo interface %s\n",
device_get_nameunit(*sc->miibus[port]),
sc->ifp[port]->if_xname);
if (err != 0) {
device_printf(sc->sc_dev,
"attaching PHY %d failed\n",
phy);
goto failed;
}
++port;
}
sc->info.es_nports = port;
if (sc->cpuport != -1) {
/* assume cpuport is last one */
sc->ifpport[sc->cpuport] = port;
sc->portphy[port] = sc->cpuport;
++sc->info.es_nports;
}
return (0);
failed:
/* Unwind every port created before the failure. */
for (phy = 0; phy < sc->numports; phy++) {
if (((1 << phy) & sc->phymask) == 0)
continue;
port = adm6996fc_portforphy(sc, phy);
if (sc->miibus[port] != NULL)
device_delete_child(sc->sc_dev, (*sc->miibus[port]));
if (sc->ifp[port] != NULL)
if_free(sc->ifp[port]);
if (sc->ifname[port] != NULL)
free(sc->ifname[port], M_ADM6996FC);
if (sc->miibus[port] != NULL)
free(sc->miibus[port], M_ADM6996FC);
}
return (err);
}
/*
 * Attach: initialize the softc with the fixed ADM6996FC topology
 * (6 ports, PHYs 0-4, CPU on port 5), allocate the per-port arrays,
 * attach the PHYs, enumerate children, and start the poll callout.
 */
static int
adm6996fc_attach(device_t dev)
{
struct adm6996fc_softc *sc;
int err;
err = 0;
sc = device_get_softc(dev);
sc->sc_dev = dev;
mtx_init(&sc->sc_mtx, "adm6996fc", NULL, MTX_DEF);
strlcpy(sc->info.es_name, device_get_desc(dev),
sizeof(sc->info.es_name));
/* ADM6996FC Defaults */
sc->numports = 6;
sc->phymask = 0x1f;
sc->cpuport = 5;
sc->media = 100;
sc->info.es_nvlangroups = 16;
sc->info.es_vlan_caps = ETHERSWITCH_VLAN_PORT | ETHERSWITCH_VLAN_DOT1Q;
sc->ifp = malloc(sizeof(if_t) * sc->numports, M_ADM6996FC,
M_WAITOK | M_ZERO);
sc->ifname = malloc(sizeof(char *) * sc->numports, M_ADM6996FC,
M_WAITOK | M_ZERO);
sc->miibus = malloc(sizeof(device_t *) * sc->numports, M_ADM6996FC,
M_WAITOK | M_ZERO);
sc->portphy = malloc(sizeof(int) * sc->numports, M_ADM6996FC,
M_WAITOK | M_ZERO);
/* NOTE(review): M_WAITOK cannot return NULL; check kept defensively. */
if (sc->ifp == NULL || sc->ifname == NULL || sc->miibus == NULL ||
sc->portphy == NULL) {
err = ENOMEM;
goto failed;
}
/*
* Attach the PHYs and complete the bus enumeration.
*/
err = adm6996fc_attach_phys(sc);
if (err != 0)
goto failed;
bus_generic_probe(dev);
bus_enumerate_hinted_children(dev);
err = bus_generic_attach(dev);
if (err != 0)
goto failed;
callout_init(&sc->callout_tick, 0);
adm6996fc_tick(sc);
return (0);
failed:
if (sc->portphy != NULL)
free(sc->portphy, M_ADM6996FC);
if (sc->miibus != NULL)
free(sc->miibus, M_ADM6996FC);
if (sc->ifname != NULL)
free(sc->ifname, M_ADM6996FC);
if (sc->ifp != NULL)
free(sc->ifp, M_ADM6996FC);
return (err);
}
/*
 * Detach: stop the poll callout, tear down each PHY port's miibus and
 * pseudo ifnet, free the per-port arrays, and destroy the mutex.
 */
static int
adm6996fc_detach(device_t dev)
{
struct adm6996fc_softc *sc;
int i, port;
sc = device_get_softc(dev);
callout_drain(&sc->callout_tick);
for (i = 0; i < MII_NPHY; i++) {
if (((1 << i) & sc->phymask) == 0)
continue;
port = adm6996fc_portforphy(sc, i);
if (sc->miibus[port] != NULL)
device_delete_child(dev, (*sc->miibus[port]));
if (sc->ifp[port] != NULL)
if_free(sc->ifp[port]);
free(sc->ifname[port], M_ADM6996FC);
free(sc->miibus[port], M_ADM6996FC);
}
free(sc->portphy, M_ADM6996FC);
free(sc->miibus, M_ADM6996FC);
free(sc->ifname, M_ADM6996FC);
free(sc->ifp, M_ADM6996FC);
bus_generic_detach(dev);
mtx_destroy(&sc->sc_mtx);
return (0);
}
/*
 * Convert PHY number to port number.
 * The mapping is filled in by adm6996fc_attach_phys().
 */
static inline int
adm6996fc_portforphy(struct adm6996fc_softc *sc, int phy)
{
return (sc->ifpport[phy]);
}
/*
 * Return the MII softc for a port, or NULL for the CPU port and
 * out-of-range indices.
 */
static inline struct mii_data *
adm6996fc_miiforport(struct adm6996fc_softc *sc, int port)
{

	/*
	 * Valid port indices are 0 .. numports - 1.  The previous
	 * "port > sc->numports" test let port == numports through,
	 * indexing one element past the miibus array.
	 */
	if (port < 0 || port >= sc->numports)
		return (NULL);
	/* The CPU port has no PHY. */
	if (port == sc->cpuport)
		return (NULL);
	return (device_get_softc(*sc->miibus[port]));
}
/*
 * Return the pseudo ifnet for a port, or NULL for out-of-range indices.
 */
static inline if_t
adm6996fc_ifpforport(struct adm6996fc_softc *sc, int port)
{

	/*
	 * Valid port indices are 0 .. numports - 1; the old "> numports"
	 * test allowed an off-by-one read past the ifp array.
	 */
	if (port < 0 || port >= sc->numports)
		return (NULL);
	return (sc->ifp[port]);
}
/*
 * Poll the status for all PHYs.
 * For each managed PHY, refresh the status of the currently selected
 * media instance via ukphy_status()/mii_phy_update().
 */
static void
adm6996fc_miipollstat(struct adm6996fc_softc *sc)
{
int i, port;
struct mii_data *mii;
struct mii_softc *miisc;
ADM6996FC_LOCK_ASSERT(sc, MA_NOTOWNED);
for (i = 0; i < MII_NPHY; i++) {
if (((1 << i) & sc->phymask) == 0)
continue;
port = adm6996fc_portforphy(sc, i);
if ((*sc->miibus[port]) == NULL)
continue;
mii = device_get_softc(*sc->miibus[port]);
LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
/* Only update the instance selected by the current media. */
if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) !=
miisc->mii_inst)
continue;
ukphy_status(miisc);
mii_phy_update(miisc, MII_POLLSTAT);
}
}
}
/* Periodic callout: poll all PHYs once a second. */
static void
adm6996fc_tick(void *arg)
{
struct adm6996fc_softc *sc;
sc = arg;
adm6996fc_miipollstat(sc);
callout_reset(&sc->callout_tick, hz, adm6996fc_tick, sc);
}
/* etherswitch(4) lock hook: take the softc mutex. */
static void
adm6996fc_lock(device_t dev)
{
	struct adm6996fc_softc *sc = device_get_softc(dev);

	ADM6996FC_LOCK_ASSERT(sc, MA_NOTOWNED);
	ADM6996FC_LOCK(sc);
}
/* etherswitch(4) unlock hook: drop the softc mutex. */
static void
adm6996fc_unlock(device_t dev)
{
	struct adm6996fc_softc *sc = device_get_softc(dev);

	ADM6996FC_LOCK_ASSERT(sc, MA_OWNED);
	ADM6996FC_UNLOCK(sc);
}
/* Return the switch-capability description for etherswitch(4). */
static etherswitch_info_t *
adm6996fc_getinfo(device_t dev)
{
	struct adm6996fc_softc *sc = device_get_softc(dev);

	return (&sc->info);
}
/*
 * etherswitch(4) getport: report the port's PVID/tagging flags (from
 * the bandwidth-control and VID registers in 802.1Q mode) and its
 * media status.  The CPU port reports fixed media derived from
 * sc->media; PHY ports go through ifmedia_ioctl(SIOCGIFMEDIA).
 */
static int
adm6996fc_getport(device_t dev, etherswitch_port_t *p)
{
struct adm6996fc_softc *sc;
struct mii_data *mii;
struct ifmediareq *ifmr;
device_t parent;
int err, phy;
int data1, data2;
/* Per-port bandwidth-control and VID register addresses. */
int bcaddr[6] = {0x01, 0x03, 0x05, 0x07, 0x08, 0x09};
int vidaddr[6] = {0x28, 0x29, 0x2a, 0x2b, 0x2b, 0x2c};
sc = device_get_softc(dev);
ifmr = &p->es_ifmr;
if (p->es_port < 0 || p->es_port >= sc->numports)
return (ENXIO);
parent = device_get_parent(dev);
if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {
data1 = ADM6996FC_READREG(parent, bcaddr[p->es_port]);
data2 = ADM6996FC_READREG(parent, vidaddr[p->es_port]);
/* only port 4 is hi bit */
if (p->es_port == 4)
data2 = (data2 >> 8) & 0xff;
else
data2 = data2 & 0xff;
p->es_pvid = ADM6996FC_PVIDBYDATA(data1, data2);
if (((data1 >> ADM6996FC_OPTE_SHIFT) & 0x01) == 1)
p->es_flags |= ETHERSWITCH_PORT_ADDTAG;
} else {
p->es_pvid = 0;
}
phy = sc->portphy[p->es_port];
mii = adm6996fc_miiforport(sc, p->es_port);
if (sc->cpuport != -1 && phy == sc->cpuport) {
/* fill in fixed values for CPU port */
p->es_flags |= ETHERSWITCH_PORT_CPU;
ifmr->ifm_count = 0;
if (sc->media == 100)
ifmr->ifm_current = ifmr->ifm_active =
IFM_ETHER | IFM_100_TX | IFM_FDX;
else
ifmr->ifm_current = ifmr->ifm_active =
IFM_ETHER | IFM_1000_T | IFM_FDX;
ifmr->ifm_mask = 0;
ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
} else if (mii != NULL) {
err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr,
&mii->mii_media, SIOCGIFMEDIA);
if (err)
return (err);
} else {
return (ENXIO);
}
return (0);
}
/*
 * etherswitch(4) setport: in 802.1Q mode write the PVID (split across
 * the bandwidth-control and VID registers) and the output-tag-enable
 * bit, then for non-CPU ports forward the media request to MII.
 * Note: "err" is assigned on every path that reaches the final return
 * (DOT1Q branch sets it to 0; the non-DOT1Q CPU-port case returns
 * early; all remaining cases hit the ifmedia_ioctl()).
 */
static int
adm6996fc_setport(device_t dev, etherswitch_port_t *p)
{
struct adm6996fc_softc *sc;
struct ifmedia *ifm;
struct mii_data *mii;
if_t ifp;
device_t parent;
int err;
int data;
/* Per-port bandwidth-control and VID register addresses. */
int bcaddr[6] = {0x01, 0x03, 0x05, 0x07, 0x08, 0x09};
int vidaddr[6] = {0x28, 0x29, 0x2a, 0x2b, 0x2b, 0x2c};
sc = device_get_softc(dev);
parent = device_get_parent(dev);
if (p->es_port < 0 || p->es_port >= sc->numports)
return (ENXIO);
if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {
data = ADM6996FC_READREG(parent, bcaddr[p->es_port]);
data &= ~(0xf << 10);
data |= (p->es_pvid & 0xf) << ADM6996FC_PVID_SHIFT;
if (p->es_flags & ETHERSWITCH_PORT_ADDTAG)
data |= 1 << ADM6996FC_OPTE_SHIFT;
else
data &= ~(1 << ADM6996FC_OPTE_SHIFT);
ADM6996FC_WRITEREG(parent, bcaddr[p->es_port], data);
data = ADM6996FC_READREG(parent, vidaddr[p->es_port]);
/* only port 4 is hi bit */
if (p->es_port == 4) {
data &= ~(0xff << 8);
data = data | (((p->es_pvid >> 4) & 0xff) << 8);
} else {
data &= ~0xff;
data = data | ((p->es_pvid >> 4) & 0xff);
}
ADM6996FC_WRITEREG(parent, vidaddr[p->es_port], data);
err = 0;
} else {
if (sc->portphy[p->es_port] == sc->cpuport)
return (ENXIO);
}
if (sc->portphy[p->es_port] != sc->cpuport) {
mii = adm6996fc_miiforport(sc, p->es_port);
if (mii == NULL)
return (ENXIO);
ifp = adm6996fc_ifpforport(sc, p->es_port);
ifm = &mii->mii_media;
err = ifmedia_ioctl(ifp, &p->es_ifr, ifm, SIOCSIFMEDIA);
}
return (err);
}
/*
 * etherswitch(4) getvgroup: read one VLAN filter entry.
 * In port-VLAN mode groups 0-5 map 1:1 to ports; in 802.1Q mode the
 * entry is valid only when the VV bit is set in the high word.
 */
static int
adm6996fc_getvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
struct adm6996fc_softc *sc;
device_t parent;
int datahi, datalo;
sc = device_get_softc(dev);
parent = device_get_parent(dev);
if (sc->vlan_mode == ETHERSWITCH_VLAN_PORT) {
if (vg->es_vlangroup <= 5) {
vg->es_vid = ETHERSWITCH_VID_VALID;
vg->es_vid |= vg->es_vlangroup;
datalo = ADM6996FC_READREG(parent,
ADM6996FC_VF0L + 2 * vg->es_vlangroup);
datahi = ADM6996FC_READREG(parent,
ADM6996FC_VF0H + 2 * vg->es_vlangroup);
vg->es_member_ports = datalo & 0x3f;
vg->es_untagged_ports = vg->es_member_ports;
vg->es_fid = 0;
} else {
vg->es_vid = 0;
}
} else if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {
datalo = ADM6996FC_READREG(parent,
ADM6996FC_VF0L + 2 * vg->es_vlangroup);
datahi = ADM6996FC_READREG(parent,
ADM6996FC_VF0H + 2 * vg->es_vlangroup);
if (datahi & (1 << ADM6996FC_VV_SHIFT)) {
vg->es_vid = ETHERSWITCH_VID_VALID;
vg->es_vid |= datahi & 0xfff;
vg->es_member_ports = datalo & 0x3f;
/* Upper 6 bits of the low word hold the inverted untag mask. */
vg->es_untagged_ports = (~datalo >> 6) & 0x3f;
vg->es_fid = 0;
} else {
vg->es_fid = 0;
}
} else {
vg->es_fid = 0;
}
return (0);
}
/*
 * etherswitch(4) setvgroup: write one VLAN filter entry.
 * In 802.1Q mode the low word combines the member mask with the
 * inverted untagged mask; the high word carries the VV bit and VID.
 */
static int
adm6996fc_setvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
struct adm6996fc_softc *sc;
device_t parent;
sc = device_get_softc(dev);
parent = device_get_parent(dev);
if (sc->vlan_mode == ETHERSWITCH_VLAN_PORT) {
ADM6996FC_WRITEREG(parent, ADM6996FC_VF0L + 2 * vg->es_vlangroup,
vg->es_member_ports);
} else if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {
ADM6996FC_WRITEREG(parent, ADM6996FC_VF0L + 2 * vg->es_vlangroup,
vg->es_member_ports | ((~vg->es_untagged_ports & 0x3f)<< 6));
ADM6996FC_WRITEREG(parent, ADM6996FC_VF0H + 2 * vg->es_vlangroup,
(1 << ADM6996FC_VV_SHIFT) | vg->es_vid);
}
return (0);
}
/* Report the currently configured VLAN mode to etherswitch(4). */
static int
adm6996fc_getconf(device_t dev, etherswitch_conf_t *conf)
{
	struct adm6996fc_softc *sc = device_get_softc(dev);

	conf->cmd = ETHERSWITCH_CONF_VLAN_MODE;
	conf->vlan_mode = sc->vlan_mode;
	return (0);
}
/*
 * etherswitch(4) setconf: switch between port-based VLANs, 802.1Q
 * VLANs, and the pseudo "off" mode (the chip has no true VLAN-off, so
 * port-based mode with all ports in one group is programmed instead).
 * The TBV bit in SC3 selects tag-based vs. port-based operation.
 */
static int
adm6996fc_setconf(device_t dev, etherswitch_conf_t *conf)
{
struct adm6996fc_softc *sc;
device_t parent;
int i;
int data;
/* Per-port bandwidth-control register addresses. */
int bcaddr[6] = {0x01, 0x03, 0x05, 0x07, 0x08, 0x09};
sc = device_get_softc(dev);
parent = device_get_parent(dev);
if ((conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) == 0)
return (0);
if (conf->vlan_mode == ETHERSWITCH_VLAN_PORT) {
sc->vlan_mode = ETHERSWITCH_VLAN_PORT;
data = ADM6996FC_READREG(parent, ADM6996FC_SC3);
data &= ~(1 << ADM6996FC_TBV_SHIFT);
ADM6996FC_WRITEREG(parent, ADM6996FC_SC3, data);
for (i = 0;i <= 5; ++i) {
data = ADM6996FC_READREG(parent, bcaddr[i]);
data &= ~(0xf << 10);
data |= (i << 10);
ADM6996FC_WRITEREG(parent, bcaddr[i], data);
ADM6996FC_WRITEREG(parent, ADM6996FC_VF0L + 2 * i,
0x003f);
ADM6996FC_WRITEREG(parent, ADM6996FC_VF0H + 2 * i,
(1 << ADM6996FC_VV_SHIFT) | 1);
}
} else if (conf->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {
sc->vlan_mode = ETHERSWITCH_VLAN_DOT1Q;
data = ADM6996FC_READREG(parent, ADM6996FC_SC3);
data |= (1 << ADM6996FC_TBV_SHIFT);
ADM6996FC_WRITEREG(parent, ADM6996FC_SC3, data);
for (i = 0;i <= 5; ++i) {
data = ADM6996FC_READREG(parent, bcaddr[i]);
/* Private VID set 1 */
data &= ~(0xf << 10);
data |= (1 << 10);
ADM6996FC_WRITEREG(parent, bcaddr[i], data);
}
/* Invalidate all remaining VLAN filter entries. */
for (i = 2;i <= 15; ++i) {
ADM6996FC_WRITEREG(parent, ADM6996FC_VF0H + 2 * i,
0x0000);
}
} else {
/*
ADM6996FC have no VLAN off. Then set Port base and
add all port to member. Use VLAN Filter 1 is reset
default.
*/
sc->vlan_mode = 0;
data = ADM6996FC_READREG(parent, ADM6996FC_SC3);
data &= ~(1 << ADM6996FC_TBV_SHIFT);
ADM6996FC_WRITEREG(parent, ADM6996FC_SC3, data);
for (i = 0;i <= 5; ++i) {
data = ADM6996FC_READREG(parent, bcaddr[i]);
data &= ~(0xf << 10);
data |= (1 << 10);
if (i == 5)
data &= ~(1 << 4);
ADM6996FC_WRITEREG(parent, bcaddr[i], data);
}
/* default setting */
ADM6996FC_WRITEREG(parent, ADM6996FC_VF0L + 2, 0x003f);
ADM6996FC_WRITEREG(parent, ADM6996FC_VF0H + 2,
(1 << ADM6996FC_VV_SHIFT) | 1);
}
return (0);
}
/*
 * miibus statchg method: link state changes need no register updates on
 * this switch, so only a debug trace is emitted.
 */
static void
adm6996fc_statchg(device_t dev)
{

	DPRINTF(dev, "%s\n", __func__);
}
/*
 * ifmedia "change" callback for the per-port pseudo interfaces: push the
 * selected media down to the port's PHY via mii_mediachg().
 */
static int
adm6996fc_ifmedia_upd(if_t ifp)
{
	struct adm6996fc_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii_data;

	DPRINTF(sc->sc_dev, "%s\n", __func__);

	mii_data = adm6996fc_miiforport(sc, if_getdunit(ifp));
	if (mii_data == NULL)
		return (ENXIO);

	mii_mediachg(mii_data);
	return (0);
}
/*
 * ifmedia "status" callback: poll the port's PHY and copy its current
 * media and status words into the caller's ifmediareq.
 */
static void
adm6996fc_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct adm6996fc_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii_data;

	DPRINTF(sc->sc_dev, "%s\n", __func__);

	mii_data = adm6996fc_miiforport(sc, if_getdunit(ifp));
	if (mii_data == NULL)
		return;

	mii_pollstat(mii_data);
	ifmr->ifm_status = mii_data->mii_media_status;
	ifmr->ifm_active = mii_data->mii_media_active;
}
/*
 * miibus/MDIO read method: read PHY register 'reg' of PHY 'phy' through
 * the parent MDIO bus, holding the softc lock around the access.
 * Returns the register value, or ENXIO for out-of-range arguments
 * (matching the original behavior).
 */
static int
adm6996fc_readphy(device_t dev, int phy, int reg)
{
	struct adm6996fc_softc *sc = device_get_softc(dev);
	int val;

	ADM6996FC_LOCK_ASSERT(sc, MA_NOTOWNED);

	/* Casting to unsigned folds the negative check into one compare. */
	if ((unsigned)phy >= 32 || (unsigned)reg >= 32)
		return (ENXIO);

	ADM6996FC_LOCK(sc);
	val = ADM6996FC_READREG(device_get_parent(dev),
	    (ADM6996FC_PHY_C0 + ADM6996FC_PHY_SIZE * phy) + reg);
	ADM6996FC_UNLOCK(sc);

	return (val);
}
/*
 * miibus/MDIO write method: write 'data' to PHY register 'reg' of PHY
 * 'phy' through the parent MDIO bus, holding the softc lock around the
 * access.  Returns the parent's write result, or ENXIO for out-of-range
 * arguments.
 */
static int
adm6996fc_writephy(device_t dev, int phy, int reg, int data)
{
	struct adm6996fc_softc *sc = device_get_softc(dev);
	int rv;

	ADM6996FC_LOCK_ASSERT(sc, MA_NOTOWNED);

	if ((unsigned)phy >= 32 || (unsigned)reg >= 32)
		return (ENXIO);

	ADM6996FC_LOCK(sc);
	rv = ADM6996FC_WRITEREG(device_get_parent(dev),
	    (ADM6996FC_PHY_C0 + ADM6996FC_PHY_SIZE * phy) + reg, data);
	ADM6996FC_UNLOCK(sc);

	return (rv);
}
/*
 * etherswitch(4) readreg method: raw register read, forwarded to the
 * parent MDIO bus.
 */
static int
adm6996fc_readreg(device_t dev, int addr)
{

	return (ADM6996FC_READREG(device_get_parent(dev), addr));
}
/*
 * etherswitch(4) writereg method: raw register write, forwarded to the
 * parent MDIO bus.
 */
static int
adm6996fc_writereg(device_t dev, int addr, int value)
{

	return (ADM6996FC_WRITEREG(device_get_parent(dev), addr, value));
}
/*
 * Method table: wires the driver into the newbus device, bus, miibus,
 * MDIO, and etherswitch(4) interfaces.  The PHY access routines double
 * as both miibus and MDIO methods.
 */
static device_method_t adm6996fc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, adm6996fc_probe),
	DEVMETHOD(device_attach, adm6996fc_attach),
	DEVMETHOD(device_detach, adm6996fc_detach),

	/* bus interface */
	DEVMETHOD(bus_add_child, device_add_child_ordered),

	/* MII interface */
	DEVMETHOD(miibus_readreg, adm6996fc_readphy),
	DEVMETHOD(miibus_writereg, adm6996fc_writephy),
	DEVMETHOD(miibus_statchg, adm6996fc_statchg),

	/* MDIO interface */
	DEVMETHOD(mdio_readreg, adm6996fc_readphy),
	DEVMETHOD(mdio_writereg, adm6996fc_writephy),

	/* etherswitch interface */
	DEVMETHOD(etherswitch_lock, adm6996fc_lock),
	DEVMETHOD(etherswitch_unlock, adm6996fc_unlock),
	DEVMETHOD(etherswitch_getinfo, adm6996fc_getinfo),
	DEVMETHOD(etherswitch_readreg, adm6996fc_readreg),
	DEVMETHOD(etherswitch_writereg, adm6996fc_writereg),
	DEVMETHOD(etherswitch_readphyreg, adm6996fc_readphy),
	DEVMETHOD(etherswitch_writephyreg, adm6996fc_writephy),
	DEVMETHOD(etherswitch_getport, adm6996fc_getport),
	DEVMETHOD(etherswitch_setport, adm6996fc_setport),
	DEVMETHOD(etherswitch_getvgroup, adm6996fc_getvgroup),
	DEVMETHOD(etherswitch_setvgroup, adm6996fc_setvgroup),
	DEVMETHOD(etherswitch_setconf, adm6996fc_setconf),
	DEVMETHOD(etherswitch_getconf, adm6996fc_getconf),
	DEVMETHOD_END
};
/*
 * Driver/module registration: the switch attaches below mdio(4) and in
 * turn hosts miibus, mdio, and etherswitch child buses.
 */
DEFINE_CLASS_0(adm6996fc, adm6996fc_driver, adm6996fc_methods,
    sizeof(struct adm6996fc_softc));

DRIVER_MODULE(adm6996fc, mdio, adm6996fc_driver, 0, 0);
DRIVER_MODULE(miibus, adm6996fc, miibus_driver, 0, 0);
DRIVER_MODULE(mdio, adm6996fc, mdio_driver, 0, 0);
DRIVER_MODULE(etherswitch, adm6996fc, etherswitch_driver, 0, 0);
MODULE_VERSION(adm6996fc, 1);
MODULE_DEPEND(adm6996fc, miibus, 1, 1, 1); /* XXX which versions? */
MODULE_DEPEND(adm6996fc, etherswitch, 1, 1, 1); /* XXX which versions? */
diff --git a/sys/dev/etherswitch/ip17x/ip17x.c b/sys/dev/etherswitch/ip17x/ip17x.c
index 45119ded83a9..5e07e1f72b80 100644
--- a/sys/dev/etherswitch/ip17x/ip17x.c
+++ b/sys/dev/etherswitch/ip17x/ip17x.c
@@ -1,659 +1,653 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2013 Luiz Otavio O Souza.
* Copyright (c) 2011-2012 Stefan Bethke.
* Copyright (c) 2012 Adrian Chadd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "opt_platform.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <machine/bus.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mdio/mdio.h>
#include <dev/etherswitch/etherswitch.h>
#include <dev/etherswitch/ip17x/ip17x_phy.h>
#include <dev/etherswitch/ip17x/ip17x_reg.h>
#include <dev/etherswitch/ip17x/ip17x_var.h>
#include <dev/etherswitch/ip17x/ip17x_vlans.h>
#include <dev/etherswitch/ip17x/ip175c.h>
#include <dev/etherswitch/ip17x/ip175d.h>
#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif
#include "mdio_if.h"
#include "miibus_if.h"
#include "etherswitch_if.h"
MALLOC_DECLARE(M_IP17X);
MALLOC_DEFINE(M_IP17X, "ip17x", "ip17x data structures");
static void ip17x_tick(void *);
static int ip17x_ifmedia_upd(if_t);
static void ip17x_ifmedia_sts(if_t, struct ifmediareq *);
/*
 * Bus identify method: ensure exactly one "ip17x" child exists on the
 * parent bus.
 */
static void
ip17x_identify(driver_t *driver, device_t parent)
{

	if (device_find_child(parent, "ip17x", -1) != NULL)
		return;
	BUS_ADD_CHILD(parent, 0, "ip17x", -1);
}
/*
 * Probe: identify an IC+ IP17x switch by reading the PHY 0 ID registers
 * through the parent MDIO bus, then refine the model (IP175D has its own
 * ID register; IP178C is detected by the presence of PHY 5).  On FDT
 * systems the device tree must also carry an "icplus,ip17x" node.
 * Returns BUS_PROBE_DEFAULT on a match, ENXIO otherwise.
 */
static int
ip17x_probe(device_t dev)
{
	struct ip17x_softc *sc;
	uint32_t oui, model, phy_id1, phy_id2;
#ifdef FDT
	phandle_t ip17x_node;
	pcell_t cell;

	/* Bail out early if the device tree has no ip17x node. */
	ip17x_node = fdt_find_compatible(OF_finddevice("/"),
	    "icplus,ip17x", 0);

	if (ip17x_node == 0)
		return (ENXIO);
#endif

	sc = device_get_softc(dev);

	/* Read ID from PHY 0. */
	phy_id1 = MDIO_READREG(device_get_parent(dev), 0, MII_PHYIDR1);
	phy_id2 = MDIO_READREG(device_get_parent(dev), 0, MII_PHYIDR2);
	oui = MII_OUI(phy_id1, phy_id2);
	model = MII_MODEL(phy_id2);
	/* We only care about IC+ devices. */
	if (oui != IP17X_OUI) {
		device_printf(dev,
		    "Unsupported IC+ switch. Unknown OUI: %#x\n", oui);
		return (ENXIO);
	}

	switch (model) {
	case IP17X_IP175A:
		sc->sc_switchtype = IP17X_SWITCH_IP175A;
		break;
	case IP17X_IP175C:
		sc->sc_switchtype = IP17X_SWITCH_IP175C;
		break;
	default:
		device_printf(dev, "Unsupported IC+ switch model: %#x\n",
		    model);
		return (ENXIO);
	}

	/* IP175D has a specific ID register. */
	model = MDIO_READREG(device_get_parent(dev), IP175D_ID_PHY,
	    IP175D_ID_REG);
	if (model == 0x175d)
		sc->sc_switchtype = IP17X_SWITCH_IP175D;
	else {
		/* IP178 has more PHYs. Try it. */
		model = MDIO_READREG(device_get_parent(dev), 5, MII_PHYIDR1);
		/* PHY 5 echoing PHY 0's ID is taken to mean an IP178C. */
		if (phy_id1 == model)
			sc->sc_switchtype = IP17X_SWITCH_IP178C;
	}

	/* MII polling defaults on; FDT property or hint can disable it. */
	sc->miipoll = 1;
#ifdef FDT
	if ((OF_getencprop(ip17x_node, "mii-poll",
	    &cell, sizeof(cell))) > 0)
		sc->miipoll = cell ? 1 : 0;
#else
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "mii-poll", &sc->miipoll);
#endif
	device_set_desc(dev, "IC+ IP17x switch driver");
	return (BUS_PROBE_DEFAULT);
}
static int
ip17x_attach_phys(struct ip17x_softc *sc)
{
int err, phy, port;
char name[IFNAMSIZ];
port = err = 0;
/* PHYs need an interface, so we generate a dummy one */
snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->sc_dev));
for (phy = 0; phy < MII_NPHY; phy++) {
if (((1 << phy) & sc->phymask) == 0)
continue;
sc->phyport[phy] = port;
sc->portphy[port] = phy;
sc->ifp[port] = if_alloc(IFT_ETHER);
- if (sc->ifp[port] == NULL) {
- device_printf(sc->sc_dev, "couldn't allocate ifnet structure\n");
- err = ENOMEM;
- break;
- }
-
if_setsoftc(sc->ifp[port], sc);
if_setflags(sc->ifp[port], IFF_UP | IFF_BROADCAST |
IFF_DRV_RUNNING | IFF_SIMPLEX);
if_initname(sc->ifp[port], name, port);
sc->miibus[port] = malloc(sizeof(device_t), M_IP17X,
M_WAITOK | M_ZERO);
err = mii_attach(sc->sc_dev, sc->miibus[port], sc->ifp[port],
ip17x_ifmedia_upd, ip17x_ifmedia_sts, \
BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
DPRINTF(sc->sc_dev, "%s attached to pseudo interface %s\n",
device_get_nameunit(*sc->miibus[port]),
if_name(sc->ifp[port]));
if (err != 0) {
device_printf(sc->sc_dev,
"attaching PHY %d failed\n",
phy);
break;
}
sc->info.es_nports = port + 1;
if (++port >= sc->numports)
break;
}
return (err);
}
/*
 * Attach: initialize the softc, select the model-specific HAL
 * (IP175C/IP175D), allocate the per-port arrays, reset the switch,
 * attach the PHYs, default to port-based VLANs, and start the MII
 * polling callout if enabled.  Returns 0 on success or an errno.
 *
 * NOTE(review): the error paths after the allocations leak the arrays
 * and the mutex; acceptable for a device driver attach, but worth
 * confirming against the detach path.
 */
static int
ip17x_attach(device_t dev)
{
	struct ip17x_softc *sc;
	int err;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	mtx_init(&sc->sc_mtx, "ip17x", NULL, MTX_DEF);
	strlcpy(sc->info.es_name, device_get_desc(dev),
	    sizeof(sc->info.es_name));

	/* XXX Defaults */
	sc->phymask = 0x0f;
	sc->media = 100;

	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "phymask", &sc->phymask);

	/* Number of vlans supported by the switch. */
	sc->info.es_nvlangroups = IP17X_MAX_VLANS;

	/* Attach the switch related functions. */
	if (IP17X_IS_SWITCH(sc, IP175C))
		ip175c_attach(sc);
	else if (IP17X_IS_SWITCH(sc, IP175D))
		ip175d_attach(sc);
	else
		/* We don't have support to all the models yet :-/ */
		return (ENXIO);

	/* Always attach the cpu port. */
	sc->phymask |= (1 << sc->cpuport);

	/* Per-port arrays; M_WAITOK allocations cannot fail. */
	sc->ifp = malloc(sizeof(if_t) * sc->numports, M_IP17X,
	    M_WAITOK | M_ZERO);
	sc->pvid = malloc(sizeof(uint32_t) * sc->numports, M_IP17X,
	    M_WAITOK | M_ZERO);
	sc->miibus = malloc(sizeof(device_t *) * sc->numports, M_IP17X,
	    M_WAITOK | M_ZERO);
	sc->portphy = malloc(sizeof(int) * sc->numports, M_IP17X,
	    M_WAITOK | M_ZERO);

	/* Initialize the switch. */
	sc->hal.ip17x_reset(sc);

	/*
	 * Attach the PHYs and complete the bus enumeration.
	 */
	err = ip17x_attach_phys(sc);
	if (err != 0)
		return (err);

	/*
	 * Set the switch to port based vlans or disabled (if not supported
	 * on this model).
	 */
	sc->hal.ip17x_set_vlan_mode(sc, ETHERSWITCH_VLAN_PORT);

	bus_generic_probe(dev);
	bus_enumerate_hinted_children(dev);
	err = bus_generic_attach(dev);
	if (err != 0)
		return (err);

	if (sc->miipoll) {
		callout_init(&sc->callout_tick, 0);

		ip17x_tick(sc);
	}

	return (0);
}
/*
 * Detach: stop the polling callout, tear down the per-PHY miibus
 * children and pseudo interfaces, free the per-port arrays, reset the
 * switch hardware, and destroy the mutex.  Always returns 0.
 */
static int
ip17x_detach(device_t dev)
{
	struct ip17x_softc *sc;
	int i, port;

	sc = device_get_softc(dev);
	if (sc->miipoll)
		callout_drain(&sc->callout_tick);

	/* Walk PHYs (not ports) and translate via phyport[]. */
	for (i=0; i < MII_NPHY; i++) {
		if (((1 << i) & sc->phymask) == 0)
			continue;
		port = sc->phyport[i];
		if (sc->miibus[port] != NULL)
			device_delete_child(dev, (*sc->miibus[port]));
		if (sc->ifp[port] != NULL)
			if_free(sc->ifp[port]);
		free(sc->miibus[port], M_IP17X);
	}

	free(sc->portphy, M_IP17X);
	free(sc->miibus, M_IP17X);
	free(sc->pvid, M_IP17X);
	free(sc->ifp, M_IP17X);

	/* Reset the switch. */
	sc->hal.ip17x_reset(sc);

	bus_generic_detach(dev);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}
/*
 * Map a port number to the mii_data of the miibus instance attached to
 * it.  Returns NULL for out-of-range ports.
 */
static inline struct mii_data *
ip17x_miiforport(struct ip17x_softc *sc, int port)
{

	/*
	 * Valid indices are 0 .. numports-1.  The previous check used
	 * '>', which let port == numports read one element past the end
	 * of the miibus array.
	 */
	if (port < 0 || port >= sc->numports)
		return (NULL);
	return (device_get_softc(*sc->miibus[port]));
}
/*
 * Map a port number to its pseudo interface.  Returns NULL for
 * out-of-range ports.
 */
static inline if_t
ip17x_ifpforport(struct ip17x_softc *sc, int port)
{

	/*
	 * Valid indices are 0 .. numports-1.  The previous check used
	 * '>', which let port == numports read one element past the end
	 * of the ifp array.
	 */
	if (port < 0 || port >= sc->numports)
		return (NULL);
	return (sc->ifp[port]);
}
/*
* Poll the status for all PHYs.
*/
static void
ip17x_miipollstat(struct ip17x_softc *sc)
{
struct mii_softc *miisc;
struct mii_data *mii;
int i, port;
IP17X_LOCK_ASSERT(sc, MA_NOTOWNED);
for (i = 0; i < MII_NPHY; i++) {
if (((1 << i) & sc->phymask) == 0)
continue;
port = sc->phyport[i];
if ((*sc->miibus[port]) == NULL)
continue;
mii = device_get_softc(*sc->miibus[port]);
LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) !=
miisc->mii_inst)
continue;
ukphy_status(miisc);
mii_phy_update(miisc, MII_POLLSTAT);
}
}
}
/*
 * Periodic callout: poll all PHYs once a second.
 */
static void
ip17x_tick(void *arg)
{
	struct ip17x_softc *sc = arg;

	ip17x_miipollstat(sc);
	/* Re-arm for the next one-second poll. */
	callout_reset(&sc->callout_tick, hz, ip17x_tick, sc);
}
/*
 * etherswitch(4) lock method: take the softc mutex for the caller.
 */
static void
ip17x_lock(device_t dev)
{
	struct ip17x_softc *sc = device_get_softc(dev);

	IP17X_LOCK_ASSERT(sc, MA_NOTOWNED);
	IP17X_LOCK(sc);
}
/*
 * etherswitch(4) unlock method: release the softc mutex.
 */
static void
ip17x_unlock(device_t dev)
{
	struct ip17x_softc *sc = device_get_softc(dev);

	IP17X_LOCK_ASSERT(sc, MA_OWNED);
	IP17X_UNLOCK(sc);
}
/*
 * etherswitch(4) getinfo method: hand back the switch description
 * filled in during attach.
 */
static etherswitch_info_t *
ip17x_getinfo(device_t dev)
{
	struct ip17x_softc *sc = device_get_softc(dev);

	return (&sc->info);
}
/*
 * etherswitch(4) getport method: report a port's PVID, tag flags, and
 * media.  The CPU port reports fixed full-duplex media (100 or 1000
 * depending on sc->media); other ports are queried through their PHY's
 * ifmedia.  Returns 0, ENXIO for a bad port, or an ifmedia_ioctl error.
 */
static int
ip17x_getport(device_t dev, etherswitch_port_t *p)
{
	struct ip17x_softc *sc;
	struct ifmediareq *ifmr;
	struct mii_data *mii;
	int err, phy;

	sc = device_get_softc(dev);
	if (p->es_port < 0 || p->es_port >= sc->numports)
		return (ENXIO);

	phy = sc->portphy[p->es_port];

	/* Retrieve the PVID. */
	p->es_pvid = sc->pvid[phy];

	/* Port flags. */
	if (sc->addtag & (1 << phy))
		p->es_flags |= ETHERSWITCH_PORT_ADDTAG;
	if (sc->striptag & (1 << phy))
		p->es_flags |= ETHERSWITCH_PORT_STRIPTAG;

	ifmr = &p->es_ifmr;

	/* No media settings ? */
	if (p->es_ifmr.ifm_count == 0)
		return (0);

	mii = ip17x_miiforport(sc, p->es_port);
	if (mii == NULL)
		return (ENXIO);

	if (phy == sc->cpuport) {
		/* fill in fixed values for CPU port */
		p->es_flags |= ETHERSWITCH_PORT_CPU;
		ifmr->ifm_count = 0;
		if (sc->media == 100)
			ifmr->ifm_current = ifmr->ifm_active =
			    IFM_ETHER | IFM_100_TX | IFM_FDX;
		else
			ifmr->ifm_current = ifmr->ifm_active =
			    IFM_ETHER | IFM_1000_T | IFM_FDX;
		ifmr->ifm_mask = 0;
		ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
	} else {
		/* Delegate the media query to the PHY's ifmedia. */
		err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr,
		    &mii->mii_media, SIOCGIFMEDIA);
		if (err)
			return (err);
	}
	return (0);
}
/*
 * etherswitch(4) setport method: update a port's PVID and tag flags
 * (802.1q mode only), reprogram the switch, and optionally apply a new
 * media setting via the PHY's ifmedia.  Media changes are refused on
 * the CPU port.  Returns 0, ENXIO/EINVAL on bad input, or an
 * ifmedia_ioctl error.
 */
static int
ip17x_setport(device_t dev, etherswitch_port_t *p)
{
	struct ip17x_softc *sc;
	struct ifmedia *ifm;
	if_t ifp;
	struct mii_data *mii;
	int phy;

	sc = device_get_softc(dev);
	if (p->es_port < 0 || p->es_port >= sc->numports)
		return (ENXIO);
	phy = sc->portphy[p->es_port];
	ifp = ip17x_ifpforport(sc, p->es_port);
	mii = ip17x_miiforport(sc, p->es_port);
	if (ifp == NULL || mii == NULL)
		return (ENXIO);

	/* Port flags. */
	if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {

		/* Set the PVID. */
		if (p->es_pvid != 0) {
			/* IP175C only supports a limited VID range. */
			if (IP17X_IS_SWITCH(sc, IP175C) &&
			    p->es_pvid > IP175C_LAST_VLAN)
				return (ENXIO);
			sc->pvid[phy] = p->es_pvid;
		}

		/* Mutually exclusive. */
		if (p->es_flags & ETHERSWITCH_PORT_ADDTAG &&
		    p->es_flags & ETHERSWITCH_PORT_STRIPTAG)
			return (EINVAL);

		/* Reset the settings for this port. */
		sc->addtag &= ~(1 << phy);
		sc->striptag &= ~(1 << phy);

		/* And then set it to the new value. */
		if (p->es_flags & ETHERSWITCH_PORT_ADDTAG)
			sc->addtag |= (1 << phy);
		if (p->es_flags & ETHERSWITCH_PORT_STRIPTAG)
			sc->striptag |= (1 << phy);
	}

	/* Update the switch configuration. */
	if (sc->hal.ip17x_hw_setup(sc))
		return (ENXIO);

	/* Do not allow media changes on CPU port. */
	if (phy == sc->cpuport)
		return (0);

	/* No media settings ? */
	if (p->es_ifmr.ifm_count == 0)
		return (0);

	ifm = &mii->mii_media;
	return (ifmedia_ioctl(ifp, &p->es_ifr, ifm, SIOCSIFMEDIA));
}
/*
 * miibus statchg method: nothing to program on link change; trace only.
 */
static void
ip17x_statchg(device_t dev)
{

	DPRINTF(dev, "%s\n", __func__);
}
/*
 * ifmedia "change" callback for the per-port pseudo interfaces: apply
 * the selected media through the port's PHY.
 */
static int
ip17x_ifmedia_upd(if_t ifp)
{
	struct ip17x_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii_data;

	DPRINTF(sc->sc_dev, "%s\n", __func__);

	mii_data = ip17x_miiforport(sc, if_getdunit(ifp));
	if (mii_data == NULL)
		return (ENXIO);

	mii_mediachg(mii_data);
	return (0);
}
/*
 * ifmedia "status" callback: refresh and report the port PHY's current
 * media and status.
 */
static void
ip17x_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct ip17x_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii_data;

	DPRINTF(sc->sc_dev, "%s\n", __func__);

	mii_data = ip17x_miiforport(sc, if_getdunit(ifp));
	if (mii_data == NULL)
		return;

	mii_pollstat(mii_data);
	ifmr->ifm_status = mii_data->mii_media_status;
	ifmr->ifm_active = mii_data->mii_media_active;
}
/*
 * etherswitch(4) readreg method: the IP17x has no directly addressable
 * register space (everything goes through the PHY registers), so this
 * is a stub that only enforces the locking protocol.
 */
static int
ip17x_readreg(device_t dev, int addr)
{
	struct ip17x_softc *sc __diagused;

	sc = device_get_softc(dev);
	IP17X_LOCK_ASSERT(sc, MA_OWNED);

	/* Not supported. */
	return (0);
}
/*
 * etherswitch(4) writereg method: stub counterpart of ip17x_readreg();
 * raw register writes are not supported on this hardware.
 */
static int
ip17x_writereg(device_t dev, int addr, int value)
{
	struct ip17x_softc *sc __diagused;

	sc = device_get_softc(dev);
	IP17X_LOCK_ASSERT(sc, MA_OWNED);

	/* Not supported. */
	return (0);
}
/*
 * etherswitch(4) getconf method: report the VLAN mode as seen by the
 * model-specific HAL.
 */
static int
ip17x_getconf(device_t dev, etherswitch_conf_t *conf)
{
	struct ip17x_softc *sc = device_get_softc(dev);

	conf->cmd = ETHERSWITCH_CONF_VLAN_MODE;
	conf->vlan_mode = sc->hal.ip17x_get_vlan_mode(sc);

	return (0);
}
/*
 * etherswitch(4) setconf method: apply a new VLAN mode through the
 * model-specific HAL; all other configuration commands are ignored.
 */
static int
ip17x_setconf(device_t dev, etherswitch_conf_t *conf)
{
	struct ip17x_softc *sc = device_get_softc(dev);

	if ((conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) != 0)
		sc->hal.ip17x_set_vlan_mode(sc, conf->vlan_mode);

	return (0);
}
/*
 * Method table: wires the driver into the newbus device, bus, miibus,
 * MDIO, and etherswitch(4) interfaces.  PHY access routines serve both
 * the miibus and MDIO interfaces.
 */
static device_method_t ip17x_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify, ip17x_identify),
	DEVMETHOD(device_probe, ip17x_probe),
	DEVMETHOD(device_attach, ip17x_attach),
	DEVMETHOD(device_detach, ip17x_detach),

	/* bus interface */
	DEVMETHOD(bus_add_child, device_add_child_ordered),

	/* MII interface */
	DEVMETHOD(miibus_readreg, ip17x_readphy),
	DEVMETHOD(miibus_writereg, ip17x_writephy),
	DEVMETHOD(miibus_statchg, ip17x_statchg),

	/* MDIO interface */
	DEVMETHOD(mdio_readreg, ip17x_readphy),
	DEVMETHOD(mdio_writereg, ip17x_writephy),

	/* etherswitch interface */
	DEVMETHOD(etherswitch_lock, ip17x_lock),
	DEVMETHOD(etherswitch_unlock, ip17x_unlock),
	DEVMETHOD(etherswitch_getinfo, ip17x_getinfo),
	DEVMETHOD(etherswitch_readreg, ip17x_readreg),
	DEVMETHOD(etherswitch_writereg, ip17x_writereg),
	DEVMETHOD(etherswitch_readphyreg, ip17x_readphy),
	DEVMETHOD(etherswitch_writephyreg, ip17x_writephy),
	DEVMETHOD(etherswitch_getport, ip17x_getport),
	DEVMETHOD(etherswitch_setport, ip17x_setport),
	DEVMETHOD(etherswitch_getvgroup, ip17x_getvgroup),
	DEVMETHOD(etherswitch_setvgroup, ip17x_setvgroup),
	DEVMETHOD(etherswitch_getconf, ip17x_getconf),
	DEVMETHOD(etherswitch_setconf, ip17x_setconf),
	DEVMETHOD_END
};
/*
 * Driver/module registration.  On FDT systems the mdio child bus and
 * miibus/etherswitch dependencies come from the platform configuration;
 * otherwise they are declared explicitly here.
 */
DEFINE_CLASS_0(ip17x, ip17x_driver, ip17x_methods,
    sizeof(struct ip17x_softc));

DRIVER_MODULE(ip17x, mdio, ip17x_driver, 0, 0);
DRIVER_MODULE(miibus, ip17x, miibus_driver, 0, 0);
DRIVER_MODULE(etherswitch, ip17x, etherswitch_driver, 0, 0);
MODULE_VERSION(ip17x, 1);

#ifdef FDT
MODULE_DEPEND(ip17x, mdio, 1, 1, 1); /* XXX which versions? */
#else
DRIVER_MODULE(mdio, ip17x, mdio_driver, 0, 0);
MODULE_DEPEND(ip17x, miibus, 1, 1, 1); /* XXX which versions? */
MODULE_DEPEND(ip17x, etherswitch, 1, 1, 1); /* XXX which versions? */
#endif
diff --git a/sys/dev/etherswitch/micrel/ksz8995ma.c b/sys/dev/etherswitch/micrel/ksz8995ma.c
index 2b5af2a5625a..e512a86202c6 100644
--- a/sys/dev/etherswitch/micrel/ksz8995ma.c
+++ b/sys/dev/etherswitch/micrel/ksz8995ma.c
@@ -1,962 +1,956 @@
/*-
* Copyright (c) 2016 Hiroki Mori
* Copyright (c) 2013 Luiz Otavio O Souza.
* Copyright (c) 2011-2012 Stefan Bethke.
* Copyright (c) 2012 Adrian Chadd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* This is Micrel KSZ8995MA driver code. KSZ8995MA use SPI bus on control.
* This code development on @SRCHACK's ksz8995ma board and FON2100 with
* gpiospi.
* etherswitchcfg command port option support addtag, ingress, striptag,
* dropuntagged.
*/
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <machine/bus.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/etherswitch/etherswitch.h>
#include <dev/spibus/spi.h>
#include "spibus_if.h"
#include "miibus_if.h"
#include "etherswitch_if.h"
#define KSZ8995MA_SPI_READ 0x03
#define KSZ8995MA_SPI_WRITE 0x02
#define KSZ8995MA_CID0 0x00
#define KSZ8995MA_CID1 0x01
#define KSZ8995MA_GC0 0x02
#define KSZ8995MA_GC1 0x03
#define KSZ8995MA_GC2 0x04
#define KSZ8995MA_GC3 0x05
#define KSZ8995MA_PORT_SIZE 0x10
#define KSZ8995MA_PC0_BASE 0x10
#define KSZ8995MA_PC1_BASE 0x11
#define KSZ8995MA_PC2_BASE 0x12
#define KSZ8995MA_PC3_BASE 0x13
#define KSZ8995MA_PC4_BASE 0x14
#define KSZ8995MA_PC5_BASE 0x15
#define KSZ8995MA_PC6_BASE 0x16
#define KSZ8995MA_PC7_BASE 0x17
#define KSZ8995MA_PC8_BASE 0x18
#define KSZ8995MA_PC9_BASE 0x19
#define KSZ8995MA_PC10_BASE 0x1a
#define KSZ8995MA_PC11_BASE 0x1b
#define KSZ8995MA_PC12_BASE 0x1c
#define KSZ8995MA_PC13_BASE 0x1d
#define KSZ8995MA_PS0_BASE 0x1e
#define KSZ8995MA_PC14_BASE 0x1f
#define KSZ8995MA_IAC0 0x6e
#define KSZ8995MA_IAC1 0x6f
#define KSZ8995MA_IDR8 0x70
#define KSZ8995MA_IDR7 0x71
#define KSZ8995MA_IDR6 0x72
#define KSZ8995MA_IDR5 0x73
#define KSZ8995MA_IDR4 0x74
#define KSZ8995MA_IDR3 0x75
#define KSZ8995MA_IDR2 0x76
#define KSZ8995MA_IDR1 0x77
#define KSZ8995MA_IDR0 0x78
#define KSZ8995MA_FAMILI_ID 0x95
#define KSZ8995MA_CHIP_ID 0x00
#define KSZ8995MA_CHIP_ID_MASK 0xf0
#define KSZ8995MA_START 0x01
#define KSZ8995MA_VLAN_ENABLE 0x80
#define KSZ8995MA_TAG_INS 0x04
#define KSZ8995MA_TAG_RM 0x02
#define KSZ8995MA_INGR_FILT 0x40
#define KSZ8995MA_DROP_NONPVID 0x20
#define KSZ8995MA_PDOWN 0x08
#define KSZ8995MA_STARTNEG 0x20
#define KSZ8995MA_MII_STAT 0x7808
#define KSZ8995MA_MII_PHYID_H 0x0022
#define KSZ8995MA_MII_PHYID_L 0x1450
#define KSZ8995MA_MII_AA 0x0401
#define KSZ8995MA_VLAN_TABLE_VALID 0x20
#define KSZ8995MA_VLAN_TABLE_READ 0x14
#define KSZ8995MA_VLAN_TABLE_WRITE 0x04
#define KSZ8995MA_MAX_PORT 5
MALLOC_DECLARE(M_KSZ8995MA);
MALLOC_DEFINE(M_KSZ8995MA, "ksz8995ma", "ksz8995ma data structures");
/*
 * Per-device state for the KSZ8995MA SPI switch.  The per-port arrays
 * (portphy, ifname, miibus, ifp) are allocated at attach time to
 * sc->numports entries each.
 */
struct ksz8995ma_softc {
	struct mtx	sc_mtx;		/* serialize access to softc */

	device_t	sc_dev;
	int		vlan_mode;	/* cached etherswitch VLAN mode */
	int		media;		/* cpu port media */
	int		cpuport;	/* which PHY is connected to the CPU */
	int		phymask;	/* PHYs we manage */
	int		numports;	/* number of ports */
	int		ifpport[KSZ8995MA_MAX_PORT];	/* PHY -> port map */
	int		*portphy;	/* port -> PHY map */
	char		**ifname;	/* pseudo-interface names */
	device_t	**miibus;	/* per-port miibus children */
	if_t		*ifp;		/* per-port pseudo interfaces */
	struct callout	callout_tick;	/* periodic PHY status poll */
	etherswitch_info_t	info;	/* description for etherswitch(4) */
};
#define KSZ8995MA_LOCK(_sc) \
mtx_lock(&(_sc)->sc_mtx)
#define KSZ8995MA_UNLOCK(_sc) \
mtx_unlock(&(_sc)->sc_mtx)
#define KSZ8995MA_LOCK_ASSERT(_sc, _what) \
mtx_assert(&(_sc)->sc_mtx, (_what))
#define KSZ8995MA_TRYLOCK(_sc) \
mtx_trylock(&(_sc)->sc_mtx)
#if defined(DEBUG)
#define DPRINTF(dev, args...) device_printf(dev, args)
#else
#define DPRINTF(dev, args...)
#endif
static inline int ksz8995ma_portforphy(struct ksz8995ma_softc *, int);
static void ksz8995ma_tick(void *);
static int ksz8995ma_ifmedia_upd(if_t);
static void ksz8995ma_ifmedia_sts(if_t, struct ifmediareq *);
static int ksz8995ma_readreg(device_t dev, int addr);
static int ksz8995ma_writereg(device_t dev, int addr, int value);
static void ksz8995ma_portvlanreset(device_t dev);
/*
 * Probe: read the chip identifier registers over SPI and match the
 * KSZ8995 family/chip IDs.  Returns BUS_PROBE_DEFAULT on a match,
 * ENXIO otherwise.
 */
static int
ksz8995ma_probe(device_t dev)
{
	int id0, id1;
	struct ksz8995ma_softc *sc;

	sc = device_get_softc(dev);
	/* Start from a clean softc; probe may run before attach. */
	bzero(sc, sizeof(*sc));

	id0 = ksz8995ma_readreg(dev, KSZ8995MA_CID0);
	id1 = ksz8995ma_readreg(dev, KSZ8995MA_CID1);
	if (bootverbose)
		device_printf(dev,"Chip Identifier Register %x %x\n", id0, id1);

	/* check Product Code */
	if (id0 != KSZ8995MA_FAMILI_ID || (id1 & KSZ8995MA_CHIP_ID_MASK) !=
	    KSZ8995MA_CHIP_ID) {
		return (ENXIO);
	}

	device_set_desc(dev, "Micrel KSZ8995MA SPI switch driver");
	return (BUS_PROBE_DEFAULT);
}
static int
ksz8995ma_attach_phys(struct ksz8995ma_softc *sc)
{
int phy, port, err;
char name[IFNAMSIZ];
port = 0;
err = 0;
/* PHYs need an interface, so we generate a dummy one */
snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->sc_dev));
for (phy = 0; phy < sc->numports; phy++) {
if (phy == sc->cpuport)
continue;
if (((1 << phy) & sc->phymask) == 0)
continue;
sc->ifpport[phy] = port;
sc->portphy[port] = phy;
sc->ifp[port] = if_alloc(IFT_ETHER);
- if (sc->ifp[port] == NULL) {
- device_printf(sc->sc_dev, "couldn't allocate ifnet structure\n");
- err = ENOMEM;
- break;
- }
-
sc->ifp[port]->if_softc = sc;
sc->ifp[port]->if_flags |= IFF_UP | IFF_BROADCAST |
IFF_DRV_RUNNING | IFF_SIMPLEX;
if_initname(sc->ifp[port], name, port);
sc->miibus[port] = malloc(sizeof(device_t), M_KSZ8995MA,
M_WAITOK | M_ZERO);
if (sc->miibus[port] == NULL) {
err = ENOMEM;
goto failed;
}
err = mii_attach(sc->sc_dev, sc->miibus[port], sc->ifp[port],
ksz8995ma_ifmedia_upd, ksz8995ma_ifmedia_sts, \
BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
DPRINTF(sc->sc_dev, "%s attached to pseudo interface %s\n",
device_get_nameunit(*sc->miibus[port]),
sc->ifp[port]->if_xname);
if (err != 0) {
device_printf(sc->sc_dev,
"attaching PHY %d failed\n",
phy);
goto failed;
}
++port;
}
sc->info.es_nports = port;
if (sc->cpuport != -1) {
/* cpu port is MAC5 on ksz8995ma */
sc->ifpport[sc->cpuport] = port;
sc->portphy[port] = sc->cpuport;
++sc->info.es_nports;
}
return (0);
failed:
for (phy = 0; phy < sc->numports; phy++) {
if (((1 << phy) & sc->phymask) == 0)
continue;
port = ksz8995ma_portforphy(sc, phy);
if (sc->miibus[port] != NULL)
device_delete_child(sc->sc_dev, (*sc->miibus[port]));
if (sc->ifp[port] != NULL)
if_free(sc->ifp[port]);
if (sc->ifname[port] != NULL)
free(sc->ifname[port], M_KSZ8995MA);
if (sc->miibus[port] != NULL)
free(sc->miibus[port], M_KSZ8995MA);
}
return (err);
}
/*
 * Attach: initialize the softc with KSZ8995MA defaults, allocate the
 * per-port arrays, attach the PHYs, start the MII polling callout,
 * reset the VLAN configuration to port-based off, and start the switch.
 * Returns 0 on success or an errno; on failure the arrays are freed.
 */
static int
ksz8995ma_attach(device_t dev)
{
	struct ksz8995ma_softc *sc;
	int err, reg;

	err = 0;
	sc = device_get_softc(dev);

	sc->sc_dev = dev;
	mtx_init(&sc->sc_mtx, "ksz8995ma", NULL, MTX_DEF);
	strlcpy(sc->info.es_name, device_get_desc(dev),
	    sizeof(sc->info.es_name));

	/* KSZ8995MA Defaults */
	sc->numports = KSZ8995MA_MAX_PORT;
	sc->phymask = (1 << (KSZ8995MA_MAX_PORT + 1)) - 1;
	sc->cpuport = -1;
	sc->media = 100;

	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "cpuport", &sc->cpuport);

	sc->info.es_nvlangroups = 16;
	sc->info.es_vlan_caps = ETHERSWITCH_VLAN_PORT | ETHERSWITCH_VLAN_DOT1Q;

	/*
	 * malloc(9) with M_WAITOK cannot return NULL, so no failure
	 * checks are needed here (the old NULL checks were dead code).
	 */
	sc->ifp = malloc(sizeof(if_t) * sc->numports, M_KSZ8995MA,
	    M_WAITOK | M_ZERO);
	sc->ifname = malloc(sizeof(char *) * sc->numports, M_KSZ8995MA,
	    M_WAITOK | M_ZERO);
	sc->miibus = malloc(sizeof(device_t *) * sc->numports, M_KSZ8995MA,
	    M_WAITOK | M_ZERO);
	sc->portphy = malloc(sizeof(int) * sc->numports, M_KSZ8995MA,
	    M_WAITOK | M_ZERO);

	/*
	 * Attach the PHYs and complete the bus enumeration.
	 */
	err = ksz8995ma_attach_phys(sc);
	if (err != 0)
		goto failed;

	bus_generic_probe(dev);
	bus_enumerate_hinted_children(dev);
	err = bus_generic_attach(dev);
	if (err != 0)
		goto failed;

	callout_init(&sc->callout_tick, 0);

	ksz8995ma_tick(sc);

	/* start switch */
	sc->vlan_mode = 0;
	reg = ksz8995ma_readreg(dev, KSZ8995MA_GC3);
	ksz8995ma_writereg(dev, KSZ8995MA_GC3,
	    reg & ~KSZ8995MA_VLAN_ENABLE);
	ksz8995ma_portvlanreset(dev);
	ksz8995ma_writereg(dev, KSZ8995MA_CID1, KSZ8995MA_START);

	return (0);

failed:
	free(sc->portphy, M_KSZ8995MA);
	free(sc->miibus, M_KSZ8995MA);
	free(sc->ifname, M_KSZ8995MA);
	free(sc->ifp, M_KSZ8995MA);

	return (err);
}
/*
 * Detach: stop the polling callout, delete the per-PHY miibus children
 * and pseudo interfaces, free the per-port arrays, and destroy the
 * mutex.  Always returns 0.
 */
static int
ksz8995ma_detach(device_t dev)
{
	struct ksz8995ma_softc *sc;
	int i, port;

	sc = device_get_softc(dev);

	callout_drain(&sc->callout_tick);

	/* Walk PHYs and translate to port indices for teardown. */
	for (i = 0; i < KSZ8995MA_MAX_PORT; i++) {
		if (((1 << i) & sc->phymask) == 0)
			continue;
		port = ksz8995ma_portforphy(sc, i);
		if (sc->miibus[port] != NULL)
			device_delete_child(dev, (*sc->miibus[port]));
		if (sc->ifp[port] != NULL)
			if_free(sc->ifp[port]);
		free(sc->ifname[port], M_KSZ8995MA);
		free(sc->miibus[port], M_KSZ8995MA);
	}

	free(sc->portphy, M_KSZ8995MA);
	free(sc->miibus, M_KSZ8995MA);
	free(sc->ifname, M_KSZ8995MA);
	free(sc->ifp, M_KSZ8995MA);

	bus_generic_detach(dev);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}
/*
* Convert PHY number to port number.
*/
static inline int
ksz8995ma_portforphy(struct ksz8995ma_softc *sc, int phy)
{

	/* The mapping is recorded in ifpport[] at attach time. */
	return (sc->ifpport[phy]);
}
/*
 * Map a port number to the mii_data of its attached PHY.  Returns NULL
 * for out-of-range ports and for the CPU port (which has no PHY/miibus
 * instance).
 */
static inline struct mii_data *
ksz8995ma_miiforport(struct ksz8995ma_softc *sc, int port)
{

	/*
	 * Valid indices are 0 .. numports-1.  The previous check used
	 * '>', which let port == numports read one element past the end
	 * of the miibus array.
	 */
	if (port < 0 || port >= sc->numports)
		return (NULL);
	if (port == sc->cpuport)
		return (NULL);
	return (device_get_softc(*sc->miibus[port]));
}
/*
 * Map a port number to its pseudo interface.  Returns NULL for
 * out-of-range ports.
 */
static inline if_t
ksz8995ma_ifpforport(struct ksz8995ma_softc *sc, int port)
{

	/*
	 * Valid indices are 0 .. numports-1.  The previous check used
	 * '>', which let port == numports read one element past the end
	 * of the ifp array.
	 */
	if (port < 0 || port >= sc->numports)
		return (NULL);
	return (sc->ifp[port]);
}
/*
* Poll the status for all PHYs.
*/
/*
 * Poll the status of every managed PHY (the CPU port excluded): refresh
 * the currently selected mii instance via ukphy_status() and propagate
 * the result with mii_phy_update().  Runs from the tick callout,
 * without the softc lock held.
 */
static void
ksz8995ma_miipollstat(struct ksz8995ma_softc *sc)
{
	int i, port;
	struct mii_data *mii;
	struct mii_softc *miisc;

	KSZ8995MA_LOCK_ASSERT(sc, MA_NOTOWNED);

	for (i = 0; i < KSZ8995MA_MAX_PORT; i++) {
		/* The CPU port has no PHY attached. */
		if (i == sc->cpuport)
			continue;
		if (((1 << i) & sc->phymask) == 0)
			continue;
		port = ksz8995ma_portforphy(sc, i);
		/* Skip PHYs whose miibus child failed to attach. */
		if ((*sc->miibus[port]) == NULL)
			continue;
		mii = device_get_softc(*sc->miibus[port]);
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
			/* Only update the currently selected instance. */
			if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) !=
			    miisc->mii_inst)
				continue;
			ukphy_status(miisc);
			mii_phy_update(miisc, MII_POLLSTAT);
		}
	}
}
/*
 * Once-a-second timer: refresh PHY link status and re-arm.
 */
static void
ksz8995ma_tick(void *arg)
{
	struct ksz8995ma_softc *sc = arg;

	ksz8995ma_miipollstat(sc);
	callout_reset(&sc->callout_tick, hz, ksz8995ma_tick, sc);
}
/*
 * etherswitch lock method: take the softc mutex (must not be held yet).
 */
static void
ksz8995ma_lock(device_t dev)
{
	struct ksz8995ma_softc *sc = device_get_softc(dev);

	KSZ8995MA_LOCK_ASSERT(sc, MA_NOTOWNED);
	KSZ8995MA_LOCK(sc);
}
/*
 * etherswitch unlock method: drop the softc mutex (must be held).
 */
static void
ksz8995ma_unlock(device_t dev)
{
	struct ksz8995ma_softc *sc = device_get_softc(dev);

	KSZ8995MA_LOCK_ASSERT(sc, MA_OWNED);
	KSZ8995MA_UNLOCK(sc);
}
/*
 * Return the static switch capability/port description.
 */
static etherswitch_info_t *
ksz8995ma_getinfo(device_t dev)
{
	struct ksz8995ma_softc *sc = device_get_softc(dev);

	return (&sc->info);
}
/*
 * Fill in an etherswitch_port_t for one port: PVID and tagging flags
 * (in 802.1q mode) plus the current media, either synthesized for the
 * CPU port or queried from the port's PHY.
 */
static int
ksz8995ma_getport(device_t dev, etherswitch_port_t *p)
{
	struct ksz8995ma_softc *sc;
	struct mii_data *mii;
	struct ifmediareq *ifmr;
	int phy, err;
	int tag1, tag2, portreg;

	sc = device_get_softc(dev);
	ifmr = &p->es_ifmr;
	if (p->es_port < 0 || p->es_port >= sc->numports)
		return (ENXIO);
	if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {
		/* PVID is split across PC3 (high 4 bits) and PC4 (low 8). */
		tag1 = ksz8995ma_readreg(dev, KSZ8995MA_PC3_BASE +
		    KSZ8995MA_PORT_SIZE * p->es_port);
		tag2 = ksz8995ma_readreg(dev, KSZ8995MA_PC4_BASE +
		    KSZ8995MA_PORT_SIZE * p->es_port);
		p->es_pvid = (tag1 & 0x0f) << 8 | tag2;
		/* Tag insert/removal flags live in PC0. */
		portreg = ksz8995ma_readreg(dev, KSZ8995MA_PC0_BASE +
		    KSZ8995MA_PORT_SIZE * p->es_port);
		if (portreg & KSZ8995MA_TAG_INS)
			p->es_flags |= ETHERSWITCH_PORT_ADDTAG;
		if (portreg & KSZ8995MA_TAG_RM)
			p->es_flags |= ETHERSWITCH_PORT_STRIPTAG;
		/* Ingress filtering / non-PVID drop flags live in PC2. */
		portreg = ksz8995ma_readreg(dev, KSZ8995MA_PC2_BASE +
		    KSZ8995MA_PORT_SIZE * p->es_port);
		if (portreg & KSZ8995MA_DROP_NONPVID)
			p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED;
		if (portreg & KSZ8995MA_INGR_FILT)
			p->es_flags |= ETHERSWITCH_PORT_INGRESS;
	}
	phy = sc->portphy[p->es_port];
	mii = ksz8995ma_miiforport(sc, p->es_port);
	if (sc->cpuport != -1 && phy == sc->cpuport) {
		/* fill in fixed values for CPU port */
		p->es_flags |= ETHERSWITCH_PORT_CPU;
		ifmr->ifm_count = 0;
		/* sc->media selects 100 vs. 1000 Mb/s for the CPU link. */
		if (sc->media == 100)
			ifmr->ifm_current = ifmr->ifm_active =
			    IFM_ETHER | IFM_100_TX | IFM_FDX;
		else
			ifmr->ifm_current = ifmr->ifm_active =
			    IFM_ETHER | IFM_1000_T | IFM_FDX;
		ifmr->ifm_mask = 0;
		ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
	} else if (mii != NULL) {
		/* Regular port: ask the PHY via the standard media ioctl. */
		err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr,
		    &mii->mii_media, SIOCGIFMEDIA);
		if (err)
			return (err);
	} else {
		return (ENXIO);
	}
	return (0);
}
/*
 * Program one port from an etherswitch_port_t: PVID and tagging flags
 * (in 802.1q mode), then the port's media via its PHY.
 *
 * Fix: the result of ifmedia_ioctl() was assigned to 'err' but then
 * discarded — the function always returned 0, hiding media-set
 * failures from the caller.  Propagate it, as mtkswitch_setport() and
 * the other etherswitch drivers do.
 */
static int
ksz8995ma_setport(device_t dev, etherswitch_port_t *p)
{
	struct ksz8995ma_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;
	if_t ifp;
	int phy, err;
	int portreg;

	sc = device_get_softc(dev);
	if (p->es_port < 0 || p->es_port >= sc->numports)
		return (ENXIO);

	if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {
		/* PVID: low byte to PC4, high nibble into PC3. */
		ksz8995ma_writereg(dev, KSZ8995MA_PC4_BASE +
		    KSZ8995MA_PORT_SIZE * p->es_port, p->es_pvid & 0xff);
		portreg = ksz8995ma_readreg(dev, KSZ8995MA_PC3_BASE +
		    KSZ8995MA_PORT_SIZE * p->es_port);
		ksz8995ma_writereg(dev, KSZ8995MA_PC3_BASE +
		    KSZ8995MA_PORT_SIZE * p->es_port,
		    (portreg & 0xf0) | ((p->es_pvid >> 8) & 0x0f));
		/* Tag insertion/removal bits in PC0. */
		portreg = ksz8995ma_readreg(dev, KSZ8995MA_PC0_BASE +
		    KSZ8995MA_PORT_SIZE * p->es_port);
		if (p->es_flags & ETHERSWITCH_PORT_ADDTAG)
			portreg |= KSZ8995MA_TAG_INS;
		else
			portreg &= ~KSZ8995MA_TAG_INS;
		if (p->es_flags & ETHERSWITCH_PORT_STRIPTAG)
			portreg |= KSZ8995MA_TAG_RM;
		else
			portreg &= ~KSZ8995MA_TAG_RM;
		ksz8995ma_writereg(dev, KSZ8995MA_PC0_BASE +
		    KSZ8995MA_PORT_SIZE * p->es_port, portreg);
		/* Untagged-drop / ingress-filter bits in PC2. */
		portreg = ksz8995ma_readreg(dev, KSZ8995MA_PC2_BASE +
		    KSZ8995MA_PORT_SIZE * p->es_port);
		if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED)
			portreg |= KSZ8995MA_DROP_NONPVID;
		else
			portreg &= ~KSZ8995MA_DROP_NONPVID;
		if (p->es_flags & ETHERSWITCH_PORT_INGRESS)
			portreg |= KSZ8995MA_INGR_FILT;
		else
			portreg &= ~KSZ8995MA_INGR_FILT;
		ksz8995ma_writereg(dev, KSZ8995MA_PC2_BASE +
		    KSZ8995MA_PORT_SIZE * p->es_port, portreg);
	}

	phy = sc->portphy[p->es_port];
	mii = ksz8995ma_miiforport(sc, p->es_port);
	if (phy != sc->cpuport) {
		/* Media changes go through the port's PHY. */
		if (mii == NULL)
			return (ENXIO);
		ifp = ksz8995ma_ifpforport(sc, p->es_port);
		ifm = &mii->mii_media;
		err = ifmedia_ioctl(ifp, &p->es_ifr, ifm, SIOCSIFMEDIA);
		return (err);
	}
	/* CPU port media is fixed; nothing more to do. */
	return (0);
}
/*
 * Read one VLAN group.  In port-VLAN mode the group number is the port
 * and membership comes straight from PC1; in 802.1q mode the entry is
 * fetched through the indirect-access (IAC/IDR) registers.
 */
static int
ksz8995ma_getvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
	int data0, data1, data2;
	int vlantab;
	struct ksz8995ma_softc *sc;

	sc = device_get_softc(dev);
	if (sc->vlan_mode == ETHERSWITCH_VLAN_PORT) {
		if (vg->es_vlangroup < sc->numports) {
			vg->es_vid = ETHERSWITCH_VID_VALID;
			vg->es_vid |= vg->es_vlangroup;
			/* Low 5 bits of PC1 are the membership bitmap. */
			data0 = ksz8995ma_readreg(dev, KSZ8995MA_PC1_BASE +
			    KSZ8995MA_PORT_SIZE * vg->es_vlangroup);
			vg->es_member_ports = data0 & 0x1f;
			vg->es_untagged_ports = vg->es_member_ports;
			vg->es_fid = 0;
		} else {
			vg->es_vid = 0;
		}
	} else if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {
		/* Trigger an indirect VLAN-table read, then collect the
		 * 24-bit entry from IDR2..IDR0. */
		ksz8995ma_writereg(dev, KSZ8995MA_IAC0,
		    KSZ8995MA_VLAN_TABLE_READ);
		ksz8995ma_writereg(dev, KSZ8995MA_IAC1, vg->es_vlangroup);
		data2 = ksz8995ma_readreg(dev, KSZ8995MA_IDR2);
		data1 = ksz8995ma_readreg(dev, KSZ8995MA_IDR1);
		data0 = ksz8995ma_readreg(dev, KSZ8995MA_IDR0);
		vlantab = data2 << 16 | data1 << 8 | data0;
		if (data2 & KSZ8995MA_VLAN_TABLE_VALID) {
			/* Entry layout: [20:16] members, [15:12] FID,
			 * [11:0] VID. */
			vg->es_vid = ETHERSWITCH_VID_VALID;
			vg->es_vid |= vlantab & 0xfff;
			vg->es_member_ports = (vlantab >> 16) & 0x1f;
			vg->es_untagged_ports = vg->es_member_ports;
			vg->es_fid = (vlantab >> 12) & 0x0f;
		} else {
			/* NOTE(review): only es_fid is cleared here; es_vid
			 * (and member ports) keep stale caller-supplied
			 * values for an invalid entry — the port-VLAN branch
			 * clears es_vid instead.  Looks inconsistent;
			 * confirm intended behavior. */
			vg->es_fid = 0;
		}
	}
	return (0);
}
/*
 * Write one VLAN group: PC1 membership bits in port-VLAN mode, or an
 * indirect VLAN-table entry (via IDR/IAC) in 802.1q mode.  An empty
 * member set invalidates the 802.1q entry.
 */
static int
ksz8995ma_setvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
	struct ksz8995ma_softc *sc;
	int data0;

	sc = device_get_softc(dev);
	if (sc->vlan_mode == ETHERSWITCH_VLAN_PORT) {
		/* Preserve PC1 upper bits, replace the 5 membership bits. */
		data0 = ksz8995ma_readreg(dev, KSZ8995MA_PC1_BASE +
		    KSZ8995MA_PORT_SIZE * vg->es_vlangroup);
		ksz8995ma_writereg(dev, KSZ8995MA_PC1_BASE +
		    KSZ8995MA_PORT_SIZE * vg->es_vlangroup,
		    (data0 & 0xe0) | (vg->es_member_ports & 0x1f));
	} else if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {
		if (vg->es_member_ports != 0) {
			/* Stage valid bit + members / FID+VID high / VID low
			 * into the indirect data registers. */
			ksz8995ma_writereg(dev, KSZ8995MA_IDR2,
			    KSZ8995MA_VLAN_TABLE_VALID |
			    (vg->es_member_ports & 0x1f));
			ksz8995ma_writereg(dev, KSZ8995MA_IDR1,
			    vg->es_fid << 4 | vg->es_vid >> 8);
			ksz8995ma_writereg(dev, KSZ8995MA_IDR0,
			    vg->es_vid & 0xff);
		} else {
			/* No members: clear the entry (valid bit off). */
			ksz8995ma_writereg(dev, KSZ8995MA_IDR2, 0);
			ksz8995ma_writereg(dev, KSZ8995MA_IDR1, 0);
			ksz8995ma_writereg(dev, KSZ8995MA_IDR0, 0);
		}
		/* Commit the staged entry to the table slot. */
		ksz8995ma_writereg(dev, KSZ8995MA_IAC0,
		    KSZ8995MA_VLAN_TABLE_WRITE);
		ksz8995ma_writereg(dev, KSZ8995MA_IAC1, vg->es_vlangroup);
	}
	return (0);
}
/*
 * Report the current switch configuration; only the VLAN mode is
 * exposed by this driver.
 */
static int
ksz8995ma_getconf(device_t dev, etherswitch_conf_t *conf)
{
	struct ksz8995ma_softc *sc = device_get_softc(dev);

	conf->cmd = ETHERSWITCH_CONF_VLAN_MODE;
	conf->vlan_mode = sc->vlan_mode;
	return (0);
}
/*
 * Reset port-based VLAN membership: make every port a member of all
 * five port VLANs again (low 5 bits of each PC1 register), preserving
 * the other PC1 bits.
 */
static void
ksz8995ma_portvlanreset(device_t dev)
{
	struct ksz8995ma_softc *sc = device_get_softc(dev);
	int port, reg;

	for (port = 0; port < sc->numports; port++) {
		reg = ksz8995ma_readreg(dev, KSZ8995MA_PC1_BASE +
		    KSZ8995MA_PORT_SIZE * port);
		ksz8995ma_writereg(dev, KSZ8995MA_PC1_BASE +
		    KSZ8995MA_PORT_SIZE * port, (reg & 0xe0) | 0x1f);
	}
}
/*
 * Apply a configuration change; only the VLAN mode is supported.
 * 802.1q mode turns the VLAN table on; port-VLAN mode and "no VLANs"
 * both turn it off and restore full port-VLAN membership.
 */
static int
ksz8995ma_setconf(device_t dev, etherswitch_conf_t *conf)
{
	struct ksz8995ma_softc *sc = device_get_softc(dev);
	int gc3;

	if ((conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) == 0)
		return (0);

	gc3 = ksz8995ma_readreg(dev, KSZ8995MA_GC3);
	if (conf->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {
		sc->vlan_mode = ETHERSWITCH_VLAN_DOT1Q;
		ksz8995ma_writereg(dev, KSZ8995MA_GC3,
		    gc3 | KSZ8995MA_VLAN_ENABLE);
	} else {
		/* Port-VLAN and "off" differ only in the recorded mode. */
		sc->vlan_mode = (conf->vlan_mode == ETHERSWITCH_VLAN_PORT) ?
		    ETHERSWITCH_VLAN_PORT : 0;
		ksz8995ma_writereg(dev, KSZ8995MA_GC3,
		    gc3 & ~KSZ8995MA_VLAN_ENABLE);
		ksz8995ma_portvlanreset(dev);
	}
	return (0);
}
/*
 * MII status-change callback.  Nothing to program here; just trace.
 */
static void
ksz8995ma_statchg(device_t dev)
{
	DPRINTF(dev, "%s\n", __func__);
}
/*
 * ifmedia "change media" handler for a port's pseudo-interface:
 * forward to the MII layer for that port.
 */
static int
ksz8995ma_ifmedia_upd(if_t ifp)
{
	struct ksz8995ma_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii = ksz8995ma_miiforport(sc, if_getdunit(ifp));

	DPRINTF(sc->sc_dev, "%s\n", __func__);
	if (mii == NULL)
		return (ENXIO);
	mii_mediachg(mii);
	return (0);
}
/*
 * ifmedia "report status" handler: poll the port's PHY and copy the
 * active media and link status into the request.
 */
static void
ksz8995ma_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct ksz8995ma_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii = ksz8995ma_miiforport(sc, if_getdunit(ifp));

	DPRINTF(sc->sc_dev, "%s\n", __func__);
	if (mii == NULL)
		return;
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}
/*
 * Simulate MII register reads.  The KSZ8995MA has no MDIO connection,
 * so standard MII registers are synthesized from the switch's port
 * status/control registers read over SPI.
 */
static int
ksz8995ma_readphy(device_t dev, int phy, int reg)
{
	int portreg;

	/*
	 * This is no mdio/mdc connection code.
	 * simulate MIIM Registers via the SPI interface
	 */
	if (reg == MII_BMSR) {
		/* Link (bit 5) and autoneg-complete (bit 6) from PS0. */
		portreg = ksz8995ma_readreg(dev, KSZ8995MA_PS0_BASE +
		    KSZ8995MA_PORT_SIZE * phy);
		return (KSZ8995MA_MII_STAT |
		    (portreg & 0x20 ? BMSR_LINK : 0x00) |
		    (portreg & 0x40 ? BMSR_ACOMP : 0x00));
	} else if (reg == MII_PHYIDR1) {
		return (KSZ8995MA_MII_PHYID_H);
	} else if (reg == MII_PHYIDR2) {
		return (KSZ8995MA_MII_PHYID_L);
	} else if (reg == MII_ANAR) {
		/* Advertised abilities kept in the low nibble of PC12. */
		portreg = ksz8995ma_readreg(dev, KSZ8995MA_PC12_BASE +
		    KSZ8995MA_PORT_SIZE * phy);
		return (KSZ8995MA_MII_AA | (portreg & 0x0f) << 5);
	} else if (reg == MII_ANLPAR) {
		/* Link-partner abilities from PS0, low nibble. */
		portreg = ksz8995ma_readreg(dev, KSZ8995MA_PS0_BASE +
		    KSZ8995MA_PORT_SIZE * phy);
		return (((portreg & 0x0f) << 5) | 0x01);
	}
	/* All other MII registers read as zero. */
	return (0);
}
/*
 * Simulate MII register writes (no MDIO on this chip): map BMCR and
 * ANAR writes onto the switch's per-port control registers over SPI.
 * Writes to other MII registers are silently ignored.
 */
static int
ksz8995ma_writephy(device_t dev, int phy, int reg, int data)
{
	int portreg;

	/*
	 * This is no mdio/mdc connection code.
	 * simulate MIIM Registers via the SPI interface
	 */
	if (reg == MII_BMCR) {
		/* Power-down and restart-autoneg map onto PC13 bits. */
		portreg = ksz8995ma_readreg(dev, KSZ8995MA_PC13_BASE +
		    KSZ8995MA_PORT_SIZE * phy);
		if (data & BMCR_PDOWN)
			portreg |= KSZ8995MA_PDOWN;
		else
			portreg &= ~KSZ8995MA_PDOWN;
		if (data & BMCR_STARTNEG)
			portreg |= KSZ8995MA_STARTNEG;
		else
			portreg &= ~KSZ8995MA_STARTNEG;
		ksz8995ma_writereg(dev, KSZ8995MA_PC13_BASE +
		    KSZ8995MA_PORT_SIZE * phy, portreg);
	} else if (reg == MII_ANAR) {
		/* Advertisement bits [8:5] stored in PC12's low nibble. */
		portreg = ksz8995ma_readreg(dev, KSZ8995MA_PC12_BASE +
		    KSZ8995MA_PORT_SIZE * phy);
		portreg &= 0xf;
		portreg |= ((data >> 5) & 0x0f);
		ksz8995ma_writereg(dev, KSZ8995MA_PC12_BASE +
		    KSZ8995MA_PORT_SIZE * phy, portreg);
	}
	return (0);
}
/*
 * Read one 8-bit switch register over SPI: 3-byte transaction of
 * {READ opcode, address, dummy}; the register value comes back in the
 * third receive byte.
 *
 * NOTE(review): a transfer error returns 0, indistinguishable from a
 * register that reads as zero — forced by the etherswitch_readreg
 * interface, which has no separate error channel.
 */
static int
ksz8995ma_readreg(device_t dev, int addr)
{
	uint8_t txBuf[8], rxBuf[8];
	struct spi_command cmd;
	int err;

	memset(&cmd, 0, sizeof(cmd));
	memset(txBuf, 0, sizeof(txBuf));
	memset(rxBuf, 0, sizeof(rxBuf));

	/* read spi */
	txBuf[0] = KSZ8995MA_SPI_READ;
	txBuf[1] = addr;
	cmd.tx_cmd = &txBuf;
	cmd.rx_cmd = &rxBuf;
	cmd.tx_cmd_sz = 3;
	cmd.rx_cmd_sz = 3;
	err = SPIBUS_TRANSFER(device_get_parent(dev), dev, &cmd);
	if (err)
		return(0);

	return (rxBuf[2]);
}
/*
 * Write one 8-bit switch register over SPI: 3-byte transaction of
 * {WRITE opcode, address, value}.
 *
 * Fix: the original did "if (err) return(0);" and then "return (0);",
 * so an SPI transfer failure was reported as success.  Propagate the
 * SPIBUS_TRANSFER() error instead (etherswitch_writereg returns an
 * errno-style int).
 */
static int
ksz8995ma_writereg(device_t dev, int addr, int value)
{
	uint8_t txBuf[8], rxBuf[8];
	struct spi_command cmd;
	int err;

	memset(&cmd, 0, sizeof(cmd));
	memset(txBuf, 0, sizeof(txBuf));
	memset(rxBuf, 0, sizeof(rxBuf));

	/* write spi */
	txBuf[0] = KSZ8995MA_SPI_WRITE;
	txBuf[1] = addr;
	txBuf[2] = value;
	cmd.tx_cmd = &txBuf;
	cmd.rx_cmd = &rxBuf;
	cmd.tx_cmd_sz = 3;
	cmd.rx_cmd_sz = 3;
	err = SPIBUS_TRANSFER(device_get_parent(dev), dev, &cmd);

	return (err);
}
static device_method_t ksz8995ma_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, ksz8995ma_probe),
DEVMETHOD(device_attach, ksz8995ma_attach),
DEVMETHOD(device_detach, ksz8995ma_detach),
/* bus interface */
DEVMETHOD(bus_add_child, device_add_child_ordered),
/* MII interface */
DEVMETHOD(miibus_readreg, ksz8995ma_readphy),
DEVMETHOD(miibus_writereg, ksz8995ma_writephy),
DEVMETHOD(miibus_statchg, ksz8995ma_statchg),
/* etherswitch interface */
DEVMETHOD(etherswitch_lock, ksz8995ma_lock),
DEVMETHOD(etherswitch_unlock, ksz8995ma_unlock),
DEVMETHOD(etherswitch_getinfo, ksz8995ma_getinfo),
DEVMETHOD(etherswitch_readreg, ksz8995ma_readreg),
DEVMETHOD(etherswitch_writereg, ksz8995ma_writereg),
DEVMETHOD(etherswitch_readphyreg, ksz8995ma_readphy),
DEVMETHOD(etherswitch_writephyreg, ksz8995ma_writephy),
DEVMETHOD(etherswitch_getport, ksz8995ma_getport),
DEVMETHOD(etherswitch_setport, ksz8995ma_setport),
DEVMETHOD(etherswitch_getvgroup, ksz8995ma_getvgroup),
DEVMETHOD(etherswitch_setvgroup, ksz8995ma_setvgroup),
DEVMETHOD(etherswitch_setconf, ksz8995ma_setconf),
DEVMETHOD(etherswitch_getconf, ksz8995ma_getconf),
DEVMETHOD_END
};
DEFINE_CLASS_0(ksz8995ma, ksz8995ma_driver, ksz8995ma_methods,
sizeof(struct ksz8995ma_softc));
DRIVER_MODULE(ksz8995ma, spibus, ksz8995ma_driver, 0, 0);
DRIVER_MODULE(miibus, ksz8995ma, miibus_driver, 0, 0);
DRIVER_MODULE(etherswitch, ksz8995ma, etherswitch_driver, 0, 0);
MODULE_VERSION(ksz8995ma, 1);
MODULE_DEPEND(ksz8995ma, spibus, 1, 1, 1); /* XXX which versions? */
MODULE_DEPEND(ksz8995ma, miibus, 1, 1, 1); /* XXX which versions? */
MODULE_DEPEND(ksz8995ma, etherswitch, 1, 1, 1); /* XXX which versions? */
diff --git a/sys/dev/etherswitch/mtkswitch/mtkswitch.c b/sys/dev/etherswitch/mtkswitch/mtkswitch.c
index 75f7cbfa6941..fa96a81b40ef 100644
--- a/sys/dev/etherswitch/mtkswitch/mtkswitch.c
+++ b/sys/dev/etherswitch/mtkswitch/mtkswitch.c
@@ -1,671 +1,665 @@
/*-
* Copyright (c) 2016 Stanislav Galabov.
* Copyright (c) 2011-2012 Stefan Bethke.
* Copyright (c) 2012 Adrian Chadd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <machine/bus.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mdio/mdio.h>
#include <dev/etherswitch/etherswitch.h>
#include <dev/etherswitch/mtkswitch/mtkswitchvar.h>
#include <dev/ofw/ofw_bus_subr.h>
#include "mdio_if.h"
#include "miibus_if.h"
#include "etherswitch_if.h"
#define DEBUG
#if defined(DEBUG)
static SYSCTL_NODE(_debug, OID_AUTO, mtkswitch, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"mtkswitch");
#endif
static inline int mtkswitch_portforphy(int phy);
static int mtkswitch_ifmedia_upd(if_t ifp);
static void mtkswitch_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr);
static void mtkswitch_tick(void *arg);
static const struct ofw_compat_data compat_data[] = {
{ "ralink,rt3050-esw", MTK_SWITCH_RT3050 },
{ "ralink,rt3352-esw", MTK_SWITCH_RT3352 },
{ "ralink,rt5350-esw", MTK_SWITCH_RT5350 },
{ "mediatek,mt7620-gsw", MTK_SWITCH_MT7620 },
{ "mediatek,mt7621-gsw", MTK_SWITCH_MT7621 },
{ "mediatek,mt7628-esw", MTK_SWITCH_MT7628 },
/* Sentinel */
{ NULL, MTK_SWITCH_NONE }
};
/*
 * Probe: match the FDT compatible string against the supported switch
 * types and stash the detected type in the (zeroed) softc for attach.
 */
static int
mtkswitch_probe(device_t dev)
{
	struct mtkswitch_softc *sc;
	mtk_switch_type switch_type;

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	switch_type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	if (switch_type == MTK_SWITCH_NONE)
		return (ENXIO);

	sc = device_get_softc(dev);
	/* Clear the softc here so attach starts from a known state. */
	bzero(sc, sizeof(*sc));
	sc->sc_switchtype = switch_type;

	device_set_desc(dev, "MTK Switch Driver");
	return (0);
}
static int
mtkswitch_attach_phys(struct mtkswitch_softc *sc)
{
int phy, err = 0;
char name[IFNAMSIZ];
/* PHYs need an interface, so we generate a dummy one */
snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->sc_dev));
for (phy = 0; phy < sc->numphys; phy++) {
if ((sc->phymap & (1u << phy)) == 0) {
sc->ifp[phy] = NULL;
sc->ifname[phy] = NULL;
sc->miibus[phy] = NULL;
continue;
}
sc->ifp[phy] = if_alloc(IFT_ETHER);
- if (sc->ifp[phy] == NULL) {
- device_printf(sc->sc_dev, "couldn't allocate ifnet structure\n");
- err = ENOMEM;
- break;
- }
-
sc->ifp[phy]->if_softc = sc;
sc->ifp[phy]->if_flags |= IFF_UP | IFF_BROADCAST |
IFF_DRV_RUNNING | IFF_SIMPLEX;
sc->ifname[phy] = malloc(strlen(name) + 1, M_DEVBUF, M_WAITOK);
bcopy(name, sc->ifname[phy], strlen(name) + 1);
if_initname(sc->ifp[phy], sc->ifname[phy],
mtkswitch_portforphy(phy));
err = mii_attach(sc->sc_dev, &sc->miibus[phy], sc->ifp[phy],
mtkswitch_ifmedia_upd, mtkswitch_ifmedia_sts,
BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
if (err != 0) {
device_printf(sc->sc_dev,
"attaching PHY %d failed\n",
phy);
} else {
DPRINTF(sc->sc_dev, "%s attached to pseudo interface "
"%s\n", device_get_nameunit(sc->miibus[phy]),
sc->ifp[phy]->if_xname);
}
}
return (err);
}
/*
 * Switch the VLAN operating mode, rejecting modes the hardware does
 * not support, and reinitialize the VLAN hardware for the new mode.
 */
static int
mtkswitch_set_vlan_mode(struct mtkswitch_softc *sc, uint32_t mode)
{
	/* Every requested bit must be within the advertised capabilities. */
	if ((mode & sc->info.es_vlan_caps) != mode)
		return (EINVAL);

	sc->vlan_mode = mode;
	sc->hal.mtkswitch_vlan_init_hw(sc);
	return (0);
}
/*
 * Attach: bind the HAL for the detected switch type, map registers,
 * reset and configure the hardware, attach the per-port PHYs, enable
 * 802.1q mode, enumerate children and start the status-poll callout.
 *
 * NOTE(review): the error paths after bus_alloc_resource_any()/
 * mtx_init() return without releasing sc_res or destroying sc_mtx —
 * looks like a leak on failed attach; confirm against newbus cleanup
 * expectations.
 */
static int
mtkswitch_attach(device_t dev)
{
	struct mtkswitch_softc *sc;
	int err = 0;
	int port, rid;

	sc = device_get_softc(dev);

	/* sc->sc_switchtype is already decided in mtkswitch_probe() */
	sc->numports = MTKSWITCH_MAX_PORTS;
	sc->numphys = MTKSWITCH_MAX_PHYS;
	sc->cpuport = MTKSWITCH_CPU_PORT;
	sc->sc_dev = dev;

	/* Attach switch related functions */
	if (sc->sc_switchtype == MTK_SWITCH_NONE) {
		device_printf(dev, "Unknown switch type\n");
		return (ENXIO);
	}

	/* Select the HAL implementation for this silicon family. */
	if (sc->sc_switchtype == MTK_SWITCH_MT7620 ||
	    sc->sc_switchtype == MTK_SWITCH_MT7621)
		mtk_attach_switch_mt7620(sc);
	else
		mtk_attach_switch_rt3050(sc);

	/* Allocate resources */
	rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL) {
		device_printf(dev, "could not map memory\n");
		return (ENXIO);
	}

	mtx_init(&sc->sc_mtx, "mtkswitch", NULL, MTX_DEF);

	/* Reset the switch */
	if (sc->hal.mtkswitch_reset(sc)) {
		DPRINTF(dev, "%s: mtkswitch_reset: failed\n", __func__);
		return (ENXIO);
	}

	err = sc->hal.mtkswitch_hw_setup(sc);
	DPRINTF(dev, "%s: hw_setup: err=%d\n", __func__, err);
	if (err != 0)
		return (err);

	err = sc->hal.mtkswitch_hw_global_setup(sc);
	DPRINTF(dev, "%s: hw_global_setup: err=%d\n", __func__, err);
	if (err != 0)
		return (err);

	/* Initialize the switch ports */
	for (port = 0; port < sc->numports; port++) {
		sc->hal.mtkswitch_port_init(sc, port);
	}

	/* Attach the PHYs and complete the bus enumeration */
	err = mtkswitch_attach_phys(sc);
	DPRINTF(dev, "%s: attach_phys: err=%d\n", __func__, err);
	if (err != 0)
		return (err);

	/* Default to ingress filters off. */
	err = mtkswitch_set_vlan_mode(sc, ETHERSWITCH_VLAN_DOT1Q);
	DPRINTF(dev, "%s: set_vlan_mode: err=%d\n", __func__, err);
	if (err != 0)
		return (err);

	bus_generic_probe(dev);
	bus_enumerate_hinted_children(dev);
	err = bus_generic_attach(dev);
	DPRINTF(dev, "%s: bus_generic_attach: err=%d\n", __func__, err);
	if (err != 0)
		return (err);

	/* Start polling link status under the softc lock. */
	callout_init_mtx(&sc->callout_tick, &sc->sc_mtx, 0);

	MTKSWITCH_LOCK(sc);
	mtkswitch_tick(sc);
	MTKSWITCH_UNLOCK(sc);

	return (0);
}
/*
 * Detach: stop the poll callout, delete the per-PHY miibus children,
 * free the pseudo-ifnets and names, then tear down the bus children
 * and the softc mutex.
 */
static int
mtkswitch_detach(device_t dev)
{
	struct mtkswitch_softc *sc = device_get_softc(dev);
	int phy;

	/* Stop polling before destroying what the callout uses. */
	callout_drain(&sc->callout_tick);

	for (phy = 0; phy < MTKSWITCH_MAX_PHYS; phy++) {
		if (sc->miibus[phy] != NULL)
			device_delete_child(dev, sc->miibus[phy]);
		if (sc->ifp[phy] != NULL)
			if_free(sc->ifp[phy]);
		/* free(NULL) is a no-op, so unconditional free is fine. */
		free(sc->ifname[phy], M_DEVBUF);
	}

	bus_generic_detach(dev);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}
/* PHY <-> port mapping is currently 1:1 */
static inline int
mtkswitch_portforphy(int phy)
{
	/* Identity mapping; kept as a function so the mapping can change. */
	return (phy);
}
/* Inverse of mtkswitch_portforphy(); also 1:1 today. */
static inline int
mtkswitch_phyforport(int port)
{
	return (port);
}
/*
 * Return the mii_data for a switch port, or NULL when the port maps to
 * no valid/attached PHY.
 */
static inline struct mii_data *
mtkswitch_miiforport(struct mtkswitch_softc *sc, int port)
{
	int phy;

	phy = mtkswitch_phyforport(port);
	if (phy < 0 || phy >= MTKSWITCH_MAX_PHYS)
		return (NULL);
	if (sc->miibus[phy] == NULL)
		return (NULL);
	return (device_get_softc(sc->miibus[phy]));
}
/*
 * Return the pseudo-ifnet for a switch port, or NULL when the port
 * maps outside the PHY range.
 */
static inline if_t
mtkswitch_ifpforport(struct mtkswitch_softc *sc, int port)
{
	int phy;

	phy = mtkswitch_phyforport(port);
	if (phy < 0 || phy >= MTKSWITCH_MAX_PHYS)
		return (NULL);
	return (sc->ifp[phy]);
}
/*
 * Convert port status to ifmedia.
 * Translates a raw hardware port-status word into the ifmedia
 * status/active pair: link validity, speed, duplex and pause flags.
 */
static void
mtkswitch_update_ifmedia(uint32_t portstatus, u_int *media_status,
    u_int *media_active)
{
	*media_active = IFM_ETHER;
	*media_status = IFM_AVALID;

	if ((portstatus & MTKSWITCH_LINK_UP) != 0)
		*media_status |= IFM_ACTIVE;
	else {
		/* No link: report IFM_NONE, nothing else to decode. */
		*media_active |= IFM_NONE;
		return;
	}

	switch (portstatus & MTKSWITCH_SPEED_MASK) {
	case MTKSWITCH_SPEED_10:
		*media_active |= IFM_10_T;
		break;
	case MTKSWITCH_SPEED_100:
		*media_active |= IFM_100_TX;
		break;
	case MTKSWITCH_SPEED_1000:
		*media_active |= IFM_1000_T;
		break;
	}

	if ((portstatus & MTKSWITCH_DUPLEX) != 0)
		*media_active |= IFM_FDX;
	else
		*media_active |= IFM_HDX;

	/* Per-direction flow-control (pause) flags. */
	if ((portstatus & MTKSWITCH_TXFLOW) != 0)
		*media_active |= IFM_ETH_TXPAUSE;
	if ((portstatus & MTKSWITCH_RXFLOW) != 0)
		*media_active |= IFM_ETH_RXPAUSE;
}
/*
 * Poll link status for every attached PHY (lock held), refresh the
 * MII layer, and flush the address table if any port's link state
 * changed since the last poll.
 */
static void
mtkswitch_miipollstat(struct mtkswitch_softc *sc)
{
	struct mii_data *mii;
	struct mii_softc *miisc;
	uint32_t portstatus;
	int i, port_flap = 0;

	MTKSWITCH_LOCK_ASSERT(sc, MA_OWNED);

	for (i = 0; i < sc->numphys; i++) {
		if (sc->miibus[i] == NULL)
			continue;
		mii = device_get_softc(sc->miibus[i]);
		portstatus = sc->hal.mtkswitch_get_port_status(sc,
		    mtkswitch_portforphy(i));

		/* If a port has flapped - mark it so we can flush the ATU */
		if (((mii->mii_media_status & IFM_ACTIVE) == 0 &&
		    (portstatus & MTKSWITCH_LINK_UP) != 0) ||
		    ((mii->mii_media_status & IFM_ACTIVE) != 0 &&
		    (portstatus & MTKSWITCH_LINK_UP) == 0)) {
			port_flap = 1;
		}

		mtkswitch_update_ifmedia(portstatus, &mii->mii_media_status,
		    &mii->mii_media_active);

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
			/* Only update the PHY instance selected as the
			 * current media. */
			if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) !=
			    miisc->mii_inst)
				continue;
			mii_phy_update(miisc, MII_POLLSTAT);
		}
	}

	/* Stale MAC entries after a flap: flush the address table once. */
	if (port_flap)
		sc->hal.mtkswitch_atu_flush(sc);
}
/*
 * Once-a-second timer: poll link status and re-arm.  Runs with the
 * softc lock held (callout initialized with callout_init_mtx).
 */
static void
mtkswitch_tick(void *arg)
{
	struct mtkswitch_softc *sc;

	sc = arg;
	mtkswitch_miipollstat(sc);
	callout_reset(&sc->callout_tick, hz, mtkswitch_tick, sc);
}
/*
 * etherswitch lock method: take the softc mutex (must not be held yet).
 */
static void
mtkswitch_lock(device_t dev)
{
	struct mtkswitch_softc *sc;

	sc = device_get_softc(dev);
	MTKSWITCH_LOCK_ASSERT(sc, MA_NOTOWNED);
	MTKSWITCH_LOCK(sc);
}
/*
 * etherswitch unlock method: drop the softc mutex (must be held).
 */
static void
mtkswitch_unlock(device_t dev)
{
	struct mtkswitch_softc *sc;

	sc = device_get_softc(dev);
	MTKSWITCH_LOCK_ASSERT(sc, MA_OWNED);
	MTKSWITCH_UNLOCK(sc);
}
/*
 * Return the static switch capability/port description.
 */
static etherswitch_info_t *
mtkswitch_getinfo(device_t dev)
{
	struct mtkswitch_softc *sc;

	sc = device_get_softc(dev);
	return (&sc->info);
}
/* True when 'port' is the switch port wired to the CPU. */
static inline int
mtkswitch_is_cpuport(struct mtkswitch_softc *sc, int port)
{
	return (sc->cpuport == port);
}
/*
 * Fill in an etherswitch_port_t: VLAN settings via the HAL, then
 * media — synthesized for the CPU port, polled from the PHY for
 * regular ports, or reported as IFM_NONE when there is no PHY.
 *
 * Fix: the bounds check used '>', accepting es_port == es_nports;
 * valid port numbers are 0 .. es_nports-1 (cf. the ksz8995ma driver's
 * '>= numports' check), so reject with '>='.
 */
static int
mtkswitch_getport(device_t dev, etherswitch_port_t *p)
{
	struct mtkswitch_softc *sc;
	struct mii_data *mii;
	struct ifmediareq *ifmr;
	int err;

	sc = device_get_softc(dev);
	if (p->es_port < 0 || p->es_port >= sc->info.es_nports)
		return (ENXIO);

	err = sc->hal.mtkswitch_port_vlan_get(sc, p);
	if (err != 0)
		return (err);

	mii = mtkswitch_miiforport(sc, p->es_port);
	if (mtkswitch_is_cpuport(sc, p->es_port)) {
		/* fill in fixed values for CPU port */
		/* XXX is this valid in all cases? */
		p->es_flags |= ETHERSWITCH_PORT_CPU;
		ifmr = &p->es_ifmr;
		ifmr->ifm_count = 0;
		ifmr->ifm_current = ifmr->ifm_active =
		    IFM_ETHER | IFM_1000_T | IFM_FDX;
		ifmr->ifm_mask = 0;
		ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
	} else if (mii != NULL) {
		/* Regular port: standard media query through the PHY. */
		err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr,
		    &mii->mii_media, SIOCGIFMEDIA);
		if (err)
			return (err);
	} else {
		/* No PHY attached: report no media. */
		ifmr = &p->es_ifmr;
		ifmr->ifm_count = 0;
		ifmr->ifm_current = ifmr->ifm_active = IFM_NONE;
		ifmr->ifm_mask = 0;
		ifmr->ifm_status = 0;
	}
	return (0);
}
/*
 * Program one port: VLAN settings via the HAL (802.1q mode only),
 * then media via the port's PHY.  Media changes on the CPU port are
 * silently accepted and ignored.
 *
 * Fix: same off-by-one as mtkswitch_getport() — reject
 * es_port == es_nports with '>=' instead of '>'.
 */
static int
mtkswitch_setport(device_t dev, etherswitch_port_t *p)
{
	int err;
	struct mtkswitch_softc *sc;
	struct ifmedia *ifm;
	struct mii_data *mii;
	if_t ifp;

	sc = device_get_softc(dev);
	if (p->es_port < 0 || p->es_port >= sc->info.es_nports)
		return (ENXIO);

	/* Port flags. */
	if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {
		err = sc->hal.mtkswitch_port_vlan_setup(sc, p);
		if (err)
			return (err);
	}

	/* Do not allow media changes on CPU port. */
	if (mtkswitch_is_cpuport(sc, p->es_port))
		return (0);

	mii = mtkswitch_miiforport(sc, p->es_port);
	if (mii == NULL)
		return (ENXIO);

	ifp = mtkswitch_ifpforport(sc, p->es_port);

	ifm = &mii->mii_media;
	return (ifmedia_ioctl(ifp, &p->es_ifr, ifm, SIOCSIFMEDIA));
}
/*
 * MII status-change callback.  Nothing to program here; just trace.
 */
static void
mtkswitch_statchg(device_t dev)
{
	DPRINTF(dev, "%s\n", __func__);
}
/*
 * ifmedia "change media" handler for a port's pseudo-interface:
 * forward to the MII layer for that port.
 */
static int
mtkswitch_ifmedia_upd(if_t ifp)
{
	struct mtkswitch_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = mtkswitch_miiforport(sc, if_getdunit(ifp));
	if (mii == NULL)
		return (ENXIO);
	mii_mediachg(mii);
	return (0);
}
/*
 * ifmedia "report status" handler: poll the port's PHY and copy the
 * active media and link status into the request.
 */
static void
mtkswitch_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct mtkswitch_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = mtkswitch_miiforport(sc, if_getdunit(ifp));
	DPRINTF(sc->sc_dev, "%s\n", __func__);
	if (mii == NULL)
		return;
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}
/*
 * Report the current switch configuration; only the VLAN mode is
 * exposed by this driver.
 */
static int
mtkswitch_getconf(device_t dev, etherswitch_conf_t *conf)
{
	struct mtkswitch_softc *sc = device_get_softc(dev);

	conf->cmd = ETHERSWITCH_CONF_VLAN_MODE;
	conf->vlan_mode = sc->vlan_mode;
	return (0);
}
/*
 * Apply a configuration change; only the VLAN mode is supported,
 * delegated to mtkswitch_set_vlan_mode().
 */
static int
mtkswitch_setconf(device_t dev, etherswitch_conf_t *conf)
{
	struct mtkswitch_softc *sc = device_get_softc(dev);
	int err;

	if ((conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) == 0)
		return (0);

	err = mtkswitch_set_vlan_mode(sc, conf->vlan_mode);
	return (err);
}
/* Read one VLAN group; delegated to the per-chip HAL. */
static int
mtkswitch_getvgroup(device_t dev, etherswitch_vlangroup_t *e)
{
	struct mtkswitch_softc *sc = device_get_softc(dev);

	return (sc->hal.mtkswitch_vlan_getvgroup(sc, e));
}
/* Write one VLAN group; delegated to the per-chip HAL. */
static int
mtkswitch_setvgroup(device_t dev, etherswitch_vlangroup_t *e)
{
	struct mtkswitch_softc *sc = device_get_softc(dev);

	return (sc->hal.mtkswitch_vlan_setvgroup(sc, e));
}
/* miibus/mdio read: delegated to the per-chip HAL. */
static int
mtkswitch_readphy(device_t dev, int phy, int reg)
{
	struct mtkswitch_softc *sc = device_get_softc(dev);

	return (sc->hal.mtkswitch_phy_read(dev, phy, reg));
}
/* miibus/mdio write: delegated to the per-chip HAL. */
static int
mtkswitch_writephy(device_t dev, int phy, int reg, int val)
{
	struct mtkswitch_softc *sc = device_get_softc(dev);

	return (sc->hal.mtkswitch_phy_write(dev, phy, reg, val));
}
/* etherswitch register read: delegated to the per-chip HAL. */
static int
mtkswitch_readreg(device_t dev, int addr)
{
	struct mtkswitch_softc *sc = device_get_softc(dev);

	return (sc->hal.mtkswitch_reg_read(dev, addr));
}
/* etherswitch register write: delegated to the per-chip HAL. */
static int
mtkswitch_writereg(device_t dev, int addr, int value)
{
	struct mtkswitch_softc *sc = device_get_softc(dev);

	return (sc->hal.mtkswitch_reg_write(dev, addr, value));
}
static device_method_t mtkswitch_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, mtkswitch_probe),
DEVMETHOD(device_attach, mtkswitch_attach),
DEVMETHOD(device_detach, mtkswitch_detach),
/* bus interface */
DEVMETHOD(bus_add_child, device_add_child_ordered),
/* MII interface */
DEVMETHOD(miibus_readreg, mtkswitch_readphy),
DEVMETHOD(miibus_writereg, mtkswitch_writephy),
DEVMETHOD(miibus_statchg, mtkswitch_statchg),
/* MDIO interface */
DEVMETHOD(mdio_readreg, mtkswitch_readphy),
DEVMETHOD(mdio_writereg, mtkswitch_writephy),
/* ehterswitch interface */
DEVMETHOD(etherswitch_lock, mtkswitch_lock),
DEVMETHOD(etherswitch_unlock, mtkswitch_unlock),
DEVMETHOD(etherswitch_getinfo, mtkswitch_getinfo),
DEVMETHOD(etherswitch_readreg, mtkswitch_readreg),
DEVMETHOD(etherswitch_writereg, mtkswitch_writereg),
DEVMETHOD(etherswitch_readphyreg, mtkswitch_readphy),
DEVMETHOD(etherswitch_writephyreg, mtkswitch_writephy),
DEVMETHOD(etherswitch_getport, mtkswitch_getport),
DEVMETHOD(etherswitch_setport, mtkswitch_setport),
DEVMETHOD(etherswitch_getvgroup, mtkswitch_getvgroup),
DEVMETHOD(etherswitch_setvgroup, mtkswitch_setvgroup),
DEVMETHOD(etherswitch_getconf, mtkswitch_getconf),
DEVMETHOD(etherswitch_setconf, mtkswitch_setconf),
DEVMETHOD_END
};
DEFINE_CLASS_0(mtkswitch, mtkswitch_driver, mtkswitch_methods,
sizeof(struct mtkswitch_softc));
DRIVER_MODULE(mtkswitch, simplebus, mtkswitch_driver, 0, 0);
DRIVER_MODULE(miibus, mtkswitch, miibus_driver, 0, 0);
DRIVER_MODULE(mdio, mtkswitch, mdio_driver, 0, 0);
DRIVER_MODULE(etherswitch, mtkswitch, etherswitch_driver, 0, 0);
MODULE_VERSION(mtkswitch, 1);
MODULE_DEPEND(mtkswitch, miibus, 1, 1, 1);
MODULE_DEPEND(mtkswitch, etherswitch, 1, 1, 1);
diff --git a/sys/dev/etherswitch/rtl8366/rtl8366rb.c b/sys/dev/etherswitch/rtl8366/rtl8366rb.c
index 8a74ae189284..761a96b0ec80 100644
--- a/sys/dev/etherswitch/rtl8366/rtl8366rb.c
+++ b/sys/dev/etherswitch/rtl8366/rtl8366rb.c
@@ -1,966 +1,960 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2015-2016 Hiroki Mori.
* Copyright (c) 2011-2012 Stefan Bethke.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "opt_etherswitch.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <machine/bus.h>
#include <dev/iicbus/iic.h>
#include <dev/iicbus/iiconf.h>
#include <dev/iicbus/iicbus.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mdio/mdio.h>
#include <dev/etherswitch/etherswitch.h>
#include <dev/etherswitch/rtl8366/rtl8366rbvar.h>
#include "mdio_if.h"
#include "iicbus_if.h"
#include "miibus_if.h"
#include "etherswitch_if.h"
struct rtl8366rb_softc {
	struct mtx sc_mtx; /* serialize access to softc */
	int smi_acquired; /* serialize access to SMI/I2C bus */
	struct mtx callout_mtx; /* serialize callout */
	device_t dev;	/* our device handle */
	int vid[RTL8366_NUM_VLANS];	/* VID assigned to each VLAN group */
	char *ifname[RTL8366_NUM_PHYS];	/* names of the per-PHY pseudo ifnets */
	device_t miibus[RTL8366_NUM_PHYS];	/* miibus child per PHY */
	if_t ifp[RTL8366_NUM_PHYS];	/* pseudo ifnet per PHY */
	struct callout callout_tick;	/* periodic status-poll timer */
	etherswitch_info_t info;	/* capabilities reported to etherswitch */
	int chip_type;	/* which RTL8366 variant was detected */
	int phy4cpu;	/* nonzero when PHY4 is wired to the CPU */
	int numphys;	/* number of usable PHYs */
};
/* Softc lock helpers and SMI bus ownership flag. */
#define RTL_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
#define RTL_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
/* Fixed: expansion was "&(_s)c->sc_mtx", which does not compile when used. */
#define RTL_LOCK_ASSERT(_sc, _what) mtx_assert(&(_sc)->sc_mtx, (_what))
#define RTL_TRYLOCK(_sc) mtx_trylock(&(_sc)->sc_mtx)
#define RTL_WAITOK 0
#define RTL_NOWAIT 1
#define RTL_SMI_ACQUIRED 1
#define RTL_SMI_ACQUIRED_ASSERT(_sc) \
	KASSERT((_sc)->smi_acquired == RTL_SMI_ACQUIRED, ("smi must be acquired @%s", __FUNCTION__))
#if defined(DEBUG)
#define DPRINTF(dev, args...) device_printf(dev, args)
#define DEVERR(dev, err, fmt, args...) do { \
	if (err != 0) device_printf(dev, fmt, err, args); \
} while (0)
#define DEBUG_INCRVAR(var) do { \
	var++; \
} while (0)
/* Debug counters exported under debug.rtl8366rb.* */
static int callout_blocked = 0;
static int iic_select_retries = 0;
static int phy_access_retries = 0;
static SYSCTL_NODE(_debug, OID_AUTO, rtl8366rb, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "rtl8366rb");
SYSCTL_INT(_debug_rtl8366rb, OID_AUTO, callout_blocked, CTLFLAG_RW, &callout_blocked, 0,
    "number of times the callout couldn't acquire the bus");
SYSCTL_INT(_debug_rtl8366rb, OID_AUTO, iic_select_retries, CTLFLAG_RW, &iic_select_retries, 0,
    "number of times the I2C bus selection had to be retried");
SYSCTL_INT(_debug_rtl8366rb, OID_AUTO, phy_access_retries, CTLFLAG_RW, &phy_access_retries, 0,
    "number of times PHY register access had to be retried");
#else
#define DPRINTF(dev, args...)
#define DEVERR(dev, err, fmt, args...)
#define DEBUG_INCRVAR(var)
#endif
static int smi_probe(device_t dev);
static int smi_read(device_t dev, uint16_t addr, uint16_t *data, int sleep);
static int smi_write(device_t dev, uint16_t addr, uint16_t data, int sleep);
static int smi_rmw(device_t dev, uint16_t addr, uint16_t mask, uint16_t data, int sleep);
static void rtl8366rb_tick(void *arg);
static int rtl8366rb_ifmedia_upd(if_t);
static void rtl8366rb_ifmedia_sts(if_t, struct ifmediareq *);
/*
 * Bus identify hook: add a single rtl8366rb child on the iicbus at the
 * switch's fixed I2C address, unless one already exists.
 */
static void
rtl8366rb_identify(driver_t *driver, device_t parent)
{
	struct iicbus_ivar *ivar;
	device_t self;

	if (device_find_child(parent, "rtl8366rb", -1) != NULL)
		return;
	self = BUS_ADD_CHILD(parent, 0, "rtl8366rb", -1);
	ivar = IICBUS_IVAR(self);
	ivar->addr = RTL8366_IIC_ADDR;
}
/*
 * Probe: reset the softc, ask smi_probe() to identify the chip on the
 * I2C bus, and set the device description accordingly.
 */
static int
rtl8366rb_probe(device_t dev)
{
	struct rtl8366rb_softc *sc = device_get_softc(dev);

	bzero(sc, sizeof(*sc));
	if (smi_probe(dev) != 0)
		return (ENXIO);
	device_set_desc(dev, sc->chip_type == RTL8366RB ?
	    "RTL8366RB Ethernet Switch Controller" :
	    "RTL8366SR Ethernet Switch Controller");
	return (BUS_PROBE_DEFAULT);
}
/*
 * One-time switch initialisation: reset the chip, enable 16-VLAN mode,
 * and program a default VLAN layout (board-specific for the TL-WR1043ND:
 * port 0 as WAN on VLAN 2, remaining ports on VLAN 1).
 */
static void
rtl8366rb_init(device_t dev)
{
struct rtl8366rb_softc *sc;
int i;
sc = device_get_softc(dev);
/* Initialisation for TL-WR1043ND */
#ifdef RTL8366_SOFT_RESET
smi_rmw(dev, RTL8366_RCR,
RTL8366_RCR_SOFT_RESET,
RTL8366_RCR_SOFT_RESET, RTL_WAITOK);
#else
smi_rmw(dev, RTL8366_RCR,
RTL8366_RCR_HARD_RESET,
RTL8366_RCR_HARD_RESET, RTL_WAITOK);
#endif
/* hard reset not return ack */
/* Give the chip time to come back; the reset write is not acked. */
DELAY(100000);
/* Enable 16 VLAN mode */
smi_rmw(dev, RTL8366_SGCR,
RTL8366_SGCR_EN_VLAN | RTL8366_SGCR_EN_VLAN_4KTB,
RTL8366_SGCR_EN_VLAN, RTL_WAITOK);
/* Initialize our vlan table. */
for (i = 0; i <= 1; i++)
sc->vid[i] = (i + 1) | ETHERSWITCH_VID_VALID;
/* Remove port 0 from VLAN 1. */
smi_rmw(dev, RTL8366_VMCR(RTL8366_VMCR_MU_REG, 0),
(1 << 0), 0, RTL_WAITOK);
/* Add port 0 untagged and port 5 tagged to VLAN 2. */
smi_rmw(dev, RTL8366_VMCR(RTL8366_VMCR_MU_REG, 1),
((1 << 5 | 1 << 0) << RTL8366_VMCR_MU_MEMBER_SHIFT)
| ((1 << 5 | 1 << 0) << RTL8366_VMCR_MU_UNTAG_SHIFT),
((1 << 5 | 1 << 0) << RTL8366_VMCR_MU_MEMBER_SHIFT
| ((1 << 0) << RTL8366_VMCR_MU_UNTAG_SHIFT)),
RTL_WAITOK);
/* Set PVID 2 for port 0. */
smi_rmw(dev, RTL8366_PVCR_REG(0),
RTL8366_PVCR_VAL(0, RTL8366_PVCR_PORT_MASK),
RTL8366_PVCR_VAL(0, 1), RTL_WAITOK);
}
/*
 * Attach: init locks and the switch hardware, create one dummy ifnet per
 * PHY so that miibus can attach, then start the link-poll callout.
 * Returns 0 on success or the first attach error.
 */
static int
rtl8366rb_attach(device_t dev)
{
	struct rtl8366rb_softc *sc;
	uint16_t rev = 0;
	char name[IFNAMSIZ];
	int err = 0;
	int i;

	sc = device_get_softc(dev);
	sc->dev = dev;
	mtx_init(&sc->sc_mtx, "rtl8366rb", NULL, MTX_DEF);
	sc->smi_acquired = 0;
	mtx_init(&sc->callout_mtx, "rtl8366rbcallout", NULL, MTX_DEF);

	rtl8366rb_init(dev);
	smi_read(dev, RTL8366_CVCR, &rev, RTL_WAITOK);
	device_printf(dev, "rev. %d\n", rev & 0x000f);

	/* "phy4cpu" hint: PHY4 is the CPU uplink, not a user port. */
	sc->phy4cpu = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "phy4cpu", &sc->phy4cpu);
	sc->numphys = sc->phy4cpu ? RTL8366_NUM_PHYS - 1 : RTL8366_NUM_PHYS;

	sc->info.es_nports = sc->numphys + 1;
	sc->info.es_nvlangroups = RTL8366_NUM_VLANS;
	sc->info.es_vlan_caps = ETHERSWITCH_VLAN_DOT1Q;
	if (sc->chip_type == RTL8366RB)
		sprintf(sc->info.es_name, "Realtek RTL8366RB");
	else
		sprintf(sc->info.es_name, "Realtek RTL8366SR");

	/* attach miibus and phys */
	/* PHYs need an interface, so we generate a dummy one */
	for (i = 0; i < sc->numphys; i++) {
		/*
		 * if_alloc() sleeps until memory is available and cannot
		 * return NULL, so the historical NULL check here was dead
		 * code and has been removed.
		 */
		sc->ifp[i] = if_alloc(IFT_ETHER);
		if_setsoftc(sc->ifp[i], sc);
		if_setflagbits(sc->ifp[i], IFF_UP | IFF_BROADCAST | IFF_DRV_RUNNING
		    | IFF_SIMPLEX, 0);
		snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(dev));
		sc->ifname[i] = malloc(strlen(name)+1, M_DEVBUF, M_WAITOK);
		bcopy(name, sc->ifname[i], strlen(name)+1);
		if_initname(sc->ifp[i], sc->ifname[i], i);
		err = mii_attach(dev, &sc->miibus[i], sc->ifp[i],
		    rtl8366rb_ifmedia_upd, rtl8366rb_ifmedia_sts,
		    BMSR_DEFCAPMASK, i, MII_OFFSET_ANY, 0);
		if (err != 0) {
			device_printf(dev, "attaching PHY %d failed\n", i);
			return (err);
		}
	}

	bus_generic_probe(dev);
	bus_enumerate_hinted_children(dev);
	err = bus_generic_attach(dev);
	if (err != 0)
		return (err);

	/* Poll link state once a second. */
	callout_init_mtx(&sc->callout_tick, &sc->callout_mtx, 0);
	rtl8366rb_tick(sc);
	return (err);
}
/*
 * Detach: delete the miibus children and dummy ifnets, then stop the
 * link-poll callout and destroy the locks.  Always returns 0.
 */
static int
rtl8366rb_detach(device_t dev)
{
struct rtl8366rb_softc *sc;
int i;
sc = device_get_softc(dev);
for (i=0; i < sc->numphys; i++) {
if (sc->miibus[i])
device_delete_child(dev, sc->miibus[i]);
if (sc->ifp[i] != NULL)
if_free(sc->ifp[i]);
free(sc->ifname[i], M_DEVBUF);
}
bus_generic_detach(dev);
/*
 * NOTE(review): the callout is drained only after the miibus children
 * and ifnets are torn down, while rtl8366rb_tick() dereferences them;
 * a late-firing tick looks racy here -- confirm against callout(9).
 */
callout_drain(&sc->callout_tick);
mtx_destroy(&sc->callout_mtx);
mtx_destroy(&sc->sc_mtx);
return (0);
}
/*
 * Translate one port's RTL8366 link-status byte into ifmedia
 * status/active words (link, speed, duplex and pause flags).
 */
static void
rtl8366rb_update_ifmedia(int portstatus, u_int *media_status, u_int *media_active)
{
	u_int active, status;

	active = IFM_ETHER;
	status = IFM_AVALID;
	if ((portstatus & RTL8366_PLSR_LINK) == 0) {
		/* No link: report media as none and stop decoding. */
		*media_active = active | IFM_NONE;
		*media_status = status;
		return;
	}
	status |= IFM_ACTIVE;
	switch (portstatus & RTL8366_PLSR_SPEED_MASK) {
	case RTL8366_PLSR_SPEED_10:
		active |= IFM_10_T;
		break;
	case RTL8366_PLSR_SPEED_100:
		active |= IFM_100_TX;
		break;
	case RTL8366_PLSR_SPEED_1000:
		active |= IFM_1000_T;
		break;
	}
	active |= (portstatus & RTL8366_PLSR_FULLDUPLEX) != 0 ? IFM_FDX : IFM_HDX;
	if ((portstatus & RTL8366_PLSR_TXPAUSE) != 0)
		active |= IFM_ETH_TXPAUSE;
	if ((portstatus & RTL8366_PLSR_RXPAUSE) != 0)
		active |= IFM_ETH_RXPAUSE;
	*media_status = status;
	*media_active = active;
}
/*
 * Poll the switch's port link-status registers and push the result into
 * each attached PHY's mii_data.  Each RTL8366_PLSR register carries two
 * ports: the low byte for even port i, the high byte for port i + 1, so
 * the hardware is read only on even iterations and the cached value is
 * reused on odd ones.
 */
static void
rtl833rb_miipollstat(struct rtl8366rb_softc *sc)
{
int i;
struct mii_data *mii;
struct mii_softc *miisc;
uint16_t value;
int portstatus;
for (i = 0; i < sc->numphys; i++) {
mii = device_get_softc(sc->miibus[i]);
if ((i % 2) == 0) {
/* Non-blocking read; give up this tick if the SMI bus is busy. */
if (smi_read(sc->dev, RTL8366_PLSR_BASE + i/2, &value, RTL_NOWAIT) != 0) {
DEBUG_INCRVAR(callout_blocked);
return;
}
portstatus = value & 0xff;
} else {
/* Odd port: high byte of the register read for port i - 1. */
portstatus = (value >> 8) & 0xff;
}
rtl8366rb_update_ifmedia(portstatus, &mii->mii_media_status, &mii->mii_media_active);
LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) != miisc->mii_inst)
continue;
mii_phy_update(miisc, MII_POLLSTAT);
}
}
}
/* Periodic callout: refresh PHY link state, then rearm for 1 second. */
static void
rtl8366rb_tick(void *arg)
{
	struct rtl8366rb_softc *sc = arg;

	rtl833rb_miipollstat(sc);
	callout_reset(&sc->callout_tick, hz, rtl8366rb_tick, sc);
}
/*
 * Identify the switch over I2C.  Tries the RTL8366RB chip-id register
 * first, then the RTL8366SR one, and records the detected chip type in
 * the softc.  Returns 0 on success, ENXIO if neither chip responds.
 */
static int
smi_probe(device_t dev)
{
struct rtl8366rb_softc *sc;
device_t iicbus, iicha;
int err, i, j;
uint16_t chipid;
char bytes[2];
int xferd;
sc = device_get_softc(dev);
iicbus = device_get_parent(dev);
iicha = device_get_parent(iicbus);
/* i == 0 probes for an RB, i == 1 for an SR. */
for (i = 0; i < 2; ++i) {
iicbus_reset(iicbus, IIC_FASTEST, RTL8366_IIC_ADDR, NULL);
for (j=3; j--; ) {
IICBUS_STOP(iicha);
/*
* we go directly to the host adapter because iicbus.c
* only issues a stop on a bus that was successfully started.
*/
}
err = iicbus_request_bus(iicbus, dev, IIC_WAIT);
if (err != 0)
goto out;
err = iicbus_start(iicbus, RTL8366_IIC_ADDR | RTL_IICBUS_READ, RTL_IICBUS_TIMEOUT);
if (err != 0)
goto out;
/* Write the 16-bit chip-id register address, low byte first. */
if (i == 0) {
bytes[0] = RTL8366RB_CIR & 0xff;
bytes[1] = (RTL8366RB_CIR >> 8) & 0xff;
} else {
bytes[0] = RTL8366SR_CIR & 0xff;
bytes[1] = (RTL8366SR_CIR >> 8) & 0xff;
}
err = iicbus_write(iicbus, bytes, 2, &xferd, RTL_IICBUS_TIMEOUT);
if (err != 0)
goto out;
err = iicbus_read(iicbus, bytes, 2, &xferd, IIC_LAST_READ, 0);
if (err != 0)
goto out;
/* Chip id comes back little-endian. */
chipid = ((bytes[1] & 0xff) << 8) | (bytes[0] & 0xff);
if (i == 0 && chipid == RTL8366RB_CIR_ID8366RB) {
DPRINTF(dev, "chip id 0x%04x\n", chipid);
sc->chip_type = RTL8366RB;
err = 0;
break;
}
if (i == 1 && chipid == RTL8366SR_CIR_ID8366SR) {
DPRINTF(dev, "chip id 0x%04x\n", chipid);
sc->chip_type = RTL8366SR;
err = 0;
break;
}
/* First attempt failed: release the bus before retrying as SR. */
if (i == 0) {
iicbus_stop(iicbus);
iicbus_release_bus(iicbus, dev);
}
}
if (i == 2)
err = ENXIO;
out:
iicbus_stop(iicbus);
iicbus_release_bus(iicbus, dev);
return (err == 0 ? 0 : ENXIO);
}
static int
smi_acquire(struct rtl8366rb_softc *sc, int sleep)
{
int r = 0;
if (sleep == RTL_WAITOK)
RTL_LOCK(sc);
else
if (RTL_TRYLOCK(sc) == 0)
return (EWOULDBLOCK);
if (sc->smi_acquired == RTL_SMI_ACQUIRED)
r = EBUSY;
else {
r = iicbus_request_bus(device_get_parent(sc->dev), sc->dev, \
sleep == RTL_WAITOK ? IIC_WAIT : IIC_DONTWAIT);
if (r == 0)
sc->smi_acquired = RTL_SMI_ACQUIRED;
}
RTL_UNLOCK(sc);
return (r);
}
/*
 * Release SMI bus ownership previously taken with smi_acquire().
 * Returns 0, or EWOULDBLOCK if the softc lock cannot be taken without
 * sleeping and RTL_NOWAIT was requested.
 */
static int
smi_release(struct rtl8366rb_softc *sc, int sleep)
{
	if (sleep == RTL_WAITOK) {
		RTL_LOCK(sc);
	} else if (RTL_TRYLOCK(sc) == 0) {
		return (EWOULDBLOCK);
	}
	RTL_SMI_ACQUIRED_ASSERT(sc);
	iicbus_release_bus(device_get_parent(sc->dev), sc->dev);
	sc->smi_acquired = 0;
	RTL_UNLOCK(sc);
	return (0);
}
/*
 * Start an I2C transfer to the switch for a read or write operation,
 * retrying while the chip NACKs (it ignores commands when busy rather
 * than clock-stretching).  Caller must hold the SMI bus.
 */
static int
smi_select(device_t dev, int op, int sleep)
{
	struct rtl8366rb_softc *sc;
	int err, i, j;
	device_t iicbus;
	struct iicbus_ivar *devi;
	int slave;

	sc = device_get_softc(dev);
	iicbus = device_get_parent(dev);
	devi = IICBUS_IVAR(dev);
	slave = devi->addr;
	/* Use the softc already in hand instead of a second lookup. */
	RTL_SMI_ACQUIRED_ASSERT(sc);
	if (sc->chip_type == RTL8366SR) {
		/*
		 * RTL8366SR workaround, same as in smi_probe(): issue a few
		 * raw stops on the host adapter.  (The loop variable was
		 * renamed to avoid shadowing the outer "i".)
		 */
		for (j = 3; j--; )
			IICBUS_STOP(device_get_parent(device_get_parent(dev)));
	}
	/*
	 * The chip does not use clock stretching when it is busy,
	 * instead ignoring the command. Retry a few times.
	 */
	for (i = RTL_IICBUS_RETRIES; i--; ) {
		err = iicbus_start(iicbus, slave | op, RTL_IICBUS_TIMEOUT);
		if (err != IIC_ENOACK)
			break;
		if (sleep == RTL_WAITOK) {
			DEBUG_INCRVAR(iic_select_retries);
			pause("smi_select", RTL_IICBUS_RETRY_SLEEP);
		} else
			break;
	}
	return (err);
}
/*
 * Read one 16-bit switch register with the SMI bus already held:
 * select the chip for reading, write the register address (low byte
 * first), then read back the little-endian data word.
 */
static int
smi_read_locked(struct rtl8366rb_softc *sc, uint16_t addr, uint16_t *data, int sleep)
{
int err;
device_t iicbus;
char bytes[2];
int xferd;
iicbus = device_get_parent(sc->dev);
RTL_SMI_ACQUIRED_ASSERT(sc);
bytes[0] = addr & 0xff;
bytes[1] = (addr >> 8) & 0xff;
err = smi_select(sc->dev, RTL_IICBUS_READ, sleep);
if (err != 0)
goto out;
err = iicbus_write(iicbus, bytes, 2, &xferd, RTL_IICBUS_TIMEOUT);
if (err != 0)
goto out;
err = iicbus_read(iicbus, bytes, 2, &xferd, IIC_LAST_READ, 0);
if (err != 0)
goto out;
*data = ((bytes[1] & 0xff) << 8) | (bytes[0] & 0xff);
out:
/* Always terminate the transfer, even on error. */
iicbus_stop(iicbus);
return (err);
}
/*
 * Write one 16-bit switch register with the SMI bus already held:
 * select the chip for writing, then send address and data, each as a
 * little-endian 16-bit pair, in a single 4-byte transfer.
 */
static int
smi_write_locked(struct rtl8366rb_softc *sc, uint16_t addr, uint16_t data, int sleep)
{
int err;
device_t iicbus;
char bytes[4];
int xferd;
iicbus = device_get_parent(sc->dev);
RTL_SMI_ACQUIRED_ASSERT(sc);
bytes[0] = addr & 0xff;
bytes[1] = (addr >> 8) & 0xff;
bytes[2] = data & 0xff;
bytes[3] = (data >> 8) & 0xff;
err = smi_select(sc->dev, RTL_IICBUS_WRITE, sleep);
if (err == 0)
err = iicbus_write(iicbus, bytes, 4, &xferd, RTL_IICBUS_TIMEOUT);
/* Always terminate the transfer, even on error. */
iicbus_stop(iicbus);
return (err);
}
/*
 * Acquire the SMI bus, read one switch register, and release the bus.
 * Returns 0, EBUSY if the bus could not be acquired, or EIO on failure.
 */
static int
smi_read(device_t dev, uint16_t addr, uint16_t *data, int sleep)
{
	struct rtl8366rb_softc *sc = device_get_softc(dev);
	int error;

	if (smi_acquire(sc, sleep) != 0)
		return (EBUSY);
	error = smi_read_locked(sc, addr, data, sleep);
	smi_release(sc, sleep);
	DEVERR(dev, error, "smi_read()=%d: addr=%04x\n", addr);
	return (error == 0 ? 0 : EIO);
}
/*
 * Acquire the SMI bus, write one switch register, and release the bus.
 * Returns 0, EBUSY if the bus could not be acquired, or EIO on failure.
 */
static int
smi_write(device_t dev, uint16_t addr, uint16_t data, int sleep)
{
	struct rtl8366rb_softc *sc = device_get_softc(dev);
	int error;

	if (smi_acquire(sc, sleep) != 0)
		return (EBUSY);
	error = smi_write_locked(sc, addr, data, sleep);
	smi_release(sc, sleep);
	DEVERR(dev, error, "smi_write()=%d: addr=%04x\n", addr);
	return (error == 0 ? 0 : EIO);
}
/*
 * Read-modify-write one switch register under the SMI bus: clear the
 * bits in "mask" and set those of "data & mask", skipping the write
 * when the value would not change.  Returns 0, EBUSY, or EIO.
 */
static int
smi_rmw(device_t dev, uint16_t addr, uint16_t mask, uint16_t data, int sleep)
{
	struct rtl8366rb_softc *sc;
	int err;
	uint16_t oldv, newv;

	sc = device_get_softc(dev);
	err = smi_acquire(sc, sleep);
	if (err != 0)
		return (EBUSY);
	/*
	 * err is necessarily 0 here, so the former "if (err == 0)" wrapper
	 * around this sequence was dead and has been removed.
	 */
	err = smi_read_locked(sc, addr, &oldv, sleep);
	if (err == 0) {
		newv = (oldv & ~mask) | (data & mask);
		if (newv != oldv)
			err = smi_write_locked(sc, addr, newv, sleep);
	}
	smi_release(sc, sleep);
	DEVERR(dev, err, "smi_rmw()=%d: addr=%04x\n", addr);
	return (err == 0 ? 0 : EIO);
}
/* etherswitch getinfo: return the capability info filled in at attach. */
static etherswitch_info_t *
rtl_getinfo(device_t dev)
{
	struct rtl8366rb_softc *sc = device_get_softc(dev);

	return (&sc->info);
}
/*
 * etherswitch readreg: blocking read of one switch register.  Errors
 * are ignored; a failed read reports as zero.
 */
static int
rtl_readreg(device_t dev, int reg)
{
	uint16_t value = 0;

	smi_read(dev, reg, &value, RTL_WAITOK);
	return (value);
}
/* etherswitch writereg: blocking write of one switch register. */
static int
rtl_writereg(device_t dev, int reg, int value)
{
	return (smi_write(dev, reg, value, RTL_WAITOK));
}
/*
 * etherswitch getport: report PVID and media state for a port.  Ports
 * 0 .. numphys-1 are PHY ports; port numphys is the CPU port.  With the
 * phy4cpu hint the CPU port maps to physical port numphys + 1.
 */
static int
rtl_getport(device_t dev, etherswitch_port_t *p)
{
struct rtl8366rb_softc *sc;
struct ifmedia *ifm;
struct mii_data *mii;
struct ifmediareq *ifmr;
uint16_t v;
int err, vlangroup;
sc = device_get_softc(dev);
ifmr = &p->es_ifmr;
if (p->es_port < 0 || p->es_port >= (sc->numphys + 1))
return (ENXIO);
/* Look up which VLAN group the port's PVID points at. */
if (sc->phy4cpu && p->es_port == sc->numphys) {
vlangroup = RTL8366_PVCR_GET(p->es_port + 1,
rtl_readreg(dev, RTL8366_PVCR_REG(p->es_port + 1)));
} else {
vlangroup = RTL8366_PVCR_GET(p->es_port,
rtl_readreg(dev, RTL8366_PVCR_REG(p->es_port)));
}
p->es_pvid = sc->vid[vlangroup] & ETHERSWITCH_VID_MASK;
if (p->es_port < sc->numphys) {
/* PHY port: let the MII layer answer the media query. */
mii = device_get_softc(sc->miibus[p->es_port]);
ifm = &mii->mii_media;
err = ifmedia_ioctl(sc->ifp[p->es_port], &p->es_ifr, ifm, SIOCGIFMEDIA);
if (err)
return (err);
} else {
/* fill in fixed values for CPU port */
p->es_flags |= ETHERSWITCH_PORT_CPU;
smi_read(dev, RTL8366_PLSR_BASE + (RTL8366_NUM_PHYS)/2, &v, RTL_WAITOK);
v = v >> (8 * ((RTL8366_NUM_PHYS) % 2));
rtl8366rb_update_ifmedia(v, &ifmr->ifm_status, &ifmr->ifm_active);
ifmr->ifm_current = ifmr->ifm_active;
ifmr->ifm_mask = 0;
/*
 * NOTE(review): this overwrites the ifm_status computed by
 * rtl8366rb_update_ifmedia() above -- confirm that the CPU port
 * should always report link up.
 */
ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
/* Return our static media list. */
if (ifmr->ifm_count > 0) {
ifmr->ifm_count = 1;
ifmr->ifm_ulist[0] = IFM_MAKEWORD(IFM_ETHER, IFM_1000_T,
IFM_FDX, 0);
} else
ifmr->ifm_count = 0;
}
return (0);
}
/*
 * etherswitch setport: program the port's PVID (by finding the VLAN
 * group holding the requested VID) and, for PHY ports, forward the
 * media change to the MII layer.
 */
static int
rtl_setport(device_t dev, etherswitch_port_t *p)
{
struct rtl8366rb_softc *sc;
int i, err, vlangroup;
struct ifmedia *ifm;
struct mii_data *mii;
int port;
sc = device_get_softc(dev);
if (p->es_port < 0 || p->es_port >= (sc->numphys + 1))
return (ENXIO);
/* Find the VLAN group whose VID matches the requested PVID. */
vlangroup = -1;
for (i = 0; i < RTL8366_NUM_VLANS; i++) {
if ((sc->vid[i] & ETHERSWITCH_VID_MASK) == p->es_pvid) {
vlangroup = i;
break;
}
}
if (vlangroup == -1)
return (ENXIO);
/* With the phy4cpu hint the CPU port is physical port numphys + 1. */
if (sc->phy4cpu && p->es_port == sc->numphys) {
port = p->es_port + 1;
} else {
port = p->es_port;
}
err = smi_rmw(dev, RTL8366_PVCR_REG(port),
RTL8366_PVCR_VAL(port, RTL8366_PVCR_PORT_MASK),
RTL8366_PVCR_VAL(port, vlangroup), RTL_WAITOK);
if (err)
return (err);
/* CPU Port */
if (p->es_port == sc->numphys)
return (0);
mii = device_get_softc(sc->miibus[p->es_port]);
ifm = &mii->mii_media;
err = ifmedia_ioctl(sc->ifp[p->es_port], &p->es_ifr, ifm, SIOCSIFMEDIA);
return (err);
}
/*
 * etherswitch getvgroup: read the VLAN group's member/untagged masks
 * and FID out of the VMCR registers.  With the phy4cpu hint, physical
 * port 5 (bit 5) is presented to userland as port 4 (bit 4).
 */
static int
rtl_getvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
struct rtl8366rb_softc *sc;
uint16_t vmcr[3];
int i;
int member, untagged;
sc = device_get_softc(dev);
for (i=0; i<RTL8366_VMCR_MULT; i++)
vmcr[i] = rtl_readreg(dev, RTL8366_VMCR(i, vg->es_vlangroup));
vg->es_vid = sc->vid[vg->es_vlangroup];
member = RTL8366_VMCR_MEMBER(vmcr);
untagged = RTL8366_VMCR_UNTAG(vmcr);
if (sc->phy4cpu) {
/* Fold physical bit 5 down onto logical bit 4. */
vg->es_member_ports = ((member & 0x20) >> 1) | (member & 0x0f);
vg->es_untagged_ports = ((untagged & 0x20) >> 1) | (untagged & 0x0f);
} else {
vg->es_member_ports = member;
vg->es_untagged_ports = untagged;
}
vg->es_fid = RTL8366_VMCR_FID(vmcr);
return (0);
}
/*
 * etherswitch setvgroup: program a VLAN group's VID, member/untagged
 * masks and FID.  The RB chip keeps the FID in a separate register;
 * the SR packs it into the member/untag word.  With the phy4cpu hint,
 * logical port bit 4 maps to physical port bit 5.
 */
static int
rtl_setvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
struct rtl8366rb_softc *sc;
int g;
int member, untagged;
sc = device_get_softc(dev);
g = vg->es_vlangroup;
sc->vid[g] = vg->es_vid;
/* VLAN group disabled ? */
if (vg->es_member_ports == 0 && vg->es_untagged_ports == 0 && vg->es_vid == 0)
return (0);
sc->vid[g] |= ETHERSWITCH_VID_VALID;
rtl_writereg(dev, RTL8366_VMCR(RTL8366_VMCR_DOT1Q_REG, g),
(vg->es_vid << RTL8366_VMCR_DOT1Q_VID_SHIFT) & RTL8366_VMCR_DOT1Q_VID_MASK);
if (sc->phy4cpu) {
/* add space at phy4 */
member = (vg->es_member_ports & 0x0f) |
((vg->es_member_ports & 0x10) << 1);
untagged = (vg->es_untagged_ports & 0x0f) |
((vg->es_untagged_ports & 0x10) << 1);
} else {
member = vg->es_member_ports;
untagged = vg->es_untagged_ports;
}
if (sc->chip_type == RTL8366RB) {
rtl_writereg(dev, RTL8366_VMCR(RTL8366_VMCR_MU_REG, g),
((member << RTL8366_VMCR_MU_MEMBER_SHIFT) & RTL8366_VMCR_MU_MEMBER_MASK) |
((untagged << RTL8366_VMCR_MU_UNTAG_SHIFT) & RTL8366_VMCR_MU_UNTAG_MASK));
/* RB: FID lives in its own register. */
rtl_writereg(dev, RTL8366_VMCR(RTL8366_VMCR_FID_REG, g),
vg->es_fid);
} else {
/* SR: FID is packed into the member/untag word. */
rtl_writereg(dev, RTL8366_VMCR(RTL8366_VMCR_MU_REG, g),
((member << RTL8366_VMCR_MU_MEMBER_SHIFT) & RTL8366_VMCR_MU_MEMBER_MASK) |
((untagged << RTL8366_VMCR_MU_UNTAG_SHIFT) & RTL8366_VMCR_MU_UNTAG_MASK) |
((vg->es_fid << RTL8366_VMCR_FID_FID_SHIFT) & RTL8366_VMCR_FID_FID_MASK));
}
return (0);
}
/* etherswitch getconf: the switch always runs in dot1q VLAN mode. */
static int
rtl_getconf(device_t dev, etherswitch_conf_t *conf)
{
	conf->vlan_mode = ETHERSWITCH_VLAN_DOT1Q;
	conf->cmd = ETHERSWITCH_CONF_VLAN_MODE;
	return (0);
}
/*
 * miibus/mdio readreg: read a PHY register through the switch's
 * indirect PHY-access registers, retrying while the chip is busy.
 * Returns the register value (or 0 on bus failure), ENXIO for an
 * invalid phy/reg, or EBUSY if the SMI bus could not be acquired.
 */
static int
rtl_readphy(device_t dev, int phy, int reg)
{
struct rtl8366rb_softc *sc;
uint16_t data;
int err, i, sleep;
sc = device_get_softc(dev);
data = 0;
if (phy < 0 || phy >= RTL8366_NUM_PHYS)
return (ENXIO);
if (reg < 0 || reg >= RTL8366_NUM_PHY_REG)
return (ENXIO);
sleep = RTL_WAITOK;
err = smi_acquire(sc, sleep);
if (err != 0)
return (EBUSY);
for (i = RTL_IICBUS_RETRIES; i--; ) {
/* Arm a PHY read, write the PHY register address, read the result. */
err = smi_write_locked(sc, RTL8366_PACR, RTL8366_PACR_READ, sleep);
if (err == 0)
err = smi_write_locked(sc, RTL8366_PHYREG(phy, 0, reg), 0, sleep);
if (err == 0) {
err = smi_read_locked(sc, RTL8366_PADR, &data, sleep);
break;
}
DEBUG_INCRVAR(phy_access_retries);
DPRINTF(dev, "rtl_readphy(): chip not responsive, retrying %d more times\n", i);
pause("rtl_readphy", RTL_IICBUS_RETRY_SLEEP);
}
smi_release(sc, sleep);
DEVERR(dev, err, "rtl_readphy()=%d: phy=%d.%02x\n", phy, reg);
return (data);
}
/*
 * miibus/mdio writereg: write a PHY register through the switch's
 * indirect PHY-access registers, retrying while the chip is busy.
 * Returns 0 on success, ENXIO for an invalid phy/reg, EBUSY if the SMI
 * bus could not be acquired, or EIO on bus failure.
 */
static int
rtl_writephy(device_t dev, int phy, int reg, int data)
{
	struct rtl8366rb_softc *sc;
	int err, i, sleep;

	sc = device_get_softc(dev);
	if (phy < 0 || phy >= RTL8366_NUM_PHYS)
		return (ENXIO);
	if (reg < 0 || reg >= RTL8366_NUM_PHY_REG)
		return (ENXIO);
	sleep = RTL_WAITOK;
	err = smi_acquire(sc, sleep);
	if (err != 0)
		return (EBUSY);
	for (i = RTL_IICBUS_RETRIES; i--; ) {
		/* Arm a PHY write, then send the value for the register. */
		err = smi_write_locked(sc, RTL8366_PACR, RTL8366_PACR_WRITE, sleep);
		if (err == 0)
			err = smi_write_locked(sc, RTL8366_PHYREG(phy, 0, reg), data, sleep);
		if (err == 0) {
			break;
		}
		DEBUG_INCRVAR(phy_access_retries);
		/* Typo fix in the debug message: "tiems" -> "times". */
		DPRINTF(dev, "rtl_writephy(): chip not responsive, retrying %d more times\n", i);
		pause("rtl_writephy", RTL_IICBUS_RETRY_SLEEP);
	}
	smi_release(sc, sleep);
	DEVERR(dev, err, "rtl_writephy()=%d: phy=%d.%02x\n", phy, reg);
	return (err == 0 ? 0 : EIO);
}
/* ifmedia change hook for the dummy per-PHY ifnets. */
static int
rtl8366rb_ifmedia_upd(if_t ifp)
{
	struct rtl8366rb_softc *sc = if_getsoftc(ifp);

	/* The ifnet unit number is the PHY index. */
	mii_mediachg(device_get_softc(sc->miibus[if_getdunit(ifp)]));
	return (0);
}
/* ifmedia status hook for the dummy per-PHY ifnets. */
static void
rtl8366rb_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct rtl8366rb_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii = device_get_softc(sc->miibus[if_getdunit(ifp)]);

	/* Refresh and copy out the PHY's current media state. */
	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
/* Device methods: the driver exposes MII, MDIO and etherswitch views. */
static device_method_t rtl8366rb_methods[] = {
/* Device interface */
DEVMETHOD(device_identify, rtl8366rb_identify),
DEVMETHOD(device_probe, rtl8366rb_probe),
DEVMETHOD(device_attach, rtl8366rb_attach),
DEVMETHOD(device_detach, rtl8366rb_detach),
/* bus interface */
DEVMETHOD(bus_add_child, device_add_child_ordered),
/* MII interface */
DEVMETHOD(miibus_readreg, rtl_readphy),
DEVMETHOD(miibus_writereg, rtl_writephy),
/* MDIO interface */
DEVMETHOD(mdio_readreg, rtl_readphy),
DEVMETHOD(mdio_writereg, rtl_writephy),
/* etherswitch interface */
DEVMETHOD(etherswitch_getconf, rtl_getconf),
DEVMETHOD(etherswitch_getinfo, rtl_getinfo),
DEVMETHOD(etherswitch_readreg, rtl_readreg),
DEVMETHOD(etherswitch_writereg, rtl_writereg),
DEVMETHOD(etherswitch_readphyreg, rtl_readphy),
DEVMETHOD(etherswitch_writephyreg, rtl_writephy),
DEVMETHOD(etherswitch_getport, rtl_getport),
DEVMETHOD(etherswitch_setport, rtl_setport),
DEVMETHOD(etherswitch_getvgroup, rtl_getvgroup),
DEVMETHOD(etherswitch_setvgroup, rtl_setvgroup),
DEVMETHOD_END
};
/* Attach on iicbus; miibus, mdio and etherswitch attach below us. */
DEFINE_CLASS_0(rtl8366rb, rtl8366rb_driver, rtl8366rb_methods,
sizeof(struct rtl8366rb_softc));
DRIVER_MODULE(rtl8366rb, iicbus, rtl8366rb_driver, 0, 0);
DRIVER_MODULE(miibus, rtl8366rb, miibus_driver, 0, 0);
DRIVER_MODULE(mdio, rtl8366rb, mdio_driver, 0, 0);
DRIVER_MODULE(etherswitch, rtl8366rb, etherswitch_driver, 0, 0);
MODULE_VERSION(rtl8366rb, 1);
MODULE_DEPEND(rtl8366rb, iicbus, 1, 1, 1); /* XXX which versions? */
MODULE_DEPEND(rtl8366rb, miibus, 1, 1, 1); /* XXX which versions? */
MODULE_DEPEND(rtl8366rb, etherswitch, 1, 1, 1); /* XXX which versions? */
diff --git a/sys/dev/etherswitch/ukswitch/ukswitch.c b/sys/dev/etherswitch/ukswitch/ukswitch.c
index c1e23f7c0f65..f2cf6ef74e90 100644
--- a/sys/dev/etherswitch/ukswitch/ukswitch.c
+++ b/sys/dev/etherswitch/ukswitch/ukswitch.c
@@ -1,581 +1,575 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2013 Luiz Otavio O Souza.
* Copyright (c) 2011-2012 Stefan Bethke.
* Copyright (c) 2012 Adrian Chadd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <machine/bus.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mdio/mdio.h>
#include <dev/etherswitch/etherswitch.h>
#include "mdio_if.h"
#include "miibus_if.h"
#include "etherswitch_if.h"
MALLOC_DECLARE(M_UKSWITCH);
MALLOC_DEFINE(M_UKSWITCH, "ukswitch", "ukswitch data structures");
/* Per-device state for the generic (unknown) MDIO switch driver. */
struct ukswitch_softc {
struct mtx sc_mtx; /* serialize access to softc */
device_t sc_dev; /* our own device handle */
int media; /* cpu port media */
int cpuport; /* which PHY is connected to the CPU */
int phymask; /* PHYs we manage */
int phyoffset; /* PHYs register offset */
int numports; /* number of ports */
int ifpport[MII_NPHY]; /* PHY number -> port number */
int *portphy; /* port number -> PHY number */
char **ifname; /* names of the dummy per-port ifnets */
device_t **miibus; /* one miibus handle per port */
if_t *ifp; /* dummy ifnets backing the PHYs */
struct callout callout_tick; /* periodic link-state poll */
etherswitch_info_t info; /* capabilities reported via etherswitch */
};
/* Softc lock helpers and debug printing. */
#define UKSWITCH_LOCK(_sc) \
mtx_lock(&(_sc)->sc_mtx)
#define UKSWITCH_UNLOCK(_sc) \
mtx_unlock(&(_sc)->sc_mtx)
#define UKSWITCH_LOCK_ASSERT(_sc, _what) \
mtx_assert(&(_sc)->sc_mtx, (_what))
#define UKSWITCH_TRYLOCK(_sc) \
mtx_trylock(&(_sc)->sc_mtx)
#if defined(DEBUG)
#define DPRINTF(dev, args...) device_printf(dev, args)
#else
#define DPRINTF(dev, args...)
#endif
static inline int ukswitch_portforphy(struct ukswitch_softc *, int);
static void ukswitch_tick(void *);
static int ukswitch_ifmedia_upd(if_t);
static void ukswitch_ifmedia_sts(if_t, struct ifmediareq *);
/* Probe: always matches; start from a zeroed softc. */
static int
ukswitch_probe(device_t dev)
{
	struct ukswitch_softc *sc = device_get_softc(dev);

	bzero(sc, sizeof(*sc));
	device_set_desc(dev, "Generic MDIO switch driver");
	return (BUS_PROBE_DEFAULT);
}
/*
 * Create a dummy ifnet for each PHY selected by phymask and attach a
 * miibus instance to it, building the PHY<->port mappings as we go.
 * Returns 0, or the first mii_attach() error.
 */
static int
ukswitch_attach_phys(struct ukswitch_softc *sc)
{
	int phy, port = 0, err = 0;
	char name[IFNAMSIZ];

	/* PHYs need an interface, so we generate a dummy one */
	snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->sc_dev));
	for (phy = 0; phy < MII_NPHY; phy++) {
		if (((1 << phy) & sc->phymask) == 0)
			continue;
		sc->ifpport[phy] = port;
		sc->portphy[port] = phy;
		/*
		 * if_alloc() sleeps until memory is available and cannot
		 * return NULL, so the historical NULL check here was dead
		 * code and has been removed.
		 */
		sc->ifp[port] = if_alloc(IFT_ETHER);
		if_setsoftc(sc->ifp[port], sc);
		if_setflags(sc->ifp[port], IFF_UP | IFF_BROADCAST |
		    IFF_DRV_RUNNING | IFF_SIMPLEX);
		sc->ifname[port] = malloc(strlen(name)+1, M_UKSWITCH, M_WAITOK);
		bcopy(name, sc->ifname[port], strlen(name)+1);
		if_initname(sc->ifp[port], sc->ifname[port], port);
		sc->miibus[port] = malloc(sizeof(device_t), M_UKSWITCH,
		    M_WAITOK | M_ZERO);
		err = mii_attach(sc->sc_dev, sc->miibus[port], sc->ifp[port],
		    ukswitch_ifmedia_upd, ukswitch_ifmedia_sts,
		    BMSR_DEFCAPMASK, phy + sc->phyoffset, MII_OFFSET_ANY, 0);
		DPRINTF(sc->sc_dev, "%s attached to pseudo interface %s\n",
		    device_get_nameunit(*sc->miibus[port]),
		    if_name(sc->ifp[port]));
		if (err != 0) {
			device_printf(sc->sc_dev,
			    "attaching PHY %d failed\n",
			    phy);
			break;
		}
		sc->info.es_nports = port + 1;
		if (++port >= sc->numports)
			break;
	}
	return (err);
}
/*
 * Attach: read the hint-supplied topology (numports, phymask, phyoffset,
 * cpuport, media), allocate the per-port tables, attach the PHYs and
 * start the link-poll callout.
 */
static int
ukswitch_attach(device_t dev)
{
struct ukswitch_softc *sc;
int err = 0;
sc = device_get_softc(dev);
sc->sc_dev = dev;
mtx_init(&sc->sc_mtx, "ukswitch", NULL, MTX_DEF);
strlcpy(sc->info.es_name, device_get_desc(dev),
sizeof(sc->info.es_name));
/* XXX Defaults */
sc->numports = 6;
sc->phymask = 0x0f;
sc->phyoffset = 0;
sc->cpuport = -1;
sc->media = 100;
(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
"numports", &sc->numports);
(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
"phymask", &sc->phymask);
(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
"phyoffset", &sc->phyoffset);
(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
"cpuport", &sc->cpuport);
(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
"media", &sc->media);
/* Support only fast and giga ethernet. */
if (sc->media != 100 && sc->media != 1000)
sc->media = 100;
if (sc->cpuport != -1)
/* Always attach the cpu port. */
sc->phymask |= (1 << sc->cpuport);
/* We do not support any vlan groups. */
sc->info.es_nvlangroups = 0;
/* Per-port tables, sized by the hinted number of ports. */
sc->ifp = malloc(sizeof(if_t) * sc->numports, M_UKSWITCH,
M_WAITOK | M_ZERO);
sc->ifname = malloc(sizeof(char *) * sc->numports, M_UKSWITCH,
M_WAITOK | M_ZERO);
sc->miibus = malloc(sizeof(device_t *) * sc->numports, M_UKSWITCH,
M_WAITOK | M_ZERO);
sc->portphy = malloc(sizeof(int) * sc->numports, M_UKSWITCH,
M_WAITOK | M_ZERO);
/*
* Attach the PHYs and complete the bus enumeration.
*/
err = ukswitch_attach_phys(sc);
if (err != 0)
return (err);
bus_generic_probe(dev);
bus_enumerate_hinted_children(dev);
err = bus_generic_attach(dev);
if (err != 0)
return (err);
/* Poll link state once a second. */
callout_init(&sc->callout_tick, 0);
ukswitch_tick(sc);
return (err);
}
/*
 * Detach: stop the link-poll callout, tear down the per-PHY miibus
 * children and dummy ifnets, then free the per-port tables and the
 * lock.  Always returns 0.
 */
static int
ukswitch_detach(device_t dev)
{
struct ukswitch_softc *sc = device_get_softc(dev);
int i, port;
callout_drain(&sc->callout_tick);
for (i=0; i < MII_NPHY; i++) {
if (((1 << i) & sc->phymask) == 0)
continue;
port = ukswitch_portforphy(sc, i);
if (sc->miibus[port] != NULL)
device_delete_child(dev, (*sc->miibus[port]));
if (sc->ifp[port] != NULL)
if_free(sc->ifp[port]);
free(sc->ifname[port], M_UKSWITCH);
free(sc->miibus[port], M_UKSWITCH);
}
free(sc->portphy, M_UKSWITCH);
free(sc->miibus, M_UKSWITCH);
free(sc->ifname, M_UKSWITCH);
free(sc->ifp, M_UKSWITCH);
bus_generic_detach(dev);
mtx_destroy(&sc->sc_mtx);
return (0);
}
/*
 * Convert PHY number to port number.
 * Constant-time lookup in the table built by ukswitch_attach_phys().
 */
static inline int
ukswitch_portforphy(struct ukswitch_softc *sc, int phy)
{
return (sc->ifpport[phy]);
}
/*
 * Return the mii_data for a port, or NULL for an invalid port number.
 * Valid indices are 0 .. numports-1; the old bound "port > sc->numports"
 * let port == numports read one element past the end of the miibus array.
 */
static inline struct mii_data *
ukswitch_miiforport(struct ukswitch_softc *sc, int port)
{
	if (port < 0 || port >= sc->numports)
		return (NULL);
	return (device_get_softc(*sc->miibus[port]));
}
/*
 * Return the dummy ifnet for a port, or NULL for an invalid port number.
 * Valid indices are 0 .. numports-1; the old bound "port > sc->numports"
 * let port == numports read one element past the end of the ifp array.
 */
static inline if_t
ukswitch_ifpforport(struct ukswitch_softc *sc, int port)
{
	if (port < 0 || port >= sc->numports)
		return (NULL);
	return (sc->ifp[port]);
}
/*
 * Poll the status for all PHYs.
 */
static void
ukswitch_miipollstat(struct ukswitch_softc *sc)
{
int i, port;
struct mii_data *mii;
struct mii_softc *miisc;
UKSWITCH_LOCK_ASSERT(sc, MA_NOTOWNED);
for (i = 0; i < MII_NPHY; i++) {
if (((1 << i) & sc->phymask) == 0)
continue;
port = ukswitch_portforphy(sc, i);
/*
 * NOTE(review): this checks *sc->miibus[port], but sc->miibus[port]
 * itself could be NULL if attach stopped early -- confirm.
 */
if ((*sc->miibus[port]) == NULL)
continue;
mii = device_get_softc(*sc->miibus[port]);
LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) !=
miisc->mii_inst)
continue;
ukphy_status(miisc);
mii_phy_update(miisc, MII_POLLSTAT);
}
}
}
/* Periodic callout: refresh PHY link state, then rearm for 1 second. */
static void
ukswitch_tick(void *arg)
{
	struct ukswitch_softc *sc;

	sc = arg;
	ukswitch_miipollstat(sc);
	callout_reset(&sc->callout_tick, hz, ukswitch_tick, sc);
}
/* etherswitch lock hook: take the softc lock. */
static void
ukswitch_lock(device_t dev)
{
	struct ukswitch_softc *sc;

	sc = device_get_softc(dev);
	UKSWITCH_LOCK_ASSERT(sc, MA_NOTOWNED);
	UKSWITCH_LOCK(sc);
}
/* etherswitch unlock hook: drop the softc lock. */
static void
ukswitch_unlock(device_t dev)
{
	struct ukswitch_softc *sc;

	sc = device_get_softc(dev);
	UKSWITCH_LOCK_ASSERT(sc, MA_OWNED);
	UKSWITCH_UNLOCK(sc);
}
/* etherswitch getinfo: return the info filled in at probe/attach. */
static etherswitch_info_t *
ukswitch_getinfo(device_t dev)
{
	struct ukswitch_softc *sc;

	sc = device_get_softc(dev);
	return (&sc->info);
}
/*
 * etherswitch getport: report media state for a port.  The CPU port
 * gets fixed values derived from the "media" hint; PHY ports defer to
 * the MII layer.  PVID is always 0 (no VLAN support).
 */
static int
ukswitch_getport(device_t dev, etherswitch_port_t *p)
{
struct ukswitch_softc *sc = device_get_softc(dev);
struct mii_data *mii;
struct ifmediareq *ifmr = &p->es_ifmr;
int err, phy;
if (p->es_port < 0 || p->es_port >= sc->numports)
return (ENXIO);
p->es_pvid = 0;
phy = sc->portphy[p->es_port];
mii = ukswitch_miiforport(sc, p->es_port);
if (sc->cpuport != -1 && phy == sc->cpuport) {
/* fill in fixed values for CPU port */
p->es_flags |= ETHERSWITCH_PORT_CPU;
ifmr->ifm_count = 0;
if (sc->media == 100)
ifmr->ifm_current = ifmr->ifm_active =
IFM_ETHER | IFM_100_TX | IFM_FDX;
else
ifmr->ifm_current = ifmr->ifm_active =
IFM_ETHER | IFM_1000_T | IFM_FDX;
ifmr->ifm_mask = 0;
ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
} else if (mii != NULL) {
/* PHY port: let the MII layer answer the media query. */
err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr,
&mii->mii_media, SIOCGIFMEDIA);
if (err)
return (err);
} else {
return (ENXIO);
}
return (0);
}
/*
 * etherswitch setport: forward a media change to the port's PHY.
 * The CPU port's media is fixed and cannot be changed.
 */
static int
ukswitch_setport(device_t dev, etherswitch_port_t *p)
{
struct ukswitch_softc *sc = device_get_softc(dev);
struct ifmedia *ifm;
struct mii_data *mii;
if_t ifp;
int err;
if (p->es_port < 0 || p->es_port >= sc->numports)
return (ENXIO);
if (sc->portphy[p->es_port] == sc->cpuport)
return (ENXIO);
mii = ukswitch_miiforport(sc, p->es_port);
if (mii == NULL)
return (ENXIO);
ifp = ukswitch_ifpforport(sc, p->es_port);
ifm = &mii->mii_media;
err = ifmedia_ioctl(ifp, &p->es_ifr, ifm, SIOCSIFMEDIA);
return (err);
}
/*
 * etherswitch getvgroup method: VLAN groups are not implemented by
 * this generic driver, so report an empty group and succeed.
 */
static int
ukswitch_getvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
	/* Not supported. */
	vg->es_vid = 0;
	vg->es_member_ports = vg->es_untagged_ports = 0;
	vg->es_fid = 0;
	return (0);
}
/*
 * etherswitch setvgroup method: VLAN configuration is not supported by
 * this generic driver; the request is accepted and silently ignored.
 */
static int
ukswitch_setvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
	/* Not supported. */
	return (0);
}
/*
 * miibus statchg method: nothing to program into the (registerless)
 * switch on a PHY link-state change; only log in debug builds.
 */
static void
ukswitch_statchg(device_t dev)
{
	DPRINTF(dev, "%s\n", __func__);
}
/* ifmedia "change" callback for a port ifnet: push the change to the PHY. */
static int
ukswitch_ifmedia_upd(if_t ifp)
{
	struct ukswitch_softc *softc;
	struct mii_data *miid;

	softc = if_getsoftc(ifp);
	miid = ukswitch_miiforport(softc, if_getdunit(ifp));
	DPRINTF(softc->sc_dev, "%s\n", __func__);
	if (miid == NULL)
		return (ENXIO);
	mii_mediachg(miid);
	return (0);
}
/* ifmedia "status" callback: refresh and copy out the PHY's media state. */
static void
ukswitch_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct ukswitch_softc *softc;
	struct mii_data *miid;

	softc = if_getsoftc(ifp);
	miid = ukswitch_miiforport(softc, if_getdunit(ifp));
	DPRINTF(softc->sc_dev, "%s\n", __func__);
	if (miid == NULL)
		return;
	mii_pollstat(miid);
	ifmr->ifm_active = miid->mii_media_active;
	ifmr->ifm_status = miid->mii_media_status;
}
/*
 * Read a PHY register through the parent MDIO bus, serialized by the
 * softc lock.  PHY and register numbers are 5-bit (0..31).
 */
static int
ukswitch_readphy(device_t dev, int phy, int reg)
{
	struct ukswitch_softc *softc;
	int val;

	softc = device_get_softc(dev);
	UKSWITCH_LOCK_ASSERT(softc, MA_NOTOWNED);
	if (phy < 0 || phy >= 32 || reg < 0 || reg >= 32)
		return (ENXIO);
	UKSWITCH_LOCK(softc);
	val = MDIO_READREG(device_get_parent(dev), phy, reg);
	UKSWITCH_UNLOCK(softc);
	return (val);
}
/*
 * Write a PHY register through the parent MDIO bus, serialized by the
 * softc lock.  PHY and register numbers are 5-bit (0..31).
 */
static int
ukswitch_writephy(device_t dev, int phy, int reg, int data)
{
	struct ukswitch_softc *softc;
	int rv;

	softc = device_get_softc(dev);
	UKSWITCH_LOCK_ASSERT(softc, MA_NOTOWNED);
	if (phy < 0 || phy >= 32 || reg < 0 || reg >= 32)
		return (ENXIO);
	UKSWITCH_LOCK(softc);
	rv = MDIO_WRITEREG(device_get_parent(dev), phy, reg, data);
	UKSWITCH_UNLOCK(softc);
	return (rv);
}
/*
 * etherswitch readreg method.  This generic switch exposes no
 * addressable registers; the caller must hold the lock (API contract)
 * and 0 is always returned.
 */
static int
ukswitch_readreg(device_t dev, int addr)
{
	struct ukswitch_softc *sc __diagused;

	sc = device_get_softc(dev);
	UKSWITCH_LOCK_ASSERT(sc, MA_OWNED);

	/* Not supported. */
	return (0);
}
/*
 * etherswitch writereg method.  No addressable registers exist; the
 * write is accepted and discarded.  Caller must hold the lock.
 */
static int
ukswitch_writereg(device_t dev, int addr, int value)
{
	struct ukswitch_softc *sc __diagused;

	sc = device_get_softc(dev);
	UKSWITCH_LOCK_ASSERT(sc, MA_OWNED);

	/* Not supported. */
	return (0);
}
/* Device-method dispatch table for the generic "ukswitch" driver. */
static device_method_t ukswitch_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ukswitch_probe),
	DEVMETHOD(device_attach, ukswitch_attach),
	DEVMETHOD(device_detach, ukswitch_detach),

	/* bus interface */
	DEVMETHOD(bus_add_child, device_add_child_ordered),

	/* MII interface */
	DEVMETHOD(miibus_readreg, ukswitch_readphy),
	DEVMETHOD(miibus_writereg, ukswitch_writephy),
	DEVMETHOD(miibus_statchg, ukswitch_statchg),

	/* MDIO interface */
	DEVMETHOD(mdio_readreg, ukswitch_readphy),
	DEVMETHOD(mdio_writereg, ukswitch_writephy),

	/* etherswitch interface */
	DEVMETHOD(etherswitch_lock, ukswitch_lock),
	DEVMETHOD(etherswitch_unlock, ukswitch_unlock),
	DEVMETHOD(etherswitch_getinfo, ukswitch_getinfo),
	DEVMETHOD(etherswitch_readreg, ukswitch_readreg),
	DEVMETHOD(etherswitch_writereg, ukswitch_writereg),
	DEVMETHOD(etherswitch_readphyreg, ukswitch_readphy),
	DEVMETHOD(etherswitch_writephyreg, ukswitch_writephy),
	DEVMETHOD(etherswitch_getport, ukswitch_getport),
	DEVMETHOD(etherswitch_setport, ukswitch_setport),
	DEVMETHOD(etherswitch_getvgroup, ukswitch_getvgroup),
	DEVMETHOD(etherswitch_setvgroup, ukswitch_setvgroup),

	DEVMETHOD_END
};

DEFINE_CLASS_0(ukswitch, ukswitch_driver, ukswitch_methods,
    sizeof(struct ukswitch_softc));

/*
 * Attach under mdio(4); publish the miibus, mdio and etherswitch
 * interfaces so PHYs and the etherswitch framework attach beneath us.
 */
DRIVER_MODULE(ukswitch, mdio, ukswitch_driver, 0, 0);
DRIVER_MODULE(miibus, ukswitch, miibus_driver, 0, 0);
DRIVER_MODULE(mdio, ukswitch, mdio_driver, 0, 0);
DRIVER_MODULE(etherswitch, ukswitch, etherswitch_driver, 0, 0);
MODULE_VERSION(ukswitch, 1);
MODULE_DEPEND(ukswitch, miibus, 1, 1, 1); /* XXX which versions? */
MODULE_DEPEND(ukswitch, etherswitch, 1, 1, 1); /* XXX which versions? */
diff --git a/sys/dev/firewire/if_fwe.c b/sys/dev/firewire/if_fwe.c
index 60a9806656e8..44d3425799e9 100644
--- a/sys/dev/firewire/if_fwe.c
+++ b/sys/dev/firewire/if_fwe.c
@@ -1,632 +1,628 @@
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) 2002-2003
* Hidetoshi Shimokawa. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
*
* This product includes software developed by Hidetoshi Shimokawa.
*
* 4. Neither the name of the author nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#include "opt_inet.h"
#endif
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <dev/firewire/firewire.h>
#include <dev/firewire/firewirereg.h>
#include <dev/firewire/if_fwevar.h>
/* Debug printf: only emits when the if_fwe_debug sysctl is non-zero. */
#define FWEDEBUG	if (fwedebug) if_printf
/* One xfer is reserved, so the send queue holds FWMAXQUEUE - 1 packets. */
#define TX_MAX_QUEUE	(FWMAXQUEUE - 1)

/* network interface */
static void fwe_start (if_t);
static int fwe_ioctl (if_t, u_long, caddr_t);
static void fwe_init (void *);

static void fwe_output_callback (struct fw_xfer *);
static void fwe_as_output (struct fwe_softc *, if_t);
static void fwe_as_input (struct fw_xferq *);

/* Tunables, exposed below via sysctl/loader tunables. */
static int fwedebug = 0;
static int stream_ch = 1;
static int tx_speed = 2;
static int rx_queue_len = FWMAXQUEUE;

static MALLOC_DEFINE(M_FWE, "if_fwe", "Ethernet over FireWire interface");
SYSCTL_INT(_debug, OID_AUTO, if_fwe_debug, CTLFLAG_RWTUN, &fwedebug, 0, "");
SYSCTL_DECL(_hw_firewire);
static SYSCTL_NODE(_hw_firewire, OID_AUTO, fwe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Ethernet emulation subsystem");
SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, stream_ch, CTLFLAG_RWTUN, &stream_ch, 0,
    "Stream channel to use");
SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, tx_speed, CTLFLAG_RWTUN, &tx_speed, 0,
    "Transmission speed");
SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, rx_queue_len, CTLFLAG_RWTUN, &rx_queue_len,
    0, "Length of the receive queue");
#ifdef DEVICE_POLLING
static poll_handler_t fwe_poll;

/*
 * DEVICE_POLLING handler: hand the poll down to the firewire
 * controller; a no-op while the interface is not running.
 */
static int
fwe_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct fwe_softc *fwe;
	struct firewire_comm *fc;

	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
		return (0);

	fwe = ((struct fwe_eth_softc *)if_getsoftc(ifp))->fwe;
	fc = fwe->fd.fc;
	/* Second argument selects quick mode unless status must be checked. */
	fc->poll(fc, (cmd == POLL_AND_CHECK_STATUS)?0:1, count);
	return (0);
}
#endif /* DEVICE_POLLING */
/* Create one fwe child per firewire bus, unit-matched to the parent. */
static void
fwe_identify(driver_t *driver, device_t parent)
{
	BUS_ADD_CHILD(parent, 0, "fwe", device_get_unit(parent));
}
/*
 * Probe: accept only the child whose unit number matches its parent
 * firewire bus (one fwe instance per bus).
 */
static int
fwe_probe(device_t dev)
{
	device_t parent;

	parent = device_get_parent(dev);
	if (device_get_unit(dev) != device_get_unit(parent))
		return (ENXIO);
	device_set_desc(dev, "Ethernet over FireWire");
	return (0);
}
/*
 * Attach: initialize the softc, synthesize a locally-administered MAC
 * address from the bus EUI-64, allocate the ifnet and register it with
 * the ethernet layer.  DMA resources are deferred to fwe_init().
 */
static int
fwe_attach(device_t dev)
{
	struct fwe_softc *fwe;
	if_t ifp;
	int unit, s;
	u_char eaddr[6];
	struct fw_eui64 *eui;

	fwe = ((struct fwe_softc *)device_get_softc(dev));
	unit = device_get_unit(dev);

	bzero(fwe, sizeof(struct fwe_softc));
	mtx_init(&fwe->mtx, "fwe", NULL, MTX_DEF);

	/* XXX */
	fwe->stream_ch = stream_ch;
	fwe->dma_ch = -1;

	fwe->fd.fc = device_get_ivars(dev);
	if (tx_speed < 0)
		tx_speed = fwe->fd.fc->speed;

	fwe->fd.dev = dev;
	fwe->fd.post_explore = NULL;
	fwe->eth_softc.fwe = fwe;

	/* Pre-build the async-stream packet header used for every send. */
	fwe->pkt_hdr.mode.stream.tcode = FWTCODE_STREAM;
	fwe->pkt_hdr.mode.stream.sy = 0;
	fwe->pkt_hdr.mode.stream.chtag = fwe->stream_ch;

	/* generate fake MAC address: first and last 3bytes from eui64 */
#define LOCAL (0x02)
#define GROUP (0x01)
	eui = &fwe->fd.fc->eui;
	/* Set the locally-administered bit, clear the group (multicast) bit. */
	eaddr[0] = (FW_EUI64_BYTE(eui, 0) | LOCAL) & ~GROUP;
	eaddr[1] = FW_EUI64_BYTE(eui, 1);
	eaddr[2] = FW_EUI64_BYTE(eui, 2);
	eaddr[3] = FW_EUI64_BYTE(eui, 5);
	eaddr[4] = FW_EUI64_BYTE(eui, 6);
	eaddr[5] = FW_EUI64_BYTE(eui, 7);
	printf("if_fwe%d: Fake Ethernet address: "
	    "%02x:%02x:%02x:%02x:%02x:%02x\n", unit,
	    eaddr[0], eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]);

	/* fill the rest and attach interface */
	ifp = fwe->eth_softc.ifp = if_alloc(IFT_ETHER);
-	if (ifp == NULL) {
-		device_printf(dev, "can not if_alloc()\n");
-		return (ENOSPC);
-	}
	if_setsoftc(ifp, &fwe->eth_softc);

	if_initname(ifp, device_get_name(dev), unit);
	if_setinitfn(ifp, fwe_init);
	if_setstartfn(ifp, fwe_start);
	if_setioctlfn(ifp, fwe_ioctl);
	if_setflags(ifp, (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST));
	if_setsendqlen(ifp, TX_MAX_QUEUE);

	s = splimp();
	ether_ifattach(ifp, eaddr);
	splx(s);

	/* Tell the upper layer(s) we support long frames. */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_POLLING, 0);
	if_setcapenablebit(ifp, IFCAP_VLAN_MTU, 0);

	FWEDEBUG(ifp, "interface created\n");
-	return 0;
+	return (0);
}
/*
 * Stop the interface: disable and tear down the isochronous receive DMA
 * channel, free all receive buffers and pre-allocated transmit xfers,
 * and clear the driver RUNNING/OACTIVE flags.  Safe to call when the
 * channel was never opened (dma_ch < 0).
 */
static void
fwe_stop(struct fwe_softc *fwe)
{
	struct firewire_comm *fc;
	struct fw_xferq *xferq;
	if_t ifp = fwe->eth_softc.ifp;
	struct fw_xfer *xfer, *next;
	int i;

	fc = fwe->fd.fc;
	if (fwe->dma_ch >= 0) {
		xferq = fc->ir[fwe->dma_ch];

		if (xferq->flag & FWXFERQ_RUNNING)
			fc->irx_disable(fc, fwe->dma_ch);
		/* Drop our configuration bits and unhook the input handler. */
		xferq->flag &=
			~(FWXFERQ_MODEMASK | FWXFERQ_OPEN | FWXFERQ_STREAM |
			FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_CHTAGMASK);
		xferq->hand = NULL;

		for (i = 0; i < xferq->bnchunk; i++)
			m_freem(xferq->bulkxfer[i].mbuf);
		free(xferq->bulkxfer, M_FWE);

		/* Free the pre-allocated transmit xfers. */
		for (xfer = STAILQ_FIRST(&fwe->xferlist); xfer != NULL;
					xfer = next) {
			next = STAILQ_NEXT(xfer, link);
			fw_xfer_free(xfer);
		}
		STAILQ_INIT(&fwe->xferlist);

		xferq->bulkxfer = NULL;
		fwe->dma_ch = -1;
	}

	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
}
/*
 * Detach: deregister polling if active, stop DMA, detach from the
 * ethernet layer, free the ifnet and destroy the softc mutex.
 */
static int
fwe_detach(device_t dev)
{
	struct fwe_softc *fwe;
	if_t ifp;
	int s;

	fwe = device_get_softc(dev);
	ifp = fwe->eth_softc.ifp;

#ifdef DEVICE_POLLING
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	s = splimp();
	fwe_stop(fwe);
	ether_ifdetach(ifp);
	if_free(ifp);
	splx(s);

	mtx_destroy(&fwe->mtx);
	return 0;
}
/*
 * Bring the interface up: on first call, open an isochronous receive
 * DMA channel, configure it for single-packet MCLBYTES chunks with
 * fwe_as_input as the handler, pre-fill the receive ring with mbuf
 * clusters, and pre-allocate TX_MAX_QUEUE transmit xfers.  Subsequent
 * calls just (re)enable receive DMA and mark the interface running.
 */
static void
fwe_init(void *arg)
{
	struct fwe_softc *fwe = ((struct fwe_eth_softc *)arg)->fwe;
	struct firewire_comm *fc;
	if_t ifp = fwe->eth_softc.ifp;
	struct fw_xferq *xferq;
	struct fw_xfer *xfer;
	struct mbuf *m;
	int i;

	FWEDEBUG(ifp, "initializing\n");

	/* XXX keep promiscoud mode */
	if_setflagbits(ifp, IFF_PROMISC, 0);

	fc = fwe->fd.fc;
	if (fwe->dma_ch < 0) {
		fwe->dma_ch = fw_open_isodma(fc, /* tx */0);
		if (fwe->dma_ch < 0)
			return;
		xferq = fc->ir[fwe->dma_ch];
		xferq->flag |= FWXFERQ_EXTBUF |
				FWXFERQ_HANDLER | FWXFERQ_STREAM;
		fwe->stream_ch = stream_ch;
		fwe->pkt_hdr.mode.stream.chtag = fwe->stream_ch;
		/* Low byte of the queue flags carries the channel number. */
		xferq->flag &= ~0xff;
		xferq->flag |= fwe->stream_ch & 0xff;
		/* register fwe_input handler */
		xferq->sc = (caddr_t) fwe;
		xferq->hand = fwe_as_input;
		xferq->bnchunk = rx_queue_len;
		xferq->bnpacket = 1;
		xferq->psize = MCLBYTES;
		xferq->queued = 0;
		xferq->buf = NULL;
		xferq->bulkxfer = (struct fw_bulkxfer *) malloc(
			sizeof(struct fw_bulkxfer) * xferq->bnchunk,
							M_FWE, M_WAITOK);
		STAILQ_INIT(&xferq->stvalid);
		STAILQ_INIT(&xferq->stfree);
		STAILQ_INIT(&xferq->stdma);
		xferq->stproc = NULL;
		/* Give every receive chunk a full-size mbuf cluster. */
		for (i = 0; i < xferq->bnchunk; i++) {
			m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
			xferq->bulkxfer[i].mbuf = m;
			m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree,
					&xferq->bulkxfer[i], link);
		}
		/* Pre-allocate transmit xfers so the hot path never sleeps. */
		STAILQ_INIT(&fwe->xferlist);
		for (i = 0; i < TX_MAX_QUEUE; i++) {
			xfer = fw_xfer_alloc(M_FWE);
			if (xfer == NULL)
				break;
			xfer->send.spd = tx_speed;
			xfer->fc = fwe->fd.fc;
			xfer->sc = (caddr_t)fwe;
			xfer->hand = fwe_output_callback;
			STAILQ_INSERT_TAIL(&fwe->xferlist, xfer, link);
		}
	} else
		xferq = fc->ir[fwe->dma_ch];

	/* start dma */
	if ((xferq->flag & FWXFERQ_RUNNING) == 0)
		fc->irx_enable(fc, fwe->dma_ch);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

#if 0
	/* attempt to start output */
	fwe_start(ifp);
#endif
}
/*
 * Interface ioctl handler.  Handles up/down transitions, multicast
 * (no-op: the async stream receives everything anyway), status
 * reporting and, under DEVICE_POLLING, polling enable/disable.
 * Everything else goes to ether_ioctl().
 */
static int
fwe_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct fwe_softc *fwe = ((struct fwe_eth_softc *)if_getsoftc(ifp))->fwe;
	struct ifstat *ifs = NULL;
	int s, error;

	switch (cmd) {
	case SIOCSIFFLAGS:
		s = splimp();
		if (if_getflags(ifp) & IFF_UP) {
			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
				fwe_init(&fwe->eth_softc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				fwe_stop(fwe);
		}
		/* XXX keep promiscoud mode */
		if_setflagbits(ifp, IFF_PROMISC, 0);
		splx(s);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Nothing to program; all stream traffic is received. */
		break;
	case SIOCGIFSTATUS:
		s = splimp();
		ifs = (struct ifstat *)data;
		snprintf(ifs->ascii, sizeof(ifs->ascii),
		    "\tch %d dma %d\n", fwe->stream_ch, fwe->dma_ch);
		splx(s);
		break;
	case SIOCSIFCAP:
#ifdef DEVICE_POLLING
	    {
		struct ifreq *ifr = (struct ifreq *) data;
		struct firewire_comm *fc = fwe->fd.fc;

		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(if_getcapenable(ifp) & IFCAP_POLLING)) {
			error = ether_poll_register(fwe_poll, ifp);
			if (error)
				return (error);
			/* Disable interrupts */
			fc->set_intr(fc, 0);
			if_setcapenablebit(ifp, IFCAP_POLLING, 0);
			return (error);
		}
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    if_getcapenable(ifp) & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			fc->set_intr(fc, 1);
			if_setcapenablebit(ifp, 0, IFCAP_POLLING);
			return (error);
		}
	    }
#endif /* DEVICE_POLLING */
		break;
	default:
		s = splimp();
		error = ether_ioctl(ifp, cmd, data);
		splx(s);
		return (error);
	}
	return (0);
}
/*
 * Transmit-completion callback: account errors, free the sent mbuf,
 * recycle the xfer onto the free list, and restart output in case the
 * start routine had stalled for lack of xfers.
 */
static void
fwe_output_callback(struct fw_xfer *xfer)
{
	struct fwe_softc *fwe;
	if_t ifp;
	int s;

	fwe = (struct fwe_softc *)xfer->sc;
	ifp = fwe->eth_softc.ifp;
	/* XXX error check */
	FWEDEBUG(ifp, "resp = %d\n", xfer->resp);
	if (xfer->resp != 0)
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	m_freem(xfer->mbuf);
	fw_xfer_unload(xfer);

	s = splimp();
	FWE_LOCK(fwe);
	STAILQ_INSERT_TAIL(&fwe->xferlist, xfer, link);
	FWE_UNLOCK(fwe);
	splx(s);

	/* for queue full */
	if (!if_sendq_empty(ifp))
		fwe_start(ifp);
}
/*
 * Transmit start routine.  If the DMA channel has not been opened yet
 * (interface not initialized), drain and drop everything queued,
 * counting each dropped packet as an output error; otherwise hand the
 * send queue to the async-stream output path under OACTIVE.
 */
static void
fwe_start(if_t ifp)
{
	struct fwe_softc *fwe = ((struct fwe_eth_softc *)if_getsoftc(ifp))->fwe;
	struct mbuf *m;
	int s;

	FWEDEBUG(ifp, "starting\n");

	if (fwe->dma_ch < 0) {
		FWEDEBUG(ifp, "not ready\n");

		s = splimp();
		/*
		 * Count one error per dropped packet; the old do/while
		 * also incremented OERRORS for the final NULL dequeue,
		 * over-counting by one even on an empty queue.
		 */
		while ((m = if_dequeue(ifp)) != NULL) {
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		}
		splx(s);

		return;
	}

	s = splimp();
	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);

	if (!if_sendq_empty(ifp))
		fwe_as_output(fwe, ifp);

	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	splx(s);
}
#define HDR_LEN 4
#ifndef ETHER_ALIGN
#define ETHER_ALIGN 2
#endif
/*
 * Async. stream output: pair each queued mbuf with a pre-allocated
 * xfer, build the stream header and submit it with fw_asyreq().  Stops
 * when the controller queue is nearly full, the free-xfer list is
 * empty, or the send queue drains; kicks the DMA engine if anything
 * was queued.
 */
static void
fwe_as_output(struct fwe_softc *fwe, if_t ifp)
{
	struct mbuf *m;
	struct fw_xfer *xfer;
	struct fw_xferq *xferq;
	struct fw_pkt *fp;
	int i = 0;

	xfer = NULL;
	xferq = fwe->fd.fc->atq;
	while ((xferq->queued < xferq->maxq - 1) &&
			!if_sendq_empty(ifp)) {
		FWE_LOCK(fwe);
		xfer = STAILQ_FIRST(&fwe->xferlist);
		if (xfer == NULL) {
#if 0
			printf("if_fwe: lack of xfer\n");
#endif
			FWE_UNLOCK(fwe);
			break;
		}
		STAILQ_REMOVE_HEAD(&fwe->xferlist, link);
		FWE_UNLOCK(fwe);

		m = if_dequeue(ifp);
		if (m == NULL) {
			/* Send queue drained; return the unused xfer. */
			FWE_LOCK(fwe);
			STAILQ_INSERT_HEAD(&fwe->xferlist, xfer, link);
			FWE_UNLOCK(fwe);
			break;
		}
		BPF_MTAP(ifp, m);

		/* keep ip packet alignment for alpha */
		M_PREPEND(m, ETHER_ALIGN, M_NOWAIT);
		if (m == NULL) {
			/*
			 * M_PREPEND with M_NOWAIT freed the chain and set
			 * m to NULL; count the drop and recycle the xfer
			 * instead of dereferencing a NULL mbuf below.
			 */
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			FWE_LOCK(fwe);
			STAILQ_INSERT_HEAD(&fwe->xferlist, xfer, link);
			FWE_UNLOCK(fwe);
			continue;
		}
		/* Copy the pre-built stream header and patch in the length. */
		fp = &xfer->send.hdr;
		*(uint32_t *)&xfer->send.hdr = *(int32_t *)&fwe->pkt_hdr;
		fp->mode.stream.len = m->m_pkthdr.len;
		xfer->mbuf = m;
		xfer->send.pay_len = m->m_pkthdr.len;

		if (fw_asyreq(fwe->fd.fc, -1, xfer) != 0) {
			/* error */
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			/* XXX set error code */
			fwe_output_callback(xfer);
		} else {
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
			i++;
		}
	}
#if 0
	if (i > 1)
		printf("%d queued\n", i);
#endif
	if (i > 0)
		xferq->start(fwe->fd.fc);
}
/* Async. stream input */
/*
 * Receive handler for the isochronous DMA queue: for each completed
 * chunk, replace its cluster with a fresh one, validate the packet,
 * strip the stream header plus alignment padding, and pass the frame
 * to the network stack.  Re-enables receive DMA if free chunks remain.
 */
static void
fwe_as_input(struct fw_xferq *xferq)
{
	struct mbuf *m, *m0;
	if_t ifp;
	struct fwe_softc *fwe;
	struct fw_bulkxfer *sxfer;
	struct fw_pkt *fp;
#if 0
	u_char *c;
#endif

	fwe = (struct fwe_softc *)xferq->sc;
	ifp = fwe->eth_softc.ifp;

	/* We do not need a lock here because the bottom half is serialized */
	while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) {
		STAILQ_REMOVE_HEAD(&xferq->stvalid, link);
		fp = mtod(sxfer->mbuf, struct fw_pkt *);
		if (fwe->fd.fc->irx_post != NULL)
			fwe->fd.fc->irx_post(fwe->fd.fc, fp->mode.ld);
		m = sxfer->mbuf;

		/* insert new rbuf */
		sxfer->mbuf = m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m0 != NULL) {
			m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link);
		} else
			/* Chunk stays out of the ring until next refill. */
			printf("%s: m_getcl failed\n", __FUNCTION__);

		/* Drop errored or impossibly short packets. */
		if (sxfer->resp != 0 || fp->mode.stream.len <
		    ETHER_ALIGN + sizeof(struct ether_header)) {
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		/* Skip the stream header and the alignment pad. */
		m->m_data += HDR_LEN + ETHER_ALIGN;
#if 0
		c = mtod(m, u_char *);
#endif
		m->m_len = m->m_pkthdr.len = fp->mode.stream.len - ETHER_ALIGN;
		m->m_pkthdr.rcvif = ifp;
#if 0
		FWEDEBUG(ifp, "%02x %02x %02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x\n",
		    c[0], c[1], c[2], c[3], c[4], c[5],
		    c[6], c[7], c[8], c[9], c[10], c[11],
		    c[12], c[13], c[14], c[15],
		    c[16], c[17], c[18], c[19],
		    c[20], c[21], c[22], c[23],
		    c[20], c[21], c[22], c[23]
		);
#endif
		if_input(ifp, m);
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	}
	if (STAILQ_FIRST(&xferq->stfree) != NULL)
		fwe->fd.fc->irx_enable(fwe->fd.fc, fwe->dma_ch);
}
/* Device-method dispatch table for the fwe driver. */
static device_method_t fwe_methods[] = {
	/* device interface */
	DEVMETHOD(device_identify, fwe_identify),
	DEVMETHOD(device_probe, fwe_probe),
	DEVMETHOD(device_attach, fwe_attach),
	DEVMETHOD(device_detach, fwe_detach),
	{ 0, 0 }
};

static driver_t fwe_driver = {
	"fwe",
	fwe_methods,
	sizeof(struct fwe_softc),
};

/* Attach under the firewire bus driver. */
DRIVER_MODULE(fwe, firewire, fwe_driver, 0, 0);
MODULE_VERSION(fwe, 1);
MODULE_DEPEND(fwe, firewire, 1, 1, 1);
diff --git a/sys/dev/firewire/if_fwip.c b/sys/dev/firewire/if_fwip.c
index b698db6c9620..6350ec9cb56e 100644
--- a/sys/dev/firewire/if_fwip.c
+++ b/sys/dev/firewire/if_fwip.c
@@ -1,937 +1,935 @@
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) 2004
* Doug Rabson
* Copyright (c) 2002-2003
* Hidetoshi Shimokawa. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
*
* This product includes software developed by Hidetoshi Shimokawa.
*
* 4. Neither the name of the author nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#include "opt_inet.h"
#endif
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/firewire.h>
#include <net/if_arp.h>
#include <net/if_types.h>
#include <dev/firewire/firewire.h>
#include <dev/firewire/firewirereg.h>
#include <dev/firewire/iec13213.h>
#include <dev/firewire/if_fwipvar.h>
/*
 * We really need a mechanism for allocating regions in the FIFO
 * address space. We pick a address in the OHCI controller's 'middle'
 * address space. This means that the controller will automatically
 * send responses for us, which is fine since we don't have any
 * important information to put in the response anyway.
 */
#define INET_FIFO 0xfffe00000000LL

/* Debug printf: only emits when the if_fwip_debug sysctl is non-zero. */
#define FWIPDEBUG	if (fwipdebug) if_printf
/* One xfer is reserved, so the send queue holds FWMAXQUEUE - 1 packets. */
#define TX_MAX_QUEUE	(FWMAXQUEUE - 1)

/* network interface */
static void fwip_start (if_t);
static int fwip_ioctl (if_t, u_long, caddr_t);
static void fwip_init (void *);

static void fwip_post_busreset (void *);
static void fwip_output_callback (struct fw_xfer *);
static void fwip_async_output (struct fwip_softc *, if_t);
static void fwip_start_send (void *, int);
static void fwip_stream_input (struct fw_xferq *);
static void fwip_unicast_input(struct fw_xfer *);

/* Tunables. */
static int fwipdebug = 0;
static int broadcast_channel = 0xc0 | 0x1f; /* tag | channel(XXX) */
static int tx_speed = 2;
static int rx_queue_len = FWMAXQUEUE;

static MALLOC_DEFINE(M_FWIP, "if_fwip", "IP over FireWire interface");
SYSCTL_INT(_debug, OID_AUTO, if_fwip_debug, CTLFLAG_RW, &fwipdebug, 0, "");
SYSCTL_DECL(_hw_firewire);
static SYSCTL_NODE(_hw_firewire, OID_AUTO, fwip, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Firewire ip subsystem");
SYSCTL_INT(_hw_firewire_fwip, OID_AUTO, rx_queue_len, CTLFLAG_RWTUN, &rx_queue_len,
    0, "Length of the receive queue");
#ifdef DEVICE_POLLING
static poll_handler_t fwip_poll;

/*
 * DEVICE_POLLING handler: hand the poll down to the firewire
 * controller; a no-op while the interface is not running.
 */
static int
fwip_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct fwip_softc *fwip;
	struct firewire_comm *fc;

	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
		return (0);

	fwip = ((struct fwip_eth_softc *)if_getsoftc(ifp))->fwip;
	fc = fwip->fd.fc;
	/* Second argument selects quick mode unless status must be checked. */
	fc->poll(fc, (cmd == POLL_AND_CHECK_STATUS)?0:1, count);
	return (0);
}
#endif /* DEVICE_POLLING */
/* Create one fwip child per firewire bus, unit-matched to the parent. */
static void
fwip_identify(driver_t *driver, device_t parent)
{
	BUS_ADD_CHILD(parent, 0, "fwip", device_get_unit(parent));
}
/*
 * Probe: accept only the child whose unit number matches its parent
 * firewire bus (one fwip instance per bus).
 */
static int
fwip_probe(device_t dev)
{
	device_t parent;

	parent = device_get_parent(dev);
	if (device_get_unit(dev) != device_get_unit(parent))
		return (ENXIO);
	device_set_desc(dev, "IP over FireWire");
	return (0);
}
/*
 * Attach: allocate the IEEE1394 ifnet, build the RFC 2734-style
 * hardware address (EUI-64 + bus capabilities + unicast FIFO address)
 * and register with the firewire network layer.  DMA resources are
 * deferred to fwip_init().
 */
static int
fwip_attach(device_t dev)
{
	struct fwip_softc *fwip;
	if_t ifp;
	int unit, s;
	struct fw_hwaddr *hwaddr;

	fwip = ((struct fwip_softc *)device_get_softc(dev));
	unit = device_get_unit(dev);
	ifp = fwip->fw_softc.fwip_ifp = if_alloc(IFT_IEEE1394);
-	if (ifp == NULL)
-		return (ENOSPC);

	mtx_init(&fwip->mtx, "fwip", NULL, MTX_DEF);
	/* XXX */
	fwip->dma_ch = -1;

	fwip->fd.fc = device_get_ivars(dev);
	if (tx_speed < 0)
		tx_speed = fwip->fd.fc->speed;

	fwip->fd.dev = dev;
	fwip->fd.post_explore = NULL;
	fwip->fd.post_busreset = fwip_post_busreset;
	fwip->fw_softc.fwip = fwip;
	TASK_INIT(&fwip->start_send, 0, fwip_start_send, fwip);

	/*
	 * Encode our hardware the way that arp likes it.
	 */
	hwaddr = &IFP2FWC(fwip->fw_softc.fwip_ifp)->fc_hwaddr;
	hwaddr->sender_unique_ID_hi = htonl(fwip->fd.fc->eui.hi);
	hwaddr->sender_unique_ID_lo = htonl(fwip->fd.fc->eui.lo);
	hwaddr->sender_max_rec = fwip->fd.fc->maxrec;
	hwaddr->sspd = fwip->fd.fc->speed;
	hwaddr->sender_unicast_FIFO_hi = htons((uint16_t)(INET_FIFO >> 32));
	hwaddr->sender_unicast_FIFO_lo = htonl((uint32_t)INET_FIFO);

	/* fill the rest and attach interface */
	if_setsoftc(ifp, &fwip->fw_softc);

	if_initname(ifp, device_get_name(dev), unit);
	if_setinitfn(ifp, fwip_init);
	if_setstartfn(ifp, fwip_start);
	if_setioctlfn(ifp, fwip_ioctl);
	if_setflags(ifp, (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST));
	if_setsendqlen(ifp, TX_MAX_QUEUE);
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif

	s = splimp();
	firewire_ifattach(ifp, hwaddr);
	splx(s);

	FWIPDEBUG(ifp, "interface created\n");
-	return 0;
+	return (0);
}
/*
 * Stop the interface: disable and tear down the isochronous receive DMA
 * channel, drop the unicast FIFO address binding, free all receive
 * buffers and pre-allocated xfers (both the bound receive list and the
 * transmit list), and clear the driver RUNNING/OACTIVE flags.
 */
static void
fwip_stop(struct fwip_softc *fwip)
{
	struct firewire_comm *fc;
	struct fw_xferq *xferq;
	if_t ifp = fwip->fw_softc.fwip_ifp;
	struct fw_xfer *xfer, *next;
	int i;

	fc = fwip->fd.fc;
	if (fwip->dma_ch >= 0) {
		xferq = fc->ir[fwip->dma_ch];

		if (xferq->flag & FWXFERQ_RUNNING)
			fc->irx_disable(fc, fwip->dma_ch);
		/* Drop our configuration bits and unhook the input handler. */
		xferq->flag &=
			~(FWXFERQ_MODEMASK | FWXFERQ_OPEN | FWXFERQ_STREAM |
			FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_CHTAGMASK);
		xferq->hand = NULL;

		for (i = 0; i < xferq->bnchunk; i++)
			m_freem(xferq->bulkxfer[i].mbuf);
		free(xferq->bulkxfer, M_FWIP);

		/* Unbind the unicast FIFO and free its receive xfers. */
		fw_bindremove(fc, &fwip->fwb);
		for (xfer = STAILQ_FIRST(&fwip->fwb.xferlist); xfer != NULL;
		    xfer = next) {
			next = STAILQ_NEXT(xfer, link);
			fw_xfer_free(xfer);
		}

		/* Free the pre-allocated transmit xfers. */
		for (xfer = STAILQ_FIRST(&fwip->xferlist); xfer != NULL;
		    xfer = next) {
			next = STAILQ_NEXT(xfer, link);
			fw_xfer_free(xfer);
		}
		STAILQ_INIT(&fwip->xferlist);

		xferq->bulkxfer = NULL;
		fwip->dma_ch = -1;
	}

	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
}
/*
 * Detach: deregister polling if active, stop DMA, detach from the
 * firewire network layer, free the ifnet and destroy the softc mutex.
 */
static int
fwip_detach(device_t dev)
{
	struct fwip_softc *fwip;
	if_t ifp;
	int s;

	fwip = (struct fwip_softc *)device_get_softc(dev);
	ifp = fwip->fw_softc.fwip_ifp;

#ifdef DEVICE_POLLING
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	s = splimp();
	fwip_stop(fwip);
	firewire_ifdetach(ifp);
	if_free(ifp);
	mtx_destroy(&fwip->mtx);
	splx(s);

	return 0;
}
/*
 * Bring the interface up: on first call, open an isochronous receive
 * DMA channel on the broadcast channel with fwip_stream_input as the
 * handler, pre-fill the receive ring with mbuf clusters, bind the
 * unicast FIFO address range (with its own pre-allocated receive
 * xfers handled by fwip_unicast_input), and pre-allocate transmit
 * xfers.  Subsequent calls just (re)enable receive DMA and mark the
 * interface running.
 */
static void
fwip_init(void *arg)
{
	struct fwip_softc *fwip = ((struct fwip_eth_softc *)arg)->fwip;
	struct firewire_comm *fc;
	if_t ifp = fwip->fw_softc.fwip_ifp;
	struct fw_xferq *xferq;
	struct fw_xfer *xfer;
	struct mbuf *m;
	int i;

	FWIPDEBUG(ifp, "initializing\n");

	fc = fwip->fd.fc;
#define START 0
	if (fwip->dma_ch < 0) {
		fwip->dma_ch = fw_open_isodma(fc, /* tx */0);
		if (fwip->dma_ch < 0)
			return;
		xferq = fc->ir[fwip->dma_ch];
		xferq->flag |= FWXFERQ_EXTBUF |
				FWXFERQ_HANDLER | FWXFERQ_STREAM;
		/* Low byte of the queue flags carries tag | channel. */
		xferq->flag &= ~0xff;
		xferq->flag |= broadcast_channel & 0xff;
		/* register fwip_input handler */
		xferq->sc = (caddr_t) fwip;
		xferq->hand = fwip_stream_input;
		xferq->bnchunk = rx_queue_len;
		xferq->bnpacket = 1;
		xferq->psize = MCLBYTES;
		xferq->queued = 0;
		xferq->buf = NULL;
		xferq->bulkxfer = (struct fw_bulkxfer *) malloc(
			sizeof(struct fw_bulkxfer) * xferq->bnchunk,
							M_FWIP, M_WAITOK);
		/* NOTE(review): dead check — malloc(M_WAITOK) cannot fail. */
		if (xferq->bulkxfer == NULL) {
			printf("if_fwip: malloc failed\n");
			return;
		}
		STAILQ_INIT(&xferq->stvalid);
		STAILQ_INIT(&xferq->stfree);
		STAILQ_INIT(&xferq->stdma);
		xferq->stproc = NULL;
		/* Give every receive chunk a full-size mbuf cluster. */
		for (i = 0; i < xferq->bnchunk; i++) {
			m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
			xferq->bulkxfer[i].mbuf = m;
			m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree,
					&xferq->bulkxfer[i], link);
		}

		/* Bind our unicast receive FIFO address range. */
		fwip->fwb.start = INET_FIFO;
		fwip->fwb.end = INET_FIFO + 16384; /* S3200 packet size */

		/* pre-allocate xfer */
		STAILQ_INIT(&fwip->fwb.xferlist);
		for (i = 0; i < rx_queue_len; i++) {
			xfer = fw_xfer_alloc(M_FWIP);
			if (xfer == NULL)
				break;
			m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
			xfer->recv.payload = mtod(m, uint32_t *);
			xfer->recv.pay_len = MCLBYTES;
			xfer->hand = fwip_unicast_input;
			xfer->fc = fc;
			xfer->sc = (caddr_t)fwip;
			xfer->mbuf = m;
			STAILQ_INSERT_TAIL(&fwip->fwb.xferlist, xfer, link);
		}
		fw_bindadd(fc, &fwip->fwb);

		/* Pre-allocate transmit xfers so the hot path never sleeps. */
		STAILQ_INIT(&fwip->xferlist);
		for (i = 0; i < TX_MAX_QUEUE; i++) {
			xfer = fw_xfer_alloc(M_FWIP);
			if (xfer == NULL)
				break;
			xfer->send.spd = tx_speed;
			xfer->fc = fwip->fd.fc;
			xfer->sc = (caddr_t)fwip;
			xfer->hand = fwip_output_callback;
			STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
		}
	} else
		xferq = fc->ir[fwip->dma_ch];

	/* Invalidate the cached last destination on (re)init. */
	fwip->last_dest.hi = 0;
	fwip->last_dest.lo = 0;

	/* start dma */
	if ((xferq->flag & FWXFERQ_RUNNING) == 0)
		fc->irx_enable(fc, fwip->dma_ch);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

#if 0
	/* attempt to start output */
	fwip_start(ifp);
#endif
}
/*
 * Interface ioctl handler.  Handles up/down transitions, multicast
 * (no-op) and, under DEVICE_POLLING, polling enable/disable.
 * Everything else goes to firewire_ioctl().
 */
static int
fwip_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct fwip_softc *fwip = ((struct fwip_eth_softc *)if_getsoftc(ifp))->fwip;
	int s, error;

	switch (cmd) {
	case SIOCSIFFLAGS:
		s = splimp();
		if (if_getflags(ifp) & IFF_UP) {
			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
				fwip_init(&fwip->fw_softc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				fwip_stop(fwip);
		}
		splx(s);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Nothing to program; broadcast channel receives everything. */
		break;
	case SIOCSIFCAP:
#ifdef DEVICE_POLLING
	    {
		struct ifreq *ifr = (struct ifreq *) data;
		struct firewire_comm *fc = fwip->fd.fc;

		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(if_getcapenable(ifp) & IFCAP_POLLING)) {
			error = ether_poll_register(fwip_poll, ifp);
			if (error)
				return (error);
			/* Disable interrupts */
			fc->set_intr(fc, 0);
			if_setcapenablebit(ifp, IFCAP_POLLING, 0);
			return (error);
		}
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    if_getcapenable(ifp) & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			fc->set_intr(fc, 1);
			if_setcapenablebit(ifp, 0, IFCAP_POLLING);
			return (error);
		}
	    }
#endif /* DEVICE_POLLING */
		break;
	default:
		s = splimp();
		error = firewire_ioctl(ifp, cmd, data);
		splx(s);
		return (error);
	}
	return (0);
}
/*
 * Bus-reset hook: re-publish our RFC 2734 (IPv4) and RFC 3146 (IPv6)
 * unit directories in the configuration ROM, invalidate the cached
 * last destination, and notify the firewire network layer.
 */
static void
fwip_post_busreset(void *arg)
{
	struct fwip_softc *fwip = arg;
	struct crom_src *src;
	struct crom_chunk *root;

	src = fwip->fd.fc->crom_src;
	root = fwip->fd.fc->crom_root;

	/* RFC2734 IPv4 over IEEE1394 */
	bzero(&fwip->unit4, sizeof(struct crom_chunk));
	crom_add_chunk(src, root, &fwip->unit4, CROM_UDIR);
	crom_add_entry(&fwip->unit4, CSRKEY_SPEC, CSRVAL_IETF);
	crom_add_simple_text(src, &fwip->unit4, &fwip->spec4, "IANA");
	crom_add_entry(&fwip->unit4, CSRKEY_VER, 1);
	crom_add_simple_text(src, &fwip->unit4, &fwip->ver4, "IPv4");

	/* RFC3146 IPv6 over IEEE1394 */
	bzero(&fwip->unit6, sizeof(struct crom_chunk));
	crom_add_chunk(src, root, &fwip->unit6, CROM_UDIR);
	crom_add_entry(&fwip->unit6, CSRKEY_SPEC, CSRVAL_IETF);
	crom_add_simple_text(src, &fwip->unit6, &fwip->spec6, "IANA");
	crom_add_entry(&fwip->unit6, CSRKEY_VER, 2);
	crom_add_simple_text(src, &fwip->unit6, &fwip->ver6, "IPv6");

	fwip->last_dest.hi = 0;
	fwip->last_dest.lo = 0;
	firewire_busreset(fwip->fw_softc.fwip_ifp);
}
/*
 * Transmit-completion callback: account errors, free the sent mbuf,
 * recycle the xfer onto the free list, and restart output in case the
 * start routine had stalled for lack of xfers.
 */
static void
fwip_output_callback(struct fw_xfer *xfer)
{
	struct fwip_softc *fwip;
	if_t ifp;
	int s;

	fwip = (struct fwip_softc *)xfer->sc;
	ifp = fwip->fw_softc.fwip_ifp;
	/* XXX error check */
	FWIPDEBUG(ifp, "resp = %d\n", xfer->resp);
	if (xfer->resp != 0)
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	m_freem(xfer->mbuf);
	fw_xfer_unload(xfer);

	s = splimp();
	FWIP_LOCK(fwip);
	STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
	FWIP_UNLOCK(fwip);
	splx(s);

	/* for queue full */
	if (!if_sendq_empty(ifp)) {
		fwip_start(ifp);
	}
}
/*
 * Begin transmission on the interface.
 *
 * If the DMA channel has not been allocated yet (dma_ch < 0) the
 * device cannot send, so drain and drop everything queued, counting
 * each dropped packet as an output error.  Otherwise hand the send
 * queue to fwip_async_output() with OACTIVE asserted for the
 * duration.
 */
static void
fwip_start(if_t ifp)
{
	struct fwip_softc *fwip = ((struct fwip_eth_softc *)if_getsoftc(ifp))->fwip;
	struct mbuf *m;
	int s;

	FWIPDEBUG(ifp, "starting\n");

	if (fwip->dma_ch < 0) {
		FWIPDEBUG(ifp, "not ready\n");

		s = splimp();
		while ((m = if_dequeue(ifp)) != NULL) {
			m_freem(m);
			/*
			 * Count exactly one error per dropped packet.
			 * (The old loop also bumped the counter on the
			 * final, empty dequeue, over-counting by one.)
			 */
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		}
		splx(s);

		return;
	}

	s = splimp();
	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
	if (!if_sendq_empty(ifp))
		fwip_async_output(fwip, ifp);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	splx(s);
}
/*
 * Async. stream output.
 *
 * Drain the interface send queue onto the firewire asynchronous
 * transmit queue.  Broadcast packets (or packets with no resolved
 * link-level address) go out as GASP stream packets on the broadcast
 * channel; unicast packets become block write requests aimed at the
 * destination node's unicast FIFO.  We stop early when the hardware
 * queue is nearly full, when the free-xfer list is empty, or when we
 * run out of transaction labels (EAGAIN).
 */
static void
fwip_async_output(struct fwip_softc *fwip, if_t ifp)
{
	struct firewire_comm *fc = fwip->fd.fc;
	struct mbuf *m;
	struct m_tag *mtag;
	struct fw_hwaddr *destfw;
	struct fw_xfer *xfer;
	struct fw_xferq *xferq;
	struct fw_pkt *fp;
	uint16_t nodeid;
	int error;
	int i = 0;	/* packets handed to the hardware this call */

	xfer = NULL;
	xferq = fc->atq;
	while ((xferq->queued < xferq->maxq - 1) &&
	    !if_sendq_empty(ifp)) {
		/* Grab a free xfer; without one we must stop. */
		FWIP_LOCK(fwip);
		xfer = STAILQ_FIRST(&fwip->xferlist);
		if (xfer == NULL) {
			FWIP_UNLOCK(fwip);
#if 0
			printf("if_fwip: lack of xfer\n");
#endif
			break;
		}
		STAILQ_REMOVE_HEAD(&fwip->xferlist, link);
		FWIP_UNLOCK(fwip);

		m = if_dequeue(ifp);
		if (m == NULL) {
			/* Queue drained under us; give the xfer back. */
			FWIP_LOCK(fwip);
			STAILQ_INSERT_HEAD(&fwip->xferlist, xfer, link);
			FWIP_UNLOCK(fwip);
			break;
		}

		/*
		 * Dig out the link-level address which
		 * firewire_output got via arp or neighbour
		 * discovery. If we don't have a link-level address,
		 * just stick the thing on the broadcast channel.
		 */
		mtag = m_tag_locate(m, MTAG_FIREWIRE, MTAG_FIREWIRE_HWADDR, 0);
		if (mtag == NULL)
			destfw = NULL;
		else
			destfw = (struct fw_hwaddr *) (mtag + 1);

		/*
		 * We don't do any bpf stuff here - the generic code
		 * in firewire_output gives the packet to bpf before
		 * it adds the link-level encapsulation.
		 */

		/*
		 * Put the mbuf in the xfer early in case we hit an
		 * error case below - fwip_output_callback will free
		 * the mbuf.
		 */
		xfer->mbuf = m;

		/*
		 * We use the arp result (if any) to add a suitable firewire
		 * packet header before handing off to the bus.
		 */
		fp = &xfer->send.hdr;
		nodeid = FWLOCALBUS | fc->nodeid;
		if ((m->m_flags & M_BCAST) || !destfw) {
			/*
			 * Broadcast packets are sent as GASP packets with
			 * specifier ID 0x00005e, version 1 on the broadcast
			 * channel. To be conservative, we send at the
			 * slowest possible speed.
			 */
			uint32_t *p;

			M_PREPEND(m, 2 * sizeof(uint32_t), M_NOWAIT);
			if (m == NULL) {
				/*
				 * M_PREPEND freed the chain on failure,
				 * so the stale xfer->mbuf must be cleared
				 * before recycling the xfer; drop the
				 * packet and account the error.
				 */
				xfer->mbuf = NULL;
				FWIP_LOCK(fwip);
				STAILQ_INSERT_TAIL(&fwip->xferlist, xfer,
				    link);
				FWIP_UNLOCK(fwip);
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
				continue;
			}
			/*
			 * M_PREPEND may have allocated a new head mbuf;
			 * keep xfer->mbuf pointing at the current head.
			 */
			xfer->mbuf = m;
			p = mtod(m, uint32_t *);
			fp->mode.stream.len = m->m_pkthdr.len;
			fp->mode.stream.chtag = broadcast_channel;
			fp->mode.stream.tcode = FWTCODE_STREAM;
			fp->mode.stream.sy = 0;
			xfer->send.spd = 0;
			p[0] = htonl(nodeid << 16);
			p[1] = htonl((0x5e << 24) | 1);
		} else {
			/*
			 * Unicast packets are sent as block writes to the
			 * target's unicast fifo address. If we can't
			 * find the node address, we just give up. We
			 * could broadcast it but that might overflow
			 * the packet size limitations due to the
			 * extra GASP header. Note: the hardware
			 * address is stored in network byte order to
			 * make life easier for ARP.
			 */
			struct fw_device *fd;
			struct fw_eui64 eui;

			eui.hi = ntohl(destfw->sender_unique_ID_hi);
			eui.lo = ntohl(destfw->sender_unique_ID_lo);
			if (fwip->last_dest.hi != eui.hi ||
			    fwip->last_dest.lo != eui.lo) {
				fd = fw_noderesolve_eui64(fc, &eui);
				if (!fd) {
					/* error */
					if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
					/* XXX set error code */
					fwip_output_callback(xfer);
					continue;
				}
				/* Cache the header for subsequent packets. */
				fwip->last_hdr.mode.wreqb.dst = FWLOCALBUS | fd->dst;
				fwip->last_hdr.mode.wreqb.tlrt = 0;
				fwip->last_hdr.mode.wreqb.tcode = FWTCODE_WREQB;
				fwip->last_hdr.mode.wreqb.pri = 0;
				fwip->last_hdr.mode.wreqb.src = nodeid;
				fwip->last_hdr.mode.wreqb.dest_hi =
				    ntohs(destfw->sender_unicast_FIFO_hi);
				fwip->last_hdr.mode.wreqb.dest_lo =
				    ntohl(destfw->sender_unicast_FIFO_lo);
				fwip->last_hdr.mode.wreqb.extcode = 0;
				fwip->last_dest = eui;
			}

			fp->mode.wreqb = fwip->last_hdr.mode.wreqb;
			fp->mode.wreqb.len = m->m_pkthdr.len;
			xfer->send.spd = min(destfw->sspd, fc->speed);
		}

		xfer->send.pay_len = m->m_pkthdr.len;

		error = fw_asyreq(fc, -1, xfer);
		if (error == EAGAIN) {
			/*
			 * We ran out of tlabels - requeue the packet
			 * for later transmission.
			 */
			xfer->mbuf = NULL;
			FWIP_LOCK(fwip);
			STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
			FWIP_UNLOCK(fwip);
			if_sendq_prepend(ifp, m);
			break;
		}
		if (error) {
			/* error */
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			/* XXX set error code */
			fwip_output_callback(xfer);
			continue;
		} else {
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
			i++;
		}
	}
#if 0
	if (i > 1)
		printf("%d queued\n", i);
#endif
	if (i > 0)
		xferq->start(fc);
}
/*
 * Deferred-start task handler: kick the async transmit queue.  The
 * "count" argument is part of the task callback signature and is
 * unused here.
 */
static void
fwip_start_send(void *arg, int count)
{
	struct fwip_softc *sc = arg;

	sc->fd.fc->atq->start(sc->fd.fc);
}
/* Async. stream input */
/*
 * Receive path for GASP stream packets (broadcast/multicast IP over
 * IEEE1394).  Called by the firewire layer with completed receive
 * buffers queued on xferq->stvalid; each buffer is validated,
 * replaced with a fresh cluster, and handed to firewire_input().
 */
static void
fwip_stream_input(struct fw_xferq *xferq)
{
	struct epoch_tracker et;
	struct mbuf *m, *m0;
	struct m_tag *mtag;
	if_t ifp;
	struct fwip_softc *fwip;
	struct fw_bulkxfer *sxfer;
	struct fw_pkt *fp;
	uint16_t src;
	uint32_t *p;

	fwip = (struct fwip_softc *)xferq->sc;
	ifp = fwip->fw_softc.fwip_ifp;

	NET_EPOCH_ENTER(et);
	while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) {
		STAILQ_REMOVE_HEAD(&xferq->stvalid, link);
		fp = mtod(sxfer->mbuf, struct fw_pkt *);
		/* Give the controller a chance to post-process the packet. */
		if (fwip->fd.fc->irx_post != NULL)
			fwip->fd.fc->irx_post(fwip->fd.fc, fp->mode.ld);
		m = sxfer->mbuf;

		/* insert new rbuf */
		sxfer->mbuf = m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m0 != NULL) {
			m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link);
		} else
			/*
			 * NOTE(review): on allocation failure sxfer is not
			 * returned to stfree, so this receive slot appears
			 * to be lost until the queue is rebuilt — confirm
			 * this is intended.
			 */
			printf("fwip_as_input: m_getcl failed\n");

		/*
		 * We must have a GASP header - leave the
		 * encapsulation sanity checks to the generic
		 * code. Remember that we also have the firewire async
		 * stream header even though that isn't accounted for
		 * in mode.stream.len.
		 */
		if (sxfer->resp != 0 || fp->mode.stream.len <
		    2*sizeof(uint32_t)) {
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}
		/* Trim to the received length: stream payload + stream header. */
		m->m_len = m->m_pkthdr.len = fp->mode.stream.len
		    + sizeof(fp->mode.stream);

		/*
		 * If we received the packet on the broadcast channel,
		 * mark it as broadcast, otherwise we assume it must
		 * be multicast.
		 */
		if (fp->mode.stream.chtag == broadcast_channel)
			m->m_flags |= M_BCAST;
		else
			m->m_flags |= M_MCAST;

		/*
		 * Make sure we recognise the GASP specifier (0x00005e)
		 * and version (1); drop anything else.
		 */
		p = mtod(m, uint32_t *);
		if ((((ntohl(p[1]) & 0xffff) << 8) | ntohl(p[2]) >> 24) != 0x00005e
		    || (ntohl(p[2]) & 0xffffff) != 1) {
			FWIPDEBUG(ifp, "Unrecognised GASP header %#08x %#08x\n",
			    ntohl(p[1]), ntohl(p[2]));
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		/*
		 * Record the sender ID for possible BPF usage.
		 */
		src = ntohl(p[1]) >> 16;
		if (bpf_peers_present_if(ifp)) {
			mtag = m_tag_alloc(MTAG_FIREWIRE,
			    MTAG_FIREWIRE_SENDER_EUID,
			    2*sizeof(uint32_t), M_NOWAIT);
			if (mtag) {
				/* bpf wants it in network byte order */
				struct fw_device *fd;
				uint32_t *p = (uint32_t *) (mtag + 1);

				fd = fw_noderesolve_nodeid(fwip->fd.fc,
				    src & 0x3f);
				if (fd) {
					p[0] = htonl(fd->eui.hi);
					p[1] = htonl(fd->eui.lo);
				} else {
					p[0] = 0;
					p[1] = 0;
				}
				m_tag_prepend(m, mtag);
			}
		}

		/*
		 * Trim off the GASP header
		 */
		m_adj(m, 3*sizeof(uint32_t));
		m->m_pkthdr.rcvif = ifp;
		firewire_input(ifp, m, src);
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	}
	NET_EPOCH_EXIT(et);

	/* Re-arm isochronous receive while free buffers remain. */
	if (STAILQ_FIRST(&xferq->stfree) != NULL)
		fwip->fd.fc->irx_enable(fwip->fd.fc, fwip->dma_ch);
}
/*
 * We have finished with a unicast xfer.  Allocate a new cluster for
 * the next receive and stick the xfer on the back of the input queue.
 * (The previous version assigned xfer->mbuf twice; the duplicate has
 * been removed.)
 */
static __inline void
fwip_unicast_input_recycle(struct fwip_softc *fwip, struct fw_xfer *xfer)
{
	struct mbuf *m;

	/* M_WAITOK means this cannot return NULL. */
	m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
	xfer->mbuf = m;
	xfer->recv.payload = mtod(m, uint32_t *);
	xfer->recv.pay_len = MCLBYTES;
	STAILQ_INSERT_TAIL(&fwip->fwb.xferlist, xfer, link);
}
/*
 * Unicast receive path: a block write request arrived addressed to
 * our INET_FIFO.  Validate the request, recycle the xfer with a fresh
 * cluster, then hand the payload to the generic firewire
 * encapsulation code for link-level reassembly and input.
 */
static void
fwip_unicast_input(struct fw_xfer *xfer)
{
	uint64_t address;
	struct mbuf *m;
	struct m_tag *mtag;
	struct epoch_tracker et;
	if_t ifp;
	struct fwip_softc *fwip;
	struct fw_pkt *fp;
	//struct fw_pkt *sfp;
	int rtcode;

	fwip = (struct fwip_softc *)xfer->sc;
	ifp = fwip->fw_softc.fwip_ifp;
	/* Detach the received mbuf from the xfer; we now own it. */
	m = xfer->mbuf;
	xfer->mbuf = 0;
	fp = &xfer->recv.hdr;

	/*
	 * Check the fifo address - we only accept addresses of
	 * exactly INET_FIFO.
	 */
	address = ((uint64_t)fp->mode.wreqb.dest_hi << 32)
	    | fp->mode.wreqb.dest_lo;
	if (fp->mode.wreqb.tcode != FWTCODE_WREQB) {
		rtcode = FWRCODE_ER_TYPE;
	} else if (address != INET_FIFO) {
		rtcode = FWRCODE_ER_ADDR;
	} else {
		rtcode = FWRCODE_COMPLETE;
	}
	NET_EPOCH_ENTER(et);

	/*
	 * Pick up a new mbuf and stick it on the back of the receive
	 * queue.
	 */
	fwip_unicast_input_recycle(fwip, xfer);

	/*
	 * If we've already rejected the packet, give up now.
	 */
	if (rtcode != FWRCODE_COMPLETE) {
		m_freem(m);
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		goto done;
	}

	if (bpf_peers_present_if(ifp)) {
		/*
		 * Record the sender ID for possible BPF usage.
		 */
		mtag = m_tag_alloc(MTAG_FIREWIRE, MTAG_FIREWIRE_SENDER_EUID,
		    2*sizeof(uint32_t), M_NOWAIT);
		if (mtag) {
			/* bpf wants it in network byte order */
			struct fw_device *fd;
			uint32_t *p = (uint32_t *) (mtag + 1);

			fd = fw_noderesolve_nodeid(fwip->fd.fc,
			    fp->mode.wreqb.src & 0x3f);
			if (fd) {
				p[0] = htonl(fd->eui.hi);
				p[1] = htonl(fd->eui.lo);
			} else {
				p[0] = 0;
				p[1] = 0;
			}
			m_tag_prepend(m, mtag);
		}
	}

	/*
	 * Hand off to the generic encapsulation code. We don't use
	 * ifp->if_input so that we can pass the source nodeid as an
	 * argument to facilitate link-level fragment reassembly.
	 */
	m->m_len = m->m_pkthdr.len = fp->mode.wreqb.len;
	m->m_pkthdr.rcvif = ifp;
	firewire_input(ifp, m, fp->mode.wreqb.src);
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
done:
	NET_EPOCH_EXIT(et);
}
/* newbus glue: the fwip pseudo-device attaches under the firewire bus. */
static device_method_t fwip_methods[] = {
	/* device interface */
	DEVMETHOD(device_identify, fwip_identify),
	DEVMETHOD(device_probe, fwip_probe),
	DEVMETHOD(device_attach, fwip_attach),
	DEVMETHOD(device_detach, fwip_detach),
	{ 0, 0 }
};

static driver_t fwip_driver = {
	"fwip",
	fwip_methods,
	sizeof(struct fwip_softc),
};

DRIVER_MODULE(fwip, firewire, fwip_driver, 0, 0);
MODULE_VERSION(fwip, 1);
MODULE_DEPEND(fwip, firewire, 1, 1, 1);
diff --git a/sys/dev/fxp/if_fxp.c b/sys/dev/fxp/if_fxp.c
index d5c977782440..7684b94df316 100644
--- a/sys/dev/fxp/if_fxp.c
+++ b/sys/dev/fxp/if_fxp.c
@@ -1,3264 +1,3259 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 1995, David Greenman
* Copyright (c) 2001 Jonathan Lemon <jlemon@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
/*
* Intel EtherExpress Pro/100B PCI Fast Ethernet driver
*/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h> /* for PCIM_CMD_xxx */
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/fxp/if_fxpreg.h>
#include <dev/fxp/if_fxpvar.h>
#include <dev/fxp/rcvbundl.h>
MODULE_DEPEND(fxp, pci, 1, 1, 1);
MODULE_DEPEND(fxp, ether, 1, 1, 1);
MODULE_DEPEND(fxp, miibus, 1, 1, 1);
#include "miibus_if.h"
/*
* NOTE! On !x86 we typically have an alignment constraint. The
* card DMAs the packet immediately following the RFA. However,
* the first thing in the packet is a 14-byte Ethernet header.
* This means that the packet is misaligned. To compensate,
* we actually offset the RFA 2 bytes into the cluster. This
* alignes the packet after the Ethernet header at a 32-bit
* boundary. HOWEVER! This means that the RFA is misaligned!
*/
#define RFA_ALIGNMENT_FUDGE 2
/*
 * Set initial transmit threshold at 64 (512 bytes). This is
 * increased by 64 (512 bytes) at a time, to maximum of 192
 * (1536 bytes), if an underrun occurs.
 */
static int tx_threshold = 64;	/* units of 8 bytes, per the comment above */
/*
 * The configuration byte map has several undefined fields which
 * must be one or must be zero. Set up a template for these bits.
 * The actual configuration is performed in fxp_init_body.
 *
 * See struct fxp_cb_config for the bit definitions.
 * The index comments below are configuration byte numbers.
 */
static const u_char fxp_cb_config_template[] = {
	0x0, 0x0,		/* cb_status */
	0x0, 0x0,		/* cb_command */
	0x0, 0x0, 0x0, 0x0,	/* link_addr */
	0x0,	/* 0 */
	0x0,	/* 1 */
	0x0,	/* 2 */
	0x0,	/* 3 */
	0x0,	/* 4 */
	0x0,	/* 5 */
	0x32,	/* 6 */
	0x0,	/* 7 */
	0x0,	/* 8 */
	0x0,	/* 9 */
	0x6,	/* 10 */
	0x0,	/* 11 */
	0x0,	/* 12 */
	0x0,	/* 13 */
	0xf2,	/* 14 */
	0x48,	/* 15 */
	0x0,	/* 16 */
	0x40,	/* 17 */
	0xf0,	/* 18 */
	0x0,	/* 19 */
	0x3f,	/* 20 */
	0x5,	/* 21 */
	0x0,	/* 22 */
	0x0,	/* 23 */
	0x0,	/* 24 */
	0x0,	/* 25 */
	0x0,	/* 26 */
	0x0,	/* 27 */
	0x0,	/* 28 */
	0x0,	/* 29 */
	0x0,	/* 30 */
	0x0	/* 31 */
};
/*
 * Claim various Intel PCI device identifiers for this driver. The
 * sub-vendor and sub-device field are extensively used to identify
 * particular variants, but we don't currently differentiate between
 * them.
 */
/*
 * Columns: vendor, device, revid (-1 matches any revision),
 * ICH chipset generation (0 = not an ICH part), description.
 */
static const struct fxp_ident fxp_ident_table[] = {
    { 0x8086, 0x1029, -1, 0, "Intel 82559 PCI/CardBus Pro/100" },
    { 0x8086, 0x1030, -1, 0, "Intel 82559 Pro/100 Ethernet" },
    { 0x8086, 0x1031, -1, 3, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
    { 0x8086, 0x1032, -1, 3, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
    { 0x8086, 0x1033, -1, 3, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
    { 0x8086, 0x1034, -1, 3, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
    { 0x8086, 0x1035, -1, 3, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
    { 0x8086, 0x1036, -1, 3, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
    { 0x8086, 0x1037, -1, 3, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
    { 0x8086, 0x1038, -1, 3, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
    { 0x8086, 0x1039, -1, 4, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
    { 0x8086, 0x103A, -1, 4, "Intel 82801DB (ICH4) Pro/100 Ethernet" },
    { 0x8086, 0x103B, -1, 4, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
    { 0x8086, 0x103C, -1, 4, "Intel 82801DB (ICH4) Pro/100 Ethernet" },
    { 0x8086, 0x103D, -1, 4, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
    { 0x8086, 0x103E, -1, 4, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
    { 0x8086, 0x1050, -1, 5, "Intel 82801BA (D865) Pro/100 VE Ethernet" },
    { 0x8086, 0x1051, -1, 5, "Intel 82562ET (ICH5/ICH5R) Pro/100 VE Ethernet" },
    { 0x8086, 0x1059, -1, 0, "Intel 82551QM Pro/100 M Mobile Connection" },
    { 0x8086, 0x1064, -1, 6, "Intel 82562EZ (ICH6)" },
    { 0x8086, 0x1065, -1, 6, "Intel 82562ET/EZ/GT/GZ PRO/100 VE Ethernet" },
    { 0x8086, 0x1068, -1, 6, "Intel 82801FBM (ICH6-M) Pro/100 VE Ethernet" },
    { 0x8086, 0x1069, -1, 6, "Intel 82562EM/EX/GX Pro/100 Ethernet" },
    { 0x8086, 0x1091, -1, 7, "Intel 82562GX Pro/100 Ethernet" },
    { 0x8086, 0x1092, -1, 7, "Intel Pro/100 VE Network Connection" },
    { 0x8086, 0x1093, -1, 7, "Intel Pro/100 VM Network Connection" },
    { 0x8086, 0x1094, -1, 7, "Intel Pro/100 946GZ (ICH7) Network Connection" },
    { 0x8086, 0x1209, -1, 0, "Intel 82559ER Embedded 10/100 Ethernet" },
    { 0x8086, 0x1229, 0x01, 0, "Intel 82557 Pro/100 Ethernet" },
    { 0x8086, 0x1229, 0x02, 0, "Intel 82557 Pro/100 Ethernet" },
    { 0x8086, 0x1229, 0x03, 0, "Intel 82557 Pro/100 Ethernet" },
    { 0x8086, 0x1229, 0x04, 0, "Intel 82558 Pro/100 Ethernet" },
    { 0x8086, 0x1229, 0x05, 0, "Intel 82558 Pro/100 Ethernet" },
    { 0x8086, 0x1229, 0x06, 0, "Intel 82559 Pro/100 Ethernet" },
    { 0x8086, 0x1229, 0x07, 0, "Intel 82559 Pro/100 Ethernet" },
    { 0x8086, 0x1229, 0x08, 0, "Intel 82559 Pro/100 Ethernet" },
    { 0x8086, 0x1229, 0x09, 0, "Intel 82559ER Pro/100 Ethernet" },
    { 0x8086, 0x1229, 0x0c, 0, "Intel 82550 Pro/100 Ethernet" },
    { 0x8086, 0x1229, 0x0d, 0, "Intel 82550C Pro/100 Ethernet" },
    { 0x8086, 0x1229, 0x0e, 0, "Intel 82550 Pro/100 Ethernet" },
    { 0x8086, 0x1229, 0x0f, 0, "Intel 82551 Pro/100 Ethernet" },
    { 0x8086, 0x1229, 0x10, 0, "Intel 82551 Pro/100 Ethernet" },
    { 0x8086, 0x1229, -1, 0, "Intel 82557/8/9 Pro/100 Ethernet" },
    { 0x8086, 0x2449, -1, 2, "Intel 82801BA/CAM (ICH2/3) Pro/100 Ethernet" },
    { 0x8086, 0x27dc, -1, 7, "Intel 82801GB (ICH7) 10/100 Ethernet" },
    { 0, 0, -1, 0, NULL },
};
#ifdef FXP_IP_CSUM_WAR
#define FXP_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
#else
#define FXP_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
#endif
static int fxp_probe(device_t dev);
static int fxp_attach(device_t dev);
static int fxp_detach(device_t dev);
static int fxp_shutdown(device_t dev);
static int fxp_suspend(device_t dev);
static int fxp_resume(device_t dev);
static const struct fxp_ident *fxp_find_ident(device_t dev);
static void fxp_intr(void *xsc);
static void fxp_rxcsum(struct fxp_softc *sc, if_t ifp,
struct mbuf *m, uint16_t status, int pos);
static int fxp_intr_body(struct fxp_softc *sc, if_t ifp,
uint8_t statack, int count);
static void fxp_init(void *xsc);
static void fxp_init_body(struct fxp_softc *sc, int);
static void fxp_tick(void *xsc);
static void fxp_start(if_t ifp);
static void fxp_start_body(if_t ifp);
static int fxp_encap(struct fxp_softc *sc, struct mbuf **m_head);
static void fxp_txeof(struct fxp_softc *sc);
static void fxp_stop(struct fxp_softc *sc);
static void fxp_release(struct fxp_softc *sc);
static int fxp_ioctl(if_t ifp, u_long command,
caddr_t data);
static void fxp_watchdog(struct fxp_softc *sc);
static void fxp_add_rfabuf(struct fxp_softc *sc,
struct fxp_rx *rxp);
static void fxp_discard_rfabuf(struct fxp_softc *sc,
struct fxp_rx *rxp);
static int fxp_new_rfabuf(struct fxp_softc *sc,
struct fxp_rx *rxp);
static void fxp_mc_addrs(struct fxp_softc *sc);
static void fxp_mc_setup(struct fxp_softc *sc);
static uint16_t fxp_eeprom_getword(struct fxp_softc *sc, int offset,
int autosize);
static void fxp_eeprom_putword(struct fxp_softc *sc, int offset,
uint16_t data);
static void fxp_autosize_eeprom(struct fxp_softc *sc);
static void fxp_load_eeprom(struct fxp_softc *sc);
static void fxp_read_eeprom(struct fxp_softc *sc, u_short *data,
int offset, int words);
static void fxp_write_eeprom(struct fxp_softc *sc, u_short *data,
int offset, int words);
static int fxp_ifmedia_upd(if_t ifp);
static void fxp_ifmedia_sts(if_t ifp,
struct ifmediareq *ifmr);
static int fxp_serial_ifmedia_upd(if_t ifp);
static void fxp_serial_ifmedia_sts(if_t ifp,
struct ifmediareq *ifmr);
static int fxp_miibus_readreg(device_t dev, int phy, int reg);
static int fxp_miibus_writereg(device_t dev, int phy, int reg,
int value);
static void fxp_miibus_statchg(device_t dev);
static void fxp_load_ucode(struct fxp_softc *sc);
static void fxp_update_stats(struct fxp_softc *sc);
static void fxp_sysctl_node(struct fxp_softc *sc);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS,
int low, int high);
static int sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS);
static void fxp_scb_wait(struct fxp_softc *sc);
static void fxp_scb_cmd(struct fxp_softc *sc, int cmd);
static void fxp_dma_wait(struct fxp_softc *sc,
volatile uint16_t *status, bus_dma_tag_t dmat,
bus_dmamap_t map);
/* newbus / miibus method tables and module registration for fxp. */
static device_method_t fxp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, fxp_probe),
	DEVMETHOD(device_attach, fxp_attach),
	DEVMETHOD(device_detach, fxp_detach),
	DEVMETHOD(device_shutdown, fxp_shutdown),
	DEVMETHOD(device_suspend, fxp_suspend),
	DEVMETHOD(device_resume, fxp_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg, fxp_miibus_readreg),
	DEVMETHOD(miibus_writereg, fxp_miibus_writereg),
	DEVMETHOD(miibus_statchg, fxp_miibus_statchg),

	DEVMETHOD_END
};

static driver_t fxp_driver = {
	"fxp",
	fxp_methods,
	sizeof(struct fxp_softc),
};

DRIVER_MODULE_ORDERED(fxp, pci, fxp_driver, NULL, NULL, SI_ORDER_ANY);
/* PNP table excludes the all-zero terminator entry. */
MODULE_PNP_INFO("U16:vendor;U16:device", pci, fxp, fxp_ident_table,
    nitems(fxp_ident_table) - 1);
DRIVER_MODULE(miibus, fxp, miibus_driver, NULL, NULL);
/* Register access via the memory-mapped BAR (FXP_PCI_MMBA) + shared IRQ. */
static struct resource_spec fxp_res_spec_mem[] = {
	{ SYS_RES_MEMORY, FXP_PCI_MMBA, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

/* Register access via I/O ports (FXP_PCI_IOBA) + shared IRQ. */
static struct resource_spec fxp_res_spec_io[] = {
	{ SYS_RES_IOPORT, FXP_PCI_IOBA, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};
/*
 * Wait for the previous command to be accepted (but not necessarily
 * completed).  The SCB command byte reads back as zero once the chip
 * has taken the command; poll for that, giving up after 10000 tries
 * (2us apart) and dumping diagnostic register state on timeout.
 */
static void
fxp_scb_wait(struct fxp_softc *sc)
{
	union {
		uint16_t w;
		uint8_t b[2];
	} flowctl;
	int tries = 10000;

	for (;;) {
		if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0)
			break;
		if (--tries == 0)
			break;
		DELAY(2);
	}
	if (tries == 0) {
		flowctl.b[0] = CSR_READ_1(sc, FXP_CSR_FC_THRESH);
		flowctl.b[1] = CSR_READ_1(sc, FXP_CSR_FC_STATUS);
		device_printf(sc->dev, "SCB timeout: 0x%x 0x%x 0x%x 0x%x\n",
		    CSR_READ_1(sc, FXP_CSR_SCB_COMMAND),
		    CSR_READ_1(sc, FXP_CSR_SCB_STATACK),
		    CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS), flowctl.w);
	}
}
/*
 * Issue a command through the SCB command register.  Chips with the
 * CU-resume erratum first get a NOP written (and waited on) so the
 * resume never lands while the chip is entering IDLE.
 */
static void
fxp_scb_cmd(struct fxp_softc *sc, int cmd)
{
	if (sc->cu_resume_bug && cmd == FXP_SCB_COMMAND_CU_RESUME) {
		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_CB_COMMAND_NOP);
		fxp_scb_wait(sc);
	}
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd);
}
/*
 * Poll a DMA-visible command-block status word for the "complete"
 * bit, re-syncing the map before each look so the CPU observes the
 * device's writes.  Gives up (with a console message) after
 * 10000 * 2us.
 */
static void
fxp_dma_wait(struct fxp_softc *sc, volatile uint16_t *status,
    bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int remaining = 10000;

	while (remaining > 0) {
		DELAY(2);
		bus_dmamap_sync(dmat, map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if ((le16toh(*status) & FXP_CB_STATUS_C) != 0)
			break;
		remaining--;
	}
	if (remaining == 0)
		device_printf(sc->dev, "DMA timeout\n");
}
/*
 * Look this device up in fxp_ident_table by PCI vendor/device ID and,
 * where the table entry specifies one, revision ID (revid == -1 in
 * the table matches any revision).  Returns NULL if the device is not
 * ours.
 */
static const struct fxp_ident *
fxp_find_ident(device_t dev)
{
	const struct fxp_ident *ent;
	uint16_t vid = pci_get_vendor(dev);
	uint16_t did = pci_get_device(dev);
	uint8_t rev = pci_get_revid(dev);

	for (ent = fxp_ident_table; ent->name != NULL; ent++) {
		if (ent->vendor != vid || ent->device != did)
			continue;
		if (ent->revid == rev || ent->revid == -1)
			return (ent);
	}
	return (NULL);
}
/*
 * Return identification string if this device is ours.
 */
static int
fxp_probe(device_t dev)
{
	const struct fxp_ident *ident = fxp_find_ident(dev);

	if (ident == NULL)
		return (ENXIO);
	device_set_desc(dev, ident->name);
	return (BUS_PROBE_DEFAULT);
}
/*
 * Busdma load callback: store the single segment's bus address into
 * the uint32_t pointed to by arg.  Nothing is written on error.
 */
static void
fxp_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	uint32_t *addrp = arg;

	if (error != 0)
		return;
	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	*addrp = segs[0].ds_addr;
}
static int
fxp_attach(device_t dev)
{
struct fxp_softc *sc;
struct fxp_cb_tx *tcbp;
struct fxp_tx *txp;
struct fxp_rx *rxp;
if_t ifp;
uint32_t val;
uint16_t data;
u_char eaddr[ETHER_ADDR_LEN];
int error, flags, i, pmc, prefer_iomap;
error = 0;
sc = device_get_softc(dev);
sc->dev = dev;
mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF);
callout_init_mtx(&sc->stat_ch, &sc->sc_mtx, 0);
ifmedia_init(&sc->sc_media, 0, fxp_serial_ifmedia_upd,
fxp_serial_ifmedia_sts);
ifp = sc->ifp = if_gethandle(IFT_ETHER);
- if (ifp == (void *)NULL) {
- device_printf(dev, "can not if_alloc()\n");
- error = ENOSPC;
- goto fail;
- }
/*
* Enable bus mastering.
*/
pci_enable_busmaster(dev);
/*
* Figure out which we should try first - memory mapping or i/o mapping?
* We default to memory mapping. Then we accept an override from the
* command line. Then we check to see which one is enabled.
*/
prefer_iomap = 0;
resource_int_value(device_get_name(dev), device_get_unit(dev),
"prefer_iomap", &prefer_iomap);
if (prefer_iomap)
sc->fxp_spec = fxp_res_spec_io;
else
sc->fxp_spec = fxp_res_spec_mem;
error = bus_alloc_resources(dev, sc->fxp_spec, sc->fxp_res);
if (error) {
if (sc->fxp_spec == fxp_res_spec_mem)
sc->fxp_spec = fxp_res_spec_io;
else
sc->fxp_spec = fxp_res_spec_mem;
error = bus_alloc_resources(dev, sc->fxp_spec, sc->fxp_res);
}
if (error) {
device_printf(dev, "could not allocate resources\n");
error = ENXIO;
goto fail;
}
if (bootverbose) {
device_printf(dev, "using %s space register mapping\n",
sc->fxp_spec == fxp_res_spec_mem ? "memory" : "I/O");
}
/*
* Put CU/RU idle state and prepare full reset.
*/
CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
DELAY(10);
/* Full reset and disable interrupts. */
CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
DELAY(10);
CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
/*
* Find out how large of an SEEPROM we have.
*/
fxp_autosize_eeprom(sc);
fxp_load_eeprom(sc);
/*
* Find out the chip revision; lump all 82557 revs together.
*/
sc->ident = fxp_find_ident(dev);
if (sc->ident->ich > 0) {
/* Assume ICH controllers are 82559. */
sc->revision = FXP_REV_82559_A0;
} else {
data = sc->eeprom[FXP_EEPROM_MAP_CNTR];
if ((data >> 8) == 1)
sc->revision = FXP_REV_82557;
else
sc->revision = pci_get_revid(dev);
}
/*
* Check availability of WOL. 82559ER does not support WOL.
*/
if (sc->revision >= FXP_REV_82558_A4 &&
sc->revision != FXP_REV_82559S_A) {
data = sc->eeprom[FXP_EEPROM_MAP_ID];
if ((data & 0x20) != 0 &&
pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0)
sc->flags |= FXP_FLAG_WOLCAP;
}
if (sc->revision == FXP_REV_82550_C) {
/*
* 82550C with server extension requires microcode to
* receive fragmented UDP datagrams. However if the
* microcode is used for client-only featured 82550C
* it locks up controller.
*/
data = sc->eeprom[FXP_EEPROM_MAP_COMPAT];
if ((data & 0x0400) == 0)
sc->flags |= FXP_FLAG_NO_UCODE;
}
/* Receiver lock-up workaround detection. */
if (sc->revision < FXP_REV_82558_A4) {
data = sc->eeprom[FXP_EEPROM_MAP_COMPAT];
if ((data & 0x03) != 0x03) {
sc->flags |= FXP_FLAG_RXBUG;
device_printf(dev, "Enabling Rx lock-up workaround\n");
}
}
/*
* Determine whether we must use the 503 serial interface.
*/
data = sc->eeprom[FXP_EEPROM_MAP_PRI_PHY];
if (sc->revision == FXP_REV_82557 && (data & FXP_PHY_DEVICE_MASK) != 0
&& (data & FXP_PHY_SERIAL_ONLY))
sc->flags |= FXP_FLAG_SERIAL_MEDIA;
fxp_sysctl_node(sc);
/*
* Enable workarounds for certain chip revision deficiencies.
*
* Systems based on the ICH2/ICH2-M chip from Intel, and possibly
* some systems based a normal 82559 design, have a defect where
* the chip can cause a PCI protocol violation if it receives
* a CU_RESUME command when it is entering the IDLE state. The
* workaround is to disable Dynamic Standby Mode, so the chip never
* deasserts CLKRUN#, and always remains in an active state.
*
* See Intel 82801BA/82801BAM Specification Update, Errata #30.
*/
if ((sc->ident->ich >= 2 && sc->ident->ich <= 3) ||
(sc->ident->ich == 0 && sc->revision >= FXP_REV_82559_A0)) {
data = sc->eeprom[FXP_EEPROM_MAP_ID];
if (data & 0x02) { /* STB enable */
uint16_t cksum;
int i;
device_printf(dev,
"Disabling dynamic standby mode in EEPROM\n");
data &= ~0x02;
sc->eeprom[FXP_EEPROM_MAP_ID] = data;
fxp_write_eeprom(sc, &data, FXP_EEPROM_MAP_ID, 1);
device_printf(dev, "New EEPROM ID: 0x%x\n", data);
cksum = 0;
for (i = 0; i < (1 << sc->eeprom_size) - 1; i++)
cksum += sc->eeprom[i];
i = (1 << sc->eeprom_size) - 1;
cksum = 0xBABA - cksum;
fxp_write_eeprom(sc, &cksum, i, 1);
device_printf(dev,
"EEPROM checksum @ 0x%x: 0x%x -> 0x%x\n",
i, sc->eeprom[i], cksum);
sc->eeprom[i] = cksum;
/*
* If the user elects to continue, try the software
* workaround, as it is better than nothing.
*/
sc->flags |= FXP_FLAG_CU_RESUME_BUG;
}
}
/*
* If we are not a 82557 chip, we can enable extended features.
*/
if (sc->revision != FXP_REV_82557) {
/*
* If MWI is enabled in the PCI configuration, and there
* is a valid cacheline size (8 or 16 dwords), then tell
* the board to turn on MWI.
*/
val = pci_read_config(dev, PCIR_COMMAND, 2);
if (val & PCIM_CMD_MWRICEN &&
pci_read_config(dev, PCIR_CACHELNSZ, 1) != 0)
sc->flags |= FXP_FLAG_MWI_ENABLE;
/* turn on the extended TxCB feature */
sc->flags |= FXP_FLAG_EXT_TXCB;
/* enable reception of long frames for VLAN */
sc->flags |= FXP_FLAG_LONG_PKT_EN;
} else {
/* a hack to get long VLAN frames on a 82557 */
sc->flags |= FXP_FLAG_SAVE_BAD;
}
/* For 82559 or later chips, Rx checksum offload is supported. */
if (sc->revision >= FXP_REV_82559_A0) {
/* 82559ER does not support Rx checksum offloading. */
if (sc->ident->device != 0x1209)
sc->flags |= FXP_FLAG_82559_RXCSUM;
}
/*
* Enable use of extended RFDs and TCBs for 82550
* and later chips. Note: we need extended TXCB support
* too, but that's already enabled by the code above.
* Be careful to do this only on the right devices.
*/
if (sc->revision == FXP_REV_82550 || sc->revision == FXP_REV_82550_C ||
sc->revision == FXP_REV_82551_E || sc->revision == FXP_REV_82551_F
|| sc->revision == FXP_REV_82551_10) {
sc->rfa_size = sizeof (struct fxp_rfa);
sc->tx_cmd = FXP_CB_COMMAND_IPCBXMIT;
sc->flags |= FXP_FLAG_EXT_RFA;
/* Use extended RFA instead of 82559 checksum mode. */
sc->flags &= ~FXP_FLAG_82559_RXCSUM;
} else {
sc->rfa_size = sizeof (struct fxp_rfa) - FXP_RFAX_LEN;
sc->tx_cmd = FXP_CB_COMMAND_XMIT;
}
/*
* Allocate DMA tags and DMA safe memory.
*/
sc->maxtxseg = FXP_NTXSEG;
sc->maxsegsize = MCLBYTES;
if (sc->flags & FXP_FLAG_EXT_RFA) {
sc->maxtxseg--;
sc->maxsegsize = FXP_TSO_SEGSIZE;
}
error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
sc->maxsegsize * sc->maxtxseg + sizeof(struct ether_vlan_header),
sc->maxtxseg, sc->maxsegsize, 0, NULL, NULL, &sc->fxp_txmtag);
if (error) {
device_printf(dev, "could not create TX DMA tag\n");
goto fail;
}
error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->fxp_rxmtag);
if (error) {
device_printf(dev, "could not create RX DMA tag\n");
goto fail;
}
error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
sizeof(struct fxp_stats), 1, sizeof(struct fxp_stats), 0,
NULL, NULL, &sc->fxp_stag);
if (error) {
device_printf(dev, "could not create stats DMA tag\n");
goto fail;
}
error = bus_dmamem_alloc(sc->fxp_stag, (void **)&sc->fxp_stats,
BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->fxp_smap);
if (error) {
device_printf(dev, "could not allocate stats DMA memory\n");
goto fail;
}
error = bus_dmamap_load(sc->fxp_stag, sc->fxp_smap, sc->fxp_stats,
sizeof(struct fxp_stats), fxp_dma_map_addr, &sc->stats_addr,
BUS_DMA_NOWAIT);
if (error) {
device_printf(dev, "could not load the stats DMA buffer\n");
goto fail;
}
error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
FXP_TXCB_SZ, 1, FXP_TXCB_SZ, 0, NULL, NULL, &sc->cbl_tag);
if (error) {
device_printf(dev, "could not create TxCB DMA tag\n");
goto fail;
}
error = bus_dmamem_alloc(sc->cbl_tag, (void **)&sc->fxp_desc.cbl_list,
BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->cbl_map);
if (error) {
device_printf(dev, "could not allocate TxCB DMA memory\n");
goto fail;
}
error = bus_dmamap_load(sc->cbl_tag, sc->cbl_map,
sc->fxp_desc.cbl_list, FXP_TXCB_SZ, fxp_dma_map_addr,
&sc->fxp_desc.cbl_addr, BUS_DMA_NOWAIT);
if (error) {
device_printf(dev, "could not load TxCB DMA buffer\n");
goto fail;
}
error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
sizeof(struct fxp_cb_mcs), 1, sizeof(struct fxp_cb_mcs), 0,
NULL, NULL, &sc->mcs_tag);
if (error) {
device_printf(dev,
"could not create multicast setup DMA tag\n");
goto fail;
}
error = bus_dmamem_alloc(sc->mcs_tag, (void **)&sc->mcsp,
BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->mcs_map);
if (error) {
device_printf(dev,
"could not allocate multicast setup DMA memory\n");
goto fail;
}
error = bus_dmamap_load(sc->mcs_tag, sc->mcs_map, sc->mcsp,
sizeof(struct fxp_cb_mcs), fxp_dma_map_addr, &sc->mcs_addr,
BUS_DMA_NOWAIT);
if (error) {
device_printf(dev,
"can't load the multicast setup DMA buffer\n");
goto fail;
}
/*
* Pre-allocate the TX DMA maps and setup the pointers to
* the TX command blocks.
*/
txp = sc->fxp_desc.tx_list;
tcbp = sc->fxp_desc.cbl_list;
for (i = 0; i < FXP_NTXCB; i++) {
txp[i].tx_cb = tcbp + i;
error = bus_dmamap_create(sc->fxp_txmtag, 0, &txp[i].tx_map);
if (error) {
device_printf(dev, "can't create DMA map for TX\n");
goto fail;
}
}
error = bus_dmamap_create(sc->fxp_rxmtag, 0, &sc->spare_map);
if (error) {
device_printf(dev, "can't create spare DMA map\n");
goto fail;
}
/*
* Pre-allocate our receive buffers.
*/
sc->fxp_desc.rx_head = sc->fxp_desc.rx_tail = NULL;
for (i = 0; i < FXP_NRFABUFS; i++) {
rxp = &sc->fxp_desc.rx_list[i];
error = bus_dmamap_create(sc->fxp_rxmtag, 0, &rxp->rx_map);
if (error) {
device_printf(dev, "can't create DMA map for RX\n");
goto fail;
}
if (fxp_new_rfabuf(sc, rxp) != 0) {
error = ENOMEM;
goto fail;
}
fxp_add_rfabuf(sc, rxp);
}
/*
* Read MAC address.
*/
eaddr[0] = sc->eeprom[FXP_EEPROM_MAP_IA0] & 0xff;
eaddr[1] = sc->eeprom[FXP_EEPROM_MAP_IA0] >> 8;
eaddr[2] = sc->eeprom[FXP_EEPROM_MAP_IA1] & 0xff;
eaddr[3] = sc->eeprom[FXP_EEPROM_MAP_IA1] >> 8;
eaddr[4] = sc->eeprom[FXP_EEPROM_MAP_IA2] & 0xff;
eaddr[5] = sc->eeprom[FXP_EEPROM_MAP_IA2] >> 8;
if (bootverbose) {
device_printf(dev, "PCI IDs: %04x %04x %04x %04x %04x\n",
pci_get_vendor(dev), pci_get_device(dev),
pci_get_subvendor(dev), pci_get_subdevice(dev),
pci_get_revid(dev));
device_printf(dev, "Dynamic Standby mode is %s\n",
sc->eeprom[FXP_EEPROM_MAP_ID] & 0x02 ? "enabled" :
"disabled");
}
/*
* If this is only a 10Mbps device, then there is no MII, and
* the PHY will use a serial interface instead.
*
* The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
* doesn't have a programming interface of any sort. The
* media is sensed automatically based on how the link partner
* is configured. This is, in essence, manual configuration.
*/
if (sc->flags & FXP_FLAG_SERIAL_MEDIA) {
ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
} else {
/*
* i82557 wedge when isolating all of their PHYs.
*/
flags = MIIF_NOISOLATE;
if (sc->revision >= FXP_REV_82558_A4)
flags |= MIIF_DOPAUSE;
error = mii_attach(dev, &sc->miibus, ifp,
(ifm_change_cb_t)fxp_ifmedia_upd,
(ifm_stat_cb_t)fxp_ifmedia_sts, BMSR_DEFCAPMASK,
MII_PHY_ANY, MII_OFFSET_ANY, flags);
if (error != 0) {
device_printf(dev, "attaching PHYs failed\n");
goto fail;
}
}
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setdev(ifp, dev);
if_setinitfn(ifp, fxp_init);
if_setsoftc(ifp, sc);
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setioctlfn(ifp, fxp_ioctl);
if_setstartfn(ifp, fxp_start);
if_setcapabilities(ifp, 0);
if_setcapenable(ifp, 0);
/* Enable checksum offload/TSO for 82550 or better chips */
if (sc->flags & FXP_FLAG_EXT_RFA) {
if_sethwassist(ifp, FXP_CSUM_FEATURES | CSUM_TSO);
if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_TSO4, 0);
if_setcapenablebit(ifp, IFCAP_HWCSUM | IFCAP_TSO4, 0);
}
if (sc->flags & FXP_FLAG_82559_RXCSUM) {
if_setcapabilitiesbit(ifp, IFCAP_RXCSUM, 0);
if_setcapenablebit(ifp, IFCAP_RXCSUM, 0);
}
if (sc->flags & FXP_FLAG_WOLCAP) {
if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
if_setcapenablebit(ifp, IFCAP_WOL_MAGIC, 0);
}
#ifdef DEVICE_POLLING
/* Inform the world we support polling. */
if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif
/*
* Attach the interface.
*/
ether_ifattach(ifp, eaddr);
/*
* Tell the upper layer(s) we support long frames.
* Must appear after the call to ether_ifattach() because
* ether_ifattach() sets ifi_hdrlen to the default value.
*/
if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
if_setcapenablebit(ifp, IFCAP_VLAN_MTU, 0);
if ((sc->flags & FXP_FLAG_EXT_RFA) != 0) {
if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING |
IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO, 0);
if_setcapenablebit(ifp, IFCAP_VLAN_HWTAGGING |
IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO, 0);
}
/*
* Let the system queue as many packets as we have available
* TX descriptors.
*/
if_setsendqlen(ifp, FXP_NTXCB - 1);
if_setsendqready(ifp);
/*
* Hook our interrupt after all initialization is complete.
*/
error = bus_setup_intr(dev, sc->fxp_res[1], INTR_TYPE_NET | INTR_MPSAFE,
NULL, fxp_intr, sc, &sc->ih);
if (error) {
device_printf(dev, "could not setup irq\n");
ether_ifdetach(sc->ifp);
goto fail;
}
/*
* Configure hardware to reject magic frames otherwise
* system will hang on recipt of magic frames.
*/
if ((sc->flags & FXP_FLAG_WOLCAP) != 0) {
FXP_LOCK(sc);
/* Clear wakeup events. */
CSR_WRITE_1(sc, FXP_CSR_PMDR, CSR_READ_1(sc, FXP_CSR_PMDR));
fxp_init_body(sc, 0);
fxp_stop(sc);
FXP_UNLOCK(sc);
}
fail:
if (error)
fxp_release(sc);
return (error);
}
/*
 * Release all resources.  The softc lock should not be held and the
 * interrupt should already be torn down (sc->ih == NULL); both are
 * asserted below.  Safe to call on a partially-initialized softc
 * (each resource is checked for NULL before being released), which is
 * why the attach failure path funnels through here.
 */
static void
fxp_release(struct fxp_softc *sc)
{
	struct fxp_rx *rxp;
	struct fxp_tx *txp;
	int i;

	FXP_LOCK_ASSERT(sc, MA_NOTOWNED);
	KASSERT(sc->ih == NULL,
	    ("fxp_release() called with intr handle still active"));
	if (sc->miibus)
		device_delete_child(sc->dev, sc->miibus);
	bus_generic_detach(sc->dev);
	ifmedia_removeall(&sc->sc_media);
	/* Unload and free the DMA-coherent control structures first. */
	if (sc->fxp_desc.cbl_list) {
		bus_dmamap_unload(sc->cbl_tag, sc->cbl_map);
		bus_dmamem_free(sc->cbl_tag, sc->fxp_desc.cbl_list,
		    sc->cbl_map);
	}
	if (sc->fxp_stats) {
		bus_dmamap_unload(sc->fxp_stag, sc->fxp_smap);
		bus_dmamem_free(sc->fxp_stag, sc->fxp_stats, sc->fxp_smap);
	}
	if (sc->mcsp) {
		bus_dmamap_unload(sc->mcs_tag, sc->mcs_map);
		bus_dmamem_free(sc->mcs_tag, sc->mcsp, sc->mcs_map);
	}
	bus_release_resources(sc->dev, sc->fxp_spec, sc->fxp_res);
	/*
	 * Free any mbufs still loaded in the RX ring, then destroy the
	 * per-buffer maps before the tag itself.
	 */
	if (sc->fxp_rxmtag) {
		for (i = 0; i < FXP_NRFABUFS; i++) {
			rxp = &sc->fxp_desc.rx_list[i];
			if (rxp->rx_mbuf != NULL) {
				bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->fxp_rxmtag, rxp->rx_map);
				m_freem(rxp->rx_mbuf);
			}
			bus_dmamap_destroy(sc->fxp_rxmtag, rxp->rx_map);
		}
		bus_dmamap_destroy(sc->fxp_rxmtag, sc->spare_map);
		bus_dma_tag_destroy(sc->fxp_rxmtag);
	}
	/* Same for the TX ring: drop in-flight mbufs, then maps and tag. */
	if (sc->fxp_txmtag) {
		for (i = 0; i < FXP_NTXCB; i++) {
			txp = &sc->fxp_desc.tx_list[i];
			if (txp->tx_mbuf != NULL) {
				bus_dmamap_sync(sc->fxp_txmtag, txp->tx_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->fxp_txmtag, txp->tx_map);
				m_freem(txp->tx_mbuf);
			}
			bus_dmamap_destroy(sc->fxp_txmtag, txp->tx_map);
		}
		bus_dma_tag_destroy(sc->fxp_txmtag);
	}
	if (sc->fxp_stag)
		bus_dma_tag_destroy(sc->fxp_stag);
	if (sc->cbl_tag)
		bus_dma_tag_destroy(sc->cbl_tag);
	if (sc->mcs_tag)
		bus_dma_tag_destroy(sc->mcs_tag);
	if (sc->ifp)
		if_free(sc->ifp);

	mtx_destroy(&sc->sc_mtx);
}
/*
 * Detach interface: stop the hardware, tear down the interrupt, and
 * release all resources via fxp_release().  Always returns 0.
 */
static int
fxp_detach(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);

#ifdef DEVICE_POLLING
	if (if_getcapenable(sc->ifp) & IFCAP_POLLING)
		ether_poll_deregister(sc->ifp);
#endif

	FXP_LOCK(sc);
	/*
	 * Stop DMA and drop transmit queue, but disable interrupts first.
	 */
	CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
	fxp_stop(sc);
	FXP_UNLOCK(sc);
	/* Must drain the stats callout without holding the lock. */
	callout_drain(&sc->stat_ch);

	/*
	 * Close down routes etc.
	 */
	ether_ifdetach(sc->ifp);

	/*
	 * Unhook interrupt before dropping lock. This is to prevent
	 * races with fxp_intr().
	 */
	bus_teardown_intr(sc->dev, sc->fxp_res[1], sc->ih);
	sc->ih = NULL;

	/* Release our allocated resources. */
	fxp_release(sc);
	return (0);
}
/*
 * Device shutdown routine. Called at system shutdown after sync. The
 * main purpose of this routine is to shut off receiver DMA so that
 * kernel memory doesn't get clobbered during warmboot.
 */
static int
fxp_shutdown(device_t dev)
{

	/*
	 * Make sure that DMA is disabled prior to reboot. Not doing
	 * so could allow DMA to corrupt kernel memory during the
	 * reboot before the driver initializes.  fxp_suspend() stops
	 * the chip, which is exactly what we need here.
	 */
	return (fxp_suspend(dev));
}
/*
 * Device suspend routine. Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.  If Wake-On-LAN magic packets are enabled, request PME
 * and reinitialize the chip so it keeps listening for them.
 */
static int
fxp_suspend(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);
	if_t ifp;
	int pmc;
	uint16_t pmstat;

	FXP_LOCK(sc);

	ifp = sc->ifp;
	if (pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0) {
		pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
		/* Clear PME status and disable PME by default. */
		pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
		if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) {
			/* Request PME. */
			pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
			sc->flags |= FXP_FLAG_WOL;
			/* Reconfigure hardware to accept magic frames. */
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			fxp_init_body(sc, 0);
		}
		pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
	fxp_stop(sc);

	sc->suspended = 1;

	FXP_UNLOCK(sc);
	return (0);
}
/*
 * Device resume routine. re-enable busmastering, and restart the interface if
 * appropriate.  Clears any pending PME status left over from a WOL
 * suspend, performs a selective reset, and reinitializes the chip when
 * the interface was administratively up.
 */
static int
fxp_resume(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);
	if_t ifp = sc->ifp;
	int pmc;
	uint16_t pmstat;

	FXP_LOCK(sc);

	if (pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0) {
		sc->flags &= ~FXP_FLAG_WOL;
		pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME and clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
		/* PMDR is read-to-clear; reading acknowledges wake events. */
		if ((sc->flags & FXP_FLAG_WOLCAP) != 0)
			CSR_WRITE_1(sc, FXP_CSR_PMDR,
			    CSR_READ_1(sc, FXP_CSR_PMDR));
	}

	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(10);

	/* reinitialize interface if necessary */
	if (if_getflags(ifp) & IFF_UP)
		fxp_init_body(sc, 1);

	sc->suspended = 0;

	FXP_UNLOCK(sc);
	return (0);
}
/*
 * Bit-bang `length' bits of `data' into the serial EEPROM, MSB first.
 * Each bit is presented on EEDI and clocked in with a raise/lower of
 * EESK while chip select (EECS) is held; the DELAY(1) calls satisfy
 * the EEPROM's minimum clock timing.
 */
static void
fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length)
{
	uint16_t reg;
	int x;

	/*
	 * Shift in data.
	 */
	for (x = 1 << (length - 1); x; x >>= 1) {
		if (data & x)
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		else
			reg = FXP_EEPROM_EECS;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		/* Rising edge of EESK latches the data bit. */
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
	}
}
/*
 * Read from the serial EEPROM. Basically, you manually shift in
 * the read opcode (one bit at a time) and then shift in the address,
 * and then you shift out the data (all of this one bit at a time).
 * The word size is 16 bits, so you have to provide the address for
 * every 16 bits of data.
 *
 * With `autosize' set, this also detects the EEPROM's address width:
 * after the last valid address bit the device drives EEDO low (the
 * "dummy zero"), and the number of bits shifted so far is recorded in
 * sc->eeprom_size.  See fxp_autosize_eeprom() for the rationale.
 */
static uint16_t
fxp_eeprom_getword(struct fxp_softc *sc, int offset, int autosize)
{
	uint16_t reg, data;
	int x;

	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	/*
	 * Shift in read opcode.
	 */
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3);
	/*
	 * Shift in address.
	 * NOTE: `data' doubles as the address-bit counter here; it is
	 * reset to zero before the data phase below.
	 */
	data = 0;
	for (x = 1 << (sc->eeprom_size - 1); x; x >>= 1) {
		if (offset & x)
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		else
			reg = FXP_EEPROM_EECS;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		/* Sample EEDO: goes low once the full address is in. */
		reg = CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO;
		data++;
		if (autosize && reg == 0) {
			sc->eeprom_size = data;
			break;
		}
	}
	/*
	 * Shift out data.
	 */
	data = 0;
	reg = FXP_EEPROM_EECS;
	for (x = 1 << 15; x; x >>= 1) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			data |= x;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
	}
	/* Deassert chip select to end the transaction. */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);

	return (data);
}
/*
 * Write one 16-bit word to the serial EEPROM at `offset'.
 * Sequence: erase/write enable (EWEN), then the write opcode +
 * address + data, then poll EEDO for write completion (bounded at
 * 1000 * 50us), and finally erase/write disable (EWDS) so stray
 * accesses cannot corrupt the EEPROM.
 */
static void
fxp_eeprom_putword(struct fxp_softc *sc, int offset, uint16_t data)
{
	int i;

	/*
	 * Erase/write enable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	/* EWEN is opcode 00 followed by address bits 11xx...x. */
	fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Shift in write opcode, address, data.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3);
	fxp_eeprom_shiftin(sc, offset, sc->eeprom_size);
	fxp_eeprom_shiftin(sc, data, 16);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Wait for EEPROM to finish up.  The device raises EEDO when the
	 * internal programming cycle completes.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	DELAY(1);
	for (i = 0; i < 1000; i++) {
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			break;
		DELAY(50);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Erase/write disable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0, sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
}
/*
* From NetBSD:
*
* Figure out EEPROM size.
*
* 559's can have either 64-word or 256-word EEPROMs, the 558
* datasheet only talks about 64-word EEPROMs, and the 557 datasheet
* talks about the existence of 16 to 256 word EEPROMs.
*
* The only known sizes are 64 and 256, where the 256 version is used
* by CardBus cards to store CIS information.
*
* The address is shifted in msb-to-lsb, and after the last
* address-bit the EEPROM is supposed to output a `dummy zero' bit,
* after which follows the actual data. We try to detect this zero, by
* probing the data-out bit in the EEPROM control register just after
* having shifted in a bit. If the bit is zero, we assume we've
* shifted enough address bits. The data-out should be tri-state,
* before this, which should translate to a logical one.
*/
static void
fxp_autosize_eeprom(struct fxp_softc *sc)
{

	/*
	 * Assume the largest known address width (2^8 = 256 words);
	 * the probe read below shrinks sc->eeprom_size down to the
	 * width the device actually acknowledges.
	 */
	sc->eeprom_size = 8;
	/* One read with the autosize flag set detects the real size. */
	(void)fxp_eeprom_getword(sc, 0, 1);
}
/*
 * Read `words' consecutive 16-bit words from the EEPROM starting at
 * `offset' into the caller-supplied array `data'.
 */
static void
fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
{
	int idx;

	for (idx = 0; idx < words; idx++)
		data[idx] = fxp_eeprom_getword(sc, offset + idx, 0);
}
/*
 * Write `words' consecutive 16-bit words from `data' to the EEPROM
 * starting at `offset'.  Callers are responsible for updating the
 * EEPROM checksum word afterwards.
 */
static void
fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
{
	int idx;

	for (idx = 0; idx < words; idx++)
		fxp_eeprom_putword(sc, offset + idx, data[idx]);
}
/*
 * Load the entire EEPROM into the softc shadow copy (sc->eeprom) and
 * verify its checksum.  The last word is a checksum chosen so that
 * the sum of all words equals 0xBABA; a mismatch is reported but not
 * treated as fatal.
 */
static void
fxp_load_eeprom(struct fxp_softc *sc)
{
	uint16_t sum;
	int idx, last;

	fxp_read_eeprom(sc, sc->eeprom, 0, 1 << sc->eeprom_size);
	last = (1 << sc->eeprom_size) - 1;
	sum = 0;
	for (idx = 0; idx < last; idx++)
		sum += sc->eeprom[idx];
	sum = 0xBABA - sum;
	if (sum != sc->eeprom[last])
		device_printf(sc->dev,
		    "EEPROM checksum mismatch! (0x%04x -> 0x%04x)\n",
		    sum, sc->eeprom[last]);
}
/*
* Grab the softc lock and call the real fxp_start_body() routine
*/
static void
fxp_start(if_t ifp)
{
	struct fxp_softc *sc;

	/* Take the softc lock and defer to the unlocked worker. */
	sc = if_getsoftc(ifp);
	FXP_LOCK(sc);
	fxp_start_body(ifp);
	FXP_UNLOCK(sc);
}
/*
 * Start packet transmission on the interface.
 * This routine must be called with the softc lock held, and is an
 * internal entry point only.
 */
static void
fxp_start_body(if_t ifp)
{
	struct fxp_softc *sc = if_getsoftc(ifp);
	struct mbuf *mb_head;
	int txqueued;

	FXP_LOCK_ASSERT(sc, MA_OWNED);

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	/* Reap completed TxCBs first when the ring is getting full. */
	if (sc->tx_queued > FXP_NTXCB_HIWAT)
		fxp_txeof(sc);
	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add
	 * a NOP command when needed.
	 */
	txqueued = 0;
	while (!if_sendq_empty(ifp) && sc->tx_queued < FXP_NTXCB - 1) {
		/*
		 * Grab a packet to transmit.
		 */
		mb_head = if_dequeue(ifp);
		if (mb_head == NULL)
			break;

		if (fxp_encap(sc, &mb_head)) {
			if (mb_head == NULL)
				break;
			if_sendq_prepend(ifp, mb_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			/*
			 * Must stop here: the mbuf was put back at the
			 * head of the send queue, so continuing would
			 * dequeue the very same packet again (and likely
			 * fail the same way), spinning with the lock
			 * held.  It would also wrongly count and bpf-tap
			 * a packet that was never handed to the chip.
			 */
			break;
		}
		txqueued++;
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		bpf_mtap_if(ifp, mb_head);
	}

	/*
	 * We're finished. If we added to the list, issue a RESUME to get DMA
	 * going again if suspended.
	 */
	if (txqueued > 0) {
		bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		fxp_scb_wait(sc);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
		/*
		 * Set a 5 second timer just in case we don't hear
		 * from the card again.
		 */
		sc->watchdog_timer = 5;
	}
}
/*
 * Encapsulate one packet into the next free TxCB.
 *
 * Handles TSO header fixups, TCP/UDP checksum offload scheduling,
 * VLAN tag insertion and DMA segment loading.  On success the TxCB
 * is linked onto the ring (tx_last advanced) and 0 is returned.  On
 * failure a non-zero errno is returned; *m_head is freed and NULLed
 * for unrecoverable mbuf errors, or left intact for retryable DMA
 * load errors so the caller can requeue it.
 * Called with the softc lock held.
 */
static int
fxp_encap(struct fxp_softc *sc, struct mbuf **m_head)
{
	struct mbuf *m;
	struct fxp_tx *txp;
	struct fxp_cb_tx *cbp;
	struct tcphdr *tcp;
	bus_dma_segment_t segs[FXP_NTXSEG];
	int error, i, nseg, tcp_payload;

	FXP_LOCK_ASSERT(sc, MA_OWNED);

	tcp_payload = 0;
	tcp = NULL;
	/*
	 * Get pointer to next available tx desc.
	 */
	txp = sc->fxp_desc.tx_last->tx_next;

	/*
	 * A note in Appendix B of the Intel 8255x 10/100 Mbps
	 * Ethernet Controller Family Open Source Software
	 * Developer Manual says:
	 *	Using software parsing is only allowed with legal
	 *	TCP/IP or UDP/IP packets.
	 *	...
	 *	For all other datagrams, hardware parsing must
	 *	be used.
	 * Software parsing appears to truncate ICMP and
	 * fragmented UDP packets that contain one to three
	 * bytes in the second (and final) mbuf of the packet.
	 */
	if (sc->flags & FXP_FLAG_EXT_RFA)
		txp->tx_cb->ipcb_ip_activation_high =
		    FXP_IPCB_HARDWAREPARSING_ENABLE;

	m = *m_head;
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		/*
		 * 82550/82551 requires ethernet/IP/TCP headers must be
		 * contained in the first active transmit buffer.
		 * The m_pullup() chain below guarantees that, growing
		 * the contiguous region header by header.
		 */
		struct ether_header *eh;
		struct ip *ip;
		uint32_t ip_off, poff;

		if (M_WRITABLE(*m_head) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_NOWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		ip_off = sizeof(struct ether_header);
		m = m_pullup(*m_head, ip_off);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/* Check the existence of VLAN tag. */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			ip_off = sizeof(struct ether_vlan_header);
			m = m_pullup(m, ip_off);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, ip_off + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		poff = ip_off + (ip->ip_hl << 2);
		m = m_pullup(m, poff + sizeof(struct tcphdr));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		tcp = (struct tcphdr *)(mtod(m, char *) + poff);
		m = m_pullup(m, poff + (tcp->th_off << 2));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}

		/*
		 * Since 82550/82551 doesn't modify IP length and pseudo
		 * checksum in the first frame driver should compute it.
		 * Re-derive the header pointers: m_pullup() may have
		 * moved the data.
		 */
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		tcp = (struct tcphdr *)(mtod(m, char *) + poff);
		ip->ip_sum = 0;
		ip->ip_len = htons(m->m_pkthdr.tso_segsz + (ip->ip_hl << 2) +
		    (tcp->th_off << 2));
		tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(IPPROTO_TCP + (tcp->th_off << 2) +
		    m->m_pkthdr.tso_segsz));
		/* Compute total TCP payload. */
		tcp_payload = m->m_pkthdr.len - ip_off - (ip->ip_hl << 2);
		tcp_payload -= tcp->th_off << 2;
		*m_head = m;
	} else if (m->m_pkthdr.csum_flags & FXP_CSUM_FEATURES) {
		/*
		 * Deal with TCP/IP checksum offload. Note that
		 * in order for TCP checksum offload to work,
		 * the pseudo header checksum must have already
		 * been computed and stored in the checksum field
		 * in the TCP header. The stack should have
		 * already done this for us.
		 */
		txp->tx_cb->ipcb_ip_schedule = FXP_IPCB_TCPUDP_CHECKSUM_ENABLE;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			txp->tx_cb->ipcb_ip_schedule |= FXP_IPCB_TCP_PACKET;

#ifdef FXP_IP_CSUM_WAR
		/*
		 * XXX The 82550 chip appears to have trouble
		 * dealing with IP header checksums in very small
		 * datagrams, namely fragments from 1 to 3 bytes
		 * in size. For example, say you want to transmit
		 * a UDP packet of 1473 bytes. The packet will be
		 * fragmented over two IP datagrams, the latter
		 * containing only one byte of data. The 82550 will
		 * botch the header checksum on the 1-byte fragment.
		 * As long as the datagram contains 4 or more bytes
		 * of data, you're ok.
		 *
		 * The following code attempts to work around this
		 * problem: if the datagram is less than 38 bytes
		 * in size (14 bytes ether header, 20 bytes IP header,
		 * plus 4 bytes of data), we punt and compute the IP
		 * header checksum by hand. This workaround doesn't
		 * work very well, however, since it can be fooled
		 * by things like VLAN tags and IP options that make
		 * the header sizes/offsets vary.
		 */
		if (m->m_pkthdr.csum_flags & CSUM_IP) {
			if (m->m_pkthdr.len < 38) {
				struct ip *ip;
				m->m_data += ETHER_HDR_LEN;
				ip = mtod(m, struct ip *);
				ip->ip_sum = in_cksum(m, ip->ip_hl << 2);
				m->m_data -= ETHER_HDR_LEN;
				m->m_pkthdr.csum_flags &= ~CSUM_IP;
			} else {
				txp->tx_cb->ipcb_ip_activation_high =
				    FXP_IPCB_HARDWAREPARSING_ENABLE;
				txp->tx_cb->ipcb_ip_schedule |=
				    FXP_IPCB_IP_CHECKSUM_ENABLE;
			}
		}
#endif
	}

	/* Load the mbuf chain; defragment once with m_collapse() on EFBIG. */
	error = bus_dmamap_load_mbuf_sg(sc->fxp_txmtag, txp->tx_map, *m_head,
	    segs, &nseg, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, sc->maxtxseg);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->fxp_txmtag, txp->tx_map,
		    *m_head, segs, &nseg, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
	} else if (error != 0)
		return (error);
	if (nseg == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	KASSERT(nseg <= sc->maxtxseg, ("too many DMA segments"));

	bus_dmamap_sync(sc->fxp_txmtag, txp->tx_map, BUS_DMASYNC_PREWRITE);

	cbp = txp->tx_cb;
	for (i = 0; i < nseg; i++) {
		/*
		 * If this is an 82550/82551, then we're using extended
		 * TxCBs _and_ we're using checksum offload. This means
		 * that the TxCB is really an IPCB. One major difference
		 * between the two is that with plain extended TxCBs,
		 * the bottom half of the TxCB contains two entries from
		 * the TBD array, whereas IPCBs contain just one entry:
		 * one entry (8 bytes) has been sacrificed for the TCP/IP
		 * checksum offload control bits. So to make things work
		 * right, we have to start filling in the TBD array
		 * starting from a different place depending on whether
		 * the chip is an 82550/82551 or not.
		 */
		if (sc->flags & FXP_FLAG_EXT_RFA) {
			cbp->tbd[i + 1].tb_addr = htole32(segs[i].ds_addr);
			cbp->tbd[i + 1].tb_size = htole32(segs[i].ds_len);
		} else {
			cbp->tbd[i].tb_addr = htole32(segs[i].ds_addr);
			cbp->tbd[i].tb_size = htole32(segs[i].ds_len);
		}
	}
	if (sc->flags & FXP_FLAG_EXT_RFA) {
		/* Configure dynamic TBD for 82550/82551. */
		cbp->tbd_number = 0xFF;
		/* Bit 15 of the last TBD size marks end-of-list. */
		cbp->tbd[nseg].tb_size |= htole32(0x8000);
	} else
		cbp->tbd_number = nseg;
	/* Configure TSO. */
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		/* Upper 16 bits of tbdtso size carry the MSS. */
		cbp->tbdtso.tb_size = htole32(m->m_pkthdr.tso_segsz << 16);
		cbp->tbd[1].tb_size |= htole32(tcp_payload << 16);
		cbp->ipcb_ip_schedule |= FXP_IPCB_LARGESEND_ENABLE |
		    FXP_IPCB_IP_CHECKSUM_ENABLE |
		    FXP_IPCB_TCP_PACKET |
		    FXP_IPCB_TCPUDP_CHECKSUM_ENABLE;
	}
	/* Configure VLAN hardware tag insertion. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		cbp->ipcb_vlan_id = htons(m->m_pkthdr.ether_vtag);
		txp->tx_cb->ipcb_ip_activation_high |=
		    FXP_IPCB_INSERTVLAN_ENABLE;
	}

	txp->tx_mbuf = m;
	txp->tx_cb->cb_status = 0;
	txp->tx_cb->byte_count = 0;
	/*
	 * Only request a completion interrupt every FXP_CXINT_THRESH
	 * packets, to reduce interrupt load.
	 */
	if (sc->tx_queued != FXP_CXINT_THRESH - 1)
		txp->tx_cb->cb_command =
		    htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
		    FXP_CB_COMMAND_S);
	else
		txp->tx_cb->cb_command =
		    htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
		    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0)
		txp->tx_cb->tx_threshold = tx_threshold;

	/*
	 * Advance the end of list forward.  Clearing the S (suspend) bit
	 * on the previous tail lets the chip proceed into this TxCB.
	 */
	sc->fxp_desc.tx_last->tx_cb->cb_command &= htole16(~FXP_CB_COMMAND_S);
	sc->fxp_desc.tx_last = txp;

	/*
	 * Advance the beginning of the list forward if there are
	 * no other packets queued (when nothing is queued, tx_first
	 * sits on the last TxCB that was sent out).
	 */
	if (sc->tx_queued == 0)
		sc->fxp_desc.tx_first = txp;
	sc->tx_queued++;

	return (0);
}
#ifdef DEVICE_POLLING
static poll_handler_t fxp_poll;

/*
 * DEVICE_POLLING entry point: process up to `count' RX packets and
 * any pending TX completions without interrupts.  Returns the number
 * of RX packets processed.  With POLL_AND_CHECK_STATUS, also reads
 * and acknowledges any status bits beyond the ones always handled.
 */
static int
fxp_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct fxp_softc *sc = if_getsoftc(ifp);
	uint8_t statack;
	int rx_npkts = 0;

	FXP_LOCK(sc);
	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
		FXP_UNLOCK(sc);
		return (rx_npkts);
	}
	/* Events always serviced on a poll: TX done, CU-not-active, RX. */
	statack = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA |
	    FXP_SCB_STATACK_FR;
	if (cmd == POLL_AND_CHECK_STATUS) {
		uint8_t tmp;

		tmp = CSR_READ_1(sc, FXP_CSR_SCB_STATACK);
		/* 0xff suggests the card was ejected; 0 means no events. */
		if (tmp == 0xff || tmp == 0) {
			FXP_UNLOCK(sc);
			return (rx_npkts); /* nothing to do */
		}
		tmp &= ~statack;
		/* ack what we can */
		if (tmp != 0)
			CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, tmp);
		statack |= tmp;
	}
	rx_npkts = fxp_intr_body(sc, ifp, statack, count);
	FXP_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */
/*
 * Process interface interrupts.  Reads and acknowledges SCB status
 * bits in a loop until none remain, handing each batch to
 * fxp_intr_body() with an unbounded packet budget (-1).
 */
static void
fxp_intr(void *xsc)
{
	struct fxp_softc *sc = xsc;
	if_t ifp = sc->ifp;
	uint8_t statack;

	FXP_LOCK(sc);
	/* Ignore interrupts while suspended (shared-IRQ stray events). */
	if (sc->suspended) {
		FXP_UNLOCK(sc);
		return;
	}

#ifdef DEVICE_POLLING
	if (if_getcapenable(ifp) & IFCAP_POLLING) {
		FXP_UNLOCK(sc);
		return;
	}
#endif
	while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
		/*
		 * It should not be possible to have all bits set; the
		 * FXP_SCB_INTR_SWI bit always returns 0 on a read. If
		 * all bits are set, this may indicate that the card has
		 * been physically ejected, so ignore it.
		 */
		if (statack == 0xff) {
			FXP_UNLOCK(sc);
			return;
		}

		/*
		 * First ACK all the interrupts in this pass.
		 */
		CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			fxp_intr_body(sc, ifp, statack, -1);
	}
	FXP_UNLOCK(sc);
}
/*
 * Reap completed transmit command blocks.  Walks the TxCB ring from
 * tx_first, freeing the mbuf and unloading the DMA map of every TxCB
 * the chip has marked complete (FXP_CB_STATUS_C), and clears the
 * watchdog once the ring is empty.  Called with the softc lock held.
 */
static void
fxp_txeof(struct fxp_softc *sc)
{
	if_t ifp;
	struct fxp_tx *txp;

	ifp = sc->ifp;
	/* Pull in the chip's status writes before inspecting the ring. */
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (txp = sc->fxp_desc.tx_first; sc->tx_queued &&
	    (le16toh(txp->tx_cb->cb_status) & FXP_CB_STATUS_C) != 0;
	    txp = txp->tx_next) {
		if (txp->tx_mbuf != NULL) {
			bus_dmamap_sync(sc->fxp_txmtag, txp->tx_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->fxp_txmtag, txp->tx_map);
			m_freem(txp->tx_mbuf);
			txp->tx_mbuf = NULL;
			/* clear this to reset csum offload bits */
			txp->tx_cb->tbd[0].tb_addr = 0;
		}
		sc->tx_queued--;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}
	sc->fxp_desc.tx_first = txp;
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	if (sc->tx_queued == 0)
		sc->watchdog_timer = 0;
}
/*
 * Translate hardware receive checksum results into mbuf csum flags.
 *
 * Two modes: chips with the extended RFA report parsed per-packet
 * results in `status'; 82559-class chips (FXP_FLAG_82559_RXCSUM)
 * instead append a raw 16-bit checksum to the frame at offset `pos',
 * which we must validate ourselves after sanity-checking the headers.
 */
static void
fxp_rxcsum(struct fxp_softc *sc, if_t ifp, struct mbuf *m,
    uint16_t status, int pos)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	int32_t hlen, len, pktlen, temp32;
	uint16_t csum, *opts;

	if ((sc->flags & FXP_FLAG_82559_RXCSUM) == 0) {
		/* Extended-RFA path: trust the chip's parse results. */
		if ((status & FXP_RFA_STATUS_PARSE) != 0) {
			if (status & FXP_RFDX_CS_IP_CSUM_BIT_VALID)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (status & FXP_RFDX_CS_IP_CSUM_VALID)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			if ((status & FXP_RFDX_CS_TCPUDP_CSUM_BIT_VALID) &&
			    (status & FXP_RFDX_CS_TCPUDP_CSUM_VALID)) {
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}
		return;
	}

	/*
	 * 82559 path: only plain, unfragmented IPv4 TCP/UDP frames whose
	 * lengths are self-consistent qualify; bail out silently on
	 * anything else and let the stack verify in software.
	 */
	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* can't handle fragmented packet */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((caddr_t)ip + hlen);
		if (uh->uh_sum == 0)
			return;	/* no checksum */
		break;
	default:
		return;
	}
	/* Extract computed checksum. */
	csum = be16dec(mtod(m, char *) + pos);
	/*
	 * checksum fixup for IP options: the hardware sum includes the
	 * option bytes, so subtract them back out in one's complement.
	 */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (uint16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(uint16_t), opts++) {
			temp32 = csum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			csum = temp32 & 65535;
		}
	}
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = csum;
}
/*
 * Main interrupt/poll work loop: reap completed transmits, restart the
 * transmitter if packets are queued, and drain the receive ring.
 *
 * 'statack' is the set of already-acknowledged SCB status bits; 'count'
 * limits the number of RX frames processed when DEVICE_POLLING is active
 * (pass a negative count for "no limit").  Returns the number of packets
 * passed up to the stack.  Called with the softc lock held; the lock is
 * dropped around if_input().
 */
static int
fxp_intr_body(struct fxp_softc *sc, if_t ifp, uint8_t statack,
    int count)
{
	struct mbuf *m;
	struct fxp_rx *rxp;
	struct fxp_rfa *rfa;
	int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0;
	int rx_npkts;
	uint16_t status;

	rx_npkts = 0;
	FXP_LOCK_ASSERT(sc, MA_OWNED);

	if (rnr)
		sc->rnr++;
#ifdef DEVICE_POLLING
	/* Pick up a deferred RNR condition if `count' ran out last time. */
	if (sc->flags & FXP_FLAG_DEFERRED_RNR) {
		sc->flags &= ~FXP_FLAG_DEFERRED_RNR;
		rnr = 1;
	}
#endif

	/*
	 * Free any finished transmit mbuf chains.
	 *
	 * Handle the CNA event like a CXTNO event. It used to
	 * be that this event (control unit not ready) was not
	 * encountered, but it is now with the SMPng modifications.
	 * The exact sequence of events that occur when the interface
	 * is brought up are different now, and if this event
	 * goes unhandled, the configuration/rxfilter setup sequence
	 * can stall for several seconds. The result is that no
	 * packets go out onto the wire for about 5 to 10 seconds
	 * after the interface is ifconfig'ed for the first time.
	 */
	if (statack & (FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA))
		fxp_txeof(sc);

	/*
	 * Try to start more packets transmitting.
	 */
	if (!if_sendq_empty(ifp))
		fxp_start_body(ifp);

	/*
	 * Just return if nothing happened on the receive side.
	 */
	if (!rnr && (statack & FXP_SCB_STATACK_FR) == 0)
		return (rx_npkts);

	/*
	 * Process receiver interrupts. If a no-resource (RNR)
	 * condition exists, get whatever packets we can and
	 * re-start the receiver.
	 *
	 * When using polling, we do not process the list to completion,
	 * so when we get an RNR interrupt we must defer the restart
	 * until we hit the last buffer with the C bit set.
	 * If we run out of cycles and rfa_headm has the C bit set,
	 * record the pending RNR in the FXP_FLAG_DEFERRED_RNR flag so
	 * that the info will be used in the subsequent polling cycle.
	 */
	for (;;) {
		rxp = sc->fxp_desc.rx_head;
		m = rxp->rx_mbuf;
		/* The RFA lives at the head of the mbuf cluster. */
		rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
		    RFA_ALIGNMENT_FUDGE);
		bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

#ifdef DEVICE_POLLING /* loop at most count times if count >=0 */
		if (count >= 0 && count-- == 0) {
			if (rnr) {
				/* Defer RNR processing until the next time. */
				sc->flags |= FXP_FLAG_DEFERRED_RNR;
				rnr = 0;
			}
			break;
		}
#endif /* DEVICE_POLLING */

		/* Stop at the first descriptor the chip hasn't completed. */
		status = le16toh(rfa->rfa_status);
		if ((status & FXP_RFA_STATUS_C) == 0)
			break;

		if ((status & FXP_RFA_STATUS_RNR) != 0)
			rnr++;
		/*
		 * Advance head forward.
		 */
		sc->fxp_desc.rx_head = rxp->rx_next;

		/*
		 * Add a new buffer to the receive chain.
		 * If this fails, the old buffer is recycled
		 * instead.
		 */
		if (fxp_new_rfabuf(sc, rxp) == 0) {
			int total_len;

			/*
			 * Fetch packet length (the top 2 bits of
			 * actual_size are flags set by the controller
			 * upon completion), and drop the packet in case
			 * of bogus length or CRC errors.
			 */
			total_len = le16toh(rfa->actual_size) & 0x3fff;
			if ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0 &&
			    (if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
				/* Adjust for appended checksum bytes. */
				total_len -= 2;
			}
			if (total_len < (int)sizeof(struct ether_header) ||
			    total_len > (MCLBYTES - RFA_ALIGNMENT_FUDGE -
			    sc->rfa_size) ||
			    status & (FXP_RFA_STATUS_CRC |
			    FXP_RFA_STATUS_ALIGN | FXP_RFA_STATUS_OVERRUN)) {
				m_freem(m);
				fxp_add_rfabuf(sc, rxp);
				continue;
			}

			m->m_pkthdr.len = m->m_len = total_len;
			if_setrcvif(m, ifp);

			/* Do IP checksum checking. */
			if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
				fxp_rxcsum(sc, ifp, m, status, total_len);
			if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0 &&
			    (status & FXP_RFA_STATUS_VLAN) != 0) {
				m->m_pkthdr.ether_vtag =
				    ntohs(rfa->rfax_vlan_id);
				m->m_flags |= M_VLANTAG;
			}
			/*
			 * Drop locks before calling if_input() since it
			 * may re-enter fxp_start() in the netisr case.
			 * This would result in a lock reversal. Better
			 * performance might be obtained by chaining all
			 * packets received, dropping the lock, and then
			 * calling if_input() on each one.
			 */
			FXP_UNLOCK(sc);
			if_input(ifp, m);
			FXP_LOCK(sc);
			rx_npkts++;
			/* The interface may have been stopped while unlocked. */
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
				return (rx_npkts);
		} else {
			/* Reuse RFA and loaded DMA map. */
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			fxp_discard_rfabuf(sc, rxp);
		}
		fxp_add_rfabuf(sc, rxp);
	}
	if (rnr) {
		/* Restart the receive unit at the new ring head. */
		fxp_scb_wait(sc);
		CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
		    sc->fxp_desc.rx_head->rx_addr);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
	}
	return (rx_npkts);
}
/*
 * Harvest the statistics that the chip DMA'd into the dump-stats buffer
 * and fold them into the software counters and the ifnet counters.
 *
 * The completion marker lives at a revision-dependent offset (newer chips
 * dump progressively larger structures), so 'status' is pointed at the
 * right word before checking for FXP_STATS_DR_COMPLETE.  If the dump has
 * not completed yet, nothing is touched.  Called with the softc lock held.
 */
static void
fxp_update_stats(struct fxp_softc *sc)
{
	if_t ifp = sc->ifp;
	struct fxp_stats *sp = sc->fxp_stats;
	struct fxp_hwstats *hsp;
	uint32_t *status;

	FXP_LOCK_ASSERT(sc, MA_OWNED);

	bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/* Update statistical counters. */
	/* Pick the completion word that matches this chip's dump layout. */
	if (sc->revision >= FXP_REV_82559_A0)
		status = &sp->completion_status;
	else if (sc->revision >= FXP_REV_82558_A4)
		status = (uint32_t *)&sp->tx_tco;
	else
		status = &sp->tx_pause;
	if (*status == htole32(FXP_STATS_DR_COMPLETE)) {
		hsp = &sc->fxp_hwstats;

		/* Accumulate the per-dump deltas into the running totals. */
		hsp->tx_good += le32toh(sp->tx_good);
		hsp->tx_maxcols += le32toh(sp->tx_maxcols);
		hsp->tx_latecols += le32toh(sp->tx_latecols);
		hsp->tx_underruns += le32toh(sp->tx_underruns);
		hsp->tx_lostcrs += le32toh(sp->tx_lostcrs);
		hsp->tx_deffered += le32toh(sp->tx_deffered);
		hsp->tx_single_collisions += le32toh(sp->tx_single_collisions);
		hsp->tx_multiple_collisions +=
		    le32toh(sp->tx_multiple_collisions);
		hsp->tx_total_collisions += le32toh(sp->tx_total_collisions);
		hsp->rx_good += le32toh(sp->rx_good);
		hsp->rx_crc_errors += le32toh(sp->rx_crc_errors);
		hsp->rx_alignment_errors += le32toh(sp->rx_alignment_errors);
		hsp->rx_rnr_errors += le32toh(sp->rx_rnr_errors);
		hsp->rx_overrun_errors += le32toh(sp->rx_overrun_errors);
		hsp->rx_cdt_errors += le32toh(sp->rx_cdt_errors);
		hsp->rx_shortframes += le32toh(sp->rx_shortframes);
		hsp->tx_pause += le32toh(sp->tx_pause);
		hsp->rx_pause += le32toh(sp->rx_pause);
		hsp->rx_controls += le32toh(sp->rx_controls);
		hsp->tx_tco += le16toh(sp->tx_tco);
		hsp->rx_tco += le16toh(sp->rx_tco);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, le32toh(sp->tx_good));
		if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
		    le32toh(sp->tx_total_collisions));
		if (sp->rx_good) {
			if_inc_counter(ifp, IFCOUNTER_IPACKETS,
			    le32toh(sp->rx_good));
			sc->rx_idle_secs = 0;
		} else if (sc->flags & FXP_FLAG_RXBUG) {
			/*
			 * Receiver's been idle for another second.
			 */
			sc->rx_idle_secs++;
		}
		if_inc_counter(ifp, IFCOUNTER_IERRORS,
		    le32toh(sp->rx_crc_errors) +
		    le32toh(sp->rx_alignment_errors) +
		    le32toh(sp->rx_rnr_errors) +
		    le32toh(sp->rx_overrun_errors));
		/*
		 * If any transmit underruns occurred, bump up the transmit
		 * threshold by another 512 bytes (64 * 8).
		 */
		if (sp->tx_underruns) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS,
			    le32toh(sp->tx_underruns));
			if (tx_threshold < 192)
				tx_threshold += 64;
		}
		/* Re-arm the buffer for the next dump. */
		*status = 0;
		bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
/*
* Update packet in/out/collision statistics. The i82557 doesn't
* allow you to access these counters without doing a fairly
* expensive DMA to get _all_ of the statistics it maintains, so
* we do this operation here only once per second. The statistics
* counters in the kernel are updated from the previous dump-stats
* DMA and then a new dump-stats DMA is started. The on-chip
* counters are zeroed when the DMA completes. If we can't start
* the DMA immediately, we don't wait - we just prepare to read
* them again next time.
*/
/*
 * Once-per-second housekeeping callout: harvest statistics, reap finished
 * transmits, work around the 82557 receiver-lockup bug, kick off the next
 * stats dump, tick the PHY, and run the transmit watchdog.  Runs with the
 * softc lock held (callout initialized with the lock).
 */
static void
fxp_tick(void *xsc)
{
	struct fxp_softc *sc = xsc;
	if_t ifp = sc->ifp;

	FXP_LOCK_ASSERT(sc, MA_OWNED);

	/* Update statistical counters. */
	fxp_update_stats(sc);

	/*
	 * Release any xmit buffers that have completed DMA. This isn't
	 * strictly necessary to do here, but it's advantageous for mbufs
	 * with external storage to be released in a timely manner rather
	 * than being deferred for a potentially long time. This limits
	 * the delay to a maximum of one second.
	 */
	fxp_txeof(sc);

	/*
	 * If we haven't received any packets in FXP_MAC_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter. This is
	 * a work-around for a bug in the 82557 where the receiver locks
	 * up if it gets certain types of garbage in the synchronization
	 * bits prior to the packet header. This bug is supposed to only
	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
	 * mode as well (perhaps due to a 10/100 speed transition).
	 */
	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
		sc->rx_idle_secs = 0;
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			fxp_init_body(sc, 1);
		}
		/* fxp_init_body() rescheduled the callout for us. */
		return;
	}
	/*
	 * If there is no pending command, start another stats
	 * dump. Otherwise punt for now.
	 */
	if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
		/*
		 * Start another stats dump.
		 */
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
	}
	if (sc->miibus != NULL)
		mii_tick(device_get_softc(sc->miibus));

	/*
	 * Check that chip hasn't hung.
	 */
	fxp_watchdog(sc);

	/*
	 * Schedule another timeout one second from now.
	 */
	callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
}
/*
* Stop the interface. Cancels the statistics updater and resets
* the interface.
*/
/*
 * Stop the interface: mark it down, cancel the stats callout, issue a
 * selective reset (which preserves PCI configuration and the configure/
 * IA/multicast setup while idling the RU and CU), mask interrupts, take a
 * final statistics snapshot, and release all queued transmit mbufs.
 * Called with the softc lock held.
 */
static void
fxp_stop(struct fxp_softc *sc)
{
	if_t ifp = sc->ifp;
	struct fxp_tx *txp;
	int i;

	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->watchdog_timer = 0;

	/*
	 * Cancel stats updater.
	 */
	callout_stop(&sc->stat_ch);

	/*
	 * Preserve PCI configuration, configure, IA/multicast
	 * setup and put RU and CU into idle state.
	 */
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(50);
	/* Disable interrupts. */
	CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);

	/* Pull in whatever the chip dumped before the reset. */
	fxp_update_stats(sc);

	/*
	 * Release any xmit buffers.
	 */
	txp = sc->fxp_desc.tx_list;
	for (i = 0; i < FXP_NTXCB; i++) {
		if (txp[i].tx_mbuf != NULL) {
			bus_dmamap_sync(sc->fxp_txmtag, txp[i].tx_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->fxp_txmtag, txp[i].tx_map);
			m_freem(txp[i].tx_mbuf);
			txp[i].tx_mbuf = NULL;
			/* clear this to reset csum offload bits */
			txp[i].tx_cb->tbd[0].tb_addr = 0;
		}
	}
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->tx_queued = 0;
}
/*
* Watchdog/transmission transmit timeout handler. Called when a
* transmission is started on the interface, but no interrupt is
* received before the timeout. This usually indicates that the
* card has wedged for some reason.
*/
/*
 * Per-second transmit watchdog, driven from fxp_tick().  The timer is armed
 * when a transmission is queued; if it counts down to zero before a
 * completion interrupt arrives the chip is assumed wedged and the interface
 * is reinitialized.  Called with the softc lock held.
 */
static void
fxp_watchdog(struct fxp_softc *sc)
{
	if_t ifp;

	FXP_LOCK_ASSERT(sc, MA_OWNED);

	/* Idle (timer disarmed) or still counting down: nothing to do. */
	if (sc->watchdog_timer == 0)
		return;
	if (--sc->watchdog_timer != 0)
		return;

	ifp = sc->ifp;
	device_printf(sc->dev, "device timeout\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

	/* Force a full reinitialization to unwedge the chip. */
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	fxp_init_body(sc, 1);
}
/*
* Acquire locks and then call the real initialization function. This
* is necessary because ether_ioctl() calls if_init() and this would
* result in mutex recursion if the mutex was held.
*/
/*
 * if_init entry point: take the softc lock and run the real
 * initialization (fxp_init_body) with media selection enabled.
 */
static void
fxp_init(void *xsc)
{
	struct fxp_softc *sc;

	sc = xsc;
	FXP_LOCK(sc);
	fxp_init_body(sc, 1);
	FXP_UNLOCK(sc);
}
/*
* Perform device initialization. This routine must be called with the
* softc lock held.
*/
/*
 * Perform device initialization. This routine must be called with the
 * softc lock held.
 *
 * Sequence: stop any pending I/O, software-reset the chip, program the
 * CBL/RFA/stats base addresses, optionally load CPUSaver microcode, run a
 * CONFIG command (built temporarily in the TxCB memory), run an IAS
 * command, program the multicast filter, rebuild the TxCB ring, start the
 * CU and RU, and finally (if 'setmedia') renegotiate media and re-arm the
 * stats callout.  Each command is started with CU_START and waited on via
 * fxp_dma_wait() before the shared memory is reused.
 */
static void
fxp_init_body(struct fxp_softc *sc, int setmedia)
{
	if_t ifp = sc->ifp;
	struct mii_data *mii;
	struct fxp_cb_config *cbp;
	struct fxp_cb_ias *cb_ias;
	struct fxp_cb_tx *tcbp;
	struct fxp_tx *txp;
	int i, prm;

	FXP_LOCK_ASSERT(sc, MA_OWNED);

	/* Already up: nothing to do. */
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	/*
	 * Cancel any pending I/O
	 */
	fxp_stop(sc);

	/*
	 * Issue software reset, which also unloads the microcode.
	 */
	sc->flags &= ~FXP_FLAG_UCODE;
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
	DELAY(50);

	prm = (if_getflags(ifp) & IFF_PROMISC) ? 1 : 0;

	/*
	 * Initialize base of CBL and RFA memory. Loading with zero
	 * sets it up for regular linear addressing.
	 */
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE);

	fxp_scb_wait(sc);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE);

	/*
	 * Initialize base of dump-stats buffer.
	 */
	fxp_scb_wait(sc);
	bzero(sc->fxp_stats, sizeof(struct fxp_stats));
	bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->stats_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR);

	/*
	 * Attempt to load microcode if requested.
	 * For ICH based controllers do not load microcode.
	 */
	if (sc->ident->ich == 0) {
		if (if_getflags(ifp) & IFF_LINK0 &&
		    (sc->flags & FXP_FLAG_UCODE) == 0)
			fxp_load_ucode(sc);
	}

	/*
	 * Set IFF_ALLMULTI status. It's needed in configure action
	 * command.
	 */
	fxp_mc_addrs(sc);

	/*
	 * We temporarily use memory that contains the TxCB list to
	 * construct the config CB. The TxCB list memory is rebuilt
	 * later.
	 */
	cbp = (struct fxp_cb_config *)sc->fxp_desc.cbl_list;

	/*
	 * This bcopy is kind of disgusting, but there are a bunch of must be
	 * zero and must be one bits in this structure and this is the easiest
	 * way to initialize them all to proper values.
	 */
	bcopy(fxp_cb_config_template, cbp, sizeof(fxp_cb_config_template));

	cbp->cb_status =	0;
	cbp->cb_command =	htole16(FXP_CB_COMMAND_CONFIG |
	    FXP_CB_COMMAND_EL);
	cbp->link_addr =	0xffffffff;	/* (no) next command */
	cbp->byte_count =	sc->flags & FXP_FLAG_EXT_RFA ? 32 : 22;
	cbp->rx_fifo_limit =	8;	/* rx fifo threshold (32 bytes) */
	cbp->tx_fifo_limit =	0;	/* tx fifo threshold (0 bytes) */
	cbp->adaptive_ifs =	0;	/* (no) adaptive interframe spacing */
	cbp->mwi_enable =	sc->flags & FXP_FLAG_MWI_ENABLE ? 1 : 0;
	cbp->type_enable =	0;	/* actually reserved */
	cbp->read_align_en =	sc->flags & FXP_FLAG_READ_ALIGN ? 1 : 0;
	cbp->end_wr_on_cl =	sc->flags & FXP_FLAG_WRITE_ALIGN ? 1 : 0;
	cbp->rx_dma_bytecount =	0;	/* (no) rx DMA max */
	cbp->tx_dma_bytecount =	0;	/* (no) tx DMA max */
	cbp->dma_mbce =		0;	/* (disable) dma max counters */
	cbp->late_scb =		0;	/* (don't) defer SCB update */
	cbp->direct_dma_dis =	1;	/* disable direct rcv dma mode */
	cbp->tno_int_or_tco_en =0;	/* (disable) tx not okay interrupt */
	cbp->ci_int =		1;	/* interrupt on CU idle */
	cbp->ext_txcb_dis =	sc->flags & FXP_FLAG_EXT_TXCB ? 0 : 1;
	cbp->ext_stats_dis =	1;	/* disable extended counters */
	cbp->keep_overrun_rx =	0;	/* don't pass overrun frames to host */
	cbp->save_bf =		sc->flags & FXP_FLAG_SAVE_BAD ? 1 : prm;
	cbp->disc_short_rx =	!prm;	/* discard short packets */
	cbp->underrun_retry =	1;	/* retry mode (once) on DMA underrun */
	cbp->two_frames =	0;	/* do not limit FIFO to 2 frames */
	cbp->dyn_tbd =		sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
	cbp->ext_rfa =		sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
	cbp->mediatype =	sc->flags & FXP_FLAG_SERIAL_MEDIA ? 0 : 1;
	cbp->csma_dis =		0;	/* (don't) disable link */
	cbp->tcp_udp_cksum =	((sc->flags & FXP_FLAG_82559_RXCSUM) != 0 &&
	    (if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) ? 1 : 0;
	cbp->vlan_tco =		0;	/* (don't) enable vlan wakeup */
	cbp->link_wake_en =	0;	/* (don't) assert PME# on link change */
	cbp->arp_wake_en =	0;	/* (don't) assert PME# on arp */
	cbp->mc_wake_en =	0;	/* (don't) enable PME# on mcmatch */
	cbp->nsai =		1;	/* (don't) disable source addr insert */
	cbp->preamble_length =	2;	/* (7 byte) preamble */
	cbp->loopback =		0;	/* (don't) loopback */
	cbp->linear_priority =	0;	/* (normal CSMA/CD operation) */
	cbp->linear_pri_mode =	0;	/* (wait after xmit only) */
	cbp->interfrm_spacing =	6;	/* (96 bits of) interframe spacing */
	cbp->promiscuous =	prm;	/* promiscuous mode */
	cbp->bcast_disable =	0;	/* (don't) disable broadcasts */
	cbp->wait_after_win =	0;	/* (don't) enable modified backoff alg*/
	cbp->ignore_ul =	0;	/* consider U/L bit in IA matching */
	cbp->crc16_en =		0;	/* (don't) enable crc-16 algorithm */
	cbp->crscdt =		sc->flags & FXP_FLAG_SERIAL_MEDIA ? 1 : 0;
	cbp->stripping =	!prm;	/* truncate rx packet to byte count */
	cbp->padding =		1;	/* (do) pad short tx packets */
	cbp->rcv_crc_xfer =	0;	/* (don't) xfer CRC to host */
	cbp->long_rx_en =	sc->flags & FXP_FLAG_LONG_PKT_EN ? 1 : 0;
	cbp->ia_wake_en =	0;	/* (don't) wake up on address match */
	cbp->magic_pkt_dis =	sc->flags & FXP_FLAG_WOL ? 0 : 1;
	cbp->force_fdx =	0;	/* (don't) force full duplex */
	cbp->fdx_pin_en =	1;	/* (enable) FDX# pin */
	cbp->multi_ia =		0;	/* (don't) accept multiple IAs */
	cbp->mc_all =		if_getflags(ifp) & IFF_ALLMULTI ? 1 : prm;
	cbp->gamla_rx =		sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
	cbp->vlan_strip_en =	((sc->flags & FXP_FLAG_EXT_RFA) != 0 &&
	    (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) ? 1 : 0;

	if (sc->revision == FXP_REV_82557) {
		/*
		 * The 82557 has no hardware flow control, the values
		 * below are the defaults for the chip.
		 */
		cbp->fc_delay_lsb =	0;
		cbp->fc_delay_msb =	0x40;
		cbp->pri_fc_thresh =	3;
		cbp->tx_fc_dis =	0;
		cbp->rx_fc_restop =	0;
		cbp->rx_fc_restart =	0;
		cbp->fc_filter =	0;
		cbp->pri_fc_loc =	1;
	} else {
		/* Set pause RX FIFO threshold to 1KB. */
		CSR_WRITE_1(sc, FXP_CSR_FC_THRESH, 1);
		/* Set pause time. */
		cbp->fc_delay_lsb =	0xff;
		cbp->fc_delay_msb =	0xff;
		cbp->pri_fc_thresh =	3;
		mii = device_get_softc(sc->miibus);
		if ((IFM_OPTIONS(mii->mii_media_active) &
		    IFM_ETH_TXPAUSE) != 0)
			/* enable transmit FC */
			cbp->tx_fc_dis = 0;
		else
			/* disable transmit FC */
			cbp->tx_fc_dis = 1;
		if ((IFM_OPTIONS(mii->mii_media_active) &
		    IFM_ETH_RXPAUSE) != 0) {
			/* enable FC restart/restop frames */
			cbp->rx_fc_restart = 1;
			cbp->rx_fc_restop = 1;
		} else {
			/* disable FC restart/restop frames */
			cbp->rx_fc_restart = 0;
			cbp->rx_fc_restop = 0;
		}
		cbp->fc_filter =	!prm;	/* drop FC frames to host */
		cbp->pri_fc_loc =	1;	/* FC pri location (byte31) */
	}

	/* Enable 82558 and 82559 extended statistics functionality. */
	if (sc->revision >= FXP_REV_82558_A4) {
		if (sc->revision >= FXP_REV_82559_A0) {
			/*
			 * Extend configuration table size to 32
			 * to include TCO configuration.
			 */
			cbp->byte_count = 32;
			cbp->ext_stats_dis = 1;
			/* Enable TCO stats. */
			cbp->tno_int_or_tco_en = 1;
			cbp->gamla_rx = 1;
		} else
			cbp->ext_stats_dis = 0;
	}

	/*
	 * Start the config command/DMA.
	 */
	fxp_scb_wait(sc);
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map);

	/*
	 * Now initialize the station address. Temporarily use the TxCB
	 * memory area like we did above for the config CB.
	 */
	cb_ias = (struct fxp_cb_ias *)sc->fxp_desc.cbl_list;
	cb_ias->cb_status = 0;
	cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL);
	cb_ias->link_addr = 0xffffffff;
	bcopy(if_getlladdr(sc->ifp), cb_ias->macaddr, ETHER_ADDR_LEN);

	/*
	 * Start the IAS (Individual Address Setup) command/DMA.
	 */
	fxp_scb_wait(sc);
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	fxp_dma_wait(sc, &cb_ias->cb_status, sc->cbl_tag, sc->cbl_map);

	/*
	 * Initialize the multicast address list.
	 */
	fxp_mc_setup(sc);

	/*
	 * Initialize transmit control block (TxCB) list.
	 */
	txp = sc->fxp_desc.tx_list;
	tcbp = sc->fxp_desc.cbl_list;
	bzero(tcbp, FXP_TXCB_SZ);
	for (i = 0; i < FXP_NTXCB; i++) {
		txp[i].tx_mbuf = NULL;
		tcbp[i].cb_status = htole16(FXP_CB_STATUS_C | FXP_CB_STATUS_OK);
		tcbp[i].cb_command = htole16(FXP_CB_COMMAND_NOP);
		/* Circular ring: link each CB to the next, last to first. */
		tcbp[i].link_addr = htole32(sc->fxp_desc.cbl_addr +
		    (((i + 1) & FXP_TXCB_MASK) * sizeof(struct fxp_cb_tx)));
		if (sc->flags & FXP_FLAG_EXT_TXCB)
			tcbp[i].tbd_array_addr =
			    htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[2]));
		else
			tcbp[i].tbd_array_addr =
			    htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[0]));
		txp[i].tx_next = &txp[(i + 1) & FXP_TXCB_MASK];
	}
	/*
	 * Set the suspend flag on the first TxCB and start the control
	 * unit. It will execute the NOP and then suspend.
	 */
	tcbp->cb_command = htole16(FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S);
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
	sc->tx_queued = 1;

	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);

	/*
	 * Initialize receiver buffer area - RFA.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.rx_head->rx_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);

	if (sc->miibus != NULL && setmedia != 0)
		mii_mediachg(device_get_softc(sc->miibus));

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	/*
	 * Enable interrupts.
	 */
#ifdef DEVICE_POLLING
	/*
	 * ... but only do that if we are not polling. And because (presumably)
	 * the default is interrupts on, we need to disable them explicitly!
	 */
	if (if_getcapenable(ifp) & IFCAP_POLLING )
		CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
	else
#endif /* DEVICE_POLLING */
	CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);

	/*
	 * Start stats updater.
	 */
	callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
}
/*
 * ifmedia change callback for serial-media boards: the media is fixed,
 * so any request is accepted without touching the hardware.
 */
static int
fxp_serial_ifmedia_upd(if_t ifp)
{

	return (0);
}
/*
 * ifmedia status callback for serial-media boards: always report a
 * manually-selected Ethernet medium.
 */
static void
fxp_serial_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{

	ifmr->ifm_active = IFM_ETHER | IFM_MANUAL;
}
/*
* Change media according to request.
*/
/*
 * Change media according to request: reset every attached PHY so the new
 * selection is negotiated from a clean state, then let the MII layer
 * program it.  Always succeeds.
 */
static int
fxp_ifmedia_upd(if_t ifp)
{
	struct fxp_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);
	FXP_LOCK(sc);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	mii_mediachg(mii);
	FXP_UNLOCK(sc);
	return (0);
}
/*
* Notify the world which media we're using.
*/
/*
 * Report the currently active media and link status, refreshing the MII
 * state first so the answer is up to date.
 */
static void
fxp_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct fxp_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);
	FXP_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	FXP_UNLOCK(sc);
}
/*
* Add a buffer to the end of the RFA buffer list.
* Return 0 if successful, 1 for failure. A failure results in
* reusing the RFA buffer.
* The RFA struct is stuck at the beginning of mbuf cluster and the
* data pointer is fixed up to point just past it.
*/
/*
 * Allocate and DMA-map a fresh cluster mbuf for receive slot 'rxp'.
 * The RFA descriptor lives at the head of the cluster (after the
 * alignment fudge) with the packet data following it.  On success the
 * slot's old map is recycled as the new spare and the new mapping is
 * installed; on failure the caller keeps the old buffer.  Returns 0 or
 * an errno.
 */
static int
fxp_new_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
{
	struct mbuf *m;
	struct fxp_rfa *rfa;
	bus_dmamap_t tmp_map;
	int error;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	/*
	 * Move the data pointer up so that the incoming data packet
	 * will be 32-bit aligned.
	 */
	m->m_data += RFA_ALIGNMENT_FUDGE;

	/*
	 * Get a pointer to the base of the mbuf cluster and move
	 * data start past it.
	 */
	rfa = mtod(m, struct fxp_rfa *);
	m->m_data += sc->rfa_size;
	rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE);

	/* EL marks this as the (current) end of the receive list. */
	rfa->rfa_status = 0;
	rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL);
	rfa->actual_size = 0;
	m->m_pkthdr.len = m->m_len = MCLBYTES - RFA_ALIGNMENT_FUDGE -
	    sc->rfa_size;

	/*
	 * Initialize the rest of the RFA.  Note that since the RFA
	 * is misaligned, we cannot store values directly.  We're thus
	 * using the le32enc() function which handles endianness and
	 * is also alignment-safe.
	 */
	le32enc(&rfa->link_addr, 0xffffffff);
	le32enc(&rfa->rbd_addr, 0xffffffff);

	/* Map the RFA into DMA memory. */
	error = bus_dmamap_load(sc->fxp_rxmtag, sc->spare_map, rfa,
	    MCLBYTES - RFA_ALIGNMENT_FUDGE, fxp_dma_map_addr,
	    &rxp->rx_addr, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (error);
	}

	/*
	 * The load succeeded into the spare map: unload the old mapping
	 * and swap maps so the spare is ready for the next allocation.
	 */
	if (rxp->rx_mbuf != NULL)
		bus_dmamap_unload(sc->fxp_rxmtag, rxp->rx_map);
	tmp_map = sc->spare_map;
	sc->spare_map = rxp->rx_map;
	rxp->rx_map = tmp_map;
	rxp->rx_mbuf = m;

	bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}
/*
 * Append receive slot 'rxp' to the tail of the RFA chain.  The previous
 * tail's link pointer is made to point at the new buffer and its EL
 * (end-of-list) bit is cleared so the controller will flow into the new
 * descriptor; the new buffer keeps its EL bit and becomes the tail.
 */
static void
fxp_add_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
{
	struct fxp_rfa *p_rfa;
	struct fxp_rx *p_rx;

	/*
	 * If there are other buffers already on the list, attach this
	 * one to the end by fixing up the tail to point to this one.
	 */
	if (sc->fxp_desc.rx_head != NULL) {
		p_rx = sc->fxp_desc.rx_tail;
		p_rfa = (struct fxp_rfa *)
		    (p_rx->rx_mbuf->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE);
		p_rx->rx_next = rxp;
		/* Unaligned store: le32enc() is alignment-safe. */
		le32enc(&p_rfa->link_addr, rxp->rx_addr);
		p_rfa->rfa_control = 0;
		bus_dmamap_sync(sc->fxp_rxmtag, p_rx->rx_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	} else {
		rxp->rx_next = NULL;
		sc->fxp_desc.rx_head = rxp;
	}
	sc->fxp_desc.rx_tail = rxp;
}
/*
 * Recycle receive slot 'rxp' in place: reset the existing cluster's data
 * pointer and re-initialize its RFA descriptor exactly as
 * fxp_new_rfabuf() would, but without allocating or remapping anything.
 * Used when a replacement mbuf could not be allocated.
 */
static void
fxp_discard_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
{
	struct mbuf *m;
	struct fxp_rfa *rfa;

	m = rxp->rx_mbuf;
	m->m_data = m->m_ext.ext_buf;

	/*
	 * Move the data pointer up so that the incoming data packet
	 * will be 32-bit aligned.
	 */
	m->m_data += RFA_ALIGNMENT_FUDGE;

	/*
	 * Get a pointer to the base of the mbuf cluster and move
	 * data start past it.
	 */
	rfa = mtod(m, struct fxp_rfa *);
	m->m_data += sc->rfa_size;
	rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE);

	rfa->rfa_status = 0;
	rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL);
	rfa->actual_size = 0;

	/*
	 * Initialize the rest of the RFA.  Note that since the RFA
	 * is misaligned, we cannot store values directly.  We're thus
	 * using the le32enc() function which handles endianness and
	 * is also alignment-safe.
	 */
	le32enc(&rfa->link_addr, 0xffffffff);
	le32enc(&rfa->rbd_addr, 0xffffffff);
	bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * MII read: kick off an MDI read of 'reg' on 'phy' and spin (up to
 * ~100ms in 10us steps) until the controller sets the ready bit
 * (0x10000000) in the MDI control register.  Returns the low 16 bits of
 * the last value read; logs a timeout if the ready bit never appeared.
 */
static int
fxp_miibus_readreg(device_t dev, int phy, int reg)
{
	struct fxp_softc *sc;
	int spins, data;

	sc = device_get_softc(dev);
	spins = 10000;
	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));
	for (;;) {
		data = CSR_READ_4(sc, FXP_CSR_MDICONTROL);
		if ((data & 0x10000000) != 0)
			break;
		if (spins-- == 0)
			break;
		DELAY(10);
	}
	if (spins <= 0)
		device_printf(dev, "fxp_miibus_readreg: timed out\n");
	return (data & 0xffff);
}
/*
 * MII write: issue an MDI write of the low 16 bits of 'value' to 'reg'
 * on 'phy' and spin (up to ~100ms in 10us steps) for the ready bit
 * (0x10000000); logs a timeout if it never appears.  Always returns 0.
 */
static int
fxp_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct fxp_softc *sc;
	int spins;

	sc = device_get_softc(dev);
	spins = 10000;
	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
	    (value & 0xffff));
	for (;;) {
		if ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) != 0)
			break;
		if (spins-- == 0)
			break;
		DELAY(10);
	}
	if (spins <= 0)
		device_printf(dev, "fxp_miibus_writereg: timed out\n");
	return (0);
}
/*
 * MII link-state change callback: record whether the CU-resume workaround
 * is needed for the new medium, then reinitialize the chip so the flow
 * control configuration matches the negotiated link parameters.
 */
static void
fxp_miibus_statchg(device_t dev)
{
	struct fxp_softc *sc;
	struct mii_data *mii;
	if_t ifp;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->miibus);
	ifp = sc->ifp;

	/* Ignore spurious callbacks and changes without a valid, up link. */
	if (mii == NULL || ifp == (void *)NULL)
		return;
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) !=
	    (IFM_AVALID | IFM_ACTIVE))
		return;

	/* 10baseT links trip the CU resume bug on affected revisions. */
	sc->cu_resume_bug = 0;
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T &&
	    (sc->flags & FXP_FLAG_CU_RESUME_BUG) != 0)
		sc->cu_resume_bug = 1;

	/*
	 * Call fxp_init_body in order to adjust the flow control settings.
	 * Note that the 82557 doesn't support hardware flow control.
	 */
	if (sc->revision == FXP_REV_82557)
		return;

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	fxp_init_body(sc, 0);
}
/*
 * ioctl handler.  Flag and multicast changes that require chip
 * reprogramming are implemented by clearing IFF_DRV_RUNNING and calling
 * fxp_init_body(); capability toggles accumulate 'reinit' and apply the
 * same treatment once at the end.  Media ioctls are forwarded to the MII
 * layer (or the built-in ifmedia for serial-media boards); everything
 * else falls through to ether_ioctl().
 */
static int
fxp_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct fxp_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int flag, mask, error = 0, reinit;

	switch (command) {
	case SIOCSIFFLAGS:
		FXP_LOCK(sc);
		/*
		 * If interface is marked up and not running, then start it.
		 * If it is marked down and running, stop it.
		 * XXX If it's up then re-initialize it. This is so flags
		 * such as IFF_PROMISC are handled.
		 */
		if (if_getflags(ifp) & IFF_UP) {
			if (((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) &&
			    ((if_getflags(ifp) ^ sc->if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI | IFF_LINK0)) != 0) {
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				fxp_init_body(sc, 0);
			} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
				fxp_init_body(sc, 1);
		} else {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
				fxp_stop(sc);
		}
		/* Remember the flags so the next call can detect changes. */
		sc->if_flags = if_getflags(ifp);
		FXP_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		FXP_LOCK(sc);
		/* Reprogram the multicast filter via reinitialization. */
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			fxp_init_body(sc, 0);
		}
		FXP_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->miibus != NULL) {
			mii = device_get_softc(sc->miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		} else {
			error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
		}
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(fxp_poll, ifp);
				if (error)
					return(error);
				FXP_LOCK(sc);
				/* Polling owns the chip: mask interrupts. */
				CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL,
				    FXP_SCB_INTR_DISABLE);
				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
				FXP_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts in any case */
				FXP_LOCK(sc);
				CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
				FXP_UNLOCK(sc);
			}
		}
#endif
		FXP_LOCK(sc);
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
			if_togglecapenable(ifp, IFCAP_TXCSUM);
			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
				if_sethwassistbits(ifp, FXP_CSUM_FEATURES, 0);
			else
				if_sethwassistbits(ifp, 0, FXP_CSUM_FEATURES);
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) {
			if_togglecapenable(ifp, IFCAP_RXCSUM);
			/* 82559-style RXCSUM is a config-command setting. */
			if ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0)
				reinit++;
		}
		if ((mask & IFCAP_TSO4) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
			if_togglecapenable(ifp, IFCAP_TSO4);
			if ((if_getcapenable(ifp) & IFCAP_TSO4) != 0)
				if_sethwassistbits(ifp, CSUM_TSO, 0);
			else
				if_sethwassistbits(ifp, 0, CSUM_TSO);
		}
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0)
			if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
		if ((mask & IFCAP_VLAN_MTU) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_MTU) != 0) {
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
			if (sc->revision != FXP_REV_82557)
				flag = FXP_FLAG_LONG_PKT_EN;
			else /* a hack to get long frames on the old chip */
				flag = FXP_FLAG_SAVE_BAD;
			sc->flags ^= flag;
			if (if_getflags(ifp) & IFF_UP)
				reinit++;
		}
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM) != 0)
			if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
			/* HW csum/TSO over VLAN require HW tagging. */
			if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
				if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO |
				    IFCAP_VLAN_HWCSUM);
			reinit++;
		}
		if (reinit > 0 &&
		    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			fxp_init_body(sc, 0);
		}
		FXP_UNLOCK(sc);
		if_vlancap(ifp);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}
/*
 * if_foreach_llmaddr() callback: copy one link-level multicast address
 * into the multicast-setup command buffer, up to MAXMCADDR entries.
 * mc_cnt keeps counting past the limit so fxp_mc_addrs() can detect
 * overflow and fall back to IFF_ALLMULTI.
 *
 * NOTE(review): the destination expression
 * 'mcsp->mc_addr[mcsp->mc_cnt * ETHER_ADDR_LEN]' looks suspicious: for a
 * flat byte array it would need a leading '&', and for a 2-D
 * [MAXMCADDR][ETHER_ADDR_LEN] array the scaled index would run past the
 * array for mc_cnt >= MAXMCADDR / ETHER_ADDR_LEN.  Verify against the
 * declaration of struct fxp_cb_mcs in if_fxpreg.h.
 */
static u_int
fxp_setup_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct fxp_softc *sc = arg;
	struct fxp_cb_mcs *mcsp = sc->mcsp;

	if (mcsp->mc_cnt < MAXMCADDR)
		bcopy(LLADDR(sdl), mcsp->mc_addr[mcsp->mc_cnt * ETHER_ADDR_LEN],
		    ETHER_ADDR_LEN);
	mcsp->mc_cnt++;
	return (1);
}
/*
* Fill in the multicast address list and return number of entries.
*/
/*
 * Fill in the multicast address list for the multicast-setup command.
 * If the list would overflow MAXMCADDR entries, IFF_ALLMULTI is turned
 * on instead and the list left empty.  On exit mc_cnt holds the
 * little-endian BYTE count (entries * ETHER_ADDR_LEN) that the hardware
 * expects, not the entry count.
 */
static void
fxp_mc_addrs(struct fxp_softc *sc)
{
	struct fxp_cb_mcs *mcsp = sc->mcsp;
	if_t ifp = sc->ifp;

	if ((if_getflags(ifp) & IFF_ALLMULTI) == 0) {
		mcsp->mc_cnt = 0;
		if_foreach_llmaddr(sc->ifp, fxp_setup_maddr, sc);
		if (mcsp->mc_cnt >= MAXMCADDR) {
			/* Too many groups: punt to hardware all-multicast. */
			if_setflagbits(ifp, IFF_ALLMULTI, 0);
			mcsp->mc_cnt = 0;
		}
	}
	/* Convert the entry count to the byte count the chip wants. */
	mcsp->mc_cnt = htole16(mcsp->mc_cnt * ETHER_ADDR_LEN);
}
/*
* Program the multicast filter.
*
* We have an artificial restriction that the multicast setup command
* must be the first command in the chain, so we take steps to ensure
* this. By requiring this, it allows us to keep up the performance of
* the pre-initialized command ring (esp. link pointers) by not actually
* inserting the mcsetup command in the ring - i.e. its link pointer
* points to the TxCB ring, but the mcsetup descriptor itself is not part
* of it. We then can do 'CU_START' on the mcsetup descriptor and have it
* lead into the regular TxCB ring when it completes.
*/
/*
 * Program the multicast filter by issuing an MCAS command from the
 * dedicated mcsp buffer (kept outside the TxCB ring -- see the block
 * comment above).  Waits for the command unit to go idle first, then
 * starts the command and waits for completion.  Called with the softc
 * lock held.
 */
static void
fxp_mc_setup(struct fxp_softc *sc)
{
	struct fxp_cb_mcs *mcsp;
	int count;

	FXP_LOCK_ASSERT(sc, MA_OWNED);

	mcsp = sc->mcsp;
	mcsp->cb_status = 0;
	mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL);
	mcsp->link_addr = 0xffffffff;
	fxp_mc_addrs(sc);

	/*
	 * Wait until command unit is idle. This should never be the
	 * case when nothing is queued, but make sure anyway.
	 */
	count = 100;
	while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) !=
	    FXP_SCB_CUS_IDLE && --count)
		DELAY(10);
	if (count == 0) {
		device_printf(sc->dev, "command queue timeout\n");
		return;
	}

	/*
	 * Start the multicast setup command.
	 */
	fxp_scb_wait(sc);
	bus_dmamap_sync(sc->mcs_tag, sc->mcs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	fxp_dma_wait(sc, &mcsp->cb_status, sc->mcs_tag, sc->mcs_map);
}
/* Receive-interrupt bundling microcode images, one per chip family. */
static uint32_t fxp_ucode_d101a[] = D101_A_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d101b0[] = D101_B0_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d101ma[] = D101M_B_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d101s[] = D101S_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d102[] = D102_B_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d102c[] = D102_C_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d102e[] = D102_E_RCVBUNDLE_UCODE;

/* Expands to two initializers: the image and its length in dwords. */
#define UCODE(x)	x, sizeof(x)/sizeof(uint32_t)

/*
 * Map a chip revision to its microcode image and to the dword offsets
 * within the image where the tunable interrupt-delay and bundle-max
 * parameters are patched in (0 == parameter unsupported by the image).
 * The table is terminated by an all-zero entry.
 */
static const struct ucode {
	uint32_t revision;		/* chip revision the image serves */
	uint32_t *ucode;		/* microcode image */
	int length;			/* image length in dwords */
	u_short int_delay_offset;	/* patch slot for int delay */
	u_short bundle_max_offset;	/* patch slot for bundle max */
} ucode_table[] = {
	{ FXP_REV_82558_A4, UCODE(fxp_ucode_d101a), D101_CPUSAVER_DWORD, 0 },
	{ FXP_REV_82558_B0, UCODE(fxp_ucode_d101b0), D101_CPUSAVER_DWORD, 0 },
	{ FXP_REV_82559_A0, UCODE(fxp_ucode_d101ma),
	    D101M_CPUSAVER_DWORD, D101M_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82559S_A, UCODE(fxp_ucode_d101s),
	    D101S_CPUSAVER_DWORD, D101S_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82550, UCODE(fxp_ucode_d102),
	    D102_B_CPUSAVER_DWORD, D102_B_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82550_C, UCODE(fxp_ucode_d102c),
	    D102_C_CPUSAVER_DWORD, D102_C_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82551_F, UCODE(fxp_ucode_d102e),
	    D102_E_CPUSAVER_DWORD, D102_E_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82551_10, UCODE(fxp_ucode_d102e),
	    D102_E_CPUSAVER_DWORD, D102_E_CPUSAVER_BUNDLE_MAX_DWORD },
	{ 0, NULL, 0, 0, 0 }		/* terminator */
};
/*
 * Download the revision-appropriate receive-bundling microcode into the
 * chip, after patching in the int_delay/bundle_max tunables.  Silently
 * does nothing when microcode is administratively disabled or no image
 * matches the chip revision.
 */
static void
fxp_load_ucode(struct fxp_softc *sc)
{
	const struct ucode *uc;
	struct fxp_cb_ucode *cbp;
	int i;

	if (sc->flags & FXP_FLAG_NO_UCODE)
		return;

	/* Find the image for this revision; table ends at a NULL image. */
	for (uc = ucode_table; uc->ucode != NULL; uc++)
		if (sc->revision == uc->revision)
			break;
	if (uc->ucode == NULL)
		return;

	/* Reuse the first TxCB slot as the one-shot UCODE command block. */
	cbp = (struct fxp_cb_ucode *)sc->fxp_desc.cbl_list;
	cbp->cb_status = 0;
	cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE | FXP_CB_COMMAND_EL);
	cbp->link_addr = 0xffffffff;	/* (no) next command */
	for (i = 0; i < uc->length; i++)
		cbp->ucode[i] = htole32(uc->ucode[i]);
	/*
	 * Patch the tunables into the image.  The delay value written is
	 * 1.5x the requested microseconds; presumably this converts to
	 * the microcode's internal tick unit -- TODO confirm.
	 */
	if (uc->int_delay_offset)
		*(uint16_t *)&cbp->ucode[uc->int_delay_offset] =
		    htole16(sc->tunable_int_delay + sc->tunable_int_delay / 2);
	if (uc->bundle_max_offset)
		*(uint16_t *)&cbp->ucode[uc->bundle_max_offset] =
		    htole16(sc->tunable_bundle_max);
	/*
	 * Download the ucode to the chip.
	 */
	fxp_scb_wait(sc);
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map);
	device_printf(sc->dev,
	    "Microcode loaded, int_delay: %d usec bundle_max: %d\n",
	    sc->tunable_int_delay,
	    uc->bundle_max_offset == 0 ? 0 : sc->tunable_bundle_max);
	sc->flags |= FXP_FLAG_UCODE;
	/* Return the slot to the TxCB ring in a clean state. */
	bzero(cbp, FXP_TXCB_SZ);
}
/* Shorthand for adding one read-only unsigned statistics leaf. */
#define FXP_SYSCTL_STAT_ADD(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

/*
 * Create the per-device sysctl tree: the int_delay/bundle_max tunables
 * (seeded from kenv via resource_int_value()), the RNR event counter,
 * and the hardware RX/TX MAC statistics under "stats".  Some counters
 * only exist on 82558 A4 and newer, or 82559 A0 and newer, hence the
 * revision checks.
 */
static void
fxp_sysctl_node(struct fxp_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct fxp_hwstats *hsp;

	ctx = device_get_sysctl_ctx(sc->dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_delay",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    &sc->tunable_int_delay, 0, sysctl_hw_fxp_int_delay, "I",
	    "FXP driver receive interrupt microcode bundling delay");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "bundle_max",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    &sc->tunable_bundle_max, 0, sysctl_hw_fxp_bundle_max, "I",
	    "FXP driver receive interrupt microcode bundle size limit");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rnr", CTLFLAG_RD, &sc->rnr, 0,
	    "FXP RNR events");

	/*
	 * Pull in device tunables.
	 */
	sc->tunable_int_delay = TUNABLE_INT_DELAY;
	sc->tunable_bundle_max = TUNABLE_BUNDLE_MAX;
	(void) resource_int_value(device_get_name(sc->dev),
	    device_get_unit(sc->dev), "int_delay", &sc->tunable_int_delay);
	(void) resource_int_value(device_get_name(sc->dev),
	    device_get_unit(sc->dev), "bundle_max", &sc->tunable_bundle_max);
	sc->rnr = 0;

	hsp = &sc->fxp_hwstats;
	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "FXP statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx MAC statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	FXP_SYSCTL_STAT_ADD(ctx, child, "good_frames",
	    &hsp->rx_good, "Good frames");
	FXP_SYSCTL_STAT_ADD(ctx, child, "crc_errors",
	    &hsp->rx_crc_errors, "CRC errors");
	FXP_SYSCTL_STAT_ADD(ctx, child, "alignment_errors",
	    &hsp->rx_alignment_errors, "Alignment errors");
	FXP_SYSCTL_STAT_ADD(ctx, child, "rnr_errors",
	    &hsp->rx_rnr_errors, "RNR errors");
	FXP_SYSCTL_STAT_ADD(ctx, child, "overrun_errors",
	    &hsp->rx_overrun_errors, "Overrun errors");
	FXP_SYSCTL_STAT_ADD(ctx, child, "cdt_errors",
	    &hsp->rx_cdt_errors, "Collision detect errors");
	FXP_SYSCTL_STAT_ADD(ctx, child, "shortframes",
	    &hsp->rx_shortframes, "Short frame errors");
	if (sc->revision >= FXP_REV_82558_A4) {
		FXP_SYSCTL_STAT_ADD(ctx, child, "pause",
		    &hsp->rx_pause, "Pause frames");
		FXP_SYSCTL_STAT_ADD(ctx, child, "controls",
		    &hsp->rx_controls, "Unsupported control frames");
	}
	if (sc->revision >= FXP_REV_82559_A0)
		FXP_SYSCTL_STAT_ADD(ctx, child, "tco",
		    &hsp->rx_tco, "TCO frames");

	/* Tx MAC statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	FXP_SYSCTL_STAT_ADD(ctx, child, "good_frames",
	    &hsp->tx_good, "Good frames");
	FXP_SYSCTL_STAT_ADD(ctx, child, "maxcols",
	    &hsp->tx_maxcols, "Maximum collisions errors");
	FXP_SYSCTL_STAT_ADD(ctx, child, "latecols",
	    &hsp->tx_latecols, "Late collisions errors");
	FXP_SYSCTL_STAT_ADD(ctx, child, "underruns",
	    &hsp->tx_underruns, "Underrun errors");
	FXP_SYSCTL_STAT_ADD(ctx, child, "lostcrs",
	    &hsp->tx_lostcrs, "Lost carrier sense");
	FXP_SYSCTL_STAT_ADD(ctx, child, "deffered",
	    &hsp->tx_deffered, "Deferred");
	FXP_SYSCTL_STAT_ADD(ctx, child, "single_collisions",
	    &hsp->tx_single_collisions, "Single collisions");
	FXP_SYSCTL_STAT_ADD(ctx, child, "multiple_collisions",
	    &hsp->tx_multiple_collisions, "Multiple collisions");
	FXP_SYSCTL_STAT_ADD(ctx, child, "total_collisions",
	    &hsp->tx_total_collisions, "Total collisions");
	if (sc->revision >= FXP_REV_82558_A4)
		FXP_SYSCTL_STAT_ADD(ctx, child, "pause",
		    &hsp->tx_pause, "Pause frames");
	if (sc->revision >= FXP_REV_82559_A0)
		FXP_SYSCTL_STAT_ADD(ctx, child, "tco",
		    &hsp->tx_tco, "TCO frames");
}
#undef FXP_SYSCTL_STAT_ADD
/*
 * Generic sysctl handler for an int stored at arg1, accepting new
 * values only within [low, high].  Returns EINVAL for out-of-range
 * updates and leaves the stored value untouched.
 */
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int err, val;

	val = *(int *)arg1;
	err = sysctl_handle_int(oidp, &val, 0, req);
	/* Read-only access or handler failure: nothing to store. */
	if (err != 0 || req->newptr == NULL)
		return (err);
	if (val >= low && val <= high) {
		*(int *)arg1 = val;
		return (0);
	}
	return (EINVAL);
}
/*
* Interrupt delay is expressed in microseconds, a multiplier is used
* to convert this to the appropriate clock ticks before using.
*/
/* Sysctl handler: constrain int_delay updates to 300..3000 usec. */
static int
sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, 300, 3000));
}
/* Sysctl handler: constrain bundle_max updates to 1..65535 frames. */
static int
sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, 1, 0xffff));
}
diff --git a/sys/dev/gem/if_gem.c b/sys/dev/gem/if_gem.c
index 91389d0dfd2c..434c5309d019 100644
--- a/sys/dev/gem/if_gem.c
+++ b/sys/dev/gem/if_gem.c
@@ -1,2234 +1,2232 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (C) 2001 Eduardo Horvath.
* Copyright (c) 2001-2003 Thomas Moestl
* Copyright (c) 2007 Marius Strobl <marius@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
*/
#include <sys/cdefs.h>
/*
* Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
*/
#if 0
#define GEM_DEBUG
#endif
#if 0 /* XXX: In case of emergency, re-enable this. */
#define GEM_RINT_TIMEOUT
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/rman.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <machine/bus.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/gem/if_gemreg.h>
#include <dev/gem/if_gemvar.h>
CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192);
CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192);
#define GEM_TRIES 10000
/*
* The hardware supports basic TCP/UDP checksum offloading. However,
* the hardware doesn't compensate the checksum for UDP datagram which
* can yield to 0x0. As a safe guard, UDP checksum offload is disabled
* by default. It can be reactivated by setting special link option
* link0 with ifconfig(8).
*/
#define GEM_CSUM_FEATURES (CSUM_TCP)
static int gem_add_rxbuf(struct gem_softc *sc, int idx);
static int gem_bitwait(struct gem_softc *sc, bus_addr_t r, uint32_t clr,
uint32_t set);
static void gem_cddma_callback(void *xsc, bus_dma_segment_t *segs,
int nsegs, int error);
static int gem_disable_rx(struct gem_softc *sc);
static int gem_disable_tx(struct gem_softc *sc);
static void gem_eint(struct gem_softc *sc, u_int status);
static void gem_init(void *xsc);
static void gem_init_locked(struct gem_softc *sc);
static void gem_init_regs(struct gem_softc *sc);
static int gem_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head);
static int gem_meminit(struct gem_softc *sc);
static void gem_mifinit(struct gem_softc *sc);
static void gem_reset(struct gem_softc *sc);
static int gem_reset_rx(struct gem_softc *sc);
static void gem_reset_rxdma(struct gem_softc *sc);
static int gem_reset_tx(struct gem_softc *sc);
static u_int gem_ringsize(u_int sz);
static void gem_rint(struct gem_softc *sc);
#ifdef GEM_RINT_TIMEOUT
static void gem_rint_timeout(void *arg);
#endif
static inline void gem_rxcksum(struct mbuf *m, uint64_t flags);
static void gem_rxdrain(struct gem_softc *sc);
static void gem_setladrf(struct gem_softc *sc);
static void gem_start(if_t ifp);
static void gem_start_locked(if_t ifp);
static void gem_stop(if_t ifp, int disable);
static void gem_tick(void *arg);
static void gem_tint(struct gem_softc *sc);
static inline void gem_txkick(struct gem_softc *sc);
static int gem_watchdog(struct gem_softc *sc);
DRIVER_MODULE(miibus, gem, miibus_driver, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);
#ifdef GEM_DEBUG
#include <sys/ktr.h>
#define KTR_GEM KTR_SPARE2
#endif
/*
 * Bus-independent attach: set up the ifnet, create the DMA tags/maps
 * and control-data block, probe PHYs (external MII, then internal,
 * then SERDES as appropriate for the chip variant), and register the
 * interface.  On any failure all resources acquired so far are torn
 * down in reverse order via the fall-through fail_* labels, and the
 * error code is returned; returns 0 on success.
 */
int
gem_attach(struct gem_softc *sc)
{
	struct gem_txsoft *txs;
	if_t ifp;
	int error, i, phy;
	uint32_t v;

	if (bootverbose)
		device_printf(sc->sc_dev, "flags=0x%x\n", sc->sc_flags);

	/* Set up ifnet structure. */
	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL)
- return (ENOSPC);
	sc->sc_csum_features = GEM_CSUM_FEATURES;
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(ifp, gem_start);
	if_setioctlfn(ifp, gem_ioctl);
	if_setinitfn(ifp, gem_init);
	if_setsendqlen(ifp, GEM_TXQUEUELEN);
	if_setsendqready(ifp);

	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
#ifdef GEM_RINT_TIMEOUT
	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
#endif

	/* Make sure the chip is stopped. */
	gem_reset(sc);

	/* Parent tag: the chip only does 32-bit DMA addressing. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,
	    NULL, &sc->sc_pdmatag);
	if (error != 0)
		goto fail_ifnet;

	/* RX buffer tag: one cluster per descriptor. */
	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
	if (error != 0)
		goto fail_ptag;

	/* TX buffer tag: up to GEM_NTXSEGS segments per packet. */
	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
	if (error != 0)
		goto fail_rtag;

	/* Control-data tag: one page-aligned contiguous segment. */
	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), 0,
	    NULL, NULL, &sc->sc_cdmatag);
	if (error != 0)
		goto fail_ttag;

	/*
	 * Allocate the control data structures, create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->sc_cddmamap)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_ctag;
	}
	/* sc_cddma is set by gem_cddma_callback(); 0 means load failed. */
	sc->sc_cddma = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data),
	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_cmem;
	}

	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	error = ENOMEM;
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev,
			    "unable to create TX DMA map %d, error = %d\n",
			    i, error);
			goto fail_txd;
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			device_printf(sc->sc_dev,
			    "unable to create RX DMA map %d, error = %d\n",
			    i, error);
			goto fail_rxd;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* Bypass probing PHYs if we already know for sure to use a SERDES. */
	if ((sc->sc_flags & GEM_SERDES) != 0)
		goto serdes;

	GEM_WRITE_4(sc, GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_MII);
	GEM_BARRIER(sc, GEM_MII_DATAPATH_MODE, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	gem_mifinit(sc);

	/*
	 * Look for an external PHY.
	 */
	error = ENXIO;
	v = GEM_READ_4(sc, GEM_MIF_CONFIG);
	if ((v & GEM_MIF_CONFIG_MDI1) != 0) {
		v |= GEM_MIF_CONFIG_PHY_SEL;
		GEM_WRITE_4(sc, GEM_MIF_CONFIG, v);
		GEM_BARRIER(sc, GEM_MIF_CONFIG, 4,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
		error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
		    gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK,
		    MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE);
	}

	/*
	 * Fall back on an internal PHY if no external PHY was found.
	 * Note that with Apple (K2) GMACs GEM_MIF_CONFIG_MDI0 can't be
	 * trusted when the firmware has powered down the chip.
	 */
	if (error != 0 &&
	    ((v & GEM_MIF_CONFIG_MDI0) != 0 || GEM_IS_APPLE(sc))) {
		v &= ~GEM_MIF_CONFIG_PHY_SEL;
		GEM_WRITE_4(sc, GEM_MIF_CONFIG, v);
		GEM_BARRIER(sc, GEM_MIF_CONFIG, 4,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
		/* Apple variants use fixed, known PHY addresses. */
		switch (sc->sc_variant) {
		case GEM_APPLE_K2_GMAC:
			phy = GEM_PHYAD_INTERNAL;
			break;
		case GEM_APPLE_GMAC:
			phy = GEM_PHYAD_EXTERNAL;
			break;
		default:
			phy = MII_PHY_ANY;
			break;
		}
		error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
		    gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK, phy,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
	}

	/*
	 * Try the external PCS SERDES if we didn't find any PHYs.
	 */
	if (error != 0 && sc->sc_variant == GEM_SUN_GEM) {
 serdes:
		GEM_WRITE_4(sc, GEM_MII_DATAPATH_MODE,
		    GEM_MII_DATAPATH_SERDES);
		GEM_BARRIER(sc, GEM_MII_DATAPATH_MODE, 4,
		    BUS_SPACE_BARRIER_WRITE);
		GEM_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
		GEM_BARRIER(sc, GEM_MII_SLINK_CONTROL, 4,
		    BUS_SPACE_BARRIER_WRITE);
		GEM_WRITE_4(sc, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);
		GEM_BARRIER(sc, GEM_MII_CONFIG, 4,
		    BUS_SPACE_BARRIER_WRITE);
		sc->sc_flags |= GEM_SERDES;
		error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
		    gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK,
		    GEM_PHYAD_EXTERNAL, MII_OFFSET_ANY, MIIF_DOPAUSE);
	}
	if (error != 0) {
		device_printf(sc->sc_dev, "attaching PHYs failed\n");
		goto fail_rxd;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail. A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Get RX FIFO size. */
	sc->sc_rxfifosize = 64 *
	    GEM_READ_4(sc, GEM_RX_FIFO_SIZE);

	/* Get TX FIFO size. */
	v = GEM_READ_4(sc, GEM_TX_FIFO_SIZE);
	device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
	    sc->sc_rxfifosize / 1024, v / 16);

	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_enaddr);

	/*
	 * Tell the upper layer(s) we support long frames/checksum offloads.
	 */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM, 0);
	if_sethwassistbits(ifp, sc->sc_csum_features, 0);
	if_setcapenablebit(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM, 0);

	return (0);

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_rxd:
	for (i = 0; i < GEM_NRXDESC; i++)
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
 fail_txd:
	for (i = 0; i < GEM_TXQUEUELEN; i++)
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
 fail_cmem:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
 fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
 fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
 fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
 fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
 fail_ifnet:
	if_free(ifp);
	return (error);
}
/*
 * Bus-independent detach: undo everything gem_attach() set up, in
 * reverse order -- stop the chip, drain callouts, detach the ifnet
 * and miibus, then release all DMA maps, memory and tags.
 */
void
gem_detach(struct gem_softc *sc)
{
	if_t ifp = sc->sc_ifp;
	int i;

	ether_ifdetach(ifp);
	GEM_LOCK(sc);
	gem_stop(ifp, 1);	/* 1: also drain the RX buffers */
	GEM_UNLOCK(sc);
	callout_drain(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
	callout_drain(&sc->sc_rx_ch);
#endif
	if_free(ifp);
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	for (i = 0; i < GEM_NRXDESC; i++)
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	for (i = 0; i < GEM_TXQUEUELEN; i++)
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}
/*
 * Power-management suspend: stop the chip but keep the RX buffers
 * (disable == 0) so gem_resume() can restart quickly.
 */
void
gem_suspend(struct gem_softc *sc)
{
	if_t ifp = sc->sc_ifp;

	GEM_LOCK(sc);
	gem_stop(ifp, 0);
	GEM_UNLOCK(sc);
}
/*
 * Power-management resume: reinitialize the interface if it was
 * administratively up before suspend.
 */
void
gem_resume(struct gem_softc *sc)
{
	if_t ifp = sc->sc_ifp;

	GEM_LOCK(sc);
	/*
	 * On resume all registers have to be initialized again like
	 * after power-on.
	 */
	sc->sc_flags &= ~GEM_INITED;
	if (if_getflags(ifp) & IFF_UP)
		gem_init_locked(sc);
	GEM_UNLOCK(sc);
}
/*
 * Finish hardware RX checksumming: the chip provides a raw 16-bit
 * ones-complement sum over the packet; validate that the frame is an
 * unfragmented IPv4 TCP/UDP packet we can handle, adjust the sum for
 * any IP options, and hand the result to the stack via csum_data.
 * Bails out silently (no flags set) on anything it cannot verify.
 */
static inline void
gem_rxcksum(struct mbuf *m, uint64_t flags)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	uint16_t *opts;
	int32_t hlen, len, pktlen;
	uint32_t temp32;
	uint16_t cksum;

	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	/* Reject trailing garbage: IP length must match what we got. */
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* Cannot handle fragmented packet. */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((uint8_t *)ip + hlen);
		if (uh->uh_sum == 0)
			return;	/* no checksum */
		break;
	default:
		return;
	}

	cksum = ~(flags & GEM_RD_CHECKSUM);
	/* checksum fixup for IP options */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (uint16_t *)(ip + 1);
		/* Subtract each option word with end-around carry. */
		for (; len > 0; len -= sizeof(uint16_t), opts++) {
			temp32 = cksum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			cksum = temp32 & 65535;
		}
	}
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = cksum;
}
/*
 * bus_dmamap_load() callback for the control-data block: record the
 * single segment's bus address in sc_cddma.  The tag was created with
 * nsegments == 1, so more than one segment is a programming error.
 */
static void
gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct gem_softc *sc = xsc;

	if (error != 0)
		return;
	if (nsegs == 1) {
		sc->sc_cddma = segs[0].ds_addr;
		return;
	}
	panic("%s: bad control buffer segment count", __func__);
}
/*
 * Once-a-second housekeeping: harvest and clear the hardware error and
 * collision counters into the ifnet statistics, tick the MII state
 * machine and run the transmit watchdog; reschedules itself unless the
 * watchdog decided to reinitialize the chip.
 */
static void
gem_tick(void *arg)
{
	struct gem_softc *sc = arg;
	if_t ifp = sc->sc_ifp;
	uint32_t v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Unload collision and error counters.
	 */
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
	    GEM_READ_4(sc, GEM_MAC_NORM_COLL_CNT) +
	    GEM_READ_4(sc, GEM_MAC_FIRST_COLL_CNT));
	/* Excess/late collisions count both as collisions and as errors. */
	v = GEM_READ_4(sc, GEM_MAC_EXCESS_COLL_CNT) +
	    GEM_READ_4(sc, GEM_MAC_LATE_COLL_CNT);
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, v);
	if_inc_counter(ifp, IFCOUNTER_OERRORS, v);
	if_inc_counter(ifp, IFCOUNTER_IERRORS,
	    GEM_READ_4(sc, GEM_MAC_RX_LEN_ERR_CNT) +
	    GEM_READ_4(sc, GEM_MAC_RX_ALIGN_ERR) +
	    GEM_READ_4(sc, GEM_MAC_RX_CRC_ERR_CNT) +
	    GEM_READ_4(sc, GEM_MAC_RX_CODE_VIOL));

	/*
	 * Then clear the hardware counters.
	 */
	GEM_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
	GEM_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
	GEM_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
	GEM_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);
	GEM_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0);
	GEM_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0);
	GEM_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0);
	GEM_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0);

	mii_tick(sc->sc_mii);

	/* EJUSTRETURN means the watchdog reinitialized the chip. */
	if (gem_watchdog(sc) == EJUSTRETURN)
		return;

	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}
/*
 * Poll register r (100us apart, up to GEM_TRIES times) until all bits
 * in "clr" read back clear and all bits in "set" read back set.
 * Returns 1 on success, 0 on timeout.
 */
static int
gem_bitwait(struct gem_softc *sc, bus_addr_t r, uint32_t clr, uint32_t set)
{
	uint32_t reg;
	int tries;

	for (tries = 0; tries < GEM_TRIES; tries++) {
		reg = GEM_READ_4(sc, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
		DELAY(100);
	}
	return (0);
}
/*
 * Full chip reset: quiesce the RX and TX engines first (resetting
 * mid-DMA can hang the bus), then assert the global software reset
 * and wait for it to self-clear.
 */
static void
gem_reset(struct gem_softc *sc)
{

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset. */
	GEM_WRITE_4(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
	GEM_BARRIER(sc, GEM_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		device_printf(sc->sc_dev, "cannot reset device\n");
}
/*
 * Release every mbuf still attached to an RX descriptor, unloading its
 * DMA map first.  Used on stop/detach and on RX-setup failure.
 */
static void
gem_rxdrain(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int idx;

	for (idx = 0; idx < GEM_NRXDESC; idx++) {
		rxs = &sc->sc_rxsoft[idx];
		if (rxs->rxs_mbuf == NULL)
			continue;
		bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
		m_freem(rxs->rxs_mbuf);
		rxs->rxs_mbuf = NULL;
	}
}
/*
 * Stop the interface: halt callouts, reset both DMA engines, free all
 * queued transmit jobs back to the free list, optionally drain the RX
 * buffers (disable != 0), and mark the interface down.
 */
static void
gem_stop(if_t ifp, int disable)
{
	struct gem_softc *sc = if_getsoftc(ifp);
	struct gem_txsoft *txs;

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

	callout_stop(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
	callout_stop(&sc->sc_rx_ch);
#endif

	gem_reset_tx(sc);
	gem_reset_rx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (disable)
		gem_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->sc_flags &= ~GEM_LINK;
	sc->sc_wdog_timer = 0;
}
/*
 * Reset the receive path: disable RX DMA, reset the ERX block, then
 * reset the RX MAC.  Returns 0 on success, 1 if either reset fails to
 * complete.
 */
static int
gem_reset_rx(struct gem_softc *sc)
{

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	(void)gem_disable_rx(sc);
	GEM_WRITE_4(sc, GEM_RX_CONFIG, 0);
	GEM_BARRIER(sc, GEM_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0))
		device_printf(sc->sc_dev, "cannot disable RX DMA\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Reset the ERX. */
	GEM_WRITE_4(sc, GEM_RESET, GEM_RESET_RX);
	GEM_BARRIER(sc, GEM_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) {
		device_printf(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}

	/* Finally, reset RX MAC. */
	GEM_WRITE_4(sc, GEM_MAC_RXRESET, 1);
	GEM_BARRIER(sc, GEM_MAC_RXRESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_MAC_RXRESET, 1, 0)) {
		device_printf(sc->sc_dev, "cannot reset RX MAC\n");
		return (1);
	}

	return (0);
}
/*
 * Reset the receiver DMA engine.
 *
 * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW
 * etc in order to reset the receiver DMA engine only and not do a full
 * reset which amongst others also downs the link and clears the FIFOs.
 */
static void
gem_reset_rxdma(struct gem_softc *sc)
{
	int i;

	/* If even the RX reset fails, fall back to a full reinit. */
	if (gem_reset_rx(sc) != 0) {
		if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_RUNNING);
		return (gem_init_locked(sc));
	}
	/* Re-arm every descriptor that still owns an mbuf. */
	for (i = 0; i < GEM_NRXDESC; i++)
		if (sc->sc_rxsoft[i].rxs_mbuf != NULL)
			GEM_UPDATE_RXDESC(sc, i);
	sc->sc_rxptr = 0;
	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* NOTE: we use only 32-bit DMA addresses here. */
	GEM_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
	GEM_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
	GEM_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);
	GEM_WRITE_4(sc, GEM_RX_CONFIG,
	    gem_ringsize(GEM_NRXDESC /* XXX */) |
	    ((ETHER_HDR_LEN + sizeof(struct ip)) <<
	    GEM_RX_CONFIG_CXM_START_SHFT) |
	    (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
	    (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT));
	/*
	 * Interrupt blanking: 6 ticks, doubled on 66MHz PCI.  The previous
	 * expression bound "!= 0 ? 2 : 1" to the whole product because
	 * '!=' binds tighter than '?:', so it always wrote 2 or 1 instead
	 * of the intended 12 or 6; parenthesize so the GEM_PCI66 test
	 * only selects the multiplier.  (The matching expression in
	 * gem_init_locked() should be kept in sync.)
	 */
	GEM_WRITE_4(sc, GEM_RX_BLANKING,
	    ((6 * ((sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1)) <<
	    GEM_RX_BLANKING_TIME_SHIFT) | 6);
	GEM_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	GEM_WRITE_4(sc, GEM_RX_CONFIG,
	    GEM_READ_4(sc, GEM_RX_CONFIG) | GEM_RX_CONFIG_RXDMA_EN);
	GEM_WRITE_4(sc, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
	/*
	 * Clear the RX filter and reprogram it.  This will also set the
	 * current RX MAC configuration and enable it.
	 */
	gem_setladrf(sc);
}
/*
 * Reset the transmit path: disable TX DMA first (resetting mid-DMA can
 * hang the bus), then reset the ETX block.  Returns 0 on success, 1 if
 * the reset fails to complete.
 */
static int
gem_reset_tx(struct gem_softc *sc)
{

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	(void)gem_disable_tx(sc);
	GEM_WRITE_4(sc, GEM_TX_CONFIG, 0);
	GEM_BARRIER(sc, GEM_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0))
		device_printf(sc->sc_dev, "cannot disable TX DMA\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ETX. */
	GEM_WRITE_4(sc, GEM_RESET, GEM_RESET_TX);
	GEM_BARRIER(sc, GEM_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
		device_printf(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}
static int
gem_disable_rx(struct gem_softc *sc)
{
GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG,
GEM_READ_4(sc, GEM_MAC_RX_CONFIG) & ~GEM_MAC_RX_ENABLE);
GEM_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
if (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
return (1);
device_printf(sc->sc_dev, "cannot disable RX MAC\n");
return (0);
}
static int
gem_disable_tx(struct gem_softc *sc)
{
GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG,
GEM_READ_4(sc, GEM_MAC_TX_CONFIG) & ~GEM_MAC_TX_ENABLE);
GEM_BARRIER(sc, GEM_MAC_TX_CONFIG, 4,
BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
if (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
return (1);
device_printf(sc->sc_dev, "cannot disable TX MAC\n");
return (0);
}
/*
 * (Re)initialize the descriptor rings: clear the TX ring and software
 * state, and populate every RX descriptor with a buffer (allocating
 * where needed).  Returns 0 on success, 1 if an RX buffer could not be
 * allocated (in which case all RX buffers are drained again).
 */
static int
gem_meminit(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int error, i;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	sc->sc_txfree = GEM_MAXTXFREE;
	sc->sc_txnext = 0;
	sc->sc_txwin = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				device_printf(sc->sc_dev,
				    "unable to allocate or map RX buffer %d, "
				    "error = %d\n", i, error);
				/*
				 * XXX we should attempt to run with fewer
				 * receive buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
/*
 * Map a descriptor-ring element count (a power of two in [32, 8192],
 * as asserted by the CTASSERTs above) to the corresponding
 * GEM_RING_SZ_* register encoding.  An unsupported count is reported
 * and treated as 32 entries.
 */
static u_int
gem_ringsize(u_int sz)
{

	switch (sz) {
	case 32:
		return (GEM_RING_SZ_32);
	case 64:
		return (GEM_RING_SZ_64);
	case 128:
		return (GEM_RING_SZ_128);
	case 256:
		return (GEM_RING_SZ_256);
	case 512:
		return (GEM_RING_SZ_512);
	case 1024:
		return (GEM_RING_SZ_1024);
	case 2048:
		return (GEM_RING_SZ_2048);
	case 4096:
		return (GEM_RING_SZ_4096);
	case 8192:
		return (GEM_RING_SZ_8192);
	default:
		/* %u: sz is unsigned; the old %d mismatched the type. */
		printf("%s: invalid ring size %u\n", __func__, sz);
		return (GEM_RING_SZ_32);
	}
}
/*
 * if_init entry point: take the softc lock and defer to
 * gem_init_locked().
 */
static void
gem_init(void *xsc)
{
	struct gem_softc *sc = xsc;

	GEM_LOCK(sc);
	gem_init_locked(sc);
	GEM_UNLOCK(sc);
}
/*
* Initialization of interface; set up initialization block
* and transmit/receive descriptor rings.
*/
static void
gem_init_locked(struct gem_softc *sc)
{
if_t ifp = sc->sc_ifp;
uint32_t v;
GEM_LOCK_ASSERT(sc, MA_OWNED);
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
return;
#ifdef GEM_DEBUG
CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev),
__func__);
#endif
/*
* Initialization sequence. The numbered steps below correspond
* to the sequence outlined in section 6.3.5.1 in the Ethernet
* Channel Engine manual (part of the PCIO manual).
* See also the STP2002-STQ document from Sun Microsystems.
*/
/* step 1 & 2. Reset the Ethernet Channel. */
gem_stop(ifp, 0);
gem_reset(sc);
#ifdef GEM_DEBUG
CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev),
__func__);
#endif
if ((sc->sc_flags & GEM_SERDES) == 0)
/* Re-initialize the MIF. */
gem_mifinit(sc);
/* step 3. Setup data structures in host memory. */
if (gem_meminit(sc) != 0)
return;
/* step 4. TX MAC registers & counters */
gem_init_regs(sc);
/* step 5. RX MAC registers & counters */
/* step 6 & 7. Program Descriptor Ring Base Addresses. */
/* NOTE: we use only 32-bit DMA addresses here. */
GEM_WRITE_4(sc, GEM_TX_RING_PTR_HI, 0);
GEM_WRITE_4(sc, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));
GEM_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
GEM_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
#ifdef GEM_DEBUG
CTR3(KTR_GEM, "loading RX ring %lx, TX ring %lx, cddma %lx",
GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
#endif
/* step 8. Global Configuration & Interrupt Mask */
/*
* Set the internal arbitration to "infinite" bursts of the
* maximum length of 31 * 64 bytes so DMA transfers aren't
* split up in cache line size chunks. This greatly improves
* RX performance.
* Enable silicon bug workarounds for the Apple variants.
*/
GEM_WRITE_4(sc, GEM_CONFIG,
GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
GEM_CONFIG_BURST_INF | (GEM_IS_APPLE(sc) ?
GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));
GEM_WRITE_4(sc, GEM_INTMASK,
~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE |
GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR |
GEM_INTR_BERR
#ifdef GEM_DEBUG
| GEM_INTR_PCS | GEM_INTR_MIF
#endif
));
GEM_WRITE_4(sc, GEM_MAC_RX_MASK,
GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
GEM_WRITE_4(sc, GEM_MAC_TX_MASK,
GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP |
GEM_MAC_TX_PEAK_EXP);
#ifdef GEM_DEBUG
GEM_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME));
#else
GEM_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME);
#endif
/* step 9. ETX Configuration: use mostly default values. */
/* Enable DMA. */
v = gem_ringsize(GEM_NTXDESC);
/* Set TX FIFO threshold and enable DMA. */
v |= (0x4ff << 10) & GEM_TX_CONFIG_TXFIFO_TH;
GEM_WRITE_4(sc, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN);
/* step 10. ERX Configuration */
/* Encode Receive Descriptor ring size. */
v = gem_ringsize(GEM_NRXDESC /* XXX */);
/* RX TCP/UDP checksum offset */
v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
GEM_RX_CONFIG_CXM_START_SHFT);
/* Set RX FIFO threshold, set first byte offset and enable DMA. */
GEM_WRITE_4(sc, GEM_RX_CONFIG,
v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
(ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT) |
GEM_RX_CONFIG_RXDMA_EN);
GEM_WRITE_4(sc, GEM_RX_BLANKING,
((6 * (sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1) <<
GEM_RX_BLANKING_TIME_SHIFT) | 6);
/*
* The following value is for an OFF Threshold of about 3/4 full
* and an ON Threshold of 1/4 full.
*/
GEM_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
(3 * sc->sc_rxfifosize / 256) |
((sc->sc_rxfifosize / 256) << 12));
/* step 11. Configure Media. */
/* step 12. RX_MAC Configuration Register */
v = GEM_READ_4(sc, GEM_MAC_RX_CONFIG);
v &= ~GEM_MAC_RX_ENABLE;
v |= GEM_MAC_RX_STRIP_CRC;
sc->sc_mac_rxcfg = v;
/*
* Clear the RX filter and reprogram it. This will also set the
* current RX MAC configuration and enable it.
*/
gem_setladrf(sc);
/* step 13. TX_MAC Configuration Register */
v = GEM_READ_4(sc, GEM_MAC_TX_CONFIG);
v |= GEM_MAC_TX_ENABLE;
(void)gem_disable_tx(sc);
GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG, v);
/* step 14. Issue Transmit Pending command. */
/* step 15. Give the receiver a swift kick. */
GEM_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);
if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
mii_mediachg(sc->sc_mii);
/* Start the one second timer. */
sc->sc_wdog_timer = 0;
callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}
/*
 * Map an mbuf chain for transmission and fill in the corresponding TX
 * descriptors.  On success the chain is owned by the driver (recorded in
 * the txsoft entry) and 0 is returned.  *m_head may be replaced if the
 * chain had to be duplicated, pulled up or collapsed; it is set to NULL
 * when the chain was freed on failure.  Returns an errno value on
 * failure.  Must be called with the softc lock held.
 */
static int
gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head)
{
	bus_dma_segment_t txsegs[GEM_NTXSEGS];
	struct gem_txsoft *txs;
	struct ip *ip;
	struct mbuf *m;
	uint64_t cflags, flags;
	int error, nexttx, nsegs, offset, seg;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/* Get a work queue entry. */
	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
		/* Ran out of descriptors. */
		return (ENOBUFS);
	}

	cflags = 0;
	if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
		/* Checksum offload requested: headers must be writable. */
		if (M_WRITABLE(*m_head) == 0) {
			m = m_dup(*m_head, M_NOWAIT);
			m_freem(*m_head);
			*m_head = m;
			if (m == NULL)
				return (ENOBUFS);
		}
		/* Locate the IP header to compute checksum start/stuff offsets. */
		offset = sizeof(struct ether_header);
		m = m_pullup(*m_head, offset + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, caddr_t) + offset);
		offset += (ip->ip_hl << 2);
		cflags = offset << GEM_TD_CXSUM_STARTSHFT |
		    ((offset + m->m_pkthdr.csum_data) <<
		    GEM_TD_CXSUM_STUFFSHFT) | GEM_TD_CXSUM_ENABLE;
		*m_head = m;
	}

	error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments: compact the chain and retry once. */
		m = m_collapse(*m_head, M_NOWAIT, GEM_NTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag,
		    txs->txs_dmamap, *m_head, txsegs, &nsegs,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs <= GEM_NTXSEGS,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/*
	 * Ensure we have enough descriptors free to describe
	 * the packet.  Note, we always reserve one descriptor
	 * at the end of the ring as a termination point, in
	 * order to prevent wrap-around.
	 */
	if (nsegs > sc->sc_txfree - 1) {
		txs->txs_ndescs = 0;
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		return (ENOBUFS);
	}

	txs->txs_ndescs = nsegs;
	txs->txs_firstdesc = sc->sc_txnext;
	nexttx = txs->txs_firstdesc;
	/* Write one hardware descriptor per DMA segment. */
	for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) {
#ifdef GEM_DEBUG
		CTR6(KTR_GEM,
		    "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)",
		    __func__, seg, nexttx, txsegs[seg].ds_len,
		    txsegs[seg].ds_addr, htole64(txsegs[seg].ds_addr));
#endif
		sc->sc_txdescs[nexttx].gd_addr = htole64(txsegs[seg].ds_addr);
		KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE,
		    ("%s: segment size too large!", __func__));
		flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE;
		sc->sc_txdescs[nexttx].gd_flags = htole64(flags | cflags);
		txs->txs_lastdesc = nexttx;
	}

	/* Set EOP on the last descriptor. */
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: end of packet at segment %d, TX %d",
	    __func__, seg, nexttx);
#endif
	sc->sc_txdescs[txs->txs_lastdesc].gd_flags |=
	    htole64(GEM_TD_END_OF_PACKET);

	/* Lastly set SOP on the first descriptor. */
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: start of packet at segment %d, TX %d",
	    __func__, seg, nexttx);
#endif
	/*
	 * Request a TX interrupt roughly every 2/3 of the segment budget
	 * so completed descriptors are harvested regularly.
	 */
	if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
		sc->sc_txwin = 0;
		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
		    htole64(GEM_TD_INTERRUPT_ME | GEM_TD_START_OF_PACKET);
	} else
		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
		    htole64(GEM_TD_START_OF_PACKET);

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
	    BUS_DMASYNC_PREWRITE);
#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
	    __func__, txs->txs_firstdesc, txs->txs_lastdesc,
	    txs->txs_ndescs);
#endif
	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	txs->txs_mbuf = *m_head;

	sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
	sc->sc_txfree -= txs->txs_ndescs;

	return (0);
}
/*
 * Program the MAC registers: one-time "magic" values on first call
 * (guarded by GEM_INITED), then per-init items — statistics counters,
 * XOFF PAUSE time, station address and MII output enable.  Must be
 * called with the softc lock held.
 */
static void
gem_init_regs(struct gem_softc *sc)
{
	const u_char *laddr = if_getlladdr(sc->sc_ifp);

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/* These registers are not cleared on reset. */
	if ((sc->sc_flags & GEM_INITED) == 0) {
		/* magic values */
		GEM_WRITE_4(sc, GEM_MAC_IPG0, 0);
		GEM_WRITE_4(sc, GEM_MAC_IPG1, 8);
		GEM_WRITE_4(sc, GEM_MAC_IPG2, 4);

		/* min frame length */
		GEM_WRITE_4(sc, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* max frame length and max burst size */
		GEM_WRITE_4(sc, GEM_MAC_MAC_MAX_FRAME,
		    (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));

		/* more magic values */
		GEM_WRITE_4(sc, GEM_MAC_PREAMBLE_LEN, 0x7);
		GEM_WRITE_4(sc, GEM_MAC_JAM_SIZE, 0x4);
		GEM_WRITE_4(sc, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		GEM_WRITE_4(sc, GEM_MAC_CONTROL_TYPE, 0x8808);

		/* random number seed */
		GEM_WRITE_4(sc, GEM_MAC_RANDOM_SEED,
		    ((laddr[5] << 8) | laddr[4]) & 0x3ff);

		/* secondary MAC address: 0:0:0:0:0:0 */
		GEM_WRITE_4(sc, GEM_MAC_ADDR3, 0);
		GEM_WRITE_4(sc, GEM_MAC_ADDR4, 0);
		GEM_WRITE_4(sc, GEM_MAC_ADDR5, 0);

		/* MAC control address: 01:80:c2:00:00:01 */
		GEM_WRITE_4(sc, GEM_MAC_ADDR6, 0x0001);
		GEM_WRITE_4(sc, GEM_MAC_ADDR7, 0xc200);
		GEM_WRITE_4(sc, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter address: 0:0:0:0:0:0 */
		GEM_WRITE_4(sc, GEM_MAC_ADDR_FILTER0, 0);
		GEM_WRITE_4(sc, GEM_MAC_ADDR_FILTER1, 0);
		GEM_WRITE_4(sc, GEM_MAC_ADDR_FILTER2, 0);
		GEM_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK1_2, 0);
		GEM_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_flags |= GEM_INITED;
	}

	/* Counters need to be zeroed. */
	GEM_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
	GEM_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
	GEM_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
	GEM_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);
	GEM_WRITE_4(sc, GEM_MAC_DEFER_TMR_CNT, 0);
	GEM_WRITE_4(sc, GEM_MAC_PEAK_ATTEMPTS, 0);
	GEM_WRITE_4(sc, GEM_MAC_RX_FRAME_COUNT, 0);
	GEM_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0);
	GEM_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0);
	GEM_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0);
	GEM_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0);

	/* Set XOFF PAUSE time. */
	GEM_WRITE_4(sc, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);

	/* Set the station address. */
	GEM_WRITE_4(sc, GEM_MAC_ADDR0, (laddr[4] << 8) | laddr[5]);
	GEM_WRITE_4(sc, GEM_MAC_ADDR1, (laddr[2] << 8) | laddr[3]);
	GEM_WRITE_4(sc, GEM_MAC_ADDR2, (laddr[0] << 8) | laddr[1]);

	/* Enable MII outputs. */
	GEM_WRITE_4(sc, GEM_MAC_XIF_CONFIG, GEM_MAC_XIF_TX_MII_ENA);
}
/*
 * if_start callback: serialize on the softc lock around the locked
 * transmit path.
 */
static void
gem_start(if_t ifp)
{
	struct gem_softc *sc;

	sc = if_getsoftc(ifp);
	GEM_LOCK(sc);
	gem_start_locked(ifp);
	GEM_UNLOCK(sc);
}
static inline void
gem_txkick(struct gem_softc *sc)
{

	/*
	 * Update the TX kick register.  This register has to point to the
	 * descriptor after the last valid one and for optimum performance
	 * should be incremented in multiples of 4 (the DMA engine fetches/
	 * updates descriptors in batches of 4).
	 */
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: %s: kicking TX %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
#endif
	/* Make the descriptors visible to the chip before the kick. */
	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	GEM_WRITE_4(sc, GEM_TX_KICK, sc->sc_txnext);
}
/*
 * Dequeue packets from the interface send queue, load them onto the TX
 * ring and kick the DMA engine.  Kicks are batched on multiples of 4
 * descriptors; a final kick covers any remainder.  Must be called with
 * the softc lock held.
 */
static void
gem_start_locked(if_t ifp)
{
	struct gem_softc *sc = if_getsoftc(ifp);
	struct mbuf *m;
	int kicked, ntx;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/* Do nothing unless running, not flow-blocked and link is up. */
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0)
		return;

#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
	    sc->sc_txnext);
#endif
	ntx = 0;
	kicked = 0;
	/* One descriptor is always kept in reserve; hence txfree > 1. */
	for (; !if_sendq_empty(ifp) && sc->sc_txfree > 1;) {
		m = if_dequeue(ifp);
		if (m == NULL)
			break;
		if (gem_load_txmbuf(sc, &m) != 0) {
			/* m == NULL means the chain was consumed/freed. */
			if (m == NULL)
				break;
			/* Out of descriptors: requeue and stall the queue. */
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			if_sendq_prepend(ifp, m);
			break;
		}
		if ((sc->sc_txnext % 4) == 0) {
			gem_txkick(sc);
			kicked = 1;
		} else
			kicked = 0;
		ntx++;
		BPF_MTAP(ifp, m);
	}

	if (ntx > 0) {
		/* Kick for any descriptors not covered by a batched kick. */
		if (kicked == 0)
			gem_txkick(sc);
#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
		    device_get_name(sc->sc_dev), sc->sc_txnext);
#endif

		/* Set a watchdog timer in case the chip flakes out. */
		sc->sc_wdog_timer = 5;
#ifdef GEM_DEBUG
		CTR3(KTR_GEM, "%s: %s: watchdog %d",
		    device_get_name(sc->sc_dev), __func__,
		    sc->sc_wdog_timer);
#endif
	}
}
/*
 * TX completion handler: reclaim descriptors the chip has finished
 * with, unload and free the associated mbufs, and restart transmission
 * if any descriptors were freed.  Must be called with the softc lock
 * held (asserted only under GEM_DEBUG here).
 */
static void
gem_tint(struct gem_softc *sc)
{
	if_t ifp = sc->sc_ifp;
	struct gem_txsoft *txs;
	int progress;
	uint32_t txlast;
#ifdef GEM_DEBUG
	int i;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

	/*
	 * Go through our TX list and free mbufs for those
	 * frames that have been transmitted.
	 */
	progress = 0;
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
#ifdef GEM_DEBUG
		if ((if_getflags(ifp) & IFF_DEBUG) != 0) {
			printf(" txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("gd_flags: 0x%016llx\t",
				    (long long)le64toh(
				    sc->sc_txdescs[i].gd_flags));
				printf("gd_addr: 0x%016llx\n",
				    (long long)le64toh(
				    sc->sc_txdescs[i].gd_addr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed + 1.
		 */
		txlast = GEM_READ_4(sc, GEM_TX_COMPLETION);
#ifdef GEM_DEBUG
		CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
#endif
		/* Stop if the chip is still inside this packet's span. */
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
			    (txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps. */
			if ((txlast >= txs->txs_firstdesc) ||
			    (txlast <= txs->txs_lastdesc))
				break;
		}

#ifdef GEM_DEBUG
		CTR1(KTR_GEM, "%s: releasing a descriptor", __func__);
#endif
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		progress = 1;
	}

#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x",
	    __func__, GEM_READ_4(sc, GEM_TX_STATE_MACHINE),
	    ((long long)GEM_READ_4(sc, GEM_TX_DATA_PTR_HI) << 32) |
	    GEM_READ_4(sc, GEM_TX_DATA_PTR_LO),
	    GEM_READ_4(sc, GEM_TX_COMPLETION));
#endif

	if (progress) {
		/* Ring fully drained: reset the interrupt window counter. */
		if (sc->sc_txfree == GEM_NTXDESC - 1)
			sc->sc_txwin = 0;

		/*
		 * We freed some descriptors, so reset IFF_DRV_OACTIVE
		 * and restart.
		 */
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		if (STAILQ_EMPTY(&sc->sc_txdirtyq))
			sc->sc_wdog_timer = 0;
		gem_start_locked(ifp);
	}

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: %s: watchdog %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
#endif
}
#ifdef GEM_RINT_TIMEOUT
/*
 * Callout handler armed by gem_rint() when a completed descriptor was
 * still marked owned by the chip; re-runs RX processing to pick up the
 * stuck packet.  Runs with the softc lock held (callout is presumably
 * mtx-initialized with the softc lock — the assertion below implies it).
 */
static void
gem_rint_timeout(void *arg)
{
	struct gem_softc *sc = arg;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	gem_rint(sc);
}
#endif
/*
 * RX interrupt handler: walk the RX ring up to the snapshot of the
 * completion register, replace filled buffers with fresh clusters,
 * batch-update the RX kick register, and hand received packets to the
 * stack (dropping the softc lock around if_input()).  Must be called
 * with the softc lock held.
 */
static void
gem_rint(struct gem_softc *sc)
{
	if_t ifp = sc->sc_ifp;
	struct mbuf *m;
	uint64_t rxstat;
	uint32_t rxcomp;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_RINT_TIMEOUT
	callout_stop(&sc->sc_rx_ch);
#endif
#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

	/*
	 * Read the completion register once.  This limits
	 * how long the following loop can execute.
	 */
	rxcomp = GEM_READ_4(sc, GEM_RX_COMPLETION);
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: sc->sc_rxptr %d, complete %d",
	    __func__, sc->sc_rxptr, rxcomp);
#endif
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (; sc->sc_rxptr != rxcomp;) {
		m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf;
		rxstat = le64toh(sc->sc_rxdescs[sc->sc_rxptr].gd_flags);

		if (rxstat & GEM_RD_OWN) {
#ifdef GEM_RINT_TIMEOUT
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed.  This has been
			 * observed on some machines.  Just exiting here
			 * might leave the packet sitting around until another
			 * one arrives to trigger a new interrupt, which is
			 * generally undesirable, so set up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
			    gem_rint_timeout, sc);
#endif
			m = NULL;
			goto kickit;
		}

		if (rxstat & GEM_RD_BAD_CRC) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			device_printf(sc->sc_dev, "receive error: CRC error\n");
			/* Recycle the buffer in place. */
			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
			m = NULL;
			goto kickit;
		}

#ifdef GEM_DEBUG
		if ((if_getflags(ifp) & IFF_DEBUG) != 0) {
			printf(" rxsoft %p descriptor %d: ",
			    &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr);
			printf("gd_flags: 0x%016llx\t",
			    (long long)le64toh(
			    sc->sc_rxdescs[sc->sc_rxptr].gd_flags));
			printf("gd_addr: 0x%016llx\n",
			    (long long)le64toh(
			    sc->sc_rxdescs[sc->sc_rxptr].gd_addr));
		}
#endif

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
			m = NULL;
		}

 kickit:
		/*
		 * Update the RX kick register.  This register has to point
		 * to the descriptor after the last valid one (before the
		 * current batch) and for optimum performance should be
		 * incremented in multiples of 4 (the DMA engine fetches/
		 * updates descriptors in batches of 4).
		 */
		sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr);
		if ((sc->sc_rxptr % 4) == 0) {
			GEM_CDSYNC(sc,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			GEM_WRITE_4(sc, GEM_RX_KICK,
			    (sc->sc_rxptr + GEM_NRXDESC - 4) &
			    GEM_NRXDESC_MASK);
		}

		if (m == NULL) {
			/* Chip-owned descriptor ends the batch early. */
			if (rxstat & GEM_RD_OWN)
				break;
			continue;
		}

		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		m->m_data += ETHER_ALIGN; /* first byte offset */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat);

		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
			gem_rxcksum(m, rxstat);

		/* Pass it on. */
		GEM_UNLOCK(sc);
		if_input(ifp, m);
		GEM_LOCK(sc);
	}

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: done sc->sc_rxptr %d, complete %d", __func__,
	    sc->sc_rxptr, GEM_READ_4(sc, GEM_RX_COMPLETION));
#endif
}
/*
 * Allocate and DMA-map a fresh mbuf cluster for RX ring slot idx,
 * replacing any buffer currently mapped there, and rewrite the
 * hardware RX descriptor.  Returns 0 on success or an errno value.
 * Must be called with the softc lock held.
 */
static int
gem_add_rxbuf(struct gem_softc *sc, int idx)
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	int error, nsegs;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

#ifdef GEM_DEBUG
	/* Bzero the packet to check DMA. */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL) {
		/* Tear down the mapping of the buffer being replaced. */
		bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
	}

	error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
	    m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		/* Message fixed: this is the RX (not "RS") DMA map. */
		device_printf(sc->sc_dev,
		    "cannot load RX DMA map %d, error = %d\n", idx, error);
		m_freem(m);
		return (error);
	}
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	rxs->rxs_mbuf = m;
	rxs->rxs_paddr = segs[0].ds_addr;
	bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
	    BUS_DMASYNC_PREREAD);
	GEM_INIT_RXDESC(sc, idx);

	return (0);
}
/*
 * Handle fatal/error interrupt bits: RX tag errors trigger an RX DMA
 * reset; PERR/BERR are reported on the console (with PCI error status
 * appended for BERR).  The two printf() calls continue the unterminated
 * device_printf() line — intentional, do not add newlines earlier.
 */
static void
gem_eint(struct gem_softc *sc, u_int status)
{

	if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
	if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
		gem_reset_rxdma(sc);
		return;
	}

	device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status);
	if ((status & GEM_INTR_BERR) != 0) {
		printf(", PCI bus error 0x%x",
		    GEM_READ_4(sc, GEM_PCI_ERROR_STATUS));
	}
	printf("\n");
}
/*
 * Main interrupt handler: read the summary status register once and
 * dispatch to the error, RX, TX, TX-MAC and RX-MAC sub-handlers.  The
 * PCS/MIF reporting is compiled only under GEM_DEBUG, matching the
 * interrupt mask programmed in gem_init_locked() which only unmasks
 * GEM_INTR_PCS/GEM_INTR_MIF there.
 */
void
gem_intr(void *v)
{
	struct gem_softc *sc = v;
	uint32_t status, status2;

	GEM_LOCK(sc);
	status = GEM_READ_4(sc, GEM_STATUS);

#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: %s: cplt %x, status %x",
	    device_get_name(sc->sc_dev), __func__,
	    (status >> GEM_STATUS_TX_COMPLETION_SHFT), (u_int)status);

	/*
	 * PCS interrupts must be cleared, otherwise no traffic is passed!
	 */
	if ((status & GEM_INTR_PCS) != 0) {
		/* Double read acknowledges/clears the latched PCS status. */
		status2 =
		    GEM_READ_4(sc, GEM_MII_INTERRUP_STATUS) |
		    GEM_READ_4(sc, GEM_MII_INTERRUP_STATUS);
		if ((status2 & GEM_MII_INTERRUP_LINK) != 0)
			device_printf(sc->sc_dev,
			    "%s: PCS link status changed\n", __func__);
	}
	if ((status & GEM_MAC_CONTROL_STATUS) != 0) {
		status2 = GEM_READ_4(sc, GEM_MAC_CONTROL_STATUS);
		if ((status2 & GEM_MAC_PAUSED) != 0)
			device_printf(sc->sc_dev,
			    "%s: PAUSE received (PAUSE time %d slots)\n",
			    __func__, GEM_MAC_PAUSE_TIME(status2));
		if ((status2 & GEM_MAC_PAUSE) != 0)
			device_printf(sc->sc_dev,
			    "%s: transited to PAUSE state\n", __func__);
		if ((status2 & GEM_MAC_RESUME) != 0)
			device_printf(sc->sc_dev,
			    "%s: transited to non-PAUSE state\n", __func__);
	}
	if ((status & GEM_INTR_MIF) != 0)
		device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__);
#endif

	if (__predict_false(status &
	    (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0)
		gem_eint(sc, status);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		gem_rint(sc);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		gem_tint(sc);

	if (__predict_false((status & GEM_INTR_TX_MAC) != 0)) {
		status2 = GEM_READ_4(sc, GEM_MAC_TX_STATUS);
		if ((status2 &
		    ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP |
		    GEM_MAC_TX_PEAK_EXP)) != 0)
			device_printf(sc->sc_dev,
			    "MAC TX fault, status %x\n", status2);
		if ((status2 &
		    (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0) {
			/* Fatal TX MAC fault: reinitialize the interface. */
			if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
			if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_RUNNING);
			gem_init_locked(sc);
		}
	}
	if (__predict_false((status & GEM_INTR_RX_MAC) != 0)) {
		status2 = GEM_READ_4(sc, GEM_MAC_RX_STATUS);
		/*
		 * At least with GEM_SUN_GEM revisions GEM_MAC_RX_OVERFLOW
		 * happen often due to a silicon bug so handle them silently.
		 * Moreover, it's likely that the receiver has hung so we
		 * reset it.
		 */
		if ((status2 & GEM_MAC_RX_OVERFLOW) != 0) {
			if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
			gem_reset_rxdma(sc);
		} else if ((status2 &
		    ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) != 0)
			device_printf(sc->sc_dev,
			    "MAC RX fault, status %x\n", status2);
	}
	GEM_UNLOCK(sc);
}
/*
 * Per-tick watchdog: decrement the TX timer armed by gem_start_locked()
 * and, when it expires, reinitialize the interface and retry pending
 * transmissions.  Returns 0 while the timer is idle or still counting,
 * EJUSTRETURN after a timeout was handled.  Must be called with the
 * softc lock held.
 */
static int
gem_watchdog(struct gem_softc *sc)
{
	if_t ifp = sc->sc_ifp;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
	CTR4(KTR_GEM,
	    "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x GEM_MAC_RX_CONFIG %x",
	    __func__, GEM_READ_4(sc, GEM_RX_CONFIG),
	    GEM_READ_4(sc, GEM_MAC_RX_STATUS),
	    GEM_READ_4(sc, GEM_MAC_RX_CONFIG));
	CTR4(KTR_GEM,
	    "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x GEM_MAC_TX_CONFIG %x",
	    __func__, GEM_READ_4(sc, GEM_TX_CONFIG),
	    GEM_READ_4(sc, GEM_MAC_TX_STATUS),
	    GEM_READ_4(sc, GEM_MAC_TX_CONFIG));
#endif

	/* Timer disarmed, or still ticking: nothing to do yet. */
	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
		return (0);

	if ((sc->sc_flags & GEM_LINK) != 0)
		device_printf(sc->sc_dev, "device timeout\n");
	else if (bootverbose)
		device_printf(sc->sc_dev, "device timeout (no link)\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

	/* Try to get more packets going. */
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	gem_init_locked(sc);
	gem_start_locked(ifp);
	return (EJUSTRETURN);
}
/*
 * Put the MIF into frame mode by clearing the bit-bang enable bit,
 * then issue a barrier so the mode change is posted before any
 * subsequent MIF frame operation.
 */
static void
gem_mifinit(struct gem_softc *sc)
{

	/* Configure the MIF in frame mode. */
	GEM_WRITE_4(sc, GEM_MIF_CONFIG,
	    GEM_READ_4(sc, GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA);
	GEM_BARRIER(sc, GEM_MIF_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
/*
* MII interface
*
* The MII interface supports at least three different operating modes:
*
* Bitbang mode is implemented using data, clock and output enable registers.
*
* Frame mode is implemented by loading a complete frame into the frame
* register and polling the valid bit for completion.
*
* Polling mode uses the frame register but completion is indicated by
* an interrupt.
*
*/
/*
 * miibus read-register method.  In SERDES mode the MII registers are
 * emulated from the on-chip PCS registers; otherwise a MIF frame-mode
 * read is issued and polled (up to ~100us) for completion.  Returns
 * the register value, or 0 on timeout/unhandled register.
 */
int
gem_mii_readreg(device_t dev, int phy, int reg)
{
	struct gem_softc *sc;
	int n;
	uint32_t v;

#ifdef GEM_DEBUG_PHY
	printf("%s: phy %d reg %d\n", __func__, phy, reg);
#endif

	sc = device_get_softc(dev);
	if ((sc->sc_flags & GEM_SERDES) != 0) {
		/* Map standard MII register numbers onto PCS registers. */
		switch (reg) {
		case MII_BMCR:
			reg = GEM_MII_CONTROL;
			break;
		case MII_BMSR:
			reg = GEM_MII_STATUS;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
		case MII_ANAR:
			reg = GEM_MII_ANAR;
			break;
		case MII_ANLPAR:
			reg = GEM_MII_ANLPAR;
			break;
		case MII_EXTSR:
			/* PCS always supports 1000BASE-X full/half duplex. */
			return (EXTSR_1000XFDX | EXTSR_1000XHDX);
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		return (GEM_READ_4(sc, reg));
	}

	/* Construct the frame command. */
	v = GEM_MIF_FRAME_READ |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT);

	GEM_WRITE_4(sc, GEM_MIF_FRAME, v);
	GEM_BARRIER(sc, GEM_MIF_FRAME, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	/* Poll for completion: TA0 set means the frame finished. */
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = GEM_READ_4(sc, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
	return (0);
}
/*
 * miibus write-register method.  In SERDES mode writes go to the
 * emulated PCS registers (with special handling for PCS reset and
 * reprogramming of the autonegotiation/serial-link setup); otherwise a
 * MIF frame-mode write is issued and polled for completion.  Returns 1
 * on successful MIF completion, 0 otherwise (SERDES path and timeout).
 */
int
gem_mii_writereg(device_t dev, int phy, int reg, int val)
{
	struct gem_softc *sc;
	int n;
	uint32_t v;

#ifdef GEM_DEBUG_PHY
	/*
	 * Fixed: arguments were previously passed as (phy, reg, val,
	 * __func__), handing an int to %s — undefined behavior.
	 */
	printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val);
#endif

	sc = device_get_softc(dev);
	if ((sc->sc_flags & GEM_SERDES) != 0) {
		switch (reg) {
		case MII_BMSR:
			reg = GEM_MII_STATUS;
			break;
		case MII_BMCR:
			reg = GEM_MII_CONTROL;
			if ((val & GEM_MII_CONTROL_RESET) == 0)
				break;
			/* Reset the PCS and wait for it to self-clear. */
			GEM_WRITE_4(sc, GEM_MII_CONTROL, val);
			GEM_BARRIER(sc, GEM_MII_CONTROL, 4,
			    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
			if (!gem_bitwait(sc, GEM_MII_CONTROL,
			    GEM_MII_CONTROL_RESET, 0))
				device_printf(sc->sc_dev,
				    "cannot reset PCS\n");
			/* FALLTHROUGH */
		case MII_ANAR:
			/* Disable, reprogram and re-enable the PCS. */
			GEM_WRITE_4(sc, GEM_MII_CONFIG, 0);
			GEM_BARRIER(sc, GEM_MII_CONFIG, 4,
			    BUS_SPACE_BARRIER_WRITE);
			GEM_WRITE_4(sc, GEM_MII_ANAR, val);
			GEM_BARRIER(sc, GEM_MII_ANAR, 4,
			    BUS_SPACE_BARRIER_WRITE);
			GEM_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
			    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
			GEM_BARRIER(sc, GEM_MII_SLINK_CONTROL, 4,
			    BUS_SPACE_BARRIER_WRITE);
			GEM_WRITE_4(sc, GEM_MII_CONFIG,
			    GEM_MII_CONFIG_ENABLE);
			GEM_BARRIER(sc, GEM_MII_CONFIG, 4,
			    BUS_SPACE_BARRIER_WRITE);
			return (0);
		case MII_ANLPAR:
			reg = GEM_MII_ANLPAR;
			break;
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		GEM_WRITE_4(sc, reg, val);
		GEM_BARRIER(sc, reg, 4,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
		return (0);
	}

	/* Construct the frame command. */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	GEM_WRITE_4(sc, GEM_MIF_FRAME, v);
	GEM_BARRIER(sc, GEM_MIF_FRAME, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	/* Poll for completion: TA0 set means the frame finished. */
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = GEM_READ_4(sc, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (1);
	}

	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
	return (0);
}
/*
 * miibus status-change method: track link state, then reprogram the TX
 * and RX MACs, pause/flow-control settings, slot time and XIF
 * configuration for the newly negotiated media (steps F), G) and H) of
 * the GEM ASIC init sequence, per the comment below).  Must be called
 * with the softc lock held.
 */
void
gem_mii_statchg(device_t dev)
{
	struct gem_softc *sc;
	int gigabit;
	uint32_t rxcfg, txcfg, v;

	sc = device_get_softc(dev);

	GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
	/*
	 * Fixed: this previously read "sc->sc_if_getflags(ifp)", which
	 * references a nonexistent softc member and an undeclared "ifp"
	 * and cannot compile with GEM_DEBUG defined.
	 */
	if ((if_getflags(sc->sc_ifp) & IFF_DEBUG) != 0)
		device_printf(sc->sc_dev, "%s: status change\n", __func__);
#endif

	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
	    IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
		sc->sc_flags |= GEM_LINK;
	else
		sc->sc_flags &= ~GEM_LINK;

	switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
	case IFM_1000_SX:
	case IFM_1000_LX:
	case IFM_1000_CX:
	case IFM_1000_T:
		gigabit = 1;
		break;
	default:
		gigabit = 0;
	}

	/*
	 * The configuration done here corresponds to the steps F) and
	 * G) and as far as enabling of RX and TX MAC goes also step H)
	 * of the initialization sequence outlined in section 3.2.1 of
	 * the GEM Gigabit Ethernet ASIC Specification.
	 */

	rxcfg = sc->sc_mac_rxcfg;
	rxcfg &= ~GEM_MAC_RX_CARR_EXTEND;
	txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
	else if (gigabit != 0) {
		/* Half-duplex gigabit requires carrier extension. */
		rxcfg |= GEM_MAC_RX_CARR_EXTEND;
		txcfg |= GEM_MAC_TX_CARR_EXTEND;
	}
	/* MACs must be disabled while their configuration is changed. */
	(void)gem_disable_tx(sc);
	GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG, txcfg);
	(void)gem_disable_rx(sc);
	GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, rxcfg);

	v = GEM_READ_4(sc, GEM_MAC_CONTROL_CONFIG) &
	    ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_RXPAUSE) != 0)
		v |= GEM_MAC_CC_RX_PAUSE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_TXPAUSE) != 0)
		v |= GEM_MAC_CC_TX_PAUSE;
	GEM_WRITE_4(sc, GEM_MAC_CONTROL_CONFIG, v);

	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
	    gigabit != 0)
		GEM_WRITE_4(sc, GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_CARR_EXTEND);
	else
		GEM_WRITE_4(sc, GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_NORMAL);

	/* XIF Configuration */
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;
	if ((sc->sc_flags & GEM_SERDES) == 0) {
		if ((GEM_READ_4(sc, GEM_MIF_CONFIG) &
		    GEM_MIF_CONFIG_PHY_SEL) != 0) {
			/* External MII needs echo disable if half duplex. */
			if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
			    IFM_FDX) == 0)
				v |= GEM_MAC_XIF_ECHO_DISABL;
		} else
			/*
			 * Internal MII needs buffer enable.
			 * XXX buffer enable makes only sense for an
			 * external PHY.
			 */
			v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	if (gigabit != 0)
		v |= GEM_MAC_XIF_GMII_MODE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		v |= GEM_MAC_XIF_FDPLX_LED;
	GEM_WRITE_4(sc, GEM_MAC_XIF_CONFIG, v);

	sc->sc_mac_rxcfg = rxcfg;
	/* Re-enable the MACs only when running and the link is up. */
	if ((if_getdrvflags(sc->sc_ifp) & IFF_DRV_RUNNING) != 0 &&
	    (sc->sc_flags & GEM_LINK) != 0) {
		GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG,
		    txcfg | GEM_MAC_TX_ENABLE);
		GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG,
		    rxcfg | GEM_MAC_RX_ENABLE);
	}
}
/*
 * ifmedia change callback: hand the request to the MII layer under the
 * softc lock and propagate its result.
 */
int
gem_mediachange(if_t ifp)
{
	struct gem_softc *sc;
	int error;

	sc = if_getsoftc(ifp);
	/* XXX add support for serial media. */
	GEM_LOCK(sc);
	error = mii_mediachg(sc->sc_mii);
	GEM_UNLOCK(sc);
	return (error);
}
/*
 * ifmedia status callback: refresh and report the current media state.
 * Nothing is reported while the interface is administratively down.
 */
void
gem_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
	struct gem_softc *sc;

	sc = if_getsoftc(ifp);
	GEM_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		mii_pollstat(sc->sc_mii);
		ifmr->ifm_active = sc->sc_mii->mii_media_active;
		ifmr->ifm_status = sc->sc_mii->mii_media_status;
	}
	GEM_UNLOCK(sc);
}
/*
 * ioctl handler: interface flags (including the LINK0 hack that toggles
 * UDP TX checksum offload), multicast list updates, media requests and
 * capability changes; everything else is passed to ether_ioctl().
 */
static int
gem_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct gem_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	int error;

	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		GEM_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			/*
			 * Only the RX filter needs reprogramming if just
			 * ALLMULTI/PROMISC changed while running; otherwise
			 * do a full reinit.
			 */
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
			    ((if_getflags(ifp) ^ sc->sc_ifflags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				gem_setladrf(sc);
			else
				gem_init_locked(sc);
		} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			gem_stop(ifp, 0);
		/* LINK0 selects whether UDP TX checksums are offloaded. */
		if ((if_getflags(ifp) & IFF_LINK0) != 0)
			sc->sc_csum_features |= CSUM_UDP;
		else
			sc->sc_csum_features &= ~CSUM_UDP;
		if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
			if_sethwassist(ifp, sc->sc_csum_features);
		sc->sc_ifflags = if_getflags(ifp);
		GEM_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		GEM_LOCK(sc);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			gem_setladrf(sc);
		GEM_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		GEM_LOCK(sc);
		if_setcapenable(ifp, ifr->ifr_reqcap);
		if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
			if_sethwassist(ifp, sc->sc_csum_features);
		else
			if_sethwassist(ifp, 0);
		GEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
/*
 * if_foreach_llmaddr() callback: fold one multicast link-level address
 * into the 256-bit hash filter passed via arg.  Always counts the
 * address (returns 1).
 */
static u_int
gem_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *hash = arg;
	uint32_t msb;

	/* We just want the 8 most significant bits of the CRC. */
	msb = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN) >> 24;
	/* Set the corresponding bit (bit 0 of each word is the MSB). */
	hash[msb >> 4] |= 1 << (15 - (msb & 15));
	return (1);
}
/*
 * Reprogram the RX filter: promiscuous, all-multicast, or a 256-bit
 * hash filter built from the multicast list.  The RX MAC is quiesced
 * first (a Sun GEM programming requirement), and re-enabled with the
 * new configuration, which is also cached in sc_mac_rxcfg.  Must be
 * called with the softc lock held.
 */
static void
gem_setladrf(struct gem_softc *sc)
{
	if_t ifp = sc->sc_ifp;
	int i;
	uint32_t hash[16];
	uint32_t v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Turn off the RX MAC and the hash filter as required by the Sun GEM
	 * programming restrictions.
	 */
	v = sc->sc_mac_rxcfg & ~GEM_MAC_RX_HASH_FILTER;
	GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
	GEM_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER |
	    GEM_MAC_RX_ENABLE, 0))
		device_printf(sc->sc_dev,
		    "cannot disable RX MAC or hash filter\n");

	v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_PROMISC_GRP);
	if ((if_getflags(ifp) & IFF_PROMISC) != 0) {
		v |= GEM_MAC_RX_PROMISCUOUS;
		goto chipit;
	}
	if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
		v |= GEM_MAC_RX_PROMISC_GRP;
		goto chipit;
	}

	/*
	 * Set up multicast address filter by passing all multicast
	 * addresses through a crc generator, and then using the high
	 * order 8 bits as an index into the 256 bit logical address
	 * filter.  The high order 4 bits selects the word, while the
	 * other 4 bits select the bit within the word (where bit 0
	 * is the MSB).
	 */

	memset(hash, 0, sizeof(hash));
	if_foreach_llmaddr(ifp, gem_hash_maddr, hash);

	v |= GEM_MAC_RX_HASH_FILTER;

	/* Now load the hash table into the chip (if we are using it). */
	for (i = 0; i < 16; i++)
		GEM_WRITE_4(sc,
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
		    hash[i]);

 chipit:
	sc->sc_mac_rxcfg = v;
	/* Re-enable the RX MAC with the new filter configuration. */
	GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, v | GEM_MAC_RX_ENABLE);
}
diff --git a/sys/dev/gve/gve_main.c b/sys/dev/gve/gve_main.c
index cd7849778bce..f8a37b9f37a9 100644
--- a/sys/dev/gve/gve_main.c
+++ b/sys/dev/gve/gve_main.c
@@ -1,872 +1,863 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2023 Google LLC
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "gve.h"
#include "gve_adminq.h"
/* Driver identification reported to the device and in attach messages. */
#define GVE_DRIVER_VERSION "GVE-FBSD-1.0.1\n"
#define GVE_VERSION_MAJOR 1
#define GVE_VERSION_MINOR 0
#define GVE_VERSION_SUB 1
/* Default RX copy-vs-refcount threshold in bytes (see priv->rx_copybreak). */
#define GVE_DEFAULT_RX_COPYBREAK 256
/* Devices supported by this driver. */
static struct gve_dev {
uint16_t vendor_id;
uint16_t device_id;
const char *name;
} gve_devs[] = {
{ PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC, "gVNIC" }
};
/* Global lock shared by all gve instances. */
struct sx gve_global_lock;
/*
 * Report driver/OS version information to the device via the admin queue
 * so it can reject known-incompatible drivers.  A device that does not
 * implement the command (EOPNOTSUPP) is treated as compatible.
 * Returns 0 on success or an errno.
 */
static int
gve_verify_driver_compatibility(struct gve_priv *priv)
{
int err;
struct gve_driver_info *driver_info;
struct gve_dma_handle driver_info_mem;
err = gve_dma_alloc_coherent(priv, sizeof(struct gve_driver_info),
PAGE_SIZE, &driver_info_mem);
if (err != 0)
return (ENOMEM);
driver_info = driver_info_mem.cpu_addr;
/* Device expects big-endian fields. */
*driver_info = (struct gve_driver_info) {
.os_type = 3, /* Freebsd */
.driver_major = GVE_VERSION_MAJOR,
.driver_minor = GVE_VERSION_MINOR,
.driver_sub = GVE_VERSION_SUB,
.os_version_major = htobe32(FBSD_VERSION_MAJOR),
.os_version_minor = htobe32(FBSD_VERSION_MINOR),
.os_version_sub = htobe32(FBSD_VERSION_PATCH),
.driver_capability_flags = {
htobe64(GVE_DRIVER_CAPABILITY_FLAGS1),
htobe64(GVE_DRIVER_CAPABILITY_FLAGS2),
htobe64(GVE_DRIVER_CAPABILITY_FLAGS3),
htobe64(GVE_DRIVER_CAPABILITY_FLAGS4),
},
};
snprintf(driver_info->os_version_str1, sizeof(driver_info->os_version_str1),
"FreeBSD %u", __FreeBSD_version);
bus_dmamap_sync(driver_info_mem.tag, driver_info_mem.map,
BUS_DMASYNC_PREREAD);
err = gve_adminq_verify_driver_compatibility(priv,
sizeof(struct gve_driver_info), driver_info_mem.bus_addr);
/* It's ok if the device doesn't support this */
if (err == EOPNOTSUPP)
err = 0;
gve_dma_free_coherent(&driver_info_mem);
return (err);
}
/*
 * Bring the data path up: program TX offload assists from the enabled
 * capabilities, register QPLs and create RX/TX rings on the device, then
 * mark the interface running and unmask queue interrupts.  Idempotent
 * via GVE_STATE_FLAG_QUEUES_UP.  Caller must hold the interface lock.
 * Any device-side failure schedules a reset and returns the error.
 */
static int
gve_up(struct gve_priv *priv)
{
if_t ifp = priv->ifp;
int err;
GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);
if (device_is_attached(priv->dev) == 0) {
device_printf(priv->dev, "Cannot bring the iface up when detached\n");
return (ENXIO);
}
if (gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP))
return (0);
/* Rebuild the hwassist set from scratch on each up. */
if_clearhwassist(ifp);
if (if_getcapenable(ifp) & IFCAP_TXCSUM)
if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0);
if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
if_sethwassistbits(ifp, CSUM_IP6_TCP | CSUM_IP6_UDP, 0);
if (if_getcapenable(ifp) & IFCAP_TSO4)
if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
if (if_getcapenable(ifp) & IFCAP_TSO6)
if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);
err = gve_register_qpls(priv);
if (err != 0)
goto reset;
err = gve_create_rx_rings(priv);
if (err != 0)
goto reset;
err = gve_create_tx_rings(priv);
if (err != 0)
goto reset;
if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
if (!gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
if_link_state_change(ifp, LINK_STATE_UP);
gve_set_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
}
gve_unmask_all_queue_irqs(priv);
gve_set_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP);
priv->interface_up_cnt++;
return (0);
reset:
gve_schedule_reset(priv);
return (err);
}
/*
 * Tear the data path down: mark link/interface down, destroy RX/TX rings
 * and unregister QPLs on the device, and mask queue interrupts.  No-op
 * when the queues are not up.  Caller must hold the interface lock.
 * Any device-side failure falls back to scheduling a full reset.
 */
static void
gve_down(struct gve_priv *priv)
{
GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);
if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP))
return;
if (gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
if_link_state_change(priv->ifp, LINK_STATE_DOWN);
gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
}
if_setdrvflagbits(priv->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
if (gve_destroy_rx_rings(priv) != 0)
goto reset;
if (gve_destroy_tx_rings(priv) != 0)
goto reset;
if (gve_unregister_qpls(priv) != 0)
goto reset;
gve_mask_all_queue_irqs(priv);
gve_clear_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP);
priv->interface_down_cnt++;
return;
reset:
gve_schedule_reset(priv);
}
/*
 * Validate and apply a new MTU: the range is [ETHERMIN, priv->max_mtu]
 * as reported by the device.  The device is told first via the admin
 * queue; the ifnet MTU is only updated when that succeeds.
 */
static int
gve_set_mtu(if_t ifp, uint32_t new_mtu)
{
struct gve_priv *priv = if_getsoftc(ifp);
int err;
if ((new_mtu > priv->max_mtu) || (new_mtu < ETHERMIN)) {
device_printf(priv->dev, "Invalid new MTU setting. new mtu: %d max mtu: %d min mtu: %d\n",
new_mtu, priv->max_mtu, ETHERMIN);
return (EINVAL);
}
err = gve_adminq_set_mtu(priv, new_mtu);
if (err == 0) {
if (bootverbose)
device_printf(priv->dev, "MTU set to %d\n", new_mtu);
if_setmtu(ifp, new_mtu);
} else {
device_printf(priv->dev, "Failed to set MTU to %d\n", new_mtu);
}
return (err);
}
/*
 * if_init handler: bring the queues up if they are not already running.
 * The unlocked flag check is only a fast path; gve_up() re-checks the
 * same flag under the interface lock.
 */
static void
gve_init(void *arg)
{
	struct gve_priv *priv = arg;

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP))
		return;

	GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
	gve_up(priv);
	GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
}
/*
 * Interface ioctl handler.  MTU and capability changes require a full
 * down/up cycle because ring and offload state must be re-negotiated
 * with the device.
 */
static int
gve_ioctl(if_t ifp, u_long command, caddr_t data)
{
struct gve_priv *priv;
struct ifreq *ifr;
int rc = 0;
priv = if_getsoftc(ifp);
ifr = (struct ifreq *)data;
switch (command) {
case SIOCSIFMTU:
if (if_getmtu(ifp) == ifr->ifr_mtu)
break;
GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
gve_down(priv);
/*
 * NOTE(review): gve_set_mtu()'s return value is discarded here;
 * rc only reflects the subsequent gve_up().  Confirm whether a
 * rejected MTU should be reported to the caller.
 */
gve_set_mtu(ifp, ifr->ifr_mtu);
rc = gve_up(priv);
GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
break;
case SIOCSIFFLAGS:
if ((if_getflags(ifp) & IFF_UP) != 0) {
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
rc = gve_up(priv);
GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
}
} else {
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
gve_down(priv);
GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
}
}
break;
case SIOCSIFCAP:
if (ifr->ifr_reqcap == if_getcapenable(ifp))
break;
GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
gve_down(priv);
if_setcapenable(ifp, ifr->ifr_reqcap);
rc = gve_up(priv);
GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
break;
case SIOCSIFMEDIA:
/* FALLTHROUGH */
case SIOCGIFMEDIA:
rc = ifmedia_ioctl(ifp, ifr, &priv->media, command);
break;
default:
rc = ether_ioctl(ifp, command, data);
break;
}
return (rc);
}
/*
 * ifmedia change callback.  gVNIC exposes no selectable media; log the
 * attempt and report success, exactly as before.
 */
static int
gve_media_change(if_t ifp)
{
	struct gve_priv *priv;

	priv = if_getsoftc(ifp);
	device_printf(priv->dev, "Media change not supported\n");
	return (0);
}
/*
 * ifmedia status callback: media is always valid Ethernet; report
 * active/autoselect when the link-up state flag is set, otherwise none.
 */
static void
gve_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct gve_priv *priv = if_getsoftc(ifp);
	bool link_up;

	GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
	link_up = gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
	ifmr->ifm_status = IFM_AVALID | (link_up ? IFM_ACTIVE : 0);
	ifmr->ifm_active = IFM_ETHER | (link_up ? IFM_AUTO : IFM_NONE);
	GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
}
/*
 * if_get_counter handler: fold the per-ring counters into totals via
 * gve_accum_stats() and return the one requested; anything not tracked
 * falls through to the stack's defaults.
 */
static uint64_t
gve_get_counter(if_t ifp, ift_counter cnt)
{
struct gve_priv *priv;
uint64_t rpackets = 0;
uint64_t tpackets = 0;
uint64_t rbytes = 0;
uint64_t tbytes = 0;
uint64_t rx_dropped_pkt = 0;
uint64_t tx_dropped_pkt = 0;
priv = if_getsoftc(ifp);
gve_accum_stats(priv, &rpackets, &rbytes, &rx_dropped_pkt, &tpackets,
&tbytes, &tx_dropped_pkt);
switch (cnt) {
case IFCOUNTER_IPACKETS:
return (rpackets);
case IFCOUNTER_OPACKETS:
return (tpackets);
case IFCOUNTER_IBYTES:
return (rbytes);
case IFCOUNTER_OBYTES:
return (tbytes);
case IFCOUNTER_IQDROPS:
return (rx_dropped_pkt);
case IFCOUNTER_OQDROPS:
return (tx_dropped_pkt);
default:
return (if_get_counter_default(ifp, cnt));
}
}
/*
 * Allocate and configure the ifnet, register callbacks, advertise
 * capabilities, and attach to the Ethernet layer.
 *
 * NOTE(review): this span is an unapplied diff hunk; the '-'/'+' marker
 * lines below belong to the patch (dropping the if_alloc() NULL check
 * and the int return, since if_alloc() no longer fails) and are
 * preserved byte-for-byte.
 */
-static int
+static void
gve_setup_ifnet(device_t dev, struct gve_priv *priv)
{
int caps = 0;
if_t ifp;
ifp = priv->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(priv->dev, "Failed to allocate ifnet struct\n");
- return (ENXIO);
- }
-
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setsoftc(ifp, priv);
if_setdev(ifp, dev);
if_setinitfn(ifp, gve_init);
if_setioctlfn(ifp, gve_ioctl);
if_settransmitfn(ifp, gve_xmit_ifp);
if_setqflushfn(ifp, gve_qflush);
#if __FreeBSD_version >= 1400086
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
#else
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_KNOWSEPOCH);
#endif
ifmedia_init(&priv->media, IFM_IMASK, gve_media_change, gve_media_status);
if_setgetcounterfn(ifp, gve_get_counter);
caps = IFCAP_RXCSUM |
IFCAP_TXCSUM |
IFCAP_TXCSUM_IPV6 |
IFCAP_TSO |
IFCAP_LRO;
if ((priv->supported_features & GVE_SUP_JUMBO_FRAMES_MASK) != 0)
caps |= IFCAP_JUMBO_MTU;
if_setcapabilities(ifp, caps);
if_setcapenable(ifp, caps);
if (bootverbose)
device_printf(priv->dev, "Setting initial MTU to %d\n", priv->max_mtu);
if_setmtu(ifp, priv->max_mtu);
ether_ifattach(ifp, priv->mac);
ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);
-
- return (0);
}
/*
 * Allocate the DMA-coherent event-counter array the device writes into;
 * on success priv->counters points at the CPU mapping.
 */
static int
gve_alloc_counter_array(struct gve_priv *priv)
{
int err;
err = gve_dma_alloc_coherent(priv, sizeof(uint32_t) * priv->num_event_counters,
PAGE_SIZE, &priv->counter_array_mem);
if (err != 0)
return (err);
priv->counters = priv->counter_array_mem.cpu_addr;
return (0);
}
/*
 * Free the event-counter DMA memory (if allocated) and zero the handle
 * so a repeated call is harmless.
 */
static void
gve_free_counter_array(struct gve_priv *priv)
{
if (priv->counters != NULL)
gve_dma_free_coherent(&priv->counter_array_mem);
priv->counter_array_mem = (struct gve_dma_handle){};
}
/*
 * Allocate the DMA-coherent per-queue IRQ doorbell index array; on
 * success priv->irq_db_indices points at the CPU mapping.
 */
static int
gve_alloc_irq_db_array(struct gve_priv *priv)
{
int err;
err = gve_dma_alloc_coherent(priv,
sizeof(struct gve_irq_db) * (priv->num_queues), PAGE_SIZE,
&priv->irqs_db_mem);
if (err != 0)
return (err);
priv->irq_db_indices = priv->irqs_db_mem.cpu_addr;
return (0);
}
/*
 * Free the IRQ doorbell DMA memory (if allocated) and zero the handle
 * so a repeated call is harmless.
 */
static void
gve_free_irq_db_array(struct gve_priv *priv)
{
if (priv->irq_db_indices != NULL)
gve_dma_free_coherent(&priv->irqs_db_mem);
priv->irqs_db_mem = (struct gve_dma_handle){};
}
/*
 * Release all host-side ring state in the reverse order of
 * gve_alloc_rings(): IRQs, TX rings, RX rings, then QPLs.
 */
static void
gve_free_rings(struct gve_priv *priv)
{
gve_free_irqs(priv);
gve_free_tx_rings(priv);
gve_free_rx_rings(priv);
gve_free_qpls(priv);
}
/*
 * Allocate all host-side ring state: QPLs, RX rings, TX rings, and
 * IRQs, in that order.  On the first failure every stage is unwound
 * via gve_free_rings() (the individual free routines tolerate
 * never-allocated state) and the error is returned.
 */
static int
gve_alloc_rings(struct gve_priv *priv)
{
	int err;

	if ((err = gve_alloc_qpls(priv)) != 0 ||
	    (err = gve_alloc_rx_rings(priv)) != 0 ||
	    (err = gve_alloc_tx_rings(priv)) != 0 ||
	    (err = gve_alloc_irqs(priv)) != 0) {
		gve_free_rings(priv);
		return (err);
	}
	return (0);
}
/*
 * Undo gve_configure_resources(): tell the device to release its
 * resources (only if they were configured), then free the counter and
 * doorbell arrays.  If the admin command fails the DMA memory is kept,
 * since the device may still be writing into it.
 */
static void
gve_deconfigure_resources(struct gve_priv *priv)
{
int err;
if (gve_get_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK)) {
err = gve_adminq_deconfigure_device_resources(priv);
if (err != 0) {
device_printf(priv->dev, "Failed to deconfigure device resources: err=%d\n",
err);
return;
}
if (bootverbose)
device_printf(priv->dev, "Deconfigured device resources\n");
gve_clear_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
}
gve_free_irq_db_array(priv);
gve_free_counter_array(priv);
}
/*
 * Allocate the counter and IRQ doorbell arrays and register them with
 * the device.  Idempotent via GVE_STATE_FLAG_RESOURCES_OK; unwinds
 * fully on failure.
 */
static int
gve_configure_resources(struct gve_priv *priv)
{
int err;
if (gve_get_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK))
return (0);
err = gve_alloc_counter_array(priv);
if (err != 0)
return (err);
err = gve_alloc_irq_db_array(priv);
if (err != 0)
goto abort;
err = gve_adminq_configure_device_resources(priv);
if (err != 0) {
device_printf(priv->dev, "Failed to configure device resources: err=%d\n",
err);
err = (ENXIO);
goto abort;
}
gve_set_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
if (bootverbose)
device_printf(priv->dev, "Configured device resources\n");
return (0);
abort:
gve_deconfigure_resources(priv);
return (err);
}
/*
 * Read the device's TX/RX queue limits from the register BAR and pick
 * the queue counts, optionally capped by the default_num_queues tunable.
 * The management interrupt uses the MSI-X slot after the queue vectors.
 */
static void
gve_set_queue_cnts(struct gve_priv *priv)
{
priv->tx_cfg.max_queues = gve_reg_bar_read_4(priv, MAX_TX_QUEUES);
priv->rx_cfg.max_queues = gve_reg_bar_read_4(priv, MAX_RX_QUEUES);
priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
if (priv->default_num_queues > 0) {
priv->tx_cfg.num_queues = MIN(priv->default_num_queues,
priv->tx_cfg.num_queues);
priv->rx_cfg.num_queues = MIN(priv->default_num_queues,
priv->rx_cfg.num_queues);
}
priv->num_queues = priv->tx_cfg.num_queues + priv->rx_cfg.num_queues;
priv->mgmt_msix_idx = priv->num_queues;
}
/*
 * Stand up the admin queue, verify driver compatibility with the
 * device, fetch the device description, and derive the queue counts.
 * Releases the admin queue again on any failure.
 */
static int
gve_alloc_adminq_and_describe_device(struct gve_priv *priv)
{
int err;
if ((err = gve_adminq_alloc(priv)) != 0)
return (err);
if ((err = gve_verify_driver_compatibility(priv)) != 0) {
device_printf(priv->dev,
"Failed to verify driver compatibility: err=%d\n", err);
goto abort;
}
if ((err = gve_adminq_describe_device(priv)) != 0)
goto abort;
gve_set_queue_cnts(priv);
priv->num_registered_pages = 0;
return (0);
abort:
gve_release_adminq(priv);
return (err);
}
/*
 * Request an asynchronous device reset via the service taskqueue.
 * Suppressed while a reset is already in progress.
 */
void
gve_schedule_reset(struct gve_priv *priv)
{
if (gve_get_state_flag(priv, GVE_STATE_FLAG_IN_RESET))
return;
device_printf(priv->dev, "Scheduling reset task!\n");
gve_set_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
taskqueue_enqueue(priv->service_tq, &priv->service_task);
}
/*
 * Full device teardown used by detach: stop the data path, release
 * device resources, then tear down the admin queue.
 */
static void
gve_destroy(struct gve_priv *priv)
{
gve_down(priv);
gve_deconfigure_resources(priv);
gve_release_adminq(priv);
}
/*
 * Bring the device back after a reset: re-create the admin queue,
 * re-register resources, and bring the queues up.  Failures are only
 * logged; gve_up() itself schedules another reset on device errors.
 */
static void
gve_restore(struct gve_priv *priv)
{
int err;
err = gve_adminq_alloc(priv);
if (err != 0)
goto abort;
err = gve_configure_resources(priv);
if (err != 0)
goto abort;
err = gve_up(priv);
if (err != 0)
goto abort;
return;
abort:
device_printf(priv->dev, "Restore failed!\n");
return;
}
/*
 * Service-task handler for a pending reset request: mark the interface
 * down, drop the admin queue so the NIC discards all registered state,
 * clear the matching driver-side flags, run gve_down() to refresh ring
 * state, then rebuild everything via gve_restore().
 */
static void
gve_handle_reset(struct gve_priv *priv)
{
if (!gve_get_state_flag(priv, GVE_STATE_FLAG_DO_RESET))
return;
gve_clear_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
gve_set_state_flag(priv, GVE_STATE_FLAG_IN_RESET);
GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
if_setdrvflagbits(priv->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
if_link_state_change(priv->ifp, LINK_STATE_DOWN);
gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
/*
* Releasing the adminq causes the NIC to destroy all resources
* registered with it, so by clearing the flags beneath we cause
* the subsequent gve_down call below to not attempt to tell the
* NIC to destroy these resources again.
*
* The call to gve_down is needed in the first place to refresh
* the state and the DMA-able memory within each driver ring.
*/
gve_release_adminq(priv);
gve_clear_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
gve_clear_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK);
gve_clear_state_flag(priv, GVE_STATE_FLAG_RX_RINGS_OK);
gve_clear_state_flag(priv, GVE_STATE_FLAG_TX_RINGS_OK);
gve_down(priv);
gve_restore(priv);
GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
priv->reset_cnt++;
gve_clear_state_flag(priv, GVE_STATE_FLAG_IN_RESET);
}
/*
 * Poll the device status register for the link bit and propagate any
 * change to the stack and the driver's LINK_UP state flag.
 */
static void
gve_handle_link_status(struct gve_priv *priv)
{
uint32_t status = gve_reg_bar_read_4(priv, DEVICE_STATUS);
bool link_up = status & GVE_DEVICE_STATUS_LINK_STATUS;
/* No transition: nothing to report. */
if (link_up == gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP))
return;
if (link_up) {
if (bootverbose)
device_printf(priv->dev, "Device link is up.\n");
if_link_state_change(priv->ifp, LINK_STATE_UP);
gve_set_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
} else {
device_printf(priv->dev, "Device link is down.\n");
if_link_state_change(priv->ifp, LINK_STATE_DOWN);
gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
}
}
/*
 * Service taskqueue handler: honor a device-requested reset (unless one
 * is already running), process any pending reset, and refresh link
 * status.
 */
static void
gve_service_task(void *arg, int pending)
{
struct gve_priv *priv = (struct gve_priv *)arg;
uint32_t status = gve_reg_bar_read_4(priv, DEVICE_STATUS);
if (((GVE_DEVICE_STATUS_RESET_MASK & status) != 0) &&
!gve_get_state_flag(priv, GVE_STATE_FLAG_IN_RESET)) {
device_printf(priv->dev, "Device requested reset\n");
gve_set_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
}
gve_handle_reset(priv);
gve_handle_link_status(priv);
}
/*
 * PCI probe: match the device's vendor/device ID pair against the
 * gve_devs table and set the description on a hit.
 */
static int
gve_probe(device_t dev)
{
	struct gve_dev *entry;
	uint16_t vid, did;
	int i;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (i = 0; i < nitems(gve_devs); i++) {
		entry = &gve_devs[i];
		if (entry->vendor_id == vid && entry->device_id == did) {
			device_set_desc(dev, entry->name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}
/*
 * Release whichever of the three memory BARs (MSI-X table, doorbells,
 * registers) were successfully allocated; NULL entries are skipped so
 * this is safe to call from partial-attach error paths.
 */
static void
gve_free_sys_res_mem(struct gve_priv *priv)
{
if (priv->msix_table != NULL)
bus_release_resource(priv->dev, SYS_RES_MEMORY,
rman_get_rid(priv->msix_table), priv->msix_table);
if (priv->db_bar != NULL)
bus_release_resource(priv->dev, SYS_RES_MEMORY,
rman_get_rid(priv->db_bar), priv->db_bar);
if (priv->reg_bar != NULL)
bus_release_resource(priv->dev, SYS_RES_MEMORY,
rman_get_rid(priv->reg_bar), priv->reg_bar);
}
/*
 * Device attach: map the register, doorbell, and MSI-X BARs, describe
 * the device over the admin queue, allocate resources and rings, create
 * the ifnet, announce the driver version to the device, and start the
 * service taskqueue.  All failures unwind through the shared abort path.
 *
 * NOTE(review): the '-'/'+' marker lines mid-function are an unapplied
 * diff hunk (gve_setup_ifnet() becoming void); preserved byte-for-byte.
 */
static int
gve_attach(device_t dev)
{
struct gve_priv *priv;
int rid;
int err;
priv = device_get_softc(dev);
priv->dev = dev;
GVE_IFACE_LOCK_INIT(priv->gve_iface_lock);
pci_enable_busmaster(dev);
rid = PCIR_BAR(GVE_REGISTER_BAR);
priv->reg_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&rid, RF_ACTIVE);
if (priv->reg_bar == NULL) {
device_printf(dev, "Failed to allocate BAR0\n");
err = ENXIO;
goto abort;
}
rid = PCIR_BAR(GVE_DOORBELL_BAR);
priv->db_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&rid, RF_ACTIVE);
if (priv->db_bar == NULL) {
device_printf(dev, "Failed to allocate BAR2\n");
err = ENXIO;
goto abort;
}
rid = pci_msix_table_bar(priv->dev);
priv->msix_table = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&rid, RF_ACTIVE);
if (priv->msix_table == NULL) {
device_printf(dev, "Failed to allocate msix table\n");
err = ENXIO;
goto abort;
}
err = gve_alloc_adminq_and_describe_device(priv);
if (err != 0)
goto abort;
err = gve_configure_resources(priv);
if (err != 0)
goto abort;
err = gve_alloc_rings(priv);
if (err != 0)
goto abort;
- err = gve_setup_ifnet(dev, priv);
- if (err != 0)
- goto abort;
+ gve_setup_ifnet(dev, priv);
priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
/* Tell the device which driver version is attached. */
bus_write_multi_1(priv->reg_bar, DRIVER_VERSION, GVE_DRIVER_VERSION,
sizeof(GVE_DRIVER_VERSION) - 1);
TASK_INIT(&priv->service_task, 0, gve_service_task, priv);
priv->service_tq = taskqueue_create("gve service", M_WAITOK | M_ZERO,
taskqueue_thread_enqueue, &priv->service_tq);
taskqueue_start_threads(&priv->service_tq, 1, PI_NET, "%s service tq",
device_get_nameunit(priv->dev));
gve_setup_sysctl(priv);
if (bootverbose)
device_printf(priv->dev, "Successfully attached %s", GVE_DRIVER_VERSION);
return (0);
abort:
gve_free_rings(priv);
gve_deconfigure_resources(priv);
gve_release_adminq(priv);
gve_free_sys_res_mem(priv);
GVE_IFACE_LOCK_DESTROY(priv->gve_iface_lock);
return (err);
}
/*
 * Device detach: detach from the Ethernet layer first so no new ioctls
 * arrive, tear down the device under the interface lock, release host
 * memory and BARs, drain and free the service taskqueue, then free the
 * ifnet.
 */
static int
gve_detach(device_t dev)
{
struct gve_priv *priv = device_get_softc(dev);
if_t ifp = priv->ifp;
ether_ifdetach(ifp);
GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
gve_destroy(priv);
GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
gve_free_rings(priv);
gve_free_sys_res_mem(priv);
GVE_IFACE_LOCK_DESTROY(priv->gve_iface_lock);
/* A task re-enqueued while cancelling must also be drained. */
while (taskqueue_cancel(priv->service_tq, &priv->service_task, NULL))
taskqueue_drain(priv->service_tq, &priv->service_task);
taskqueue_free(priv->service_tq);
if_free(ifp);
return (bus_generic_detach(dev));
}
/* newbus glue: device methods, driver declaration, and module hookup. */
static device_method_t gve_methods[] = {
DEVMETHOD(device_probe, gve_probe),
DEVMETHOD(device_attach, gve_attach),
DEVMETHOD(device_detach, gve_detach),
DEVMETHOD_END
};
static driver_t gve_driver = {
"gve",
gve_methods,
sizeof(struct gve_priv)
};
/* Older kernels still require a devclass argument to DRIVER_MODULE(). */
#if __FreeBSD_version < 1301503
static devclass_t gve_devclass;
DRIVER_MODULE(gve, pci, gve_driver, gve_devclass, 0, 0);
#else
DRIVER_MODULE(gve, pci, gve_driver, 0, 0);
#endif
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, gve, gve_devs,
nitems(gve_devs));
diff --git a/sys/dev/iicbus/if_ic.c b/sys/dev/iicbus/if_ic.c
index 52ab5afb9c4e..caca35a3d22b 100644
--- a/sys/dev/iicbus/if_ic.c
+++ b/sys/dev/iicbus/if_ic.c
@@ -1,435 +1,433 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 1998, 2001 Nicolas Souchu
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
/*
* I2C bus IP driver
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/netisr.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <net/bpf.h>
#include <dev/iicbus/iiconf.h>
#include <dev/iicbus/iicbus.h>
#include "iicbus_if.h"
#define PCF_MASTER_ADDRESS 0xaa
/* Each frame carries a 32-bit address-family header (see icoutput()). */
#define ICHDRLEN sizeof(u_int32_t)
#define ICMTU 1500 /* default mtu */
/* Per-instance state for one IP-over-I2C interface. */
struct ic_softc {
if_t ic_ifp;
device_t ic_dev;
u_char ic_addr; /* peer I2C address */
int ic_flags;
char *ic_obuf; /* output staging buffer (MTU + header) */
char *ic_ifbuf; /* input reassembly buffer (MTU + header) */
char *ic_cp; /* current write cursor into ic_ifbuf */
int ic_xfercnt; /* bytes received so far in this transfer */
int ic_iferrs; /* errors seen during the current transfer */
struct mtx ic_lock;
};
#define IC_SENDING		0x0001	/* a transmit is in progress */
#define IC_OBUF_BUSY		0x0002	/* ic_obuf owned by icoutput() */
#define IC_IFBUF_BUSY		0x0004	/* ic_ifbuf owned by icintr() */
#define IC_BUFFERS_BUSY		(IC_OBUF_BUSY | IC_IFBUF_BUSY)
/*
 * Fix: this flag was defined as 0x0004, aliasing IC_IFBUF_BUSY.  With
 * that value, ic_alloc_buffers() setting the waiter bit also marked the
 * input buffer busy, so its own "while (flags & IC_BUFFERS_BUSY)" sleep
 * loop could never terminate, and the wakeup tests in icintr()/
 * icoutput() ("waiter present and no buffer busy") were meaningless.
 * It must be a bit distinct from all IC_BUFFERS_BUSY bits.
 */
#define IC_BUFFER_WAITER	0x0008	/* ic_alloc_buffers() is sleeping */
/* Forward declarations for the newbus and ifnet entry points below. */
static int icprobe(device_t);
static int icattach(device_t);
static int icioctl(if_t, u_long, caddr_t);
static int icoutput(if_t, struct mbuf *, const struct sockaddr *,
struct route *);
static int icintr(device_t, int, char *);
static device_method_t ic_methods[] = {
/* device interface */
DEVMETHOD(device_probe, icprobe),
DEVMETHOD(device_attach, icattach),
/* iicbus interface */
DEVMETHOD(iicbus_intr, icintr),
{ 0, 0 }
};
static driver_t ic_driver = {
"ic",
ic_methods,
sizeof(struct ic_softc),
};
/*
 * (Re)allocate the input/output staging buffers for a new MTU.  The
 * malloc()s happen before taking the lock (they may sleep); the swap
 * waits until neither buffer is in use by icintr()/icoutput(), using
 * the softc pointer as the sleep channel.
 */
static void
ic_alloc_buffers(struct ic_softc *sc, int mtu)
{
char *obuf, *ifbuf;
obuf = malloc(mtu + ICHDRLEN, M_DEVBUF, M_WAITOK);
ifbuf = malloc(mtu + ICHDRLEN, M_DEVBUF, M_WAITOK);
mtx_lock(&sc->ic_lock);
while (sc->ic_flags & IC_BUFFERS_BUSY) {
sc->ic_flags |= IC_BUFFER_WAITER;
mtx_sleep(sc, &sc->ic_lock, 0, "icalloc", 0);
sc->ic_flags &= ~IC_BUFFER_WAITER;
}
/* free(NULL) is a no-op, so the first call is safe. */
free(sc->ic_obuf, M_DEVBUF);
free(sc->ic_ifbuf, M_DEVBUF);
sc->ic_obuf = obuf;
sc->ic_ifbuf = ifbuf;
if_setmtu(sc->ic_ifp, mtu);
mtx_unlock(&sc->ic_lock);
}
/*
* icprobe()
*/
/* Probe: only attach when explicitly hinted (no wildcard matching). */
static int
icprobe(device_t dev)
{
return (BUS_PROBE_NOWILDCARD);
}
/*
* icattach()
*/
/*
 * Attach: allocate the ifnet, initialize the softc lock and fields,
 * register the ioctl/output handlers, allocate the default-MTU buffers,
 * and attach to the stack and BPF (DLT_NULL with the 32-bit AF header).
 *
 * NOTE(review): the '-' marker lines are an unapplied diff hunk
 * (dropping the obsolete if_alloc() NULL check); preserved byte-for-byte.
 */
static int
icattach(device_t dev)
{
struct ic_softc *sc = (struct ic_softc *)device_get_softc(dev);
if_t ifp;
ifp = sc->ic_ifp = if_alloc(IFT_PARA);
- if (ifp == NULL)
- return (ENOSPC);
mtx_init(&sc->ic_lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF);
sc->ic_addr = PCF_MASTER_ADDRESS; /* XXX only PCF masters */
sc->ic_dev = dev;
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_SIMPLEX | IFF_POINTOPOINT | IFF_MULTICAST);
if_setioctlfn(ifp, icioctl);
if_setoutputfn(ifp, icoutput);
if_setifheaderlen(ifp, 0);
if_setsendqlen(ifp, ifqmaxlen);
ic_alloc_buffers(sc, ICMTU);
if_attach(ifp);
bpfattach(ifp, DLT_NULL, ICHDRLEN);
return (0);
}
/*
* iciotcl()
*/
/*
 * Interface ioctl handler.  Address assignment implies IFF_UP and then
 * shares the SIOCSIFFLAGS path (the "locked" label), which acquires or
 * releases the underlying iicbus as the interface goes up or down.
 */
static int
icioctl(if_t ifp, u_long cmd, caddr_t data)
{
struct ic_softc *sc = if_getsoftc(ifp);
device_t icdev = sc->ic_dev;
device_t parent = device_get_parent(icdev);
struct ifaddr *ifa = (struct ifaddr *)data;
struct ifreq *ifr = (struct ifreq *)data;
int error;
switch (cmd) {
case SIOCAIFADDR:
case SIOCSIFADDR:
if (ifa->ifa_addr->sa_family != AF_INET)
return (EAFNOSUPPORT);
mtx_lock(&sc->ic_lock);
if_setflagbits(ifp, IFF_UP, 0);
goto locked;
case SIOCSIFFLAGS:
mtx_lock(&sc->ic_lock);
locked:
/* Going down: stop and drop our claim on the bus. */
if ((!(if_getflags(ifp) & IFF_UP)) &&
(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
/* XXX disable PCF */
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
mtx_unlock(&sc->ic_lock);
/* IFF_UP is not set, try to release the bus anyway */
iicbus_release_bus(parent, icdev);
break;
}
/* Coming up: claim the bus (may sleep, so drop the lock). */
if (((if_getflags(ifp) & IFF_UP)) &&
(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
mtx_unlock(&sc->ic_lock);
if ((error = iicbus_request_bus(parent, icdev,
IIC_WAIT | IIC_INTR)))
return (error);
mtx_lock(&sc->ic_lock);
iicbus_reset(parent, IIC_FASTEST, 0, NULL);
if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
}
mtx_unlock(&sc->ic_lock);
break;
case SIOCSIFMTU:
ic_alloc_buffers(sc, ifr->ifr_mtu);
break;
case SIOCGIFMTU:
mtx_lock(&sc->ic_lock);
ifr->ifr_mtu = if_getmtu(sc->ic_ifp);
mtx_unlock(&sc->ic_lock);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
if (ifr == NULL)
return (EAFNOSUPPORT); /* XXX */
switch (ifr->ifr_addr.sa_family) {
case AF_INET:
break;
default:
return (EAFNOSUPPORT);
}
break;
default:
return (EINVAL);
}
return (0);
}
/*
* icintr()
*/
/*
 * icintr()
 *
 * iicbus slave interrupt handler: assembles incoming bytes into
 * sc->ic_ifbuf (INTR_RECEIVE), and on INTR_STOP strips the 32-bit
 * address-family header and dispatches the packet to the IP netisr.
 *
 * Fix: the buffer-waiter wakeup used wakeup(&sc) — the address of the
 * *local* softc pointer — while ic_alloc_buffers() sleeps on the softc
 * pointer itself (mtx_sleep(sc, ...)).  The channels could never match,
 * so a thread waiting to swap buffers was never woken from here.  Wake
 * the correct channel, sc.
 */
static int
icintr(device_t dev, int event, char *ptr)
{
	struct ic_softc *sc = (struct ic_softc *)device_get_softc(dev);
	struct mbuf *top;
	int len;

	mtx_lock(&sc->ic_lock);
	switch (event) {
	case INTR_GENERAL:
	case INTR_START:
		/* Master addressed us: reset the receive cursor. */
		sc->ic_cp = sc->ic_ifbuf;
		sc->ic_xfercnt = 0;
		sc->ic_flags |= IC_IFBUF_BUSY;
		break;
	case INTR_STOP:
		/* if any error occurred during transfert,
		 * drop the packet */
		sc->ic_flags &= ~IC_IFBUF_BUSY;
		if ((sc->ic_flags & (IC_BUFFERS_BUSY | IC_BUFFER_WAITER)) ==
		    IC_BUFFER_WAITER)
			wakeup(sc);	/* matches mtx_sleep(sc, ...) */
		if (sc->ic_iferrs)
			goto err;
		if ((len = sc->ic_xfercnt) == 0)
			break;			/* ignore */
		if (len <= ICHDRLEN)
			goto err;
		/* Strip the address-family header before handing up. */
		len -= ICHDRLEN;
		if_inc_counter(sc->ic_ifp, IFCOUNTER_IPACKETS, 1);
		if_inc_counter(sc->ic_ifp, IFCOUNTER_IBYTES, len);
		BPF_TAP(sc->ic_ifp, sc->ic_ifbuf, len + ICHDRLEN);
		top = m_devget(sc->ic_ifbuf + ICHDRLEN, len, 0, sc->ic_ifp, 0);
		if (top) {
			struct epoch_tracker et;

			/* Drop the softc lock across the network stack. */
			mtx_unlock(&sc->ic_lock);
			M_SETFIB(top, if_getfib(sc->ic_ifp));
			NET_EPOCH_ENTER(et);
			netisr_dispatch(NETISR_IP, top);
			NET_EPOCH_EXIT(et);
			mtx_lock(&sc->ic_lock);
		}
		break;
	err:
		if_printf(sc->ic_ifp, "errors (%d)!\n", sc->ic_iferrs);
		sc->ic_iferrs = 0;	/* reset error count */
		if_inc_counter(sc->ic_ifp, IFCOUNTER_IERRORS, 1);
		break;
	case INTR_RECEIVE:
		/* Count overruns instead of writing past ic_ifbuf. */
		if (sc->ic_xfercnt >= if_getmtu(sc->ic_ifp) + ICHDRLEN) {
			sc->ic_iferrs++;
		} else {
			*sc->ic_cp++ = *ptr;
			sc->ic_xfercnt++;
		}
		break;
	case INTR_NOACK:			/* xfer terminated by master */
		break;
	case INTR_TRANSMIT:
		*ptr = 0xff;			/* XXX */
		break;
	case INTR_ERROR:
		sc->ic_iferrs++;
		break;
	default:
		panic("%s: unknown event (%d)!", __func__, event);
	}
	mtx_unlock(&sc->ic_lock);
	return (0);
}
/*
* icoutput()
*/
/*
 * icoutput()
 *
 * if_output handler: prepend the 32-bit address-family header, flatten
 * the mbuf chain into sc->ic_obuf, and write it to the peer with a
 * blocking iicbus transfer (lock dropped across the bus write).
 *
 * Fix: as in icintr(), the buffer-waiter wakeup used wakeup(&sc) while
 * ic_alloc_buffers() sleeps on channel sc (mtx_sleep(sc, ...)); the
 * mismatched channel meant the waiter was never woken.  Wake sc.
 */
static int
icoutput(if_t ifp, struct mbuf *m, const struct sockaddr *dst,
    struct route *ro)
{
	struct ic_softc *sc = if_getsoftc(ifp);
	device_t icdev = sc->ic_dev;
	device_t parent = device_get_parent(icdev);
	int len, sent;
	struct mbuf *mm;
	u_char *cp;
	u_int32_t hdr;

	/* BPF writes need to be handled specially. */
	if (dst->sa_family == AF_UNSPEC || dst->sa_family == pseudo_AF_HDRCMPLT)
		bcopy(dst->sa_data, &hdr, sizeof(hdr));
	else
		hdr = RO_GET_FAMILY(ro, dst);

	mtx_lock(&sc->ic_lock);
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);

	/* already sending? */
	if (sc->ic_flags & IC_SENDING) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		goto error;
	}

	/* insert header */
	bcopy ((char *)&hdr, sc->ic_obuf, ICHDRLEN);

	/* Flatten the chain, refusing anything larger than the MTU. */
	cp = sc->ic_obuf + ICHDRLEN;
	len = 0;
	mm = m;
	do {
		if (len + mm->m_len > if_getmtu(sc->ic_ifp)) {
			/* packet too large */
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			goto error;
		}
		bcopy(mtod(mm,char *), cp, mm->m_len);
		cp += mm->m_len;
		len += mm->m_len;
	} while ((mm = mm->m_next));

	BPF_MTAP2(ifp, &hdr, sizeof(hdr), m);

	sc->ic_flags |= (IC_SENDING | IC_OBUF_BUSY);

	m_freem(m);
	mtx_unlock(&sc->ic_lock);

	/* send the packet */
	if (iicbus_block_write(parent, sc->ic_addr, sc->ic_obuf,
	len + ICHDRLEN, &sent))
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	else {
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
	}

	mtx_lock(&sc->ic_lock);
	sc->ic_flags &= ~(IC_SENDING | IC_OBUF_BUSY);
	if ((sc->ic_flags & (IC_BUFFERS_BUSY | IC_BUFFER_WAITER)) ==
	    IC_BUFFER_WAITER)
		wakeup(sc);	/* matches mtx_sleep(sc, ...) */
	mtx_unlock(&sc->ic_lock);

	return (0);

error:
	m_freem(m);
	mtx_unlock(&sc->ic_lock);
	return(0);
}
/* Module glue: attach under iicbus and declare the bus dependency. */
DRIVER_MODULE(ic, iicbus, ic_driver, 0, 0);
MODULE_DEPEND(ic, iicbus, IICBUS_MINVER, IICBUS_PREFVER, IICBUS_MAXVER);
MODULE_VERSION(ic, 1);
diff --git a/sys/dev/jme/if_jme.c b/sys/dev/jme/if_jme.c
index aebeb9c617fa..96824e2d7f27 100644
--- a/sys/dev/jme/if_jme.c
+++ b/sys/dev/jme/if_jme.c
@@ -1,3452 +1,3446 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <dev/jme/if_jmereg.h>
#include <dev/jme/if_jmevar.h>
/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
/* Define the following to disable printing Rx errors. */
#undef JME_SHOW_ERRORS
#define JME_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
MODULE_DEPEND(jme, pci, 1, 1, 1);
MODULE_DEPEND(jme, ether, 1, 1, 1);
MODULE_DEPEND(jme, miibus, 1, 1, 1);
/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
TUNABLE_INT("hw.jme.msi_disable", &msi_disable);
TUNABLE_INT("hw.jme.msix_disable", &msix_disable);
/*
* Devices supported by this driver.
*/
/*
 * PCI ID match table; jme_probe() compares the device's vendor/device
 * IDs against these entries.
 */
static struct jme_dev {
uint16_t jme_vendorid;	/* PCI vendor ID */
uint16_t jme_deviceid;	/* PCI device ID */
const char *jme_name;	/* description set on the device at probe time */
} jme_devs[] = {
{ VENDORID_JMICRON, DEVICEID_JMC250,
"JMicron Inc, JMC25x Gigabit Ethernet" },
{ VENDORID_JMICRON, DEVICEID_JMC260,
"JMicron Inc, JMC26x Fast Ethernet" },
};
static int jme_miibus_readreg(device_t, int, int);
static int jme_miibus_writereg(device_t, int, int, int);
static void jme_miibus_statchg(device_t);
static void jme_mediastatus(if_t, struct ifmediareq *);
static int jme_mediachange(if_t);
static int jme_probe(device_t);
static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
static int jme_eeprom_macaddr(struct jme_softc *);
static int jme_efuse_macaddr(struct jme_softc *);
static void jme_reg_macaddr(struct jme_softc *);
static void jme_set_macaddr(struct jme_softc *, uint8_t *);
static void jme_map_intr_vector(struct jme_softc *);
static int jme_attach(device_t);
static int jme_detach(device_t);
static void jme_sysctl_node(struct jme_softc *);
static void jme_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int jme_dma_alloc(struct jme_softc *);
static void jme_dma_free(struct jme_softc *);
static int jme_shutdown(device_t);
static void jme_setlinkspeed(struct jme_softc *);
static void jme_setwol(struct jme_softc *);
static int jme_suspend(device_t);
static int jme_resume(device_t);
static int jme_encap(struct jme_softc *, struct mbuf **);
static void jme_start(if_t);
static void jme_start_locked(if_t);
static void jme_watchdog(struct jme_softc *);
static int jme_ioctl(if_t, u_long, caddr_t);
static void jme_mac_config(struct jme_softc *);
static void jme_link_task(void *, int);
static int jme_intr(void *);
static void jme_int_task(void *, int);
static void jme_txeof(struct jme_softc *);
static __inline void jme_discard_rxbuf(struct jme_softc *, int);
static void jme_rxeof(struct jme_softc *);
static int jme_rxintr(struct jme_softc *, int);
static void jme_tick(void *);
static void jme_reset(struct jme_softc *);
static void jme_init(void *);
static void jme_init_locked(struct jme_softc *);
static void jme_stop(struct jme_softc *);
static void jme_stop_tx(struct jme_softc *);
static void jme_stop_rx(struct jme_softc *);
static int jme_init_rx_ring(struct jme_softc *);
static void jme_init_tx_ring(struct jme_softc *);
static void jme_init_ssb(struct jme_softc *);
static int jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
static void jme_set_vlan(struct jme_softc *);
static void jme_set_filter(struct jme_softc *);
static void jme_stats_clear(struct jme_softc *);
static void jme_stats_save(struct jme_softc *);
static void jme_stats_update(struct jme_softc *);
static void jme_phy_down(struct jme_softc *);
static void jme_phy_up(struct jme_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS);
/* newbus method table wiring device lifecycle and MII accessors. */
static device_method_t jme_methods[] = {
/* Device interface. */
DEVMETHOD(device_probe, jme_probe),
DEVMETHOD(device_attach, jme_attach),
DEVMETHOD(device_detach, jme_detach),
DEVMETHOD(device_shutdown, jme_shutdown),
DEVMETHOD(device_suspend, jme_suspend),
DEVMETHOD(device_resume, jme_resume),
/* MII interface. */
DEVMETHOD(miibus_readreg, jme_miibus_readreg),
DEVMETHOD(miibus_writereg, jme_miibus_writereg),
DEVMETHOD(miibus_statchg, jme_miibus_statchg),
{ NULL, NULL }	/* table terminator */
};
/* Driver declaration: name, methods, and per-instance softc size. */
static driver_t jme_driver = {
"jme",
jme_methods,
sizeof(struct jme_softc)
};
/* Attach jme to the PCI bus; hang a miibus instance off each jme device. */
DRIVER_MODULE(jme, pci, jme_driver, 0, 0);
DRIVER_MODULE(miibus, jme, miibus_driver, 0, 0);
/* BAR(0): memory-mapped register window. */
static struct resource_spec jme_res_spec_mem[] = {
{ SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
{ -1, 0, 0 }
};
/* Legacy INTx interrupt (shareable). */
static struct resource_spec jme_irq_spec_legacy[] = {
{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
{ -1, 0, 0 }
};
/* Single MSI/MSI-X vector (rid 1). */
static struct resource_spec jme_irq_spec_msi[] = {
{ SYS_RES_IRQ, 1, RF_ACTIVE },
{ -1, 0, 0 }
};
/*
 * Read a PHY register on the MII of the JMC250.
 * Posts a read command on the SMI interface and polls until the
 * EXECUTE bit self-clears; returns the data field, or 0 on timeout.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc = device_get_softc(dev);
	uint32_t smi;
	int n;

	/* For FPGA version, PHY address 0 should be ignored. */
	if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0)
		return (0);

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
	smi = 0;
	n = JME_PHY_TIMEOUT;
	do {
		DELAY(1);
		smi = CSR_READ_4(sc, JME_SMI);
		if ((smi & SMI_OP_EXECUTE) == 0)
			break;
	} while (--n > 0);

	if (n == 0) {
		device_printf(sc->jme_dev, "phy read timeout : %d\n", reg);
		return (0);
	}
	return ((smi & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}
/*
 * Write a PHY register on the MII of the JMC250.
 * Posts the write command with the data folded into the SMI word and
 * polls until the EXECUTE bit self-clears.  Always returns 0; a
 * timeout is only logged.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc = device_get_softc(dev);
	int n;

	/* For FPGA version, PHY address 0 should be ignored. */
	if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0)
		return (0);

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
	for (n = JME_PHY_TIMEOUT; n > 0; n--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_SMI) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (n == 0)
		device_printf(sc->jme_dev, "phy write timeout : %d\n", reg);
	return (0);
}
/*
 * MII status-change callback: defer the actual link handling to the
 * link task so it runs outside the MII layer's calling context.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_swi, &sc->jme_link_task);
}
/*
 * Report the current interface media status.  Only polls the PHY when
 * the interface is administratively up; otherwise leaves ifmr untouched.
 */
static void
jme_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii;

	JME_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		mii = device_get_softc(sc->jme_miibus);
		mii_pollstat(mii);
		ifmr->ifm_status = mii->mii_media_status;
		ifmr->ifm_active = mii->mii_media_active;
	}
	JME_UNLOCK(sc);
}
/*
 * Program the hardware for the newly-selected media: reset every PHY
 * on the bus, then let the MII layer apply the new setting.
 * Returns the mii_mediachg() status.
 */
static int
jme_mediachange(if_t ifp)
{
	struct jme_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii;
	struct mii_softc *phy;
	int rv;

	JME_LOCK(sc);
	mii = device_get_softc(sc->jme_miibus);
	LIST_FOREACH(phy, &mii->mii_phys, mii_list)
		PHY_RESET(phy);
	rv = mii_mediachg(mii);
	JME_UNLOCK(sc);
	return (rv);
}
/*
 * Probe: match the PCI vendor/device pair against the jme_devs table.
 * Returns BUS_PROBE_DEFAULT on a match, ENXIO otherwise.
 */
static int
jme_probe(device_t dev)
{
	uint16_t vid, did;
	int i;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (i = 0; i < nitems(jme_devs); i++) {
		const struct jme_dev *entry = &jme_devs[i];

		if (entry->jme_vendorid == vid &&
		    entry->jme_deviceid == did) {
			device_set_desc(dev, entry->jme_name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}
/*
 * Read one byte from the SMB-attached EEPROM at `addr' into `*val'.
 * Returns 0 on success, or ETIMEDOUT if the SMB interface fails to go
 * idle or to complete the read within JME_TIMEOUT 1us polls.
 */
static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
uint32_t reg;
int i;
*val = 0;
/* Wait for the SMB hardware to become idle. */
for (i = JME_TIMEOUT; i > 0; i--) {
reg = CSR_READ_4(sc, JME_SMBCSR);
if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
break;
DELAY(1);
}
if (i == 0) {
device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
return (ETIMEDOUT);
}
/* Trigger a read of the requested byte. */
reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
/* The TRIGGER bit self-clears when the command completes. */
for (i = JME_TIMEOUT; i > 0; i--) {
DELAY(1);
reg = CSR_READ_4(sc, JME_SMBINTF);
if ((reg & SMBINTF_CMD_TRIGGER) == 0)
break;
}
if (i == 0) {
device_printf(sc->jme_dev, "EEPROM read timeout!\n");
return (ETIMEDOUT);
}
/* Extract the returned data byte. */
reg = CSR_READ_4(sc, JME_SMBINTF);
*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
return (0);
}
/*
 * Extract the station address from the EEPROM by walking its
 * descriptor list.  Each descriptor that targets a JME_PAR0..PAR5
 * register contributes one address byte; the address is accepted only
 * when all ETHER_ADDR_LEN bytes were found.  Returns 0 on success,
 * ENOENT if the signature or a complete address is missing.
 */
static int
jme_eeprom_macaddr(struct jme_softc *sc)
{
uint8_t eaddr[ETHER_ADDR_LEN];
uint8_t fup, reg, val;
uint32_t offset;
int match;
offset = 0;
/* Verify the two-byte EEPROM signature before trusting contents. */
if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
fup != JME_EEPROM_SIG0)
return (ENOENT);
if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
fup != JME_EEPROM_SIG1)
return (ENOENT);
match = 0;
do {
/* Descriptor layout: [func/page byte][register][data]. */
if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
break;
if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
(fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
break;
if (reg >= JME_PAR0 &&
reg < JME_PAR0 + ETHER_ADDR_LEN) {
if (jme_eeprom_read_byte(sc, offset + 2,
&val) != 0)
break;
eaddr[reg - JME_PAR0] = val;
match++;
}
}
/* Check for the end of EEPROM descriptor. */
if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
break;
/* Try next eeprom descriptor. */
offset += JME_EEPROM_DESC_BYTES;
} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
if (match == ETHER_ADDR_LEN) {
bcopy(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN);
return (0);
}
return (ENOENT);
}
/*
 * Have the eFuse controller reload the factory station address into
 * the MAC (readable afterwards via JME_PAR0/PAR1 — see
 * jme_reg_macaddr()).  Returns 0 on success, ENOENT if the initial
 * autoload failed, or ETIMEDOUT if the reload does not complete.
 * Note: "AUTOLAOD" is the register macro's own spelling.
 */
static int
jme_efuse_macaddr(struct jme_softc *sc)
{
uint32_t reg;
int i;
reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR | EFUSE_CTL1_AUTOLAOD_DONE)) !=
EFUSE_CTL1_AUTOLAOD_DONE)
return (ENOENT);
/* Reset eFuse controller. */
reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4);
reg |= EFUSE_CTL2_RESET;
pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4);
reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4);
reg &= ~EFUSE_CTL2_RESET;
pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4);
/* Have eFuse reload station address to MAC controller. */
reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
reg &= ~EFUSE_CTL1_CMD_MASK;
reg |= EFUSE_CTL1_CMD_AUTOLOAD | EFUSE_CTL1_EXECUTE;
pci_write_config(sc->jme_dev, JME_EFUSE_CTL1, reg, 4);
/*
* Verify completion of eFuse autload command. It should be
* completed within 108us.
*/
DELAY(110);
for (i = 10; i > 0; i--) {
reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR |
EFUSE_CTL1_AUTOLAOD_DONE)) != EFUSE_CTL1_AUTOLAOD_DONE) {
DELAY(20);
continue;
}
/* EXECUTE clears when the reload has finished. */
if ((reg & EFUSE_CTL1_EXECUTE) == 0)
break;
/* Station address loading is still in progress. */
DELAY(20);
}
if (i == 0) {
device_printf(sc->jme_dev, "eFuse autoload timed out.\n");
return (ETIMEDOUT);
}
return (0);
}
/*
 * Recover the station address from the JME_PAR0/JME_PAR1 MAC
 * registers.  All-zeros or all-ones means no address was loaded and
 * only a diagnostic is printed; otherwise the six little-endian bytes
 * are copied into sc->jme_eaddr.
 */
static void
jme_reg_macaddr(struct jme_softc *sc)
{
	uint32_t lo, hi;
	int i;

	/* Read station address. */
	lo = CSR_READ_4(sc, JME_PAR0);
	hi = CSR_READ_4(sc, JME_PAR1) & 0xFFFF;
	if ((lo == 0 && hi == 0) ||
	    (lo == 0xFFFFFFFF && hi == 0xFFFF)) {
		device_printf(sc->jme_dev,
		    "Failed to retrieve Ethernet address.\n");
		return;
	}

	/*
	 * For controllers that use eFuse, the station address
	 * could also be extracted from JME_PCI_PAR0 and
	 * JME_PCI_PAR1 registers in PCI configuration space.
	 * Each register holds exactly half of station address(24bits)
	 * so use JME_PAR0, JME_PAR1 registers instead.
	 */
	for (i = 0; i < 4; i++)
		sc->jme_eaddr[i] = (lo >> (i * 8)) & 0xFF;
	sc->jme_eaddr[4] = hi & 0xFF;
	sc->jme_eaddr[5] = (hi >> 8) & 0xFF;
}
/*
 * Program the station address into the controller.  On eFuse parts
 * the address is written through the eFuse EEPROM interface (one byte
 * per JME_PAR register); on other parts it is written directly to the
 * PAR registers.
 */
static void
jme_set_macaddr(struct jme_softc *sc, uint8_t *eaddr)
{
uint32_t val;
int i;
if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) {
/*
* Avoid reprogramming station address if the address
* is the same as previous one. Note, reprogrammed
* station address is permanent as if it was written
* to EEPROM. So if station address was changed by
* admistrator it's possible to lose factory configured
* address when driver fails to restore its address.
* (e.g. reboot or system crash)
*/
if (bcmp(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN) != 0) {
for (i = 0; i < ETHER_ADDR_LEN; i++) {
/* Build one per-byte eFuse write: func, page, PAR offset, data. */
val = JME_EFUSE_EEPROM_FUNC0 <<
JME_EFUSE_EEPROM_FUNC_SHIFT;
val |= JME_EFUSE_EEPROM_PAGE_BAR1 <<
JME_EFUSE_EEPROM_PAGE_SHIFT;
val |= (JME_PAR0 + i) <<
JME_EFUSE_EEPROM_ADDR_SHIFT;
val |= eaddr[i] << JME_EFUSE_EEPROM_DATA_SHIFT;
pci_write_config(sc->jme_dev, JME_EFUSE_EEPROM,
val | JME_EFUSE_EEPROM_WRITE, 4);
}
}
} else {
/* Non-eFuse parts: write the address straight into the PAR registers. */
CSR_WRITE_4(sc, JME_PAR0,
eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
}
}
/*
 * Build and program the MSI/MSI-X interrupt-source-to-vector map:
 * Tx sources go to vector 2, Rx sources to vector 1, and everything
 * left zero in the bzero'ed map falls to vector 0.
 */
static void
jme_map_intr_vector(struct jme_softc *sc)
{
uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES];
bzero(map, sizeof(map));
/* Map Tx interrupts source to MSI/MSIX vector 2. */
map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] =
MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP);
map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |=
MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP);
map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |=
MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP);
map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |=
MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP);
map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP);
map[MSINUM_REG_INDEX(N_INTR_TXQ5_COMP)] |=
MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP);
map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |=
MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP);
map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |=
MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP);
map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |=
MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL);
map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |=
MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO);
/*
 * Map Rx interrupts source to MSI/MSIX vector 1.
 * NOTE(review): the Rx entries use plain `=' while the Tx entries
 * OR into the map; this is only safe if no Rx source shares a
 * MSINUM_REG_INDEX with a Tx source — presumably guaranteed by the
 * register layout in if_jmereg.h; confirm before changing.
 */
map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] =
MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP);
map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] =
MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP);
map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] =
MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP);
map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] =
MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP);
map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] =
MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY);
map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] =
MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY);
map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] =
MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY);
map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] =
MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY);
map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] =
MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL);
map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] =
MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL);
map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] =
MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL);
map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] =
MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL);
map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] =
MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO);
map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] =
MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO);
map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] =
MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO);
map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] =
MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO);
/* Map all other interrupts source to MSI/MSIX vector 0. */
CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]);
CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]);
CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]);
CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]);
}
/*
 * Attach: allocate bus resources (preferring MSI-X, then MSI, then
 * INTx), identify the chip revision and feature set, fetch the
 * station address (eFuse, EEPROM, or PAR registers), allocate DMA
 * rings, create the ifnet and MII bus, and install the interrupt
 * handler.  On any failure, error is nonzero and jme_detach() is
 * called to unwind whatever was set up.
 */
static int
jme_attach(device_t dev)
{
struct jme_softc *sc;
if_t ifp;
struct mii_softc *miisc;
struct mii_data *mii;
uint32_t reg;
uint16_t burst;
int error, i, mii_flags, msic, msixc, pmc;
error = 0;
sc = device_get_softc(dev);
sc->jme_dev = dev;
mtx_init(&sc->jme_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF);
callout_init_mtx(&sc->jme_tick_ch, &sc->jme_mtx, 0);
TASK_INIT(&sc->jme_int_task, 0, jme_int_task, sc);
TASK_INIT(&sc->jme_link_task, 0, jme_link_task, sc);
/*
* Map the device. JMC250 supports both memory mapped and I/O
* register space access. Because I/O register access should
* use different BARs to access registers it's waste of time
* to use I/O register spce access. JMC250 uses 16K to map
* entire memory space.
*/
pci_enable_busmaster(dev);
sc->jme_res_spec = jme_res_spec_mem;
sc->jme_irq_spec = jme_irq_spec_legacy;
error = bus_alloc_resources(dev, sc->jme_res_spec, sc->jme_res);
if (error != 0) {
device_printf(dev, "cannot allocate memory resources.\n");
goto fail;
}
/* Allocate IRQ resources. */
msixc = pci_msix_count(dev);
msic = pci_msi_count(dev);
if (bootverbose) {
device_printf(dev, "MSIX count : %d\n", msixc);
device_printf(dev, "MSI count : %d\n", msic);
}
/* Use 1 MSI/MSI-X. */
if (msixc > 1)
msixc = 1;
if (msic > 1)
msic = 1;
/* Prefer MSIX over MSI. */
if (msix_disable == 0 || msi_disable == 0) {
if (msix_disable == 0 && msixc > 0 &&
pci_alloc_msix(dev, &msixc) == 0) {
if (msixc == 1) {
device_printf(dev, "Using %d MSIX messages.\n",
msixc);
sc->jme_flags |= JME_FLAG_MSIX;
sc->jme_irq_spec = jme_irq_spec_msi;
} else
pci_release_msi(dev);
}
/* Fall back to MSI only if MSI-X was not taken. */
if (msi_disable == 0 && (sc->jme_flags & JME_FLAG_MSIX) == 0 &&
msic > 0 && pci_alloc_msi(dev, &msic) == 0) {
if (msic == 1) {
device_printf(dev, "Using %d MSI messages.\n",
msic);
sc->jme_flags |= JME_FLAG_MSI;
sc->jme_irq_spec = jme_irq_spec_msi;
} else
pci_release_msi(dev);
}
/* Map interrupt vector 0, 1 and 2. */
if ((sc->jme_flags & JME_FLAG_MSI) != 0 ||
(sc->jme_flags & JME_FLAG_MSIX) != 0)
jme_map_intr_vector(sc);
}
error = bus_alloc_resources(dev, sc->jme_irq_spec, sc->jme_irq);
if (error != 0) {
device_printf(dev, "cannot allocate IRQ resources.\n");
goto fail;
}
/* Identify chip: JMC260 is fast-ethernet only, no jumbo frames. */
sc->jme_rev = pci_get_device(dev);
if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260) {
sc->jme_flags |= JME_FLAG_FASTETH;
sc->jme_flags |= JME_FLAG_NOJUMBO;
}
reg = CSR_READ_4(sc, JME_CHIPMODE);
sc->jme_chip_rev = (reg & CHIPMODE_REV_MASK) >> CHIPMODE_REV_SHIFT;
if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
CHIPMODE_NOT_FPGA)
sc->jme_flags |= JME_FLAG_FPGA;
if (bootverbose) {
device_printf(dev, "PCI device revision : 0x%04x\n",
sc->jme_rev);
device_printf(dev, "Chip revision : 0x%02x\n",
sc->jme_chip_rev);
if ((sc->jme_flags & JME_FLAG_FPGA) != 0)
device_printf(dev, "FPGA revision : 0x%04x\n",
(reg & CHIPMODE_FPGA_REV_MASK) >>
CHIPMODE_FPGA_REV_SHIFT);
}
if (sc->jme_chip_rev == 0xFF) {
device_printf(dev, "Unknown chip revision : 0x%02x\n",
sc->jme_rev);
error = ENXIO;
goto fail;
}
/* Identify controller features and bugs. */
if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2) {
if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260 &&
CHIPMODE_REVFM(sc->jme_chip_rev) == 2)
sc->jme_flags |= JME_FLAG_DMA32BIT;
if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5)
sc->jme_flags |= JME_FLAG_EFUSE | JME_FLAG_PCCPCD;
sc->jme_flags |= JME_FLAG_TXCLK | JME_FLAG_RXCLK;
sc->jme_flags |= JME_FLAG_HWMIB;
}
/* Reset the ethernet controller. */
jme_reset(sc);
/* Get station address: eFuse reload, else EEPROM, else PAR regs. */
if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) {
error = jme_efuse_macaddr(sc);
if (error == 0)
jme_reg_macaddr(sc);
} else {
error = ENOENT;
reg = CSR_READ_4(sc, JME_SMBCSR);
if ((reg & SMBCSR_EEPROM_PRESENT) != 0)
error = jme_eeprom_macaddr(sc);
if (error != 0 && bootverbose)
device_printf(sc->jme_dev,
"ethernet hardware address not found in EEPROM.\n");
if (error != 0)
jme_reg_macaddr(sc);
}
/*
* Save PHY address.
* Integrated JR0211 has fixed PHY address whereas FPGA version
* requires PHY probing to get correct PHY address.
*/
if ((sc->jme_flags & JME_FLAG_FPGA) == 0) {
sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
GPREG0_PHY_ADDR_MASK;
if (bootverbose)
device_printf(dev, "PHY is at address %d.\n",
sc->jme_phyaddr);
} else
sc->jme_phyaddr = 0;
/* Set max allowable DMA size. */
if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
sc->jme_flags |= JME_FLAG_PCIE;
burst = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
if (bootverbose) {
device_printf(dev, "Read request size : %d bytes.\n",
128 << ((burst >> 12) & 0x07));
device_printf(dev, "TLP payload size : %d bytes.\n",
128 << ((burst >> 5) & 0x07));
}
/* Cap Tx DMA burst to the PCIe max-read-request size. */
switch ((burst >> 12) & 0x07) {
case 0:
sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
break;
case 1:
sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
break;
default:
sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
break;
}
sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
} else {
sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
}
/* Create coalescing sysctl node. */
jme_sysctl_node(sc);
if ((error = jme_dma_alloc(sc)) != 0)
goto fail;
ifp = sc->jme_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "cannot allocate ifnet structure.\n");
- error = ENXIO;
- goto fail;
- }
-
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setioctlfn(ifp, jme_ioctl);
if_setstartfn(ifp, jme_start);
if_setinitfn(ifp, jme_init);
if_setsendqlen(ifp, JME_TX_RING_CNT - 1);
if_setsendqready(ifp);
/* JMC250 supports Tx/Rx checksum offload as well as TSO. */
if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_TSO4);
if_sethwassist(ifp, JME_CSUM_FEATURES | CSUM_TSO);
if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
sc->jme_flags |= JME_FLAG_PMCAP;
if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
}
if_setcapenable(ifp, if_getcapabilities(ifp));
/* Wakeup PHY. */
jme_phy_up(sc);
mii_flags = MIIF_DOPAUSE;
/* Ask PHY calibration to PHY driver. */
if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5)
mii_flags |= MIIF_MACPRIV0;
/* Set up MII bus. */
error = mii_attach(dev, &sc->jme_miibus, ifp, jme_mediachange,
jme_mediastatus, BMSR_DEFCAPMASK,
sc->jme_flags & JME_FLAG_FPGA ? MII_PHY_ANY : sc->jme_phyaddr,
MII_OFFSET_ANY, mii_flags);
if (error != 0) {
device_printf(dev, "attaching PHYs failed\n");
goto fail;
}
/*
* Force PHY to FPGA mode.
*/
if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
mii = device_get_softc(sc->jme_miibus);
if (mii->mii_instance != 0) {
LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
if (miisc->mii_phy != 0) {
sc->jme_phyaddr = miisc->mii_phy;
break;
}
}
if (sc->jme_phyaddr != 0) {
device_printf(sc->jme_dev,
"FPGA PHY is at %d\n", sc->jme_phyaddr);
/* vendor magic. */
jme_miibus_writereg(dev, sc->jme_phyaddr, 27,
0x0004);
}
}
}
ether_ifattach(ifp, sc->jme_eaddr);
/* VLAN capability setup */
if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO, 0);
if_setcapenable(ifp, if_getcapabilities(ifp));
/* Tell the upper layer(s) we support long frames. */
if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
/* Create local taskq. */
sc->jme_tq = taskqueue_create_fast("jme_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->jme_tq);
if (sc->jme_tq == NULL) {
device_printf(dev, "could not create taskqueue.\n");
ether_ifdetach(ifp);
error = ENXIO;
goto fail;
}
taskqueue_start_threads(&sc->jme_tq, 1, PI_NET, "%s taskq",
device_get_nameunit(sc->jme_dev));
for (i = 0; i < 1; i++) {
error = bus_setup_intr(dev, sc->jme_irq[i],
INTR_TYPE_NET | INTR_MPSAFE, jme_intr, NULL, sc,
&sc->jme_intrhand[i]);
if (error != 0)
break;
}
if (error != 0) {
device_printf(dev, "could not set up interrupt handler.\n");
taskqueue_free(sc->jme_tq);
sc->jme_tq = NULL;
ether_ifdetach(ifp);
goto fail;
}
/* Success falls through here with error == 0; detach only on failure. */
fail:
if (error != 0)
jme_detach(dev);
return (error);
}
/*
 * Detach: stop the hardware, drain deferred work, restore the factory
 * station address on eFuse parts, detach the ifnet/MII bus, and
 * release DMA memory, interrupt, MSI, and register resources.  Also
 * used by jme_attach() to unwind a partial attach, so every teardown
 * step is guarded against resources that were never allocated.
 */
static int
jme_detach(device_t dev)
{
struct jme_softc *sc;
if_t ifp;
int i;
sc = device_get_softc(dev);
ifp = sc->jme_ifp;
if (device_is_attached(dev)) {
JME_LOCK(sc);
/* Mark detaching so deferred tasks bail out early. */
sc->jme_flags |= JME_FLAG_DETACH;
jme_stop(sc);
JME_UNLOCK(sc);
callout_drain(&sc->jme_tick_ch);
taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
taskqueue_drain(taskqueue_swi, &sc->jme_link_task);
/* Restore possibly modified station address. */
if ((sc->jme_flags & JME_FLAG_EFUSE) != 0)
jme_set_macaddr(sc, sc->jme_eaddr);
ether_ifdetach(ifp);
}
if (sc->jme_tq != NULL) {
taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
taskqueue_free(sc->jme_tq);
sc->jme_tq = NULL;
}
if (sc->jme_miibus != NULL) {
device_delete_child(dev, sc->jme_miibus);
sc->jme_miibus = NULL;
}
bus_generic_detach(dev);
jme_dma_free(sc);
if (ifp != NULL) {
if_free(ifp);
sc->jme_ifp = NULL;
}
for (i = 0; i < 1; i++) {
if (sc->jme_intrhand[i] != NULL) {
bus_teardown_intr(dev, sc->jme_irq[i],
sc->jme_intrhand[i]);
sc->jme_intrhand[i] = NULL;
}
}
if (sc->jme_irq[0] != NULL)
bus_release_resources(dev, sc->jme_irq_spec, sc->jme_irq);
if ((sc->jme_flags & (JME_FLAG_MSIX | JME_FLAG_MSI)) != 0)
pci_release_msi(dev);
if (sc->jme_res[0] != NULL)
bus_release_resources(dev, sc->jme_res_spec, sc->jme_res);
mtx_destroy(&sc->jme_mtx);
return (0);
}
#define JME_SYSCTL_STAT_ADD32(c, h, n, p, d) \
SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
static void
jme_sysctl_node(struct jme_softc *sc)
{
struct sysctl_ctx_list *ctx;
struct sysctl_oid_list *child, *parent;
struct sysctl_oid *tree;
struct jme_hw_stats *stats;
int error;
stats = &sc->jme_stats;
ctx = device_get_sysctl_ctx(sc->jme_dev);
child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev));
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_to",
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_tx_coal_to,
0, sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout");
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_pkt",
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_tx_coal_pkt,
0, sysctl_hw_jme_tx_coal_pkt, "I", "jme tx coalescing packet");
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_to",
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_rx_coal_to,
0, sysctl_hw_jme_rx_coal_to, "I", "jme rx coalescing timeout");
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_pkt",
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_rx_coal_pkt,
0, sysctl_hw_jme_rx_coal_pkt, "I", "jme rx coalescing packet");
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
&sc->jme_process_limit, 0, sysctl_hw_jme_proc_limit, "I",
"max number of Rx events to process");
/* Pull in device tunables. */
sc->jme_process_limit = JME_PROC_DEFAULT;
error = resource_int_value(device_get_name(sc->jme_dev),
device_get_unit(sc->jme_dev), "process_limit",
&sc->jme_process_limit);
if (error == 0) {
if (sc->jme_process_limit < JME_PROC_MIN ||
sc->jme_process_limit > JME_PROC_MAX) {
device_printf(sc->jme_dev,
"process_limit value out of range; "
"using default: %d\n", JME_PROC_DEFAULT);
sc->jme_process_limit = JME_PROC_DEFAULT;
}
}
sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
error = resource_int_value(device_get_name(sc->jme_dev),
device_get_unit(sc->jme_dev), "tx_coal_to", &sc->jme_tx_coal_to);
if (error == 0) {
if (sc->jme_tx_coal_to < PCCTX_COAL_TO_MIN ||
sc->jme_tx_coal_to > PCCTX_COAL_TO_MAX) {
device_printf(sc->jme_dev,
"tx_coal_to value out of range; "
"using default: %d\n", PCCTX_COAL_TO_DEFAULT);
sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
}
}
sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
error = resource_int_value(device_get_name(sc->jme_dev),
device_get_unit(sc->jme_dev), "tx_coal_pkt", &sc->jme_tx_coal_to);
if (error == 0) {
if (sc->jme_tx_coal_pkt < PCCTX_COAL_PKT_MIN ||
sc->jme_tx_coal_pkt > PCCTX_COAL_PKT_MAX) {
device_printf(sc->jme_dev,
"tx_coal_pkt value out of range; "
"using default: %d\n", PCCTX_COAL_PKT_DEFAULT);
sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
}
}
sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
error = resource_int_value(device_get_name(sc->jme_dev),
device_get_unit(sc->jme_dev), "rx_coal_to", &sc->jme_rx_coal_to);
if (error == 0) {
if (sc->jme_rx_coal_to < PCCRX_COAL_TO_MIN ||
sc->jme_rx_coal_to > PCCRX_COAL_TO_MAX) {
device_printf(sc->jme_dev,
"rx_coal_to value out of range; "
"using default: %d\n", PCCRX_COAL_TO_DEFAULT);
sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
}
}
sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
error = resource_int_value(device_get_name(sc->jme_dev),
device_get_unit(sc->jme_dev), "rx_coal_pkt", &sc->jme_rx_coal_to);
if (error == 0) {
if (sc->jme_rx_coal_pkt < PCCRX_COAL_PKT_MIN ||
sc->jme_rx_coal_pkt > PCCRX_COAL_PKT_MAX) {
device_printf(sc->jme_dev,
"tx_coal_pkt value out of range; "
"using default: %d\n", PCCRX_COAL_PKT_DEFAULT);
sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
}
}
if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
return;
tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "JME statistics");
parent = SYSCTL_CHILDREN(tree);
/* Rx statistics. */
tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
child = SYSCTL_CHILDREN(tree);
JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
&stats->rx_good_frames, "Good frames");
JME_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
&stats->rx_crc_errs, "CRC errors");
JME_SYSCTL_STAT_ADD32(ctx, child, "mii_errs",
&stats->rx_mii_errs, "MII errors");
JME_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
&stats->rx_fifo_oflows, "FIFO overflows");
JME_SYSCTL_STAT_ADD32(ctx, child, "desc_empty",
&stats->rx_desc_empty, "Descriptor empty");
JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
&stats->rx_bad_frames, "Bad frames");
/* Tx statistics. */
tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
child = SYSCTL_CHILDREN(tree);
JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
&stats->tx_good_frames, "Good frames");
JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
&stats->tx_bad_frames, "Bad frames");
}
#undef JME_SYSCTL_STAT_ADD32
/* Argument for jme_dmamap_cb(): receives the single segment's bus address. */
struct jme_dmamap_arg {
bus_addr_t jme_busaddr;
};
/*
 * bus_dmamap_load() callback: record the bus address of the single
 * contiguous segment into the caller's jme_dmamap_arg.  Does nothing
 * on load error; asserts exactly one segment was returned.
 */
static void
jme_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct jme_dmamap_arg *ctx = arg;

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	ctx->jme_busaddr = segs[0].ds_addr;
}
/*
 * Allocate all DMA resources for the controller: parent tags, Tx/Rx
 * descriptor rings, the shared status block, and per-buffer DMA maps.
 * Returns 0 on success or a bus_dma(9) error code.  On failure the
 * already-created resources are left in the softc; jme_dma_free()
 * reclaims them (it checks every tag/map for NULL).
 */
static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_dmamap_arg ctx;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	bus_addr_t lowaddr, rx_ring_end, tx_ring_end;
	int error, i;

	lowaddr = BUS_SPACE_MAXADDR;
	if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
		lowaddr = BUS_SPACE_MAXADDR_32BIT;

	/*
	 * Ring allocation may be retried once with a 32bit address limit
	 * if a ring happens to cross a 4GB boundary (see check below).
	 */
again:
	/* Create parent ring tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_ring_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_TX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate Tx ring DMA tag.\n");
		goto fail;
	}
	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_RX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_RX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate Rx ring DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
	    (void **)&sc->jme_rdata.jme_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->jme_cdata.jme_tx_ring_map);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	ctx.jme_busaddr = 0;
	error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
	    JME_TX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.jme_busaddr == 0) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->jme_rdata.jme_tx_ring_paddr = ctx.jme_busaddr;
	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
	    (void **)&sc->jme_rdata.jme_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->jme_cdata.jme_rx_ring_map);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}
	ctx.jme_busaddr = 0;
	error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
	    JME_RX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.jme_busaddr == 0) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->jme_rdata.jme_rx_ring_paddr = ctx.jme_busaddr;

	if (lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/* Tx/Rx descriptor queue should reside within 4GB boundary. */
		tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr +
		    JME_TX_RING_SIZE;
		rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr +
		    JME_RX_RING_SIZE;
		if ((JME_ADDR_HI(tx_ring_end) !=
		    JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
		    (JME_ADDR_HI(rx_ring_end) !=
		    JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
			device_printf(sc->jme_dev, "4GB boundary crossed, "
			    "switching to 32bit DMA address mode.\n");
			jme_dma_free(sc);
			/* Limit DMA address space to 32bit and try again. */
			lowaddr = BUS_SPACE_MAXADDR_32BIT;
			goto again;
		}
	}

	lowaddr = BUS_SPACE_MAXADDR;
	if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	/* Create parent buffer tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}
	/* Create shadow status block tag. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_SSB_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_SSB_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_SSB_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_ssb_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not create shared status block DMA tag.\n");
		goto fail;
	}
	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TSO_MAXSIZE,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_TSO_MAXSEGSIZE,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}
	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_rx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/*
	 * Allocate DMA'able memory and load the DMA map for shared
	 * status block.
	 */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
	    (void **)&sc->jme_rdata.jme_ssb_block,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->jme_cdata.jme_ssb_map);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not allocate DMA'able "
		    "memory for shared status block.\n");
		goto fail;
	}
	ctx.jme_busaddr = 0;
	error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
	    JME_SSB_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.jme_busaddr == 0) {
		device_printf(sc->jme_dev, "could not load DMA'able memory "
		    "for shared status block.\n");
		goto fail;
	}
	sc->jme_rdata.jme_ssb_block_paddr = ctx.jme_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->jme_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
	    &sc->jme_cdata.jme_rx_sparemap)) != 0) {
		device_printf(sc->jme_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->jme_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}
/*
 * Release every DMA resource created by jme_dma_alloc().  Safe to call
 * on a partially initialized softc: each tag/map/paddr is checked
 * before use, and the order is unload -> free -> tag destroy, with the
 * parent tags destroyed last.
 */
static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		if (sc->jme_rdata.jme_tx_ring_paddr)
			bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
			    sc->jme_cdata.jme_tx_ring_map);
		if (sc->jme_rdata.jme_tx_ring)
			bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
			    sc->jme_rdata.jme_tx_ring,
			    sc->jme_cdata.jme_tx_ring_map);
		sc->jme_rdata.jme_tx_ring = NULL;
		sc->jme_rdata.jme_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}
	/* Rx ring */
	if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
		if (sc->jme_rdata.jme_rx_ring_paddr)
			bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
			    sc->jme_cdata.jme_rx_ring_map);
		if (sc->jme_rdata.jme_rx_ring)
			bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
			    sc->jme_rdata.jme_rx_ring,
			    sc->jme_cdata.jme_rx_ring_map);
		sc->jme_rdata.jme_rx_ring = NULL;
		sc->jme_rdata.jme_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
		sc->jme_cdata.jme_rx_ring_tag = NULL;
	}
	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->jme_cdata.jme_rx_tag != NULL) {
		for (i = 0; i < JME_RX_RING_CNT; i++) {
			rxd = &sc->jme_cdata.jme_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->jme_cdata.jme_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
			    sc->jme_cdata.jme_rx_sparemap);
			sc->jme_cdata.jme_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
		sc->jme_cdata.jme_rx_tag = NULL;
	}
	/* Shared status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		if (sc->jme_rdata.jme_ssb_block_paddr)
			bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
			    sc->jme_cdata.jme_ssb_map);
		if (sc->jme_rdata.jme_ssb_block)
			bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
			    sc->jme_rdata.jme_ssb_block,
			    sc->jme_cdata.jme_ssb_map);
		sc->jme_rdata.jme_ssb_block = NULL;
		sc->jme_rdata.jme_ssb_block_paddr = 0;
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}
	/* Destroy parent tags last; children are gone by now. */
	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}
}
/*
* Make sure the interface is stopped at reboot time.
*/
/* Shutdown method: identical to suspend — stop the MAC and arm WOL. */
static int
jme_shutdown(device_t dev)
{
	int error;

	error = jme_suspend(dev);
	return (error);
}
/*
 * Unlike other ethernet controllers, JMC250 requires
 * explicit resetting link speed to 10/100Mbps as gigabit
 * link will consume more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with
 * auto-negotiation but we don't know whether that operation
 * would succeed or not as we have no control after powering
 * off. If the renegotiation fails WOL may not work. Running
 * at 1Gbps draws more power than 375mA at 3.3V which is
 * specified in PCI specification and that would result in
 * power to the ethernet controller being shut down completely.
 *
 * TODO
 * Save current negotiated media speed/duplex/flow-control
 * to softc and restore the same link again after resuming.
 * PHY handling such as power down/resetting to 100Mbps
 * may be better handled in suspend method in phy driver.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
struct mii_data *mii;
int aneg, i;
JME_LOCK_ASSERT(sc);
mii = device_get_softc(sc->jme_miibus);
mii_pollstat(mii);
aneg = 0;
if ((mii->mii_media_status & IFM_AVALID) != 0) {
switch IFM_SUBTYPE(mii->mii_media_active) {
case IFM_10_T:
case IFM_100_TX:
return;
case IFM_1000_T:
aneg++;
default:
break;
}
}
jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
BMCR_AUTOEN | BMCR_STARTNEG);
DELAY(1000);
if (aneg != 0) {
/* Poll link state until jme(4) get a 10/100 link. */
for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
mii_pollstat(mii);
if ((mii->mii_media_status & IFM_AVALID) != 0) {
switch (IFM_SUBTYPE(mii->mii_media_active)) {
case IFM_10_T:
case IFM_100_TX:
jme_mac_config(sc);
return;
default:
break;
}
}
JME_UNLOCK(sc);
pause("jmelnk", hz);
JME_LOCK(sc);
}
if (i == MII_ANEGTICKS_GIGE)
device_printf(sc->jme_dev, "establishing link failed, "
"WOL may not work!");
}
/*
* No link, force MAC to have 100Mbps, full-duplex link.
* This is the last resort and may/may not work.
*/
mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
jme_mac_config(sc);
}
/*
 * Arm the controller for Wake On LAN.  If the device has no PCI PME
 * capability the PHY is powered down and Tx/Rx MAC clocks are removed
 * to save power.  Otherwise magic-frame wakeup and PME messaging are
 * programmed according to the interface's enabled WOL capabilities.
 */
static void
jme_setwol(struct jme_softc *sc)
{
	if_t ifp;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	JME_LOCK_ASSERT(sc);

	if (pci_find_cap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* Remove Tx MAC/offload clock to save more power. */
		if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
			CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
			    ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
			    GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
		if ((sc->jme_flags & JME_FLAG_RXCLK) != 0)
			CSR_WRITE_4(sc, JME_GPREG1,
			    CSR_READ_4(sc, JME_GPREG1) | GPREG1_RX_MAC_CLK_DIS);
		/* No PME capability, PHY power down. */
		jme_phy_down(sc);
		return;
	}

	ifp = sc->jme_ifp;
	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_flags & JME_FLAG_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);
	/* Remove Tx MAC/offload clock to save more power. */
	if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
		CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
		    ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
		    GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((if_getcapenable(ifp) & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_phy_down(sc);
	}
}
/* Suspend method: stop the interface and program WOL under the lock. */
static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	JME_LOCK(sc);
	jme_stop(sc);
	jme_setwol(sc);
	JME_UNLOCK(sc);

	return (0);
}
/*
 * Resume method: clear PME state, wake the PHY and, if the interface
 * was administratively up, reinitialize the hardware.
 */
static int
jme_resume(device_t dev)
{
	struct jme_softc *sc;
	if_t ifp;
	uint16_t pmstat;
	int pmc;

	sc = device_get_softc(dev);
	JME_LOCK(sc);
	if (pci_find_cap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
	/* Wakeup PHY. */
	jme_phy_up(sc);
	ifp = sc->jme_ifp;
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		/* Force reinitialization by clearing RUNNING first. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		jme_init_locked(sc);
	}
	JME_UNLOCK(sc);

	return (0);
}
/*
 * Encapsulate an mbuf chain into the Tx descriptor ring.  For TSO
 * frames the TCP pseudo checksum is recomputed first (NDIS-style, see
 * below).  The frame is loaded with bus_dmamap_load_mbuf_sg(), being
 * collapsed once on EFBIG.  One leading "dummy" descriptor carries the
 * per-packet flags; each DMA segment then gets its own descriptor.
 * Returns 0 on success; on error *m_head may be freed and set to NULL.
 */
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int error, i, nsegs, prod;
	uint32_t cflags, tsosegsz;

	JME_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		/*
		 * Due to the adherence to NDIS specification JMC250
		 * assumes upper stack computed TCP pseudo checksum
		 * without including payload length. This breaks
		 * checksum offload for TSO case so recompute TCP
		 * pseudo checksum for JMC250. Hopefully this wouldn't
		 * be much burden on modern CPUs.
		 */
		struct ether_header *eh;
		struct ip *ip;
		struct tcphdr *tcp;
		uint32_t ip_off, poff;

		if (M_WRITABLE(*m_head) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_NOWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		ip_off = sizeof(struct ether_header);
		m = m_pullup(*m_head, ip_off);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/* Check the existence of VLAN tag. */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			ip_off = sizeof(struct ether_vlan_header);
			m = m_pullup(m, ip_off);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, ip_off + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		poff = ip_off + (ip->ip_hl << 2);
		m = m_pullup(m, poff + sizeof(struct tcphdr));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		/*
		 * Reset IP checksum and recompute TCP pseudo
		 * checksum that NDIS specification requires.
		 */
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		tcp = (struct tcphdr *)(mtod(m, char *) + poff);
		ip->ip_sum = 0;
		if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) {
			/* Header-only frame: nothing to segment. */
			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr,
			    htons((tcp->th_off << 2) + IPPROTO_TCP));
			/* No need to TSO, force IP checksum offload. */
			(*m_head)->m_pkthdr.csum_flags &= ~CSUM_TSO;
			(*m_head)->m_pkthdr.csum_flags |= CSUM_IP;
		} else
			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		*m_head = m;
	}

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		/* Too many fragments: collapse the chain and retry once. */
		m = m_collapse(*m_head, M_NOWAIT, JME_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/*
	 * Check descriptor overrun. Leave one free descriptor.
	 * Since we always use 64bit address mode for transmitting,
	 * each Tx request requires one more dummy descriptor.
	 */
	if (sc->jme_cdata.jme_tx_cnt + nsegs + 1 > JME_TX_RING_CNT - 1) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	m = *m_head;
	cflags = 0;
	tsosegsz = 0;
	/* Configure checksum offload and TSO. */
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		tsosegsz = (uint32_t)m->m_pkthdr.tso_segsz <<
		    JME_TD_MSS_SHIFT;
		cflags |= JME_TD_TSO;
	} else {
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			cflags |= JME_TD_IPCSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			cflags |= JME_TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			cflags |= JME_TD_UDPCSUM;
	}
	/* Configure VLAN. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	/* Leading descriptor: flags, MSS and total packet length. */
	desc = &sc->jme_rdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->buflen = htole32(tsosegsz);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	desc->addr_lo = 0;
	sc->jme_cdata.jme_tx_cnt++;
	JME_DESC_INC(prod, JME_TX_RING_CNT);
	/* One descriptor per DMA segment, owned by hardware. */
	for (i = 0; i < nsegs; i++) {
		desc = &sc->jme_rdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
		sc->jme_cdata.jme_tx_cnt++;
		JME_DESC_INC(prod, JME_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
	txd->tx_m = m;
	txd->tx_ndesc = nsegs + 1;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
/* if_start entry point: take the driver lock and run the real worker. */
static void
jme_start(if_t ifp)
{
	struct jme_softc *sc = if_getsoftc(ifp);

	JME_LOCK(sc);
	jme_start_locked(ifp);
	JME_UNLOCK(sc);
}
/*
 * Dequeue frames from the interface send queue and hand them to
 * jme_encap() until the queue empties or the Tx ring fills.  Kicks
 * the transmitter once if anything was enqueued.  Caller holds the
 * driver lock.
 */
static void
jme_start_locked(if_t ifp)
{
	struct jme_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = if_getsoftc(ifp);

	JME_LOCK_ASSERT(sc);

	/* Reclaim completed transmissions when the ring is nearly full. */
	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
		jme_txeof(sc);

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->jme_flags & JME_FLAG_LINK) == 0)
		return;

	for (enq = 0; !if_sendq_empty(ifp); ) {
		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes very long time under heavy load
		 * so cache TXCSR value and writes the ORed value with
		 * the kick command to the TXCSR. This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		sc->jme_watchdog_timer = JME_TX_TIMEOUT;
	}
}
/*
 * Per-tick Tx watchdog.  Counts down jme_watchdog_timer; on expiry it
 * either restarts the interface (link lost or real Tx hang) or, when
 * all descriptors turn out to be already completed, merely restarts
 * transmission.  Caller holds the driver lock.
 */
static void
jme_watchdog(struct jme_softc *sc)
{
	if_t ifp;

	JME_LOCK_ASSERT(sc);

	/* Timer disarmed, or still counting down. */
	if (sc->jme_watchdog_timer == 0 || --sc->jme_watchdog_timer)
		return;

	ifp = sc->jme_ifp;
	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		if_printf(sc->jme_ifp, "watchdog timeout (missed link)\n");
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		jme_init_locked(sc);
		return;
	}
	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		/* All Tx work was actually done; just missed the interrupt. */
		if_printf(sc->jme_ifp,
		    "watchdog timeout (missed Tx interrupts) -- recovering\n");
		if (!if_sendq_empty(ifp))
			jme_start_locked(ifp);
		return;
	}

	if_printf(sc->jme_ifp, "watchdog timeout\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	jme_init_locked(sc);
	if (!if_sendq_empty(ifp))
		jme_start_locked(ifp);
}
/*
 * Interface ioctl handler.  Handles MTU changes (revoking TSO/Tx
 * checksum offload when the MTU no longer fits the 2KB Tx FIFO),
 * interface flag and multicast filter updates, media requests, and
 * capability toggles; everything else goes to ether_ioctl().
 */
static int
jme_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct jme_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	uint32_t reg;
	int error, mask;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    ((sc->jme_flags & JME_FLAG_NOJUMBO) != 0 &&
		    ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (if_getmtu(ifp) != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when interface
			 * MTU is changed but availability of TSO/Tx checksum
			 * offload should be checked against new MTU size as
			 * FIFO size is just 2K.
			 */
			JME_LOCK(sc);
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				if_setcapenablebit(ifp, 0,
				    IFCAP_TXCSUM | IFCAP_TSO4);
				if_sethwassistbits(ifp, 0,
				    JME_CSUM_FEATURES | CSUM_TSO);
				VLAN_CAPABILITIES(ifp);
			}
			if_setmtu(ifp, ifr->ifr_mtu);
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				jme_init_locked(sc);
			}
			JME_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		JME_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				/* Only refilter on PROMISC/ALLMULTI change. */
				if (((if_getflags(ifp) ^ sc->jme_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					jme_set_filter(sc);
			} else {
				if ((sc->jme_flags & JME_FLAG_DETACH) == 0)
					jme_init_locked(sc);
			}
		} else {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
				jme_stop(sc);
		}
		sc->jme_if_flags = if_getflags(ifp);
		JME_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		JME_LOCK(sc);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			jme_set_filter(sc);
		JME_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->jme_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		JME_LOCK(sc);
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		/* Tx checksum: allowed only while the MTU fits the FIFO. */
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    if_getmtu(ifp) < JME_TX_FIFO_SIZE) {
			if ((IFCAP_TXCSUM & if_getcapabilities(ifp)) != 0) {
				if_togglecapenable(ifp, IFCAP_TXCSUM);
				if ((IFCAP_TXCSUM & if_getcapenable(ifp)) != 0)
					if_sethwassistbits(ifp, JME_CSUM_FEATURES, 0);
				else
					if_sethwassistbits(ifp, 0, JME_CSUM_FEATURES);
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & if_getcapabilities(ifp)) != 0) {
			if_togglecapenable(ifp, IFCAP_RXCSUM);
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}
		/* TSO: same FIFO-size restriction as Tx checksum. */
		if ((mask & IFCAP_TSO4) != 0 &&
		    if_getmtu(ifp) < JME_TX_FIFO_SIZE) {
			if ((IFCAP_TSO4 & if_getcapabilities(ifp)) != 0) {
				if_togglecapenable(ifp, IFCAP_TSO4);
				if ((IFCAP_TSO4 & if_getcapenable(ifp)) != 0)
					if_sethwassistbits(ifp, CSUM_TSO, 0);
				else
					if_sethwassistbits(ifp, 0, CSUM_TSO);
			}
		}
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (IFCAP_WOL_MAGIC & if_getcapabilities(ifp)) != 0)
			if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM) != 0)
			if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (IFCAP_VLAN_HWTAGGING & if_getcapabilities(ifp)) != 0) {
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
			jme_set_vlan(sc);
		}
		JME_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
/*
 * Program the MAC (GHC, RXMAC, TXMAC, TXPFC) according to the
 * negotiated media: duplex, flow control, speed-dependent Tx clocks,
 * and two JMC250 A2 errata workarounds.  Caller holds the driver lock.
 */
static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, gpreg, rxmac, txmac, txpause;
	uint32_t txclk;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);

	/* Reset GHC before rebuilding the configuration from scratch. */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	txclk = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		/* Full duplex: no collision handling, honor pause frames. */
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		/* Half duplex: CSMA/CD collision detection and backoff. */
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}
	/* Reprogram Tx/Rx MACs with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10;
		txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
		break;
	case IFM_100_TX:
		ghc |= GHC_SPEED_100;
		txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
		break;
	case IFM_1000_T:
		/* Fast-ethernet-only parts cannot do gigabit. */
		if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
			break;
		ghc |= GHC_SPEED_1000;
		txclk |= GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;
	default:
		break;
	}
	if (sc->jme_rev == DEVICEID_JMC250 &&
	    sc->jme_chip_rev == DEVICEREVID_JMC250_A2) {
		/*
		 * Workaround occasional packet loss issue of JMC250 A2
		 * when it runs on half-duplex media.
		 */
		gpreg = CSR_READ_4(sc, JME_GPREG1);
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
			gpreg &= ~GPREG1_HDPX_FIX;
		else
			gpreg |= GPREG1_HDPX_FIX;
		CSR_WRITE_4(sc, JME_GPREG1, gpreg);
		/* Workaround CRC errors at 100Mbps on JMC250 A2. */
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
			/* Extend interface FIFO depth. */
			jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
			    0x1B, 0x0000);
		} else {
			/* Select default interface FIFO depth. */
			jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
			    0x1B, 0x0004);
		}
	}
	if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
		ghc |= txclk;
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);
}
/*
 * Taskqueue handler for link state changes.  Stops the MACs, reclaims
 * all in-flight Tx/Rx state (the hardware resets its descriptor
 * pointers when the MACs stop), reprograms the MAC for the resolved
 * media, and restarts the receiver/transmitter if a usable link came
 * up.
 */
static void
jme_link_task(void *arg, int pending)
{
	struct jme_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i;

	sc = (struct jme_softc *)arg;

	JME_LOCK(sc);
	mii = device_get_softc(sc->jme_miibus);
	ifp = sc->jme_ifp;
	if (mii == NULL || ifp == NULL ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		JME_UNLOCK(sc);
		return;
	}

	/* Re-evaluate link state from the resolved media. */
	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling Rx/Tx MACs have a side-effect of resetting
	 * JME_TXNDA/JME_RXNDA register to the first address of
	 * Tx/Rx descriptor address. So driver should reset its
	 * internal producer/consumer pointer and reclaim any
	 * allocated resources. Note, just saving the value of
	 * JME_TXNDA and JME_RXNDA registers before stopping MAC
	 * and restoring JME_TXNDA/JME_RXNDA register is not
	 * sufficient to make sure correct MAC state because
	 * stopping MAC operation can take a while and hardware
	 * might have updated JME_TXNDA/JME_RXNDA registers
	 * during the stop operation.
	 */
	/* Block execution of task. */
	taskqueue_block(sc->jme_tq);
	/* Disable interrupts and stop driver. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	callout_stop(&sc->jme_tick_ch);
	sc->jme_watchdog_timer = 0;

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/* XXX Drain all queued tasks. */
	JME_UNLOCK(sc);
	taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
	JME_LOCK(sc);

	/* Discard any partially assembled Rx frame. */
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);
	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_sync(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			}
		}
	}

	/*
	 * Reuse configured Rx descriptors and reset
	 * producer/consumer index.
	 */
	sc->jme_cdata.jme_rx_cons = 0;
	sc->jme_morework = 0;
	jme_init_tx_ring(sc);
	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if ((sc->jme_flags & JME_FLAG_LINK) != 0) {
		jme_mac_config(sc);
		jme_stats_clear(sc);

		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = JME_TX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		/* Set Rx ring address to the hardware. */
		paddr = JME_RX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
		/* Lastly enable TX/RX clock. */
		if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
			CSR_WRITE_4(sc, JME_GHC,
			    CSR_READ_4(sc, JME_GHC) & ~GHC_TX_MAC_CLK_DIS);
		if ((sc->jme_flags & JME_FLAG_RXCLK) != 0)
			CSR_WRITE_4(sc, JME_GPREG1,
			    CSR_READ_4(sc, JME_GPREG1) & ~GPREG1_RX_MAC_CLK_DIS);
	}

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
	/* Unblock execution of task. */
	taskqueue_unblock(sc->jme_tq);
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);

	JME_UNLOCK(sc);
}
/*
 * Interrupt filter: claim the interrupt if any status bit is pending,
 * mask further interrupts, and defer the real work to jme_int_task().
 * An all-ones status indicates a detached/absent device.
 */
static int
jme_intr(void *arg)
{
	struct jme_softc *sc = arg;
	uint32_t status;

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return (FILTER_STRAY);

	/* Disable interrupts until the taskqueue handler runs. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);

	return (FILTER_HANDLED);
}
/*
 * Deferred interrupt handler (taskqueue context, interrupts still
 * masked by jme_intr()).  Acks and services Rx/Tx completion events,
 * kicks an empty Rx queue and re-enqueues itself while more work is
 * pending; interrupts are unmasked only once everything is drained.
 */
static void
jme_int_task(void *arg, int pending)
{
	struct jme_softc *sc;
	if_t ifp;
	uint32_t status;
	int more;

	sc = (struct jme_softc *)arg;
	ifp = sc->jme_ifp;

	JME_LOCK(sc);
	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if (sc->jme_morework != 0) {
		/*
		 * The previous pass hit the Rx processing limit; act as
		 * if the Rx coalescing interrupt fired again so the Rx
		 * ring is polled once more.
		 */
		sc->jme_morework = 0;
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO;
	}
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto done;
	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
	if ((status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) != 0)
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
	if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0)
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
	more = 0;
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0) {
			more = jme_rxintr(sc, sc->jme_process_limit);
			if (more != 0)
				sc->jme_morework = 1;
		}
		if ((status & INTR_RXQ_DESC_EMPTY) != 0) {
			/*
			 * Notify hardware availability of new Rx
			 * buffers.
			 * Reading RXCSR takes very long time under
			 * heavy load so cache RXCSR value and writes
			 * the ORed value with the kick command to
			 * the RXCSR. This saves one register access
			 * cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}
		if (!if_sendq_empty(ifp))
			jme_start_locked(ifp);
	}

	if (more != 0 || (CSR_READ_4(sc, JME_INTR_STATUS) & JME_INTRS) != 0) {
		/* More work pending; run again with interrupts still masked. */
		taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
		JME_UNLOCK(sc);
		return;
	}
done:
	JME_UNLOCK(sc);

	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
/*
 * Tx completion: walk the Tx ring from consumer to producer, reclaim
 * descriptors and mbufs of frames the hardware has finished with and
 * update output/error/collision counters.
 */
static void
jme_txeof(struct jme_softc *sc)
{
	if_t ifp;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	JME_LOCK_ASSERT(sc);

	ifp = sc->jme_ifp;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (; cons != sc->jme_cdata.jme_tx_prod;) {
		txd = &sc->jme_cdata.jme_txdesc[cons];
		status = le32toh(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if ((status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) != 0)
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		else {
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
			if ((status & JME_TD_COLLISION) != 0)
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK);
		}
		/*
		 * Only the first descriptor of multi-descriptor
		 * transmission is updated so driver have to skip entire
		 * chained buffers for the transmitted frame. In other
		 * words, JME_TD_OWN bit is valid only at the first
		 * descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, JME_TX_RING_CNT);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);

		KASSERT(txd->tx_m != NULL,
		    ("%s: freeing NULL mbuf!\n", __func__));
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
		    ("%s: Active Tx desc counter was garbled\n", __func__));
		txd->tx_ndesc = 0;
		/* Descriptors were freed, so transmission may resume. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}
	sc->jme_cdata.jme_tx_cons = cons;
	/* Unarm watchdog timer when there is no pending descriptors in queue. */
	if (sc->jme_cdata.jme_tx_cnt == 0)
		sc->jme_watchdog_timer = 0;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * Hand an Rx descriptor back to the hardware unchanged so its mbuf
 * is reused (used on Rx errors and mbuf allocation failures).
 */
static __inline void
jme_discard_rxbuf(struct jme_softc *sc, int cons)
{
	struct jme_desc *desc;

	desc = &sc->jme_rdata.jme_rx_ring[cons];
	/* Return ownership to the chip; buffer is a full cluster. */
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
	desc->buflen = htole32(MCLBYTES);
}
/*
 * Receive a single (possibly multi-segment) frame starting at the
 * current Rx consumer index: replace each filled cluster with a fresh
 * one, chain the received clusters into one mbuf chain, fix up the
 * first/last segment lengths, set checksum/VLAN metadata and pass the
 * packet to the stack.  Advances jme_rx_cons by the frame's segment
 * count on all paths.
 */
static void
jme_rxeof(struct jme_softc *sc)
{
	if_t ifp;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status;
	int cons, count, nsegs;

	JME_LOCK_ASSERT(sc);

	ifp = sc->jme_ifp;

	cons = sc->jme_cdata.jme_rx_cons;
	desc = &sc->jme_rdata.jme_rx_ring[cons];
	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	nsegs = JME_RX_NSEGS(status);
	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	if ((status & JME_RX_ERR_STAT) != 0) {
		/* Bad frame: count it and recycle all of its descriptors. */
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		jme_discard_rxbuf(sc, sc->jme_cdata.jme_rx_cons);
#ifdef JME_SHOW_ERRORS
		device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		sc->jme_cdata.jme_rx_cons += nsegs;
		sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
		return;
	}

	for (count = 0; count < nsegs; count++,
	    JME_DESC_INC(cons, JME_RX_RING_CNT)) {
		rxd = &sc->jme_cdata.jme_rxdesc[cons];
		mp = rxd->rx_m;
		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, rxd) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			/* Reuse buffer. */
			for (; count < nsegs; count++) {
				jme_discard_rxbuf(sc, cons);
				JME_DESC_INC(cons, JME_RX_RING_CNT);
			}
			/* Drop the partially assembled chain, if any. */
			if (sc->jme_cdata.jme_rxhead != NULL) {
				m_freem(sc->jme_cdata.jme_rxhead);
				JME_RXCHAIN_RESET(sc);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (sc->jme_cdata.jme_rxhead == NULL) {
			sc->jme_cdata.jme_rxhead = mp;
			sc->jme_cdata.jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			mp->m_flags &= ~M_PKTHDR;
			sc->jme_cdata.jme_rxtail->m_next = mp;
			sc->jme_cdata.jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->jme_cdata.jme_rxhead;
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = sc->jme_cdata.jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				    (MCLBYTES * (nsegs - 2)));
			} else
				m->m_len = sc->jme_cdata.jme_rxlen;
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for 10bytes auto padding which is used
			 * to align IP header on 32bit boundary. Also note,
			 * CRC bytes is automatically removed by the
			 * hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
			    (flags & JME_RD_IPV4) != 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if ((flags & JME_RD_IPCSUM) != 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				/* L4 checksum valid only for unfragmented TCP/UDP. */
				if (((flags & JME_RD_MORE_FRAG) == 0) &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				    (JME_RD_TCP | JME_RD_TCPCSUM) ||
				    (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				    (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			/* Check for VLAN tagged packets. */
			if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0 &&
			    (flags & JME_RD_VLAN_TAG) != 0) {
				m->m_pkthdr.ether_vtag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}

			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
			/* Pass it on.  Drop the lock across the stack call. */
			JME_UNLOCK(sc);
			if_input(ifp, m);
			JME_LOCK(sc);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(sc);
		}
	}

	sc->jme_cdata.jme_rx_cons += nsegs;
	sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
}
/*
 * Process up to 'count' received descriptors.  Returns EAGAIN when the
 * limit was exhausted with work still pending (caller sets jme_morework),
 * 0 when the ring was drained.
 */
static int
jme_rxintr(struct jme_softc *sc, int count)
{
	struct jme_desc *desc;
	int nsegs, prog, pktlen;

	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
	    sc->jme_cdata.jme_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; count > 0; prog++) {
		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
			break;
		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
			break;
		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
		/*
		 * Check number of segments against received bytes.
		 * Non-matching value would indicate that hardware
		 * is still trying to update Rx descriptors. I'm not
		 * sure whether this check is needed.
		 */
		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
		if (nsegs != howmany(pktlen, MCLBYTES))
			break;
		/*
		 * NOTE(review): prog is also incremented by the loop header,
		 * so it advances by two per frame; harmless, as it is only
		 * ever tested for being non-zero below.
		 */
		prog++;
		/* Received a frame. */
		jme_rxeof(sc);
		count -= nsegs;
	}

	if (prog > 0)
		bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
		    sc->jme_cdata.jme_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (count > 0 ? 0 : EAGAIN);
}
/*
 * Per-second callout: drive the MII state machine, opportunistically
 * reclaim finished Tx buffers, refresh hardware statistics and check
 * the Tx watchdog, then reschedule itself.
 */
static void
jme_tick(void *arg)
{
	struct jme_softc *sc;
	struct mii_data *mii;

	sc = (struct jme_softc *)arg;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_tick(mii);
	/*
	 * Reclaim Tx buffers that have been completed. It's not
	 * needed here but it would release allocated mbuf chains
	 * faster and limit the maximum delay to a hz.
	 */
	jme_txeof(sc);
	jme_stats_update(sc);
	jme_watchdog(sc);
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
}
/*
 * Hard-reset the controller.  Includes an explicit TX/RX MAC clock
 * resynchronization dance for chips that need it; the exact register
 * write order below is part of the workaround and must not change.
 */
static void
jme_reset(struct jme_softc *sc)
{
	uint32_t ghc, gpreg;

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/* Reset controller. */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	CSR_READ_4(sc, JME_GHC);	/* flush the write */
	DELAY(10);
	/*
	 * Workaround Rx FIFO overruns seen under certain conditions.
	 * Explicitly synchorize TX/RX clock. TX/RX clock should be
	 * enabled only after enabling TX/RX MACs.
	 */
	if ((sc->jme_flags & (JME_FLAG_TXCLK | JME_FLAG_RXCLK)) != 0) {
		/* Disable TX clock. */
		CSR_WRITE_4(sc, JME_GHC, GHC_RESET | GHC_TX_MAC_CLK_DIS);
		/* Disable RX clock. */
		gpreg = CSR_READ_4(sc, JME_GPREG1);
		CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS);
		gpreg = CSR_READ_4(sc, JME_GPREG1);
		/* De-assert RESET but still disable TX clock. */
		CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS);
		ghc = CSR_READ_4(sc, JME_GHC);
		/* Enable TX clock. */
		CSR_WRITE_4(sc, JME_GHC, ghc & ~GHC_TX_MAC_CLK_DIS);
		/* Enable RX clock. */
		CSR_WRITE_4(sc, JME_GPREG1, gpreg & ~GPREG1_RX_MAC_CLK_DIS);
		CSR_READ_4(sc, JME_GPREG1);
		/* Disable TX/RX clock again. */
		CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS);
		CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS);
	} else
		CSR_WRITE_4(sc, JME_GHC, 0);
	CSR_READ_4(sc, JME_GHC);
	DELAY(10);
}
/*
 * if_init entry point: serialized wrapper around jme_init_locked().
 */
static void
jme_init(void *xsc)
{
	struct jme_softc *sc = (struct jme_softc *)xsc;

	JME_LOCK(sc);
	jme_init_locked(sc);
	JME_UNLOCK(sc);
}
static void
jme_init_locked(struct jme_softc *sc)
{
if_t ifp;
struct mii_data *mii;
bus_addr_t paddr;
uint32_t reg;
int error;
JME_LOCK_ASSERT(sc);
ifp = sc->jme_ifp;
mii = device_get_softc(sc->jme_miibus);
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
return;
/*
* Cancel any pending I/O.
*/
jme_stop(sc);
/*
* Reset the chip to a known state.
*/
jme_reset(sc);
/* Init descriptors. */
error = jme_init_rx_ring(sc);
if (error != 0) {
device_printf(sc->jme_dev,
"%s: initialization failed: no memory for Rx buffers.\n",
__func__);
jme_stop(sc);
return;
}
jme_init_tx_ring(sc);
/* Initialize shadow status block. */
jme_init_ssb(sc);
/* Reprogram the station address. */
jme_set_macaddr(sc, if_getlladdr(sc->jme_ifp));
/*
* Configure Tx queue.
* Tx priority queue weight value : 0
* Tx FIFO threshold for processing next packet : 16QW
* Maximum Tx DMA length : 512
* Allow Tx DMA burst.
*/
sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
sc->jme_txcsr |= sc->jme_tx_dma_size;
sc->jme_txcsr |= TXCSR_DMA_BURST;
CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
/* Set Tx descriptor counter. */
CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);
/* Set Tx ring address to the hardware. */
paddr = JME_TX_RING_ADDR(sc, 0);
CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
/* Configure TxMAC parameters. */
reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
reg |= TXMAC_THRESH_1_PKT;
reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
CSR_WRITE_4(sc, JME_TXMAC, reg);
/*
* Configure Rx queue.
* FIFO full threshold for transmitting Tx pause packet : 128T
* FIFO threshold for processing next packet : 128QW
* Rx queue 0 select
* Max Rx DMA length : 128
* Rx descriptor retry : 32
* Rx descriptor retry time gap : 256ns
* Don't receive runt/bad frame.
*/
sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
/*
* Since Rx FIFO size is 4K bytes, receiving frames larger
* than 4K bytes will suffer from Rx FIFO overruns. So
* decrease FIFO threshold to reduce the FIFO overruns for
* frames larger than 4000 bytes.
* For best performance of standard MTU sized frames use
* maximum allowable FIFO threshold, 128QW. Note these do
* not hold on chip full mask version >=2. For these
* controllers 64QW and 128QW are not valid value.
*/
if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2)
sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
else {
if ((if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
ETHER_CRC_LEN) > JME_RX_FIFO_SIZE)
sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
else
sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
}
sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
/* Set Rx descriptor counter. */
CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
/* Set Rx ring address to the hardware. */
paddr = JME_RX_RING_ADDR(sc, 0);
CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
/* Clear receive filter. */
CSR_WRITE_4(sc, JME_RXMAC, 0);
/* Set up the receive filter. */
jme_set_filter(sc);
jme_set_vlan(sc);
/*
* Disable all WOL bits as WOL can interfere normal Rx
* operation. Also clear WOL detection status bits.
*/
reg = CSR_READ_4(sc, JME_PMCS);
reg &= ~PMCS_WOL_ENB_MASK;
CSR_WRITE_4(sc, JME_PMCS, reg);
reg = CSR_READ_4(sc, JME_RXMAC);
/*
* Pad 10bytes right before received frame. This will greatly
* help Rx performance on strict-alignment architectures as
* it does not need to copy the frame to align the payload.
*/
reg |= RXMAC_PAD_10BYTES;
if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
reg |= RXMAC_CSUM_ENB;
CSR_WRITE_4(sc, JME_RXMAC, reg);
/* Configure general purpose reg0 */
reg = CSR_READ_4(sc, JME_GPREG0);
reg &= ~GPREG0_PCC_UNIT_MASK;
/* Set PCC timer resolution to micro-seconds unit. */
reg |= GPREG0_PCC_UNIT_US;
/*
* Disable all shadow register posting as we have to read
* JME_INTR_STATUS register in jme_int_task. Also it seems
* that it's hard to synchronize interrupt status between
* hardware and software with shadow posting due to
* requirements of bus_dmamap_sync(9).
*/
reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
/* Disable posting of DW0. */
reg &= ~GPREG0_POST_DW0_ENB;
/* Clear PME message. */
reg &= ~GPREG0_PME_ENB;
/* Set PHY address. */
reg &= ~GPREG0_PHY_ADDR_MASK;
reg |= sc->jme_phyaddr;
CSR_WRITE_4(sc, JME_GPREG0, reg);
/* Configure Tx queue 0 packet completion coalescing. */
reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
PCCTX_COAL_TO_MASK;
reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
PCCTX_COAL_PKT_MASK;
reg |= PCCTX_COAL_TXQ0;
CSR_WRITE_4(sc, JME_PCCTX, reg);
/* Configure Rx queue 0 packet completion coalescing. */
reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
PCCRX_COAL_TO_MASK;
reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
PCCRX_COAL_PKT_MASK;
CSR_WRITE_4(sc, JME_PCCRX0, reg);
/*
* Configure PCD(Packet Completion Deferring). It seems PCD
* generates an interrupt when the time interval between two
* back-to-back incoming/outgoing packet is long enough for
* it to reach its timer value 0. The arrival of new packets
* after timer has started causes the PCD timer to restart.
* Unfortunately, it's not clear how PCD is useful at this
* moment, so just use the same of PCC parameters.
*/
if ((sc->jme_flags & JME_FLAG_PCCPCD) != 0) {
sc->jme_rx_pcd_to = sc->jme_rx_coal_to;
if (sc->jme_rx_coal_to > PCDRX_TO_MAX)
sc->jme_rx_pcd_to = PCDRX_TO_MAX;
sc->jme_tx_pcd_to = sc->jme_tx_coal_to;
if (sc->jme_tx_coal_to > PCDTX_TO_MAX)
sc->jme_tx_pcd_to = PCDTX_TO_MAX;
reg = sc->jme_rx_pcd_to << PCDRX0_TO_THROTTLE_SHIFT;
reg |= sc->jme_rx_pcd_to << PCDRX0_TO_SHIFT;
CSR_WRITE_4(sc, PCDRX_REG(0), reg);
reg = sc->jme_tx_pcd_to << PCDTX_TO_THROTTLE_SHIFT;
reg |= sc->jme_tx_pcd_to << PCDTX_TO_SHIFT;
CSR_WRITE_4(sc, JME_PCDTX, reg);
}
/* Configure shadow status block but don't enable posting. */
paddr = sc->jme_rdata.jme_ssb_block_paddr;
CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
/* Disable Timer 1 and Timer 2. */
CSR_WRITE_4(sc, JME_TIMER1, 0);
CSR_WRITE_4(sc, JME_TIMER2, 0);
/* Configure retry transmit period, retry limit value. */
CSR_WRITE_4(sc, JME_TXTRHD,
((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
TXTRHD_RT_PERIOD_MASK) |
((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
TXTRHD_RT_LIMIT_SHIFT));
/* Disable RSS. */
CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
/* Initialize the interrupt mask. */
CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
/*
* Enabling Tx/Rx DMA engines and Rx queue processing is
* done after detection of valid link in jme_link_task.
*/
sc->jme_flags &= ~JME_FLAG_LINK;
/* Set the current media. */
mii_mediachg(mii);
callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
}
/*
 * Take the interface down: mark it not-running, stop the callout and
 * both DMA engines, mask interrupts, reclaim completed Rx/Tx work and
 * free any mbufs still attached to the rings.  Called with the softc
 * lock held.
 */
static void
jme_stop(struct jme_softc *sc)
{
	if_t ifp;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	JME_LOCK_ASSERT(sc);
	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->jme_ifp;
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->jme_flags &= ~JME_FLAG_LINK;
	callout_stop(&sc->jme_tick_ch);
	sc->jme_watchdog_timer = 0;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/* Reclaim Rx/Tx buffers that have been completed. */
	jme_rxintr(sc, JME_RX_RING_CNT);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);
	jme_txeof(sc);
	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->jme_cdata.jme_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}
	/* Fold hardware counters in before they are lost. */
	jme_stats_update(sc);
	jme_stats_save(sc);
}
/*
 * Disable the transmitter and busy-wait (up to JME_TIMEOUT us) for the
 * enable bit to clear; complain if it never does.
 */
static void
jme_stop_tx(struct jme_softc *sc)
{
	uint32_t csr;
	int n;

	csr = CSR_READ_4(sc, JME_TXCSR);
	if ((csr & TXCSR_TX_ENB) == 0)
		return;
	CSR_WRITE_4(sc, JME_TXCSR, csr & ~TXCSR_TX_ENB);
	for (n = JME_TIMEOUT; n > 0; n--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
			break;
	}
	if (n == 0)
		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
}
/*
 * Disable the receiver and busy-wait (up to JME_TIMEOUT us) for the
 * enable bit to clear; complain if it never does.
 * Fix: corrected "recevier" typo in the timeout message.
 */
static void
jme_stop_rx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_RXCSR);
	if ((reg & RXCSR_RX_ENB) == 0)
		return;
	reg &= ~RXCSR_RX_ENB;
	CSR_WRITE_4(sc, JME_RXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
}
/*
 * Reset the Tx ring to its empty state: zero the descriptor memory,
 * clear all software Tx descriptor bookkeeping and reset the
 * producer/consumer indices, then push the ring to the device.
 */
static void
jme_init_tx_ring(struct jme_softc *sc)
{
	struct jme_ring_data *rd = &sc->jme_rdata;
	struct jme_txdesc *txd;
	int idx;

	sc->jme_cdata.jme_tx_prod = 0;
	sc->jme_cdata.jme_tx_cons = 0;
	sc->jme_cdata.jme_tx_cnt = 0;

	bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
	for (idx = 0; idx < JME_TX_RING_CNT; idx++) {
		txd = &sc->jme_cdata.jme_txdesc[idx];
		txd->tx_m = NULL;
		txd->tx_desc = &rd->jme_tx_ring[idx];
		txd->tx_ndesc = 0;
	}

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * Zero the shadow status block and make it visible to the device.
 */
static void
jme_init_ssb(struct jme_softc *sc)
{

	bzero(sc->jme_rdata.jme_ssb_block, JME_SSB_SIZE);
	bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * Reset the Rx ring: clear software state, zero the descriptor memory
 * and attach a fresh mbuf cluster to every descriptor.
 * Returns 0 on success or ENOBUFS if an mbuf allocation fails.
 */
static int
jme_init_rx_ring(struct jme_softc *sc)
{
	struct jme_ring_data *rd;
	struct jme_rxdesc *rxd;
	int i;

	sc->jme_cdata.jme_rx_cons = 0;
	JME_RXCHAIN_RESET(sc);
	sc->jme_morework = 0;

	rd = &sc->jme_rdata;
	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->jme_rx_ring[i];
		if (jme_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
	    sc->jme_cdata.jme_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
/*
 * Allocate and DMA-map a fresh mbuf cluster for the given Rx slot and
 * hand the descriptor back to the hardware.  The spare DMA map is used
 * for the load so the old buffer stays mapped until the new one is
 * known good; on failure the descriptor is left untouched.
 * Returns 0 on success, ENOBUFS on allocation or mapping failure.
 */
static int
jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
{
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	/*
	 * JMC250 has 64bit boundary alignment limitation so jme(4)
	 * takes advantage of 10 bytes padding feature of hardware
	 * in order not to copy entire frame to align IP header on
	 * 32bit boundary.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	if (bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_rx_tag,
	    sc->jme_cdata.jme_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rxd->rx_m != NULL) {
		/* Tear down the mapping of the buffer being replaced. */
		bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the freshly-loaded spare map into the slot. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
	sc->jme_cdata.jme_rx_sparemap = map;
	bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	/* Publish the new buffer and return ownership to the chip. */
	desc = rxd->rx_desc;
	desc->buflen = htole32(segs[0].ds_len);
	desc->addr_lo = htole32(JME_ADDR_LO(segs[0].ds_addr));
	desc->addr_hi = htole32(JME_ADDR_HI(segs[0].ds_addr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);

	return (0);
}
/*
 * Program hardware VLAN tag stripping to match the interface's
 * IFCAP_VLAN_HWTAGGING capability setting.
 */
static void
jme_set_vlan(struct jme_softc *sc)
{
	uint32_t rxmac;

	JME_LOCK_ASSERT(sc);

	rxmac = CSR_READ_4(sc, JME_RXMAC) & ~RXMAC_VLAN_ENB;
	if ((if_getcapenable(sc->jme_ifp) & IFCAP_VLAN_HWTAGGING) != 0)
		rxmac |= RXMAC_VLAN_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
}
/*
 * if_foreach_llmaddr() callback: fold one multicast link-level address
 * into the 64-bit hash table pointed to by 'arg'.
 */
static u_int
jme_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *mchash = arg;
	uint32_t bit;

	/* Hash index is the 6 low-order bits of the big-endian CRC. */
	bit = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) & 0x3f;
	/* Top bit picks the 32-bit word, low 5 bits the bit within it. */
	mchash[bit >> 5] |= 1 << (bit & 0x1f);
	return (1);
}
/*
 * Program the receive filter (unicast/broadcast/promiscuous/allmulti
 * and the 64-bit multicast hash table) from the interface flags and
 * multicast list.
 */
static void
jme_set_filter(struct jme_softc *sc)
{
	if_t ifp;
	uint32_t mchash[2];
	uint32_t rxcfg;

	JME_LOCK_ASSERT(sc);

	ifp = sc->jme_ifp;
	rxcfg = CSR_READ_4(sc, JME_RXMAC);
	rxcfg &= ~ (RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
	    RXMAC_ALLMULTI);
	/* Always accept frames destined to our station address. */
	rxcfg |= RXMAC_UNICAST;
	if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
		rxcfg |= RXMAC_BROADCAST;
	if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		/* Accept everything: saturate the hash table and return. */
		if ((if_getflags(ifp) & IFF_PROMISC) != 0)
			rxcfg |= RXMAC_PROMISC;
		if ((if_getflags(ifp) & IFF_ALLMULTI) != 0)
			rxcfg |= RXMAC_ALLMULTI;
		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
		return;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64 bit multicast hash table. The
	 * high order bits select the register, while the rest of the bits
	 * select the bit within the register.
	 */
	rxcfg |= RXMAC_MULTICAST;
	bzero(mchash, sizeof(mchash));
	if_foreach_llmaddr(ifp, jme_hash_maddr, &mchash);

	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
}
/*
 * Reset the hardware MIB counters and the software accumulator.
 * No-op on controllers without the hardware MIB block.
 */
static void
jme_stats_clear(struct jme_softc *sc)
{

	JME_LOCK_ASSERT(sc);

	if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
		return;

	/* Disable and clear counters. */
	CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
	/* Activate hw counters. */
	CSR_WRITE_4(sc, JME_STATCSR, 0);
	CSR_READ_4(sc, JME_STATCSR);
	bzero(&sc->jme_stats, sizeof(struct jme_hw_stats));
}
/*
 * Snapshot the accumulated statistics into jme_ostats (so they survive
 * a counter reset, e.g. across jme_stop) and halt/clear the hardware
 * counters.  No-op on controllers without the hardware MIB block.
 */
static void
jme_stats_save(struct jme_softc *sc)
{

	JME_LOCK_ASSERT(sc);

	if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
		return;
	/* Save current counters. */
	bcopy(&sc->jme_stats, &sc->jme_ostats, sizeof(struct jme_hw_stats));
	/* Disable and clear counters. */
	CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
}
/*
 * Read the hardware MIB counters into jme_stats and fold in the
 * previously saved totals (jme_ostats).  No-op on controllers without
 * the hardware MIB block.
 */
static void
jme_stats_update(struct jme_softc *sc)
{
	struct jme_hw_stats *stat, *ostat;
	uint32_t reg;

	JME_LOCK_ASSERT(sc);

	if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
		return;
	stat = &sc->jme_stats;
	ostat = &sc->jme_ostats;
	stat->tx_good_frames = CSR_READ_4(sc, JME_STAT_TXGOOD);
	stat->rx_good_frames = CSR_READ_4(sc, JME_STAT_RXGOOD);
	/* CRC and MII error counts share one register. */
	reg = CSR_READ_4(sc, JME_STAT_CRCMII);
	stat->rx_crc_errs = (reg & STAT_RX_CRC_ERR_MASK) >>
	    STAT_RX_CRC_ERR_SHIFT;
	stat->rx_mii_errs = (reg & STAT_RX_MII_ERR_MASK) >>
	    STAT_RX_MII_ERR_SHIFT;
	/* FIFO overflow and descriptor-empty counts share one register. */
	reg = CSR_READ_4(sc, JME_STAT_RXERR);
	stat->rx_fifo_oflows = (reg & STAT_RXERR_OFLOW_MASK) >>
	    STAT_RXERR_OFLOW_SHIFT;
	stat->rx_desc_empty = (reg & STAT_RXERR_MPTY_MASK) >>
	    STAT_RXERR_MPTY_SHIFT;
	reg = CSR_READ_4(sc, JME_STAT_FAIL);
	stat->rx_bad_frames = (reg & STAT_FAIL_RX_MASK) >> STAT_FAIL_RX_SHIFT;
	stat->tx_bad_frames = (reg & STAT_FAIL_TX_MASK) >> STAT_FAIL_TX_SHIFT;

	/* Account for previous counters. */
	stat->rx_good_frames += ostat->rx_good_frames;
	stat->rx_crc_errs += ostat->rx_crc_errs;
	stat->rx_mii_errs += ostat->rx_mii_errs;
	stat->rx_fifo_oflows += ostat->rx_fifo_oflows;
	stat->rx_desc_empty += ostat->rx_desc_empty;
	stat->rx_bad_frames += ostat->rx_bad_frames;
	stat->tx_good_frames += ostat->tx_good_frames;
	stat->tx_bad_frames += ostat->tx_bad_frames;
}
/*
 * Power down the PHY via BMCR.  On chip full-mask revisions >= 5 the
 * gigabit PHY additionally needs an explicit power-down through
 * JME_PHYPOWDN and the PE1 PCI config register (restricting it to D3).
 */
static void
jme_phy_down(struct jme_softc *sc)
{
	uint32_t reg;

	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, BMCR_PDOWN);
	if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) {
		reg = CSR_READ_4(sc, JME_PHYPOWDN);
		reg |= 0x0000000F;
		CSR_WRITE_4(sc, JME_PHYPOWDN, reg);
		reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
		reg &= ~PE1_GIGA_PDOWN_MASK;
		reg |= PE1_GIGA_PDOWN_D3;
		pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4);
	}
}
/*
 * Power the PHY back up (inverse of jme_phy_down): clear BMCR_PDOWN
 * and, on chip full-mask revisions >= 5, undo the JME_PHYPOWDN / PE1
 * gigabit power-down settings.
 */
static void
jme_phy_up(struct jme_softc *sc)
{
	uint32_t reg;
	uint16_t bmcr;

	bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR);
	bmcr &= ~BMCR_PDOWN;
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr);
	if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) {
		reg = CSR_READ_4(sc, JME_PHYPOWDN);
		reg &= ~0x0000000F;
		CSR_WRITE_4(sc, JME_PHYPOWDN, reg);
		reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
		reg &= ~PE1_GIGA_PDOWN_MASK;
		reg |= PE1_GIGA_PDOWN_DIS;
		pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4);
	}
}
/*
 * Generic sysctl handler for an int that must lie in [low, high].
 * Rejects out-of-range values with EINVAL; read-only access and
 * unchanged values pass straight through.
 */
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int err, val;

	if (arg1 == NULL)
		return (EINVAL);
	val = *(int *)arg1;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);
	if (val < low || val > high)
		return (EINVAL);
	*(int *)arg1 = val;
	return (0);
}
/* Bounds-checked sysctl handler for the Tx coalescing timeout. */
static int
sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    PCCTX_COAL_TO_MIN, PCCTX_COAL_TO_MAX));
}
/* Bounds-checked sysctl handler for the Tx coalescing packet count. */
static int
sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    PCCTX_COAL_PKT_MIN, PCCTX_COAL_PKT_MAX));
}
/* Bounds-checked sysctl handler for the Rx coalescing timeout. */
static int
sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    PCCRX_COAL_TO_MIN, PCCRX_COAL_TO_MAX));
}
/* Bounds-checked sysctl handler for the Rx coalescing packet count. */
static int
sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    PCCRX_COAL_PKT_MIN, PCCRX_COAL_PKT_MAX));
}
/* Bounds-checked sysctl handler for the per-interrupt Rx process limit. */
static int
sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    JME_PROC_MIN, JME_PROC_MAX));
}
diff --git a/sys/dev/le/lance.c b/sys/dev/le/lance.c
index 74ae09ee14c3..6eef8b5da052 100644
--- a/sys/dev/le/lance.c
+++ b/sys/dev/le/lance.c
@@ -1,819 +1,817 @@
/* $NetBSD: lance.c,v 1.34 2005/12/24 20:27:30 perry Exp $ */
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
* Simulation Facility, NASA Ames Research Center.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*-
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Ralph Campbell and Rick Macklem.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <machine/bus.h>
#include <dev/le/lancereg.h>
#include <dev/le/lancevar.h>
static void lance_start(if_t);
static void lance_stop(struct lance_softc *);
static void lance_init(void *);
static void lance_watchdog(void *s);
static int lance_mediachange(if_t);
static void lance_mediastatus(if_t, struct ifmediareq *);
static int lance_ioctl(if_t, u_long, caddr_t);
/*
 * Common attach-time configuration shared by all LANCE front-ends.
 *
 * Allocates the ifnet, registers the start/ioctl/init handlers, sets up
 * ifmedia (front-end supplied list, or a single "manual" entry), derives
 * the receive/transmit buffer counts from the discovered on-board memory
 * size and leaves the chip stopped.
 *
 * Returns 0 on success or ENXIO if the front-end has not initialized the
 * softc mutex yet.  Note that if_alloc(9) cannot fail, so its result is
 * not checked.
 */
int
lance_config(struct lance_softc *sc, const char* name, int unit)
{
	if_t ifp;
	int i, nbuf;

	if (LE_LOCK_INITIALIZED(sc) == 0)
		return (ENXIO);

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);

	callout_init_mtx(&sc->sc_wdog_ch, &sc->sc_mtx, 0);

	/* Initialize ifnet structure. */
	if_setsoftc(ifp, sc);
	if_initname(ifp, name, unit);
	if_setstartfn(ifp, lance_start);
	if_setioctlfn(ifp, lance_ioctl);
	if_setinitfn(ifp, lance_init);
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
#ifdef LANCE_REVC_BUG
	/* Rev. C parts cannot do multicast filtering; drop the capability. */
	if_setflagsbit(ifp, 0, IFF_MULTICAST);
#endif
	if_setbaudrate(ifp, IF_Mbps(10));
	if_setsendqlen(ifp, ifqmaxlen);
	if_setsendqready(ifp);

	/* Initialize ifmedia structures. */
	ifmedia_init(&sc->sc_media, 0, lance_mediachange, lance_mediastatus);
	if (sc->sc_supmedia != NULL) {
		for (i = 0; i < sc->sc_nsupmedia; i++)
			ifmedia_add(&sc->sc_media, sc->sc_supmedia[i], 0, NULL);
		ifmedia_set(&sc->sc_media, sc->sc_defaultmedia);
	} else {
		ifmedia_add(&sc->sc_media,
		    IFM_MAKEWORD(IFM_ETHER, IFM_MANUAL, 0, 0), 0, NULL);
		ifmedia_set(&sc->sc_media,
		    IFM_MAKEWORD(IFM_ETHER, IFM_MANUAL, 0, 0));
	}

	/*
	 * Size the rx/tx rings from the buffer memory size; the known
	 * power-of-two sizes use a fixed 4:1 rx:tx split.
	 */
	switch (sc->sc_memsize) {
	case 8192:
		sc->sc_nrbuf = 4;
		sc->sc_ntbuf = 1;
		break;
	case 16384:
		sc->sc_nrbuf = 8;
		sc->sc_ntbuf = 2;
		break;
	case 32768:
		sc->sc_nrbuf = 16;
		sc->sc_ntbuf = 4;
		break;
	case 65536:
		sc->sc_nrbuf = 32;
		sc->sc_ntbuf = 8;
		break;
	case 131072:
		sc->sc_nrbuf = 64;
		sc->sc_ntbuf = 16;
		break;
	case 262144:
		sc->sc_nrbuf = 128;
		sc->sc_ntbuf = 32;
		break;
	default:
		/* weird memory size; cope with it */
		nbuf = sc->sc_memsize / LEBLEN;
		sc->sc_ntbuf = nbuf / 5;
		sc->sc_nrbuf = nbuf - sc->sc_ntbuf;
	}

	if_printf(ifp, "%d receive buffers, %d transmit buffers\n",
	    sc->sc_nrbuf, sc->sc_ntbuf);

	/* Make sure the chip is stopped. */
	LE_LOCK(sc);
	lance_stop(sc);
	LE_UNLOCK(sc);

	return (0);
}
/*
 * Final MI attach step: register the ifnet with the network stack and
 * advertise the VLAN-MTU capability.  Called by the front-end once the
 * MAC address (sc_enaddr) is known.
 */
void
lance_attach(struct lance_softc *sc)
{
	if_t ifp = sc->sc_ifp;

	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_enaddr);

	/* Claim 802.1q capability. */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
	if_setcapenablebit(ifp, IFCAP_VLAN_MTU, 0);

	/* Driver is scheduled for removal in FreeBSD 15. */
	gone_in(15, "le: 10/100 NIC no longer needed for Qemu/MIPS");
}
/*
 * Undo lance_attach()/lance_config(): stop the chip, drain the watchdog
 * callout, detach from the network stack and free the ifnet.
 */
void
lance_detach(struct lance_softc *sc)
{
	if_t ifp = sc->sc_ifp;

	LE_LOCK(sc);
	lance_stop(sc);
	LE_UNLOCK(sc);
	/* Drain (not just stop) so a running watchdog finishes first. */
	callout_drain(&sc->sc_wdog_ch);
	ether_ifdetach(ifp);
	if_free(ifp);
}
/*
 * Bus suspend hook: just stop the chip; state is rebuilt on resume.
 */
void
lance_suspend(struct lance_softc *sc)
{

	LE_LOCK(sc);
	lance_stop(sc);
	LE_UNLOCK(sc);
}
/*
 * Bus resume hook: reinitialize the chip, but only if the interface
 * was administratively up.
 */
void
lance_resume(struct lance_softc *sc)
{

	LE_LOCK(sc);
	if (if_getflags(sc->sc_ifp) & IFF_UP)
		lance_init_locked(sc);
	LE_UNLOCK(sc);
}
/*
 * if_start handler: locked trampoline into the front-end's
 * start routine (sc_start_locked).
 */
static void
lance_start(if_t ifp)
{
	struct lance_softc *sc = if_getsoftc(ifp);

	LE_LOCK(sc);
	(*sc->sc_start_locked)(sc);
	LE_UNLOCK(sc);
}
/*
 * Halt the chip and mark the interface stopped.  Caller must hold
 * the softc lock.
 */
static void
lance_stop(struct lance_softc *sc)
{
	if_t ifp = sc->sc_ifp;

	LE_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	callout_stop(&sc->sc_wdog_ch);
	sc->sc_wdog_timer = 0;

	/* Setting the STOP bit in CSR0 halts the chip. */
	(*sc->sc_wrcsr)(sc, LE_CSR0, LE_C0_STOP);
}
/*
 * if_init handler: locked wrapper around lance_init_locked().
 */
static void
lance_init(void *xsc)
{
	struct lance_softc *sc = (struct lance_softc *)xsc;

	LE_LOCK(sc);
	lance_init_locked(sc);
	LE_UNLOCK(sc);
}
/*
* Initialization of interface; set up initialization block
* and transmit/receive descriptor rings.
*/
void
lance_init_locked(struct lance_softc *sc)
{
	if_t ifp = sc->sc_ifp;
	u_long a;
	int timo;

	LE_LOCK_ASSERT(sc, MA_OWNED);

	/* Stop the chip before reprogramming it. */
	(*sc->sc_wrcsr)(sc, LE_CSR0, LE_C0_STOP);
	DELAY(100);

	/* Newer LANCE chips have a reset register. */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

	/* Set the correct byte swapping mode, etc. */
	(*sc->sc_wrcsr)(sc, LE_CSR3, sc->sc_conf3);

	/* Set the current media. This may require the chip to be stopped. */
	if (sc->sc_mediachange)
		(void)(*sc->sc_mediachange)(sc);

	/*
	 * Update our private copy of the Ethernet address.
	 * We NEED the copy so we can ensure its alignment!
	 */
	memcpy(sc->sc_enaddr, if_getlladdr(ifp), ETHER_ADDR_LEN);

	/* Set up LANCE init block. */
	(*sc->sc_meminit)(sc);

	/* Give LANCE the physical address of its init block. */
	a = sc->sc_addr + LE_INITADDR(sc);
	/* CSR1 takes the low 16 bits of the address, CSR2 the high bits. */
	(*sc->sc_wrcsr)(sc, LE_CSR1, a & 0xffff);
	(*sc->sc_wrcsr)(sc, LE_CSR2, a >> 16);

	/* Try to initialize the LANCE. */
	DELAY(100);
	(*sc->sc_wrcsr)(sc, LE_CSR0, LE_C0_INIT);

	/* Wait for initialization to finish (IDON bit in CSR0). */
	for (timo = 100000; timo; timo--)
		if ((*sc->sc_rdcsr)(sc, LE_CSR0) & LE_C0_IDON)
			break;

	if ((*sc->sc_rdcsr)(sc, LE_CSR0) & LE_C0_IDON) {
		/* Start the LANCE with interrupts enabled. */
		(*sc->sc_wrcsr)(sc, LE_CSR0, LE_C0_INEA | LE_C0_STRT);
		if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		sc->sc_wdog_timer = 0;
		callout_reset(&sc->sc_wdog_ch, hz, lance_watchdog, sc);
		/* Kick the transmit path in case packets queued while down. */
		(*sc->sc_start_locked)(sc);
	} else
		if_printf(ifp, "controller failed to initialize\n");

	/* Front-end post-init hook (e.g. extra board-specific setup). */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);
}
/*
* Routine to copy from mbuf chain to transmit buffer in
* network buffer memory.
*/
int
lance_put(struct lance_softc *sc, int boff, struct mbuf *m)
{
	struct mbuf *next;
	int seglen, total;

	LE_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Walk the chain, copying each non-empty segment into the chip
	 * buffer and freeing every mbuf as it is consumed.
	 */
	total = 0;
	while (m != NULL) {
		seglen = m->m_len;
		if (seglen > 0) {
			(*sc->sc_copytobuf)(sc, mtod(m, caddr_t), boff,
			    seglen);
			boff += seglen;
			total += seglen;
		}
		next = m_free(m);
		m = next;
	}

	/* Pad short frames out to the minimum Ethernet frame size. */
	if (total < LEMINSIZE) {
		(*sc->sc_zerobuf)(sc, boff, LEMINSIZE - total);
		total = LEMINSIZE;
	}

	return (total);
}
/*
* Pull data off an interface.
* Len is length of data, with local net header stripped.
* We copy the data into mbufs. When full cluster sized units are present
* we copy into clusters.
*/
struct mbuf *
lance_get(struct lance_softc *sc, int boff, int totlen)
{
	if_t ifp = sc->sc_ifp;
	struct mbuf *m, *m0, *newm;
	caddr_t newdata;
	int len;

	/* Reject runts and frames too large for one receive buffer. */
	if (totlen <= ETHER_HDR_LEN || totlen > LEBLEN - ETHER_CRC_LEN) {
#ifdef LEDEBUG
		if_printf(ifp, "invalid packet size %d; dropping\n", totlen);
#endif
		return (NULL);
	}

	MGETHDR(m0, M_NOWAIT, MT_DATA);
	if (m0 == NULL)
		return (NULL);
	m0->m_pkthdr.rcvif = ifp;
	m0->m_pkthdr.len = totlen;
	len = MHLEN;
	m = m0;

	while (totlen > 0) {
		/* Use a cluster when a full cluster's worth remains. */
		if (totlen >= MINCLSIZE) {
			if (!(MCLGET(m, M_NOWAIT)))
				goto bad;
			len = MCLBYTES;
		}

		if (m == m0) {
			/*
			 * Shift the start of the first mbuf so that the
			 * payload after the Ethernet header is aligned.
			 */
			newdata = (caddr_t)
			    ALIGN(m->m_data + ETHER_HDR_LEN) - ETHER_HDR_LEN;
			len -= newdata - m->m_data;
			m->m_data = newdata;
		}

		m->m_len = len = min(totlen, len);
		(*sc->sc_copyfrombuf)(sc, mtod(m, caddr_t), boff, len);
		boff += len;

		totlen -= len;
		if (totlen > 0) {
			/* Chain another mbuf for the remainder. */
			MGET(newm, M_NOWAIT, MT_DATA);
			if (newm == NULL)
				goto bad;
			len = MLEN;
			m = m->m_next = newm;
		}
	}

	return (m0);

 bad:
	/* Allocation failed mid-chain; drop everything gathered so far. */
	m_freem(m0);
	return (NULL);
}
/*
 * Per-second watchdog callout.  sc_wdog_timer counts down while a
 * transmission is pending; reaching zero means the chip wedged and
 * the interface is reinitialized.
 */
static void
lance_watchdog(void *xsc)
{
	struct lance_softc *sc = (struct lance_softc *)xsc;
	if_t ifp = sc->sc_ifp;

	LE_LOCK_ASSERT(sc, MA_OWNED);

	/* Timer disarmed (0) or not yet expired: just rearm the callout. */
	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) {
		callout_reset(&sc->sc_wdog_ch, hz, lance_watchdog, sc);
		return;
	}

	if_printf(ifp, "device timeout\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	lance_init_locked(sc);
}
/*
 * ifmedia change callback.  Always returns 0; a front-end without a
 * sc_mediachange hook has nothing to switch.
 */
static int
lance_mediachange(if_t ifp)
{
	struct lance_softc *sc = if_getsoftc(ifp);

	if (sc->sc_mediachange) {
		/*
		 * For setting the port in LE_CSR15 the PCnet chips must
		 * be powered down or stopped and unlike documented may
		 * not take effect without an initialization. So don't
		 * invoke (*sc_mediachange) directly here but go through
		 * lance_init_locked().
		 */
		LE_LOCK(sc);
		lance_stop(sc);
		lance_init_locked(sc);
		if (!if_sendq_empty(ifp))
			(*sc->sc_start_locked)(sc);
		LE_UNLOCK(sc);
	}
	return (0);
}
/*
 * ifmedia status callback: report link validity/activity and let the
 * front-end fill in media details if it has a sc_mediastatus hook.
 */
static void
lance_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
	struct lance_softc *sc = if_getsoftc(ifp);

	LE_LOCK(sc);
	/* No status to report while the interface is down. */
	if (!(if_getflags(ifp) & IFF_UP)) {
		LE_UNLOCK(sc);
		return;
	}

	ifmr->ifm_status = IFM_AVALID;
	if (sc->sc_flags & LE_CARRIER)
		ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->sc_mediastatus)
		(*sc->sc_mediastatus)(sc, ifmr);
	LE_UNLOCK(sc);
}
/*
* Process an ioctl request.
*/
static int
lance_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct lance_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		LE_LOCK(sc);
		/* Track IFF_PROMISC transitions; each requires a re-init. */
		if (if_getflags(ifp) & IFF_PROMISC) {
			if (!(sc->sc_flags & LE_PROMISC)) {
				sc->sc_flags |= LE_PROMISC;
				lance_init_locked(sc);
			}
		} else if (sc->sc_flags & LE_PROMISC) {
			sc->sc_flags &= ~LE_PROMISC;
			lance_init_locked(sc);
		}

		/* Likewise for IFF_ALLMULTI transitions. */
		if ((if_getflags(ifp) & IFF_ALLMULTI) &&
		    !(sc->sc_flags & LE_ALLMULTI)) {
			sc->sc_flags |= LE_ALLMULTI;
			lance_init_locked(sc);
		} else if (!(if_getflags(ifp) & IFF_ALLMULTI) &&
		    (sc->sc_flags & LE_ALLMULTI)) {
			sc->sc_flags &= ~LE_ALLMULTI;
			lance_init_locked(sc);
		}

		if (!(if_getflags(ifp) & IFF_UP) &&
		    if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			lance_stop(sc);
		} else if (if_getflags(ifp) & IFF_UP &&
		    !(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			lance_init_locked(sc);
		}
#ifdef LEDEBUG
		/* Mirror IFF_DEBUG into the driver's own debug flag. */
		if (if_getflags(ifp) & IFF_DEBUG)
			sc->sc_flags |= LE_DEBUG;
		else
			sc->sc_flags &= ~LE_DEBUG;
#endif
		LE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		LE_LOCK(sc);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			lance_init_locked(sc);
		LE_UNLOCK(sc);
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
/*
 * Context passed to lance_hash_maddr() via if_foreach_llmaddr():
 * the softc plus the 4 x 16-bit logical address filter words.
 */
struct lance_hash_maddr_ctx {
	struct lance_softc *sc;
	uint16_t *af;
};
/*
 * if_foreach_llmaddr() callback: fold one link-level multicast address
 * into the 64-bit logical address filter carried in ctx->af.
 * Always returns 1 so the caller counts addresses processed.
 *
 * (The softc in the context is not needed here; the previously declared
 * local copy was unused and has been removed.)
 */
static u_int
lance_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct lance_hash_maddr_ctx *ctx = arg;
	uint32_t crc;

	crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN);
	/* Just want the 6 most significant bits. */
	crc >>= 26;
	/* Upper two bits select the 16-bit word, lower four bits the bit. */
	ctx->af[crc >> 4] |= LE_HTOLE16(1 << (crc & 0xf));
	return (1);
}
/*
* Set up the logical address filter.
*/
void
lance_setladrf(struct lance_softc *sc, uint16_t *af)
{
	if_t ifp = sc->sc_ifp;
	struct lance_hash_maddr_ctx ctx = { sc, af };

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 6 bits as an
	 * index into the 64 bit logical address filter. The high order bit
	 * selects the word, while the rest of the bits select the bit within
	 * the word.
	 */

	/* Promiscuous/allmulti: accept everything by setting all 64 bits. */
	if (if_getflags(ifp) & IFF_PROMISC || sc->sc_flags & LE_ALLMULTI) {
		af[0] = af[1] = af[2] = af[3] = 0xffff;
		return;
	}

	/* Otherwise start empty and hash in each multicast address. */
	af[0] = af[1] = af[2] = af[3] = 0x0000;
	if_foreach_llmaddr(ifp, lance_hash_maddr, &ctx);
}
/*
* Routines for accessing the transmit and receive buffers.
* The various CPU and adapter configurations supported by this
* driver require three different access methods for buffers
* and descriptors:
* (1) contig (contiguous data; no padding),
* (2) gap2 (two bytes of data followed by two bytes of padding),
* (3) gap16 (16 bytes of data followed by 16 bytes of padding).
*/
/*
* contig: contiguous data with no padding.
*
* Buffers may have any alignment.
*/
void
lance_copytobuf_contig(struct lance_softc *sc, void *from, int boff, int len)
{
	volatile caddr_t base = sc->sc_mem;

	/* Data layout is contiguous, so a plain memcpy() suffices. */
	memcpy(base + boff, from, len);
}
void
lance_copyfrombuf_contig(struct lance_softc *sc, void *to, int boff, int len)
{
	volatile caddr_t base = sc->sc_mem;

	/* Data layout is contiguous, so a plain memcpy() suffices. */
	memcpy(to, base + boff, len);
}
void
lance_zerobuf_contig(struct lance_softc *sc, int boff, int len)
{
	volatile caddr_t base = sc->sc_mem;

	/* Contiguous layout: clear the whole range with one memset(). */
	memset(base + boff, 0, len);
}
#if 0
/*
* Examples only; duplicate these and tweak (if necessary) in
* machine-specific front-ends.
*/
/*
* gap2: two bytes of data followed by two bytes of pad.
*
* Buffers must be 4-byte aligned. The code doesn't worry about
* doing an extra byte.
*/
/* Copy host data into a gap2 buffer (2 data bytes, then 2 pad bytes). */
static void
lance_copytobuf_gap2(struct lance_softc *sc, void *fromv, int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t from = fromv;
	volatile uint16_t *bptr;

	if (boff & 0x1) {
		/* Handle unaligned first byte. */
		bptr = ((volatile uint16_t *)buf) + (boff - 1);
		*bptr = (*from++ << 8) | (*bptr & 0xff);
		bptr += 2;
		len--;
	} else
		bptr = ((volatile uint16_t *)buf) + boff;
	/* Pack two bytes per word; += 2 skips each pad word. */
	while (len > 1) {
		*bptr = (from[1] << 8) | (from[0] & 0xff);
		bptr += 2;
		from += 2;
		len -= 2;
	}
	/* Trailing odd byte goes into the low half of the last word. */
	if (len == 1)
		*bptr = (uint16_t)*from;
}
/* Copy data out of a gap2 buffer (2 data bytes, then 2 pad bytes). */
static void
lance_copyfrombuf_gap2(struct lance_softc *sc, void *tov, int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t to = tov;
	volatile uint16_t *bptr;
	uint16_t tmp;

	if (boff & 0x1) {
		/* Handle unaligned first byte. */
		bptr = ((volatile uint16_t *)buf) + (boff - 1);
		*to++ = (*bptr >> 8) & 0xff;
		bptr += 2;
		len--;
	} else
		bptr = ((volatile uint16_t *)buf) + boff;
	/* Unpack two bytes per word; += 2 skips each pad word. */
	while (len > 1) {
		tmp = *bptr;
		*to++ = tmp & 0xff;
		*to++ = (tmp >> 8) & 0xff;
		bptr += 2;
		len -= 2;
	}
	if (len == 1)
		*to = *bptr & 0xff;
}
/* Zero a range of a gap2 buffer (2 data bytes, then 2 pad bytes). */
static void
lance_zerobuf_gap2(struct lance_softc *sc, int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	volatile uint16_t *bptr;

	if ((unsigned)boff & 0x1) {
		/* Clear only the high byte of an unaligned first word. */
		bptr = ((volatile uint16_t *)buf) + (boff - 1);
		*bptr &= 0xff;
		bptr += 2;
		len--;
	} else
		bptr = ((volatile uint16_t *)buf) + boff;
	while (len > 0) {
		*bptr = 0;
		bptr += 2;
		len -= 2;
	}
}
/*
* gap16: 16 bytes of data followed by 16 bytes of pad.
*
* Buffers must be 32-byte aligned.
*/
/* Copy host data into a gap16 buffer (16 data bytes, then 16 pad bytes). */
static void
lance_copytobuf_gap16(struct lance_softc *sc, void *fromv, int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t bptr, from = fromv;
	int xfer;

	/* Map the data offset to its 32-byte chunk; keep the sub-offset. */
	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
	xfer = min(len, 16 - boff);
	while (len > 0) {
		memcpy(bptr + boff, from, xfer);
		from += xfer;
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = min(len, 16);
	}
}
/* Copy data out of a gap16 buffer (16 data bytes, then 16 pad bytes). */
static void
lance_copyfrombuf_gap16(struct lance_softc *sc, void *tov, int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t bptr, to = tov;
	int xfer;

	/* Map the data offset to its 32-byte chunk; keep the sub-offset. */
	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
	xfer = min(len, 16 - boff);
	while (len > 0) {
		memcpy(to, bptr + boff, xfer);
		to += xfer;
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = min(len, 16);
	}
}
/* Zero a range of a gap16 buffer (16 data bytes, then 16 pad bytes). */
static void
lance_zerobuf_gap16(struct lance_softc *sc, int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t bptr;
	int xfer;

	/* Map the data offset to its 32-byte chunk; keep the sub-offset. */
	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
	xfer = min(len, 16 - boff);
	while (len > 0) {
		memset(bptr + boff, 0, xfer);
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = min(len, 16);
	}
}
#endif /* Example only */
diff --git a/sys/dev/lge/if_lge.c b/sys/dev/lge/if_lge.c
index d77866ee3cad..dfcaa01d366b 100644
--- a/sys/dev/lge/if_lge.c
+++ b/sys/dev/lge/if_lge.c
@@ -1,1537 +1,1532 @@
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) 2001 Wind River Systems
* Copyright (c) 1997, 1998, 1999, 2000, 2001
* Bill Paul <william.paul@windriver.com>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
/*
* Level 1 LXT1001 gigabit ethernet driver for FreeBSD. Public
* documentation not available, but ask me nicely.
*
* The Level 1 chip is used on some D-Link, SMC and Addtron NICs.
* It's a 64-bit PCI part that supports TCP/IP checksum offload,
* VLAN tagging/insertion, GMII and TBI (1000baseX) ports. There
* are three supported methods for data transfer between host and
* NIC: programmed I/O, traditional scatter/gather DMA and Packet
* Propulsion Technology (tm) DMA. The latter mechanism is a form
* of double buffer DMA where the packet data is copied to a
* pre-allocated DMA buffer who's physical address has been loaded
* into a table at device initialization time. The rationale is that
* the virtual to physical address translation needed for normal
* scatter/gather DMA is more expensive than the data copy needed
* for double buffering. This may be true in Windows NT and the like,
* but it isn't true for us, at least on the x86 arch. This driver
* uses the scatter/gather I/O method for both TX and RX.
*
* The LXT1001 only supports TCP/IP checksum offload on receive.
* Also, the VLAN tagging is done using a 16-entry table which allows
* the chip to perform hardware filtering based on VLAN tags. Sadly,
* our vlan support doesn't currently play well with this kind of
* hardware support.
*
* Special thanks to:
* - Jeff James at Intel, for arranging to have the LXT1001 manual
* released (at long last)
* - Beny Chen at D-Link, for actually sending it to me
* - Brad Short and Keith Alexis at SMC, for sending me sample
* SMC9462SX and SMC9462TX adapters for testing
* - Paul Saab at Y!, for not killing me (though it remains to be seen
* if in fact he did me much of a favor)
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <vm/vm.h> /* for vtophys */
#include <vm/pmap.h> /* for vtophys */
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#define LGE_USEIOSPACE
#include <dev/lge/if_lgereg.h>
/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
/*
* Various supported device vendors/types and their names.
*/
static const struct lge_type lge_devs[] = {
	{ LGE_VENDORID, LGE_DEVICEID, "Level 1 Gigabit Ethernet" },
	{ 0, 0, NULL }		/* list terminator */
};
static int lge_probe(device_t);
static int lge_attach(device_t);
static int lge_detach(device_t);
static int lge_alloc_jumbo_mem(struct lge_softc *);
static void lge_free_jumbo_mem(struct lge_softc *);
static void *lge_jalloc(struct lge_softc *);
static void lge_jfree(struct mbuf *);
static int lge_newbuf(struct lge_softc *, struct lge_rx_desc *, struct mbuf *);
static int lge_encap(struct lge_softc *, struct mbuf *, u_int32_t *);
static void lge_rxeof(struct lge_softc *, int);
static void lge_rxeoc(struct lge_softc *);
static void lge_txeof(struct lge_softc *);
static void lge_intr(void *);
static void lge_tick(void *);
static void lge_start(if_t);
static void lge_start_locked(if_t);
static int lge_ioctl(if_t, u_long, caddr_t);
static void lge_init(void *);
static void lge_init_locked(struct lge_softc *);
static void lge_stop(struct lge_softc *);
static void lge_watchdog(struct lge_softc *);
static int lge_shutdown(device_t);
static int lge_ifmedia_upd(if_t);
static void lge_ifmedia_upd_locked(if_t);
static void lge_ifmedia_sts(if_t, struct ifmediareq *);
static void lge_eeprom_getword(struct lge_softc *, int, u_int16_t *);
static void lge_read_eeprom(struct lge_softc *, caddr_t, int, int, int);
static int lge_miibus_readreg(device_t, int, int);
static int lge_miibus_writereg(device_t, int, int, int);
static void lge_miibus_statchg(device_t);
static void lge_setmulti(struct lge_softc *);
static void lge_reset(struct lge_softc *);
static int lge_list_rx_init(struct lge_softc *);
static int lge_list_tx_init(struct lge_softc *);
#ifdef LGE_USEIOSPACE
#define LGE_RES SYS_RES_IOPORT
#define LGE_RID LGE_PCI_LOIO
#else
#define LGE_RES SYS_RES_MEMORY
#define LGE_RID LGE_PCI_LOMEM
#endif
/* Newbus method table: device life cycle plus MII bus accessors. */
static device_method_t lge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, lge_probe),
	DEVMETHOD(device_attach, lge_attach),
	DEVMETHOD(device_detach, lge_detach),
	DEVMETHOD(device_shutdown, lge_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg, lge_miibus_readreg),
	DEVMETHOD(miibus_writereg, lge_miibus_writereg),
	DEVMETHOD(miibus_statchg, lge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t lge_driver = {
	"lge",
	lge_methods,
	sizeof(struct lge_softc)
};

/* Register on the PCI bus and hang a miibus child off this driver. */
DRIVER_MODULE(lge, pci, lge_driver, 0, 0);
DRIVER_MODULE(miibus, lge, miibus_driver, 0, 0);
MODULE_DEPEND(lge, pci, 1, 1, 1);
MODULE_DEPEND(lge, ether, 1, 1, 1);
MODULE_DEPEND(lge, miibus, 1, 1, 1);
/* Read-modify-write helpers to set/clear bits in a CSR register. */
#define LGE_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define LGE_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

/* Set/clear bits in the MEAR register (expects a local 'sc' in scope). */
#define SIO_SET(x)					\
	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) | x)

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) & ~x)
/*
* Read a word of data stored in the EEPROM at address 'addr.'
*/
static void
lge_eeprom_getword(struct lge_softc *sc, int addr, u_int16_t *dest)
{
	u_int32_t data;
	int tries;

	/* Issue a single-word read; two 16-bit words share one access. */
	CSR_WRITE_4(sc, LGE_EECTL, LGE_EECTL_CMD_READ|
	    LGE_EECTL_SINGLEACCESS|((addr >> 1) << 8));

	/* Poll until the command bit self-clears. */
	for (tries = 0; tries < LGE_TIMEOUT; tries++)
		if ((CSR_READ_4(sc, LGE_EECTL) & LGE_EECTL_CMD_READ) == 0)
			break;

	if (tries == LGE_TIMEOUT) {
		device_printf(sc->lge_dev, "EEPROM read timed out\n");
		return;
	}

	data = CSR_READ_4(sc, LGE_EEDATA);

	/* Odd word addresses live in the high half of the data register. */
	*dest = (addr & 1) ? ((data >> 16) & 0xFFFF) : (data & 0xFFFF);
}
/*
* Read a sequence of words from the EEPROM.
*/
static void
lge_read_eeprom(struct lge_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	u_int16_t *slot, word = 0;
	int idx;

	/* Read 'cnt' consecutive words, optionally byte-swapping each. */
	for (idx = 0; idx < cnt; idx++) {
		lge_eeprom_getword(sc, off + idx, &word);
		slot = (u_int16_t *)(dest + (idx * 2));
		*slot = swap ? ntohs(word) : word;
	}
}
/*
 * MII read: returns the 16-bit register value, or 0 on timeout or for
 * the hidden internal PHY at address 0 when a GMII PHY is present.
 */
static int
lge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct lge_softc *sc;
	int i;

	sc = device_get_softc(dev);

	/*
	 * If we have a non-PCS PHY, pretend that the internal
	 * autoneg stuff at PHY address 0 isn't there so that
	 * the miibus code will find only the GMII PHY.
	 */
	if (sc->lge_pcs == 0 && phy == 0)
		return(0);

	CSR_WRITE_4(sc, LGE_GMIICTL, (phy << 8) | reg | LGE_GMIICMD_READ);

	/* Wait for the command/busy bit to clear. */
	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
			break;

	if (i == LGE_TIMEOUT) {
		device_printf(sc->lge_dev, "PHY read timed out\n");
		return(0);
	}

	/* Read data comes back in the upper 16 bits of the register. */
	return(CSR_READ_4(sc, LGE_GMIICTL) >> 16);
}
/*
 * MII write: write a 16-bit value to a PHY register.  Always returns 0
 * (miibus ignores the result); a timeout is only logged.
 */
static int
lge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct lge_softc *sc;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, LGE_GMIICTL,
	    (data << 16) | (phy << 8) | reg | LGE_GMIICMD_WRITE);

	/* Wait for the command/busy bit to clear. */
	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
			break;

	if (i == LGE_TIMEOUT) {
		device_printf(sc->lge_dev, "PHY write timed out\n");
		return(0);
	}

	return(0);
}
/*
 * MII status-change callback: mirror the negotiated speed and duplex
 * from the PHY into the MAC's GMII mode register.
 */
static void
lge_miibus_statchg(device_t dev)
{
	struct lge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->lge_miibus);

	/* Clear the speed field before setting the new speed bits. */
	LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_SPEED);
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
		break;
	case IFM_100_TX:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_100);
		break;
	case IFM_10_T:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_10);
		break;
	default:
		/*
		 * Choose something, even if it's wrong. Clearing
		 * all the bits will hose autoneg on the internal
		 * PHY.
		 */
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
		break;
	}

	/* Track full/half duplex in the FDX bit. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
	} else {
		LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
	}

	return;
}
/*
 * if_foreach_llmaddr() callback: fold one link-level multicast address
 * into the two 32-bit hash filter words in 'arg'.  Always returns 1 so
 * the caller counts addresses processed.
 *
 * The shifts use an unsigned constant: with a signed 1, "1 << 31"
 * (h == 31 or h == 63) is signed-overflow undefined behavior in C.
 */
static u_int
lge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int count)
{
	uint32_t h, *hashes = arg;

	/* Top 6 bits of the big-endian CRC select one of 64 filter bits. */
	h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
	if (h < 32)
		hashes[0] |= (1U << h);
	else
		hashes[1] |= (1U << (h - 32));
	return (1);
}
/*
 * Program the 64-bit multicast hash filter (MAR0/MAR1) from the
 * interface's multicast list.  Caller must hold the softc lock.
 */
static void
lge_setmulti(struct lge_softc *sc)
{
	if_t ifp;
	uint32_t hashes[2] = { 0, 0 };

	ifp = sc->lge_ifp;
	LGE_LOCK_ASSERT(sc);

	/* Make sure multicast hash table is enabled. */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_MCAST);

	/* Allmulti/promisc: accept everything by setting all 64 bits. */
	if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) {
		CSR_WRITE_4(sc, LGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, LGE_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, LGE_MAR0, 0);
	CSR_WRITE_4(sc, LGE_MAR1, 0);

	/* now program new ones */
	if_foreach_llmaddr(ifp, lge_hash_maddr, hashes);
	CSR_WRITE_4(sc, LGE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, LGE_MAR1, hashes[1]);

	return;
}
/*
 * Soft-reset the chip and wait for the self-clearing reset bit.
 * A timeout is only logged; the caller proceeds regardless.
 */
static void
lge_reset(struct lge_softc *sc)
{
	int i;

	LGE_SETBIT(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_SOFTRST);

	for (i = 0; i < LGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, LGE_MODE1) & LGE_MODE1_SOFTRST))
			break;
	}

	if (i == LGE_TIMEOUT)
		device_printf(sc->lge_dev, "reset never completed\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	return;
}
/*
* Probe for a Level 1 chip. Check the PCI vendor and device
* IDs against our list and return a device name if we find a match.
*/
static int
lge_probe(device_t dev)
{
	const struct lge_type *t;

	/* Scan the device table for a matching PCI vendor/device pair. */
	for (t = lge_devs; t->lge_name != NULL; t++) {
		if (pci_get_vendor(dev) == t->lge_vid &&
		    pci_get_device(dev) == t->lge_did) {
			device_set_desc(dev, t->lge_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
/*
* Attach the interface. Allocate softc structures, do ifmedia
* setup and ethernet/BPF attach.
*/
static int
lge_attach(device_t dev)
{
	u_char eaddr[ETHER_ADDR_LEN];
	struct lge_softc *sc;
	if_t ifp = NULL;
	int error = 0, rid;

	sc = device_get_softc(dev);
	sc->lge_dev = dev;

	mtx_init(&sc->lge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->lge_stat_callout, &sc->lge_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = LGE_RID;
	sc->lge_res = bus_alloc_resource_any(dev, LGE_RES, &rid, RF_ACTIVE);

	if (sc->lge_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->lge_btag = rman_get_bustag(sc->lge_res);
	sc->lge_bhandle = rman_get_bushandle(sc->lge_res);

	/* Allocate interrupt */
	rid = 0;
	sc->lge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->lge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	lge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	lge_read_eeprom(sc, (caddr_t)&eaddr[0], LGE_EE_NODEADDR_0, 1, 0);
	lge_read_eeprom(sc, (caddr_t)&eaddr[2], LGE_EE_NODEADDR_1, 1, 0);
	lge_read_eeprom(sc, (caddr_t)&eaddr[4], LGE_EE_NODEADDR_2, 1, 0);

	sc->lge_ldata = contigmalloc(sizeof(struct lge_list_data), M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->lge_ldata == NULL) {
		device_printf(dev, "no memory for list buffers!\n");
		error = ENXIO;
		goto fail;
	}

	/* Try to allocate memory for jumbo buffers. */
	if (lge_alloc_jumbo_mem(sc)) {
		device_printf(dev, "jumbo buffer allocation failed\n");
		error = ENXIO;
		goto fail;
	}

	/* if_alloc(9) cannot fail, so no NULL check is needed. */
	ifp = sc->lge_ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, lge_ioctl);
	if_setstartfn(ifp, lge_start);
	if_setinitfn(ifp, lge_init);
	if_setsendqlen(ifp, LGE_TX_LIST_CNT - 1);
	/* The LXT1001 only does checksum offload on receive. */
	if_setcapabilities(ifp, IFCAP_RXCSUM);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	if (CSR_READ_4(sc, LGE_GMIIMODE) & LGE_GMIIMODE_PCSENH)
		sc->lge_pcs = 1;
	else
		sc->lge_pcs = 0;

	/*
	 * Do MII setup.
	 */
	error = mii_attach(dev, &sc->lge_miibus, ifp, lge_ifmedia_upd,
	    lge_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	error = bus_setup_intr(dev, sc->lge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, lge_intr, sc, &sc->lge_intrhand);

	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}
	return (0);

fail:
	/* Release whatever was acquired before the failure, in order. */
	lge_free_jumbo_mem(sc);
	if (sc->lge_ldata)
		contigfree(sc->lge_ldata,
		    sizeof(struct lge_list_data), M_DEVBUF);
	if (ifp)
		if_free(ifp);
	if (sc->lge_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
	if (sc->lge_res)
		bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);
	mtx_destroy(&sc->lge_mtx);
	return(error);
}
/*
 * Detach: stop the hardware, drain callouts, tear down the interrupt
 * and release all bus resources and DMA memory.
 */
static int
lge_detach(device_t dev)
{
	struct lge_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	ifp = sc->lge_ifp;

	LGE_LOCK(sc);
	lge_reset(sc);
	lge_stop(sc);
	LGE_UNLOCK(sc);
	/* Drain (not just stop) so a running tick finishes first. */
	callout_drain(&sc->lge_stat_callout);
	ether_ifdetach(ifp);

	bus_generic_detach(dev);
	device_delete_child(dev, sc->lge_miibus);

	bus_teardown_intr(dev, sc->lge_irq, sc->lge_intrhand);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
	bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);

	contigfree(sc->lge_ldata, sizeof(struct lge_list_data), M_DEVBUF);
	if_free(ifp);
	lge_free_jumbo_mem(sc);
	mtx_destroy(&sc->lge_mtx);

	return(0);
}
/*
* Initialize the transmit descriptors.
*/
static int
lge_list_tx_init(struct lge_softc *sc)
{
	struct lge_list_data *ld = sc->lge_ldata;
	struct lge_ring_data *cd = &sc->lge_cdata;
	int idx;

	/* Clear every TX descriptor and rewind both ring pointers. */
	for (idx = 0; idx < LGE_TX_LIST_CNT; idx++) {
		ld->lge_tx_list[idx].lge_mbuf = NULL;
		ld->lge_tx_list[idx].lge_ctl = 0;
	}

	cd->lge_tx_prod = cd->lge_tx_cons = 0;

	return(0);
}
/*
* Initialize the RX descriptors and allocate mbufs for them. Note that
* we arrange the descriptors in a closed ring, so that the last descriptor
* points back to the first.
*/
static int
lge_list_rx_init(struct lge_softc *sc)
{
	struct lge_list_data *ld;
	struct lge_ring_data *cd;
	int i;

	ld = sc->lge_ldata;
	cd = &sc->lge_cdata;

	cd->lge_rx_prod = cd->lge_rx_cons = 0;

	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);

	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
		/* Stop early once the chip's RX command FIFO is full. */
		if (CSR_READ_1(sc, LGE_RXCMDFREE_8BIT) == 0)
			break;
		if (lge_newbuf(sc, &ld->lge_rx_list[i], NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	/* Clear possible 'rx command queue empty' interrupt. */
	CSR_READ_4(sc, LGE_ISR);

	return(0);
}
/*
* Initialize an RX descriptor and attach an MBUF cluster.
*/
static int
lge_newbuf(struct lge_softc *sc, struct lge_rx_desc *c, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	char *buf = NULL;

	if (m == NULL) {
		/* No recycled mbuf: allocate a header and a jumbo buffer. */
		MGETHDR(m_new, M_NOWAIT, MT_DATA);
		if (m_new == NULL) {
			device_printf(sc->lge_dev, "no memory for rx list "
			    "-- packet dropped!\n");
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = lge_jalloc(sc);
		if (buf == NULL) {
#ifdef LGE_VERBOSE
			device_printf(sc->lge_dev, "jumbo allocation failed "
			    "-- packet dropped!\n");
#endif
			m_freem(m_new);
			return(ENOBUFS);
		}
		/* Attach the buffer to the mbuf */
		m_new->m_len = m_new->m_pkthdr.len = LGE_JUMBO_FRAMELEN;
		m_extadd(m_new, buf, LGE_JUMBO_FRAMELEN, lge_jfree, sc, NULL,
		    0, EXT_NET_DRV);
	} else {
		/* Recycle the caller's mbuf; reset length and data pointer. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = LGE_JUMBO_FRAMELEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m_new, ETHER_ALIGN);

	c->lge_mbuf = m_new;
	c->lge_fragptr_hi = 0;
	c->lge_fragptr_lo = vtophys(mtod(m_new, caddr_t));
	c->lge_fraglen = m_new->m_len;
	c->lge_ctl = m_new->m_len | LGE_RXCTL_WANTINTR | LGE_FRAGCNT(1);
	c->lge_sts = 0;

	/*
	 * Put this buffer in the RX command FIFO. To do this,
	 * we just write the physical address of the descriptor
	 * into the RX descriptor address registers. Note that
	 * there are two registers, one high DWORD and one low
	 * DWORD, which lets us specify a 64-bit address if
	 * desired. We only use a 32-bit address for now.
	 * Writing to the low DWORD register is what actually
	 * causes the command to be issued, so we do that
	 * last.
	 */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_LO, vtophys(c));
	LGE_INC(sc->lge_cdata.lge_rx_prod, LGE_RX_LIST_CNT);

	return(0);
}
/*
 * Allocate a contiguous pool of jumbo buffer memory and carve it into
 * LGE_JSLOTS slots of LGE_JLEN bytes, each tracked by a free-list entry.
 *
 * Returns 0 on success or ENOBUFS on any allocation failure; on failure
 * everything allocated so far is released again so nothing leaks.
 */
static int
lge_alloc_jumbo_mem(struct lge_softc *sc)
{
	caddr_t ptr;
	int i;
	struct lge_jpool_entry *entry;

	/* Grab a big chunk o' storage. */
	sc->lge_cdata.lge_jumbo_buf = contigmalloc(LGE_JMEM, M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->lge_cdata.lge_jumbo_buf == NULL) {
		device_printf(sc->lge_dev, "no memory for jumbo buffers!\n");
		return(ENOBUFS);
	}

	SLIST_INIT(&sc->lge_jfree_listhead);
	SLIST_INIT(&sc->lge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->lge_cdata.lge_jumbo_buf;
	for (i = 0; i < LGE_JSLOTS; i++) {
		sc->lge_cdata.lge_jslots[i] = ptr;
		ptr += LGE_JLEN;
		entry = malloc(sizeof(struct lge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			device_printf(sc->lge_dev, "no memory for jumbo "
			    "buffer queue!\n");
			/*
			 * Fix: previously this path leaked the contiguous
			 * pool and the free-list entries queued so far.
			 * Release them and clear the pool pointer so a
			 * later lge_free_jumbo_mem() is a harmless no-op.
			 */
			lge_free_jumbo_mem(sc);
			sc->lge_cdata.lge_jumbo_buf = NULL;
			return(ENOBUFS);
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->lge_jfree_listhead,
		    entry, jpool_entries);
	}

	return(0);
}
/*
 * Release the jumbo buffer pool: reclaim any entries still marked
 * in-use (a driver bug, reported via console), free all free-list
 * entries and finally the contiguous pool itself.
 *
 * Safe to call when the pool was never allocated, and now also safe
 * to call more than once.
 */
static void
lge_free_jumbo_mem(struct lge_softc *sc)
{
	struct lge_jpool_entry *entry;

	/* Nothing to do if the pool was never (successfully) allocated. */
	if (sc->lge_cdata.lge_jumbo_buf == NULL)
		return;

	/* Buffers still in use at teardown indicate a leak; reclaim them. */
	while ((entry = SLIST_FIRST(&sc->lge_jinuse_listhead))) {
		device_printf(sc->lge_dev,
		    "asked to free buffer that is in use!\n");
		SLIST_REMOVE_HEAD(&sc->lge_jinuse_listhead, jpool_entries);
		SLIST_INSERT_HEAD(&sc->lge_jfree_listhead, entry,
		    jpool_entries);
	}
	while (!SLIST_EMPTY(&sc->lge_jfree_listhead)) {
		entry = SLIST_FIRST(&sc->lge_jfree_listhead);
		SLIST_REMOVE_HEAD(&sc->lge_jfree_listhead, jpool_entries);
		free(entry, M_DEVBUF);
	}

	contigfree(sc->lge_cdata.lge_jumbo_buf, LGE_JMEM, M_DEVBUF);
	/*
	 * Fix: clear the pointer so the guard above makes a second call
	 * a no-op instead of a double contigfree().
	 */
	sc->lge_cdata.lge_jumbo_buf = NULL;
}
/*
 * Allocate a jumbo buffer: pop an entry off the free list, move it to
 * the in-use list and hand back the slot's buffer address, or NULL if
 * the pool is exhausted.
 */
static void *
lge_jalloc(struct lge_softc *sc)
{
	struct lge_jpool_entry *ent;

	ent = SLIST_FIRST(&sc->lge_jfree_listhead);
	if (ent == NULL) {
#ifdef LGE_VERBOSE
		device_printf(sc->lge_dev, "no free jumbo buffers\n");
#endif
		return (NULL);
	}

	/* Transfer the bookkeeping entry from the free to the in-use list. */
	SLIST_REMOVE_HEAD(&sc->lge_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->lge_jinuse_listhead, ent, jpool_entries);

	return (sc->lge_cdata.lge_jslots[ent->slot]);
}
/*
 * Release a jumbo buffer.  Invoked by the mbuf system when the external
 * storage attached in lge_newbuf() via m_extadd() is freed; the softc
 * was stashed in ext_arg1 at attach time.
 */
static void
lge_jfree(struct mbuf *m)
{
	struct lge_softc *sc;
	int i;
	struct lge_jpool_entry *entry;

	/* Extract the softc struct pointer. */
	sc = m->m_ext.ext_arg1;

	if (sc == NULL)
		panic("lge_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((vm_offset_t)m->m_ext.ext_buf
	    - (vm_offset_t)sc->lge_cdata.lge_jumbo_buf) / LGE_JLEN;

	if ((i < 0) || (i >= LGE_JSLOTS))
		panic("lge_jfree: asked to free buffer that we don't manage!");

	/*
	 * Pop any entry off the in-use list and retag it with this slot:
	 * entries are interchangeable tokens, only their count matters.
	 */
	entry = SLIST_FIRST(&sc->lge_jinuse_listhead);
	if (entry == NULL)
		panic("lge_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->lge_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->lge_jfree_listhead, entry, jpool_entries);
}
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * 'cnt' is the number of completed RX DMAs reported by the chip
 * (LGE_RX_DMACNT(status) in lge_intr()).  Good frames get a fresh
 * buffer (or are copied out if none is available) and are handed to
 * if_input() with hardware checksum results translated to mbuf flags.
 */
static void
lge_rxeof(struct lge_softc *sc, int cnt)
{
	struct mbuf *m;
	if_t ifp;
	struct lge_rx_desc *cur_rx;
	int c, i, total_len = 0;
	u_int32_t rxsts, rxctl;

	ifp = sc->lge_ifp;

	/* Find out how many frames were processed. */
	c = cnt;
	i = sc->lge_cdata.lge_rx_cons;

	/* Suck them in. */
	while(c) {
		struct mbuf *m0 = NULL;

		cur_rx = &sc->lge_ldata->lge_rx_list[i];
		rxctl = cur_rx->lge_ctl;
		rxsts = cur_rx->lge_sts;
		m = cur_rx->lge_mbuf;
		cur_rx->lge_mbuf = NULL;
		total_len = LGE_RXBYTES(cur_rx);
		LGE_INC(i, LGE_RX_LIST_CNT);
		c--;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxctl & LGE_RXCTL_ERRMASK) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			continue;
		}

		if (lge_newbuf(sc, &LGE_RXTAIL(sc), NULL) == ENOBUFS) {
			/*
			 * No replacement buffer: copy the frame out and
			 * recycle the original mbuf back into the ring.
			 */
			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
			    ifp, NULL);
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			if (m0 == NULL) {
				device_printf(sc->lge_dev, "no receive buffers "
				    "available -- packet dropped!\n");
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				continue;
			}
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		/*
		 * Do IP checksum checking.
		 * NOTE(review): CSUM_IP_VALID is set whenever the error
		 * bit is clear, even when ISIP is not set -- confirm that
		 * the chip guarantees IPCSUMERR semantics for non-IP frames.
		 */
		if (rxsts & LGE_RXSTS_ISIP)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		if (!(rxsts & LGE_RXSTS_IPCSUMERR))
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		if ((rxsts & LGE_RXSTS_ISTCP &&
		    !(rxsts & LGE_RXSTS_TCPCSUMERR)) ||
		    (rxsts & LGE_RXSTS_ISUDP &&
		    !(rxsts & LGE_RXSTS_UDPCSUMERR))) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}

		/* Hand the frame to the stack without holding the lock. */
		LGE_UNLOCK(sc);
		if_input(ifp, m);
		LGE_LOCK(sc);
	}

	sc->lge_cdata.lge_rx_cons = i;

	return;
}
/*
 * RX command FIFO ran empty: force a full reinitialization of the
 * interface to recover.
 */
static void
lge_rxeoc(struct lge_softc *sc)
{
	if_t ifp = sc->lge_ifp;

	/* Clear RUNNING so lge_init_locked() performs a complete reinit. */
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	lge_init_locked(sc);
}
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
lge_txeof(struct lge_softc *sc)
{
	struct lge_tx_desc *cur_tx = NULL;
	if_t ifp;
	u_int32_t idx, txdone;

	ifp = sc->lge_ifp;

	/* Clear the timeout timer. */
	sc->lge_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->lge_cdata.lge_tx_cons;
	/* The chip reports how many TX DMA operations have completed. */
	txdone = CSR_READ_1(sc, LGE_TXDMADONE_8BIT);

	while (idx != sc->lge_cdata.lge_tx_prod && txdone) {
		cur_tx = &sc->lge_ldata->lge_tx_list[idx];

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if (cur_tx->lge_mbuf != NULL) {
			m_freem(cur_tx->lge_mbuf);
			cur_tx->lge_mbuf = NULL;
		}
		cur_tx->lge_ctl = 0;

		txdone--;
		LGE_INC(idx, LGE_TX_LIST_CNT);
		sc->lge_timer = 0;
	}

	sc->lge_cdata.lge_tx_cons = idx;

	/* Reclaimed at least one descriptor: output is no longer blocked. */
	if (cur_tx != NULL)
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	return;
}
/*
 * Once-per-second callout: harvest collision statistics, poll the PHY
 * for link state until a link is established, and drive the TX
 * watchdog countdown.  Reschedules itself via lge_stat_callout.
 */
static void
lge_tick(void *xsc)
{
	struct lge_softc *sc;
	struct mii_data *mii;
	if_t ifp;

	sc = xsc;
	ifp = sc->lge_ifp;
	LGE_LOCK_ASSERT(sc);

	/* Read collision counters through the indirect stats registers. */
	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_SINGLE_COLL_PKTS);
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, CSR_READ_4(sc, LGE_STATSVAL));
	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_MULTI_COLL_PKTS);
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, CSR_READ_4(sc, LGE_STATSVAL));

	if (!sc->lge_link) {
		mii = device_get_softc(sc->lge_miibus);
		mii_tick(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->lge_link++;
			if (bootverbose &&
			    (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T))
				device_printf(sc->lge_dev, "gigabit link up\n");
			/* Link just came up: kick any queued transmissions. */
			if (!if_sendq_empty(ifp))
				lge_start_locked(ifp);
		}
	}

	/* TX watchdog countdown, armed in lge_start_locked(). */
	if (sc->lge_timer != 0 && --sc->lge_timer == 0)
		lge_watchdog(sc);

	callout_reset(&sc->lge_stat_callout, hz, lge_tick, sc);

	return;
}
/*
 * Interrupt handler: acknowledge and dispatch all pending causes
 * (TX done, RX DMA done, RX command FIFO empty, PHY events), then
 * re-enable interrupts and restart transmission if packets are queued.
 */
static void
lge_intr(void *arg)
{
	struct lge_softc *sc;
	if_t ifp;
	u_int32_t status;

	sc = arg;
	ifp = sc->lge_ifp;
	LGE_LOCK(sc);

	/* Suppress unwanted interrupts */
	if (!(if_getflags(ifp) & IFF_UP)) {
		lge_stop(sc);
		LGE_UNLOCK(sc);
		return;
	}

	for (;;) {
		/*
		 * Reading the ISR register clears all interrupts, and
		 * clears the 'interrupts enabled' bit in the IMR
		 * register.
		 */
		status = CSR_READ_4(sc, LGE_ISR);

		if ((status & LGE_INTRS) == 0)
			break;

		if ((status & (LGE_ISR_TXCMDFIFO_EMPTY|LGE_ISR_TXDMA_DONE)))
			lge_txeof(sc);

		if (status & LGE_ISR_RXDMA_DONE)
			lge_rxeof(sc, LGE_RX_DMACNT(status));

		if (status & LGE_ISR_RXCMDFIFO_EMPTY)
			lge_rxeoc(sc);

		if (status & LGE_ISR_PHY_INTR) {
			/* PHY event: drop link state; lge_tick re-polls it. */
			sc->lge_link = 0;
			callout_stop(&sc->lge_stat_callout);
			lge_tick(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|LGE_IMR_INTR_ENB);

	if (!if_sendq_empty(ifp))
		lge_start_locked(ifp);

	LGE_UNLOCK(sc);
	return;
}
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 *
 * Returns 0 on success, or ENOBUFS if the chain contains more non-empty
 * mbufs than the descriptor has fragment slots.
 */
static int
lge_encap(struct lge_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	struct lge_frag *f = NULL;
	struct lge_tx_desc *cur_tx;
	struct mbuf *m;
	int frag = 0, tot_len = 0;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	cur_tx = &sc->lge_ldata->lge_tx_list[*txidx];

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			/*
			 * Fix: nothing previously bounded 'frag', so a long
			 * chain could write past the end of lge_frags[],
			 * and the ENOBUFS check below was unreachable.
			 * Bail out before overrunning the array.
			 */
			if (frag == (int)nitems(cur_tx->lge_frags))
				break;
			tot_len += m->m_len;
			f = &cur_tx->lge_frags[frag];
			f->lge_fraglen = m->m_len;
			f->lge_fragptr_lo = vtophys(mtod(m, vm_offset_t));
			f->lge_fragptr_hi = 0;
			frag++;
		}
	}

	/* Ran out of fragment slots before the end of the chain. */
	if (m != NULL)
		return(ENOBUFS);

	cur_tx->lge_mbuf = m_head;
	cur_tx->lge_ctl = LGE_TXCTL_WANTINTR|LGE_FRAGCNT(frag)|tot_len;
	LGE_INC((*txidx), LGE_TX_LIST_CNT);

	/* Queue for transmit */
	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_LO, vtophys(cur_tx));

	return(0);
}
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
lge_start(if_t ifp)
{
	struct lge_softc *sc;

	sc = if_getsoftc(ifp);
	/* Locked wrapper: serialize with the interrupt and ioctl paths. */
	LGE_LOCK(sc);
	lge_start_locked(ifp);
	LGE_UNLOCK(sc);
}
/*
 * Locked transmit path: drain the interface send queue into the TX
 * descriptor ring until the ring, the chip's TX command FIFO, or the
 * queue itself is exhausted.  Caller must hold the driver lock.
 */
static void
lge_start_locked(if_t ifp)
{
	struct lge_softc *sc;
	struct mbuf *m_head = NULL;
	u_int32_t idx;

	sc = if_getsoftc(ifp);

	/* Don't queue anything until a link has been established. */
	if (!sc->lge_link)
		return;

	idx = sc->lge_cdata.lge_tx_prod;

	if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE)
		return;

	/* A non-NULL lge_mbuf means this descriptor is still in flight. */
	while(sc->lge_ldata->lge_tx_list[idx].lge_mbuf == NULL) {
		if (CSR_READ_1(sc, LGE_TXCMDFREE_8BIT) == 0)
			break;

		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;

		if (lge_encap(sc, m_head, &idx)) {
			/* Could not encapsulate: requeue and block output. */
			if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	sc->lge_cdata.lge_tx_prod = idx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	sc->lge_timer = 5;

	return;
}
/* Interface init callback: locked wrapper around lge_init_locked(). */
static void
lge_init(void *xsc)
{
	struct lge_softc *sc = xsc;

	LGE_LOCK(sc);
	lge_init_locked(sc);
	LGE_UNLOCK(sc);
}
/*
 * Bring the interface up: reset the chip, program the MAC address,
 * initialize the RX/TX rings, configure the MODE1/MODE2 operating
 * registers, load the multicast filter, enable RX/TX and interrupts,
 * and start the stats/watchdog callout.  Caller must hold the lock.
 *
 * NOTE(review): the MODE1 write pattern below appears to use
 * SETRST_CTL* bits to select set-vs-clear for the accompanying mode
 * bits (writes with the bit set the mode, without it clear the mode) --
 * confirm against the LXT1001 register documentation.
 */
static void
lge_init_locked(struct lge_softc *sc)
{
	if_t ifp = sc->lge_ifp;

	LGE_LOCK_ASSERT(sc);

	/* Already running: nothing to do. */
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	lge_stop(sc);
	lge_reset(sc);

	/* Set MAC address */
	CSR_WRITE_4(sc, LGE_PAR0, *(u_int32_t *)(&if_getlladdr(sc->lge_ifp)[0]));
	CSR_WRITE_4(sc, LGE_PAR1, *(u_int32_t *)(&if_getlladdr(sc->lge_ifp)[4]));

	/* Init circular RX list. */
	if (lge_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->lge_dev, "initialization failed: no "
		    "memory for rx buffers\n");
		lge_stop(sc);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	lge_list_tx_init(sc);

	/* Set initial value for MODE1 register. */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_UCAST|
	    LGE_MODE1_TX_CRC|LGE_MODE1_TXPAD|
	    LGE_MODE1_RX_FLOWCTL|LGE_MODE1_SETRST_CTL0|
	    LGE_MODE1_SETRST_CTL1|LGE_MODE1_SETRST_CTL2);

	/* If we want promiscuous mode, set the allframes bit. */
	if (if_getflags(ifp) & IFF_PROMISC) {
		CSR_WRITE_4(sc, LGE_MODE1,
		    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_PROMISC);
	} else {
		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_PROMISC);
	}

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (if_getflags(ifp) & IFF_BROADCAST) {
		CSR_WRITE_4(sc, LGE_MODE1,
		    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_BCAST);
	} else {
		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_BCAST);
	}

	/* Packet padding workaround? */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RMVPAD);

	/* No error frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ERRPKTS);

	/* Receive large frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_GIANTS);

	/* Workaround: disable RX/TX flow control */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_TX_FLOWCTL);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_FLOWCTL);

	/* Make sure to strip CRC from received frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_CRC);

	/* Turn off magic packet mode */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_MPACK_ENB);

	/* Turn off all VLAN stuff */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_VLAN_RX|LGE_MODE1_VLAN_TX|
	    LGE_MODE1_VLAN_STRIP|LGE_MODE1_VLAN_INSERT);

	/* Workaround: FIFO overflow */
	CSR_WRITE_2(sc, LGE_RXFIFO_HIWAT, 0x3FFF);
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL1|LGE_IMR_RXFIFO_WAT);

	/*
	 * Load the multicast filter.
	 */
	lge_setmulti(sc);

	/*
	 * Enable hardware checksum validation for all received IPv4
	 * packets, do not reject packets with bad checksums.
	 */
	CSR_WRITE_4(sc, LGE_MODE2, LGE_MODE2_RX_IPCSUM|
	    LGE_MODE2_RX_TCPCSUM|LGE_MODE2_RX_UDPCSUM|
	    LGE_MODE2_RX_ERRCSUM);

	/*
	 * Enable the delivery of PHY interrupts based on
	 * link/speed/duplex status changes.
	 */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_GMIIPOLL);

	/* Enable receiver and transmitter. */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_ENB);

	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_HI, 0);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_TX_ENB);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|
	    LGE_IMR_SETRST_CTL1|LGE_IMR_INTR_ENB|LGE_INTRS);

	lge_ifmedia_upd_locked(ifp);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	callout_reset(&sc->lge_stat_callout, hz, lge_tick, sc);

	return;
}
/*
 * Set media options: locked wrapper around lge_ifmedia_upd_locked().
 */
static int
lge_ifmedia_upd(if_t ifp)
{
	struct lge_softc *sc = if_getsoftc(ifp);

	LGE_LOCK(sc);
	lge_ifmedia_upd_locked(ifp);
	LGE_UNLOCK(sc);

	return (0);
}
/*
 * Apply the currently selected media: reset every PHY attached to the
 * MII bus and kick off a media change.  Caller must hold the lock.
 */
static void
lge_ifmedia_upd_locked(if_t ifp)
{
	struct lge_softc *sc = if_getsoftc(ifp);
	struct mii_softc *phy;
	struct mii_data *mii;

	LGE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->lge_miibus);
	/* Link state will be rediscovered by lge_tick() after the reset. */
	sc->lge_link = 0;
	LIST_FOREACH(phy, &mii->mii_phys, mii_list)
		PHY_RESET(phy);
	mii_mediachg(mii);
}
/*
 * Report current media status.
 */
static void
lge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct lge_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii;

	LGE_LOCK(sc);
	mii = device_get_softc(sc->lge_miibus);
	/* Refresh the PHY state before copying it out. */
	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	LGE_UNLOCK(sc);
}
/*
 * Handle socket ioctls: MTU changes, interface flag changes (toggling
 * promiscuous mode without a full reinit where possible), multicast
 * filter updates and media requests.  Everything else is delegated to
 * ether_ioctl().
 */
static int
lge_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct lge_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0;

	switch(command) {
	case SIOCSIFMTU:
		LGE_LOCK(sc);
		/* Accept any MTU up to the jumbo maximum. */
		if (ifr->ifr_mtu > LGE_JUMBO_MTU)
			error = EINVAL;
		else
			if_setmtu(ifp, ifr->ifr_mtu);
		LGE_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		LGE_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
			    if_getflags(ifp) & IFF_PROMISC &&
			    !(sc->lge_if_flags & IFF_PROMISC)) {
				/* Promiscuous mode switched on. */
				CSR_WRITE_4(sc, LGE_MODE1,
				    LGE_MODE1_SETRST_CTL1|
				    LGE_MODE1_RX_PROMISC);
			} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
			    !(if_getflags(ifp) & IFF_PROMISC) &&
			    sc->lge_if_flags & IFF_PROMISC) {
				/* Promiscuous mode switched off. */
				CSR_WRITE_4(sc, LGE_MODE1,
				    LGE_MODE1_RX_PROMISC);
			} else {
				/* Any other change: restart the interface. */
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				lge_init_locked(sc);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				lge_stop(sc);
		}
		/* Remember flags so the next call can detect transitions. */
		sc->lge_if_flags = if_getflags(ifp);
		LGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		LGE_LOCK(sc);
		lge_setmulti(sc);
		LGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->lge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return(error);
}
/*
 * TX watchdog: called from lge_tick() when lge_timer expires with
 * transmissions still outstanding.  Resets and reinitializes the chip,
 * then restarts transmission if packets are queued.
 */
static void
lge_watchdog(struct lge_softc *sc)
{
	if_t ifp;

	LGE_LOCK_ASSERT(sc);
	ifp = sc->lge_ifp;

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_printf(ifp, "watchdog timeout\n");

	lge_stop(sc);
	lge_reset(sc);
	/* Clear RUNNING so lge_init_locked() performs a full reinit. */
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	lge_init_locked(sc);

	if (!if_sendq_empty(ifp))
		lge_start_locked(ifp);
}
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.  Caller must hold the driver lock.
 */
static void
lge_stop(struct lge_softc *sc)
{
	int i;
	if_t ifp;

	LGE_LOCK_ASSERT(sc);
	ifp = sc->lge_ifp;

	/* Disarm the TX watchdog and stop the stats callout. */
	sc->lge_timer = 0;
	callout_stop(&sc->lge_stat_callout);

	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_INTR_ENB);

	/* Disable receiver and transmitter. */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ENB|LGE_MODE1_TX_ENB);
	sc->lge_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
		if (sc->lge_ldata->lge_rx_list[i].lge_mbuf != NULL) {
			m_freem(sc->lge_ldata->lge_rx_list[i].lge_mbuf);
			sc->lge_ldata->lge_rx_list[i].lge_mbuf = NULL;
		}
	}
	bzero((char *)&sc->lge_ldata->lge_rx_list,
	    sizeof(sc->lge_ldata->lge_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
		if (sc->lge_ldata->lge_tx_list[i].lge_mbuf != NULL) {
			m_freem(sc->lge_ldata->lge_tx_list[i].lge_mbuf);
			sc->lge_ldata->lge_tx_list[i].lge_mbuf = NULL;
		}
	}
	bzero((char *)&sc->lge_ldata->lge_tx_list,
	    sizeof(sc->lge_ldata->lge_tx_list));

	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));

	return;
}
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
lge_shutdown(device_t dev)
{
	struct lge_softc *sc = device_get_softc(dev);

	LGE_LOCK(sc);
	lge_reset(sc);
	lge_stop(sc);
	LGE_UNLOCK(sc);

	return (0);
}
diff --git a/sys/dev/liquidio/lio_main.c b/sys/dev/liquidio/lio_main.c
index aa52612dda04..7104ff07674f 100644
--- a/sys/dev/liquidio/lio_main.c
+++ b/sys/dev/liquidio/lio_main.c
@@ -1,2305 +1,2300 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_ctrl.h"
#include "lio_main.h"
#include "lio_network.h"
#include "cn23xx_pf_device.h"
#include "lio_image.h"
#include "lio_ioctl.h"
#include "lio_rxtx.h"
#include "lio_rss.h"
/* Number of milliseconds to wait for DDR initialization */
#define LIO_DDR_TIMEOUT 10000
#define LIO_MAX_FW_TYPE_LEN 8
static char fw_type[LIO_MAX_FW_TYPE_LEN];
TUNABLE_STR("hw.lio.fw_type", fw_type, sizeof(fw_type));
/*
* Integers that specify number of queues per PF.
* Valid range is 0 to 64.
* Use 0 to derive from CPU count.
*/
static int num_queues_per_pf0;
static int num_queues_per_pf1;
TUNABLE_INT("hw.lio.num_queues_per_pf0", &num_queues_per_pf0);
TUNABLE_INT("hw.lio.num_queues_per_pf1", &num_queues_per_pf1);
#ifdef RSS
static int lio_rss = 1;
TUNABLE_INT("hw.lio.rss", &lio_rss);
#endif /* RSS */
/* Hardware LRO */
unsigned int lio_hwlro = 0;
TUNABLE_INT("hw.lio.hwlro", &lio_hwlro);
/*
* Bitmask indicating which consoles have debug
* output redirected to syslog.
*/
static unsigned long console_bitmask;
TUNABLE_ULONG("hw.lio.console_bitmask", &console_bitmask);
/*
 * \brief determines if a given console has debug enabled.
 * @param console console to check
 * @returns 1 = enabled. 0 otherwise
 */
int
lio_console_debug_enabled(uint32_t console)
{
	/* Test this console's bit in the hw.lio.console_bitmask tunable. */
	return ((console_bitmask >> console) & 0x1);
}
static int lio_detach(device_t dev);
static int lio_device_init(struct octeon_device *octeon_dev);
static int lio_chip_specific_setup(struct octeon_device *oct);
static void lio_watchdog(void *param);
static int lio_load_firmware(struct octeon_device *oct);
static int lio_nic_starter(struct octeon_device *oct);
static int lio_init_nic_module(struct octeon_device *oct);
static int lio_setup_nic_devices(struct octeon_device *octeon_dev);
static int lio_link_info(struct lio_recv_info *recv_info, void *ptr);
static void lio_if_cfg_callback(struct octeon_device *oct, uint32_t status,
void *buf);
static int lio_set_rxcsum_command(if_t ifp, int command,
uint8_t rx_cmd);
static int lio_setup_glists(struct octeon_device *oct, struct lio *lio,
int num_iqs);
static void lio_destroy_nic_device(struct octeon_device *oct, int ifidx);
static inline void lio_update_link_status(if_t ifp,
union octeon_link_status *ls);
static void lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop);
static int lio_stop_nic_module(struct octeon_device *oct);
static void lio_destroy_resources(struct octeon_device *oct);
static int lio_setup_rx_oom_poll_fn(if_t ifp);
static void lio_vlan_rx_add_vid(void *arg, if_t ifp, uint16_t vid);
static void lio_vlan_rx_kill_vid(void *arg, if_t ifp,
uint16_t vid);
static struct octeon_device *
lio_get_other_octeon_device(struct octeon_device *oct);
static int lio_wait_for_oq_pkts(struct octeon_device *oct);
int lio_send_rss_param(struct lio *lio);
static int lio_dbg_console_print(struct octeon_device *oct,
uint32_t console_num, char *prefix,
char *suffix);
/* Polling interval for determining when NIC application is alive */
#define LIO_STARTER_POLL_INTERVAL_MS 100
/*
* vendor_info_array.
* This array contains the list of IDs on which the driver should load.
*/
struct lio_vendor_info {
uint16_t vendor_id;
uint16_t device_id;
uint16_t subdevice_id;
uint8_t revision_id;
uint8_t index;
};
static struct lio_vendor_info lio_pci_tbl[] = {
/* CN2350 10G */
{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE,
0x02, 0},
/* CN2350 10G */
{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE1,
0x02, 0},
/* CN2360 10G */
{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_10G_SUBDEVICE,
0x02, 1},
/* CN2350 25G */
{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_25G_SUBDEVICE,
0x02, 2},
/* CN2360 25G */
{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_25G_SUBDEVICE,
0x02, 3},
{0, 0, 0, 0, 0}
};
static char *lio_strings[] = {
"LiquidIO 2350 10GbE Server Adapter",
"LiquidIO 2360 10GbE Server Adapter",
"LiquidIO 2350 25GbE Server Adapter",
"LiquidIO 2360 25GbE Server Adapter",
};
struct lio_if_cfg_resp {
uint64_t rh;
struct octeon_if_cfg_info cfg_info;
uint64_t status;
};
struct lio_if_cfg_context {
int octeon_id;
volatile int cond;
};
struct lio_rx_ctl_context {
int octeon_id;
volatile int cond;
};
/*
 * PCI probe: match vendor/device/subdevice/revision against the
 * supported-device table and set the device description on a hit.
 */
static int
lio_probe(device_t dev)
{
	struct lio_vendor_info *info;
	uint16_t vid, did, subdid;
	uint8_t rev;

	vid = pci_get_vendor(dev);
	if (vid != PCI_VENDOR_ID_CAVIUM)
		return (ENXIO);

	did = pci_get_device(dev);
	subdid = pci_get_subdevice(dev);
	rev = pci_get_revid(dev);

	/* Scan the table; it is terminated by a zero vendor ID. */
	for (info = lio_pci_tbl; info->vendor_id != 0; info++) {
		if (vid == info->vendor_id && did == info->device_id &&
		    subdid == info->subdevice_id &&
		    rev == info->revision_id) {
			device_set_descf(dev, "%s, Version - %s",
			    lio_strings[info->index], LIO_VERSION);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
/*
 * Device attach: allocate and initialize the octeon device, start the
 * per-NIC watchdog kthread (guarded by bit 2 of SLI_SCRATCH_1 so only
 * one thread runs per NIC), then poll until the firmware's NIC core
 * reports ready and start the NIC application.
 *
 * NOTE(review): this returns negative errno values (-ENOMEM/-EIO/-1),
 * which is unusual for a FreeBSD device_attach method -- confirm the
 * bus code's handling of these values is acceptable here.
 */
static int
lio_attach(device_t device)
{
	struct octeon_device *oct_dev = NULL;
	uint64_t scratch1;
	uint32_t error;
	int timeout, ret = 1;
	uint8_t bus, dev, function;

	oct_dev = lio_allocate_device(device);
	if (oct_dev == NULL) {
		device_printf(device, "Error: Unable to allocate device\n");
		return (-ENOMEM);
	}

	oct_dev->tx_budget = LIO_DEFAULT_TX_PKTS_PROCESS_BUDGET;
	oct_dev->rx_budget = LIO_DEFAULT_RX_PKTS_PROCESS_BUDGET;
	oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	oct_dev->device = device;
	bus = pci_get_bus(device);
	dev = pci_get_slot(device);
	function = pci_get_function(device);

	lio_dev_info(oct_dev, "Initializing device %x:%x %02x:%02x.%01x\n",
	    pci_get_vendor(device), pci_get_device(device), bus, dev,
	    function);

	if (lio_device_init(oct_dev)) {
		lio_dev_err(oct_dev, "Failed to init device\n");
		lio_detach(device);
		return (-ENOMEM);
	}

	scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1);
	if (!(scratch1 & 4ULL)) {
		/*
		 * Bit 2 of SLI_SCRATCH_1 is a flag that indicates that
		 * the lio watchdog kernel thread is running for this
		 * NIC. Each NIC gets one watchdog kernel thread.
		 */
		scratch1 |= 4ULL;
		lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1);

		error = kproc_create(lio_watchdog, oct_dev,
		    &oct_dev->watchdog_task, 0, 0,
		    "liowd/%02hhx:%02hhx.%hhx", bus,
		    dev, function);
		if (!error) {
			kproc_resume(oct_dev->watchdog_task);
		} else {
			oct_dev->watchdog_task = NULL;
			lio_dev_err(oct_dev,
			    "failed to create kernel_thread\n");
			lio_detach(device);
			return (-1);
		}
	}

	oct_dev->rx_pause = 1;
	oct_dev->tx_pause = 1;

	/* Poll (up to LIO_NIC_STARTER_TIMEOUT ms) for the firmware core. */
	timeout = 0;
	while (timeout < LIO_NIC_STARTER_TIMEOUT) {
		lio_mdelay(LIO_STARTER_POLL_INTERVAL_MS);
		timeout += LIO_STARTER_POLL_INTERVAL_MS;

		/*
		 * During the boot process interrupts are not available.
		 * So polling for first control message from FW.
		 */
		if (cold)
			lio_droq_bh(oct_dev->droq[0], 0);

		if (atomic_load_acq_int(&oct_dev->status) == LIO_DEV_CORE_OK) {
			ret = lio_nic_starter(oct_dev);
			break;
		}
	}

	if (ret) {
		lio_dev_err(oct_dev, "Firmware failed to start\n");
		lio_detach(device);
		return (-EIO);
	}

	lio_dev_dbg(oct_dev, "Device is ready\n");

	return (0);
}
/*
 * Device detach: stop the watchdog kthread (clearing its SLI_SCRATCH_1
 * flag bit), tear down the NIC module if it was started, release all
 * device resources and finally free the device structure itself.
 */
static int
lio_detach(device_t dev)
{
	struct octeon_device *oct_dev = device_get_softc(dev);

	lio_dev_dbg(oct_dev, "Stopping device\n");
	if (oct_dev->watchdog_task) {
		uint64_t scratch1;

		kproc_suspend(oct_dev->watchdog_task, 0);

		/* Clear the "watchdog running" flag (bit 2, see attach). */
		scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1);
		scratch1 &= ~4ULL;
		lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1);
	}

	if (oct_dev->app_mode && (oct_dev->app_mode == LIO_DRV_NIC_APP))
		lio_stop_nic_module(oct_dev);

	/*
	 * Reset the octeon device and cleanup all memory allocated for
	 * the octeon device by driver.
	 */
	lio_destroy_resources(oct_dev);

	lio_dev_info(oct_dev, "Device removed\n");

	/*
	 * This octeon device has been removed. Update the global
	 * data structure to reflect this. Free the device structure.
	 */
	lio_free_device_mem(oct_dev);

	return (0);
}
/*
 * System shutdown hook: ask the firmware to stop delivering RX traffic
 * so no DMAs are in flight across the reboot.
 */
static int
lio_shutdown(device_t dev)
{
	struct octeon_device *oct_dev = device_get_softc(dev);
	struct lio *lio = if_getsoftc(oct_dev->props.ifp);

	lio_send_rx_ctrl_cmd(lio, 0);

	return (0);
}
/* Suspend is not supported; ENXIO rejects the request. */
static int
lio_suspend(device_t dev)
{
	return (ENXIO);
}
/* Resume is not supported; ENXIO rejects the request. */
static int
lio_resume(device_t dev)
{
	return (ENXIO);
}
/*
 * Module event handler: only MOD_LOAD needs work, namely setting up
 * the global octeon device list.  All other events are accepted as-is.
 */
static int
lio_event(struct module *mod, int event, void *junk)
{
	if (event == MOD_LOAD)
		lio_init_device_list(LIO_CFG_TYPE_DEFAULT);

	return (0);
}
/*********************************************************************
* FreeBSD Device Interface Entry Points
* *******************************************************************/
static device_method_t lio_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, lio_probe),
DEVMETHOD(device_attach, lio_attach),
DEVMETHOD(device_detach, lio_detach),
DEVMETHOD(device_shutdown, lio_shutdown),
DEVMETHOD(device_suspend, lio_suspend),
DEVMETHOD(device_resume, lio_resume),
DEVMETHOD_END
};
static driver_t lio_driver = {
LIO_DRV_NAME, lio_methods, sizeof(struct octeon_device),
};
DRIVER_MODULE(lio, pci, lio_driver, lio_event, NULL);
MODULE_DEPEND(lio, pci, 1, 1, 1);
MODULE_DEPEND(lio, ether, 1, 1, 1);
MODULE_DEPEND(lio, firmware, 1, 1, 1);
/* True when the "hw.lio.fw_type" tunable requests no firmware load. */
static bool
fw_type_is_none(void)
{
	return (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
	    sizeof(LIO_FW_NAME_TYPE_NONE)) == 0);
}
/*
 * \brief Device initialization for each Octeon device that is probed
 * @param octeon_dev octeon device
 *
 * Walks the device through its init state machine (status is advanced via
 * atomic_store_rel_int after each stage): PCI enable, BAR mapping, soft
 * reset / firmware detection, dispatch lists, soft-command pool, IOQ
 * vectors, instruction/output queues, interrupts, and finally (when the
 * firmware is not already resident) console setup and firmware download.
 * Returns 0 on success, nonzero on failure; partially-initialized state
 * is torn down later based on the recorded status value.
 */
static int
lio_device_init(struct octeon_device *octeon_dev)
{
	unsigned long ddr_timeout = LIO_DDR_TIMEOUT;
	char *dbg_enb = NULL;
	int fw_loaded = 0;
	int i, j, ret;
	uint8_t bus, dev, function;
	char bootcmd[] = "\n";

	bus = pci_get_bus(octeon_dev->device);
	dev = pci_get_slot(octeon_dev->device);
	function = pci_get_function(octeon_dev->device);

	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_BEGIN_STATE);

	/* Enable access to the octeon device */
	if (pci_enable_busmaster(octeon_dev->device)) {
		lio_dev_err(octeon_dev, "pci_enable_device failed\n");
		return (1);
	}

	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_ENABLE_DONE);

	/* Identify the Octeon type and map the BAR address space. */
	if (lio_chip_specific_setup(octeon_dev)) {
		lio_dev_err(octeon_dev, "Chip specific setup failed\n");
		return (1);
	}

	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_MAP_DONE);

	/*
	 * Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
	 * since that is what is required for the reference to be removed
	 * during de-initialization (see 'octeon_destroy_resources').
	 */
	lio_register_device(octeon_dev, bus, dev, function, true);

	octeon_dev->app_mode = LIO_DRV_INVALID_APP;

	/*
	 * If no firmware is resident and the tunable does not forbid it,
	 * soft-reset the chip; re-check afterwards in case the reset
	 * changed the loaded-firmware state.
	 */
	if (!lio_cn23xx_pf_fw_loaded(octeon_dev) && !fw_type_is_none()) {
		fw_loaded = 0;
		/* Do a soft reset of the Octeon device. */
		if (octeon_dev->fn_list.soft_reset(octeon_dev))
			return (1);
		/* things might have changed */
		if (!lio_cn23xx_pf_fw_loaded(octeon_dev))
			fw_loaded = 0;
		else
			fw_loaded = 1;
	} else {
		fw_loaded = 1;
	}

	/*
	 * Initialize the dispatch mechanism used to push packets arriving on
	 * Octeon Output queues.
	 */
	if (lio_init_dispatch_list(octeon_dev))
		return (1);

	lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC,
				 LIO_OPCODE_NIC_CORE_DRV_ACTIVE,
				 lio_core_drv_init, octeon_dev);

	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DISPATCH_INIT_DONE);

	ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
	if (ret) {
		lio_dev_err(octeon_dev,
			    "Failed to configure device registers\n");
		return (ret);
	}

	/* Initialize soft command buffer pool */
	if (lio_setup_sc_buffer_pool(octeon_dev)) {
		lio_dev_err(octeon_dev, "sc buffer pool allocation failed\n");
		return (1);
	}

	atomic_store_rel_int(&octeon_dev->status,
			     LIO_DEV_SC_BUFF_POOL_INIT_DONE);

	if (lio_allocate_ioq_vector(octeon_dev)) {
		lio_dev_err(octeon_dev,
			    "IOQ vector allocation failed\n");
		return (1);
	}

	atomic_store_rel_int(&octeon_dev->status,
			     LIO_DEV_MSIX_ALLOC_VECTOR_DONE);

	/*
	 * NOTE(review): allocations below are not freed on the error
	 * returns in this function; presumably the status-driven teardown
	 * in lio_destroy_resources() reclaims them — confirm.
	 */
	for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) {
		octeon_dev->instr_queue[i] =
			malloc(sizeof(struct lio_instr_queue),
			       M_DEVBUF, M_NOWAIT | M_ZERO);
		if (octeon_dev->instr_queue[i] == NULL)
			return (1);
	}

	/* Setup the data structures that manage this Octeon's Input queues. */
	if (lio_setup_instr_queue0(octeon_dev)) {
		lio_dev_err(octeon_dev,
			    "Instruction queue initialization failed\n");
		return (1);
	}

	atomic_store_rel_int(&octeon_dev->status,
			     LIO_DEV_INSTR_QUEUE_INIT_DONE);

	/*
	 * Initialize lists to manage the requests of different types that
	 * arrive from user & kernel applications for this octeon device.
	 */
	if (lio_setup_response_list(octeon_dev)) {
		lio_dev_err(octeon_dev, "Response list allocation failed\n");
		return (1);
	}

	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_RESP_LIST_INIT_DONE);

	for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) {
		octeon_dev->droq[i] = malloc(sizeof(*octeon_dev->droq[i]),
					     M_DEVBUF, M_NOWAIT | M_ZERO);
		if (octeon_dev->droq[i] == NULL)
			return (1);
	}

	if (lio_setup_output_queue0(octeon_dev)) {
		lio_dev_err(octeon_dev, "Output queue initialization failed\n");
		return (1);
	}

	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DROQ_INIT_DONE);

	/*
	 * Setup the interrupt handler and record the INT SUM register address
	 */
	if (lio_setup_interrupt(octeon_dev,
				octeon_dev->sriov_info.num_pf_rings))
		return (1);

	/* Enable Octeon device interrupts */
	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);

	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_INTR_SET_DONE);

	/*
	 * Send Credit for Octeon Output queues. Credits are always sent BEFORE
	 * the output queue is enabled.
	 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
	 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
	 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
	 * before any credits have been issued, causing the ring to be reset
	 * (and the f/w appear to never have started).
	 */
	for (j = 0; j < octeon_dev->num_oqs; j++)
		lio_write_csr32(octeon_dev,
				octeon_dev->droq[j]->pkts_credit_reg,
				octeon_dev->droq[j]->max_count);

	/* Enable the input and output queues for this Octeon device */
	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
	if (ret) {
		lio_dev_err(octeon_dev, "Failed to enable input/output queues");
		return (ret);
	}

	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_IO_QUEUES_DONE);

	/* Firmware was not resident: bring it up over the console path. */
	if (!fw_loaded) {
		lio_dev_dbg(octeon_dev, "Waiting for DDR initialization...\n");
		if (!ddr_timeout) {
			lio_dev_info(octeon_dev,
				     "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
		}

		lio_sleep_timeout(LIO_RESET_MSECS);

		/*
		 * Wait for the octeon to initialize DDR after the
		 * soft-reset.
		 */
		while (!ddr_timeout) {
			/* Spin until the tunable is set; pause() returning
			 * nonzero means the sleep was interrupted. */
			if (pause("-", lio_ms_to_ticks(100))) {
				/* user probably pressed Control-C */
				return (1);
			}
		}

		ret = lio_wait_for_ddr_init(octeon_dev, &ddr_timeout);
		if (ret) {
			lio_dev_err(octeon_dev,
				    "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
				    ret);
			return (1);
		}

		if (lio_wait_for_bootloader(octeon_dev, 1100)) {
			lio_dev_err(octeon_dev, "Board not responding\n");
			return (1);
		}

		/* Divert uboot to take commands from host instead. */
		/*
		 * NOTE(review): this return value is overwritten below;
		 * a failed divert is apparently not treated as fatal —
		 * confirm that is intended.
		 */
		ret = lio_console_send_cmd(octeon_dev, bootcmd, 50);

		lio_dev_dbg(octeon_dev, "Initializing consoles\n");
		ret = lio_init_consoles(octeon_dev);
		if (ret) {
			lio_dev_err(octeon_dev, "Could not access board consoles\n");
			return (1);
		}

		/*
		 * If console debug enabled, specify empty string to
		 * use default enablement ELSE specify NULL string for
		 * 'disabled'.
		 */
		dbg_enb = lio_console_debug_enabled(0) ? "" : NULL;
		ret = lio_add_console(octeon_dev, 0, dbg_enb);

		if (ret) {
			lio_dev_err(octeon_dev, "Could not access board console\n");
			return (1);
		} else if (lio_console_debug_enabled(0)) {
			/*
			 * If console was added AND we're logging console output
			 * then set our console print function.
			 */
			octeon_dev->console[0].print = lio_dbg_console_print;
		}

		atomic_store_rel_int(&octeon_dev->status,
				     LIO_DEV_CONSOLE_INIT_DONE);

		lio_dev_dbg(octeon_dev, "Loading firmware\n");

		ret = lio_load_firmware(octeon_dev);
		if (ret) {
			lio_dev_err(octeon_dev, "Could not load firmware to board\n");
			return (1);
		}
	}

	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_HOST_OK);

	return (0);
}
/*
 * \brief PCI FLR for each Octeon device.
 * @param oct octeon device
 *
 * Quiesces the function, waits for pending transactions to drain, issues
 * a PCIe Function Level Reset, and restores the saved config space.
 */
static void
lio_pci_flr(struct octeon_device *oct)
{
	uint32_t exppos, status;

	/*
	 * Bail out when no PCIe capability is present; otherwise 'exppos'
	 * would be used uninitialized below.
	 */
	if (pci_find_cap(oct->device, PCIY_EXPRESS, &exppos) != 0)
		return;

	pci_save_state(oct->device);

	/* Quiesce the device completely */
	pci_write_config(oct->device, PCIR_COMMAND, PCIM_CMD_INTxDIS, 2);

	/* Wait for Transaction Pending bit clean */
	lio_mdelay(100);
	status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2);
	if (status & PCIEM_STA_TRANSACTION_PND) {
		lio_dev_info(oct, "Function reset incomplete after 100ms, sleeping for 5 seconds\n");
		/* 5000 ms, to match the message above (was 5 ms). */
		lio_mdelay(5000);
		status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2);
		if (status & PCIEM_STA_TRANSACTION_PND)
			lio_dev_info(oct, "Function reset still incomplete after 5s, reset anyway\n");
	}

	/* Trigger the FLR and give the function 100 ms to come back. */
	pci_write_config(oct->device, exppos + PCIER_DEVICE_CTL, PCIEM_CTL_INITIATE_FLR, 2);
	lio_mdelay(100);

	pci_restore_state(oct->device);
}
/*
 * \brief Debug console print function
 * @param oct octeon device
 * @param console_num console number
 * @param prefix first portion of line to display
 * @param suffix second portion of line to display
 *
 * The OCTEON debug console emits whole lines (without '\n').  A line
 * normally arrives in 'prefix' alone, but buffering may split it into a
 * 'prefix' and 'suffix' pair; either half may be NULL.
 */
static int
lio_dbg_console_print(struct octeon_device *oct, uint32_t console_num,
		      char *prefix, char *suffix)
{
	if (prefix == NULL) {
		if (suffix != NULL)
			lio_dev_info(oct, "%u: %s\n", console_num, suffix);
	} else if (suffix == NULL) {
		lio_dev_info(oct, "%u: %s\n", console_num, prefix);
	} else {
		lio_dev_info(oct, "%u: %s%s\n", console_num, prefix, suffix);
	}

	return (0);
}
/*
 * Kernel-process body that polls LIO_CN23XX_SLI_SCRATCH2 every two
 * seconds for a bitmask of crashed/stuck firmware cores, logging each
 * affected core once and marking this device (and its sibling PF, if
 * any) as having crashed cores.  Never returns.
 */
static void
lio_watchdog(void *param)
{
	int core_num;
	uint16_t mask_of_crashed_or_stuck_cores = 0;
	struct octeon_device *oct = param;
	/*
	 * NOTE(review): sized 12; assumes LIO_MAX_CORES <= 12 — the loop
	 * below indexes by core_num up to LIO_MAX_CORES.  Confirm.
	 */
	bool err_msg_was_printed[12];

	bzero(err_msg_was_printed, sizeof(err_msg_was_printed));

	while (1) {
		kproc_suspend_check(oct->watchdog_task);
		mask_of_crashed_or_stuck_cores =
			(uint16_t)lio_read_csr64(oct, LIO_CN23XX_SLI_SCRATCH2);

		if (mask_of_crashed_or_stuck_cores) {
			struct octeon_device *other_oct;

			oct->cores_crashed = true;
			other_oct = lio_get_other_octeon_device(oct);
			if (other_oct != NULL)
				other_oct->cores_crashed = true;

			for (core_num = 0; core_num < LIO_MAX_CORES;
			     core_num++) {
				bool core_crashed_or_got_stuck;

				core_crashed_or_got_stuck =
				    (mask_of_crashed_or_stuck_cores >>
				     core_num) & 1;
				/* Log each crashed core only once. */
				if (core_crashed_or_got_stuck &&
				    !err_msg_was_printed[core_num]) {
					lio_dev_err(oct,
						    "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
						    core_num);
					err_msg_was_printed[core_num] = true;
				}
			}

		}

		/* sleep for two seconds */
		pause("-", lio_ms_to_ticks(2000));
	}
}
/*
 * Identify the chip from PCI config space and run its chip-specific
 * setup.  Also derives the per-PF ring count from the num_queues_per_pf
 * tunables (clamping negative values to the 0 = "default" sentinel).
 * Returns 0 on success; 'ret' stays 1 for unknown devices.
 */
static int
lio_chip_specific_setup(struct octeon_device *oct)
{
	char *s;
	uint32_t dev_id;
	int ret = 1;

	dev_id = lio_read_pci_cfg(oct, 0);
	oct->subdevice_id = pci_get_subdevice(oct->device);

	switch (dev_id) {
	case LIO_CN23XX_PF_PCIID:
		oct->chip_id = LIO_CN23XX_PF_VID;
		/* PF0 and PF1 have independent ring-count tunables. */
		if (pci_get_function(oct->device) == 0) {
			if (num_queues_per_pf0 < 0) {
				lio_dev_info(oct, "Invalid num_queues_per_pf0: %d, Setting it to default\n",
					     num_queues_per_pf0);
				num_queues_per_pf0 = 0;
			}

			oct->sriov_info.num_pf_rings = num_queues_per_pf0;
		} else {
			if (num_queues_per_pf1 < 0) {
				lio_dev_info(oct, "Invalid num_queues_per_pf1: %d, Setting it to default\n",
					     num_queues_per_pf1);
				num_queues_per_pf1 = 0;
			}

			oct->sriov_info.num_pf_rings = num_queues_per_pf1;
		}

		ret = lio_cn23xx_pf_setup_device(oct);
		s = "CN23XX";
		break;

	default:
		s = "?";
		lio_dev_err(oct, "Unknown device found (dev_id: %x)\n", dev_id);
	}

	/* Banner only on successful chip setup. */
	if (!ret)
		lio_dev_info(oct, "%s PASS%d.%d %s Version: %s\n", s,
			     OCTEON_MAJOR_REV(oct), OCTEON_MINOR_REV(oct),
			     lio_get_conf(oct)->card_name, LIO_VERSION);

	return (ret);
}
/*
 * Return the octeon device that shares this adapter's PCI bus and slot
 * (the other PF of a dual-PF card), or NULL when there is none.
 */
static struct octeon_device *
lio_get_other_octeon_device(struct octeon_device *oct)
{
	struct octeon_device *peer;

	peer = lio_get_device(oct->octeon_id + 1);
	if ((peer == NULL) || !peer->device)
		return (NULL);

	if (pci_get_bus(oct->device) != pci_get_bus(peer->device))
		return (NULL);

	if (pci_get_slot(oct->device) != pci_get_slot(peer->device))
		return (NULL);

	return (peer);
}
/*
 * \brief Load firmware to device
 * @param oct octeon device
 *
 * Maps device to firmware filename, requests firmware, and downloads it.
 * Returns 0 on success, EINVAL when the image is missing, otherwise the
 * lio_download_firmware() status.
 */
static int
lio_load_firmware(struct octeon_device *oct)
{
	const struct firmware *fw;
	char *tmp_fw_type = NULL;
	int ret = 0;
	char fw_name[LIO_MAX_FW_FILENAME_LEN];

	/* An empty tunable selects the default NIC firmware image. */
	if (fw_type[0] == '\0')
		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
	else
		tmp_fw_type = fw_type;

	/* Bounded format: sprintf() could overrun fw_name. */
	snprintf(fw_name, sizeof(fw_name), "%s%s_%s%s", LIO_FW_BASE_NAME,
	    lio_get_conf(oct)->card_name, tmp_fw_type, LIO_FW_NAME_SUFFIX);

	fw = firmware_get(fw_name);
	if (fw == NULL) {
		lio_dev_err(oct, "Request firmware failed. Could not find file %s.\n",
			    fw_name);
		return (EINVAL);
	}

	/* Push the image to the chip, then drop the firmware reference. */
	ret = lio_download_firmware(oct, fw->data, fw->datasize);

	firmware_put(fw, FIRMWARE_UNLOAD);

	return (ret);
}
/*
 * Mark the device RUNNING and, when the firmware reports the NIC
 * application, initialize the network interfaces.  Returns 0 on
 * success, -1 on failure or unexpected firmware application.
 */
static int
lio_nic_starter(struct octeon_device *oct)
{
	int ret = 0;

	atomic_store_rel_int(&oct->status, LIO_DEV_RUNNING);

	if (oct->app_mode && oct->app_mode == LIO_DRV_NIC_APP) {
		if (lio_init_nic_module(oct)) {
			lio_dev_err(oct, "NIC initialization failed\n");
			ret = -1;
/*
 * NOTE(review): "ONiLY" is misspelled, so this block is never compiled;
 * left as-is because correcting the macro name would enable untested
 * SR-IOV code.
 */
#ifdef CAVIUM_ONiLY_23XX_VF
		} else {
			if (octeon_enable_sriov(oct) < 0)
				ret = -1;
#endif
		}
	} else {
		lio_dev_err(oct,
			    "Unexpected application running on NIC (%d). Check firmware.\n",
			    oct->app_mode);
		ret = -1;
	}

	return (ret);
}
/*
 * Initialize the NIC side of the driver: record the port count, reset
 * the interface properties, and create one network device per port.
 * On failure the interface count is cleared and the setup status is
 * returned; 0 on success.
 */
static int
lio_init_nic_module(struct octeon_device *oct)
{
	int num_nic_ports = LIO_GET_NUM_NIC_PORTS_CFG(lio_get_conf(oct));
	int retval;

	lio_dev_dbg(oct, "Initializing network interfaces\n");

	/*
	 * only default iq and oq were initialized
	 * initialize the rest as well
	 */

	/* run port_config command for each port */
	oct->ifcount = num_nic_ports;

	bzero(&oct->props, sizeof(struct lio_if_props));
	oct->props.gmxport = -1;

	retval = lio_setup_nic_devices(oct);
	if (retval != 0) {
		lio_dev_err(oct, "Setup NIC devices failed\n");
		oct->ifcount = 0;
		return (retval);
	}

	lio_dev_dbg(oct, "Network interfaces ready\n");

	return (retval);
}
/*
 * ifmedia change handler.  Only IFM_ETHER with subtype IFM_AUTO is
 * accepted; any request to switch to a fixed media type is rejected
 * with EINVAL.
 */
static int
lio_ifmedia_update(if_t ifp)
{
	struct lio *lio = if_getsoftc(ifp);
	struct ifmedia *ifm = &lio->ifmedia;

	/* We only support Ethernet media type. */
	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		/* We don't support changing the media type. */
		lio_dev_err(lio->oct_dev, "Invalid media type (%d)\n",
			    IFM_SUBTYPE(ifm->ifm_media));
		return (EINVAL);
	}

	return (0);
}
/*
 * Map the PCI subdevice ID to an ifmedia subtype: 25G boards report
 * IFM_25G_SR, everything else (including unknown IDs) IFM_10G_SR.
 */
static int
lio_get_media_subtype(struct octeon_device *oct)
{
	int subtype = IFM_10G_SR;

	if (oct->subdevice_id == LIO_CN2350_25G_SUBDEVICE ||
	    oct->subdevice_id == LIO_CN2360_25G_SUBDEVICE)
		subtype = IFM_25G_SR;

	return (subtype);
}
/*
 * Map the PCI subdevice ID to a link baudrate: 25 Gbps for the 25G
 * boards, 10 Gbps for everything else (including unknown IDs).
 */
static uint64_t
lio_get_baudrate(struct octeon_device *oct)
{
	uint64_t rate = IF_Gbps(10);

	if (oct->subdevice_id == LIO_CN2350_25G_SUBDEVICE ||
	    oct->subdevice_id == LIO_CN2360_25G_SUBDEVICE)
		rate = IF_Gbps(25);

	return (rate);
}
/*
 * ifmedia status handler: report link validity, active media subtype
 * and duplex based on the firmware-provided link info.
 */
static void
lio_ifmedia_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct lio *lio = if_getsoftc(ifp);

	/* Report link down if the driver isn't running. */
	if (!lio_ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	/* Setup the default interface info. */
	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (lio->linfo.link.s.link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
	} else {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_active |= lio_get_media_subtype(lio->oct_dev);

	if (lio->linfo.link.s.duplex)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;
}
/*
 * if_get_counter handler: aggregate per-queue driver statistics (and
 * firmware link statistics) into the standard ifnet counters.  Packet,
 * byte and drop counters are summed over every RX (oq) or TX (iq)
 * queue owned by this interface; anything not handled here falls back
 * to the stack's default counters.
 */
static uint64_t
lio_get_counter(if_t ifp, ift_counter cnt)
{
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_device *oct = lio->oct_dev;
	uint64_t counter = 0;
	int i, q_no;

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		for (i = 0; i < oct->num_oqs; i++) {
			q_no = lio->linfo.rxpciq[i].s.q_no;
			counter += oct->droq[q_no]->stats.rx_pkts_received;
		}
		break;
	case IFCOUNTER_OPACKETS:
		for (i = 0; i < oct->num_iqs; i++) {
			q_no = lio->linfo.txpciq[i].s.q_no;
			counter += oct->instr_queue[q_no]->stats.tx_done;
		}
		break;
	case IFCOUNTER_IBYTES:
		for (i = 0; i < oct->num_oqs; i++) {
			q_no = lio->linfo.rxpciq[i].s.q_no;
			counter += oct->droq[q_no]->stats.rx_bytes_received;
		}
		break;
	case IFCOUNTER_OBYTES:
		for (i = 0; i < oct->num_iqs; i++) {
			q_no = lio->linfo.txpciq[i].s.q_no;
			counter += oct->instr_queue[q_no]->stats.tx_tot_bytes;
		}
		break;
	case IFCOUNTER_IQDROPS:
		for (i = 0; i < oct->num_oqs; i++) {
			q_no = lio->linfo.rxpciq[i].s.q_no;
			counter += oct->droq[q_no]->stats.rx_dropped;
		}
		break;
	case IFCOUNTER_OQDROPS:
		for (i = 0; i < oct->num_iqs; i++) {
			q_no = lio->linfo.txpciq[i].s.q_no;
			counter += oct->instr_queue[q_no]->stats.tx_dropped;
		}
		break;
	case IFCOUNTER_IMCASTS:
		counter = oct->link_stats.fromwire.total_mcst;
		break;
	case IFCOUNTER_OMCASTS:
		counter = oct->link_stats.fromhost.mcast_pkts_sent;
		break;
	case IFCOUNTER_COLLISIONS:
		counter = oct->link_stats.fromhost.total_collisions;
		break;
	case IFCOUNTER_IERRORS:
		/* Input errors = FCS + L2 + framing errors seen on the wire. */
		counter = oct->link_stats.fromwire.fcs_err +
			oct->link_stats.fromwire.l2_err +
			oct->link_stats.fromwire.frame_err;
		break;
	default:
		return (if_get_counter_default(ifp, cnt));
	}

	return (counter);
}
/*
 * Populate the ifnet for one lio interface: set up ifmedia (fixed
 * subtype + autoselect), install the driver entry points, seed the MTU
 * from the firmware link info, and advertise the checksum/TSO/LRO/VLAN
 * capability set.  Always returns 0.
 */
static int
lio_init_ifnet(struct lio *lio)
{
	struct octeon_device *oct = lio->oct_dev;
	if_t ifp = lio->ifp;

	/* ifconfig entrypoint for media type/status reporting */
	ifmedia_init(&lio->ifmedia, IFM_IMASK, lio_ifmedia_update,
		     lio_ifmedia_status);

	/* set the default interface values */
	ifmedia_add(&lio->ifmedia,
		    (IFM_ETHER | IFM_FDX | lio_get_media_subtype(oct)),
		    0, NULL);
	ifmedia_add(&lio->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
	ifmedia_set(&lio->ifmedia, (IFM_ETHER | IFM_AUTO));

	lio->ifmedia.ifm_media = lio->ifmedia.ifm_cur->ifm_media;
	lio_dev_dbg(oct, "IFMEDIA flags : %x\n", lio->ifmedia.ifm_media);

	if_initname(ifp, device_get_name(oct->device),
		    device_get_unit(oct->device));
	if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
	if_setioctlfn(ifp, lio_ioctl);
	if_setgetcounterfn(ifp, lio_get_counter);
	if_settransmitfn(ifp, lio_mq_start);
	if_setqflushfn(ifp, lio_qflush);
	if_setinitfn(ifp, lio_open);
	/* MTU comes from the firmware-reported link info. */
	if_setmtu(ifp, lio->linfo.link.s.mtu);
	lio->mtu = lio->linfo.link.s.mtu;
	if_sethwassist(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
			     CSUM_TCP_IPV6 | CSUM_UDP_IPV6));
	if_setcapabilitiesbit(ifp, (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
				    IFCAP_TSO | IFCAP_LRO |
				    IFCAP_JUMBO_MTU | IFCAP_HWSTATS |
				    IFCAP_LINKSTATE | IFCAP_VLAN_HWFILTER |
				    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTAGGING |
				    IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU), 0);
	/* Everything advertised is enabled by default. */
	if_setcapenable(ifp, if_getcapabilities(ifp));
	if_setbaudrate(ifp, lio_get_baudrate(oct));

	return (0);
}
/*
 * Tear down every software-LRO context attached to this interface's
 * output queues; droq->lro.ifp doubles as the "initialized" flag.
 */
static void
lio_tcp_lro_free(struct octeon_device *octeon_dev, if_t ifp)
{
	struct lio *lio = if_getsoftc(ifp);
	struct lio_droq *droq;
	int idx;

	for (idx = 0; idx < octeon_dev->num_oqs; idx++) {
		droq = octeon_dev->droq[lio->linfo.rxpciq[idx].s.q_no];
		if (droq->lro.ifp == NULL)
			continue;
		tcp_lro_free(&droq->lro);
		droq->lro.ifp = NULL;
	}
}
/*
 * Initialize a software-LRO context for each output queue of this
 * interface.  On the first failure every context created so far is
 * freed and the tcp_lro_init() status is returned; 0 on success.
 */
static int
lio_tcp_lro_init(struct octeon_device *octeon_dev, if_t ifp)
{
	struct lio *lio = if_getsoftc(ifp);
	struct lro_ctrl *lro;
	int idx, rc = 0;

	for (idx = 0; idx < octeon_dev->num_oqs; idx++) {
		lro = &octeon_dev->droq[lio->linfo.rxpciq[idx].s.q_no]->lro;
		rc = tcp_lro_init(lro);
		if (rc != 0) {
			lio_dev_err(octeon_dev, "LRO Initialization failed ret %d\n",
				    rc);
			/* Undo the contexts initialized so far. */
			lio_tcp_lro_free(octeon_dev, ifp);
			return (rc);
		}
		lro->ifp = ifp;
	}

	return (rc);
}
/*
 * Create and register one network interface per NIC port: send the
 * IF_CFG soft command to the firmware, parse the queue masks and link
 * info it returns, allocate the ifnet/softc pair, attach Ethernet,
 * create I/O queues, gather lists, LRO state and the RX OOM poll task,
 * and push the default offload settings to the firmware.  On any
 * failure, interfaces already created are destroyed and ENODEV is
 * returned; 0 on success.
 */
static int
lio_setup_nic_devices(struct octeon_device *octeon_dev)
{
	union octeon_if_cfg if_cfg;
	struct lio *lio = NULL;
	if_t ifp = NULL;
	struct lio_version *vdata;
	struct lio_soft_command *sc;
	struct lio_if_cfg_context *ctx;
	struct lio_if_cfg_resp *resp;
	struct lio_if_props *props;
	int num_iqueues, num_oqueues, retval;
	unsigned int base_queue;
	unsigned int gmx_port_id;
	uint32_t ctx_size, data_size;
	uint32_t ifidx_or_pfnum, resp_size;
	/*
	 * NOTE(review): ETHER_HDR_LEN (14) over-sizes this 6-byte MAC
	 * buffer; harmless but ETHER_ADDR_LEN looks intended.
	 */
	uint8_t mac[ETHER_HDR_LEN], i, j;

	/* This is to handle link status changes */
	lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC,
				 LIO_OPCODE_NIC_INFO,
				 lio_link_info, octeon_dev);

	for (i = 0; i < octeon_dev->ifcount; i++) {
		resp_size = sizeof(struct lio_if_cfg_resp);
		ctx_size = sizeof(struct lio_if_cfg_context);
		data_size = sizeof(struct lio_version);
		sc = lio_alloc_soft_command(octeon_dev, data_size, resp_size,
					    ctx_size);
		if (sc == NULL)
			return (ENOMEM);

		resp = (struct lio_if_cfg_resp *)sc->virtrptr;
		ctx = (struct lio_if_cfg_context *)sc->ctxptr;
		vdata = (struct lio_version *)sc->virtdptr;

		/* Advertise the host driver version to the firmware. */
		*((uint64_t *)vdata) = 0;
		vdata->major = htobe16(LIO_BASE_MAJOR_VERSION);
		vdata->minor = htobe16(LIO_BASE_MINOR_VERSION);
		vdata->micro = htobe16(LIO_BASE_MICRO_VERSION);

		num_iqueues = octeon_dev->sriov_info.num_pf_rings;
		num_oqueues = octeon_dev->sriov_info.num_pf_rings;
		base_queue = octeon_dev->sriov_info.pf_srn;

		gmx_port_id = octeon_dev->pf_num;
		ifidx_or_pfnum = octeon_dev->pf_num;

		lio_dev_dbg(octeon_dev, "requesting config for interface %d, iqs %d, oqs %d\n",
			    ifidx_or_pfnum, num_iqueues, num_oqueues);
		/* ctx->cond is the wake-up flag set by the callback. */
		ctx->cond = 0;
		ctx->octeon_id = lio_get_device_id(octeon_dev);

		if_cfg.if_cfg64 = 0;
		if_cfg.s.num_iqueues = num_iqueues;
		if_cfg.s.num_oqueues = num_oqueues;
		if_cfg.s.base_queue = base_queue;
		if_cfg.s.gmx_port_id = gmx_port_id;

		sc->iq_no = 0;

		lio_prepare_soft_command(octeon_dev, sc, LIO_OPCODE_NIC,
					 LIO_OPCODE_NIC_IF_CFG, 0,
					 if_cfg.if_cfg64, 0);

		sc->callback = lio_if_cfg_callback;
		sc->callback_arg = sc;
		sc->wait_time = 3000;

		retval = lio_send_soft_command(octeon_dev, sc);
		if (retval == LIO_IQ_SEND_FAILED) {
			lio_dev_err(octeon_dev, "iq/oq config failed status: %x\n",
				    retval);
			/* Soft instr is freed by driver in case of failure. */
			goto setup_nic_dev_fail;
		}

		/*
		 * Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		lio_sleep_cond(octeon_dev, &ctx->cond);

		retval = resp->status;
		if (retval) {
			lio_dev_err(octeon_dev, "iq/oq config failed\n");
			goto setup_nic_dev_fail;
		}

		/* Response payload arrives big-endian; fix it up. */
		lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
				 (sizeof(struct octeon_if_cfg_info)) >> 3);

		num_iqueues = bitcount64(resp->cfg_info.iqmask);
		num_oqueues = bitcount64(resp->cfg_info.oqmask);

		if (!(num_iqueues) || !(num_oqueues)) {
			lio_dev_err(octeon_dev,
				    "Got bad iqueues (%016llX) or oqueues (%016llX) from firmware.\n",
				    LIO_CAST64(resp->cfg_info.iqmask),
				    LIO_CAST64(resp->cfg_info.oqmask));
			goto setup_nic_dev_fail;
		}

		lio_dev_dbg(octeon_dev,
			    "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
			    i, LIO_CAST64(resp->cfg_info.iqmask),
			    LIO_CAST64(resp->cfg_info.oqmask),
			    num_iqueues, num_oqueues);

		ifp = if_alloc(IFT_ETHER);

-		if (ifp == NULL) {
-			lio_dev_err(octeon_dev, "Device allocation failed\n");
-			goto setup_nic_dev_fail;
-		}
-
		lio = malloc(sizeof(struct lio), M_DEVBUF, M_NOWAIT | M_ZERO);

		if (lio == NULL) {
			lio_dev_err(octeon_dev, "Lio allocation failed\n");
			goto setup_nic_dev_fail;
		}

		if_setsoftc(ifp, lio);

		if_sethwtsomax(ifp, LIO_MAX_FRAME_SIZE);
		if_sethwtsomaxsegcount(ifp, LIO_MAX_SG);
		if_sethwtsomaxsegsize(ifp, PAGE_SIZE);

		lio->ifidx = ifidx_or_pfnum;

		props = &octeon_dev->props;
		props->gmxport = resp->cfg_info.linfo.gmxport;
		props->ifp = ifp;

		lio->linfo.num_rxpciq = num_oqueues;
		lio->linfo.num_txpciq = num_iqueues;

		for (j = 0; j < num_oqueues; j++) {
			lio->linfo.rxpciq[j].rxpciq64 =
			    resp->cfg_info.linfo.rxpciq[j].rxpciq64;
		}

		for (j = 0; j < num_iqueues; j++) {
			lio->linfo.txpciq[j].txpciq64 =
			    resp->cfg_info.linfo.txpciq[j].txpciq64;
		}

		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
		lio->linfo.link.link_status64 =
		    resp->cfg_info.linfo.link.link_status64;

		/*
		 * Point to the properties for octeon device to which this
		 * interface belongs.
		 */
		lio->oct_dev = octeon_dev;
		lio->ifp = ifp;

		lio_dev_dbg(octeon_dev, "if%d gmx: %d hw_addr: 0x%llx\n", i,
			    lio->linfo.gmxport, LIO_CAST64(lio->linfo.hw_addr));
		lio_init_ifnet(lio);
		/* 64-bit swap required on LE machines */
		lio_swap_8B_data(&lio->linfo.hw_addr, 1);
		/* MAC bytes live in the low 6 bytes of hw_addr (offset 2). */
		for (j = 0; j < 6; j++)
			mac[j] = *((uint8_t *)(
				   ((uint8_t *)&lio->linfo.hw_addr) + 2 + j));

		ether_ifattach(ifp, mac);

		/*
		 * By default all interfaces on a single Octeon uses the same
		 * tx and rx queues
		 */
		lio->txq = lio->linfo.txpciq[0].s.q_no;
		lio->rxq = lio->linfo.rxpciq[0].s.q_no;

		if (lio_setup_io_queues(octeon_dev, i, lio->linfo.num_txpciq,
					lio->linfo.num_rxpciq)) {
			lio_dev_err(octeon_dev, "I/O queues creation failed\n");
			goto setup_nic_dev_fail;
		}

		lio_ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);

		lio->tx_qsize = lio_get_tx_qsize(octeon_dev, lio->txq);
		lio->rx_qsize = lio_get_rx_qsize(octeon_dev, lio->rxq);

		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
			lio_dev_err(octeon_dev, "Gather list allocation failed\n");
			goto setup_nic_dev_fail;
		}

		/* Software LRO only when hardware LRO is disabled. */
		if ((lio_hwlro == 0) && lio_tcp_lro_init(octeon_dev, ifp))
			goto setup_nic_dev_fail;

		if (lio_hwlro &&
		    (if_getcapenable(ifp) & IFCAP_LRO) &&
		    (if_getcapenable(ifp) & IFCAP_RXCSUM) &&
		    (if_getcapenable(ifp) & IFCAP_RXCSUM_IPV6))
			lio_set_feature(ifp, LIO_CMD_LRO_ENABLE,
					LIO_LROIPV4 | LIO_LROIPV6);

		if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER))
			lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 1);
		else
			lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 0);

		if (lio_setup_rx_oom_poll_fn(ifp))
			goto setup_nic_dev_fail;

		lio_dev_dbg(octeon_dev, "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
			    i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		lio->link_changes++;

		lio_ifstate_set(lio, LIO_IFSTATE_REGISTERED);

		/*
		 * Sending command to firmware to enable Rx checksum offload
		 * by default at the time of setup of Liquidio driver for
		 * this device
		 */
		lio_set_rxcsum_command(ifp, LIO_CMD_TNL_RX_CSUM_CTL,
				       LIO_CMD_RXCSUM_ENABLE);
		lio_set_feature(ifp, LIO_CMD_TNL_TX_CSUM_CTL,
				LIO_CMD_TXCSUM_ENABLE);

#ifdef RSS
		if (lio_rss) {
			if (lio_send_rss_param(lio))
				goto setup_nic_dev_fail;
		} else
#endif	/* RSS */
			/* Without RSS, enable flow-number hashing instead. */
			lio_set_feature(ifp, LIO_CMD_SET_FNV,
					LIO_CMD_FNV_ENABLE);

		lio_dev_dbg(octeon_dev, "NIC ifidx:%d Setup successful\n", i);

		lio_free_soft_command(octeon_dev, sc);
		lio->vlan_attach =
		    EVENTHANDLER_REGISTER(vlan_config,
					  lio_vlan_rx_add_vid, lio,
					  EVENTHANDLER_PRI_FIRST);
		lio->vlan_detach =
		    EVENTHANDLER_REGISTER(vlan_unconfig,
					  lio_vlan_rx_kill_vid, lio,
					  EVENTHANDLER_PRI_FIRST);

		/* Update stats periodically */
		callout_init(&lio->stats_timer, 0);
		lio->stats_interval = LIO_DEFAULT_STATS_INTERVAL;

		lio_add_hw_stats(lio);
	}

	return (0);

setup_nic_dev_fail:

	lio_free_soft_command(octeon_dev, sc);

	/* Unwind every interface created in earlier iterations. */
	while (i--) {
		lio_dev_err(octeon_dev, "NIC ifidx:%d Setup failed\n", i);
		lio_destroy_nic_device(octeon_dev, i);
	}

	return (ENODEV);
}
/*
 * Dispatch handler for firmware NIC_INFO packets: validate the payload
 * size, byte-swap the embedded link status and, when it is for our
 * gmxport, propagate it to the ifnet.  Receive buffers and the recv
 * info are always released before returning.  Always returns 0.
 */
static int
lio_link_info(struct lio_recv_info *recv_info, void *ptr)
{
	struct octeon_device *oct = (struct octeon_device *)ptr;
	struct lio_recv_pkt *recv_pkt = recv_info->recv_pkt;
	union octeon_link_status *ls;
	int gmxport = 0, i;

	lio_dev_dbg(oct, "%s Called\n", __func__);
	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + LIO_DROQ_INFO_SIZE)) {
		lio_dev_err(oct, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
			    recv_pkt->buffer_size[0],
			    recv_pkt->rh.r_nic_info.gmxport);
		goto nic_info_err;
	}
	gmxport = recv_pkt->rh.r_nic_info.gmxport;
	/* Link status follows the DROQ info header in the first mbuf. */
	ls = (union octeon_link_status *)(recv_pkt->buffer_ptr[0]->m_data +
					  LIO_DROQ_INFO_SIZE);
	lio_swap_8B_data((uint64_t *)ls,
			 (sizeof(union octeon_link_status)) >> 3);

	if (oct->props.gmxport == gmxport)
		lio_update_link_status(oct->props.ifp, ls);

nic_info_err:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		lio_recv_buffer_free(recv_pkt->buffer_ptr[i]);
	lio_free_recv_info(recv_info);
	return (0);
}
/*
 * TX completion for a single-buffer packet: sync and unload the DMA
 * map, then free the mbuf chain.
 */
void
lio_free_mbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo)
{

	bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(iq->txtag, finfo->map);
	m_freem(finfo->mb);
}
/*
 * TX completion for a scatter/gather packet: return the gather-list
 * entry to the per-queue free list (under the glist lock), then sync
 * and unload the DMA map and free the mbuf chain.
 */
void
lio_free_sgmbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo)
{
	struct lio_gather *g;
	struct octeon_device *oct;
	struct lio *lio;
	int iq_no;

	g = finfo->g;
	iq_no = iq->txpciq.s.q_no;
	oct = iq->oct_dev;
	lio = if_getsoftc(oct->props.ifp);

	mtx_lock(&lio->glist_lock[iq_no]);
	STAILQ_INSERT_TAIL(&lio->ghead[iq_no], &g->node, entries);
	mtx_unlock(&lio->glist_lock[iq_no]);

	bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(iq->txtag, finfo->map);
	m_freem(finfo->mb);
}
/*
 * Soft-command completion callback for the IF_CFG request: record the
 * firmware version from the response, log failures, and set ctx->cond
 * so the sender sleeping in lio_sleep_cond() wakes up.
 */
static void
lio_if_cfg_callback(struct octeon_device *oct, uint32_t status, void *buf)
{
	struct lio_soft_command *sc = (struct lio_soft_command *)buf;
	struct lio_if_cfg_resp *resp;
	struct lio_if_cfg_context *ctx;

	resp = (struct lio_if_cfg_resp *)sc->virtrptr;
	ctx = (struct lio_if_cfg_context *)sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (resp->status)
		lio_dev_err(oct, "nic if cfg instruction failed. Status: %llx (0x%08x)\n",
			    LIO_CAST64(resp->status), status);
	ctx->cond = 1;

	snprintf(oct->fw_info.lio_firmware_version, 32, "%s",
		 resp->cfg_info.lio_firmware_version);

	/*
	 * This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();
}
/*
 * Compare two 6-byte MAC addresses.
 * @param new candidate address
 * @param old current address
 * Returns nonzero when they differ, 0 when identical.  memcmp replaces
 * the previous six hand-written byte comparisons.
 */
static int
lio_is_mac_changed(uint8_t *new, uint8_t *old)
{

	return (memcmp(new, old, 6) != 0);
}
/*
 * ifnet init handler: mark the interface RUNNING, tell the firmware to
 * start delivering RX traffic, push a new MAC to the firmware if the
 * stack's lladdr differs from the firmware-reported one, and set
 * IFF_DRV_RUNNING.
 */
void
lio_open(void *arg)
{
	struct lio *lio = arg;
	if_t ifp = lio->ifp;
	struct octeon_device *oct = lio->oct_dev;
	uint8_t *mac_new, mac_old[ETHER_HDR_LEN];
	int ret = 0;

	lio_ifstate_set(lio, LIO_IFSTATE_RUNNING);

	/* Ready for link status updates */
	lio->intf_open = 1;

	lio_dev_info(oct, "Interface Open, ready for traffic\n");

	/* tell Octeon to start forwarding packets to host */
	lio_send_rx_ctrl_cmd(lio, 1);

	mac_new = if_getlladdr(ifp);
	/* Firmware MAC lives in the low 6 bytes of hw_addr (offset 2). */
	memcpy(mac_old, ((uint8_t *)&lio->linfo.hw_addr) + 2, ETHER_HDR_LEN);

	if (lio_is_mac_changed(mac_new, mac_old)) {
		ret = lio_set_mac(ifp, mac_new);
		if (ret)
			lio_dev_err(oct, "MAC change failed, error: %d\n", ret);
	}

	/* Now inform the stack we're ready */
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);

	lio_dev_info(oct, "Interface is opened\n");
}
/*
 * Send a control packet asking the firmware to change RX checksum
 * offload state.
 * @param ifp     interface
 * @param command control opcode (e.g. LIO_CMD_TNL_RX_CSUM_CTL)
 * @param rx_cmd  LIO_CMD_RXCSUM_ENABLE / LIO_CMD_RXCSUM_DISABLE
 * Returns the lio_send_ctrl_pkt() status (negative on failure).
 */
static int
lio_set_rxcsum_command(if_t ifp, int command, uint8_t rx_cmd)
{
	struct lio_ctrl_pkt nctrl;
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_device *oct = lio->oct_dev;
	int ret = 0;

	/*
	 * Zero the whole control packet: previously only selected fields
	 * were set, so the remaining members went out as stack garbage.
	 */
	bzero(&nctrl, sizeof(nctrl));

	nctrl.ncmd.cmd64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.param1 = rx_cmd;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.lio = lio;
	nctrl.cb_fn = lio_ctrl_cmd_completion;

	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		lio_dev_err(oct, "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
			    ret);
	}

	return (ret);
}
/*
 * Stop the NIC side of the driver: mark the command/response path
 * offline, unregister every DROQ handler, drain the stats callout and
 * destroy each network device.  Returns 0 on success, 1 when init
 * never completed.
 */
static int
lio_stop_nic_module(struct octeon_device *oct)
{
	int i, j;
	struct lio *lio;

	lio_dev_dbg(oct, "Stopping network interfaces\n");
	if (!oct->ifcount) {
		lio_dev_err(oct, "Init for Octeon was not completed\n");
		return (1);
	}

	mtx_lock(&oct->cmd_resp_wqlock);
	oct->cmd_resp_state = LIO_DRV_OFFLINE;
	mtx_unlock(&oct->cmd_resp_wqlock);

	for (i = 0; i < oct->ifcount; i++) {
		/* props.ifp is shared, so every iteration yields the same lio. */
		lio = if_getsoftc(oct->props.ifp);
		for (j = 0; j < oct->num_oqs; j++)
			lio_unregister_droq_ops(oct,
						lio->linfo.rxpciq[j].s.q_no);
	}

	/*
	 * NOTE(review): drains only the 'lio' from the final loop pass;
	 * OK while props.ifp is a single shared ifp — confirm.
	 */
	callout_drain(&lio->stats_timer);

	for (i = 0; i < oct->ifcount; i++)
		lio_destroy_nic_device(oct, i);

	lio_dev_dbg(oct, "Network interface stopped\n");

	return (0);
}
/*
 * Free all TX gather-list state: the per-queue lock array, every queued
 * gather node, the per-queue DMA backing memory, and the bookkeeping
 * arrays.  Safe to call with a partially constructed lio (used from the
 * lio_setup_glists() error paths).
 */
static void
lio_delete_glists(struct octeon_device *oct, struct lio *lio)
{
	struct lio_gather *g;
	int i;

	/*
	 * NOTE(review): the mutexes in glist_lock are freed without
	 * mtx_destroy() — confirm that is acceptable here.
	 */
	if (lio->glist_lock != NULL) {
		free((void *)lio->glist_lock, M_DEVBUF);
		lio->glist_lock = NULL;
	}

	if (lio->ghead == NULL)
		return;

	for (i = 0; i < lio->linfo.num_txpciq; i++) {
		/* Pop-and-free until the list is empty (free(NULL) is a no-op). */
		do {
			g = (struct lio_gather *)
			    lio_delete_first_node(&lio->ghead[i]);
			free(g, M_DEVBUF);
		} while (g);

		if ((lio->glists_virt_base != NULL) &&
		    (lio->glists_virt_base[i] != NULL)) {
			lio_dma_free(lio->glist_entry_size * lio->tx_qsize,
				     lio->glists_virt_base[i]);
		}
	}

	free(lio->glists_virt_base, M_DEVBUF);
	lio->glists_virt_base = NULL;

	free(lio->glists_dma_base, M_DEVBUF);
	lio->glists_dma_base = NULL;

	free(lio->ghead, M_DEVBUF);
	lio->ghead = NULL;
}
/*
 * Allocate the TX gather-list machinery for 'num_iqs' queues: one lock
 * and free-list head per queue, a contiguous DMA region of
 * glist_entry_size * tx_qsize per queue, and one lio_gather node per TX
 * descriptor pointing into that region.  Returns 0 on success; on any
 * failure everything allocated so far is torn down and 1 is returned.
 */
static int
lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
	struct lio_gather *g;
	int i, j;

	lio->glist_lock = malloc(num_iqs * sizeof(*lio->glist_lock), M_DEVBUF,
				 M_NOWAIT | M_ZERO);
	if (lio->glist_lock == NULL)
		return (1);

	lio->ghead = malloc(num_iqs * sizeof(*lio->ghead), M_DEVBUF,
			    M_NOWAIT | M_ZERO);
	if (lio->ghead == NULL) {
		free((void *)lio->glist_lock, M_DEVBUF);
		lio->glist_lock = NULL;
		return (1);
	}

	/* Room for LIO_MAX_SG entries, rounded for 8-byte alignment. */
	lio->glist_entry_size = ROUNDUP8((ROUNDUP4(LIO_MAX_SG) >> 2) *
					 LIO_SG_ENTRY_SIZE);

	/*
	 * allocate memory to store virtual and dma base address of
	 * per glist consistent memory
	 */
	lio->glists_virt_base = malloc(num_iqs * sizeof(void *), M_DEVBUF,
				       M_NOWAIT | M_ZERO);
	lio->glists_dma_base = malloc(num_iqs * sizeof(vm_paddr_t), M_DEVBUF,
				      M_NOWAIT | M_ZERO);
	if ((lio->glists_virt_base == NULL) || (lio->glists_dma_base == NULL)) {
		lio_delete_glists(oct, lio);
		return (1);
	}

	for (i = 0; i < num_iqs; i++) {
		mtx_init(&lio->glist_lock[i], "glist_lock", NULL, MTX_DEF);

		STAILQ_INIT(&lio->ghead[i]);

		lio->glists_virt_base[i] =
		    lio_dma_alloc(lio->glist_entry_size * lio->tx_qsize,
				  (vm_paddr_t *)&lio->glists_dma_base[i]);
		if (lio->glists_virt_base[i] == NULL) {
			lio_delete_glists(oct, lio);
			return (1);
		}

		/* Carve the region into tx_qsize gather nodes. */
		for (j = 0; j < lio->tx_qsize; j++) {
			g = malloc(sizeof(*g), M_DEVBUF, M_NOWAIT | M_ZERO);
			if (g == NULL)
				break;

			g->sg = (struct lio_sg_entry *)(uintptr_t)
			    ((uint64_t)(uintptr_t)lio->glists_virt_base[i] +
			     (j * lio->glist_entry_size));
			g->sg_dma_ptr = (uint64_t)lio->glists_dma_base[i] +
			    (j * lio->glist_entry_size);
			STAILQ_INSERT_TAIL(&lio->ghead[i], &g->node, entries);
		}

		/* Partial node allocation means failure. */
		if (j != lio->tx_qsize) {
			lio_delete_glists(oct, lio);
			return (1);
		}
	}

	return (0);
}
/*
 * Bring the interface down: clear the RUNNING state, report link-down
 * to the stack, tell the firmware to stop RX delivery, and clear
 * IFF_DRV_RUNNING.
 */
void
lio_stop(if_t ifp)
{
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_device *oct = lio->oct_dev;

	lio_ifstate_reset(lio, LIO_IFSTATE_RUNNING);

	if_link_state_change(ifp, LINK_STATE_DOWN);

	lio->intf_open = 0;
	lio->linfo.link.s.link_up = 0;
	lio->link_changes++;

	lio_send_rx_ctrl_cmd(lio, 0);

	/* Tell the stack that the interface is no longer active */
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);

	lio_dev_info(oct, "Interface is stopped\n");
}
/*
 * Periodic out-of-memory recovery for the RX rings: for every output
 * queue whose hardware credit count has dropped to 0x40 or fewer
 * descriptors, refill the ring and post the new credits to hardware.
 */
static void
lio_check_rx_oom_status(struct lio *lio)
{
	struct lio_droq *droq;
	struct octeon_device *oct = lio->oct_dev;
	int desc_refilled;
	int q, q_no = 0;

	for (q = 0; q < oct->num_oqs; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;
		droq = oct->droq[q_no];
		if (droq == NULL)
			continue;
		/* 0x40 is the low-water mark for ring credits. */
		if (lio_read_csr32(oct, droq->pkts_credit_reg) <= 0x40) {
			mtx_lock(&droq->lock);
			desc_refilled = lio_droq_refill(oct, droq);
			/*
			 * Flush the droq descriptor data to memory to be sure
			 * that when we update the credits the data in memory
			 * is accurate.
			 */
			wmb();
			lio_write_csr32(oct, droq->pkts_credit_reg,
			    desc_refilled);
			/* make sure mmio write completes */
			__compiler_membar();
			mtx_unlock(&droq->lock);
		}
	}
}
/*
 * Timeout-task body: run one RX out-of-memory check (only while the
 * interface is RUNNING) and re-arm ourselves to fire again in 50 ms.
 */
static void
lio_poll_check_rx_oom_status(void *arg, int pending __unused)
{
	struct lio_tq *stq = arg;
	struct lio *lio = stq->ctxptr;

	if (lio_ifstate_check(lio, LIO_IFSTATE_RUNNING))
		lio_check_rx_oom_status(lio);

	/* Self-rescheduling poll loop. */
	taskqueue_enqueue_timeout(stq->tq, &stq->work, lio_ms_to_ticks(50));
}
/*
 * Create the taskqueue and timeout task that periodically checks the RX
 * rings for out-of-memory conditions (see lio_poll_check_rx_oom_status()).
 * Returns 0 on success, -1 on failure.
 */
static int
lio_setup_rx_oom_poll_fn(if_t ifp)
{
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_device *oct = lio->oct_dev;
	struct lio_tq *rx_status_tq;

	rx_status_tq = &lio->rx_status_tq;

	/* NOTE(review): with M_WAITOK this should not return NULL. */
	rx_status_tq->tq = taskqueue_create("lio_rx_oom_status", M_WAITOK,
	    taskqueue_thread_enqueue,
	    &rx_status_tq->tq);
	if (rx_status_tq->tq == NULL) {
		lio_dev_err(oct, "unable to create lio rx oom status tq\n");
		return (-1);
	}

	TIMEOUT_TASK_INIT(rx_status_tq->tq, &rx_status_tq->work, 0,
	    lio_poll_check_rx_oom_status, (void *)rx_status_tq);

	rx_status_tq->ctxptr = lio;

	taskqueue_start_threads(&rx_status_tq->tq, 1, PI_NET,
	    "lio%d_rx_oom_status",
	    oct->octeon_id);

	/* Kick off the first poll; the task re-arms itself every 50 ms. */
	taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work,
	    lio_ms_to_ticks(50));

	return (0);
}
/*
 * Tear down the RX out-of-memory poll task and its taskqueue created by
 * lio_setup_rx_oom_poll_fn().  Safe to call when it was never set up.
 */
static void
lio_cleanup_rx_oom_poll_fn(if_t ifp)
{
	struct lio *lio = if_getsoftc(ifp);
	struct lio_tq *stq = &lio->rx_status_tq;

	if (stq->tq == NULL)
		return;

	/* Cancel the self-rescheduling task, draining if it is mid-run. */
	while (taskqueue_cancel_timeout(stq->tq, &stq->work, NULL))
		taskqueue_drain_timeout(stq->tq, &stq->work);

	taskqueue_free(stq->tq);
	stq->tq = NULL;
}
/*
 * Full teardown of one NIC interface: stop traffic, quiesce all pending
 * requests/instructions/packets, detach from the network stack, and free
 * every per-interface resource.  The order here matters: traffic must be
 * stopped and drained before ether_ifdetach() and the frees below.
 */
static void
lio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	if_t ifp = oct->props.ifp;
	struct lio *lio;

	if (ifp == NULL) {
		lio_dev_err(oct, "%s No ifp ptr for index %d\n",
		    __func__, ifidx);
		return;
	}

	lio = if_getsoftc(ifp);

	/* Mark detaching first so concurrent paths back off. */
	lio_ifstate_set(lio, LIO_IFSTATE_DETACH);

	lio_dev_dbg(oct, "NIC device cleanup\n");

	if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		lio_stop(ifp);

	if (lio_wait_for_pending_requests(oct))
		lio_dev_err(oct, "There were pending requests\n");

	if (lio_wait_for_instr_fetch(oct))
		lio_dev_err(oct, "IQ had pending instructions\n");

	if (lio_wait_for_oq_pkts(oct))
		lio_dev_err(oct, "OQ had pending packets\n");

	if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		ether_ifdetach(ifp);

	lio_tcp_lro_free(oct, ifp);

	lio_cleanup_rx_oom_poll_fn(ifp);

	lio_delete_glists(oct, lio);

	EVENTHANDLER_DEREGISTER(vlan_config, lio->vlan_attach);
	EVENTHANDLER_DEREGISTER(vlan_unconfig, lio->vlan_detach);

	free(lio, M_DEVBUF);

	if_free(ifp);

	oct->props.gmxport = -1;

	oct->props.ifp = NULL;
}
/*
 * Log the current link state (speed/duplex or "down").  Silent while the
 * interface is resetting or has not been registered with the stack yet.
 */
static void
print_link_info(if_t ifp)
{
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_link_info *linfo;

	if (lio_ifstate_check(lio, LIO_IFSTATE_RESETTING) ||
	    !lio_ifstate_check(lio, LIO_IFSTATE_REGISTERED))
		return;

	linfo = &lio->linfo;
	if (linfo->link.s.link_up) {
		lio_dev_info(lio->oct_dev, "%d Mbps %s Duplex UP\n",
		    linfo->link.s.speed,
		    (linfo->link.s.duplex) ? "Full" : "Half");
	} else {
		lio_dev_info(lio->oct_dev, "Link Down\n");
	}
}
/*
 * Record the latest link status from firmware and, if the interface is
 * open and the status actually changed, propagate the new state to the
 * network stack.
 */
static inline void
lio_update_link_status(if_t ifp, union octeon_link_status *ls)
{
	struct lio *lio = if_getsoftc(ifp);
	int changed;

	changed = (lio->linfo.link.link_status64 != ls->link_status64);
	lio->linfo.link.link_status64 = ls->link_status64;

	if (!lio->intf_open || !changed)
		return;

	print_link_info(ifp);
	lio->link_changes++;

	if_link_state_change(ifp, lio->linfo.link.s.link_up ?
	    LINK_STATE_UP : LINK_STATE_DOWN);
}
/*
 * \brief Callback for rx ctrl
 * @param status status of request
 * @param buf pointer to resp structure
 *
 * Runs when the RX control soft command completes; sets ctx->cond to wake
 * the waiter sleeping in lio_send_rx_ctrl_cmd().
 */
static void
lio_rx_ctl_callback(struct octeon_device *oct, uint32_t status, void *buf)
{
	struct lio_soft_command *sc = (struct lio_soft_command *)buf;
	struct lio_rx_ctl_context *ctx;

	ctx = (struct lio_rx_ctl_context *)sc->ctxptr;

	/* Re-resolve the device from the id stashed in the context. */
	oct = lio_get_device(ctx->octeon_id);
	if (status)
		lio_dev_err(oct, "rx ctl instruction failed. Status: %llx\n",
		    LIO_CAST64(status));
	ctx->cond = 1;

	/*
	 * This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();
}
/*
 * Send an RX start/stop control command to the NIC firmware and wait for
 * its completion (or timeout).  start_stop: 1 = enable RX, 0 = disable.
 * No-op when the requested state already matches oct->props.rx_on.
 */
static void
lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct lio_soft_command *sc;
	struct lio_rx_ctl_context *ctx;
	union octeon_cmd *ncmd;
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	int ctx_size = sizeof(struct lio_rx_ctl_context);
	int retval;

	if (oct->props.rx_on == start_stop)
		return;

	sc = lio_alloc_soft_command(oct, OCTEON_CMD_SIZE, 16, ctx_size);
	if (sc == NULL)
		return;

	ncmd = (union octeon_cmd *)sc->virtdptr;
	ctx = (struct lio_rx_ctl_context *)sc->ctxptr;

	/* cond is set to 1 by lio_rx_ctl_callback() on completion. */
	ctx->cond = 0;
	ctx->octeon_id = lio_get_device_id(oct);
	ncmd->cmd64 = 0;
	ncmd->s.cmd = LIO_CMD_RX_CTL;
	ncmd->s.param1 = start_stop;

	/* Firmware expects the command words in big-endian order. */
	lio_swap_8B_data((uint64_t *)ncmd, (OCTEON_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	lio_prepare_soft_command(oct, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_CMD, 0,
	    0, 0);

	sc->callback = lio_rx_ctl_callback;
	sc->callback_arg = sc;
	sc->wait_time = 5000;

	retval = lio_send_soft_command(oct, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_dev_err(oct, "Failed to send RX Control message\n");
	} else {
		/*
		 * Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		lio_sleep_cond(oct, &ctx->cond);
		oct->props.rx_on = start_stop;
	}

	lio_free_soft_command(oct, sc);
}
/*
 * VLAN-config event handler: program a VLAN filter for 'vid' into the
 * NIC firmware.  Events for other interfaces and invalid VIDs are ignored.
 */
static void
lio_vlan_rx_add_vid(void *arg, if_t ifp, uint16_t vid)
{
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_device *oct = lio->oct_dev;
	struct lio_ctrl_pkt nctrl;
	int ret;

	/* Not our event */
	if (arg != if_getsoftc(ifp))
		return;

	/* Valid VLAN IDs are 1..4095. */
	if (vid == 0 || vid > 4095)
		return;

	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));

	nctrl.ncmd.cmd64 = 0;
	nctrl.ncmd.s.cmd = LIO_CMD_ADD_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.lio = lio;
	nctrl.cb_fn = lio_ctrl_cmd_completion;

	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0)
		lio_dev_err(oct, "Add VLAN filter failed in core (ret: 0x%x)\n",
		    ret);
}
/*
 * VLAN-unconfig event handler: remove the VLAN filter for 'vid' from the
 * NIC firmware.  Events for other interfaces and invalid VIDs are ignored.
 */
static void
lio_vlan_rx_kill_vid(void *arg, if_t ifp, uint16_t vid)
{
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_device *oct = lio->oct_dev;
	struct lio_ctrl_pkt nctrl;
	int ret;

	/* Not our event */
	if (arg != if_getsoftc(ifp))
		return;

	/* Valid VLAN IDs are 1..4095. */
	if (vid == 0 || vid > 4095)
		return;

	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));

	nctrl.ncmd.cmd64 = 0;
	nctrl.ncmd.s.cmd = LIO_CMD_DEL_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.lio = lio;
	nctrl.cb_fn = lio_ctrl_cmd_completion;

	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0)
		lio_dev_err(oct,
		    "Kill VLAN filter failed in core (ret: 0x%x)\n",
		    ret);
}
/*
 * Wait (up to ~100 iterations) for all output queues to drain, kicking
 * the per-droq cleanup task whenever packets are still pending.
 *
 * Returns the number of packets still pending after the final pass
 * (non-zero means the queues failed to drain in time).
 *
 * BUG FIX: the original returned pkt_cnt, which was unconditionally
 * reset to 0 before every loop exit, so the function always returned 0
 * and callers' "OQ had pending packets" diagnostics could never fire.
 * Return the pending count observed in the last pass instead.
 */
static int
lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	int i, pending_pkts, pkt_cnt, retry = 100;

	do {
		pending_pkts = 0;

		for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;

			pkt_cnt = lio_droq_check_hw_for_pkts(oct->droq[i]);
			if (pkt_cnt > 0) {
				pending_pkts += pkt_cnt;
				/* Let the droq task drain what is there. */
				taskqueue_enqueue(oct->droq[i]->droq_taskqueue,
				    &oct->droq[i]->droq_task);
			}
		}

		lio_sleep_timeout(1);
	} while (retry-- && pending_pkts);

	return (pending_pkts);
}
/*
 * Release every device-level resource, in reverse order of acquisition.
 * The switch keys on how far initialization progressed and deliberately
 * falls through: each case tears down one init stage, then continues into
 * the teardown for every earlier stage.
 */
static void
lio_destroy_resources(struct octeon_device *oct)
{
	int i, refcount;

	switch (atomic_load_acq_int(&oct->status)) {
	case LIO_DEV_RUNNING:
	case LIO_DEV_CORE_OK:
		/* No more instructions will be forwarded. */
		atomic_store_rel_int(&oct->status, LIO_DEV_IN_RESET);

		oct->app_mode = LIO_DRV_INVALID_APP;
		lio_dev_dbg(oct, "Device state is now %s\n",
		    lio_get_state_string(&oct->status));

		lio_sleep_timeout(100);

		/* fallthrough */
	case LIO_DEV_HOST_OK:

		/* fallthrough */
	case LIO_DEV_CONSOLE_INIT_DONE:
		/* Remove any consoles */
		lio_remove_consoles(oct);

		/* fallthrough */
	case LIO_DEV_IO_QUEUES_DONE:
		if (lio_wait_for_pending_requests(oct))
			lio_dev_err(oct, "There were pending requests\n");

		if (lio_wait_for_instr_fetch(oct))
			lio_dev_err(oct, "IQ had pending instructions\n");

		/*
		 * Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			lio_dev_err(oct, "OQ had pending packets\n");

		/* fallthrough */
	case LIO_DEV_INTR_SET_DONE:
		/* Disable interrupts */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			/* Tear down the per-IOQ MSI-X vectors first. */
			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
				if (oct->ioq_vector[i].tag != NULL) {
					bus_teardown_intr(oct->device,
					    oct->ioq_vector[i].msix_res,
					    oct->ioq_vector[i].tag);
					oct->ioq_vector[i].tag = NULL;
				}
				if (oct->ioq_vector[i].msix_res != NULL) {
					bus_release_resource(oct->device,
					    SYS_RES_IRQ,
					    oct->ioq_vector[i].vector,
					    oct->ioq_vector[i].msix_res);
					oct->ioq_vector[i].msix_res = NULL;
				}
			}
			/* non-iov vector's argument is oct struct */
			if (oct->tag != NULL) {
				bus_teardown_intr(oct->device, oct->msix_res,
				    oct->tag);
				oct->tag = NULL;
			}

			if (oct->msix_res != NULL) {
				bus_release_resource(oct->device, SYS_RES_IRQ,
				    oct->aux_vector,
				    oct->msix_res);
				oct->msix_res = NULL;
			}

			pci_release_msi(oct->device);
		}
		/* fallthrough */
	case LIO_DEV_IN_RESET:
	case LIO_DEV_DROQ_INIT_DONE:
		/* Wait for any pending operations */
		lio_mdelay(100);
		for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			lio_delete_droq(oct, i);
		}

		/* fallthrough */
	case LIO_DEV_RESP_LIST_INIT_DONE:
		/* Free droq structs even for queues never fully set up. */
		for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) {
			if (oct->droq[i] != NULL) {
				free(oct->droq[i], M_DEVBUF);
				oct->droq[i] = NULL;
			}
		}

		lio_delete_response_list(oct);

		/* fallthrough */
	case LIO_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			lio_delete_instr_queue(oct, i);
		}

		/* fallthrough */
	case LIO_DEV_MSIX_ALLOC_VECTOR_DONE:
		for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) {
			if (oct->instr_queue[i] != NULL) {
				free(oct->instr_queue[i], M_DEVBUF);
				oct->instr_queue[i] = NULL;
			}
		}

		lio_free_ioq_vector(oct);

		/* fallthrough */
	case LIO_DEV_SC_BUFF_POOL_INIT_DONE:
		lio_free_sc_buffer_pool(oct);

		/* fallthrough */
	case LIO_DEV_DISPATCH_INIT_DONE:
		lio_delete_dispatch_list(oct);

		/* fallthrough */
	case LIO_DEV_PCI_MAP_DONE:
		refcount = lio_deregister_device(oct);

		/* Only soft-reset when the last user is going away. */
		if (fw_type_is_none())
			lio_pci_flr(oct);

		if (!refcount)
			oct->fn_list.soft_reset(oct);

		lio_unmap_pci_barx(oct, 0);
		lio_unmap_pci_barx(oct, 1);

		/* fallthrough */
	case LIO_DEV_PCI_ENABLE_DONE:
		/* Disable the device, releasing the PCI INT */
		pci_disable_busmaster(oct->device);

		/* fallthrough */
	case LIO_DEV_BEGIN_STATE:
		break;
	}	/* end switch (oct->status) */
}
diff --git a/sys/dev/mana/mana_en.c b/sys/dev/mana/mana_en.c
index 0e5f86b5e105..961399172688 100644
--- a/sys/dev/mana/mana_en.c
+++ b/sys/dev/mana/mana_en.c
@@ -1,2987 +1,2982 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2021 Microsoft Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/time.h>
#include <sys/eventhandler.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/in_cksum.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#ifdef RSS
#include <net/rss_config.h>
#endif
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include "mana.h"
#include "mana_sysctl.h"
static int mana_up(struct mana_port_context *apc);
static int mana_down(struct mana_port_context *apc);
/*
 * Copy up to 'size' bytes of a lazily generated, driver-lifetime RSS hash
 * key into 'k'.  The key is generated once and shared by all ports so
 * they hash identically.
 *
 * BUG FIX: corrected the KASSERT message typo ("Request more buytes").
 */
static void
mana_rss_key_fill(void *k, size_t size)
{
	static bool rss_key_generated = false;
	static uint8_t rss_key[MANA_HASH_KEY_SIZE];

	KASSERT(size <= MANA_HASH_KEY_SIZE,
	    ("Requested more bytes than MANA RSS key can hold"));

	if (!rss_key_generated) {
		arc4random_buf(rss_key, MANA_HASH_KEY_SIZE);
		rss_key_generated = true;
	}

	memcpy(k, rss_key, size);
}
/*
 * ifmedia change handler: MANA exposes a single fixed media type, so
 * media changes are not supported.
 */
static int
mana_ifmedia_change(if_t ifp __unused)
{
	return (EOPNOTSUPP);
}
/*
 * ifmedia status handler: report link validity/activity.  When the port
 * is up, the media is reported as 100G-DR full duplex.
 */
static void
mana_ifmedia_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct mana_port_context *apc = if_getsoftc(ifp);

	if (!apc) {
		if_printf(ifp, "Port not available\n");
		return;
	}

	MANA_APC_LOCK_LOCK(apc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!apc->port_is_up) {
		/* Drop the lock before logging; link is down. */
		MANA_APC_LOCK_UNLOCK(apc);
		mana_dbg(NULL, "Port %u link is down\n", apc->port_idx);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_100G_DR | IFM_FDX;

	MANA_APC_LOCK_UNLOCK(apc);
}
/*
 * if_get_counter handler: serve interface statistics from the per-port
 * counter(9) set, deferring everything else to the stack default.
 */
static uint64_t
mana_get_counter(if_t ifp, ift_counter cnt)
{
	struct mana_port_context *apc = if_getsoftc(ifp);
	struct mana_port_stats *stats = &apc->port_stats;
	uint64_t val;

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		val = counter_u64_fetch(stats->rx_packets);
		break;
	case IFCOUNTER_OPACKETS:
		val = counter_u64_fetch(stats->tx_packets);
		break;
	case IFCOUNTER_IBYTES:
		val = counter_u64_fetch(stats->rx_bytes);
		break;
	case IFCOUNTER_OBYTES:
		val = counter_u64_fetch(stats->tx_bytes);
		break;
	case IFCOUNTER_IQDROPS:
		val = counter_u64_fetch(stats->rx_drops);
		break;
	case IFCOUNTER_OQDROPS:
		val = counter_u64_fetch(stats->tx_drops);
		break;
	default:
		val = if_get_counter_default(ifp, cnt);
		break;
	}

	return (val);
}
/*
 * if_qflush handler: simply flush the generic interface queue.  The
 * per-txq buf_rings used by mana_start_xmit() are drained elsewhere.
 */
static void
mana_qflush(if_t ifp)
{
	if_qflush(ifp);
}
/*
 * Restart the port under the APC lock: bring it down if it is running,
 * then bring it back up.  Returns the result of mana_up().
 */
int
mana_restart(struct mana_port_context *apc)
{
	int rc;

	MANA_APC_LOCK_LOCK(apc);

	if (apc->port_is_up)
		mana_down(apc);

	rc = mana_up(apc);

	MANA_APC_LOCK_UNLOCK(apc);

	return (rc);
}
/*
 * Interface ioctl handler.  Handles MTU changes (with a full port
 * down/up cycle), interface up/down, capability toggles (checksum
 * offload, TSO, LRO) with their interdependencies, media queries, and
 * RSS key/hash reporting.  Everything else goes to ether_ioctl().
 */
static int
mana_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct mana_port_context *apc = if_getsoftc(ifp);
	struct ifrsskey *ifrk;
	struct ifrsshash *ifrh;
	struct ifreq *ifr;
	uint16_t new_mtu;
	int rc = 0, mask;

	switch (command) {
	case SIOCSIFMTU:
		ifr = (struct ifreq *)data;
		new_mtu = ifr->ifr_mtu;
		if (if_getmtu(ifp) == new_mtu)
			break;
		/* +18 accounts for Ethernet header + VLAN tag + FCS. */
		if ((new_mtu + 18 > MAX_FRAME_SIZE) ||
		    (new_mtu + 18 < MIN_FRAME_SIZE)) {
			if_printf(ifp, "Invalid MTU. new_mtu: %d, "
			    "max allowed: %d, min allowed: %d\n",
			    new_mtu, MAX_FRAME_SIZE - 18, MIN_FRAME_SIZE - 18);
			return EINVAL;
		}
		/* Changing the MTU requires a full port restart. */
		MANA_APC_LOCK_LOCK(apc);
		if (apc->port_is_up)
			mana_down(apc);

		apc->frame_size = new_mtu + 18;
		if_setmtu(ifp, new_mtu);
		mana_dbg(NULL, "Set MTU to %d\n", new_mtu);

		rc = mana_up(apc);
		MANA_APC_LOCK_UNLOCK(apc);
		break;

	case SIOCSIFFLAGS:
		if (if_getflags(ifp) & IFF_UP) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
				MANA_APC_LOCK_LOCK(apc);
				if (!apc->port_is_up)
					rc = mana_up(apc);
				MANA_APC_LOCK_UNLOCK(apc);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				MANA_APC_LOCK_LOCK(apc);
				if (apc->port_is_up)
					mana_down(apc);
				MANA_APC_LOCK_UNLOCK(apc);
			}
		}
		break;

	case SIOCSIFCAP:
		MANA_APC_LOCK_LOCK(apc);
		ifr = (struct ifreq *)data;
		/*
		 * Fix up requested capabilities w/ supported capabilities,
		 * since the supported capabilities could have been changed.
		 */
		mask = (ifr->ifr_reqcap & if_getcapabilities(ifp)) ^
		    if_getcapenable(ifp);

		if (mask & IFCAP_TXCSUM) {
			if_togglecapenable(ifp, IFCAP_TXCSUM);
			if_togglehwassist(ifp, (CSUM_TCP | CSUM_UDP | CSUM_IP));

			/* TSO4 depends on TX checksumming being enabled. */
			if ((IFCAP_TSO4 & if_getcapenable(ifp)) &&
			    !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
				mask &= ~IFCAP_TSO4;
				if_setcapenablebit(ifp, 0, IFCAP_TSO4);
				if_sethwassistbits(ifp, 0, CSUM_IP_TSO);
				mana_warn(NULL,
				    "Also disabled tso4 due to -txcsum.\n");
			}
		}

		if (mask & IFCAP_TXCSUM_IPV6) {
			if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
			if_togglehwassist(ifp, (CSUM_UDP_IPV6 | CSUM_TCP_IPV6));

			/* TSO6 depends on IPv6 TX checksumming. */
			if ((IFCAP_TSO6 & if_getcapenable(ifp)) &&
			    !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
				mask &= ~IFCAP_TSO6;
				if_setcapenablebit(ifp, 0, IFCAP_TSO6);
				if_sethwassistbits(ifp, 0, CSUM_IP6_TSO);
				mana_warn(ifp,
				    "Also disabled tso6 due to -txcsum6.\n");
			}
		}

		if (mask & IFCAP_RXCSUM)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		/* We can't diff IPv6 packets from IPv4 packets on RX path. */
		if (mask & IFCAP_RXCSUM_IPV6)
			if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6);

		if (mask & IFCAP_LRO)
			if_togglecapenable(ifp, IFCAP_LRO);

		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & if_getcapenable(ifp)) &&
			    !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
				MANA_APC_LOCK_UNLOCK(apc);
				if_printf(ifp, "Enable txcsum first.\n");
				rc = EAGAIN;
				goto out;
			}
			if_togglecapenable(ifp, IFCAP_TSO4);
			if_togglehwassist(ifp, CSUM_IP_TSO);
		}

		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & if_getcapenable(ifp)) &&
			    !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
				MANA_APC_LOCK_UNLOCK(apc);
				if_printf(ifp, "Enable txcsum6 first.\n");
				rc = EAGAIN;
				goto out;
			}
			if_togglecapenable(ifp, IFCAP_TSO6);
			if_togglehwassist(ifp, CSUM_IP6_TSO);
		}

		MANA_APC_LOCK_UNLOCK(apc);
out:
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
	case SIOCGIFXMEDIA:
		ifr = (struct ifreq *)data;
		rc = ifmedia_ioctl(ifp, ifr, &apc->media, command);
		break;

	case SIOCGIFRSSKEY:
		ifrk = (struct ifrsskey *)data;
		ifrk->ifrk_func = RSS_FUNC_TOEPLITZ;
		ifrk->ifrk_keylen = MANA_HASH_KEY_SIZE;
		memcpy(ifrk->ifrk_key, apc->hashkey, MANA_HASH_KEY_SIZE);
		break;

	case SIOCGIFRSSHASH:
		ifrh = (struct ifrsshash *)data;
		ifrh->ifrh_func = RSS_FUNC_TOEPLITZ;
		ifrh->ifrh_types =
		    RSS_TYPE_TCP_IPV4 |
		    RSS_TYPE_UDP_IPV4 |
		    RSS_TYPE_TCP_IPV6 |
		    RSS_TYPE_UDP_IPV6;
		break;

	default:
		rc = ether_ioctl(ifp, command, data);
		break;
	}

	return (rc);
}
/*
 * Allocate every counter(9) in a struct of counter_u64_t members.
 * 'begin' points at the first member; 'size' is the struct size in bytes.
 */
static inline void
mana_alloc_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *limit = (counter_u64_t *)((char *)begin + size);
	counter_u64_t *p;

	for (p = begin; p < limit; p++)
		*p = counter_u64_alloc(M_WAITOK);
}
/*
 * Free every counter(9) in a struct of counter_u64_t members previously
 * set up by mana_alloc_counters().
 */
static inline void
mana_free_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *limit = (counter_u64_t *)((char *)begin + size);
	counter_u64_t *p;

	for (p = begin; p < limit; p++)
		counter_u64_free(*p);
}
/*
 * True when the send WQ has room for at least one maximum-sized TX WQE.
 */
static bool
mana_can_tx(struct gdma_queue *wq)
{
	return (mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE);
}
/*
 * DMA-map an outgoing mbuf chain into tx_info->dma_map and fill the WQE
 * request's SG list.  If the chain has too many fragments (EFBIG), it is
 * collapsed once and the mapping retried; *m_head is updated in place.
 *
 * Returns 0 on success (tx_info->mbuf takes a reference to the chain),
 * ENOBUFS when m_collapse() fails, or the bus_dma error.
 */
static inline int
mana_tx_map_mbuf(struct mana_port_context *apc,
    struct mana_send_buf_info *tx_info,
    struct mbuf **m_head, struct mana_tx_package *tp,
    struct mana_stats *tx_stats)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;
	bus_dma_segment_t segs[MAX_MBUF_FRAGS];
	struct mbuf *m = *m_head;
	int err, nsegs, i;

	err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag, tx_info->dma_map,
	    m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err == EFBIG) {
		struct mbuf *m_new;

		/* Too many fragments: collapse once and retry the mapping. */
		counter_u64_add(tx_stats->collapse, 1);
		m_new = m_collapse(m, M_NOWAIT, MAX_MBUF_FRAGS);
		if (unlikely(m_new == NULL)) {
			counter_u64_add(tx_stats->collapse_err, 1);
			return ENOBUFS;
		} else {
			*m_head = m = m_new;
		}

		mana_warn(NULL,
		    "Too many segs in orig mbuf, m_collapse called\n");

		err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag,
		    tx_info->dma_map, m, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	if (!err) {
		for (i = 0; i < nsegs; i++) {
			tp->wqe_req.sgl[i].address = segs[i].ds_addr;
			tp->wqe_req.sgl[i].mem_key = gd->gpa_mkey;
			tp->wqe_req.sgl[i].size = segs[i].ds_len;
		}
		tp->wqe_req.num_sge = nsegs;

		tx_info->mbuf = *m_head;

		bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
		    BUS_DMASYNC_PREWRITE);
	}

	return err;
}
/*
 * Undo mana_tx_map_mbuf(): sync and unload the DMA map, then free the
 * attached mbuf chain (if any) and clear the pointer.
 */
static inline void
mana_tx_unmap_mbuf(struct mana_port_context *apc,
    struct mana_send_buf_info *tx_info)
{
	struct mbuf *m = tx_info->mbuf;

	bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(apc->tx_buf_tag, tx_info->dma_map);
	if (m) {
		m_freem(m);
		tx_info->mbuf = NULL;
	}
}
/*
 * Attach an mbuf to an RX OOB's DMA map and fill its single SG entry.
 * With alloc_mbuf a fresh jumbo cluster (falling back to a regular
 * cluster) is allocated; otherwise the mbuf already held by rx_oob is
 * re-mapped.
 *
 * Returns 0 on success, ENOMEM when no mbuf is available, or EFAULT
 * when DMA mapping fails (the mbuf is freed in that case).
 *
 * BUG FIX: on mapping failure in the re-map path, the original freed
 * rx_oob->mbuf but left the stale pointer in place, so later teardown
 * could touch a freed mbuf (use-after-free / double free).  Clear the
 * pointer before freeing.
 */
static inline int
mana_load_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
    struct mana_recv_buf_oob *rx_oob, bool alloc_mbuf)
{
	bus_dma_segment_t segs[1];
	struct mbuf *mbuf;
	int nsegs, err;
	uint32_t mlen;

	if (alloc_mbuf) {
		mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rxq->datasize);
		if (unlikely(mbuf == NULL)) {
			/* Fall back to a regular cluster. */
			mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
			if (unlikely(mbuf == NULL)) {
				return ENOMEM;
			}
			mlen = MCLBYTES;
		} else {
			mlen = rxq->datasize;
		}

		mbuf->m_pkthdr.len = mbuf->m_len = mlen;
	} else {
		/* Re-map the mbuf this OOB already owns. */
		if (rx_oob->mbuf) {
			mbuf = rx_oob->mbuf;
			mlen = rx_oob->mbuf->m_pkthdr.len;
		} else {
			return ENOMEM;
		}
	}

	err = bus_dmamap_load_mbuf_sg(apc->rx_buf_tag, rx_oob->dma_map,
	    mbuf, segs, &nsegs, BUS_DMA_NOWAIT);

	if (unlikely((err != 0) || (nsegs != 1))) {
		mana_warn(NULL, "Failed to map mbuf, error: %d, "
		    "nsegs: %d\n", err, nsegs);
		counter_u64_add(rxq->stats.dma_mapping_err, 1);
		goto error;
	}

	bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
	    BUS_DMASYNC_PREREAD);

	rx_oob->mbuf = mbuf;
	rx_oob->num_sge = 1;
	rx_oob->sgl[0].address = segs[0].ds_addr;
	rx_oob->sgl[0].size = mlen;
	rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;

	return 0;

error:
	/* Don't leave rx_oob pointing at the mbuf we are about to free. */
	if (mbuf == rx_oob->mbuf)
		rx_oob->mbuf = NULL;
	m_freem(mbuf);
	return EFAULT;
}
/*
 * Undo mana_load_rx_mbuf(): sync and unload the RX DMA map, and
 * optionally free the attached mbuf (clearing the pointer).
 */
static inline void
mana_unload_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
    struct mana_recv_buf_oob *rx_oob, bool free_mbuf)
{
	struct mbuf *m = rx_oob->mbuf;

	bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(apc->rx_buf_tag, rx_oob->dma_map);

	if (free_mbuf && m != NULL) {
		m_freem(m);
		rx_oob->mbuf = NULL;
	}
}
/* Use a couple of mbuf PH_loc spaces for the L3 and L4 protocol types */
#define MANA_L3_PROTO(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[0])
#define MANA_L4_PROTO(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[1])
#define MANA_TXQ_FULL (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)
/*
 * Drain the txq's buf_ring into the hardware send queue.  Caller must
 * hold txq->txq_mtx.  For each mbuf: DMA-map it, build the TX OOB
 * (checksum/TSO offload flags derived from csum_flags and the L3/L4
 * protocol stashed by mana_tso_fixup()/mana_mbuf_csum_check()), post the
 * work request, and ring the doorbell.  Stops when the SQ fills (setting
 * IFF_DRV_OACTIVE) or the port goes down.
 */
static void
mana_xmit(struct mana_txq *txq)
{
	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
	struct mana_send_buf_info *tx_info;
	if_t ndev = txq->ndev;
	struct mbuf *mbuf;
	struct mana_port_context *apc = if_getsoftc(ndev);
	struct mana_port_stats *port_stats = &apc->port_stats;
	struct gdma_dev *gd = apc->ac->gdma_dev;
	uint64_t packets, bytes;
	uint16_t next_to_use;
	struct mana_tx_package pkg = {};
	struct mana_stats *tx_stats;
	struct gdma_queue *gdma_sq;
	struct mana_cq *cq;
	int err, len;
	bool is_tso;

	gdma_sq = txq->gdma_sq;
	cq = &apc->tx_qp[txq->idx].tx_cq;
	tx_stats = &txq->stats;

	packets = 0;
	bytes = 0;
	next_to_use = txq->next_to_use;

	while ((mbuf = drbr_peek(ndev, txq->txq_br)) != NULL) {
		if (!apc->port_is_up ||
		    (if_getdrvflags(ndev) & MANA_TXQ_FULL) != IFF_DRV_RUNNING) {
			drbr_putback(ndev, txq->txq_br, mbuf);
			break;
		}

		if (!mana_can_tx(gdma_sq)) {
			/* SQ is full. Set the IFF_DRV_OACTIVE flag */
			if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE, 0);
			counter_u64_add(tx_stats->stop, 1);
			uint64_t stops = counter_u64_fetch(tx_stats->stop);
			uint64_t wakeups = counter_u64_fetch(tx_stats->wakeup);
#define MANA_TXQ_STOP_THRESHOLD 50
			/*
			 * If this queue stalls much more often than it wakes,
			 * steer future traffic to an alternate queue.
			 */
			if (stops > MANA_TXQ_STOP_THRESHOLD && wakeups > 0 &&
			    stops > wakeups && txq->alt_txq_idx == txq->idx) {
				txq->alt_txq_idx =
				    (txq->idx + (stops / wakeups))
				    % apc->num_queues;
				counter_u64_add(tx_stats->alt_chg, 1);
			}

			drbr_putback(ndev, txq->txq_br, mbuf);

			/* Let the completion task free up SQ space. */
			taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task);
			break;
		}

		tx_info = &txq->tx_buf_info[next_to_use];

		memset(&pkg, 0, sizeof(struct mana_tx_package));
		pkg.wqe_req.sgl = pkg.sgl_array;

		err = mana_tx_map_mbuf(apc, tx_info, &mbuf, &pkg, tx_stats);
		if (unlikely(err)) {
			mana_dbg(NULL,
			    "Failed to map tx mbuf, err %d\n", err);

			counter_u64_add(tx_stats->dma_mapping_err, 1);

			/* The mbuf is still there. Free it */
			m_freem(mbuf);
			/* Advance the drbr queue */
			drbr_advance(ndev, txq->txq_br);
			continue;
		}

		pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
		pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;

		/* Large vport offsets require the long OOB format. */
		if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
			pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
			pkt_fmt = MANA_LONG_PKT_FMT;
		} else {
			pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
		}

		pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;

		if (pkt_fmt == MANA_SHORT_PKT_FMT)
			pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
		else
			pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);

		pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
		pkg.wqe_req.flags = 0;
		pkg.wqe_req.client_data_unit = 0;

		is_tso = false;
		if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
			is_tso = true;

			if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
				pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
			else
				pkg.tx_oob.s_oob.is_outer_ipv6 = 1;

			pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
			pkg.tx_oob.s_oob.trans_off = mbuf->m_pkthdr.l3hlen;

			pkg.wqe_req.client_data_unit = mbuf->m_pkthdr.tso_segsz;
			pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
		} else if (mbuf->m_pkthdr.csum_flags &
		    (CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP6_UDP | CSUM_IP6_TCP)) {
			if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP) {
				pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
				pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
			} else {
				pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
			}

			if (MANA_L4_PROTO(mbuf) == IPPROTO_TCP) {
				pkg.tx_oob.s_oob.comp_tcp_csum = 1;
				pkg.tx_oob.s_oob.trans_off =
				    mbuf->m_pkthdr.l3hlen;
			} else {
				pkg.tx_oob.s_oob.comp_udp_csum = 1;
			}
		} else if (mbuf->m_pkthdr.csum_flags & CSUM_IP) {
			pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
			pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
		} else {
			/* No offload requested; just tag the L3 family. */
			if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
				pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
			else if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IPV6)
				pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
		}

		len = mbuf->m_pkthdr.len;

		err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
		    (struct gdma_posted_wqe_info *)&tx_info->wqe_inf);
		if (unlikely(err)) {
			/* Should not happen */
			if_printf(ndev, "Failed to post TX OOB: %d\n", err);

			mana_tx_unmap_mbuf(apc, tx_info);

			drbr_advance(ndev, txq->txq_br);
			continue;
		}

		next_to_use =
		    (next_to_use + 1) % MAX_SEND_BUFFERS_PER_QUEUE;

		(void)atomic_inc_return(&txq->pending_sends);

		drbr_advance(ndev, txq->txq_br);

		mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);

		packets++;
		bytes += len;

		if (is_tso) {
			txq->tso_pkts++;
			txq->tso_bytes += len;
		}
	}

	counter_enter();
	counter_u64_add_protected(tx_stats->packets, packets);
	counter_u64_add_protected(port_stats->tx_packets, packets);
	counter_u64_add_protected(tx_stats->bytes, bytes);
	counter_u64_add_protected(port_stats->tx_bytes, bytes);
	counter_exit();

	txq->next_to_use = next_to_use;
}
/*
 * Deferred transmit task: keep draining the txq's buf_ring under the
 * queue mutex while there is queued work and the port is up and RUNNING
 * (and not marked OACTIVE).
 */
static void
mana_xmit_taskfunc(void *arg, int pending)
{
	struct mana_txq *txq = arg;
	if_t ndev = txq->ndev;
	struct mana_port_context *apc = if_getsoftc(ndev);

	while (!drbr_empty(ndev, txq->txq_br) && apc->port_is_up &&
	    (if_getdrvflags(ndev) & MANA_TXQ_FULL) == IFF_DRV_RUNNING) {
		mtx_lock(&txq->txq_mtx);
		mana_xmit(txq);
		mtx_unlock(&txq->txq_mtx);
	}
}
/*
 * Ensure at least 'len' contiguous bytes at the front of mbuf 'm',
 * pulling up if necessary.  On m_pullup() failure the chain has been
 * freed and the *enclosing function* returns NULL.
 */
#define PULLUP_HDR(m, len) \
do { \
	if (unlikely((m)->m_len < (len))) { \
		(m) = m_pullup((m), (len)); \
		if ((m) == NULL) \
			return (NULL); \
	} \
} while (0)
/*
 * Prepare a TSO mbuf for hardware segmentation: pull up the headers,
 * zero the IP length/checksum fields, seed the TCP pseudo-header
 * checksum, record m_pkthdr.l3hlen, and stash the ethertype in
 * MANA_L3_PROTO for mana_xmit().
 *
 * If this function failed, the mbuf would be freed.
 */
static inline struct mbuf *
mana_tso_fixup(struct mbuf *mbuf)
{
	struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
	struct tcphdr *th;
	uint16_t etype;
	int ehlen;

	/* Account for an optional 802.1Q tag in the Ethernet header. */
	if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehlen = ETHER_HDR_LEN;
	}

	if (etype == ETHERTYPE_IP) {
		struct ip *ip;
		int iphlen;

		PULLUP_HDR(mbuf, ehlen + sizeof(*ip));
		ip = mtodo(mbuf, ehlen);
		iphlen = ip->ip_hl << 2;
		mbuf->m_pkthdr.l3hlen = ehlen + iphlen;

		PULLUP_HDR(mbuf, ehlen + iphlen + sizeof(*th));
		th = mtodo(mbuf, ehlen + iphlen);

		/* Hardware fills in per-segment length and checksums. */
		ip->ip_len = 0;
		ip->ip_sum = 0;
		th->th_sum = in_pseudo(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
	} else if (etype == ETHERTYPE_IPV6) {
		struct ip6_hdr *ip6;

		PULLUP_HDR(mbuf, ehlen + sizeof(*ip6) + sizeof(*th));
		ip6 = mtodo(mbuf, ehlen);
		if (ip6->ip6_nxt != IPPROTO_TCP) {
			/* Really something wrong; just free and return. */
			mana_dbg(NULL, "TSO mbuf not TCP, freed.\n");
			m_freem(mbuf);
			return NULL;
		}
		mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);

		th = mtodo(mbuf, ehlen + sizeof(*ip6));

		ip6->ip6_plen = 0;
		th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
	} else {
		/* CSUM_TSO is set but not IP protocol. */
		mana_warn(NULL, "TSO mbuf not right, freed.\n");
		m_freem(mbuf);
		return NULL;
	}

	MANA_L3_PROTO(mbuf) = etype;

	return (mbuf);
}
/*
 * Record an outgoing mbuf's L3 ethertype and L4 protocol in its PH_loc
 * scratch space (MANA_L3_PROTO/MANA_L4_PROTO) and set m_pkthdr.l3hlen
 * for checksum offload in mana_xmit().
 *
 * If this function failed, the mbuf would be freed.  (The current body
 * never fails; the contract mirrors mana_tso_fixup().)
 *
 * CLEANUP: dropped the duplicate MANA_L4_PROTO(mbuf) = 0 in the non-IP
 * branch — it is already zeroed unconditionally before the if-chain.
 */
static inline struct mbuf *
mana_mbuf_csum_check(struct mbuf *mbuf)
{
	struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
	struct mbuf *mbuf_next;
	uint16_t etype;
	int offset;
	int ehlen;

	/* Account for an optional 802.1Q tag in the Ethernet header. */
	if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehlen = ETHER_HDR_LEN;
	}

	mbuf_next = m_getptr(mbuf, ehlen, &offset);

	/* Default: no L4 protocol; overwritten for IPv4/IPv6 below. */
	MANA_L4_PROTO(mbuf) = 0;

	if (etype == ETHERTYPE_IP) {
		const struct ip *ip;
		int iphlen;

		ip = (struct ip *)(mtodo(mbuf_next, offset));
		iphlen = ip->ip_hl << 2;
		mbuf->m_pkthdr.l3hlen = ehlen + iphlen;

		MANA_L4_PROTO(mbuf) = ip->ip_p;
	} else if (etype == ETHERTYPE_IPV6) {
		const struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)(mtodo(mbuf_next, offset));
		mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);

		MANA_L4_PROTO(mbuf) = ip6->ip6_nxt;
	}

	MANA_L3_PROTO(mbuf) = etype;

	return (mbuf);
}
/*
 * if_transmit handler: classify the mbuf for checksum/TSO offload, pick
 * a TX queue from the RSS indirection table (or the flowid directly),
 * enqueue on that queue's buf_ring, and either transmit inline (if we
 * can take the queue mutex) or defer to the queue's enqueue task.
 */
static int
mana_start_xmit(if_t ifp, struct mbuf *m)
{
	struct mana_port_context *apc = if_getsoftc(ifp);
	struct mana_txq *txq;
	int is_drbr_empty;
	uint16_t txq_id;
	int err;

	if (unlikely((!apc->port_is_up) ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
		return ENODEV;

	/* Both helpers free the mbuf on failure. */
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		m = mana_tso_fixup(m);
		if (unlikely(m == NULL)) {
			counter_enter();
			counter_u64_add_protected(apc->port_stats.tx_drops, 1);
			counter_exit();
			return EIO;
		}
	} else {
		m = mana_mbuf_csum_check(m);
		if (unlikely(m == NULL)) {
			counter_enter();
			counter_u64_add_protected(apc->port_stats.tx_drops, 1);
			counter_exit();
			return EIO;
		}
	}

	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
		uint32_t hash = m->m_pkthdr.flowid;
		txq_id = apc->indir_table[(hash) & MANA_INDIRECT_TABLE_MASK] %
		    apc->num_queues;
	} else {
		/*
		 * NOTE(review): flowid is used even when no hash type is
		 * set — presumably still initialized by the stack; confirm.
		 */
		txq_id = m->m_pkthdr.flowid % apc->num_queues;
	}

	if (apc->enable_tx_altq)
		txq_id = apc->tx_qp[txq_id].txq.alt_txq_idx;

	txq = &apc->tx_qp[txq_id].txq;

	is_drbr_empty = drbr_empty(ifp, txq->txq_br);
	err = drbr_enqueue(ifp, txq->txq_br, m);
	if (unlikely(err)) {
		mana_warn(NULL, "txq %u failed to enqueue: %d\n",
		    txq_id, err);
		taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
		return err;
	}

	if (is_drbr_empty && mtx_trylock(&txq->txq_mtx)) {
		mana_xmit(txq);
		mtx_unlock(&txq->txq_mtx);
	} else {
		taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
	}

	return 0;
}
/*
 * Release the per-port resources created by mana_init_port_context():
 * the tx/rx DMA tags, the rxq pointer array, and the port counters.
 */
static void
mana_cleanup_port_context(struct mana_port_context *apc)
{
	bus_dma_tag_destroy(apc->tx_buf_tag);
	bus_dma_tag_destroy(apc->rx_buf_tag);
	apc->rx_buf_tag = NULL;

	free(apc->rxqs, M_DEVBUF);
	apc->rxqs = NULL;

	mana_free_counters((counter_u64_t *)&apc->port_stats,
	    sizeof(struct mana_port_stats));
}
/*
 * Allocate the per-port DMA tags (tx up to the TSO maximum with multiple
 * segments, rx a single MJUMPAGESIZE segment) and the rxq pointer array.
 * Returns 0 on success or a bus_dma error; on failure nothing allocated
 * here is leaked.
 */
static int
mana_init_port_context(struct mana_port_context *apc)
{
	device_t dev = apc->ac->gdma_dev->gdma_context->dev;
	uint32_t tso_maxsize;
	int err;

	tso_maxsize = MANA_TSO_MAX_SZ;

	/* Create DMA tag for tx bufs */
	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    1, 0,			/* alignment, boundary	*/
	    BUS_SPACE_MAXADDR,		/* lowaddr		*/
	    BUS_SPACE_MAXADDR,		/* highaddr		*/
	    NULL, NULL,			/* filter, filterarg	*/
	    tso_maxsize,		/* maxsize		*/
	    MAX_MBUF_FRAGS,		/* nsegments		*/
	    tso_maxsize,		/* maxsegsize		*/
	    0,				/* flags		*/
	    NULL, NULL,			/* lockfunc, lockfuncarg*/
	    &apc->tx_buf_tag);
	if (unlikely(err)) {
		device_printf(dev, "Failed to create TX DMA tag\n");
		return err;
	}

	/* Create DMA tag for rx bufs */
	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    64, 0,			/* alignment, boundary	*/
	    BUS_SPACE_MAXADDR,		/* lowaddr		*/
	    BUS_SPACE_MAXADDR,		/* highaddr		*/
	    NULL, NULL,			/* filter, filterarg	*/
	    MJUMPAGESIZE,		/* maxsize		*/
	    1,				/* nsegments		*/
	    MJUMPAGESIZE,		/* maxsegsize		*/
	    0,				/* flags		*/
	    NULL, NULL,			/* lockfunc, lockfuncarg*/
	    &apc->rx_buf_tag);
	if (unlikely(err)) {
		device_printf(dev, "Failed to create RX DMA tag\n");
		/* Don't leak the tx tag created above. */
		bus_dma_tag_destroy(apc->tx_buf_tag);
		apc->tx_buf_tag = NULL;
		return err;
	}

	/* M_WAITOK cannot fail; the check below is purely defensive. */
	apc->rxqs = mallocarray(apc->num_queues, sizeof(struct mana_rxq *),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (!apc->rxqs) {
		bus_dma_tag_destroy(apc->tx_buf_tag);
		bus_dma_tag_destroy(apc->rx_buf_tag);
		apc->rx_buf_tag = NULL;
		return ENOMEM;
	}

	return 0;
}
/*
 * Send a management request to the MANA device and collect the response.
 * Stamps the request with the device id and a unique activity id, then
 * checks that the response echoes both back.  Returns 0 on success, the
 * transport error from mana_gd_send_request(), or EPROTO on a
 * firmware-reported failure or a mismatched response.
 */
static int
mana_send_request(struct mana_context *ac, void *in_buf,
    uint32_t in_len, void *out_buf, uint32_t out_len)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct gdma_resp_hdr *resp = out_buf;
	struct gdma_req_hdr *req = in_buf;
	device_t dev = gc->dev;
	/* Shared by all callers; gives every request a unique id. */
	static atomic_t activity_id;
	int err;

	req->dev_id = gc->mana.dev_id;
	req->activity_id = atomic_inc_return(&activity_id);
	mana_dbg(NULL, "activity_id  = %u\n", activity_id);
	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
	    out_buf);
	if (err || resp->status) {
		device_printf(dev, "Failed to send mana message: %d, 0x%x\n",
		    err, resp->status);
		return err ? err : EPROTO;
	}

	/* The response must come from the same device/request we sent to. */
	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
	    req->activity_id != resp->activity_id) {
		device_printf(dev,
		    "Unexpected mana message response: %x,%x,%x,%x\n",
		    req->dev_id.as_uint32, resp->dev_id.as_uint32,
		    req->activity_id, resp->activity_id);
		return EPROTO;
	}

	return 0;
}
/*
 * Sanity-check a GDMA response header: it must carry the expected message
 * code, a protocol version of at least GDMA_MESSAGE_V1, and be at least
 * min_size bytes.  Returns 0 when valid, EPROTO otherwise.
 */
static int
mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
    const enum mana_command_code expected_code,
    const uint32_t min_size)
{
	if (resp_hdr->response.msg_type != expected_code ||
	    resp_hdr->response.msg_version < GDMA_MESSAGE_V1 ||
	    resp_hdr->response.msg_size < min_size)
		return EPROTO;

	return 0;
}
/*
 * Query the device configuration, advertising our protocol version.
 * On success stores the number of vPorts the device exposes in
 * *max_num_vports.  Returns 0, a transport error, or EPROTO on an
 * invalid or failed response.
 */
static int
mana_query_device_cfg(struct mana_context *ac, uint32_t proto_major_ver,
    uint32_t proto_minor_ver, uint32_t proto_micro_ver,
    uint16_t *max_num_vports)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct mana_query_device_cfg_resp resp = {};
	struct mana_query_device_cfg_req req = {};
	device_t dev = gc->dev;
	int err = 0;

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
	    sizeof(req), sizeof(resp));
	req.proto_major_ver = proto_major_ver;
	req.proto_minor_ver = proto_minor_ver;
	req.proto_micro_ver = proto_micro_ver;

	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
	if (err) {
		device_printf(dev, "Failed to query config: %d", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
	    sizeof(resp));
	if (err || resp.hdr.status) {
		device_printf(dev, "Invalid query result: %d, 0x%x\n", err,
		    resp.hdr.status);
		if (!err)
			err = EPROTO;
		return err;
	}

	*max_num_vports = resp.max_num_vports;

	mana_dbg(NULL, "mana max_num_vports from device = %d\n",
	    *max_num_vports);

	return 0;
}
/*
 * Query the configuration of one vPort.  On success fills in the SQ/RQ
 * limits and indirection-table size, and records the vPort handle and
 * MAC address in the port context.  Returns 0, a transport error, or
 * EPROTO on a bad response.
 */
static int
mana_query_vport_cfg(struct mana_port_context *apc, uint32_t vport_index,
    uint32_t *max_sq, uint32_t *max_rq, uint32_t *num_indir_entry)
{
	struct mana_query_vport_cfg_resp resp = {};
	struct mana_query_vport_cfg_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
	    sizeof(req), sizeof(resp));

	req.vport_index = vport_index;

	/* Funnel all failure modes into a single error check. */
	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
	    sizeof(resp));
	if (err == 0)
		err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
		    sizeof(resp));
	if (err == 0 && resp.hdr.status != 0)
		err = EPROTO;
	if (err != 0)
		return err;

	*max_sq = resp.max_num_sq;
	*max_rq = resp.max_num_rq;
	*num_indir_entry = resp.num_indirection_ent;

	apc->port_handle = resp.vport;
	memcpy(apc->mac_addr, resp.mac_addr, ETHER_ADDR_LEN);

	return 0;
}
/*
 * Drop one reference on the vPort taken by mana_cfg_vport().  A negative
 * use count indicates unbalanced cfg/uncfg calls and is only warned about.
 */
void
mana_uncfg_vport(struct mana_port_context *apc)
{
	apc->vport_use_count--;
	if (apc->vport_use_count < 0) {
		/* %d, not %u: the value is negative on this path. */
		mana_err(NULL,
		    "WARNING: vport_use_count less than 0: %d\n",
		    apc->vport_use_count);
	}
}
/*
 * Program the Ethernet vPort into the hardware (TX direction) and record
 * the returned tx vport offset/short-form capability.  Enforces single
 * ownership of the physical port between the Ethernet and RDMA drivers
 * via vport_use_count.  Returns 0, EBUSY if the port is already in use,
 * a transport error, or EPROTO.
 */
int
mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id,
    uint32_t doorbell_pg_id)
{
	struct mana_config_vport_resp resp = {};
	struct mana_config_vport_req req = {};
	int err;

	/* This function is used to program the Ethernet port in the hardware
	 * table. It can be called from the Ethernet driver or the RDMA driver.
	 *
	 * For Ethernet usage, the hardware supports only one active user on a
	 * physical port. The driver checks on the port usage before programming
	 * the hardware when creating the RAW QP (RDMA driver) or exposing the
	 * device to kernel NET layer (Ethernet driver).
	 *
	 * Because the RDMA driver doesn't know in advance which QP type the
	 * user will create, it exposes the device with all its ports. The user
	 * may not be able to create RAW QP on a port if this port is already
	 * in used by the Ethernet driver from the kernel.
	 *
	 * This physical port limitation only applies to the RAW QP. For RC QP,
	 * the hardware doesn't have this limitation. The user can create RC
	 * QPs on a physical port up to the hardware limits independent of the
	 * Ethernet usage on the same port.
	 */
	if (apc->vport_use_count > 0) {
		return EBUSY;
	}
	apc->vport_use_count++;

	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
	    sizeof(req), sizeof(resp));
	req.vport = apc->port_handle;
	req.pdid = protection_dom_id;
	req.doorbell_pageid = doorbell_pg_id;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
	    sizeof(resp));
	if (err) {
		if_printf(apc->ndev, "Failed to configure vPort: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
	    sizeof(resp));
	if (err || resp.hdr.status) {
		if_printf(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
		    err, resp.hdr.status);
		if (!err)
			err = EPROTO;
		goto out;
	}

	apc->tx_shortform_allowed = resp.short_form_allowed;
	apc->tx_vp_offset = resp.tx_vport_offset;

	if_printf(apc->ndev, "Configured vPort %ju PD %u DB %u\n",
	    apc->port_handle, protection_dom_id, doorbell_pg_id);

out:
	/* On any failure release the use count taken above. */
	if (err)
		mana_uncfg_vport(apc);

	return err;
}
/*
 * Configure RX steering for the vPort: enable/disable RX, and optionally
 * update the default rx object, the RSS hash key, and/or the indirection
 * table (appended to the request right after the fixed header).  Returns
 * 0, a transport error, or EPROTO.
 */
static int
mana_cfg_vport_steering(struct mana_port_context *apc,
    enum TRI_STATE rx,
    bool update_default_rxobj, bool update_key,
    bool update_tab)
{
	uint16_t num_entries = MANA_INDIRECT_TABLE_SIZE;
	struct mana_cfg_rx_steer_req *req = NULL;
	struct mana_cfg_rx_steer_resp resp = {};
	if_t ndev = apc->ndev;
	mana_handle_t *req_indir_tab;
	uint32_t req_buf_size;
	int err;

	/* The indirection table is carried inline after the request struct. */
	req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
	req = malloc(req_buf_size, M_DEVBUF, M_WAITOK | M_ZERO);
	if (!req)
		return ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
	    sizeof(resp));

	req->vport = apc->port_handle;
	req->num_indir_entries = num_entries;
	req->indir_tab_offset = sizeof(*req);
	req->rx_enable = rx;
	req->rss_enable = apc->rss_state;

	req->update_default_rxobj = update_default_rxobj;
	req->update_hashkey = update_key;
	req->update_indir_tab = update_tab;

	req->default_rxobj = apc->default_rxobj;

	if (update_key)
		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);

	if (update_tab) {
		/* Copy the rxobj handles into the inline table region. */
		req_indir_tab = (mana_handle_t *)(req + 1);
		memcpy(req_indir_tab, apc->rxobj_table,
		    req->num_indir_entries * sizeof(mana_handle_t));
	}

	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
	    sizeof(resp));
	if (err) {
		if_printf(ndev, "Failed to configure vPort RX: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
	    sizeof(resp));
	if (err) {
		if_printf(ndev, "vPort RX configuration failed: %d\n", err);
		goto out;
	}

	if (resp.hdr.status) {
		if_printf(ndev, "vPort RX configuration failed: 0x%x\n",
		    resp.hdr.status);
		err = EPROTO;
	}

	if_printf(ndev, "Configured steering vPort %ju entries %u\n",
	    apc->port_handle, num_entries);

out:
	free(req, M_DEVBUF);
	return err;
}
/*
 * Create a hardware work-queue object (SQ or RQ) bound to the given vPort,
 * from the pre-allocated WQ/CQ GDMA regions in wq_spec/cq_spec.  On success
 * stores the object handle in *wq_obj and writes the hardware-assigned
 * queue ids back into wq_spec/cq_spec.  Returns 0, a transport error, or
 * EPROTO.
 */
int
mana_create_wq_obj(struct mana_port_context *apc,
    mana_handle_t vport,
    uint32_t wq_type, struct mana_obj_spec *wq_spec,
    struct mana_obj_spec *cq_spec,
    mana_handle_t *wq_obj)
{
	struct mana_create_wqobj_resp resp = {};
	struct mana_create_wqobj_req req = {};
	if_t ndev = apc->ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
	    sizeof(req), sizeof(resp));
	req.vport = vport;
	req.wq_type = wq_type;
	req.wq_gdma_region = wq_spec->gdma_region;
	req.cq_gdma_region = cq_spec->gdma_region;
	req.wq_size = wq_spec->queue_size;
	req.cq_size = cq_spec->queue_size;
	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
	req.cq_parent_qid = cq_spec->attached_eq;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
	    sizeof(resp));
	if (err) {
		if_printf(ndev, "Failed to create WQ object: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
	    sizeof(resp));
	if (err || resp.hdr.status) {
		if_printf(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
		    resp.hdr.status);
		if (!err)
			err = EPROTO;
		goto out;
	}

	if (resp.wq_obj == INVALID_MANA_HANDLE) {
		if_printf(ndev, "Got an invalid WQ object handle\n");
		err = EPROTO;
		goto out;
	}

	*wq_obj = resp.wq_obj;
	wq_spec->queue_index = resp.wq_id;
	cq_spec->queue_index = resp.cq_id;

	return 0;
out:
	return err;
}
/*
 * Destroy a hardware work-queue object previously created by
 * mana_create_wq_obj().  Failures are logged but not propagated since
 * this runs on teardown paths.
 */
void
mana_destroy_wq_obj(struct mana_port_context *apc, uint32_t wq_type,
    mana_handle_t wq_obj)
{
	struct mana_destroy_wqobj_resp resp = {};
	struct mana_destroy_wqobj_req req = {};
	if_t ndev = apc->ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
	    sizeof(req), sizeof(resp));
	req.wq_type = wq_type;
	req.wq_obj_handle = wq_obj;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
	    sizeof(resp));
	if (err) {
		if_printf(ndev, "Failed to destroy WQ object: %d\n", err);
		return;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
	    sizeof(resp));
	if (err || resp.hdr.status)
		if_printf(ndev, "Failed to destroy WQ object: %d, 0x%x\n",
		    err, resp.hdr.status);
}
/*
 * Destroy every event queue created by mana_create_eq() and release the
 * eqs array.  Safe to call when allocation never happened or partially
 * failed (NULL entries are skipped).
 */
static void
mana_destroy_eq(struct mana_context *ac)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	int idx;

	if (ac->eqs == NULL)
		return;

	for (idx = 0; idx < gc->max_num_queues; idx++) {
		struct gdma_queue *eq = ac->eqs[idx].eq;

		if (eq != NULL)
			mana_gd_destroy_queue(gc, eq);
	}

	free(ac->eqs, M_DEVBUF);
	ac->eqs = NULL;
}
/*
 * Allocate and create one event queue per hardware queue (gc->max_num_queues).
 * On any failure every EQ created so far is torn down via mana_destroy_eq().
 * Returns 0 or an errno.
 */
static int
mana_create_eq(struct mana_context *ac)
{
	struct gdma_dev *gd = ac->gdma_dev;
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_queue_spec spec = {};
	int err;
	int i;

	ac->eqs = mallocarray(gc->max_num_queues, sizeof(struct mana_eq),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (!ac->eqs)
		return ENOMEM;

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = EQ_SIZE;
	spec.eq.callback = NULL;
	spec.eq.context = ac->eqs;
	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;

	for (i = 0; i < gc->max_num_queues; i++) {
		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
		if (err)
			goto out;
	}

	return 0;
out:
	/* Unwind any EQs created before the failure. */
	mana_destroy_eq(ac);
	return err;
}
/*
 * Ask the hardware to fence one RQ and wait for the fence-completion CQE
 * (delivered via rxq->fence_event in mana_process_rx_cqe()).  Returns 0,
 * a transport error, EPROTO, or ETIMEDOUT after ~10 seconds.
 */
static int
mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
{
	struct mana_fence_rq_resp resp = {};
	struct mana_fence_rq_req req = {};
	int err;

	init_completion(&rxq->fence_event);

	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
	    sizeof(req), sizeof(resp));
	req.wq_obj_handle = rxq->rxobj;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
	    sizeof(resp));
	if (err) {
		if_printf(apc->ndev, "Failed to fence RQ %u: %d\n",
		    rxq->rxq_idx, err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
	if (err || resp.hdr.status) {
		if_printf(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
		    rxq->rxq_idx, err, resp.hdr.status);
		if (!err)
			err = EPROTO;
		return err;
	}

	/*
	 * NOTE(review): this treats a non-zero return from
	 * wait_for_completion_timeout() as a timeout — the driver's local
	 * completion shim evidently returns non-zero on timeout (opposite
	 * of the Linux convention); confirm against gdma_util.
	 */
	if (wait_for_completion_timeout(&rxq->fence_event, 10 * hz)) {
		if_printf(apc->ndev, "Failed to fence RQ %u: timed out\n",
		    rxq->rxq_idx);
		return ETIMEDOUT;
	}

	return 0;
}
/*
 * Fence every RQ on the port so no further completions arrive for them.
 * A failed fence falls back to a short sleep to give in-flight traffic
 * time to drain.
 */
static void
mana_fence_rqs(struct mana_port_context *apc)
{
	struct mana_rxq *rxq;
	unsigned int i;

	for (i = 0; i < apc->num_queues; i++) {
		rxq = apc->rxqs[i];
		/* In case of any error, use sleep instead. */
		if (mana_fence_rq(apc, rxq) != 0)
			gdma_msleep(100);
	}
}
/*
 * Advance the work queue tail by num_units completed work-queue units.
 * The head/tail counters are free-running uint32_t values, so used space
 * is computed by (wrapping) subtraction; a tail that would pass the head
 * is refused with ERANGE.
 */
static int
mana_move_wq_tail(struct gdma_queue *wq, uint32_t num_units)
{
	uint32_t old_used, new_used;

	old_used = wq->head - wq->tail;
	new_used = wq->head - (wq->tail + num_units);

	if (new_used > old_used) {
		mana_err(NULL,
		    "WARNING: new used space %u greater than old one %u\n",
		    new_used, old_used);
		return ERANGE;
	}

	wq->tail += num_units;
	return 0;
}
/*
 * Drain completed sends from a TX completion queue: unmap and free the
 * transmitted mbufs, advance the SQ tail by the completed WQE units, and
 * wake the interface back up if it was marked full and enough WQE space
 * is now available.  Runs from the CQ cleanup task.
 */
static void
mana_poll_tx_cq(struct mana_cq *cq)
{
	struct gdma_comp *completions = cq->gdma_comp_buf;
	struct gdma_posted_wqe_info *wqe_info;
	struct mana_send_buf_info *tx_info;
	unsigned int pkt_transmitted = 0;
	unsigned int wqe_unit_cnt = 0;
	struct mana_txq *txq = cq->txq;
	struct mana_port_context *apc;
	uint16_t next_to_complete;
	if_t ndev;
	int comp_read;
	int txq_idx = txq->idx;	/* was "txq->idx;;" — stray semicolon removed */
	int i;
	int sa_drop = 0;

	struct gdma_queue *gdma_wq;
	unsigned int avail_space;
	bool txq_full = false;

	ndev = txq->ndev;
	apc = if_getsoftc(ndev);

	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
	    CQE_POLLING_BUFFER);

	if (comp_read < 1)
		return;

	next_to_complete = txq->next_to_complete;

	for (i = 0; i < comp_read; i++) {
		struct mana_tx_comp_oob *cqe_oob;

		if (!completions[i].is_sq) {
			mana_err(NULL, "WARNING: Not for SQ\n");
			return;
		}

		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
		if (cqe_oob->cqe_hdr.client_type !=
		    MANA_CQE_COMPLETION) {
			mana_err(NULL,
			    "WARNING: Invalid CQE client type %u\n",
			    cqe_oob->cqe_hdr.client_type);
			return;
		}

		switch (cqe_oob->cqe_hdr.cqe_type) {
		case CQE_TX_OKAY:
			break;

		case CQE_TX_SA_DROP:
		case CQE_TX_MTU_DROP:
		case CQE_TX_INVALID_OOB:
		case CQE_TX_INVALID_ETH_TYPE:
		case CQE_TX_HDR_PROCESSING_ERROR:
		case CQE_TX_VF_DISABLED:
		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
		case CQE_TX_VPORT_DISABLED:
		case CQE_TX_VLAN_TAGGING_VIOLATION:
			/* Hardware dropped the packet; count it and still
			 * reclaim the mbuf below.
			 */
			sa_drop++;
			mana_dbg(NULL,
			    "TX: txq %d CQE error %d, ntc = %d, "
			    "pending sends = %d: err ignored.\n",
			    txq_idx, cqe_oob->cqe_hdr.cqe_type,
			    next_to_complete, txq->pending_sends);
			counter_u64_add(txq->stats.cqe_err, 1);
			break;

		default:
			/* If the CQE type is unknown, log a debug msg,
			 * and still free the mbuf, etc.
			 */
			mana_dbg(NULL,
			    "ERROR: TX: Unknown CQE type %d\n",
			    cqe_oob->cqe_hdr.cqe_type);
			counter_u64_add(txq->stats.cqe_unknown_type, 1);
			break;
		}
		if (txq->gdma_txq_id != completions[i].wq_num) {
			mana_dbg(NULL,
			    "txq gdma id not match completion wq num: "
			    "%d != %d\n",
			    txq->gdma_txq_id, completions[i].wq_num);
			break;
		}

		tx_info = &txq->tx_buf_info[next_to_complete];
		if (!tx_info->mbuf) {
			mana_err(NULL,
			    "WARNING: txq %d Empty mbuf on tx_info: %u, "
			    "ntu = %u, pending_sends = %d, "
			    "transmitted = %d, sa_drop = %d, i = %d, comp_read = %d\n",
			    txq_idx, next_to_complete, txq->next_to_use,
			    txq->pending_sends, pkt_transmitted, sa_drop,
			    i, comp_read);
			break;
		}

		wqe_info = &tx_info->wqe_inf;
		wqe_unit_cnt += wqe_info->wqe_size_in_bu;

		mana_tx_unmap_mbuf(apc, tx_info);
		mb();

		next_to_complete =
		    (next_to_complete + 1) % MAX_SEND_BUFFERS_PER_QUEUE;

		pkt_transmitted++;
	}

	txq->next_to_complete = next_to_complete;

	if (wqe_unit_cnt == 0) {
		mana_err(NULL,
		    "WARNING: TX ring not proceeding!\n");
		return;
	}

	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);

	/* Ensure tail updated before checking q stop */
	wmb();

	gdma_wq = txq->gdma_sq;
	avail_space = mana_gd_wq_avail_space(gdma_wq);

	if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL) {
		txq_full = true;
	}

	/* Ensure checking txq_full before apc->port_is_up. */
	rmb();

	if (txq_full && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
		/* Grab the txq lock and re-test under it. */
		mtx_lock(&txq->txq_mtx);
		avail_space = mana_gd_wq_avail_space(gdma_wq);

		if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL &&
		    apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
			/* Clear the Q full flag */
			if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING,
			    IFF_DRV_OACTIVE);
			counter_u64_add(txq->stats.wakeup, 1);
			if (txq->alt_txq_idx != txq->idx) {
				uint64_t stops = counter_u64_fetch(txq->stats.stop);
				uint64_t wakeups = counter_u64_fetch(txq->stats.wakeup);
				/* Reset alt_txq_idx back if it is not overloaded */
				if (stops < wakeups) {
					txq->alt_txq_idx = txq->idx;
					counter_u64_add(txq->stats.alt_reset, 1);
				}
			}
			rmb();
			/* Schedule a tx enqueue task */
			taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
		}
		mtx_unlock(&txq->txq_mtx);
	}

	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
		mana_err(NULL,
		    "WARNING: TX %d pending_sends error: %d\n",
		    txq->idx, txq->pending_sends);

	cq->work_done = pkt_transmitted;
}
/*
 * Repost the receive buffer at the current ring index back to the RQ and
 * advance the index (with wraparound).  Post failures are logged only;
 * the slot is still consumed.
 */
static void
mana_post_pkt_rxq(struct mana_rxq *rxq)
{
	struct mana_recv_buf_oob *rx_oob;
	uint32_t idx;
	int err;

	/* Take the current slot and advance the ring index. */
	idx = rxq->buf_index++;
	if (rxq->buf_index == rxq->num_rx_buf)
		rxq->buf_index = 0;

	rx_oob = &rxq->rx_oobs[idx];

	err = mana_gd_post_work_request(rxq->gdma_rq, &rx_oob->wqe_req,
	    &rx_oob->wqe_inf);
	if (err != 0) {
		mana_err(NULL, "WARNING: rxq %u post pkt err %d\n",
		    rxq->rxq_idx, err);
		return;
	}

	if (rx_oob->wqe_inf.wqe_size_in_bu != 1) {
		mana_err(NULL, "WARNING: rxq %u wqe_size_in_bu %u\n",
		    rxq->rxq_idx, rx_oob->wqe_inf.wqe_size_in_bu);
	}
}
/*
 * Hand one received mbuf up the stack: set packet-header fields, apply
 * hardware checksum results, map the NDIS RSS hash type onto the mbuf
 * hash type, try LRO for TCP when enabled, and finally deliver via
 * if_input().  A NULL mbuf still counts as CQ work done.  Statistics are
 * updated on every delivered packet.
 */
static void
mana_rx_mbuf(struct mbuf *mbuf, struct mana_rxcomp_oob *cqe,
    struct mana_rxq *rxq)
{
	struct mana_stats *rx_stats = &rxq->stats;
	if_t ndev = rxq->ndev;
	uint32_t pkt_len = cqe->ppi[0].pkt_len;
	uint16_t rxq_idx = rxq->rxq_idx;
	struct mana_port_context *apc;
	bool do_lro = false;
	bool do_if_input;

	apc = if_getsoftc(ndev);
	rxq->rx_cq.work_done++;

	/* Packet was dropped earlier (e.g. mbuf reload failure). */
	if (!mbuf) {
		return;
	}

	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = pkt_len;
	mbuf->m_len = pkt_len;
	mbuf->m_pkthdr.rcvif = ndev;

	/* Propagate hardware checksum verification results. */
	if ((if_getcapenable(ndev) & IFCAP_RXCSUM ||
	    if_getcapenable(ndev) & IFCAP_RXCSUM_IPV6) &&
	    (cqe->rx_iphdr_csum_succeed)) {
		mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
		mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) {
			mbuf->m_pkthdr.csum_flags |=
			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			mbuf->m_pkthdr.csum_data = 0xffff;

			/* Only TCP packets are candidates for LRO. */
			if (cqe->rx_tcp_csum_succeed)
				do_lro = true;
		}
	}

	/* Translate the NDIS hash type into the mbuf RSS hash type. */
	if (cqe->rx_hashtype != 0) {
		mbuf->m_pkthdr.flowid = cqe->ppi[0].pkt_hash;

		uint16_t hashtype = cqe->rx_hashtype;
		if (hashtype & NDIS_HASH_IPV4_MASK) {
			hashtype &= NDIS_HASH_IPV4_MASK;
			switch (hashtype) {
			case NDIS_HASH_TCP_IPV4:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
				break;
			case NDIS_HASH_UDP_IPV4:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4);
				break;
			default:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
			}
		} else if (hashtype & NDIS_HASH_IPV6_MASK) {
			hashtype &= NDIS_HASH_IPV6_MASK;
			switch (hashtype) {
			case NDIS_HASH_TCP_IPV6:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
				break;
			case NDIS_HASH_TCP_IPV6_EX:
				M_HASHTYPE_SET(mbuf,
				    M_HASHTYPE_RSS_TCP_IPV6_EX);
				break;
			case NDIS_HASH_UDP_IPV6:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6);
				break;
			case NDIS_HASH_UDP_IPV6_EX:
				M_HASHTYPE_SET(mbuf,
				    M_HASHTYPE_RSS_UDP_IPV6_EX);
				break;
			default:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
			}
		} else {
			M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
		}
	} else {
		/* No hardware hash: use the rxq index as an opaque flow id. */
		mbuf->m_pkthdr.flowid = rxq_idx;
		M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
	}

	do_if_input = true;
	if ((if_getcapenable(ndev) & IFCAP_LRO) && do_lro) {
		rxq->lro_tried++;
		if (rxq->lro.lro_cnt != 0 &&
		    tcp_lro_rx(&rxq->lro, mbuf, 0) == 0)
			do_if_input = false;
		else
			rxq->lro_failed++;
	}
	if (do_if_input) {
		if_input(ndev, mbuf);
	}

	counter_enter();
	counter_u64_add_protected(rx_stats->packets, 1);
	counter_u64_add_protected(apc->port_stats.rx_packets, 1);
	counter_u64_add_protected(rx_stats->bytes, pkt_len);
	counter_u64_add_protected(apc->port_stats.rx_bytes, pkt_len);
	counter_exit();
}
/*
 * Process one RX completion: for a good CQE, detach the filled mbuf,
 * load a fresh one into the slot (falling back to reusing the old one
 * and dropping the packet on allocation failure), pass the packet to
 * mana_rx_mbuf(), then advance the RQ tail and repost the buffer.
 * Truncated packets are dropped but their buffer is still reposted.
 */
static void
mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
    struct gdma_comp *cqe)
{
	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
	struct mana_recv_buf_oob *rxbuf_oob;
	if_t ndev = rxq->ndev;
	struct mana_port_context *apc;
	struct mbuf *old_mbuf;
	uint32_t curr, pktlen;
	int err;

	switch (oob->cqe_hdr.cqe_type) {
	case CQE_RX_OKAY:
		break;

	case CQE_RX_TRUNCATED:
		/* Drop, but still recycle the buffer at the current slot. */
		apc = if_getsoftc(ndev);
		counter_u64_add(apc->port_stats.rx_drops, 1);
		rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
		if_printf(ndev, "Dropped a truncated packet\n");
		goto drop;

	case CQE_RX_COALESCED_4:
		if_printf(ndev, "RX coalescing is unsupported\n");
		return;

	case CQE_RX_OBJECT_FENCE:
		/* Wake up mana_fence_rq() waiting on this RQ's fence. */
		complete(&rxq->fence_event);
		return;

	default:
		if_printf(ndev, "Unknown RX CQE type = %d\n",
		    oob->cqe_hdr.cqe_type);
		return;
	}

	/* Defensive: only CQE_RX_OKAY can reach this point. */
	if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
		return;

	pktlen = oob->ppi[0].pkt_len;

	if (pktlen == 0) {
		/* data packets should never have packetlength of zero */
		if_printf(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%jx\n",
		    rxq->gdma_id, cq->gdma_id, rxq->rxobj);
		return;
	}

	curr = rxq->buf_index;
	rxbuf_oob = &rxq->rx_oobs[curr];
	if (rxbuf_oob->wqe_inf.wqe_size_in_bu != 1) {
		mana_err(NULL, "WARNING: Rx Incorrect complete "
		    "WQE size %u\n",
		    rxbuf_oob->wqe_inf.wqe_size_in_bu);
	}

	apc = if_getsoftc(ndev);

	old_mbuf = rxbuf_oob->mbuf;

	/* Unload DMA map for the old mbuf */
	mana_unload_rx_mbuf(apc, rxq, rxbuf_oob, false);

	/* Load a new mbuf to replace the old one */
	err = mana_load_rx_mbuf(apc, rxq, rxbuf_oob, true);
	if (err) {
		mana_dbg(NULL,
		    "failed to load rx mbuf, err = %d, packet dropped.\n",
		    err);
		counter_u64_add(rxq->stats.mbuf_alloc_fail, 1);
		/*
		 * Failed to load new mbuf, rxbuf_oob->mbuf is still
		 * pointing to the old one. Drop the packet.
		 */
		 old_mbuf = NULL;
		 /* Reload the existing mbuf */
		 mana_load_rx_mbuf(apc, rxq, rxbuf_oob, false);
	}

	/* NULL old_mbuf here just counts the CQ work and returns. */
	mana_rx_mbuf(old_mbuf, oob, rxq);

drop:
	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);

	mana_post_pkt_rxq(rxq);
}
/*
 * Poll the RX completion queue and process each CQE, verifying that every
 * completion belongs to this rxq.  After processing, ring the RQ doorbell
 * once for all reposted buffers and flush any pending LRO aggregation.
 */
static void
mana_poll_rx_cq(struct mana_cq *cq)
{
	struct gdma_comp *comp = cq->gdma_comp_buf;
	int comp_read, i;

	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
	KASSERT(comp_read <= CQE_POLLING_BUFFER,
	    ("comp_read %d great than buf size %d",
	    comp_read, CQE_POLLING_BUFFER));

	for (i = 0; i < comp_read; i++) {
		if (comp[i].is_sq == true) {
			mana_err(NULL,
			    "WARNING: CQE not for receive queue\n");
			return;
		}

		/* verify recv cqe references the right rxq */
		if (comp[i].wq_num != cq->rxq->gdma_id) {
			mana_err(NULL,
			    "WARNING: Received CQE %d not for "
			    "this receive queue %d\n",
			    comp[i].wq_num, cq->rxq->gdma_id);
			return;
		}

		mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
	}

	if (comp_read > 0) {
		struct gdma_context *gc =
		    cq->rxq->gdma_rq->gdma_dev->gdma_context;

		/* One doorbell ring covers all buffers reposted above. */
		mana_gd_wq_ring_doorbell(gc, cq->rxq->gdma_rq);
	}

	tcp_lro_flush_all(&cq->rxq->lro);
}
/*
 * Dispatch CQ processing to the RX or TX poller, then re-arm the CQ —
 * but only when the poll finished under budget (no more work pending)
 * and the caller has not suppressed the doorbell.
 */
static void
mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
{
	struct mana_cq *cq = context;
	uint8_t arm_bit = 0;

	KASSERT(cq->gdma_cq == gdma_queue,
	    ("cq do not match %p, %p", cq->gdma_cq, gdma_queue));

	if (cq->type == MANA_CQ_TYPE_RX)
		mana_poll_rx_cq(cq);
	else
		mana_poll_tx_cq(cq);

	if (cq->work_done < cq->budget && !cq->do_not_ring_db)
		arm_bit = SET_ARM_BIT;

	mana_gd_ring_cq(gdma_queue, arm_bit);
}
#define MANA_POLL_BUDGET 8
#define MANA_RX_BUDGET 256
#define MANA_TX_BUDGET MAX_SEND_BUFFERS_PER_QUEUE
/*
 * CQ cleanup task: repeatedly invoke the CQ handler while it keeps
 * hitting its per-pass budget, up to MANA_POLL_BUDGET passes.  The last
 * pass runs with an oversized budget so the CQ is always re-armed before
 * the task returns.
 */
static void
mana_poll(void *arg, int pending)
{
	struct mana_cq *cq = arg;
	int round;

	cq->work_done = 0;
	cq->budget = (cq->type == MANA_CQ_TYPE_RX) ?
	    MANA_RX_BUDGET : MANA_TX_BUDGET;

	for (round = 0; round < MANA_POLL_BUDGET; round++) {
		/*
		 * If this is the last loop, set the budget big enough
		 * so it will arm the CQ any way.
		 */
		if (round == MANA_POLL_BUDGET - 1)
			cq->budget = CQE_POLLING_BUFFER + 1;

		mana_cq_handler(cq, cq->gdma_cq);

		if (cq->work_done < cq->budget)
			break;

		cq->work_done = 0;
	}
}
/* CQ interrupt callback: defer all processing to the cleanup taskqueue. */
static void
mana_schedule_task(void *arg, struct gdma_queue *gdma_queue)
{
	struct mana_cq *cq = arg;

	taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task);
}
/*
 * Tear down a completion queue: cancel/drain its cleanup taskqueue so no
 * task can run afterwards, free the taskqueue, and destroy the GDMA queue.
 * Safe to call on a CQ that was never created.
 */
static void
mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;

	if (!cq->gdma_cq)
		return;

	/* Drain cleanup taskqueue */
	if (cq->cleanup_tq) {
		/* Loop until no pending task remains, then wait it out. */
		while (taskqueue_cancel(cq->cleanup_tq,
		    &cq->cleanup_task, NULL)) {
			taskqueue_drain(cq->cleanup_tq,
			    &cq->cleanup_task);
		}

		taskqueue_free(cq->cleanup_tq);
	}

	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
}
/*
 * Tear down a tx queue: warn about any outstanding sends, flush and free
 * the buf ring, drain and free the enqueue taskqueue, unmap and free any
 * in-flight mbufs, release the stats counters, destroy the GDMA SQ and
 * the queue mutex.  Safe to call on a txq that was never fully created.
 */
static void
mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;
	struct mana_send_buf_info *txbuf_info;
	uint32_t pending_sends;
	int i;

	if (!txq->gdma_sq)
		return;

	/* Sends still pending here indicate a teardown ordering problem. */
	if ((pending_sends = atomic_read(&txq->pending_sends)) > 0) {
		mana_err(NULL,
		    "WARNING: txq pending sends not zero: %u\n",
		    pending_sends);
	}

	if (txq->next_to_use != txq->next_to_complete) {
		mana_err(NULL,
		    "WARNING: txq buf not completed, "
		    "next use %u, next complete %u\n",
		    txq->next_to_use, txq->next_to_complete);
	}

	/* Flush buf ring. Grab txq mtx lock */
	if (txq->txq_br) {
		mtx_lock(&txq->txq_mtx);
		drbr_flush(apc->ndev, txq->txq_br);
		mtx_unlock(&txq->txq_mtx);
		buf_ring_free(txq->txq_br, M_DEVBUF);
	}

	/* Drain taskqueue */
	if (txq->enqueue_tq) {
		while (taskqueue_cancel(txq->enqueue_tq,
		    &txq->enqueue_task, NULL)) {
			taskqueue_drain(txq->enqueue_tq,
			    &txq->enqueue_task);
		}

		taskqueue_free(txq->enqueue_tq);
	}

	if (txq->tx_buf_info) {
		/* Free all mbufs which are still in-flight */
		for (i = 0; i < MAX_SEND_BUFFERS_PER_QUEUE; i++) {
			txbuf_info = &txq->tx_buf_info[i];
			if (txbuf_info->mbuf) {
				mana_tx_unmap_mbuf(apc, txbuf_info);
			}
		}

		free(txq->tx_buf_info, M_DEVBUF);
	}

	mana_free_counters((counter_u64_t *)&txq->stats,
	    sizeof(txq->stats));

	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);

	mtx_destroy(&txq->txq_mtx);
}
/*
 * Destroy every tx queue pair on the port: the hardware WQ object, the
 * completion queue, and the software txq state.  Safe to call when no
 * queues were allocated.
 */
static void
mana_destroy_txq(struct mana_port_context *apc)
{
	int qi;

	if (apc->tx_qp == NULL)
		return;

	for (qi = 0; qi < apc->num_queues; qi++) {
		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[qi].tx_object);

		mana_deinit_cq(apc, &apc->tx_qp[qi].tx_cq);

		mana_deinit_txq(apc, &apc->tx_qp[qi].txq);
	}

	free(apc->tx_qp, M_DEVBUF);
	apc->tx_qp = NULL;
}
/*
 * Create all tx queue pairs for the port: for each queue, a GDMA SQ, its
 * completion queue attached to the matching EQ, the hardware WQ object,
 * the tx buffer bookkeeping array, the buf ring, the enqueue taskqueue,
 * and the CQ cleanup task (optionally pinned to a CPU).  On any failure
 * everything created so far is torn down via mana_destroy_txq().
 */
static int
mana_create_txq(struct mana_port_context *apc, if_t net)
{
	struct mana_context *ac = apc->ac;
	struct gdma_dev *gd = ac->gdma_dev;
	struct mana_obj_spec wq_spec;
	struct mana_obj_spec cq_spec;
	struct gdma_queue_spec spec;
	struct gdma_context *gc;
	struct mana_txq *txq;
	struct mana_cq *cq;
	uint32_t txq_size;
	uint32_t cq_size;
	int err;
	int i;

	apc->tx_qp = mallocarray(apc->num_queues, sizeof(struct mana_tx_qp),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (!apc->tx_qp)
		return ENOMEM;

	/* The minimum size of the WQE is 32 bytes, hence
	 * MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
	 * the SQ can store. This value is then used to size other queues
	 * to prevent overflow.
	 */
	txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
	KASSERT(IS_ALIGNED(txq_size, PAGE_SIZE),
	    ("txq size not page aligned"));

	cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
	cq_size = ALIGN(cq_size, PAGE_SIZE);

	gc = gd->gdma_context;

	for (i = 0; i < apc->num_queues; i++) {
		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;

		/* Create SQ */
		txq = &apc->tx_qp[i].txq;

		txq->ndev = net;
		txq->vp_offset = apc->tx_vp_offset;
		txq->idx = i;
		txq->alt_txq_idx = i;

		memset(&spec, 0, sizeof(spec));
		spec.type = GDMA_SQ;
		spec.monitor_avl_buf = true;
		spec.queue_size = txq_size;
		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
		if (err)
			goto out;

		/* Create SQ's CQ */
		cq = &apc->tx_qp[i].tx_cq;
		cq->type = MANA_CQ_TYPE_TX;

		cq->txq = txq;

		memset(&spec, 0, sizeof(spec));
		spec.type = GDMA_CQ;
		spec.monitor_avl_buf = false;
		spec.queue_size = cq_size;
		spec.cq.callback = mana_schedule_task;
		spec.cq.parent_eq = ac->eqs[i].eq;
		spec.cq.context = cq;
		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
		if (err)
			goto out;

		memset(&wq_spec, 0, sizeof(wq_spec));
		memset(&cq_spec, 0, sizeof(cq_spec));

		wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
		wq_spec.queue_size = txq->gdma_sq->queue_size;

		cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
		cq_spec.queue_size = cq->gdma_cq->queue_size;
		cq_spec.modr_ctx_id = 0;
		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;

		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
		    &wq_spec, &cq_spec, &apc->tx_qp[i].tx_object);

		if (err)
			goto out;

		txq->gdma_sq->id = wq_spec.queue_index;
		cq->gdma_cq->id = cq_spec.queue_index;

		/* The regions now belong to the WQ object; forget them here. */
		txq->gdma_sq->mem_info.dma_region_handle =
		    GDMA_INVALID_DMA_REGION;
		cq->gdma_cq->mem_info.dma_region_handle =
		    GDMA_INVALID_DMA_REGION;

		txq->gdma_txq_id = txq->gdma_sq->id;

		cq->gdma_id = cq->gdma_cq->id;

		mana_dbg(NULL,
		    "txq %d, txq gdma id %d, txq cq gdma id %d\n",
		    i, txq->gdma_txq_id, cq->gdma_id);;

		if (cq->gdma_id >= gc->max_num_cqs) {
			if_printf(net, "CQ id %u too large.\n", cq->gdma_id);
			err = EINVAL;
			goto out;
		}

		gc->cq_table[cq->gdma_id] = cq->gdma_cq;

		/* Initialize tx specific data */
		txq->tx_buf_info = malloc(MAX_SEND_BUFFERS_PER_QUEUE *
		    sizeof(struct mana_send_buf_info),
		    M_DEVBUF, M_WAITOK | M_ZERO);
		if (unlikely(txq->tx_buf_info == NULL)) {
			if_printf(net,
			    "Failed to allocate tx buf info for SQ %u\n",
			    txq->gdma_sq->id);
			err = ENOMEM;
			goto out;
		}

		snprintf(txq->txq_mtx_name, nitems(txq->txq_mtx_name),
		    "mana:tx(%d)", i);
		mtx_init(&txq->txq_mtx, txq->txq_mtx_name, NULL, MTX_DEF);

		txq->txq_br = buf_ring_alloc(4 * MAX_SEND_BUFFERS_PER_QUEUE,
		    M_DEVBUF, M_WAITOK, &txq->txq_mtx);
		if (unlikely(txq->txq_br == NULL)) {
			if_printf(net,
			    "Failed to allocate buf ring for SQ %u\n",
			    txq->gdma_sq->id);
			err = ENOMEM;
			goto out;
		}

		/* Allocate taskqueue for deferred send */
		TASK_INIT(&txq->enqueue_task, 0, mana_xmit_taskfunc, txq);
		txq->enqueue_tq = taskqueue_create_fast("mana_tx_enque",
		    M_NOWAIT, taskqueue_thread_enqueue, &txq->enqueue_tq);
		if (unlikely(txq->enqueue_tq == NULL)) {
			if_printf(net,
			    "Unable to create tx %d enqueue task queue\n", i);
			err = ENOMEM;
			goto out;
		}
		taskqueue_start_threads(&txq->enqueue_tq, 1, PI_NET,
		    "mana txq p%u-tx%d", apc->port_idx, i);

		mana_alloc_counters((counter_u64_t *)&txq->stats,
		    sizeof(txq->stats));

		/* Allocate and start the cleanup task on CQ */
		cq->do_not_ring_db = false;
		NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq);
		cq->cleanup_tq =
		    taskqueue_create_fast("mana tx cq cleanup",
		    M_WAITOK, taskqueue_thread_enqueue,
		    &cq->cleanup_tq);

		/* Round-robin the cleanup threads over the CPUs. */
		if (apc->last_tx_cq_bind_cpu < 0)
			apc->last_tx_cq_bind_cpu = CPU_FIRST();
		cq->cpu = apc->last_tx_cq_bind_cpu;
		apc->last_tx_cq_bind_cpu = CPU_NEXT(apc->last_tx_cq_bind_cpu);

		if (apc->bind_cleanup_thread_cpu) {
			cpuset_t cpu_mask;
			CPU_SETOF(cq->cpu, &cpu_mask);
			taskqueue_start_threads_cpuset(&cq->cleanup_tq,
			    1, PI_NET, &cpu_mask,
			    "mana cq p%u-tx%u-cpu%d",
			    apc->port_idx, txq->idx, cq->cpu);
		} else {
			taskqueue_start_threads(&cq->cleanup_tq, 1,
			    PI_NET, "mana cq p%u-tx%u",
			    apc->port_idx, txq->idx);
		}

		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
	}

	return 0;
out:
	mana_destroy_txq(apc);
	return err;
}
/*
 * Tear down one rx queue: destroy the hardware RQ object and its CQ,
 * release stats counters and LRO state, unmap/free every receive buffer
 * and its DMA map, destroy the GDMA RQ, and free the rxq itself.
 * Safe to call with a NULL rxq.
 */
static void
mana_destroy_rxq(struct mana_port_context *apc, struct mana_rxq *rxq,
    bool validate_state)
{
	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
	struct mana_recv_buf_oob *rx_oob;
	int i;

	if (!rxq)
		return;

	if (validate_state) {
		/*
		 * XXX Cancel and drain cleanup task queue here.
		 */
		;
	}

	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);

	mana_deinit_cq(apc, &rxq->rx_cq);

	mana_free_counters((counter_u64_t *)&rxq->stats,
	    sizeof(rxq->stats));

	/* Free LRO resources */
	tcp_lro_free(&rxq->lro);

	for (i = 0; i < rxq->num_rx_buf; i++) {
		rx_oob = &rxq->rx_oobs[i];

		if (rx_oob->mbuf)
			mana_unload_rx_mbuf(apc, rxq, rx_oob, true);

		bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
	}

	if (rxq->gdma_rq)
		mana_gd_destroy_queue(gc, rxq->gdma_rq);

	free(rxq, M_DEVBUF);
}
#define MANA_WQE_HEADER_SIZE 16
#define MANA_WQE_SGE_SIZE 16
/*
 * Create a DMA map and pre-load a receive mbuf for every RX OOB of
 * 'rxq', and accumulate the WQ/CQ sizes (*rxq_size / *cq_size) needed
 * to hold one receive WQE and one completion entry per buffer.
 *
 * Returns 0 on success or a bus_dma/mbuf-load error.  On failure the
 * map created for the failing buffer is destroyed; earlier buffers are
 * left for mana_destroy_rxq() to reclaim.
 */
static int
mana_alloc_rx_wqe(struct mana_port_context *apc,
    struct mana_rxq *rxq, uint32_t *rxq_size, uint32_t *cq_size)
{
	struct mana_recv_buf_oob *rx_oob;
	uint32_t buf_idx;
	int err;

	if (rxq->datasize == 0 || rxq->datasize > PAGE_SIZE) {
		mana_err(NULL,
		    "WARNING: Invalid rxq datasize %u\n", rxq->datasize);
	}

	*rxq_size = 0;
	*cq_size = 0;

	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
		rx_oob = &rxq->rx_oobs[buf_idx];
		memset(rx_oob, 0, sizeof(*rx_oob));

		err = bus_dmamap_create(apc->rx_buf_tag, 0,
		    &rx_oob->dma_map);
		if (err) {
			mana_err(NULL,
			    "Failed to create rx DMA map for buf %d\n",
			    buf_idx);
			return err;
		}

		err = mana_load_rx_mbuf(apc, rxq, rx_oob, true);
		if (err) {
			/*
			 * Distinct message: the old one was a copy-paste
			 * of the dmamap-create failure above.
			 */
			mana_err(NULL,
			    "Failed to load rx mbuf for buf %d\n",
			    buf_idx);
			bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
			return err;
		}

		rx_oob->wqe_req.sgl = rx_oob->sgl;
		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
		rx_oob->wqe_req.inline_oob_size = 0;
		rx_oob->wqe_req.inline_oob_data = NULL;
		rx_oob->wqe_req.flags = 0;
		rx_oob->wqe_req.client_data_unit = 0;

		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
		    MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
		*cq_size += COMP_ENTRY_SIZE;
	}

	return 0;
}
/*
 * Post one receive WQE per pre-built OOB on the RQ and ring the
 * doorbell for each.  Returns ENOSPC if the queue cannot accept a WQE.
 */
static int
mana_push_wqe(struct mana_rxq *rxq)
{
	struct mana_recv_buf_oob *oob;
	uint32_t idx;

	for (idx = 0; idx < rxq->num_rx_buf; idx++) {
		oob = &rxq->rx_oobs[idx];
		if (mana_gd_post_and_ring(rxq->gdma_rq, &oob->wqe_req,
		    &oob->wqe_inf) != 0)
			return ENOSPC;
	}

	return 0;
}
/*
 * Allocate and fully initialize one RX queue bound to EQ 'eq': buffer
 * OOBs and DMA maps, optional LRO, the GDMA RQ and its CQ, the HW WQ
 * object, initial receive WQEs, and the per-CQ cleanup taskqueue
 * (optionally pinned round-robin to a CPU).  Returns the new rxq or
 * NULL on failure (everything partially built is torn down).
 */
static struct mana_rxq *
mana_create_rxq(struct mana_port_context *apc, uint32_t rxq_idx,
    struct mana_eq *eq, if_t ndev)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;
	struct mana_obj_spec wq_spec;
	struct mana_obj_spec cq_spec;
	struct gdma_queue_spec spec;
	struct mana_cq *cq = NULL;
	uint32_t cq_size, rq_size;
	struct gdma_context *gc;
	struct mana_rxq *rxq;
	int err;

	gc = gd->gdma_context;

	/* M_WAITOK never returns NULL, so no NULL check is needed. */
	rxq = malloc(sizeof(*rxq) +
	    RX_BUFFERS_PER_QUEUE * sizeof(struct mana_recv_buf_oob),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	rxq->ndev = ndev;
	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
	rxq->rxq_idx = rxq_idx;
	/*
	 * Minimum size is MCLBYTES(2048) bytes for a mbuf cluster.
	 * Now we just allow maximum size of 4096.
	 */
	rxq->datasize = ALIGN(apc->frame_size, MCLBYTES);
	if (rxq->datasize > MAX_FRAME_SIZE)
		rxq->datasize = MAX_FRAME_SIZE;

	mana_dbg(NULL, "Setting rxq %d datasize %d\n",
	    rxq_idx, rxq->datasize);

	rxq->rxobj = INVALID_MANA_HANDLE;

	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
	if (err)
		goto out;

	/* Create LRO for the RQ */
	if (if_getcapenable(ndev) & IFCAP_LRO) {
		err = tcp_lro_init(&rxq->lro);
		if (err) {
			/* LRO failure is non-fatal; continue without it. */
			if_printf(ndev, "Failed to create LRO for rxq %d\n",
			    rxq_idx);
		} else {
			rxq->lro.ifp = ndev;
		}
	}

	mana_alloc_counters((counter_u64_t *)&rxq->stats,
	    sizeof(rxq->stats));

	rq_size = ALIGN(rq_size, PAGE_SIZE);
	cq_size = ALIGN(cq_size, PAGE_SIZE);

	/* Create RQ */
	memset(&spec, 0, sizeof(spec));
	spec.type = GDMA_RQ;
	spec.monitor_avl_buf = true;
	spec.queue_size = rq_size;
	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
	if (err)
		goto out;

	/* Create RQ's CQ */
	cq = &rxq->rx_cq;
	cq->type = MANA_CQ_TYPE_RX;
	cq->rxq = rxq;

	memset(&spec, 0, sizeof(spec));
	spec.type = GDMA_CQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = cq_size;
	spec.cq.callback = mana_schedule_task;
	spec.cq.parent_eq = eq->eq;
	spec.cq.context = cq;
	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
	if (err)
		goto out;

	memset(&wq_spec, 0, sizeof(wq_spec));
	memset(&cq_spec, 0, sizeof(cq_spec));
	wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
	wq_spec.queue_size = rxq->gdma_rq->queue_size;

	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
	cq_spec.queue_size = cq->gdma_cq->queue_size;
	cq_spec.modr_ctx_id = 0;
	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;

	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
	    &wq_spec, &cq_spec, &rxq->rxobj);
	if (err)
		goto out;

	rxq->gdma_rq->id = wq_spec.queue_index;
	cq->gdma_cq->id = cq_spec.queue_index;

	/* Regions are now owned by the HW object. */
	rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
	cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;

	rxq->gdma_id = rxq->gdma_rq->id;
	cq->gdma_id = cq->gdma_cq->id;

	err = mana_push_wqe(rxq);
	if (err)
		goto out;

	if (cq->gdma_id >= gc->max_num_cqs) {
		err = EINVAL;
		goto out;
	}

	gc->cq_table[cq->gdma_id] = cq->gdma_cq;

	/* Allocate and start the cleanup task on CQ */
	cq->do_not_ring_db = false;

	NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq);
	cq->cleanup_tq =
	    taskqueue_create_fast("mana rx cq cleanup",
	    M_WAITOK, taskqueue_thread_enqueue,
	    &cq->cleanup_tq);

	/* Round-robin CQ cleanup threads across CPUs. */
	if (apc->last_rx_cq_bind_cpu < 0)
		apc->last_rx_cq_bind_cpu = CPU_FIRST();
	cq->cpu = apc->last_rx_cq_bind_cpu;
	apc->last_rx_cq_bind_cpu = CPU_NEXT(apc->last_rx_cq_bind_cpu);

	if (apc->bind_cleanup_thread_cpu) {
		cpuset_t cpu_mask;
		CPU_SETOF(cq->cpu, &cpu_mask);
		taskqueue_start_threads_cpuset(&cq->cleanup_tq,
		    1, PI_NET, &cpu_mask,
		    "mana cq p%u-rx%u-cpu%d",
		    apc->port_idx, rxq->rxq_idx, cq->cpu);
	} else {
		taskqueue_start_threads(&cq->cleanup_tq, 1,
		    PI_NET, "mana cq p%u-rx%u",
		    apc->port_idx, rxq->rxq_idx);
	}

	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
out:
	if (!err)
		return rxq;

	if_printf(ndev, "Failed to create RXQ: err = %d\n", err);

	/*
	 * mana_destroy_rxq() already deinitializes rxq->rx_cq (the object
	 * 'cq' points into) and frees 'rxq'.  The former extra
	 * mana_deinit_cq(apc, cq) call after it operated on freed memory
	 * (use-after-free) and has been removed.
	 */
	mana_destroy_rxq(apc, rxq, false);

	return NULL;
}
/*
 * Create one RX queue per configured queue, each bound to its EQ.  On
 * the first failure, queues already stored in apc->rxqs are left for
 * the caller to tear down via mana_destroy_vport().
 */
static int
mana_add_rx_queues(struct mana_port_context *apc, if_t ndev)
{
	struct mana_context *ac = apc->ac;
	struct mana_rxq *rxq;
	int qidx;

	for (qidx = 0; qidx < apc->num_queues; qidx++) {
		rxq = mana_create_rxq(apc, qidx, &ac->eqs[qidx], ndev);
		if (rxq == NULL)
			return ENOMEM;
		apc->rxqs[qidx] = rxq;
	}

	/* Queue 0 provides the default RX object. */
	apc->default_rxobj = apc->rxqs[0]->rxobj;

	return 0;
}
/*
 * Tear down every RX queue, then the TX queues, then release the
 * vPort configuration.
 */
static void
mana_destroy_vport(struct mana_port_context *apc)
{
	uint32_t qidx;

	for (qidx = 0; qidx < apc->num_queues; qidx++) {
		if (apc->rxqs[qidx] == NULL)
			continue;
		mana_destroy_rxq(apc, apc->rxqs[qidx], true);
		apc->rxqs[qidx] = NULL;
	}

	mana_destroy_txq(apc);

	mana_uncfg_vport(apc);
}
/*
 * Configure the vPort with our protection domain and doorbell, then
 * create the TX queues.  RX queues are added separately by
 * mana_add_rx_queues().
 */
static int
mana_create_vport(struct mana_port_context *apc, if_t net)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;
	int err;

	apc->default_rxobj = INVALID_MANA_HANDLE;

	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
	if (err != 0)
		return err;

	return mana_create_txq(apc, net);
}
/* Fill the RSS indirection table round-robin over the active queues. */
static void mana_rss_table_init(struct mana_port_context *apc)
{
	int idx;

	for (idx = 0; idx < MANA_INDIRECT_TABLE_SIZE; idx++)
		apc->indir_table[idx] = idx % apc->num_queues;
}
/*
 * Push RSS configuration for the port.  When 'update_tab' is set the
 * rxobj table is first refreshed from the current indirection table;
 * 'update_hash' pushes a new hash key.  On success the RQs are fenced
 * so the new steering is fully in effect before returning.
 */
int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
    bool update_hash, bool update_tab)
{
	uint32_t queue_idx;
	int err;
	int i;

	if (update_tab) {
		/* Translate queue indices into HW RX object handles. */
		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
			queue_idx = apc->indir_table[i];
			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
		}
	}

	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
	if (err)
		return err;

	mana_fence_rqs(apc);

	return 0;
}
/*
 * Initialize per-port context state and clamp the configured queue
 * counts to what the vPort reports it supports.  On query failure the
 * resources set up by mana_init_port_context() are released again.
 */
static int
mana_init_port(if_t ndev)
{
	struct mana_port_context *apc = if_getsoftc(ndev);
	uint32_t max_txq, max_rxq, max_queues;
	int port_idx = apc->port_idx;
	uint32_t num_indirect_entries;
	int err;

	err = mana_init_port_context(apc);
	if (err)
		return err;

	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
	    &num_indirect_entries);
	if (err) {
		if_printf(ndev, "Failed to query info for vPort %d\n",
		    port_idx);
		goto reset_apc;
	}

	/* A queue pair needs both a TXQ and an RXQ. */
	max_queues = min_t(uint32_t, max_txq, max_rxq);
	if (apc->max_queues > max_queues)
		apc->max_queues = max_queues;

	if (apc->num_queues > apc->max_queues)
		apc->num_queues = apc->max_queues;

	return 0;

reset_apc:
	/*
	 * Release the RX buffer tag and rxq array -- presumably set up by
	 * mana_init_port_context(); verify against that function.
	 */
	bus_dma_tag_destroy(apc->rx_buf_tag);
	apc->rx_buf_tag = NULL;
	free(apc->rxqs, M_DEVBUF);
	apc->rxqs = NULL;
	return err;
}
/*
 * Allocate all TX/RX queues for the port and program the initial RSS
 * steering.  On any failure the vPort (and any queues created so far)
 * is destroyed before returning the error.
 */
int
mana_alloc_queues(if_t ndev)
{
	struct mana_port_context *apc = if_getsoftc(ndev);
	int err;

	err = mana_create_vport(apc, ndev);
	if (err)
		return err;

	err = mana_add_rx_queues(apc, ndev);
	if (err)
		goto destroy_vport;

	/* RSS only makes sense with more than one RX queue. */
	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;

	mana_rss_table_init(apc);

	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
	if (err)
		goto destroy_vport;

	return 0;

destroy_vport:
	mana_destroy_vport(apc);
	return err;
}
/*
 * Bring the port up: allocate TX/RX queues, attach per-queue sysctls,
 * and mark the interface running/link-up.  Caller holds the APC lock.
 */
static int
mana_up(struct mana_port_context *apc)
{
	int err;

	mana_dbg(NULL, "mana_up called\n");

	err = mana_alloc_queues(apc->ndev);
	if (err) {
		/* Message fixed: original read "Faile alloc mana queues". */
		mana_err(NULL, "Failed to allocate mana queues: %d\n", err);
		return err;
	}

	/* Add queue specific sysctl */
	mana_sysctl_add_queues(apc);

	apc->port_is_up = true;

	/* Ensure port state updated before txq state */
	wmb();

	if_link_state_change(apc->ndev, LINK_STATE_UP);
	if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	return 0;
}
/* ifnet init handler: bring the port up if it is not already up. */
static void
mana_init(void *arg)
{
	struct mana_port_context *apc = arg;

	MANA_APC_LOCK_LOCK(apc);
	if (!apc->port_is_up)
		mana_up(apc);
	MANA_APC_LOCK_UNLOCK(apc);
}
/*
 * Free all queues of a port that has already been marked down
 * (port_is_up must be false).  Drains in-flight TX first, then
 * disables RSS steering and destroys the vPort.
 */
static int
mana_dealloc_queues(if_t ndev)
{
	struct mana_port_context *apc = if_getsoftc(ndev);
	struct mana_txq *txq;
	int i, err;

	if (apc->port_is_up)
		return EINVAL;

	/* No packet can be transmitted now since apc->port_is_up is false.
	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
	 * a txq because it may not timely see apc->port_is_up being cleared
	 * to false, but it doesn't matter since mana_start_xmit() drops any
	 * new packets due to apc->port_is_up being false.
	 *
	 * Drain all the in-flight TX packets
	 */
	for (i = 0; i < apc->num_queues; i++) {
		txq = &apc->tx_qp[i].txq;

		struct mana_cq *tx_cq = &apc->tx_qp[i].tx_cq;
		struct mana_cq *rx_cq = &(apc->rxqs[i]->rx_cq);

		/* Suppress doorbell rings while draining. */
		tx_cq->do_not_ring_db = true;
		rx_cq->do_not_ring_db = true;

		/* Schedule a cleanup task */
		taskqueue_enqueue(tx_cq->cleanup_tq, &tx_cq->cleanup_task);

		/* Sleep-poll until the cleanup task reaps every send. */
		while (atomic_read(&txq->pending_sends) > 0)
			usleep_range(1000, 2000);
	}

	/* We're 100% sure the queues can no longer be woken up, because
	 * we're sure now mana_poll_tx_cq() can't be running.
	 */

	apc->rss_state = TRI_STATE_FALSE;
	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
	if (err) {
		if_printf(ndev, "Failed to disable vPort: %d\n", err);
		return err;
	}

	mana_destroy_vport(apc);

	return 0;
}
/*
 * Mark the port down and, if it was previously up, detach queue
 * sysctls and free all queues.  The previous state is kept in
 * port_st_save.  Caller holds the APC lock.
 */
static int
mana_down(struct mana_port_context *apc)
{
	int err = 0;

	apc->port_st_save = apc->port_is_up;
	apc->port_is_up = false;

	/* Ensure port state updated before txq state */
	wmb();

	if (apc->port_st_save) {
		if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE,
		    IFF_DRV_RUNNING);
		if_link_state_change(apc->ndev, LINK_STATE_DOWN);

		mana_sysctl_free_queues(apc);

		err = mana_dealloc_queues(apc->ndev);
		if (err) {
			if_printf(apc->ndev,
			    "Failed to bring down mana interface: %d\n", err);
		}
	}

	return err;
}
/*
 * Detach the ifnet of one port and free its port context.  The ifnet
 * is detached from the ethernet layer first, then the port is brought
 * down under the APC lock and the context is cleaned up.
 */
int
mana_detach(if_t ndev)
{
	struct mana_port_context *apc = if_getsoftc(ndev);
	int err;

	ether_ifdetach(ndev);

	if (!apc)
		return 0;

	MANA_APC_LOCK_LOCK(apc);
	err = mana_down(apc);
	MANA_APC_LOCK_UNLOCK(apc);

	mana_cleanup_port_context(apc);

	MANA_APC_LOCK_DESTROY(apc);

	free(apc, M_DEVBUF);

	return err;
}
/*
 * Create the ifnet for vPort 'port_idx': allocate and initialize the
 * port context, query/clamp the port configuration, set capability and
 * TSO parameters, and attach to the network stack.  On success
 * *ndev_storage holds the new ifnet; on failure it is reset to NULL
 * and everything allocated here is released.
 */
static int
mana_probe_port(struct mana_context *ac, int port_idx,
    if_t *ndev_storage)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct mana_port_context *apc;
	uint32_t hwassist;
	if_t ndev;
	int err;

	/* if_alloc_dev() cannot fail; the old NULL check was dead code. */
	ndev = if_alloc_dev(IFT_ETHER, gc->dev);
	*ndev_storage = ndev;

	/* M_WAITOK allocations never return NULL. */
	apc = malloc(sizeof(*apc), M_DEVBUF, M_WAITOK | M_ZERO);
	apc->ac = ac;
	apc->ndev = ndev;
	apc->max_queues = gc->max_num_queues;
	apc->num_queues = min_t(unsigned int,
	    gc->max_num_queues, MANA_MAX_NUM_QUEUES);
	apc->port_handle = INVALID_MANA_HANDLE;
	apc->port_idx = port_idx;
	apc->frame_size = DEFAULT_FRAME_SIZE;
	/* -1 means "no CQ cleanup thread bound yet"; see mana_create_rxq. */
	apc->last_tx_cq_bind_cpu = -1;
	apc->last_rx_cq_bind_cpu = -1;
	apc->vport_use_count = 0;

	MANA_APC_LOCK_INIT(apc);

	if_initname(ndev, device_get_name(gc->dev), port_idx);
	if_setdev(ndev, gc->dev);
	if_setsoftc(ndev, apc);

	if_setflags(ndev, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setinitfn(ndev, mana_init);
	if_settransmitfn(ndev, mana_start_xmit);
	if_setqflushfn(ndev, mana_qflush);
	if_setioctlfn(ndev, mana_ioctl);
	if_setgetcounterfn(ndev, mana_get_counter);

	if_setmtu(ndev, ETHERMTU);
	if_setbaudrate(ndev, IF_Gbps(100));

	mana_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);

	err = mana_init_port(ndev);
	if (err)
		goto reset_apc;

	if_setcapabilitiesbit(ndev,
	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 |
	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 |
	    IFCAP_TSO4 | IFCAP_TSO6 |
	    IFCAP_LRO | IFCAP_LINKSTATE, 0);

	/* Enable all available capabilities by default. */
	if_setcapenable(ndev, if_getcapabilities(ndev));

	/* TSO parameters */
	if_sethwtsomax(ndev, MANA_TSO_MAX_SZ -
	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
	if_sethwtsomaxsegcount(ndev, MAX_MBUF_FRAGS);
	if_sethwtsomaxsegsize(ndev, PAGE_SIZE);

	/* Derive hwassist flags from the enabled capabilities. */
	hwassist = 0;
	if (if_getcapenable(ndev) & (IFCAP_TSO4 | IFCAP_TSO6))
		hwassist |= CSUM_TSO;
	if (if_getcapenable(ndev) & IFCAP_TXCSUM)
		hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
	if (if_getcapenable(ndev) & IFCAP_TXCSUM_IPV6)
		hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
	mana_dbg(NULL, "set hwassist 0x%x\n", hwassist);
	if_sethwassist(ndev, hwassist);

	ifmedia_init(&apc->media, IFM_IMASK,
	    mana_ifmedia_change, mana_ifmedia_status);
	ifmedia_add(&apc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&apc->media, IFM_ETHER | IFM_AUTO);

	ether_ifattach(ndev, apc->mac_addr);

	/* Initialize statistics */
	mana_alloc_counters((counter_u64_t *)&apc->port_stats,
	    sizeof(struct mana_port_stats));
	mana_sysctl_add_port(apc);

	/* Tell the stack that the interface is not active */
	if_setdrvflagbits(ndev, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

	return 0;

reset_apc:
	free(apc, M_DEVBUF);
	*ndev_storage = NULL;
	if_printf(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
	if_free(ndev);
	return err;
}
/*
 * Top-level device probe: register with the GDMA layer, create EQs,
 * query the device configuration, and probe every advertised vPort.
 * On any failure mana_remove() tears down whatever was set up.
 */
int mana_probe(struct gdma_dev *gd)
{
	struct gdma_context *gc = gd->gdma_context;
	device_t dev = gc->dev;
	struct mana_context *ac;
	int err;
	int i;

	device_printf(dev, "%s protocol version: %d.%d.%d\n", DEVICE_NAME,
	    MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);

	err = mana_gd_register_device(gd);
	if (err)
		return err;

	/*
	 * M_WAITOK cannot fail; the old "if (!ac) return ENOMEM" was dead
	 * code (and would also have leaked the GDMA registration).
	 */
	ac = malloc(sizeof(*ac), M_DEVBUF, M_WAITOK | M_ZERO);
	ac->gdma_dev = gd;
	ac->num_ports = 1;
	gd->driver_data = ac;

	err = mana_create_eq(ac);
	if (err)
		goto out;

	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
	    MANA_MICRO_VERSION, &ac->num_ports);
	if (err)
		goto out;

	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
		ac->num_ports = MAX_PORTS_IN_MANA_DEV;

	for (i = 0; i < ac->num_ports; i++) {
		err = mana_probe_port(ac, i, &ac->ports[i]);
		if (err) {
			device_printf(dev,
			    "Failed to probe mana port %d\n", i);
			break;
		}
	}

out:
	if (err)
		mana_remove(gd);

	return err;
}
/*
 * Undo mana_probe(): detach and free every port ifnet, destroy the
 * EQs, and deregister from the GDMA layer.
 *
 * NOTE(review): when a port slot is NULL (partial probe), teardown
 * jumps straight to deregistration, so mana_destroy_eq() only runs
 * when all ports were present -- confirm this matches the
 * probe-failure path's expectations.
 */
void
mana_remove(struct gdma_dev *gd)
{
	struct gdma_context *gc = gd->gdma_context;
	struct mana_context *ac = gd->driver_data;
	device_t dev = gc->dev;
	if_t ndev;
	int i;

	for (i = 0; i < ac->num_ports; i++) {
		ndev = ac->ports[i];
		if (!ndev) {
			if (i == 0)
				device_printf(dev, "No net device to remove\n");
			goto out;
		}

		mana_detach(ndev);

		if_free(ndev);
	}

	mana_destroy_eq(ac);

out:
	mana_gd_deregister_device(gd);
	gd->driver_data = NULL;
	gd->gdma_context = NULL;
	free(ac, M_DEVBUF);
}
diff --git a/sys/dev/mge/if_mge.c b/sys/dev/mge/if_mge.c
index 2331cb9f8f35..3cd05f104876 100644
--- a/sys/dev/mge/if_mge.c
+++ b/sys/dev/mge/if_mge.c
@@ -1,2161 +1,2155 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
* Copyright (C) 2009-2015 Semihalf
* Copyright (C) 2015 Stormshield
* All rights reserved.
*
* Developed by Semihalf.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of MARVELL nor the names of contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <sys/sockio.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/mdio/mdio.h>
#include <dev/mge/if_mgevar.h>
#include <arm/mv/mvreg.h>
#include <arm/mv/mvvar.h>
#include "miibus_if.h"
#include "mdio_if.h"
#define MGE_DELAY(x) pause("SMI access sleep", (x) / tick_sbt)
static int mge_probe(device_t dev);
static int mge_attach(device_t dev);
static int mge_detach(device_t dev);
static int mge_shutdown(device_t dev);
static int mge_suspend(device_t dev);
static int mge_resume(device_t dev);
static int mge_miibus_readreg(device_t dev, int phy, int reg);
static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
static int mge_mdio_readreg(device_t dev, int phy, int reg);
static int mge_mdio_writereg(device_t dev, int phy, int reg, int value);
static int mge_ifmedia_upd(if_t ifp);
static void mge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr);
static void mge_init(void *arg);
static void mge_init_locked(void *arg);
static void mge_start(if_t ifp);
static void mge_start_locked(if_t ifp);
static void mge_watchdog(struct mge_softc *sc);
static int mge_ioctl(if_t ifp, u_long command, caddr_t data);
static uint32_t mge_tfut_ipg(uint32_t val, int ver);
static uint32_t mge_rx_ipg(uint32_t val, int ver);
static void mge_ver_params(struct mge_softc *sc);
static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
static void mge_intr_rxtx(void *arg);
static void mge_intr_rx(void *arg);
static void mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
uint32_t int_cause_ext);
static int mge_intr_rx_locked(struct mge_softc *sc, int count);
static void mge_intr_tx(void *arg);
static void mge_intr_tx_locked(struct mge_softc *sc);
static void mge_intr_misc(void *arg);
static void mge_intr_sum(void *arg);
static void mge_intr_err(void *arg);
static void mge_stop(struct mge_softc *sc);
static void mge_tick(void *msc);
static uint32_t mge_set_port_serial_control(uint32_t media);
static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
static void mge_set_mac_address(struct mge_softc *sc);
static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
uint8_t queue);
static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
static int mge_allocate_dma(struct mge_softc *sc);
static int mge_alloc_desc_dma(struct mge_softc *sc,
struct mge_desc_wrapper* desc_tab, uint32_t size,
bus_dma_tag_t *buffer_tag);
static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
struct mbuf **mbufp, bus_addr_t *paddr);
static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg,
int error);
static void mge_free_dma(struct mge_softc *sc);
static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
static void mge_offload_process_frame(if_t ifp, struct mbuf *frame,
uint32_t status, uint16_t bufsize);
static void mge_offload_setup_descriptor(struct mge_softc *sc,
struct mge_desc_wrapper *dw);
static uint8_t mge_crc8(uint8_t *data, int size);
static void mge_setup_multicast(struct mge_softc *sc);
static void mge_set_rxic(struct mge_softc *sc);
static void mge_set_txic(struct mge_softc *sc);
static void mge_add_sysctls(struct mge_softc *sc);
static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
/*
 * Newbus method table: device lifecycle hooks plus the MII and MDIO
 * register accessors exported to attached PHY busses.
 */
static device_method_t mge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mge_probe),
	DEVMETHOD(device_attach,	mge_attach),
	DEVMETHOD(device_detach,	mge_detach),
	DEVMETHOD(device_shutdown,	mge_shutdown),
	DEVMETHOD(device_suspend,	mge_suspend),
	DEVMETHOD(device_resume,	mge_resume),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
	/* MDIO interface */
	DEVMETHOD(mdio_readreg,		mge_mdio_readreg),
	DEVMETHOD(mdio_writereg,	mge_mdio_writereg),
	{ 0, 0 }
};
DEFINE_CLASS_0(mge, mge_driver, mge_methods, sizeof(struct mge_softc));
static int switch_attached = 0;
DRIVER_MODULE(mge, simplebus, mge_driver, 0, 0);
DRIVER_MODULE(miibus, mge, miibus_driver, 0, 0);
DRIVER_MODULE(mdio, mge, mdio_driver, 0, 0);
MODULE_DEPEND(mge, ether, 1, 1, 1);
MODULE_DEPEND(mge, miibus, 1, 1, 1);
MODULE_DEPEND(mge, mdio, 1, 1, 1);
/*
 * Bus resources: one memory window plus up to three shareable IRQs;
 * the list is terminated by the -1 sentinel entry.
 */
static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};
/*
 * Interrupt handlers with their descriptions, indexed by IRQ slot.
 * Entry 0 is the aggregated rx/tx handler used when the controller
 * exposes a single interrupt (see mge_intr_cnt in mge_ver_params()).
 */
static struct {
	driver_intr_t *handler;
	char * description;
} mge_intrs[MGE_INTR_COUNT + 1] = {
	{ mge_intr_rxtx,"GbE aggregated interrupt" },
	{ mge_intr_rx,	"GbE receive interrupt" },
	{ mge_intr_tx,	"GbE transmit interrupt" },
	{ mge_intr_misc,"GbE misc interrupt" },
	{ mge_intr_sum,	"GbE summary interrupt" },
	{ mge_intr_err,	"GbE error interrupt" },
};
/* SMI access interlock */
static struct sx sx_smi;
/*
 * Read a PHY register through the GE SMI engine.  Sleeps (MGE_DELAY)
 * while polling for the engine to go idle and for read-valid; returns
 * ~0U on timeout.
 *
 * This is the READ path, so it now uses the MGE_SMI_READ_* retry/delay
 * constants and "read" timeout messages; the original used the WRITE
 * constants and messages here -- a copy/paste swap with
 * mv_write_ge_smi() (which conversely used the READ constants).
 */
static uint32_t
mv_read_ge_smi(device_t dev, int phy, int reg)
{
	uint32_t timeout;
	uint32_t ret;
	struct mge_softc *sc;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("NULL softc ptr!"));
	timeout = MGE_SMI_READ_RETRIES;

	MGE_SMI_LOCK();
	/* Wait until the SMI engine is idle. */
	while (--timeout &&
	    (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
		MGE_DELAY(MGE_SMI_READ_DELAY);

	if (timeout == 0) {
		device_printf(dev, "SMI read timeout.\n");
		ret = ~0U;
		goto out;
	}

	/* Issue the read command. */
	MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));

	/* Wait till finished. */
	timeout = MGE_SMI_READ_RETRIES;
	while (--timeout &&
	    !((MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_READVALID)))
		MGE_DELAY(MGE_SMI_READ_DELAY);

	if (timeout == 0) {
		device_printf(dev, "SMI read validation timeout.\n");
		ret = ~0U;
		goto out;
	}

	/* Wait for the data to update in the SMI register */
	MGE_DELAY(MGE_SMI_DELAY);
	ret = MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;

out:
	MGE_SMI_UNLOCK();
	return (ret);
}
/*
 * Write a PHY register through the GE SMI engine.  Sleeps (MGE_DELAY)
 * while polling for the engine to go idle before issuing the write.
 *
 * This is the WRITE path, so it now uses the MGE_SMI_WRITE_* retry/
 * delay constants and a "write" timeout message; the original used the
 * READ constants and message here -- the counterpart of the swap fixed
 * in mv_read_ge_smi().
 */
static void
mv_write_ge_smi(device_t dev, int phy, int reg, uint32_t value)
{
	uint32_t timeout;
	struct mge_softc *sc;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("NULL softc ptr!"));

	MGE_SMI_LOCK();
	timeout = MGE_SMI_WRITE_RETRIES;
	/* Wait until the SMI engine is idle. */
	while (--timeout &&
	    (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
		MGE_DELAY(MGE_SMI_WRITE_DELAY);

	if (timeout == 0) {
		device_printf(dev, "SMI write timeout.\n");
		goto out;
	}

	MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
	    (value & MGE_SMI_DATA_MASK)));

out:
	MGE_SMI_UNLOCK();
}
/*
 * Read an external PHY register through the master port's (phy_sc) SMI
 * engine.  Busy-waits with DELAY() rather than sleeping, so it is
 * usable in contexts where sleeping is not allowed.  On timeout the
 * last SMI register contents are returned regardless.
 */
static int
mv_read_ext_phy(device_t dev, int phy, int reg)
{
	uint32_t retries;
	struct mge_softc *sc;
	uint32_t ret;

	sc = device_get_softc(dev);

	MGE_SMI_LOCK();
	/* Issue the read command on the master port's SMI engine. */
	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));

	retries = MGE_SMI_READ_RETRIES;
	while (--retries &&
	    !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
		DELAY(MGE_SMI_READ_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while reading from PHY\n");

	ret = MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
	MGE_SMI_UNLOCK();

	return (ret);
}
/*
 * Write an external PHY register through the master port's (phy_sc)
 * SMI engine.  Busy-waits with DELAY() for the write to complete; a
 * timeout is reported but not otherwise acted upon.
 */
static void
mv_write_ext_phy(device_t dev, int phy, int reg, int value)
{
	uint32_t retries;
	struct mge_softc *sc;

	sc = device_get_softc(dev);

	MGE_SMI_LOCK();
	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
	    (value & MGE_SMI_DATA_MASK)));

	retries = MGE_SMI_WRITE_RETRIES;
	while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
		DELAY(MGE_SMI_WRITE_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while writing to PHY\n");
	MGE_SMI_UNLOCK();
}
/*
 * Obtain the MAC address for the port: prefer the device tree's
 * "local-mac-address" property when it is present and non-zero,
 * otherwise fall back to the address currently programmed in the
 * MAC address registers.
 */
static void
mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
{
	uint32_t mac_l, mac_h;
	uint8_t lmac[6];
	int i;

	/*
	 * Retrieve hw address from the device tree.
	 */
	if (OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6) == 6) {
		for (i = 0; i < 6; i++) {
			if (lmac[i] != 0) {
				/* At least one non-zero byte: use it. */
				bcopy(lmac, addr, 6);
				return;
			}
		}
	}

	/*
	 * Fall back -- use the currently programmed address.
	 */
	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);

	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
}
/*
 * Encode a TX FIFO urgent threshold IPG value into its register
 * position (bits 17:4); version-1 controllers have a narrower field
 * (14 bits) than version 2 (16 bits).
 */
static uint32_t
mge_tfut_ipg(uint32_t val, int ver)
{
	uint32_t mask;

	mask = (ver == 1) ? 0x3fff : 0xffff;
	return ((val & mask) << 4);
}
/*
 * Encode an RX IPG value into its register position.  Version 1 packs
 * a 14-bit field at bits 21:8; version 2 (and any other value) splits
 * a 16-bit field: bit 15 lands at bit 25, bits 14:0 at bits 21:7.
 */
static uint32_t
mge_rx_ipg(uint32_t val, int ver)
{
	if (ver == 1)
		return ((val & 0x3fff) << 8);

	return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
}
/*
 * Derive controller-version-specific parameters from the SoC ID:
 * register layout version, MTU register value, IPG field maxima,
 * TX arbiter/token defaults, number of interrupt lines, and whether
 * HW checksum offload is used.
 */
static void
mge_ver_params(struct mge_softc *sc)
{
	uint32_t d, r;

	soc_id(&d, &r);
	if (d == MV_DEV_88F6281 || d == MV_DEV_88F6781 ||
	    d == MV_DEV_88F6282 ||
	    d == MV_DEV_MV78100 ||
	    d == MV_DEV_MV78100_Z0 ||
	    (d & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY) {
		sc->mge_ver = 2;
		sc->mge_mtu = 0x4e8;
		sc->mge_tfut_ipg_max = 0xFFFF;
		sc->mge_rx_ipg_max = 0xFFFF;
		sc->mge_tx_arb_cfg = 0xFC0000FF;
		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	} else {
		sc->mge_ver = 1;
		sc->mge_mtu = 0x458;
		sc->mge_tfut_ipg_max = 0x3FFF;
		sc->mge_rx_ipg_max = 0x3FFF;
		sc->mge_tx_arb_cfg = 0x000000FF;
		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	}
	/* 88RC8180 exposes a single (aggregated) interrupt line. */
	if (d == MV_DEV_88RC8180)
		sc->mge_intr_cnt = 1;
	else
		sc->mge_intr_cnt = 2;

	/*
	 * NOTE(review): HW csum is disabled on these MV78x60 variants --
	 * presumably an erratum; confirm against the SoC documentation.
	 */
	if (d == MV_DEV_MV78160 || d == MV_DEV_MV78260 || d == MV_DEV_MV78460)
		sc->mge_hw_csum = 0;
	else
		sc->mge_hw_csum = 1;
}
/*
 * Program the interface's link-level address into the MAC address
 * registers and point the unicast filter at its last byte.
 */
static void
mge_set_mac_address(struct mge_softc *sc)
{
	uint8_t *if_mac;
	uint32_t mac_l, mac_h;

	MGE_GLOBAL_LOCK_ASSERT(sc);

	/*
	 * Use an unsigned view of the lladdr: with the previous plain
	 * (signed) char pointer, address bytes >= 0x80 sign-extended
	 * during the shifts below and corrupted mac_l/mac_h.
	 */
	if_mac = (uint8_t *)if_getlladdr(sc->ifp);

	mac_l = ((uint32_t)if_mac[4] << 8) | (if_mac[5]);
	mac_h = ((uint32_t)if_mac[0] << 24) | ((uint32_t)if_mac[1] << 16) |
	    ((uint32_t)if_mac[2] << 8) | ((uint32_t)if_mac[3] << 0);

	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);

	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
}
/*
 * Program the unicast DA filter table so that only the low nibble of
 * 'last_byte' matches, with matching frames steered to 'queue'; all
 * other table entries are cleared.
 */
static void
mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
{
	uint32_t idx, off, val, nibble, i;

	nibble = last_byte & 0xf;
	idx = nibble / MGE_UCAST_REG_NUMBER;
	off = (nibble % MGE_UCAST_REG_NUMBER) * 8;
	/* Pass bit plus queue number, positioned at the byte for 'nibble'. */
	val = (1 | (queue << 1)) << off;

	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
		MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i),
		    (i == idx) ? val : 0);
}
/*
 * Enable or disable promiscuous reception based on IFF_PROMISC.  In
 * promiscuous mode, the port's UPM bit is set and every unicast and
 * multicast filter entry is opened (pass, steered to 'queue'); when
 * leaving it, the multicast tables are cleared and the unicast filter
 * is re-programmed from the MAC address.
 */
static void
mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
{
	uint32_t port_config;
	uint32_t reg_val, i;

	/* Enable or disable promiscuous mode as needed */
	if (if_getflags(sc->ifp) & IFF_PROMISC) {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config |= PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

		/* Same pass-to-queue entry replicated into all four bytes. */
		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
		    (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
		}

		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);

	} else {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config &= ~PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
		}

		mge_set_mac_address(sc);
	}
}
/*
 * Single-segment busdma load callback: store the segment's bus address
 * into the u_int32_t pointed to by 'arg'.
 */
static void
mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	*(u_int32_t *)arg = segs[0].ds_addr;
}
/*
 * Replace (or install) the receive mbuf behind 'map': allocate a new
 * cluster, unload any previous buffer, DMA-load the new one, and
 * return its mbuf and bus address via 'mbufp'/'paddr'.  Returns
 * ENOBUFS if no cluster is available; a busdma load failure panics.
 */
static int
mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
    bus_addr_t *paddr)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	int error;
	int nsegs;

	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

	new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (new_mbuf == NULL)
		return (ENOBUFS);
	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

	if (*mbufp) {
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
	}

	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
	    BUS_DMA_NOWAIT);
	/*
	 * Check 'error' before 'nsegs': on failure nsegs may be left
	 * uninitialized, and the old KASSERT read it unconditionally
	 * before the error check.
	 */
	if (error != 0 || nsegs != 1)
		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);

	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

	(*mbufp) = new_mbuf;
	(*paddr) = seg->ds_addr;
	return (0);
}
/*
 * Allocate 'size' DMA-coherent descriptors into 'tab', chain them into
 * a ring (each next_desc points at its successor, the last wraps back
 * to the first), then create a busdma tag plus one map per descriptor
 * for the mbuf buffers.  On failure, entries already allocated remain
 * in 'tab' for mge_free_desc() to reclaim.
 */
static int
mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t *buffer_tag)
{
	struct mge_desc_wrapper *dw;
	bus_addr_t desc_paddr;
	int i, error;

	desc_paddr = 0;
	/* Walk backwards so each descriptor can link to its successor. */
	for (i = size - 1; i >= 0; i--) {
		dw = &(tab[i]);
		error = bus_dmamem_alloc(sc->mge_desc_dtag,
		    (void**)&(dw->mge_desc),
		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &(dw->desc_dmap));
		if (error) {
			if_printf(sc->ifp, "failed to allocate DMA memory\n");
			dw->mge_desc = NULL;
			return (ENXIO);
		}
		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
		if (error) {
			if_printf(sc->ifp, "can't load descriptor\n");
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		/* Chain descriptors */
		dw->mge_desc->next_desc = desc_paddr;
		desc_paddr = dw->mge_desc_paddr;
	}
	/* Close the ring: last descriptor links back to the first. */
	tab[size - 1].mge_desc->next_desc = desc_paddr;

	/* Allocate a busdma tag for mbufs. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES, 1,			/* maxsize, nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    buffer_tag);			/* dmat */
	if (error) {
		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
		return (ENXIO);
	}

	/* Create per-descriptor mbuf maps (used for both TX and RX rings). */
	for (i = 0; i < size; i++) {
		dw = &(tab[i]);
		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
		if (error) {
			if_printf(sc->ifp, "failed to create map for mbuf\n");
			return (ENXIO);
		}

		dw->buffer = (struct mbuf*)NULL;
		dw->mge_desc->buffer = (bus_addr_t)NULL;
	}

	return (0);
}
/*
 * Create the descriptor DMA tag, allocate the TX and RX descriptor
 * rings, and pre-fill every RX descriptor with a fresh mbuf cluster.
 *
 * NOTE(review): the return values of bus_dma_tag_create(),
 * mge_alloc_desc_dma() and mge_new_rxbuf() are all ignored and the
 * function always returns 0, so allocation failures go unreported to
 * the caller -- consider propagating them.
 */
static int
mge_allocate_dma(struct mge_softc *sc)
{
	struct mge_desc_wrapper *dw;
	int i;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
	    16, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->mge_desc_dtag);		/* dmat */

	mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
	    &sc->mge_tx_dtag);
	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);

	/* Hand an mbuf cluster to every RX descriptor. */
	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
	}

	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;

	return (0);
}
/*
 * Release the descriptors in 'tab' and, when 'free_mbufs' is set (RX
 * rings), the mbufs and DMA mappings attached to them.  Tolerates
 * partially initialized rings: both the buffer map and the descriptor
 * map are checked before being touched.
 */
static void
mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
{
	struct mge_desc_wrapper *wrap;
	uint32_t n;

	for (n = 0; n < size; n++) {
		wrap = tab + n;

		/* Buffer map first (and the mbuf itself, if requested). */
		if (wrap->buffer_dmap) {
			if (free_mbufs) {
				bus_dmamap_sync(buffer_tag, wrap->buffer_dmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(buffer_tag,
				    wrap->buffer_dmap);
			}
			bus_dmamap_destroy(buffer_tag, wrap->buffer_dmap);
			if (free_mbufs)
				m_freem(wrap->buffer);
		}

		/* Then the descriptor memory itself. */
		if (wrap->desc_dmap) {
			bus_dmamap_sync(sc->mge_desc_dtag, wrap->desc_dmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->mge_desc_dtag, wrap->desc_dmap);
			bus_dmamem_free(sc->mge_desc_dtag, wrap->mge_desc,
			    wrap->desc_dmap);
		}
	}
}
/* Tear down everything mge_allocate_dma() set up. */
static void
mge_free_dma(struct mge_softc *sc)
{

	/* Rings first: mbufs (RX only), maps and descriptor memory. */
	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);

	/* Then the per-ring mbuf tags ... */
	bus_dma_tag_destroy(sc->mge_tx_dtag);
	bus_dma_tag_destroy(sc->mge_rx_dtag);

	/* ... and finally the shared descriptor tag. */
	bus_dma_tag_destroy(sc->mge_desc_dtag);
}
/*
 * Rebuild the RX ring after a resource error: free all RX descriptors
 * and mbufs, reallocate them, point the hardware at the new ring and
 * re-enable the RX queue.  Called with the receive lock held.
 *
 * NOTE(review): the return values of mge_alloc_desc_dma() and
 * mge_new_rxbuf() are ignored here; an allocation failure would leave
 * the ring partially initialized -- confirm whether that is acceptable
 * in this context.
 */
static void
mge_reinit_rx(struct mge_softc *sc)
{
	struct mge_desc_wrapper *dw;
	int i;

	MGE_RECEIVE_LOCK_ASSERT(sc);

	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);

	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);

	/* Re-attach an mbuf cluster to every RX descriptor. */
	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
	}

	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
	sc->rx_desc_curr = 0;

	/* Tell the hardware where the new ring starts. */
	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
}
#ifdef DEVICE_POLLING
static poll_handler_t mge_poll;

/*
 * polling(4) handler.  For POLL_AND_CHECK_STATUS it also inspects the
 * interrupt cause registers, rebuilding the RX ring on a resource error
 * and acknowledging whatever causes were set.  Returns the number of RX
 * packets processed (bounded by 'count').
 */
static int
mge_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct mge_softc *sc = if_getsoftc(ifp);
	uint32_t int_cause, int_cause_ext;
	int rx_npkts = 0;

	MGE_RECEIVE_LOCK(sc);

	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
		MGE_RECEIVE_UNLOCK(sc);
		return (rx_npkts);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

		/* Check for resource error */
		if (int_cause & MGE_PORT_INT_RXERRQ0)
			mge_reinit_rx(sc);

		/* Acknowledge: the registers are written with the complement. */
		if (int_cause || int_cause_ext) {
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		}
	}

	rx_npkts = mge_intr_rx_locked(sc, count);

	MGE_RECEIVE_UNLOCK(sc);

	/* Reclaim completed TX descriptors under the transmit lock. */
	MGE_TRANSMIT_LOCK(sc);
	mge_intr_tx_locked(sc);
	MGE_TRANSMIT_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */
/*
 * Attach the controller: resolve the PHY/switch topology from the FDT,
 * allocate bus and DMA resources, create and configure the network
 * interface, attach the MII bus (or install fixed 1000baseT-FDX media
 * when no PHY is present) and hook up the interrupt handlers.
 *
 * Fix: this block contained leftover unified-diff residue ('-' prefixed
 * lines around the if_alloc() NULL check) that made it invalid C; the
 * resolved post-diff function is emitted -- if_alloc(9) cannot fail, so
 * the removed NULL check is intentionally gone.
 */
static int
mge_attach(device_t dev)
{
	struct mge_softc *sc;
	struct mii_softc *miisc;
	if_t ifp;
	uint8_t hwaddr[ETHER_ADDR_LEN];
	int i, error, phy;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->node = ofw_bus_get_node(dev);
	phy = 0;

	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) == 0) {
		device_printf(dev, "PHY%i attached, phy_sc points to %s\n", phy,
		    device_get_nameunit(sc->phy_sc->dev));
		sc->phy_attached = 1;
	} else {
		device_printf(dev, "PHY not attached.\n");
		sc->phy_attached = 0;
		/* Fall back to our own SMI registers. */
		sc->phy_sc = sc;
	}

	if (fdt_find_compatible(sc->node, "mrvl,sw", 1) != 0) {
		device_printf(dev, "Switch attached.\n");
		sc->switch_attached = 1;
		/* additional variable available across instances */
		switch_attached = 1;
	} else {
		sc->switch_attached = 0;
	}

	/* The unit-0 instance owns the shared SMI interlock. */
	if (device_get_unit(dev) == 0) {
		sx_init(&sx_smi, "mge_tick() SMI access threads interlock");
	}

	/* Set chip version-dependent parameters */
	mge_ver_params(sc);

	/* Initialize mutexes */
	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock",
	    MTX_DEF);
	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock",
	    MTX_DEF);

	/* Allocate IO and IRQ resources */
	error = bus_alloc_resources(dev, res_spec, sc->res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		mge_detach(dev);
		return (ENXIO);
	}

	/* Allocate DMA, buffers, buffer descriptors */
	error = mge_allocate_dma(sc);
	if (error) {
		mge_detach(dev);
		return (ENXIO);
	}

	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Configure defaults for interrupts coalescing */
	sc->rx_ic_time = 768;
	sc->tx_ic_time = 768;
	mge_add_sysctls(sc);

	/* Allocate network interface; if_alloc(9) cannot fail. */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setsoftc(ifp, sc);
	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
	if_setcapabilities(ifp, IFCAP_VLAN_MTU);
	if (sc->mge_hw_csum) {
		if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
		if_sethwassist(ifp, MGE_CHECKSUM_FEATURES);
	}
	if_setcapenable(ifp, if_getcapabilities(ifp));

#ifdef DEVICE_POLLING
	/* Advertise that polling is supported */
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif

	if_setinitfn(ifp, mge_init);
	if_setstartfn(ifp, mge_start);
	if_setioctlfn(ifp, mge_ioctl);

	if_setsendqlen(ifp, MGE_TX_DESC_NUM - 1);
	if_setsendqready(ifp);

	mge_get_mac_address(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);
	callout_init(&sc->wd_callout, 1);

	/* Attach PHY(s) */
	if (sc->phy_attached) {
		error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
		    mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
		if (error) {
			device_printf(dev, "MII failed to find PHY\n");
			if_free(ifp);
			sc->ifp = NULL;
			mge_detach(dev);
			return (error);
		}
		sc->mii = device_get_softc(sc->miibus);

		/* Tell the MAC where to find the PHY so autoneg works */
		miisc = LIST_FIRST(&sc->mii->mii_phys);
		MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
	} else {
		/* no PHY, so use hard-coded values */
		ifmedia_init(&sc->mge_ifmedia, 0,
		    mge_ifmedia_upd,
		    mge_ifmedia_sts);
		ifmedia_add(&sc->mge_ifmedia,
		    IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		ifmedia_set(&sc->mge_ifmedia,
		    IFM_ETHER | IFM_1000_T | IFM_FDX);
	}

	/* Attach interrupt handlers */
	/* TODO: review flags, in part. mark RX as INTR_ENTROPY ? */
	/* res[0] is the memory window; IRQs start at res[1]. */
	for (i = 1; i <= sc->mge_intr_cnt; ++i) {
		error = bus_setup_intr(dev, sc->res[i],
		    INTR_TYPE_NET | INTR_MPSAFE,
		    NULL, *mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].handler,
		    sc, &sc->ih_cookie[i - 1]);
		if (error) {
			device_printf(dev, "could not setup %s\n",
			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].description);
			mge_detach(dev);
			return (error);
		}
	}

	if (sc->switch_attached) {
		MGE_WRITE(sc, MGE_REG_PHYDEV, MGE_SWITCH_PHYDEV);
		device_add_child(dev, "mdio", -1);
		bus_generic_attach(dev);
	}

	return (0);
}
/*
 * Detach: stop the controller, tear down the interrupt handlers, the
 * network interface, DMA resources, bus resources and locks.  Also used
 * as the error-unwind path from mge_attach(), so every step must
 * tolerate partially initialized state (NULL ifp, missing cookies).
 */
static int
mge_detach(device_t dev)
{
	struct mge_softc *sc;
	int error,i;

	sc = device_get_softc(dev);

	/* Stop controller and free TX queue */
	if (sc->ifp)
		mge_shutdown(dev);

	/* Wait for stopping ticks */
	callout_drain(&sc->wd_callout);

	/* Stop and release all interrupts */
	for (i = 0; i < sc->mge_intr_cnt; ++i) {
		/* Skip vectors that were never set up. */
		if (!sc->ih_cookie[i])
			continue;

		error = bus_teardown_intr(dev, sc->res[1 + i],
		    sc->ih_cookie[i]);
		if (error)
			device_printf(dev, "could not release %s\n",
			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i + 1)].description);
	}

	/* Detach network interface */
	if (sc->ifp) {
		ether_ifdetach(sc->ifp);
		if_free(sc->ifp);
	}

	/* Free DMA resources */
	mge_free_dma(sc);

	/* Free IO memory handler */
	bus_release_resources(dev, res_spec, sc->res);

	/* Destroy mutexes */
	mtx_destroy(&sc->receive_lock);
	mtx_destroy(&sc->transmit_lock);

	/* The unit-0 instance owns the shared SMI interlock. */
	if (device_get_unit(dev) == 0)
		sx_destroy(&sx_smi);

	return (0);
}
/*
 * ifmedia status callback: report the PHY state via mii_pollstat(), or
 * a fixed, always-up 1000baseT-FDX link when no PHY is attached.
 */
static void
mge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct mge_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	MGE_GLOBAL_LOCK(sc);

	if (sc->phy_attached) {
		mii = sc->mii;
		mii_pollstat(mii);
		ifmr->ifm_active = mii->mii_media_active;
		ifmr->ifm_status = mii->mii_media_status;
	} else {
		/* No PHY: the link is hard-wired. */
		ifmr->ifm_active = IFM_1000_T | IFM_FDX | IFM_ETHER;
		ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	}

	MGE_GLOBAL_UNLOCK(sc);
}
/*
 * Translate an ifmedia word into the PORT_SERIAL_CTRL register value:
 * base configuration plus speed/autoneg bits for the selected subtype
 * and the full-duplex bit when requested.
 */
static uint32_t
mge_set_port_serial_control(uint32_t media)
{
	uint32_t cfg, subtype;

	cfg = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);

	if (IFM_TYPE(media) == IFM_ETHER) {
		subtype = IFM_SUBTYPE(media);
		if (subtype == IFM_1000_T)
			cfg |= PORT_SERIAL_GMII_SPEED_1000 |
			    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
			    PORT_SERIAL_SPEED_AUTONEG;
		else if (subtype == IFM_100_TX)
			cfg |= PORT_SERIAL_MII_SPEED_100 |
			    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
			    PORT_SERIAL_SPEED_AUTONEG;
		else if (subtype == IFM_10_T)
			cfg |= PORT_SERIAL_AUTONEG |
			    PORT_SERIAL_AUTONEG_FC |
			    PORT_SERIAL_SPEED_AUTONEG;
		/* IFM_AUTO and unknown subtypes add no speed bits. */

		if (media & IFM_FDX)
			cfg |= PORT_SERIAL_FULL_DUPLEX;
	}

	return (cfg);
}
/*
 * ifmedia change callback.
 *
 * Do not do anything for switch here, as updating media between
 * MGE MAC and switch MAC is hardcoded in PCB.  Changing it here would
 * break the link.
 */
static int
mge_ifmedia_upd(if_t ifp)
{
	struct mge_softc *sc;

	sc = if_getsoftc(ifp);
	if (!sc->phy_attached)
		return (0);

	MGE_GLOBAL_LOCK(sc);
	if (if_getflags(ifp) & IFF_UP) {
		sc->mge_media_status = sc->mii->mii_media.ifm_media;
		mii_mediachg(sc->mii);

		/* MGE MAC needs to be reinitialized. */
		mge_init_locked(sc);
	}
	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}
/* if_init callback: take both driver locks and run the locked variant. */
static void
mge_init(void *arg)
{
	struct mge_softc *sc = arg;

	MGE_GLOBAL_LOCK(sc);
	mge_init_locked(sc);
	MGE_GLOBAL_UNLOCK(sc);
}
/*
 * Bring the controller up: stop it, program MAC address and multicast
 * filters, configure the TX/RX queues, SDMA and port-serial registers,
 * arm the RX ring, enable the port and wait for link, then set up
 * interrupt coalescing and (unless polling) interrupts.
 * Called with both driver locks held.
 */
static void
mge_init_locked(void *arg)
{
	struct mge_softc *sc = arg;
	struct mge_desc_wrapper *dw;
	volatile uint32_t reg_val;
	int i, count;
	uint32_t media_status;

	MGE_GLOBAL_LOCK_ASSERT(sc);

	/* Stop interface */
	mge_stop(sc);

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Set MAC address */
	mge_set_mac_address(sc);

	/* Setup multicast filters */
	mge_setup_multicast(sc);

	/* Version-2 silicon wants RGMII and fixed-priority arbitration. */
	if (sc->mge_ver == 2) {
		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
	}

	/* Initialize TX queue configuration registers */
	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);

	/* Clear TX queue configuration registers for unused queues */
	for (i = 1; i < 7; i++) {
		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
	}

	/* Set default MTU (register offset is chip-version dependent). */
	MGE_WRITE(sc, sc->mge_mtu, 0);

	/* Port configuration */
	MGE_WRITE(sc, MGE_PORT_CONFIG,
	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
	    PORT_CONFIG_ARO_RXQ(0));
	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG , 0x0);

	/* Configure promisc mode */
	mge_set_prom_mode(sc, MGE_RX_DEFAULT_QUEUE);

	media_status = sc->mge_media_status;
	if (sc->switch_attached) {
		/* A switch-facing MAC always runs at 1000baseT. */
		media_status &= ~IFM_TMASK;
		media_status |= IFM_1000_T;
	}

	/* Setup port configuration */
	reg_val = mge_set_port_serial_control(media_status);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);

	/* Setup SDMA configuration */
	MGE_WRITE(sc, MGE_SDMA_CONFIG , MGE_SDMA_RX_BYTE_SWAP |
	    MGE_SDMA_TX_BYTE_SWAP |
	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));

	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);

	/* Point the hardware at the descriptor rings. */
	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Reset descriptor indexes */
	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Enable RX descriptors: hand every one back to the DMA engine. */
	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &sc->mge_rx_desc[i];
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		dw->mge_desc->buff_size = MCLBYTES;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));

	/* Enable port */
	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val |= PORT_SERIAL_ENABLE;
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);

	/* Busy-wait (bounded) for the link to come up. */
	count = 0x100000;
	for (;;) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (reg_val & MGE_STATUS_LINKUP)
			break;
		DELAY(100);
		if (--count == 0) {
			if_printf(sc->ifp, "Timeout on link-up\n");
			break;
		}
	}

	/* Setup interrupts coalescing */
	mge_set_rxic(sc);
	mge_set_txic(sc);

	/*
	 * Enable interrupts -- but only if polling is not turned on;
	 * with polling enabled, disable them explicitly.
	 */
#ifdef DEVICE_POLLING
	if (if_getcapenable(sc->ifp) & IFCAP_POLLING)
		mge_intrs_ctrl(sc, 0);
	else
#endif /* DEVICE_POLLING */
	mge_intrs_ctrl(sc, 1);

	/* Activate network interface */
	if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
	sc->wd_timer = 0;

	/* Schedule watchdog timeout */
	if (sc->phy_attached)
		callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}
/*
 * Combined RX/TX interrupt handler (single-vector configuration).
 * MGE_GLOBAL_LOCK() acquires both driver locks; they are released
 * individually below as each half finishes.
 */
static void
mge_intr_rxtx(void *arg)
{
	struct mge_softc *sc;
	uint32_t int_cause, int_cause_ext;

	sc = arg;
	MGE_GLOBAL_LOCK(sc);

#ifdef DEVICE_POLLING
	/* Nothing to do when polling(4) owns the device. */
	if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
		MGE_GLOBAL_UNLOCK(sc);
		return;
	}
#endif

	/* Get interrupt cause */
	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

	/* Check for Transmit interrupt */
	if (int_cause_ext & (MGE_PORT_INT_EXT_TXBUF0 |
	    MGE_PORT_INT_EXT_TXUR)) {
		/* Ack the TX causes, then reclaim completed descriptors. */
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
		    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
		mge_intr_tx_locked(sc);
	}

	MGE_TRANSMIT_UNLOCK(sc);

	/* Check for Receive interrupt */
	mge_intr_rx_check(sc, int_cause, int_cause_ext);

	MGE_RECEIVE_UNLOCK(sc);
}
/* "Error" interrupt vector: only logs that it fired. */
static void
mge_intr_err(void *arg)
{
	struct mge_softc *sc = arg;

	if_printf(sc->ifp, "%s\n", __FUNCTION__);
}
/* "Miscellaneous" interrupt vector: only logs that it fired. */
static void
mge_intr_misc(void *arg)
{
	struct mge_softc *sc = arg;

	if_printf(sc->ifp, "%s\n", __FUNCTION__);
}
/* RX interrupt vector (multi-vector configuration). */
static void
mge_intr_rx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t cause, cause_ext;

	MGE_RECEIVE_LOCK(sc);

#ifdef DEVICE_POLLING
	/* Nothing to do when polling(4) owns the device. */
	if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
		MGE_RECEIVE_UNLOCK(sc);
		return;
	}
#endif

	/* Read the cause registers and dispatch. */
	cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
	cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

	mge_intr_rx_check(sc, cause, cause_ext);

	MGE_RECEIVE_UNLOCK(sc);
}
/*
 * Common RX-cause dispatch for the interrupt and polling paths: handle
 * a queue-0 resource error (ring rebuild), then acknowledge and process
 * any pending RX causes.  Called with the receive lock held.
 */
static void
mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
    uint32_t int_cause_ext)
{
	uint32_t rx_cause, rx_cause_ext;

	/* A resource error means the RX ring must be rebuilt. */
	if ((int_cause & MGE_PORT_INT_RXERRQ0) != 0) {
		mge_reinit_rx(sc);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
		    ~(int_cause & MGE_PORT_INT_RXERRQ0));
	}

	rx_cause = int_cause & MGE_PORT_INT_RXQ0;
	rx_cause_ext = int_cause_ext & MGE_PORT_INT_EXT_RXOR;

	if (rx_cause != 0 || rx_cause_ext != 0) {
		/* Ack first, then drain the ring without a packet limit. */
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~rx_cause);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~rx_cause_ext);
		mge_intr_rx_locked(sc, -1);
	}
}
/*
 * Harvest received frames from the RX descriptor ring and pass them up
 * the stack.  'count' bounds the number of descriptors examined (-1
 * means unbounded, as used from the interrupt path).  Returns the
 * number of frames delivered.  Called with the receive lock held; the
 * lock is dropped around if_input() and reacquired.
 *
 * Fixes:
 *  - the error filter used '~(status & MGE_ERR_SUMMARY)', which is
 *    non-zero for any single-bit mask value, so errored frames were
 *    never actually skipped; the test is now '== 0'.
 *  - 'x = (++x) % N' modifies 'x' twice without a sequence point,
 *    which is undefined behavior in C; rewritten as 'x = (x + 1) % N'.
 */
static int
mge_intr_rx_locked(struct mge_softc *sc, int count)
{
	if_t ifp = sc->ifp;
	uint32_t status;
	uint16_t bufsize;
	struct mge_desc_wrapper* dw;
	struct mbuf *mb;
	int rx_npkts = 0;

	MGE_RECEIVE_LOCK_ASSERT(sc);

	while (count != 0) {
		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get status */
		status = dw->mge_desc->cmd_status;
		bufsize = dw->mge_desc->buff_size;
		if ((status & MGE_DMA_OWNED) != 0)
			break;

		/* Deliver only non-empty frames without the error-summary bit. */
		if (dw->mge_desc->byte_count &&
		    (status & MGE_ERR_SUMMARY) == 0) {
			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
			    BUS_DMASYNC_POSTREAD);

			mb = m_devget(dw->buffer->m_data,
			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
			    0, ifp, NULL);

			if (mb == NULL)
				/* Give up if no mbufs */
				break;

			/*
			 * Drop the first two bytes of the frame
			 * (presumably hardware IP-alignment padding --
			 * confirm against the controller documentation).
			 */
			mb->m_len -= 2;
			mb->m_pkthdr.len -= 2;
			mb->m_data += 2;

			mb->m_pkthdr.rcvif = ifp;

			/* Fill in RX checksum-offload results. */
			mge_offload_process_frame(ifp, mb, status,
			    bufsize);

			/* Drop the lock while the stack consumes the mbuf. */
			MGE_RECEIVE_UNLOCK(sc);
			if_input(ifp, mb);
			MGE_RECEIVE_LOCK(sc);
			rx_npkts++;
		}

		/* Return the descriptor to the DMA engine and advance. */
		dw->mge_desc->byte_count = 0;
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if (count > 0)
			count -= 1;
	}

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_npkts);

	return (rx_npkts);
}
/* "Summary" interrupt vector: only logs that it fired. */
static void
mge_intr_sum(void *arg)
{
	struct mge_softc *sc = arg;

	if_printf(sc->ifp, "%s\n", __FUNCTION__);
}
/* TX-complete interrupt vector (multi-vector configuration). */
static void
mge_intr_tx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t cause_ext;

	MGE_TRANSMIT_LOCK(sc);

#ifdef DEVICE_POLLING
	/* Nothing to do when polling(4) owns the device. */
	if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
		MGE_TRANSMIT_UNLOCK(sc);
		return;
	}
#endif

	/* Acknowledge the TX buffer / underrun causes before reclaiming. */
	cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(cause_ext &
	    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));

	mge_intr_tx_locked(sc);

	MGE_TRANSMIT_UNLOCK(sc);
}
/*
 * Reclaim completed TX descriptors: free their mbufs and DMA mappings,
 * update collision statistics and, if anything was reclaimed, clear
 * OACTIVE and restart transmission.  Called with the transmit lock held.
 *
 * Fix: 'x = (++x) % N' modifies 'x' twice without a sequence point --
 * undefined behavior in C; rewritten as 'x = (x + 1) % N'.
 */
static void
mge_intr_tx_locked(struct mge_softc *sc)
{
	if_t ifp = sc->ifp;
	struct mge_desc_wrapper *dw;
	struct mge_desc *desc;
	uint32_t status;
	int send = 0;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	/* Disable watchdog */
	sc->wd_timer = 0;

	while (sc->tx_desc_used_count) {
		/* Get the descriptor */
		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
		desc = dw->mge_desc;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get descriptor status */
		status = desc->cmd_status;

		/* Stop at the first descriptor still owned by the DMA engine. */
		if (status & MGE_DMA_OWNED)
			break;

		sc->tx_desc_used_idx =
		    (sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
		sc->tx_desc_used_count--;

		/* Update collision statistics */
		if (status & MGE_ERR_SUMMARY) {
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 16);
		}

		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
		m_freem(dw->buffer);
		dw->buffer = (struct mbuf*)NULL;
		send++;

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	}

	if (send) {
		/* Now send anything that was pending */
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		mge_start_locked(ifp);
	}
}
/*
 * ioctl handler: interface flags, multicast list, capability toggles
 * (checksum offload and polling) and media selection.  Media ioctls are
 * rejected or answered locally when the MAC is wired to a switch.
 */
static int
mge_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct mge_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;

	int mask, error;
	uint32_t flags;

	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		MGE_GLOBAL_LOCK(sc);

		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/* Only react to the flags that changed. */
				flags = if_getflags(ifp) ^ sc->mge_if_flags;
				if (flags & IFF_PROMISC)
					mge_set_prom_mode(sc,
					    MGE_RX_DEFAULT_QUEUE);

				if (flags & IFF_ALLMULTI)
					mge_setup_multicast(sc);
			} else
				mge_init_locked(sc);
		}
		else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			mge_stop(sc);

		sc->mge_if_flags = if_getflags(ifp);
		MGE_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			MGE_GLOBAL_LOCK(sc);
			mge_setup_multicast(sc);
			MGE_GLOBAL_UNLOCK(sc);
		}
		break;
	case SIOCSIFCAP:
		/* Toggle only the capabilities the caller changed. */
		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
		if (mask & IFCAP_HWCSUM) {
			if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
			if_setcapenablebit(ifp, IFCAP_HWCSUM & ifr->ifr_reqcap, 0);
			if (if_getcapenable(ifp) & IFCAP_TXCSUM)
				if_sethwassist(ifp, MGE_CHECKSUM_FEATURES);
			else
				if_sethwassist(ifp, 0);
		}
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				/* Register first; only then mask interrupts. */
				error = ether_poll_register(mge_poll, ifp);
				if (error)
					return(error);

				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 0);
				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
				MGE_GLOBAL_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 1);
				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
				MGE_GLOBAL_UNLOCK(sc);
			}
		}
#endif
		break;
	case SIOCGIFMEDIA: /* fall through */
	case SIOCSIFMEDIA:
		/*
		 * Setting up media type via ioctls is *not* supported for MAC
		 * which is connected to switch. Use etherswitchcfg.
		 */
		if (!sc->phy_attached && (command == SIOCSIFMEDIA))
			return (0);
		else if (!sc->phy_attached) {
			/* Answer from the locally installed fixed media. */
			error = ifmedia_ioctl(ifp, ifr, &sc->mge_ifmedia,
			    command);
			break;
		}

		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
		    && !(ifr->ifr_media & IFM_FDX)) {
			device_printf(sc->dev,
			    "1000baseTX half-duplex unsupported\n");
			return 0;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}
/* miibus read: external-PHY SMI access; invalid once a switch owns SMI. */
static int
mge_miibus_readreg(device_t dev, int phy, int reg)
{

	KASSERT(!switch_attached, ("miibus used with switch attached"));

	return (mv_read_ext_phy(dev, phy, reg));
}
/* miibus write: external-PHY SMI access; invalid once a switch owns SMI. */
static int
mge_miibus_writereg(device_t dev, int phy, int reg, int value)
{

	KASSERT(!switch_attached, ("miibus used with switch attached"));

	mv_write_ext_phy(dev, phy, reg, value);
	return (0);
}
/* Probe: match an enabled FDT node with the "mrvl,ge" compatible. */
static int
mge_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev) ||
	    !ofw_bus_is_compatible(dev, "mrvl,ge"))
		return (ENXIO);

	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
	return (BUS_PROBE_DEFAULT);
}
/* Resume stub: just announces itself. */
static int
mge_resume(device_t dev)
{

	device_printf(dev, "%s\n", __FUNCTION__);
	return (0);
}
/* Shutdown: deregister polling if active, then stop the controller. */
static int
mge_shutdown(device_t dev)
{
	struct mge_softc *sc;

	sc = device_get_softc(dev);

	MGE_GLOBAL_LOCK(sc);

#ifdef DEVICE_POLLING
	if (if_getcapenable(sc->ifp) & IFCAP_POLLING)
		ether_poll_deregister(sc->ifp);
#endif

	mge_stop(sc);

	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}
/*
 * Map 'm0' for DMA and fill the next free TX descriptor.  Only a single
 * DMA segment is supported (the caller defragments first); multi-segment
 * loads are unloaded and the mbuf freed.  On any failure the mbuf is
 * consumed.  Called with the transmit lock held.
 *
 * Fix: 'x = (++x) % N' modifies 'x' twice without a sequence point --
 * undefined behavior in C; rewritten as 'x = (x + 1) % N'.
 */
static int
mge_encap(struct mge_softc *sc, struct mbuf *m0)
{
	struct mge_desc_wrapper *dw = NULL;
	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
	bus_dmamap_t mapp;
	int error;
	int seg, nsegs;
	int desc_no;

	/* Fetch unused map */
	desc_no = sc->tx_desc_curr;
	dw = &sc->mge_tx_desc[desc_no];
	mapp = dw->buffer_dmap;

	/* Create mapping in DMA memory */
	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		m_freem(m0);
		return (error);
	}

	/* Only one segment is supported. */
	if (nsegs != 1) {
		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
		m_freem(m0);
		return (-1);
	}

	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);

	/* Everything is ok, now we can send buffers */
	for (seg = 0; seg < nsegs; seg++) {
		dw->mge_desc->byte_count = segs[seg].ds_len;
		dw->mge_desc->buffer = segs[seg].ds_addr;
		dw->buffer = m0;
		dw->mge_desc->cmd_status = 0;
		if (seg == 0)
			/* Fill in checksum-offload/VLAN descriptor flags. */
			mge_offload_setup_descriptor(sc, dw);
		/* Setting MGE_DMA_OWNED hands the descriptor to the HW. */
		dw->mge_desc->cmd_status |= MGE_TX_LAST | MGE_TX_FIRST |
		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
		    MGE_DMA_OWNED;
	}

	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
	sc->tx_desc_used_count++;
	return (0);
}
/*
 * Once-a-second housekeeping: TX watchdog, MII state machine and
 * media-change detection.  Reschedules itself; only runs when a PHY
 * is attached.
 */
static void
mge_tick(void *msc)
{
	struct mge_softc *sc = msc;

	KASSERT(sc->phy_attached == 1, ("mge_tick while PHY not attached"));

	MGE_GLOBAL_LOCK(sc);

	/* Check for TX timeout */
	mge_watchdog(sc);

	mii_tick(sc->mii);

	/* Check for media type change */
	if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
		mge_ifmedia_upd(sc->ifp);

	MGE_GLOBAL_UNLOCK(sc);

	/* Schedule another timeout one second from now */
	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}
/*
 * TX watchdog, armed by mge_start_locked(): when the timer counts down
 * to zero the controller is assumed wedged and gets reinitialized.
 */
static void
mge_watchdog(struct mge_softc *sc)
{
	if_t ifp = sc->ifp;

	/* Not armed, or still counting down. */
	if (sc->wd_timer == 0 || --sc->wd_timer != 0)
		return;

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_printf(ifp, "watchdog timeout\n");

	/* Full restart of the controller. */
	mge_stop(sc);
	mge_init_locked(sc);
}
/* if_start callback: run the locked variant under the transmit lock. */
static void
mge_start(if_t ifp)
{
	struct mge_softc *sc;

	sc = if_getsoftc(ifp);
	MGE_TRANSMIT_LOCK(sc);
	mge_start_locked(ifp);
	MGE_TRANSMIT_UNLOCK(sc);
}
/*
 * Drain the interface send queue into the TX descriptor ring.  Makes
 * offloaded/VLAN mbufs writable, defragments chained mbufs (the ring
 * supports one DMA segment per packet), stops with OACTIVE set when the
 * ring fills, and kicks the transmitter if anything was queued.
 * Called with the transmit lock held.
 */
static void
mge_start_locked(if_t ifp)
{
	struct mge_softc *sc;
	struct mbuf *m0, *mtmp;
	uint32_t reg_val, queued = 0;

	sc = if_getsoftc(ifp);

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (;;) {
		/* Get packet from the queue */
		m0 = if_dequeue(ifp);
		if (m0 == NULL)
			break;

		/* Offload/VLAN processing may modify the mbuf data. */
		if (m0->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP) ||
		    m0->m_flags & M_VLANTAG) {
			if (M_WRITABLE(m0) == 0) {
				mtmp = m_dup(m0, M_NOWAIT);
				m_freem(m0);
				if (mtmp == NULL)
					continue;
				m0 = mtmp;
			}
		}
		/*
		 * The driver support only one DMA fragment.
		 * If m_defrag() fails, the chained mbuf is kept and
		 * mge_encap() below rejects (and frees) it.
		 */
		if (m0->m_next != NULL) {
			mtmp = m_defrag(m0, M_NOWAIT);
			if (mtmp != NULL)
				m0 = mtmp;
		}

		/* Check for free descriptors */
		if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
			/* Requeue and mark the ring full. */
			if_sendq_prepend(ifp, m0);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		/* mge_encap() consumes the mbuf even on failure. */
		if (mge_encap(sc, m0) != 0)
			break;

		queued++;
		BPF_MTAP(ifp, m0);
	}

	if (queued) {
		/* Enable transmitter and watchdog timer */
		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
		sc->wd_timer = 5;
	}
}
static void
mge_stop(struct mge_softc *sc)
{
if_t ifp;
volatile uint32_t reg_val, status;
struct mge_desc_wrapper *dw;
struct mge_desc *desc;
int count;
ifp = sc->ifp;
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
return;
/* Stop tick engine */
callout_stop(&sc->wd_callout);
/* Disable interface */
if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
sc->wd_timer = 0;
/* Disable interrupts */
mge_intrs_ctrl(sc, 0);
/* Disable Rx and Tx */
reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
/* Remove pending data from TX queue */
while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
sc->tx_desc_used_count) {
/* Get the descriptor */
dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
desc = dw->mge_desc;
bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
BUS_DMASYNC_POSTREAD);
/* Get descriptor status */
status = desc->cmd_status;
if (status & MGE_DMA_OWNED)
break;
sc->tx_desc_used_idx = (++sc->tx_desc_used_idx) %
MGE_TX_DESC_NUM;
sc->tx_desc_used_count--;
bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
m_freem(dw->buffer);
dw->buffer = (struct mbuf*)NULL;
}
/* Wait for end of transmission */
count = 0x100000;
while (count--) {
reg_val = MGE_READ(sc, MGE_PORT_STATUS);
if ( !(reg_val & MGE_STATUS_TX_IN_PROG) &&
(reg_val & MGE_STATUS_TX_FIFO_EMPTY))
break;
DELAY(100);
}
if (count == 0)
if_printf(ifp,
"%s: timeout while waiting for end of transmission\n",
__FUNCTION__);
reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
reg_val &= ~(PORT_SERIAL_ENABLE);
MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL ,reg_val);
}
/* Suspend stub: just announces itself. */
static int
mge_suspend(device_t dev)
{

	device_printf(dev, "%s\n", __FUNCTION__);
	return (0);
}
/*
 * Translate the RX descriptor's checksum status bits into mbuf
 * csum_flags for a received frame.  No-op unless RXCSUM is enabled.
 */
static void
mge_offload_process_frame(if_t ifp, struct mbuf *frame,
    uint32_t status, uint16_t bufsize)
{
	int flags;

	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) == 0)
		return;

	flags = 0;

	/* IP header checksum verified by the hardware. */
	if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
		flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	/* L4 checksum: only for unfragmented TCP/UDP the HW validated. */
	if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
	    (status & MGE_RX_L4_CSUM_OK) &&
	    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status))) {
		flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		frame->m_pkthdr.csum_data = 0xFFFF;
	}

	frame->m_pkthdr.csum_flags = flags;
}
/*
 * Fill in the TX descriptor's checksum-offload and VLAN flags from the
 * mbuf's csum_flags.  Only IPv4 is supported; other ethertypes are
 * reported and left alone.
 *
 * Fix: MGE_TX_VLAN_TAGGED is a TX descriptor flag, but it was being
 * OR'ed into 'csum_flags' (the mbuf CSUM_* namespace) where it never
 * reached 'cmd_status' or the hardware; it now goes into 'cmd_status'.
 */
static void
mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
{
	struct mbuf *m0 = dw->buffer;
	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
	int csum_flags = m0->m_pkthdr.csum_flags;
	int cmd_status = 0;
	struct ip *ip;
	int ehlen, etype;

	if (csum_flags != 0) {
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
			etype = ntohs(eh->evl_proto);
			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
			cmd_status |= MGE_TX_VLAN_TAGGED;
		} else {
			etype = ntohs(eh->evl_encap_proto);
			ehlen = ETHER_HDR_LEN;
		}

		if (etype != ETHERTYPE_IP) {
			if_printf(sc->ifp,
			    "TCP/IP Offload enabled for unsupported "
			    "protocol!\n");
			return;
		}

		ip = (struct ip *)(m0->m_data + ehlen);
		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
		cmd_status |= MGE_TX_NOT_FRAGMENT;
	}

	if (csum_flags & CSUM_IP)
		cmd_status |= MGE_TX_GEN_IP_CSUM;

	if (csum_flags & CSUM_TCP)
		cmd_status |= MGE_TX_GEN_L4_CSUM;

	if (csum_flags & CSUM_UDP)
		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;

	dw->mge_desc->cmd_status |= cmd_status;
}
/* Mask (enable == 0) or unmask the interrupt sources the driver uses. */
static void
mge_intrs_ctrl(struct mge_softc *sc, int enable)
{

	if (enable == 0) {
		/* Clear every cause register and zero every mask. */
		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_INT_MASK, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
		return;
	}

	/* Unmask RX queue 0, RX errors and the extended (TX) sources. */
	MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
	    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
	MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
	    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
	    MGE_PORT_INT_EXT_TXBUF0);
}
/*
 * CRC-8 over 'size' bytes: polynomial 0x07, initial value 0, MSB first,
 * no final XOR (the parameters behind the original lookup table).
 * Used to index the "other" multicast hash filter table.
 */
static uint8_t
mge_crc8(uint8_t *data, int size)
{
	uint8_t crc = 0;
	int bit;

	while (size--) {
		crc ^= *(data++);
		for (bit = 0; bit < 8; bit++) {
			if (crc & 0x80)
				crc = (crc << 1) ^ 0x07;
			else
				crc = crc << 1;
		}
	}

	return (crc);
}
/*
 * Scratch state passed to if_foreach_llmaddr() when rebuilding the two
 * hardware multicast filter tables (see mge_hash_maddr()).
 */
struct mge_hash_maddr_ctx {
	uint32_t smt[MGE_MCAST_REG_NUMBER];	/* special table: 01:00:5e:00:00:xx addresses */
	uint32_t omt[MGE_MCAST_REG_NUMBER];	/* other table, indexed by CRC-8 of the address */
};
/*
 * if_foreach_llmaddr() callback: fold one link-level multicast address
 * into the filter-table accumulator.  Addresses with the fixed
 * 01:00:5e:00:00 prefix are indexed by their last octet into the
 * special table; everything else goes through CRC-8 into the other
 * table.  Each table byte holds the queue code 'v'.
 */
static u_int
mge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	static const uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
	static const uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
	struct mge_hash_maddr_ctx *ctx = arg;
	uint8_t *addr;
	int idx;

	addr = LLADDR(sdl);
	if (memcmp(addr, special, sizeof(special)) == 0) {
		idx = addr[5];
		ctx->smt[idx >> 2] |= v << ((idx & 0x03) << 3);
	} else {
		idx = mge_crc8(addr, ETHER_ADDR_LEN);
		ctx->omt[idx >> 2] |= v << ((idx & 0x03) << 3);
	}
	return (1);
}
/*
 * Program the special and "other" multicast filter tables: fully open
 * when IFF_ALLMULTI is set, otherwise built from the interface's
 * link-level multicast address list.
 */
static void
mge_setup_multicast(struct mge_softc *sc)
{
	static const uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
	struct mge_hash_maddr_ctx ctx;
	if_t ifp = sc->ifp;
	int n;

	if (if_getflags(ifp) & IFF_ALLMULTI) {
		/* Accept everything: replicate the queue code into all bytes. */
		for (n = 0; n < MGE_MCAST_REG_NUMBER; n++)
			ctx.smt[n] = ctx.omt[n] =
			    (v << 24) | (v << 16) | (v << 8) | v;
	} else {
		memset(&ctx, 0, sizeof(ctx));
		if_foreach_llmaddr(ifp, mge_hash_maddr, &ctx);
	}

	/* Flush the accumulated tables to the hardware. */
	for (n = 0; n < MGE_MCAST_REG_NUMBER; n++) {
		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(n), ctx.smt[n]);
		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(n), ctx.omt[n]);
	}
}
/* Program the RX interrupt-coalescing interval, clamped to the HW max. */
static void
mge_set_rxic(struct mge_softc *sc)
{
	uint32_t val;

	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
		sc->rx_ic_time = sc->mge_rx_ipg_max;

	val = MGE_READ(sc, MGE_SDMA_CONFIG);
	val &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
	val |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
	MGE_WRITE(sc, MGE_SDMA_CONFIG, val);
}
/*
 * Apply the TX interrupt-coalescing time to hardware, clamping the
 * requested value to the controller's maximum first.
 */
static void
mge_set_txic(struct mge_softc *sc)
{
	uint32_t thresh;

	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
		sc->tx_ic_time = sc->mge_tfut_ipg_max;

	/* Clear the IPG field of the TX FIFO urgent threshold register
	 * and insert the new value. */
	thresh = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH) &
	    ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH,
	    thresh | mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver));
}
/*
 * Sysctl handler for the interrupt-coalescing time knobs.  arg2
 * selects the direction (MGE_IC_RX or MGE_IC_TX); on a successful
 * write the new value is pushed to hardware under the global lock.
 */
static int
mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
{
	struct mge_softc *sc = (struct mge_softc *)arg1;
	uint32_t val;
	int rv;

	val = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
	rv = sysctl_handle_int(oidp, &val, 0, req);
	if (rv != 0)
		return (rv);

	MGE_GLOBAL_LOCK(sc);
	if (arg2 == MGE_IC_RX) {
		sc->rx_ic_time = val;
		mge_set_rxic(sc);
	} else {
		sc->tx_ic_time = val;
		mge_set_txic(sc);
	}
	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}
/*
 * Attach the per-device "int_coal" sysctl node with rx_time/tx_time
 * knobs; both route through mge_sysctl_ic() which also reprograms the
 * hardware on writes.
 */
static void
mge_add_sysctls(struct mge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "MGE Interrupts coalescing");
	children = SYSCTL_CHILDREN(tree);

	/* arg2 selects RX vs TX inside the shared handler. */
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, MGE_IC_RX,
	    mge_sysctl_ic, "I", "IC RX time threshold");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, MGE_IC_TX,
	    mge_sysctl_ic, "I", "IC TX time threshold");
}
/*
 * MII bus write method: forward to the shared Marvell GbE SMI helper.
 * Always reports success to the caller.
 */
static int
mge_mdio_writereg(device_t dev, int phy, int reg, int value)
{

	mv_write_ge_smi(dev, phy, reg, value);

	return (0);
}
/*
 * MII bus read method: forward to the shared Marvell GbE SMI helper
 * and return the register value it produced.
 */
static int
mge_mdio_readreg(device_t dev, int phy, int reg)
{

	return (mv_read_ge_smi(dev, phy, reg));
}
diff --git a/sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c b/sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
index 583de1816d1b..ac4bfd1b1a14 100644
--- a/sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
+++ b/sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
@@ -1,2943 +1,2938 @@
/*
* Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/compat.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
#include <linux/list.h>
#include <linux/if_ether.h>
#include <dev/mlx4/driver.h>
#include <dev/mlx4/device.h>
#include <dev/mlx4/cmd.h>
#include <dev/mlx4/cq.h>
#include <sys/eventhandler.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include "en.h"
#include "en_port.h"
DEBUGNET_DEFINE(mlx4_en);
static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv);
static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv);
#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	if_t dev = cq->dev;
	struct mlx4_en_priv *priv = mlx4_netdev_priv(dev);
	/* rx_ring is only consumed by the LL_EXTENDED_STATS counters below. */
	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
	int done;

	/* Nothing to poll while the port is down. */
	if (!priv->port_up)
		return LL_FLUSH_FAILED;

	/* NAPI may own the CQ; busy-polling must not race it. */
	if (!mlx4_en_cq_lock_poll(cq))
		return LL_FLUSH_BUSY;

	/* Poll a small fixed budget (4) of completions inline. */
	done = mlx4_en_process_rx_cq(dev, cq, 4);
#ifdef LL_EXTENDED_STATS
	if (likely(done))
		rx_ring->cleaned += done;
	else
		rx_ring->misses++;
#endif
	mlx4_en_cq_unlock_poll(cq);

	return done;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_RFS_ACCEL
/*
 * One accelerated-RFS flow: a TCP/UDP over IPv4 5-tuple steered to a
 * specific RX queue through a hardware flow-steering rule.
 */
struct mlx4_en_filter {
	struct list_head next;		/* linkage on priv->filters */
	struct work_struct work;	/* defers HW rule (re)attach */
	u8     ip_proto;		/* IPPROTO_TCP or IPPROTO_UDP */
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int    rxq_index;		/* target RX ring */
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;	/* linkage on priv->filter_hash */
};
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
/*
 * Map an IP protocol number to the corresponding flow-steering rule
 * id.  Returns MLX4_NET_TRANS_RULE_NUM for protocols that cannot be
 * steered; callers treat that value as "unsupported".
 *
 * Fix: dropped the stray ';' that followed the function's closing
 * brace (invalid at file scope in strict ISO C, -Wpedantic warning).
 */
static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return MLX4_NET_TRANS_RULE_NUM;
	}
}
/*
 * Deferred work: attach (or re-attach after a queue change) the HW
 * flow-steering rule for one RFS filter.  Builds an ETH + IPv4 +
 * TCP/UDP rule matching the filter's 5-tuple and points it at the QP
 * of the target RX ring.
 */
static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	/* L4 spec: exact match on both ports. */
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	/* L3 spec: exact match on both IPv4 addresses. */
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	/* Unsupported protocol: skip rule programming entirely. */
	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	/* Mark not-yet-attached so the expiry scan leaves us alone. */
	filter->activated = 0;

	/* Detach any previously attached rule before re-attaching. */
	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	/* Opportunistically expire stale filters, then allow expiry
	 * of this one. */
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}
/*
 * Return the filter hash bucket for a 5-tuple: mix the ports and the
 * XOR of the addresses into one word and hash it down to
 * MLX4_EN_FILTER_HASH_SHIFT bits.
 */
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long key;
	int idx;

	key = (__force unsigned long)src_port |
	      ((__force unsigned long)dst_port << 2);
	key ^= (__force unsigned long)(src_ip ^ dst_ip);
	idx = hash_long(key, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[idx];
}
/*
 * Allocate a new RFS filter for the given 5-tuple and link it on both
 * the flat filter list and the hash table.  Runs under
 * priv->filters_lock (hence GFP_ATOMIC).  The HW rule is not attached
 * here; mlx4_en_filter_work() does that later.  Returns NULL on
 * allocation failure.
 */
static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	/* Driver-local id, kept within the RFS id space. */
	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}
static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
struct mlx4_en_priv *priv = filter->priv;
int rc;
list_del(&filter->next);
rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
if (rc && rc != -ENOENT)
en_err(priv, "Error detaching flow. rc = %d\n", rc);
kfree(filter);
}
/*
 * Look up an RFS filter by its full 5-tuple in the hash table.
 * Returns the matching filter or NULL.
 */
static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;

	/* Scan only the bucket this tuple hashes to. */
	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port)
			return filter;
	}

	return NULL;
}
/*
 * ndo_rx_flow_steer-style callback: steer the flow carrying @skb to
 * RX queue @rxq_index.  Reuses an existing filter for the 5-tuple if
 * present (retargeting its ring), otherwise allocates a new one and
 * queues the work that programs the HW rule.  Returns the driver
 * filter id or a negative errno.
 */
static int
mlx4_en_filter_rfs(if_t net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = mlx4_netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	/* Only unfragmented IPv4 TCP/UDP flows can be steered. */
	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;

	/* L4 ports follow the (variable-length) IPv4 header. */
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		/* Already steered to the right ring: just report the id. */
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	/* (Re)program the HW rule from the workqueue. */
	queue_work(priv->mdev->workqueue, &filter->work);

out:
	/* Only reached with a valid filter in hand. */
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}
/*
 * Remove all RFS filters: unlink everything onto a private list under
 * the lock, then free each filter (syncing pending work and detaching
 * HW rules) outside the lock, since those operations may sleep.
 */
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		/* cancel_work_sync() may sleep; must run unlocked. */
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}
/*
 * Expire aged RFS filters, scanning at most
 * MLX4_EN_FILTER_EXPIRY_QUOTA entries per call.  A filter may go once
 * it is activated, has no pending work, and the RFS core confirms the
 * flow is gone.  Survivors rotate to the front so subsequent calls
 * scan different filters first.  Freeing happens outside the lock.
 */
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	/* Rotate the list head past the last survivor. */
	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif
/*
 * VLAN-config eventhandler: record @vid as active, reprogram the port
 * VLAN filter and register the VLAN with the device, all under the
 * device state lock.
 */
static void mlx4_en_vlan_rx_add_vid(void *arg, if_t dev, u16 vid)
{
	struct mlx4_en_priv *priv = mlx4_netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	/* Only handle events for the instance we registered with. */
	if (arg != priv)
		return;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_dbg(HW, priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);
}
/*
 * VLAN-unconfig eventhandler: clear @vid from the active set,
 * unregister it with the device and reprogram the port VLAN filter,
 * all under the device state lock.
 */
static void mlx4_en_vlan_rx_kill_vid(void *arg, if_t dev, u16 vid)
{
	struct mlx4_en_priv *priv = mlx4_netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	/* Only handle events for the instance we registered with. */
	if (arg != priv)
		return;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);
}
/*
 * Install a VXLAN tunnel steering rule for @addr to QP @qpn.  A no-op
 * (returning 0) unless VXLAN offload is enabled and the steering mode
 * supports it.  On success *reg_id holds the rule handle.
 */
static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
				    int qpn, u64 *reg_id)
{
	struct mlx4_dev *mdev = priv->mdev->dev;
	int err;

	if (mdev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    mdev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(mdev, addr, priv->port, qpn,
				    MLX4_DOMAIN_NIC, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}

	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, (long long)*reg_id);
	return 0;
}
/*
 * Attach a unicast steering rule directing @mac to QP *qpn, using
 * whichever steering mode the device runs in.  For DMFS, *reg_id
 * receives the rule handle needed for later detach.  Returns 0 on
 * success, -EINVAL for an unknown steering mode, or the HW error.
 */
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		/* B0 GID layout: MAC in bytes 10..15, port in byte 5. */
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		/* Exact-match ETH rule on the destination MAC. */
		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}
/*
 * Undo mlx4_en_uc_steer_add(): detach the unicast rule for @mac from
 * QP @qpn using the active steering mode (@reg_id is the DMFS rule
 * handle; unused in B0 mode).
 */
static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		/* Same GID layout as the attach path. */
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}
/*
 * Register the interface MAC with the port and obtain the base QP
 * used for unicast steering.  In A0 steering the QP number is derived
 * from the returned MAC-table index; otherwise a one-QP range is
 * reserved.  Returns 0 on success or a negative error.
 */
static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int index = 0;
	int err = 0;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_mac_to_u64(if_getlladdr(priv->dev));

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       if_getlladdr(priv->dev));
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       if_getlladdr(priv->dev));
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		/* A0: QP number follows directly from the MAC index. */
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		/* Roll back the MAC registration on failure. */
		mlx4_unregister_mac(dev, priv->port, mac);
		return err;
	}

	return 0;
}
/*
 * Undo mlx4_en_get_qp(): in A0 steering unregister the MAC (the QP
 * was only implied by the MAC index); otherwise release the reserved
 * QP range and drop the forced-promiscuous flag.
 */
static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		u64 mac = mlx4_mac_to_u64(if_getlladdr(priv->dev));
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       if_getlladdr(priv->dev));
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}
/*
 * Drop every entry from the cached unicast address list.
 */
static void mlx4_en_clear_uclist(if_t dev)
{
	struct mlx4_en_priv *priv = mlx4_netdev_priv(dev);
	struct mlx4_en_addr_list *entry, *next;

	list_for_each_entry_safe(entry, next, &priv->uc_list, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}
/*
 * if_foreach_lladdr() callback: append one unicast link-level address
 * to priv->uc_list.  Returns 1 when an entry was added, 0 otherwise.
 */
static u_int mlx4_copy_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct mlx4_en_priv *priv = arg;
	struct mlx4_en_addr_list *entry;

	if (sdl->sdl_alen != ETHER_ADDR_LEN) /* XXXGL: can that happen? */
		return (0);

	entry = kzalloc(sizeof(struct mlx4_en_addr_list), GFP_ATOMIC);
	if (entry == NULL) {
		en_err(priv, "Failed to allocate address list\n");
		return (0);
	}

	memcpy(entry->addr, LLADDR(sdl), ETH_ALEN);
	list_add_tail(&entry->list, &priv->uc_list);
	return (1);
}
/*
 * Rebuild priv->uc_list from the interface's current unicast
 * link-level addresses.
 */
static void mlx4_en_cache_uclist(if_t dev)
{
	struct mlx4_en_priv *priv = mlx4_netdev_priv(dev);

	mlx4_en_clear_uclist(dev);
	if_foreach_lladdr(dev, mlx4_copy_addr, priv);
}
/*
 * Drop every entry from the cached multicast address list.
 */
static void mlx4_en_clear_mclist(if_t dev)
{
	struct mlx4_en_priv *priv = mlx4_netdev_priv(dev);
	struct mlx4_en_addr_list *entry, *next;

	list_for_each_entry_safe(entry, next, &priv->mc_list, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}
/*
 * if_foreach_llmaddr() callback: append one multicast link-level
 * address to priv->mc_list.  Returns 1 when an entry was added,
 * 0 otherwise.
 */
static u_int mlx4_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int count)
{
	struct mlx4_en_priv *priv = arg;
	struct mlx4_en_addr_list *tmp;

	if (sdl->sdl_alen != ETHER_ADDR_LEN) /* XXXGL: can that happen? */
		return (0);
	tmp = kzalloc(sizeof(struct mlx4_en_addr_list), GFP_ATOMIC);
	if (tmp == NULL) {
		en_err(priv, "Failed to allocate address list\n");
		return (0);
	}
	memcpy(tmp->addr, LLADDR(sdl), ETH_ALEN);
	list_add_tail(&tmp->list, &priv->mc_list);
	return (1);
}
/*
 * Rebuild priv->mc_list from the interface's current multicast
 * link-level addresses.
 */
static void mlx4_en_cache_mclist(if_t dev)
{
	struct mlx4_en_priv *priv = mlx4_netdev_priv(dev);

	mlx4_en_clear_mclist(dev);
	if_foreach_llmaddr(dev, mlx4_copy_maddr, priv);
}
/*
 * Diff @dst (the previously programmed set) against @src (the freshly
 * cached set) and tag each dst entry with the action needed to
 * converge:
 *   MLX4_ADDR_LIST_REM  - in dst but not src: caller detaches it
 *   MLX4_ADDR_LIST_NONE - in both: leave alone
 *   MLX4_ADDR_LIST_ADD  - in src only: appended to dst for attach
 * On allocation failure the merge stops early, leaving the remaining
 * additions for a later pass.
 */
static void update_addr_list_flags(struct mlx4_en_priv *priv,
				   struct list_head *dst,
				   struct list_head *src)
{
	struct mlx4_en_addr_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
	 * These are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MLX4_ADDR_LIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * mark them as need to add
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				dst_tmp->action = MLX4_ADDR_LIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmalloc(sizeof(struct mlx4_en_addr_list),
					 GFP_KERNEL);
			if (!new_mc) {
				en_err(priv, "Failed to allocate current multicast list\n");
				return;
			}
			/* Whole-struct copy; the stale list linkage is
			 * overwritten by list_add_tail() below. */
			memcpy(new_mc, src_tmp,
			       sizeof(struct mlx4_en_addr_list));
			new_mc->action = MLX4_ADDR_LIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}
/*
 * RX-mode change entry point: defer the actual filter reprogramming
 * to the workqueue; ignored while the port is down.
 */
static void mlx4_en_set_rx_mode(if_t dev)
{
	struct mlx4_en_priv *priv = mlx4_netdev_priv(dev);

	if (priv->port_up)
		queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}
/*
 * Enter promiscuous mode: install the steering-mode-specific promisc
 * rules and disable the port multicast filter.  A no-op when
 * MLX4_EN_FLAG_PROMISC is already set.
 */
static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscouos mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			/* A single catch-all rule covers MC too. */
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	}
}
/*
 * Leave promiscuous mode: remove the steering-mode-specific promisc
 * rules installed by mlx4_en_set_promisc_mode() and clear the
 * corresponding flags.
 */
static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscouos mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		/* Mirrors the single catch-all rule of the add path. */
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}
}
/*
 * Synchronize HW multicast state with the interface: either enter
 * multicast-promiscuous mode (IFF_ALLMULTI/IFF_PROMISC) or rebuild
 * the port multicast filter from the cached group list and diff the
 * steering attachments against the previously programmed set.
 */
static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 if_t dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_addr_list *addr_list, *tmp;
	u8 mc_list[16] = {0};
	int err = 0;
	u64 mcast_addr = 0;

	/*
	 * Enable/disable the multicast filter according to
	 * IFF_ALLMULTI and IFF_PROMISC:
	 */
	if (if_getflags(dev) & (IFF_ALLMULTI | IFF_PROMISC)) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		/* Disable the filter while it is being rewritten. */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphor */
		mlx4_en_cache_mclist(dev);
		list_for_each_entry(addr_list, &priv->mc_list, list) {
			mcast_addr = mlx4_mac_to_u64(addr_list->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		/* Diff cached groups against the programmed set and
		 * attach/detach steering entries as flagged. */
		update_addr_list_flags(priv, &priv->curr_mc_list, &priv->mc_list);

		list_for_each_entry_safe(addr_list, tmp, &priv->curr_mc_list, list) {
			if (addr_list->action == MLX4_ADDR_LIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    addr_list->reg_id);
				if (err)
					en_err(priv, "Fail to detach multicast address\n");

				if (addr_list->tunnel_reg_id) {
					err = mlx4_flow_detach(priv->mdev->dev, addr_list->tunnel_reg_id);
					if (err)
						en_err(priv, "Failed to detach multicast address\n");
				}

				/* remove from list */
				list_del(&addr_list->list);
				kfree(addr_list);
			} else if (addr_list->action == MLX4_ADDR_LIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &addr_list->reg_id);
				if (err)
					en_err(priv, "Fail to attach multicast address\n");

				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
							       &addr_list->tunnel_reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
}
/*
 * Synchronize HW unicast steering with the interface: cache the
 * current unicast addresses, diff against the previously programmed
 * set, and attach/detach steering rules accordingly.
 */
static void mlx4_en_do_unicast(struct mlx4_en_priv *priv,
			       if_t dev,
			       struct mlx4_en_dev *mdev)
{
	struct mlx4_en_addr_list *addr_list, *tmp;
	int err;

	/* Update unicast list */
	mlx4_en_cache_uclist(dev);

	update_addr_list_flags(priv, &priv->curr_uc_list, &priv->uc_list);

	list_for_each_entry_safe(addr_list, tmp, &priv->curr_uc_list, list) {
		if (addr_list->action == MLX4_ADDR_LIST_REM) {
			mlx4_en_uc_steer_release(priv, addr_list->addr,
						 priv->rss_map.indir_qp.qpn,
						 addr_list->reg_id);
			/* remove from list */
			list_del(&addr_list->list);
			kfree(addr_list);
		} else if (addr_list->action == MLX4_ADDR_LIST_ADD) {
			err = mlx4_en_uc_steer_add(priv, addr_list->addr,
						   &priv->rss_map.indir_qp.qpn,
						   &addr_list->reg_id);
			if (err)
				en_err(priv, "Fail to add unicast address\n");
		}
	}
}
/*
 * Workqueue handler behind mlx4_en_set_rx_mode(): re-checks device
 * and port state, re-reports link-up if the port came up, then
 * reprograms unicast, promiscuous and multicast filters, all under
 * mdev->state_lock.
 */
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	if_t dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
		if (priv->port_state.link_state) {
			priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
			/* update netif baudrate */
			if_setbaudrate(priv->dev,
			    IF_Mbps(priv->port_state.link_speed));
			/* Important note: the following call for if_link_state_change
			 * is needed for interface up scenario (start port, link state
			 * change) */
			if_link_state_change(priv->dev, LINK_STATE_UP);
			en_dbg(HW, priv, "Link Up\n");
		}
	}

	/* Set unicast rules */
	mlx4_en_do_unicast(priv, dev, mdev);

	/* Promsicuous mode: disable all filters */
	if ((if_getflags(dev) & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		/* Not in promiscuous mode */
		mlx4_en_clear_promisc_mode(priv, mdev);
	}

	/* Set multicast rules */
	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}
/*
 * Watchdog callout handler: queue the watchdog task on the driver
 * workqueue and re-arm the timer while the port remains up.
 */
static void mlx4_en_watchdog_timeout(void *arg)
{
	struct mlx4_en_priv *priv = arg;
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
	if (priv->port_up)
		callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT,
		    mlx4_en_watchdog_timeout, priv);
}
/*
 * Install the default interrupt-moderation parameters on every RX and
 * TX CQ and reset the adaptive-moderation bookkeeping.
 */
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 * satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalesing params for mtu: %u - "
	       "rx_frames:%d rx_usecs:%d\n",
	       (unsigned)if_getmtu(priv->dev), priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		/* Force the adaptive path to reprogram on first sample. */
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}
/*
 * Adaptive RX interrupt moderation: once per sample interval, compute
 * each RX ring's packet rate and average packet size, interpolate a
 * moderation time between rx_usecs_low and rx_usecs_high, and program
 * the ring's CQ whenever the value changed.
 */
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	/* Skip when disabled or the sample window has not elapsed. */
	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		/* Snapshot the counters consistently. */
		spin_lock(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring]->packets;
		rx_bytes = priv->rx_ring[ring]->bytes;
		spin_unlock(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when packet rate
		 * exceeds a rate that it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				/* Linear interpolation between bounds. */
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = priv->rx_cq[ring];
			cq->moder_time = moder_time;
			cq->moder_cnt = priv->rx_frames;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}
/*
 * Periodic statistics work: pull HW counters (vport stats for slave
 * functions, DUMP_ETH_STATS otherwise), run adaptive moderation, and
 * re-arm itself for STATS_DELAY while the device stays up.
 */
static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up) {
			if (mlx4_is_slave(mdev->dev))
				err = mlx4_en_get_vport_stats(mdev, priv->port);
			else
				err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
			if (err)
				en_dbg(HW, priv, "Could not update stats\n");

			mlx4_en_auto_moderation(priv);
		}

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}
/* mlx4_en_service_task - Run service task for tasks that needed to be done
 * periodically
 */
static void mlx4_en_service_task(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 service_task);
	struct mlx4_en_dev *mdev = priv->mdev;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		/* Re-arm only while the device remains up. */
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}
/*
 * Link-state change worker.  Propagates the asynchronous port event
 * recorded in priv->link_state to the ifnet layer: marks the interface
 * down (and zeroes the baudrate) on PORT_DOWN, or queries the port and
 * reports Link Up on PORT_UP.  The Up notification is only sent while
 * port_up is set; on INIT_PORT the interface cannot yet be declared up,
 * and set_rx_mode issues the deferred if_link_state_change instead.
 */
static void mlx4_en_linkstate(struct work_struct *work)
{
    struct mlx4_en_priv *priv =
        container_of(work, struct mlx4_en_priv, linkstate_task);
    struct mlx4_en_dev *mdev = priv->mdev;
    int new_state = priv->link_state;

    mutex_lock(&mdev->state_lock);
    if (priv->last_link_state != new_state) {
        if (new_state == MLX4_DEV_EVENT_PORT_DOWN) {
            en_info(priv, "Link Down\n");
            if_link_state_change(priv->dev, LINK_STATE_DOWN);
            if_setbaudrate(priv->dev, 0);
        } else if (priv->port_up &&
            new_state == MLX4_DEV_EVENT_PORT_UP) {
            if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
                en_info(priv, "Query port failed\n");
            if_setbaudrate(priv->dev,
                IF_Mbps(priv->port_state.link_speed));
            en_info(priv, "Link Up\n");
            if_link_state_change(priv->dev, LINK_STATE_UP);
        }
    }
    priv->last_link_state = new_state;
    mutex_unlock(&mdev->state_lock);
}
/*
 * Bring the port fully up.
 *
 * Sequence: activate RX rings and CQs, acquire the ethernet QP, program
 * RSS steering and the drop QP, activate TX CQs/rings, push the general
 * port configuration to firmware, INIT_PORT, attach the broadcast
 * address, and finally schedule the rx_mode task and start the watchdog.
 *
 * Caller must hold mdev->state_lock (callers here take it before
 * invoking this).  Returns 0 on success or a negative errno; on failure
 * everything activated so far is unwound through the goto ladder at the
 * bottom, in reverse order of acquisition.
 */
int mlx4_en_start_port(if_t dev)
{
    struct mlx4_en_priv *priv = mlx4_netdev_priv(dev);
    struct mlx4_en_dev *mdev = priv->mdev;
    struct mlx4_en_cq *cq;
    struct mlx4_en_tx_ring *tx_ring;
    int rx_index = 0;    /* how many RX CQs were activated (for unwind) */
    int tx_index = 0;    /* how many TX ring/CQ pairs were activated */
    int err = 0;
    int i;
    int j;
    u8 mc_list[16] = {0};
    /* Idempotent: a second start while up is a no-op. */
    if (priv->port_up) {
        en_dbg(DRV, priv, "start port called while port already up\n");
        return 0;
    }
    INIT_LIST_HEAD(&priv->mc_list);
    INIT_LIST_HEAD(&priv->uc_list);
    INIT_LIST_HEAD(&priv->curr_mc_list);
    INIT_LIST_HEAD(&priv->curr_uc_list);
    INIT_LIST_HEAD(&priv->ethtool_list);
    /* Calculate Rx buf size (clamps MTU to the device maximum first) */
    if_setmtu(dev, min(if_getmtu(dev), priv->max_mtu));
    mlx4_en_calc_rx_buf(dev);
    en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_mb_size);
    /* Configure rx cq's and rings */
    err = mlx4_en_activate_rx_rings(priv);
    if (err) {
        en_err(priv, "Failed to activate RX rings\n");
        return err;
    }
    for (i = 0; i < priv->rx_ring_num; i++) {
        cq = priv->rx_cq[i];
        mlx4_en_cq_init_lock(cq);
        err = mlx4_en_activate_cq(priv, cq, i);
        if (err) {
            en_err(priv, "Failed activating Rx CQ\n");
            goto cq_err;
        }
        /* Hand all CQEs to HW by setting the owner bit. */
        for (j = 0; j < cq->size; j++)
            cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
        err = mlx4_en_set_cq_moder(priv, cq);
        if (err) {
            en_err(priv, "Failed setting cq moderation parameters");
            mlx4_en_deactivate_cq(priv, cq);
            goto cq_err;
        }
        mlx4_en_arm_cq(priv, cq);
        priv->rx_ring[i]->cqn = cq->mcq.cqn;
        ++rx_index;
    }
    /* Set qp number */
    en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
    err = mlx4_en_get_qp(priv);
    if (err) {
        en_err(priv, "Failed getting eth qp\n");
        goto cq_err;
    }
    mdev->mac_removed[priv->port] = 0;
    priv->counter_index =
        mlx4_get_default_counter_index(mdev->dev, priv->port);
    err = mlx4_en_config_rss_steer(priv);
    if (err) {
        en_err(priv, "Failed configuring rss steering\n");
        goto mac_err;
    }
    err = mlx4_en_create_drop_qp(priv);
    if (err)
        goto rss_err;
    /* Configure tx cq's and rings */
    for (i = 0; i < priv->tx_ring_num; i++) {
        /* Configure cq */
        cq = priv->tx_cq[i];
        err = mlx4_en_activate_cq(priv, cq, i);
        if (err) {
            en_err(priv, "Failed activating Tx CQ\n");
            goto tx_err;
        }
        err = mlx4_en_set_cq_moder(priv, cq);
        if (err) {
            en_err(priv, "Failed setting cq moderation parameters");
            mlx4_en_deactivate_cq(priv, cq);
            goto tx_err;
        }
        en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
        cq->buf->wqe_index = cpu_to_be16(0xffff);
        /* Configure ring */
        tx_ring = priv->tx_ring[i];
        err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
            i / priv->num_tx_rings_p_up);
        if (err) {
            en_err(priv, "Failed activating Tx ring %d\n", i);
            mlx4_en_deactivate_cq(priv, cq);
            goto tx_err;
        }
        /* Arm CQ for TX completions */
        mlx4_en_arm_cq(priv, cq);
        /* Set initial ownership of all Tx TXBBs to SW (1) */
        for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
            *((u32 *) (tx_ring->buf + j)) = INIT_OWNER_BIT;
        ++tx_index;
    }
    /* Configure port */
    err = mlx4_SET_PORT_general(mdev->dev, priv->port,
        priv->rx_mb_size,
        priv->prof->tx_pause,
        priv->prof->tx_ppp,
        priv->prof->rx_pause,
        priv->prof->rx_ppp);
    if (err) {
        en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
            priv->port, err);
        goto tx_err;
    }
    /* Set default qp number */
    err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
    if (err) {
        en_err(priv, "Failed setting default qp numbers\n");
        goto tx_err;
    }
    /* Init port */
    en_dbg(HW, priv, "Initializing port\n");
    err = mlx4_INIT_PORT(mdev->dev, priv->port);
    if (err) {
        en_err(priv, "Failed Initializing port\n");
        goto tx_err;
    }
    /* Attach rx QP to bradcast address */
    memset(&mc_list[10], 0xff, ETH_ALEN);
    mc_list[5] = priv->port; /* needed for B0 steering support */
    if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
        priv->port, 0, MLX4_PROT_ETH,
        &priv->broadcast_id))
        mlx4_warn(mdev, "Failed Attaching Broadcast\n");
    /* Must redo promiscuous mode setup. */
    priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
    /* Schedule multicast task to populate multicast list */
    queue_work(mdev->workqueue, &priv->rx_mode_task);
    priv->port_up = true;
    /* Enable the queues. */
    if_setdrvflagbits(dev, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
#ifdef CONFIG_DEBUG_FS
    mlx4_en_create_debug_files(priv);
#endif
    callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT,
        mlx4_en_watchdog_timeout, priv);
    return 0;
/* Error unwind: reverse order of the setup above. */
tx_err:
    while (tx_index--) {
        mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
        mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
    }
    mlx4_en_destroy_drop_qp(priv);
rss_err:
    mlx4_en_release_rss_steer(priv);
mac_err:
    mlx4_en_put_qp(priv);
cq_err:
    while (rx_index--)
        mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
    for (i = 0; i < priv->rx_ring_num; i++)
        mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
    return err; /* need to close devices */
}
/*
 * Take the port fully down, releasing everything mlx4_en_start_port
 * acquired: close the firmware port, drop promiscuous modes, detach all
 * unicast/multicast steering entries (including broadcast), destroy the
 * drop QP, deactivate TX then RX rings/CQs, release RSS QPs and the
 * ethernet QP, and stop the watchdog.
 *
 * Caller must hold mdev->state_lock.  Idempotent: returns immediately
 * if the port is already down.
 */
void mlx4_en_stop_port(if_t dev)
{
    struct mlx4_en_priv *priv = mlx4_netdev_priv(dev);
    struct mlx4_en_dev *mdev = priv->mdev;
    struct mlx4_en_addr_list *addr_list, *tmp;
    int i;
    u8 mc_list[16] = {0};
    if (!priv->port_up) {
        en_dbg(DRV, priv, "stop port called while port already down\n");
        return;
    }
#ifdef CONFIG_DEBUG_FS
    mlx4_en_delete_debug_files(priv);
#endif
    /* close port*/
    mlx4_CLOSE_PORT(mdev->dev, priv->port);
    /* Set port as not active */
    priv->port_up = false;
    priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
    /* Promsicuous mode */
    if (mdev->dev->caps.steering_mode ==
        MLX4_STEERING_MODE_DEVICE_MANAGED) {
        priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
            MLX4_EN_FLAG_MC_PROMISC);
        mlx4_flow_steer_promisc_remove(mdev->dev,
            priv->port,
            MLX4_FS_ALL_DEFAULT);
        mlx4_flow_steer_promisc_remove(mdev->dev,
            priv->port,
            MLX4_FS_MC_DEFAULT);
    } else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
        priv->flags &= ~MLX4_EN_FLAG_PROMISC;
        /* Disable promiscouos mode */
        mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
            priv->port);
        /* Disable Multicast promisc */
        if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
            mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
                priv->port);
            priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
        }
    }
    /* Detach All unicasts */
    list_for_each_entry(addr_list, &priv->curr_uc_list, list) {
        mlx4_en_uc_steer_release(priv, addr_list->addr,
            priv->rss_map.indir_qp.qpn,
            addr_list->reg_id);
    }
    mlx4_en_clear_uclist(dev);
    /* _safe variant: entries are freed while walking the list. */
    list_for_each_entry_safe(addr_list, tmp, &priv->curr_uc_list, list) {
        list_del(&addr_list->list);
        kfree(addr_list);
    }
    /* Detach All multicasts */
    memset(&mc_list[10], 0xff, ETH_ALEN);
    mc_list[5] = priv->port; /* needed for B0 steering support */
    mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
        MLX4_PROT_ETH, priv->broadcast_id);
    list_for_each_entry(addr_list, &priv->curr_mc_list, list) {
        memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
        mc_list[5] = priv->port;
        mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
            mc_list, MLX4_PROT_ETH, addr_list->reg_id);
    }
    mlx4_en_clear_mclist(dev);
    list_for_each_entry_safe(addr_list, tmp, &priv->curr_mc_list, list) {
        list_del(&addr_list->list);
        kfree(addr_list);
    }
    /* Flush multicast filter */
    mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
    mlx4_en_destroy_drop_qp(priv);
    /* Free TX Rings */
    for (i = 0; i < priv->tx_ring_num; i++) {
        mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
        mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
    }
    /* Brief delay before reclaiming TX mbufs; presumably lets
     * in-flight completions drain — TODO confirm. */
    msleep(10);
    for (i = 0; i < priv->tx_ring_num; i++)
        mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
    /* Free RSS qps */
    mlx4_en_release_rss_steer(priv);
    /* Unregister Mac address for the port */
    mlx4_en_put_qp(priv);
    mdev->mac_removed[priv->port] = 1;
    /* Free RX Rings */
    for (i = 0; i < priv->rx_ring_num; i++) {
        struct mlx4_en_cq *cq = priv->rx_cq[i];
        mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
        mlx4_en_deactivate_cq(priv, cq);
    }
    callout_stop(&priv->watchdog_timer);
    if_setdrvflagbits(dev, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}
/*
 * Watchdog worker.  If any TX ring has an armed watchdog_time that has
 * already passed, count a tx_timeout and bounce the port (stop + start)
 * under the state lock to recover a stuck queue.  Does nothing unless
 * the port is up and priv->blocked is set.
 */
static void mlx4_en_restart(struct work_struct *work)
{
    struct mlx4_en_priv *priv =
        container_of(work, struct mlx4_en_priv, watchdog_task);
    struct mlx4_en_dev *mdev = priv->mdev;
    if_t dev = priv->dev;
    struct mlx4_en_tx_ring *ring;
    int expired = 0;
    int i;

    if (priv->blocked == 0 || priv->port_up == 0)
        return;

    /* Scan TX rings for an armed, elapsed watchdog deadline. */
    for (i = 0; !expired && i < priv->tx_ring_num; i++) {
        int wd_time;

        ring = priv->tx_ring[i];
        wd_time = READ_ONCE(ring->watchdog_time);
        if (wd_time != 0 && time_after(ticks, ring->watchdog_time))
            expired = 1;
    }
    if (!expired)
        return;

    priv->port_stats.tx_timeout++;
    en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
    mutex_lock(&mdev->state_lock);
    if (priv->port_up) {
        mlx4_en_stop_port(dev);
        if (mlx4_en_start_port(dev))
            en_err(priv, "Failed restarting port %d\n", priv->port);
    }
    mutex_unlock(&mdev->state_lock);
}
/*
 * Reset all software statistics and the per-ring counters to zero.
 * On the PF, DUMP_ETH_STATS is first issued with its last argument set
 * to 1 (the periodic stats path passes 0), presumably requesting a
 * firmware-side counter reset as well.
 */
static void mlx4_en_clear_stats(if_t dev)
{
    struct mlx4_en_priv *priv = mlx4_netdev_priv(dev);
    struct mlx4_en_dev *mdev = priv->mdev;
    int ring;

    if (!mlx4_is_slave(mdev->dev))
        if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
            en_dbg(HW, priv, "Failed dumping statistics\n");

    memset(&priv->pstats, 0, sizeof(priv->pstats));
    memset(&priv->pkstats, 0, sizeof(priv->pkstats));
    memset(&priv->port_stats, 0, sizeof(priv->port_stats));
    memset(&priv->vport_stats, 0, sizeof(priv->vport_stats));

    for (ring = 0; ring < priv->tx_ring_num; ring++) {
        priv->tx_ring[ring]->bytes = 0;
        priv->tx_ring[ring]->packets = 0;
        priv->tx_ring[ring]->tx_csum = 0;
        priv->tx_ring[ring]->oversized_packets = 0;
    }
    for (ring = 0; ring < priv->rx_ring_num; ring++) {
        priv->rx_ring[ring]->bytes = 0;
        priv->rx_ring[ring]->packets = 0;
        priv->rx_ring[ring]->csum_ok = 0;
        priv->rx_ring[ring]->csum_none = 0;
    }
}
/*
 * ifnet init routine (if_setinitfn).  Clears statistics and starts the
 * port; refuses if the underlying device is down.  arg is the softc
 * (struct mlx4_en_priv *) installed via if_setsoftc.
 */
static void mlx4_en_open(void *arg)
{
    struct mlx4_en_priv *priv = arg;
    struct mlx4_en_dev *mdev = priv->mdev;
    if_t dev = priv->dev;

    mutex_lock(&mdev->state_lock);
    if (!mdev->device_up) {
        en_err(priv, "Cannot open - device down/disabled\n");
    } else {
        /* Reset HW statistics and SW counters before starting. */
        mlx4_en_clear_stats(dev);
        if (mlx4_en_start_port(dev) != 0)
            en_err(priv, "Failed starting port:%d\n", priv->port);
    }
    mutex_unlock(&mdev->state_lock);
}
/*
 * Destroy all TX/RX rings and CQs and the per-ring stat sysctls.
 * Safe to call on a partially-initialized priv: the tx_ring/tx_cq
 * pointer arrays themselves are kcalloc'd separately in
 * mlx4_en_init_netdev and may still be NULL, hence the extra
 * NULL-checks on the arrays in the TX loops only.
 * Note: the top-level conf sysctl context is deliberately NOT freed
 * here (see mlx4_en_destroy_netdev).
 */
void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
    int i;
#ifdef CONFIG_RFS_ACCEL
    if (priv->dev->rx_cpu_rmap) {
        free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
        priv->dev->rx_cpu_rmap = NULL;
    }
#endif
    for (i = 0; i < priv->tx_ring_num; i++) {
        if (priv->tx_ring && priv->tx_ring[i])
            mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
        if (priv->tx_cq && priv->tx_cq[i])
            mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
    }
    for (i = 0; i < priv->rx_ring_num; i++) {
        if (priv->rx_ring[i])
            mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
                priv->prof->rx_ring_size);
        if (priv->rx_cq[i])
            mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
    }
    if (priv->stat_sysctl != NULL)
        sysctl_ctx_free(&priv->stat_ctx);
}
/*
 * Allocate all RX and TX rings and CQs according to the port profile,
 * then (re)create the per-ring statistics sysctls.  Ring counts come
 * from priv->rx_ring_num / priv->tx_ring_num; sizes from the profile.
 *
 * Returns 0 on success.  On any allocation failure, everything created
 * so far is destroyed and -ENOMEM is returned regardless of which
 * sub-call failed.  NUMA node is hard-wired to 0 here.
 */
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
    struct mlx4_en_port_profile *prof = priv->prof;
    int i;
    int node = 0;
    /* Create rx Rings */
    for (i = 0; i < priv->rx_ring_num; i++) {
        if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
            prof->rx_ring_size, i, RX, node))
            goto err;
        if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
            prof->rx_ring_size, node))
            goto err;
    }
    /* Create tx Rings */
    for (i = 0; i < priv->tx_ring_num; i++) {
        if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
            prof->tx_ring_size, i, TX, node))
            goto err;
        if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
            prof->tx_ring_size, TXBB_SIZE, node, i))
            goto err;
    }
#ifdef CONFIG_RFS_ACCEL
    priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
    if (!priv->dev->rx_cpu_rmap)
        goto err;
#endif
    /* Re-create stat sysctls in case the number of rings changed. */
    mlx4_en_sysctl_stat(priv);
    return 0;
err:
    en_err(priv, "Failed to allocate NIC resources\n");
    /* Full sweep over all indices; destroy only what was created
     * (each slot is NULL-checked). */
    for (i = 0; i < priv->rx_ring_num; i++) {
        if (priv->rx_ring[i])
            mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
                prof->rx_ring_size);
        if (priv->rx_cq[i])
            mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
    }
    for (i = 0; i < priv->tx_ring_num; i++) {
        if (priv->tx_ring[i])
            mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
        if (priv->tx_cq[i])
            mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
    }
    priv->port_up = false;
    return -ENOMEM;
}
/*
 * Linux-style attribute descriptor for an en_port: pairs an attribute
 * name/mode with show/store callbacks (compat layer for sysfs-like
 * per-port attributes).
 */
struct en_port_attribute {
    struct attribute attr;
    ssize_t (*show)(struct en_port *, struct en_port_attribute *, char *buf);
    ssize_t (*store)(struct en_port *, struct en_port_attribute *, char *buf, size_t count);
};

/* Define a read-only port attribute variable named en_port_attr_<name>. */
#define PORT_ATTR_RO(_name) \
struct en_port_attribute en_port_attr_##_name = __ATTR_RO(_name)

/* Define a port attribute with explicit mode and show/store callbacks. */
#define EN_PORT_ATTR(_name, _mode, _show, _store) \
struct en_port_attribute en_port_attr_##_name = __ATTR(_name, _mode, _show, _store)
/*
 * Tear down and free the per-port network interface.  Ordering matters:
 * mark the device gone (blocks new ioctls), stop the port, detach from
 * the network stack, cancel and flush all deferred work, then free ring
 * resources, sysctl contexts, and finally the softc and ifnet.
 * Also used as the error-unwind path of mlx4_en_init_netdev, so every
 * step tolerates partially-initialized state.
 */
void mlx4_en_destroy_netdev(if_t dev)
{
    struct mlx4_en_priv *priv = mlx4_netdev_priv(dev);
    struct mlx4_en_dev *mdev = priv->mdev;
    en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
    /* don't allow more IOCTLs */
    priv->gone = 1;
    /* XXX wait a bit to allow IOCTL handlers to complete */
    pause("W", hz);
    if (priv->vlan_attach != NULL)
        EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
    if (priv->vlan_detach != NULL)
        EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);
    mutex_lock(&mdev->state_lock);
    mlx4_en_stop_port(dev);
    mutex_unlock(&mdev->state_lock);
    /* Unregister device - this will close the port if it was up */
    if (priv->registered)
        ether_ifdetach(dev);
    if (priv->allocated)
        mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
    cancel_delayed_work(&priv->stats_task);
    cancel_delayed_work(&priv->service_task);
    /* flush any pending task for this netdev */
    flush_workqueue(mdev->workqueue);
    callout_drain(&priv->watchdog_timer);
    /* Detach the netdev so tasks would not attempt to access it */
    mutex_lock(&mdev->state_lock);
    mdev->pndev[priv->port] = NULL;
    mutex_unlock(&mdev->state_lock);
    mlx4_en_free_resources(priv);
    /* freeing the sysctl conf cannot be called from within mlx4_en_free_resources */
    if (priv->conf_sysctl != NULL)
        sysctl_ctx_free(&priv->conf_ctx);
    kfree(priv->tx_ring);
    kfree(priv->tx_cq);
    kfree(priv);
    if_free(dev);
}
/*
 * Change the interface MTU, bouncing the port (stop + start) if it is
 * running so the new RX buffer sizing takes effect.
 *
 * Returns -EPERM for an out-of-range MTU (callers negate this to a
 * positive errno).  Once the MTU is accepted it always returns 0, even
 * if the restart fails: in that case the watchdog task is queued to
 * retry bringing the port back up.
 */
static int mlx4_en_change_mtu(if_t dev, int new_mtu)
{
    struct mlx4_en_priv *priv = mlx4_netdev_priv(dev);
    struct mlx4_en_dev *mdev = priv->mdev;
    int err = 0;
    en_dbg(DRV, priv, "Change MTU called - current:%u new:%u\n",
        (unsigned)if_getmtu(dev), (unsigned)new_mtu);
    if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
        en_err(priv, "Bad MTU size:%d, max %u.\n", new_mtu,
            priv->max_mtu);
        return -EPERM;
    }
    mutex_lock(&mdev->state_lock);
    if_setmtu(dev, new_mtu);
    if (if_getdrvflags(dev) & IFF_DRV_RUNNING) {
        if (!mdev->device_up) {
            /* NIC is probably restarting - let watchdog task reset
             * * the port */
            en_dbg(DRV, priv, "Change MTU called with card down!?\n");
        } else {
            mlx4_en_stop_port(dev);
            err = mlx4_en_start_port(dev);
            if (err) {
                en_err(priv, "Failed restarting port:%d\n",
                    priv->port);
                /* Recovery is delegated to the watchdog task. */
                queue_work(mdev->workqueue, &priv->watchdog_task);
            }
        }
    }
    mutex_unlock(&mdev->state_lock);
    return 0;
}
/*
 * Translate the queried port state into ifmedia flags: media type by
 * link speed (with a transceiver-type heuristic at 10G), plus FDX and
 * the configured pause options.  Returns bare IFM_ETHER when the last
 * known link state is down.
 */
static int mlx4_en_calc_media(struct mlx4_en_priv *priv)
{
    int media = IFM_ETHER;
    int xcvr;

    if (priv->last_link_state == MLX4_DEV_EVENT_PORT_DOWN)
        return (media);

    media |= IFM_FDX;
    xcvr = priv->port_state.transceiver;
    /* XXX I don't know all of the transceiver values. */
    switch (priv->port_state.link_speed) {
    case 100:
        media |= IFM_100_T;
        break;
    case 1000:
        media |= IFM_1000_T;
        break;
    case 10000:
        if (xcvr > 0 && xcvr <= 0xC)
            media |= IFM_10G_SR;
        else if (xcvr == 0x80 || xcvr == 0)
            media |= IFM_10G_CX4;
        break;
    case 40000:
        media |= IFM_40G_CR4;
        break;
    }
    if (priv->prof->tx_pause)
        media |= IFM_ETH_TXPAUSE;
    if (priv->prof->rx_pause)
        media |= IFM_ETH_RXPAUSE;
    return (media);
}
/*
 * ifmedia status callback: report link validity/activity from the last
 * recorded link state and the computed active media word.
 */
static void mlx4_en_media_status(if_t dev, struct ifmediareq *ifmr)
{
    struct mlx4_en_priv *priv = if_getsoftc(dev);

    ifmr->ifm_status = IFM_AVALID;
    if (priv->last_link_state != MLX4_DEV_EVENT_PORT_DOWN)
        ifmr->ifm_status |= IFM_ACTIVE;
    ifmr->ifm_active = mlx4_en_calc_media(priv);
}
/*
 * ifmedia change callback.  Only IFM_AUTO is accepted as a new media
 * selection (or re-selecting the currently active full-duplex type,
 * which is a no-op); anything else returns EINVAL.  The RX/TX pause
 * options, however, are honored: when they differ from the profile the
 * port general configuration is re-pushed to firmware.
 */
static int mlx4_en_media_change(if_t dev)
{
    struct mlx4_en_priv *priv = if_getsoftc(dev);
    struct ifmedia *ifm = &priv->media;
    int rx_pause = 0;
    int tx_pause = 0;
    int error = 0;

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return (EINVAL);

    switch (IFM_SUBTYPE(ifm->ifm_media)) {
    case IFM_AUTO:
        break;
    case IFM_10G_SR:
    case IFM_10G_CX4:
    case IFM_1000_T:
    case IFM_40G_CR4:
        /* Re-selecting the current active media is allowed. */
        if (IFM_SUBTYPE(ifm->ifm_media) ==
            IFM_SUBTYPE(mlx4_en_calc_media(priv)) &&
            (ifm->ifm_media & IFM_FDX) != 0)
            break;
        /* Fallthrough */
    default:
        printf("%s: Only auto media type\n", if_name(dev));
        return (EINVAL);
    }

    /* Allow user to set/clear pause */
    if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE)
        rx_pause = 1;
    if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
        tx_pause = 1;
    if (priv->prof->tx_pause != tx_pause ||
        priv->prof->rx_pause != rx_pause) {
        priv->prof->tx_pause = tx_pause;
        priv->prof->rx_pause = rx_pause;
        error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
            priv->rx_mb_size + ETHER_CRC_LEN, priv->prof->tx_pause,
            priv->prof->tx_ppp, priv->prof->rx_pause,
            priv->prof->rx_ppp);
    }
    return (error);
}
/*
 * ifnet ioctl handler.  Dispatches MTU changes, up/down transitions,
 * multicast list updates, media, capability toggles, module EEPROM
 * (SIOCGI2C) and RSS key/hash queries.  State-changing paths take
 * mdev->state_lock.  Returns positive errno values; negative kernel
 * return codes from the mlx4 core are negated at the call sites.
 */
static int mlx4_en_ioctl(if_t dev, u_long command, caddr_t data)
{
    struct mlx4_en_priv *priv;
    struct mlx4_en_dev *mdev;
    struct ifreq *ifr;
    int error;
    int mask;
    struct ifrsskey *ifrk;
    const u32 *key;
    struct ifrsshash *ifrh;
    u8 rss_mask;
    error = 0;
    mask = 0;
    priv = if_getsoftc(dev);
    /* check if detaching */
    if (priv == NULL || priv->gone != 0)
        return (ENXIO);
    mdev = priv->mdev;
    ifr = (struct ifreq *) data;
    switch (command) {
    case SIOCSIFMTU:
        /* negate: mlx4_en_change_mtu returns 0 or -errno */
        error = -mlx4_en_change_mtu(dev, ifr->ifr_mtu);
        break;
    case SIOCSIFFLAGS:
        if (if_getflags(dev) & IFF_UP) {
            if ((if_getdrvflags(dev) & IFF_DRV_RUNNING) == 0) {
                mutex_lock(&mdev->state_lock);
                mlx4_en_start_port(dev);
                mutex_unlock(&mdev->state_lock);
            } else {
                /* already running: just resync rx filters */
                mlx4_en_set_rx_mode(dev);
            }
        } else {
            mutex_lock(&mdev->state_lock);
            if (if_getdrvflags(dev) & IFF_DRV_RUNNING) {
                mlx4_en_stop_port(dev);
                if_link_state_change(dev, LINK_STATE_DOWN);
            }
            mutex_unlock(&mdev->state_lock);
        }
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        mlx4_en_set_rx_mode(dev);
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        error = ifmedia_ioctl(dev, ifr, &priv->media, command);
        break;
    case SIOCSIFCAP:
        /* Toggle each changed capability; TSO depends on TXCSUM,
         * so disabling a checksum offload also drops its TSO. */
        mutex_lock(&mdev->state_lock);
        mask = ifr->ifr_reqcap ^ if_getcapenable(dev);
        if (mask & IFCAP_TXCSUM) {
            if_togglecapenable(dev, IFCAP_TXCSUM);
            if_togglehwassist(dev, CSUM_TCP | CSUM_UDP | CSUM_IP);
            if (IFCAP_TSO4 & if_getcapenable(dev) &&
                !(IFCAP_TXCSUM & if_getcapenable(dev))) {
                mask &= ~IFCAP_TSO4;
                if_setcapenablebit(dev, 0, IFCAP_TSO4);
                if_sethwassistbits(dev, 0, CSUM_IP_TSO);
                if_printf(dev,
                    "tso4 disabled due to -txcsum.\n");
            }
        }
        if (mask & IFCAP_TXCSUM_IPV6) {
            if_togglecapenable(dev, IFCAP_TXCSUM_IPV6);
            if_togglehwassist(dev, (CSUM_UDP_IPV6 | CSUM_TCP_IPV6));
            if (IFCAP_TSO6 & if_getcapenable(dev) &&
                !(IFCAP_TXCSUM_IPV6 & if_getcapenable(dev))) {
                mask &= ~IFCAP_TSO6;
                if_setcapenablebit(dev, 0, IFCAP_TSO6);
                if_sethwassistbits(dev, 0, CSUM_IP6_TSO);
                if_printf(dev,
                    "tso6 disabled due to -txcsum6.\n");
            }
        }
        if (mask & IFCAP_RXCSUM)
            if_togglecapenable(dev, IFCAP_RXCSUM);
        if (mask & IFCAP_RXCSUM_IPV6)
            if_togglecapenable(dev, IFCAP_RXCSUM_IPV6);
        if (mask & IFCAP_TSO4) {
            if (!(IFCAP_TSO4 & if_getcapenable(dev)) &&
                !(IFCAP_TXCSUM & if_getcapenable(dev))) {
                if_printf(dev, "enable txcsum first.\n");
                error = EAGAIN;
                goto out;
            }
            if_togglecapenable(dev, IFCAP_TSO4);
            if_togglehwassist(dev, CSUM_IP_TSO);
        }
        if (mask & IFCAP_TSO6) {
            if (!(IFCAP_TSO6 & if_getcapenable(dev)) &&
                !(IFCAP_TXCSUM_IPV6 & if_getcapenable(dev))) {
                if_printf(dev, "enable txcsum6 first.\n");
                error = EAGAIN;
                goto out;
            }
            if_togglecapenable(dev, IFCAP_TSO6);
            if_togglehwassist(dev, CSUM_IP6_TSO);
        }
        if (mask & IFCAP_LRO)
            if_togglecapenable(dev, IFCAP_LRO);
        if (mask & IFCAP_VLAN_HWTAGGING)
            if_togglecapenable(dev, IFCAP_VLAN_HWTAGGING);
        if (mask & IFCAP_VLAN_HWFILTER)
            if_togglecapenable(dev, IFCAP_VLAN_HWFILTER);
        if (mask & IFCAP_WOL_MAGIC)
            if_togglecapenable(dev, IFCAP_WOL_MAGIC);
        if (if_getdrvflags(dev) & IFF_DRV_RUNNING)
            mlx4_en_start_port(dev);
out:
        mutex_unlock(&mdev->state_lock);
        VLAN_CAPABILITIES(dev);
        break;
    case SIOCGI2C: {
        struct ifi2creq i2c;
        error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
        if (error)
            break;
        if (i2c.len > sizeof(i2c.data)) {
            error = EINVAL;
            break;
        }
        /*
         * Note that we ignore i2c.addr here. The driver hardcodes
         * the address to 0x50, while standard expects it to be 0xA0.
         */
        error = mlx4_get_module_info(mdev->dev, priv->port,
            i2c.offset, i2c.len, i2c.data);
        if (error < 0) {
            error = -error;
            break;
        }
        error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
        break;
    }
    case SIOCGIFRSSKEY:
        ifrk = (struct ifrsskey *)data;
        ifrk->ifrk_func = RSS_FUNC_TOEPLITZ;
        mutex_lock(&mdev->state_lock);
        key = mlx4_en_get_rss_key(priv, &ifrk->ifrk_keylen);
        if (ifrk->ifrk_keylen > RSS_KEYLEN)
            error = EINVAL;
        else
            memcpy(ifrk->ifrk_key, key, ifrk->ifrk_keylen);
        mutex_unlock(&mdev->state_lock);
        break;
    case SIOCGIFRSSHASH:
        /* Map the device RSS mask to ifnet RSS_TYPE_* bits. */
        mutex_lock(&mdev->state_lock);
        rss_mask = mlx4_en_get_rss_mask(priv);
        mutex_unlock(&mdev->state_lock);
        ifrh = (struct ifrsshash *)data;
        ifrh->ifrh_func = RSS_FUNC_TOEPLITZ;
        ifrh->ifrh_types = 0;
        if (rss_mask & MLX4_RSS_IPV4)
            ifrh->ifrh_types |= RSS_TYPE_IPV4;
        if (rss_mask & MLX4_RSS_TCP_IPV4)
            ifrh->ifrh_types |= RSS_TYPE_TCP_IPV4;
        if (rss_mask & MLX4_RSS_IPV6)
            ifrh->ifrh_types |= RSS_TYPE_IPV6;
        if (rss_mask & MLX4_RSS_TCP_IPV6)
            ifrh->ifrh_types |= RSS_TYPE_TCP_IPV6;
        if (rss_mask & MLX4_RSS_UDP_IPV4)
            ifrh->ifrh_types |= RSS_TYPE_UDP_IPV4;
        if (rss_mask & MLX4_RSS_UDP_IPV6)
            ifrh->ifrh_types |= RSS_TYPE_UDP_IPV6;
        break;
    default:
        error = ether_ioctl(dev, command, data);
        break;
    }
    return (error);
}
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_port_profile *prof)
{
if_t dev;
struct mlx4_en_priv *priv;
uint32_t hwassist;
uint8_t dev_addr[ETHER_ADDR_LEN];
int err;
int i;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
dev = priv->dev = if_alloc(IFT_ETHER);
- if (dev == NULL) {
- en_err(priv, "Net device allocation failed\n");
- kfree(priv);
- return -ENOMEM;
- }
if_setsoftc(dev, priv);
if_initname(dev, "mlxen", (device_get_unit(
mdev->pdev->dev.bsddev) * MLX4_MAX_PORTS) + port - 1);
if_setmtu(dev, ETHERMTU);
if_setinitfn(dev, mlx4_en_open);
if_setflags(dev, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setioctlfn(dev, mlx4_en_ioctl);
if_settransmitfn(dev, mlx4_en_transmit);
if_setqflushfn(dev, mlx4_en_qflush);
if_setsendqlen(dev, prof->tx_ring_size);
/*
* Initialize driver private data
*/
priv->counter_index = 0xff;
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
callout_init(&priv->watchdog_timer, 1);
#ifdef CONFIG_RFS_ACCEL
INIT_LIST_HEAD(&priv->filters);
spin_lock_init(&priv->filters_lock);
#endif
priv->msg_enable = MLX4_EN_MSG_LEVEL;
priv->dev = dev;
priv->mdev = mdev;
priv->ddev = &mdev->pdev->dev;
priv->prof = prof;
priv->port = port;
priv->port_up = false;
priv->flags = prof->flags;
priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
priv->tx_ring_num = prof->tx_ring_num;
priv->tx_ring = kcalloc(MAX_TX_RINGS,
sizeof(struct mlx4_en_tx_ring *), GFP_KERNEL);
if (!priv->tx_ring) {
err = -ENOMEM;
goto out;
}
priv->tx_cq = kcalloc(sizeof(struct mlx4_en_cq *), MAX_TX_RINGS,
GFP_KERNEL);
if (!priv->tx_cq) {
err = -ENOMEM;
goto out;
}
priv->rx_ring_num = prof->rx_ring_num;
priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
priv->mac_index = -1;
priv->last_ifq_jiffies = 0;
priv->if_counters_rx_errors = 0;
priv->if_counters_rx_no_buffer = 0;
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
priv->dcbx_cap = DCB_CAP_DCBX_HOST;
priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
} else {
en_info(priv, "QoS disabled - no HW support\n");
dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
}
}
#endif
/* Query for default mac and max mtu */
priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
priv->mac = mdev->dev->caps.def_mac[priv->port];
if (ILLEGAL_MAC(priv->mac)) {
#if BITS_PER_LONG == 64
en_err(priv, "Port: %d, invalid mac burned: 0x%lx, quiting\n",
priv->port, priv->mac);
#elif BITS_PER_LONG == 32
en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
priv->port, priv->mac);
#endif
err = -EINVAL;
goto out;
}
mlx4_en_sysctl_conf(priv);
err = mlx4_en_alloc_resources(priv);
if (err)
goto out;
/* Allocate page for receive rings */
err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
if (err) {
en_err(priv, "Failed to allocate page for rx qps\n");
goto out;
}
priv->allocated = 1;
/*
* Set driver features
*/
if_setcapabilitiesbit(dev, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER |
IFCAP_LINKSTATE | IFCAP_JUMBO_MTU |
IFCAP_LRO | IFCAP_HWSTATS, 0);
if (mdev->LSO_support)
if_setcapabilitiesbit(dev, IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTSO, 0);
/* set TSO limits so that we don't have to drop TX packets */
if_sethwtsomax(dev, MLX4_EN_TX_MAX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) /* hdr */);
if_sethwtsomaxsegcount(dev, MLX4_EN_TX_MAX_MBUF_FRAGS - 1 /* hdr */);
if_sethwtsomaxsegsize(dev, MLX4_EN_TX_MAX_MBUF_SIZE);
if_setcapenable(dev, if_getcapabilities(dev));
hwassist = 0;
if (if_getcapenable(dev) & (IFCAP_TSO4 | IFCAP_TSO6))
hwassist |= CSUM_TSO;
if (if_getcapenable(dev) & IFCAP_TXCSUM)
hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
if (if_getcapenable(dev) & IFCAP_TXCSUM_IPV6)
hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
if_sethwassist(dev, hwassist);
/* Register for VLAN events */
priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
mlx4_en_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
mlx4_en_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);
mdev->pndev[priv->port] = dev;
priv->last_link_state = MLX4_DEV_EVENT_PORT_DOWN;
mlx4_en_set_default_moderation(priv);
/* Set default MAC */
for (i = 0; i < ETHER_ADDR_LEN; i++)
dev_addr[ETHER_ADDR_LEN - 1 - i] = (u8) (priv->mac >> (8 * i));
if_link_state_change(dev, LINK_STATE_DOWN);
ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
mlx4_en_media_change, mlx4_en_media_status);
ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_1000_T, 0, NULL);
ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_SR, 0, NULL);
ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_CX4, 0, NULL);
ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_40G_CR4, 0, NULL);
ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);
DEBUGNET_SET(dev, mlx4_en);
ether_ifattach(dev, dev_addr);
en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
priv->registered = 1;
en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
priv->rx_mb_size = if_getmtu(dev) + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_mb_size,
prof->tx_pause, prof->tx_ppp,
prof->rx_pause, prof->rx_ppp);
if (err) {
en_err(priv, "Failed setting port general configurations "
"for port %d, with error %d\n", priv->port, err);
goto out;
}
/* Init port */
en_warn(priv, "Initializing port\n");
err = mlx4_INIT_PORT(mdev->dev, priv->port);
if (err) {
en_err(priv, "Failed Initializing port\n");
goto out;
}
queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY);
return 0;
out:
mlx4_en_destroy_netdev(dev);
return err;
}
/*
 * Resize the RX and TX rings.  Requested sizes are rounded up to a
 * power of two and clamped to the driver min/max; if the effective
 * sizes equal the current ones this is a no-op.  Otherwise the port is
 * stopped (if up), all ring resources are freed and reallocated with
 * the new profile sizes, and the port is restarted.
 * Returns 0 or a negative errno (sysctl callers negate it).
 */
static int mlx4_en_set_ring_size(if_t dev,
    int rx_size, int tx_size)
{
    struct mlx4_en_priv *priv = mlx4_netdev_priv(dev);
    struct mlx4_en_dev *mdev = priv->mdev;
    int port_up = 0;    /* remember whether to restart the port */
    int err = 0;
    rx_size = roundup_pow_of_two(rx_size);
    rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
    rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
    tx_size = roundup_pow_of_two(tx_size);
    tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
    tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
    /* Compare against actual_size while up (rings may have been
     * truncated at activation), nominal size otherwise. */
    if (rx_size == (priv->port_up ?
        priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size) &&
        tx_size == priv->tx_ring[0]->size)
        return 0;
    mutex_lock(&mdev->state_lock);
    if (priv->port_up) {
        port_up = 1;
        mlx4_en_stop_port(dev);
    }
    mlx4_en_free_resources(priv);
    priv->prof->tx_ring_size = tx_size;
    priv->prof->rx_ring_size = rx_size;
    err = mlx4_en_alloc_resources(priv);
    if (err) {
        en_err(priv, "Failed reallocating port resources\n");
        goto out;
    }
    if (port_up) {
        err = mlx4_en_start_port(dev);
        if (err)
            en_err(priv, "Failed starting port\n");
    }
out:
    mutex_unlock(&mdev->state_lock);
    return err;
}
static int mlx4_en_set_rx_ring_size(SYSCTL_HANDLER_ARGS)
{
struct mlx4_en_priv *priv;
int size;
int error;
priv = arg1;
size = priv->prof->rx_ring_size;
error = sysctl_handle_int(oidp, &size, 0, req);
if (error || !req->newptr)
return (error);
error = -mlx4_en_set_ring_size(priv->dev, size,
priv->prof->tx_ring_size);
return (error);
}
static int mlx4_en_set_tx_ring_size(SYSCTL_HANDLER_ARGS)
{
struct mlx4_en_priv *priv;
int size;
int error;
priv = arg1;
size = priv->prof->tx_ring_size;
error = sysctl_handle_int(oidp, &size, 0, req);
if (error || !req->newptr)
return (error);
error = -mlx4_en_set_ring_size(priv->dev, priv->prof->rx_ring_size,
size);
return (error);
}
/*
 * Identify the plugged cable/transceiver module and fill in the
 * ethtool-style modinfo (SFF type and EEPROM length).  The first two
 * EEPROM bytes carry the module identifier and revision; QSFP+ rev
 * >= 3 uses the SFF-8636 layout instead of SFF-8436.
 * Returns 0, -EIO on a short read, or -EINVAL for an unknown module.
 */
static int mlx4_en_get_module_info(if_t dev,
    struct ethtool_modinfo *modinfo)
{
    struct mlx4_en_priv *priv = mlx4_netdev_priv(dev);
    struct mlx4_en_dev *mdev = priv->mdev;
    u8 id_rev[4];
    int ret;

    /* Read first 2 bytes to get Module & REV ID */
    ret = mlx4_get_module_info(mdev->dev, priv->port, 0, 2, id_rev);
    if (ret < 2) {
        en_err(priv, "Failed to read eeprom module first two bytes, error: 0x%x\n", -ret);
        return -EIO;
    }

    switch (id_rev[0]) {    /* module identifier */
    case MLX4_MODULE_ID_QSFP:
        modinfo->type = ETH_MODULE_SFF_8436;
        modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
        break;
    case MLX4_MODULE_ID_QSFP_PLUS:
        if (id_rev[1] >= 0x3) {    /* revision id */
            modinfo->type = ETH_MODULE_SFF_8636;
            modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
        } else {
            modinfo->type = ETH_MODULE_SFF_8436;
            modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
        }
        break;
    case MLX4_MODULE_ID_QSFP28:
        modinfo->type = ETH_MODULE_SFF_8636;
        modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
        break;
    case MLX4_MODULE_ID_SFP:
        modinfo->type = ETH_MODULE_SFF_8472;
        modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
        break;
    default:
        en_err(priv, "mlx4_en_get_module_info : Not recognized cable type\n");
        return -EINVAL;
    }
    return 0;
}
/*
 * Copy "ee->len" bytes of cable module EEPROM contents, starting at
 * "ee->offset", into "data". The firmware may return fewer bytes than
 * requested per call, so keep iterating until everything is read.
 * Returns 0 on success or -1 on a firmware read failure.
 */
static int mlx4_en_get_module_eeprom(if_t dev,
				     struct ethtool_eeprom *ee,
				     u8 *data)
{
	struct mlx4_en_priv *priv = mlx4_netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int off = ee->offset;
	int done = 0;
	int ret;

	if (ee->len == 0)
		return -EINVAL;

	memset(data, 0, ee->len);

	while (done < ee->len) {
		en_dbg(DRV, priv,
		    "mlx4_get_module_info i(%d) offset(%d) len(%d)\n",
		    done, off, ee->len - done);

		ret = mlx4_get_module_info(mdev->dev, priv->port,
		    off, ee->len - done, data + done);
		if (!ret) /* Done reading */
			return 0;
		if (ret < 0) {
			en_err(priv,
			    "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
			    done, off, ee->len - done, ret);
			return -1;
		}
		/* Advance by however many bytes firmware returned. */
		done += ret;
		off += ret;
	}
	return 0;
}
/*
 * Dump "len" bytes of EEPROM data to the console, 16 bytes per row,
 * each row prefixed with its starting byte offset.
 */
static void mlx4_en_print_eeprom(u8 *data, __u32 len)
{
	int i;
	int j = 0;
	int row = 0;
	const int NUM_OF_BYTES = 16;

	printf("\nOffset\t\tValues\n");
	printf("------\t\t------\n");
	while (row < len) {
		printf("0x%04x\t\t", row);
		/*
		 * Bound the inner loop by "len" as well: previously a
		 * length that was not a multiple of 16 caused reads
		 * past the end of the data buffer.
		 */
		for (i = 0; i < NUM_OF_BYTES && row < len; i++) {
			printf("%02x ", data[j]);
			row++;
			j++;
		}
		printf("\n");
	}
}
/* Read cable EEPROM module information by first inspecting the first
* two bytes to get the length and then read the rest of the information.
* The information is printed to dmesg. */
static int mlx4_en_read_eeprom(SYSCTL_HANDLER_ARGS)
{
u8* data;
int error;
int result = 0;
struct mlx4_en_priv *priv;
struct ifnet *dev;
struct ethtool_modinfo modinfo;
struct ethtool_eeprom ee;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || !req->newptr)
return (error);
if (result == 1) {
priv = arg1;
dev = priv->dev;
data = kmalloc(PAGE_SIZE, GFP_KERNEL);
error = mlx4_en_get_module_info(dev, &modinfo);
if (error) {
en_err(priv,
"mlx4_en_get_module_info returned with error - FAILED (0x%x)\n",
-error);
goto out;
}
ee.len = modinfo.eeprom_len;
ee.offset = 0;
error = mlx4_en_get_module_eeprom(dev, &ee, data);
if (error) {
en_err(priv,
"mlx4_en_get_module_eeprom returned with error - FAILED (0x%x)\n",
-error);
/* Continue printing partial information in case of an error */
}
/* EEPROM information will be printed in dmesg */
mlx4_en_print_eeprom(data, ee.len);
out:
kfree(data);
}
/* Return zero to prevent sysctl failure. */
return (0);
}
static int mlx4_en_set_tx_ppp(SYSCTL_HANDLER_ARGS)
{
struct mlx4_en_priv *priv;
int ppp;
int error;
priv = arg1;
ppp = priv->prof->tx_ppp;
error = sysctl_handle_int(oidp, &ppp, 0, req);
if (error || !req->newptr)
return (error);
if (ppp > 0xff || ppp < 0)
return (-EINVAL);
priv->prof->tx_ppp = ppp;
error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
priv->rx_mb_size + ETHER_CRC_LEN,
priv->prof->tx_pause,
priv->prof->tx_ppp,
priv->prof->rx_pause,
priv->prof->rx_ppp);
return (error);
}
/*
 * Sysctl handler for hw.mlxen<N>.conf.rx_ppp.
 * Reads or updates the RX per-priority pause bitmask. Turning RX PPP
 * on or off changes the number of TX queues needed, which requires a
 * full stop/free/realloc/start cycle of the port under state_lock;
 * merely changing which priorities are paused only needs a
 * SET_PORT_general firmware command.
 */
static int mlx4_en_set_rx_ppp(SYSCTL_HANDLER_ARGS)
{
	struct mlx4_en_priv *priv;
	struct mlx4_en_dev *mdev;
	int ppp;
	int error;
	int port_up;

	port_up = 0;
	priv = arg1;
	mdev = priv->mdev;
	ppp = priv->prof->rx_ppp;
	error = sysctl_handle_int(oidp, &ppp, 0, req);
	if (error || !req->newptr)
		return (error);
	/* Only 8 priorities exist; reject anything outside 0..0xff. */
	if (ppp > 0xff || ppp < 0)
		return (-EINVAL);
	/* See if we have to change the number of tx queues. */
	if (!ppp != !priv->prof->rx_ppp) {
		/* Enable/disable transition: rebuild port resources. */
		mutex_lock(&mdev->state_lock);
		if (priv->port_up) {
			port_up = 1;
			mlx4_en_stop_port(priv->dev);
		}
		mlx4_en_free_resources(priv);
		priv->prof->rx_ppp = ppp;
		error = -mlx4_en_alloc_resources(priv);
		if (error)
			en_err(priv, "Failed reallocating port resources\n");
		/* Only restart the port if it was up and realloc worked. */
		if (error == 0 && port_up) {
			error = -mlx4_en_start_port(priv->dev);
			if (error)
				en_err(priv, "Failed starting port\n");
		}
		mutex_unlock(&mdev->state_lock);
		return (error);
	}
	/* Same on/off state: just reprogram the pause settings. */
	priv->prof->rx_ppp = ppp;
	error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
	    priv->rx_mb_size + ETHER_CRC_LEN,
	    priv->prof->tx_pause,
	    priv->prof->tx_ppp,
	    priv->prof->rx_pause,
	    priv->prof->rx_ppp);
	return (error);
}
/*
 * Create the hw.mlxen<N> sysctl tree and populate its "conf" subtree
 * with ring-size, flow-control, coalescing and EEPROM-dump knobs.
 * Called once at attach time; the nodes live in priv->conf_ctx.
 */
static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv)
{
	if_t dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *node;
	struct sysctl_oid_list *node_list;
	struct sysctl_oid *coal;
	struct sysctl_oid_list *coal_list;
	const char *pnameunit;

	dev = priv->dev;
	ctx = &priv->conf_ctx;
	pnameunit = device_get_nameunit(priv->mdev->pdev->dev.bsddev);

	sysctl_ctx_init(ctx);
	/* Root node: hw.<ifname> */
	priv->conf_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_hw),
	    OID_AUTO, if_name(dev), CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
	    "mlx4 10gig ethernet");
	node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->conf_sysctl), OID_AUTO,
	    "conf", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Configuration");
	node_list = SYSCTL_CHILDREN(node);

	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "msg_enable",
	    CTLFLAG_RW, &priv->msg_enable, 0,
	    "Driver message enable bitfield");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_rings",
	    CTLFLAG_RD, &priv->rx_ring_num, 0,
	    "Number of receive rings");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_rings",
	    CTLFLAG_RD, &priv->tx_ring_num, 0,
	    "Number of transmit rings");
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_size",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_set_rx_ring_size, "I", "Receive ring size");
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_size",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_set_tx_ring_size, "I", "Transmit ring size");
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_ppp",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_set_tx_ppp, "I", "TX Per-priority pause");
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_ppp",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_set_rx_ppp, "I", "RX Per-priority pause");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "port_num",
	    CTLFLAG_RD, &priv->port, 0,
	    "Port Number");
	SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO, "device_name",
	    CTLFLAG_RD, __DECONST(void *, pnameunit), 0,
	    "PCI device name");

	/* Add coalescer configuration. */
	coal = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO,
	    "coalesce", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "Interrupt coalesce configuration");
	coal_list = SYSCTL_CHILDREN(coal);
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_low",
	    CTLFLAG_RW, &priv->pkt_rate_low, 0,
	    "Packets per-second for minimum delay");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_low",
	    CTLFLAG_RW, &priv->rx_usecs_low, 0,
	    "Minimum RX delay in micro-seconds");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_high",
	    CTLFLAG_RW, &priv->pkt_rate_high, 0,
	    "Packets per-second for maximum delay");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_high",
	    CTLFLAG_RW, &priv->rx_usecs_high, 0,
	    "Maximum RX delay in micro-seconds");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "sample_interval",
	    CTLFLAG_RW, &priv->sample_interval, 0,
	    "adaptive frequency in units of HZ ticks");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "adaptive_rx_coal",
	    CTLFLAG_RW, &priv->adaptive_rx_coal, 0,
	    "Enable adaptive rx coalescing");

	/* EEPROM support */
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "eeprom_info",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_read_eeprom, "I", "EEPROM information");
}
/*
 * Populate the hw.mlxen<N>.stat subtree with port-wide, protocol and
 * per-ring statistics counters. The counters themselves are updated
 * elsewhere; the sysctl nodes just export their addresses read-only.
 * The nodes live in priv->stat_ctx so they can be torn down separately
 * from the "conf" subtree.
 */
static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *node_list;
	struct sysctl_oid *ring_node;
	struct sysctl_oid_list *ring_list;
	struct mlx4_en_tx_ring *tx_ring;
	struct mlx4_en_rx_ring *rx_ring;
	char namebuf[128];
	int i;

	ctx = &priv->stat_ctx;
	sysctl_ctx_init(ctx);
	priv->stat_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->conf_sysctl), OID_AUTO,
	    "stat", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Statistics");
	node_list = SYSCTL_CHILDREN(priv->stat_sysctl);

#ifdef MLX4_EN_PERF_STAT
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_poll", CTLFLAG_RD,
	    &priv->pstats.tx_poll, "TX Poll calls");
	SYSCTL_ADD_QUAD(ctx, node_list, OID_AUTO, "tx_pktsz_avg", CTLFLAG_RD,
	    &priv->pstats.tx_pktsz_avg, "TX average packet size");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "inflight_avg", CTLFLAG_RD,
	    &priv->pstats.inflight_avg, "TX average packets in-flight");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_coal_avg", CTLFLAG_RD,
	    &priv->pstats.tx_coal_avg, "TX average coalesced completions");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_coal_avg", CTLFLAG_RD,
	    &priv->pstats.rx_coal_avg, "RX average coalesced completions");
#endif

	/* Driver-level port statistics. */
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tso_packets", CTLFLAG_RD,
	    &priv->port_stats.tso_packets, 0, "TSO packets sent");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "queue_stopped", CTLFLAG_RD,
	    &priv->port_stats.queue_stopped, 0, "Queue full");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "wake_queue", CTLFLAG_RD,
	    &priv->port_stats.wake_queue, 0, "Queue resumed after full");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_timeout", CTLFLAG_RD,
	    &priv->port_stats.tx_timeout, 0, "Transmit timeouts");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_oversized_packets", CTLFLAG_RD,
	    &priv->port_stats.oversized_packets, 0, "TX oversized packets, m_defrag failed");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_alloc_failed", CTLFLAG_RD,
	    &priv->port_stats.rx_alloc_failed, 0, "RX failed to allocate mbuf");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_chksum_good", CTLFLAG_RD,
	    &priv->port_stats.rx_chksum_good, 0, "RX checksum offload success");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_chksum_none", CTLFLAG_RD,
	    &priv->port_stats.rx_chksum_none, 0, "RX without checksum offload");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_chksum_offload",
	    CTLFLAG_RD, &priv->port_stats.tx_chksum_offload, 0,
	    "TX checksum offloads");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "defrag_attempts",
	    CTLFLAG_RD, &priv->port_stats.defrag_attempts, 0,
	    "Oversized chains defragged");

	/* Could strdup the names and add in a loop. This is simpler. */
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_bytes", CTLFLAG_RD,
	    &priv->pkstats.rx_bytes, 0, "RX Bytes");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_packets, 0, "RX packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_multicast_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_multicast_packets, 0, "RX Multicast Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_broadcast_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_broadcast_packets, 0, "RX Broadcast Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_errors, 0, "RX Errors");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_dropped", CTLFLAG_RD,
	    &priv->pkstats.rx_dropped, 0, "RX Dropped");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_length_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_length_errors, 0, "RX Length Errors");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_over_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_over_errors, 0, "RX Over Errors");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_crc_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_crc_errors, 0, "RX CRC Errors");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_jabbers", CTLFLAG_RD,
	    &priv->pkstats.rx_jabbers, 0, "RX Jabbers");

	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_in_range_length_error", CTLFLAG_RD,
	    &priv->pkstats.rx_in_range_length_error, 0, "RX IN_Range Length Error");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_out_range_length_error",
	    CTLFLAG_RD, &priv->pkstats.rx_out_range_length_error, 0,
	    "RX Out Range Length Error");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_lt_64_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_lt_64_bytes_packets, 0, "RX Lt 64 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_127_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_127_bytes_packets, 0, "RX 127 bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_255_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_255_bytes_packets, 0, "RX 255 bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_511_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_511_bytes_packets, 0, "RX 511 bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1023_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_1023_bytes_packets, 0, "RX 1023 bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1518_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_1518_bytes_packets, 0, "RX 1518 bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1522_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_1522_bytes_packets, 0, "RX 1522 bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1548_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_1548_bytes_packets, 0, "RX 1548 bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_gt_1548_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_gt_1548_bytes_packets, 0,
	    "RX Greater Then 1548 bytes Packets");

	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_packets, 0, "TX packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_bytes", CTLFLAG_RD,
	    &priv->pkstats.tx_bytes, 0, "TX Bytes");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_multicast_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_multicast_packets, 0, "TX Multicast Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_broadcast_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_broadcast_packets, 0, "TX Broadcast Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_errors", CTLFLAG_RD,
	    &priv->pkstats.tx_errors, 0, "TX Errors");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_dropped", CTLFLAG_RD,
	    &priv->pkstats.tx_dropped, 0, "TX Dropped");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_lt_64_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_lt_64_bytes_packets, 0, "TX Less Then 64 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_127_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_127_bytes_packets, 0, "TX 127 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_255_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_255_bytes_packets, 0, "TX 255 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_511_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_511_bytes_packets, 0, "TX 511 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1023_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_1023_bytes_packets, 0, "TX 1023 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1518_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_1518_bytes_packets, 0, "TX 1518 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1522_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_1522_bytes_packets, 0, "TX 1522 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1548_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_1548_bytes_packets, 0, "TX 1548 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_gt_1548_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_gt_1548_bytes_packets, 0,
	    "TX Greater Then 1548 Bytes Packets");

	/* Per-TX-ring counters: hw.mlxen<N>.stat.tx_ring<i>.* */
	for (i = 0; i < priv->tx_ring_num; i++) {
		tx_ring = priv->tx_ring[i];
		snprintf(namebuf, sizeof(namebuf), "tx_ring%d", i);
		ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Ring");
		ring_list = SYSCTL_CHILDREN(ring_node);
		SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "packets",
		    CTLFLAG_RD, &tx_ring->packets, 0, "TX packets");
		SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "bytes",
		    CTLFLAG_RD, &tx_ring->bytes, 0, "TX bytes");
		SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "tso_packets",
		    CTLFLAG_RD, &tx_ring->tso_packets, 0, "TSO packets");
		SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "defrag_attempts",
		    CTLFLAG_RD, &tx_ring->defrag_attempts, 0,
		    "Oversized chains defragged");
	}

	/* Per-RX-ring counters: hw.mlxen<N>.stat.rx_ring<i>.* */
	for (i = 0; i < priv->rx_ring_num; i++) {
		rx_ring = priv->rx_ring[i];
		snprintf(namebuf, sizeof(namebuf), "rx_ring%d", i);
		ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Ring");
		ring_list = SYSCTL_CHILDREN(ring_node);
		SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "packets",
		    CTLFLAG_RD, &rx_ring->packets, 0, "RX packets");
		SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "bytes",
		    CTLFLAG_RD, &rx_ring->bytes, 0, "RX bytes");
		SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "error",
		    CTLFLAG_RD, &rx_ring->errors, 0, "RX soft errors");
	}
}
#ifdef DEBUGNET
/*
 * Debugnet setup callback: report how many RX rings, clusters and what
 * cluster size the debugnet framework should provision for this port.
 */
static void
mlx4_en_debugnet_init(if_t dev, int *nrxr, int *ncl, int *clsize)
{
	struct mlx4_en_priv *priv = if_getsoftc(dev);

	/* Take the state lock so rx_ring_num is stable while we read it. */
	mutex_lock(&priv->mdev->state_lock);
	*nrxr = priv->rx_ring_num;
	*ncl = DEBUGNET_MAX_IN_FLIGHT;
	*clsize = MCLBYTES;
	mutex_unlock(&priv->mdev->state_lock);
}
/* Debugnet event callback; this driver needs no special handling. */
static void
mlx4_en_debugnet_event(if_t dev, enum debugnet_ev event)
{
}
/*
 * Debugnet transmit callback: send one mbuf chain on TX ring 0.
 * Refuses to transmit unless the interface is running, not blocked,
 * and the link is up. Frees the mbuf on transmit failure.
 */
static int
mlx4_en_debugnet_transmit(if_t dev, struct mbuf *m)
{
	struct mlx4_en_priv *priv = if_getsoftc(dev);
	int err;

	if (!priv->link_state ||
	    (if_getdrvflags(dev) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (ENOENT);

	err = mlx4_en_xmit(priv, 0, &m);
	if (err != 0 && m != NULL)
		m_freem(m);
	return (err);
}
/*
 * Debugnet poll callback: service pending device interrupts so RX/TX
 * completions make progress while the kernel is in a panic context.
 */
static int
mlx4_en_debugnet_poll(if_t dev, int count)
{
	struct mlx4_en_priv *priv = if_getsoftc(dev);

	if (!priv->link_state ||
	    (if_getdrvflags(dev) & IFF_DRV_RUNNING) == 0)
		return (ENOENT);

	mlx4_poll_interrupts(priv->mdev->dev);
	return (0);
}
#endif /* DEBUGNET */
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
index 602a94345e50..36a2a37a6e8b 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
@@ -1,5034 +1,5028 @@
/*-
* Copyright (c) 2015-2021 Mellanox Technologies. All rights reserved.
* Copyright (c) 2022 NVIDIA corporation & affiliates.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "opt_kern_tls.h"
#include "opt_rss.h"
#include "opt_ratelimit.h"
#include <dev/mlx5/mlx5_en/en.h>
#include <sys/eventhandler.h>
#include <sys/sockio.h>
#include <machine/atomic.h>
#include <net/debugnet.h>
static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs);
static if_snd_tag_query_t mlx5e_ul_snd_tag_query;
static if_snd_tag_free_t mlx5e_ul_snd_tag_free;
/*
 * Per-channel creation parameters: one receive queue, one send queue
 * and their respective completion queues.
 */
struct mlx5e_channel_param {
	struct mlx5e_rq_param rq;
	struct mlx5e_sq_param sq;
	struct mlx5e_cq_param rx_cq;
	struct mlx5e_cq_param tx_cq;
};
/* One entry of the PTYS-protocol-bit to ifmedia mapping tables below. */
struct media {
	u32 subtype;	/* IFM_* media subtype */
	u64 baudrate;	/* link speed in bits per second */
};
/*
 * Legacy (non-extended) PTYS protocol table: indexed by the bit number
 * of the operational protocol reported by firmware.
 */
static const struct media mlx5e_mode_table[MLX5E_LINK_SPEEDS_NUMBER] =
{
	[MLX5E_1000BASE_CX_SGMII] = {
		.subtype = IFM_1000_CX_SGMII,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_1000BASE_KX] = {
		.subtype = IFM_1000_KX,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_10GBASE_CX4] = {
		.subtype = IFM_10G_CX4,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_KX4] = {
		.subtype = IFM_10G_KX4,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_KR] = {
		.subtype = IFM_10G_KR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_20GBASE_KR2] = {
		.subtype = IFM_20G_KR2,
		.baudrate = IF_Gbps(20ULL),
	},
	[MLX5E_40GBASE_CR4] = {
		.subtype = IFM_40G_CR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_KR4] = {
		.subtype = IFM_40G_KR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_56GBASE_R4] = {
		.subtype = IFM_56G_R4,
		.baudrate = IF_Gbps(56ULL),
	},
	[MLX5E_10GBASE_CR] = {
		.subtype = IFM_10G_CR1,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_SR] = {
		.subtype = IFM_10G_SR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_ER_LR] = {
		.subtype = IFM_10G_ER,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_40GBASE_SR4] = {
		.subtype = IFM_40G_SR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_LR4_ER4] = {
		.subtype = IFM_40G_LR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_100GBASE_CR4] = {
		.subtype = IFM_100G_CR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_SR4] = {
		.subtype = IFM_100G_SR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_KR4] = {
		.subtype = IFM_100G_KR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_LR4] = {
		.subtype = IFM_100G_LR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100BASE_TX] = {
		.subtype = IFM_100_TX,
		.baudrate = IF_Mbps(100ULL),
	},
	[MLX5E_1000BASE_T] = {
		.subtype = IFM_1000_T,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_10GBASE_T] = {
		.subtype = IFM_10G_T,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_25GBASE_CR] = {
		.subtype = IFM_25G_CR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GBASE_KR] = {
		.subtype = IFM_25G_KR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GBASE_SR] = {
		.subtype = IFM_25G_SR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_50GBASE_CR2] = {
		.subtype = IFM_50G_CR2,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GBASE_KR2] = {
		.subtype = IFM_50G_KR2,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GBASE_KR4] = {
		.subtype = IFM_50G_KR4,
		.baudrate = IF_Gbps(50ULL),
	},
};
/*
 * Extended PTYS protocol table: indexed by the operational protocol bit
 * number and by the cable type reported via PDDR. Entries left zero
 * fall back to the MLX5E_CABLE_TYPE_UNKNOWN row (see
 * mlx5e_update_carrier()).
 */
static const struct media mlx5e_ext_mode_table[MLX5E_EXT_LINK_SPEEDS_NUMBER][MLX5E_CABLE_TYPE_NUMBER] =
{
	/**/
	[MLX5E_SGMII_100M][MLX5E_CABLE_TYPE_UNKNOWN] = {
		.subtype = IFM_100_SGMII,
		.baudrate = IF_Mbps(100),
	},

	/**/
	[MLX5E_1000BASE_X_SGMII][MLX5E_CABLE_TYPE_UNKNOWN] = {
		.subtype = IFM_1000_CX,
		.baudrate = IF_Mbps(1000),
	},
	[MLX5E_1000BASE_X_SGMII][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
		.subtype = IFM_1000_SX,
		.baudrate = IF_Mbps(1000),
	},

	/**/
	[MLX5E_5GBASE_R][MLX5E_CABLE_TYPE_UNKNOWN] = {
		.subtype = IFM_5000_KR,
		.baudrate = IF_Mbps(5000),
	},
	[MLX5E_5GBASE_R][MLX5E_CABLE_TYPE_TWISTED_PAIR] = {
		.subtype = IFM_5000_T,
		.baudrate = IF_Mbps(5000),
	},

	/**/
	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_CABLE_TYPE_UNKNOWN] = {
		.subtype = IFM_10G_KR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_CABLE_TYPE_PASSIVE_COPPER] = {
		.subtype = IFM_10G_CR1,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
		.subtype = IFM_10G_SR,
		.baudrate = IF_Gbps(10ULL),
	},

	/**/
	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_CABLE_TYPE_UNKNOWN] = {
		.subtype = IFM_40G_KR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_CABLE_TYPE_PASSIVE_COPPER] = {
		.subtype = IFM_40G_CR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
		.subtype = IFM_40G_SR4,
		.baudrate = IF_Gbps(40ULL),
	},

	/**/
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CABLE_TYPE_UNKNOWN] = {
		.subtype = IFM_25G_KR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CABLE_TYPE_PASSIVE_COPPER] = {
		.subtype = IFM_25G_CR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
		.subtype = IFM_25G_SR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CABLE_TYPE_TWISTED_PAIR] = {
		.subtype = IFM_25G_T,
		.baudrate = IF_Gbps(25ULL),
	},

	/**/
	[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_CABLE_TYPE_UNKNOWN] = {
		.subtype = IFM_50G_KR2,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_CABLE_TYPE_PASSIVE_COPPER] = {
		.subtype = IFM_50G_CR2,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
		.subtype = IFM_50G_SR2,
		.baudrate = IF_Gbps(50ULL),
	},

	/**/
	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_CABLE_TYPE_UNKNOWN] = {
		.subtype = IFM_50G_KR_PAM4,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_CABLE_TYPE_PASSIVE_COPPER] = {
		.subtype = IFM_50G_CP,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
		.subtype = IFM_50G_SR,
		.baudrate = IF_Gbps(50ULL),
	},

	/**/
	[MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_CABLE_TYPE_UNKNOWN] = {
		.subtype = IFM_100G_KR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_CABLE_TYPE_PASSIVE_COPPER] = {
		.subtype = IFM_100G_CR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
		.subtype = IFM_100G_SR4,
		.baudrate = IF_Gbps(100ULL),
	},

	/**/
	[MLX5E_100GAUI_1_100GBASE_CR_KR][MLX5E_CABLE_TYPE_UNKNOWN] = {
		.subtype = IFM_100G_KR_PAM4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GAUI_1_100GBASE_CR_KR][MLX5E_CABLE_TYPE_PASSIVE_COPPER] = {
		.subtype = IFM_100G_CR_PAM4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GAUI_1_100GBASE_CR_KR][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
		.subtype = IFM_100G_SR2, /* XXX */
		.baudrate = IF_Gbps(100ULL),
	},

	/**/
	[MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_CABLE_TYPE_UNKNOWN] = {
		.subtype = IFM_100G_KR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_CABLE_TYPE_PASSIVE_COPPER] = {
		.subtype = IFM_100G_CP2,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
		.subtype = IFM_100G_SR2,
		.baudrate = IF_Gbps(100ULL),
	},

	/**/
	[MLX5E_200GAUI_2_200GBASE_CR2_KR2][MLX5E_CABLE_TYPE_UNKNOWN] = {
		.subtype = IFM_200G_KR4_PAM4, /* XXX */
		.baudrate = IF_Gbps(200ULL),
	},
	[MLX5E_200GAUI_2_200GBASE_CR2_KR2][MLX5E_CABLE_TYPE_PASSIVE_COPPER] = {
		.subtype = IFM_200G_CR4_PAM4, /* XXX */
		.baudrate = IF_Gbps(200ULL),
	},
	[MLX5E_200GAUI_2_200GBASE_CR2_KR2][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
		.subtype = IFM_200G_SR4, /* XXX */
		.baudrate = IF_Gbps(200ULL),
	},

	/**/
	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_CABLE_TYPE_UNKNOWN] = {
		.subtype = IFM_200G_KR4_PAM4,
		.baudrate = IF_Gbps(200ULL),
	},
	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_CABLE_TYPE_PASSIVE_COPPER] = {
		.subtype = IFM_200G_CR4_PAM4,
		.baudrate = IF_Gbps(200ULL),
	},
	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
		.subtype = IFM_200G_SR4,
		.baudrate = IF_Gbps(200ULL),
	},

	/**/
	[MLX5E_400GAUI_8][MLX5E_CABLE_TYPE_UNKNOWN] = {
		.subtype = IFM_400G_LR8, /* XXX */
		.baudrate = IF_Gbps(400ULL),
	},

	/**/
	[MLX5E_400GAUI_4_400GBASE_CR4_KR4][MLX5E_CABLE_TYPE_UNKNOWN] = {
		.subtype = IFM_400G_LR8, /* XXX */
		.baudrate = IF_Gbps(400ULL),
	},
};
/* Method table for unlimited (non-rate-limited) send tags. */
static const struct if_snd_tag_sw mlx5e_ul_snd_tag_sw = {
	.snd_tag_query = mlx5e_ul_snd_tag_query,
	.snd_tag_free = mlx5e_ul_snd_tag_free,
	.type = IF_SND_TAG_TYPE_UNLIMITED
};

/* Register this driver with the debugnet framework. */
DEBUGNET_DEFINE(mlx5_en);

/* Malloc tag for all mlx5en(4) allocations. */
MALLOC_DEFINE(M_MLX5EN, "MLX5EN", "MLX5 Ethernet");
/*
 * Query the current port state and operational protocol from firmware
 * and propagate the result to the network stack: link state, baudrate
 * and the active ifmedia word.
 */
static void
mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
	u32 eth_proto_oper;
	int error;
	u8 i;
	u8 cable_type;
	u8 port_state;
	u8 is_er_type;
	bool ext;
	struct media media_entry = {};

	port_state = mlx5_query_vport_state(mdev,
	    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

	if (port_state == VPORT_STATE_UP) {
		priv->media_status_last |= IFM_ACTIVE;
	} else {
		/* Link is down: report it and skip the media lookup. */
		priv->media_status_last &= ~IFM_ACTIVE;
		priv->media_active_last = IFM_ETHER;
		if_link_state_change(priv->ifp, LINK_STATE_DOWN);
		return;
	}

	error = mlx5_query_port_ptys(mdev, out, sizeof(out),
	    MLX5_PTYS_EN, 1);
	if (error) {
		priv->media_active_last = IFM_ETHER;
		if_setbaudrate(priv->ifp, 1);
		mlx5_en_err(priv->ifp, "query port ptys failed: 0x%x\n",
		    error);
		return;
	}

	ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
	eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
	    eth_proto_oper);

	/* The bit index of the operational protocol selects the table row. */
	i = ilog2(eth_proto_oper);

	if (ext) {
		error = mlx5_query_pddr_cable_type(mdev, 1, &cable_type);
		if (error != 0) {
			/* use fallback entry */
			media_entry = mlx5e_ext_mode_table[i][MLX5E_CABLE_TYPE_UNKNOWN];
			mlx5_en_err(priv->ifp,
			    "query port pddr failed: %d\n", error);
		} else {
			media_entry = mlx5e_ext_mode_table[i][cable_type];
			/* check if we should use fallback entry */
			if (media_entry.subtype == 0)
				media_entry = mlx5e_ext_mode_table[i][MLX5E_CABLE_TYPE_UNKNOWN];
		}
	} else {
		media_entry = mlx5e_mode_table[i];
	}

	if (media_entry.subtype == 0) {
		mlx5_en_err(priv->ifp,
		    "Could not find operational media subtype\n");
		return;
	}

	switch (media_entry.subtype) {
	case IFM_10G_ER:
		/* Use the cable range info to tell ER and LR apart. */
		error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type);
		if (error != 0) {
			mlx5_en_err(priv->ifp,
			    "query port pddr failed: %d\n", error);
		}
		if (error != 0 || is_er_type == 0)
			media_entry.subtype = IFM_10G_LR;
		break;
	case IFM_40G_LR4:
		error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type);
		if (error != 0) {
			mlx5_en_err(priv->ifp,
			    "query port pddr failed: %d\n", error);
		}
		if (error == 0 && is_er_type != 0)
			media_entry.subtype = IFM_40G_ER4;
		break;
	}
	priv->media_active_last = media_entry.subtype | IFM_ETHER | IFM_FDX;
	if_setbaudrate(priv->ifp, media_entry.baudrate);

	if_link_state_change(priv->ifp, LINK_STATE_UP);
}
/*
 * ifmedia status callback: report the link state and media word last
 * recorded by mlx5e_update_carrier(), with the configured pauseframe
 * flags folded into the active media word.
 */
static void
mlx5e_media_status(if_t dev, struct ifmediareq *ifmr)
{
	struct mlx5e_priv *priv = if_getsoftc(dev);

	ifmr->ifm_status = priv->media_status_last;
	ifmr->ifm_active = priv->media_active_last;
	if (priv->params.rx_pauseframe_control != 0)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (priv->params.tx_pauseframe_control != 0)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	ifmr->ifm_current = ifmr->ifm_active;
}
/*
 * Translate an ifmedia subtype into a PTYS protocol bitmask by scanning
 * the media tables. Subtypes that are reported differently than they
 * are configured (10G LR/ER, 40G ER4/LR4) are mapped back first.
 * Returns 0 when no table entry matches.
 */
static u32
mlx5e_find_link_mode(u32 subtype, bool ext)
{
	u32 link_mode;
	unsigned i, j;

	if (subtype == 0)
		return (0);
	if (subtype == IFM_10G_LR)
		subtype = IFM_10G_ER;
	else if (subtype == IFM_40G_ER4)
		subtype = IFM_40G_LR4;

	link_mode = 0;
	if (ext) {
		for (i = 0; i != MLX5E_EXT_LINK_SPEEDS_NUMBER; i++) {
			for (j = 0; j != MLX5E_CABLE_TYPE_NUMBER; j++) {
				if (mlx5e_ext_mode_table[i][j].subtype ==
				    subtype)
					link_mode |= MLX5E_PROT_MASK(i);
			}
		}
	} else {
		for (i = 0; i != MLX5E_LINK_SPEEDS_NUMBER; i++) {
			if (mlx5e_mode_table[i].subtype == subtype)
				link_mode |= MLX5E_PROT_MASK(i);
		}
	}

	return (link_mode);
}
/*
 * Push the currently configured global pause and per-priority
 * flow-control settings for port 1 to firmware.
 */
static int
mlx5e_set_port_pause_and_pfc(struct mlx5e_priv *priv)
{
	return (mlx5_set_port_pause_and_pfc(priv->mdev, 1,
	    priv->params.rx_pauseframe_control,
	    priv->params.tx_pauseframe_control,
	    priv->params.rx_priority_flow_control,
	    priv->params.tx_priority_flow_control));
}
/*
 * Apply the configured PFC settings, refusing if the device is gone or
 * if global pauseframes are still enabled (the two modes are mutually
 * exclusive). Returns 0 or a negative errno.
 */
static int
mlx5e_set_port_pfc(struct mlx5e_priv *priv)
{
	if (priv->gone != 0)
		return (-ENXIO);

	if (priv->params.rx_pauseframe_control != 0 ||
	    priv->params.tx_pauseframe_control != 0) {
		mlx5_en_err(priv->ifp,
		    "Global pauseframes must be disabled before enabling PFC.\n");
		return (-EINVAL);
	}

	return (mlx5e_set_port_pause_and_pfc(priv));
}
/*
 * ifmedia change callback: validate the requested media word against
 * the port's capabilities, update the pauseframe configuration and
 * reprogram the link (port is bounced down/up around the change).
 * May be called with the private lock already held.
 */
static int
mlx5e_media_change(if_t dev)
{
	struct mlx5e_priv *priv = if_getsoftc(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 eth_proto_cap;
	u32 link_mode;
	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
	int was_opened;
	int locked;
	int error;
	bool ext;

	/* Only take the lock if the caller does not already hold it. */
	locked = PRIV_LOCKED(priv);
	if (!locked)
		PRIV_LOCK(priv);

	if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) {
		error = EINVAL;
		goto done;
	}

	error = mlx5_query_port_ptys(mdev, out, sizeof(out),
	    MLX5_PTYS_EN, 1);
	if (error != 0) {
		mlx5_en_err(dev, "Query port media capability failed\n");
		goto done;
	}

	ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
	link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media), ext);

	/* query supported capabilities */
	eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
	    eth_proto_capability);

	/* check for autoselect */
	if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO) {
		link_mode = eth_proto_cap;
		if (link_mode == 0) {
			mlx5_en_err(dev, "Port media capability is zero\n");
			error = EINVAL;
			goto done;
		}
	} else {
		/* Restrict the request to what the port supports. */
		link_mode = link_mode & eth_proto_cap;
		if (link_mode == 0) {
			mlx5_en_err(dev, "Not supported link mode requested\n");
			error = EINVAL;
			goto done;
		}
	}
	if (priv->media.ifm_media & (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
		/* check if PFC is enabled */
		if (priv->params.rx_priority_flow_control ||
		    priv->params.tx_priority_flow_control) {
			mlx5_en_err(dev, "PFC must be disabled before enabling global pauseframes.\n");
			error = EINVAL;
			goto done;
		}
	}
	/* update pauseframe control bits */
	priv->params.rx_pauseframe_control =
	    (priv->media.ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0;
	priv->params.tx_pauseframe_control =
	    (priv->media.ifm_media & IFM_ETH_TXPAUSE) ? 1 : 0;

	/* check if device is opened */
	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);

	/* reconfigure the hardware */
	mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
	mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN, ext);
	error = -mlx5e_set_port_pause_and_pfc(priv);
	if (was_opened)
		mlx5_set_port_status(mdev, MLX5_PORT_UP);

done:
	if (!locked)
		PRIV_UNLOCK(priv);
	return (error);
}
/*
 * Deferred-work handler updating the interface carrier state.
 * Runs from the priv's workqueue; only acts while the interface
 * is opened.
 */
static void
mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *ep;

	ep = container_of(work, struct mlx5e_priv, update_carrier_work);

	PRIV_LOCK(ep);
	if (test_bit(MLX5E_STATE_OPENED, &ep->state) != 0)
		mlx5e_update_carrier(ep);
	PRIV_UNLOCK(ep);
}
/*
 * Helper macros expanded by the MLX5E_PCIE_*_COUNTERS_* X-macro lists
 * below: copy one 64-bit / 32-bit counter from the MPCNT register
 * output ("out") into the debug statistics structure ("s_debug").
 * Both variables are expected to be in scope at the expansion site.
 */
#define	MLX5E_PCIE_PERF_GET_64(a,b,c,d,e,f) \
	s_debug->c = MLX5_GET64(mpcnt_reg, out, counter_set.f.c);

#define	MLX5E_PCIE_PERF_GET_32(a,b,c,d,e,f) \
	s_debug->c = MLX5_GET(mpcnt_reg, out, counter_set.f.c);

/*
 * Read the PCIe performance, timers-and-states and lane counter
 * groups through the MPCNT access register and store them into
 * "priv->stats.port_stats_debug". Best effort: any allocation or
 * firmware failure silently aborts the remaining reads.
 */
static void
mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
	const unsigned sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;
	void *in;
	int err;

	/* allocate firmware request structures */
	in = mlx5_vzalloc(sz);
	out = mlx5_vzalloc(sz);
	if (in == NULL || out == NULL)
		goto free_out;

	/* PCIe performance group */
	MLX5_SET(mpcnt_reg, in, grp, MLX5E_PCIE_PERFORMANCE_COUNTERS_GROUP);
	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
	if (err != 0)
		goto free_out;
	MLX5E_PCIE_PERFORMANCE_COUNTERS_64(MLX5E_PCIE_PERF_GET_64)
	MLX5E_PCIE_PERFORMANCE_COUNTERS_32(MLX5E_PCIE_PERF_GET_32)

	/* PCIe timers and states group */
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP);
	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
	if (err != 0)
		goto free_out;
	MLX5E_PCIE_TIMERS_AND_STATES_COUNTERS_32(MLX5E_PCIE_PERF_GET_32)

	/* PCIe per-lane group */
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_LANE_COUNTERS_GROUP);
	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
	if (err != 0)
		goto free_out;
	MLX5E_PCIE_LANE_COUNTERS_32(MLX5E_PCIE_PERF_GET_32)

free_out:
	/* free firmware request structures; kvfree(NULL) is a no-op */
	kvfree(in);
	kvfree(out);
}
/*
* This function reads the physical port counters from the firmware
* using a pre-defined layout defined by various MLX5E_PPORT_XXX()
* macros. The output is converted from big-endian 64-bit values into
* host endian ones and stored in the "priv->stats.pport" structure.
*/
static void
mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_pport_stats *s = &priv->stats.pport;
	struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
	u32 *in;
	u32 *out;
	const u64 *ptr;
	unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	unsigned x;	/* index into the firmware output counter set */
	unsigned y;	/* index into the destination "arg" array */
	unsigned z;	/* priority index for the per-priority group */

	/* allocate firmware request structures */
	in = mlx5_vzalloc(sz);
	out = mlx5_vzalloc(sz);
	if (in == NULL || out == NULL)
		goto free_out;

	/*
	 * Get pointer to the 64-bit counter set which is located at a
	 * fixed offset in the output firmware request structure:
	 */
	ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set);

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	/*
	 * Read IEEE802_3 counter group using predefined counter
	 * layout. "s->arg" begins with the per-priority counters, so
	 * the IEEE counters start at offset
	 * MLX5E_PPORT_PER_PRIO_STATS_NUM.
	 */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0, y = MLX5E_PPORT_PER_PRIO_STATS_NUM;
	    x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++)
		s->arg[y] = be64toh(ptr[x]);

	/*
	 * Read RFC2819 counter group using predefined counter layout.
	 * The first MLX5E_PPORT_RFC2819_STATS_NUM counters go into
	 * "s->arg" (continuing "y" from above); the remainder of the
	 * same firmware reply goes into "s_debug->arg", so "x" keeps
	 * running while "y" restarts at zero.
	 */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++)
		s->arg[y] = be64toh(ptr[x]);
	for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM +
	    MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read RFC2863 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read physical layer stats counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read Extended Ethernet counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_ETHERNET_EXTENDED_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read Extended Statistical Group, if the device supports it */
	if (MLX5_CAP_GEN(mdev, pcam_reg) &&
	    MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) &&
	    MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters)) {
		/* read Extended Statistical counter group using predefined counter layout */
		MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
		for (x = 0; x != MLX5E_PPORT_STATISTICAL_DEBUG_NUM; x++, y++)
			s_debug->arg[y] = be64toh(ptr[x]);
	}

	/* read PCIE counters */
	mlx5e_update_pcie_counters(priv);

	/* read per-priority counters */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);

	/* iterate all the priorities; these fill the start of "s->arg" */
	for (y = z = 0; z != MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO; z++) {
		MLX5_SET(ppcnt_reg, in, prio_tc, z);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

		/* read per priority stats counter group using predefined counter layout */
		for (x = 0; x != (MLX5E_PPORT_PER_PRIO_STATS_NUM /
		    MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO); x++, y++)
			s->arg[y] = be64toh(ptr[x]);
	}

free_out:
	/* free firmware request structures */
	kvfree(in);
	kvfree(out);
}
/*
 * Query the QUERY_VNIC_ENV firmware command and refresh the
 * "rx_steer_missed_packets" vport counter. Silently returns when the
 * device lacks the "nic_receive_steering_discard" capability or when
 * the firmware command fails (statistics refresh is best effort).
 */
static void
mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
{
	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};

	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode,
	    MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, op_mod, 0);
	MLX5_SET(query_vnic_env_in, in, other_vport, 0);

	if (mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)) != 0)
		return;

	priv->stats.vport.rx_steer_missed_packets =
	    MLX5_GET64(query_vnic_env_out, out,
	    vport_env.nic_receive_steering_discard);
}
/*
* This function is called regularly to collect all statistics
* counters from the firmware. The values can be viewed through the
* sysctl interface. Execution is serialized using the priv's global
* configuration lock.
*/
static void
mlx5e_update_stats_locked(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_vport_stats *s = &priv->stats.vport;
	struct mlx5e_sq_stats *sq_stats;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u64 tso_packets = 0;
	u64 tso_bytes = 0;
	u64 tx_queue_dropped = 0;
	u64 tx_defragged = 0;
	u64 tx_offload_none = 0;
	u64 lro_packets = 0;
	u64 lro_bytes = 0;
	u64 sw_lro_queued = 0;
	u64 sw_lro_flushed = 0;
	u64 rx_csum_none = 0;
	u64 rx_wqe_err = 0;
	u64 rx_packets = 0;
	u64 rx_bytes = 0;
	u64 rx_decrypted_error = 0;
	u64 rx_decrypted_ok = 0;
	u32 rx_out_of_buffer = 0;
	int error;
	int i;
	int j;

	out = mlx5_vzalloc(outlen);
	if (out == NULL)
		goto free_out;

	/* Collect first the SW counters and then HW for consistency */
	for (i = 0; i < priv->params.num_channels; i++) {
		struct mlx5e_channel *pch = priv->channel + i;
		struct mlx5e_rq *rq = &pch->rq;
		struct mlx5e_rq_stats *rq_stats = &pch->rq.stats;

		/* collect stats from LRO */
		rq_stats->sw_lro_queued = rq->lro.lro_queued;
		rq_stats->sw_lro_flushed = rq->lro.lro_flushed;
		sw_lro_queued += rq_stats->sw_lro_queued;
		sw_lro_flushed += rq_stats->sw_lro_flushed;
		lro_packets += rq_stats->lro_packets;
		lro_bytes += rq_stats->lro_bytes;
		rx_csum_none += rq_stats->csum_none;
		rx_wqe_err += rq_stats->wqe_err;
		rx_packets += rq_stats->packets;
		rx_bytes += rq_stats->bytes;
		rx_decrypted_error += rq_stats->decrypted_error_packets;
		rx_decrypted_ok += rq_stats->decrypted_ok_packets;

		/* sum up the per-TC send queue counters of this channel */
		for (j = 0; j < priv->num_tc; j++) {
			sq_stats = &pch->sq[j].stats;

			tso_packets += sq_stats->tso_packets;
			tso_bytes += sq_stats->tso_bytes;
			tx_queue_dropped += sq_stats->dropped;
			tx_queue_dropped += sq_stats->enobuf;
			tx_defragged += sq_stats->defragged;
			tx_offload_none += sq_stats->csum_offload_none;
		}
	}
#ifdef RATELIMIT
	/* Collect statistics from all rate-limit queues */
	for (j = 0; j < priv->rl.param.tx_worker_threads_def; j++) {
		struct mlx5e_rl_worker *rlw = priv->rl.workers + j;

		for (i = 0; i < priv->rl.param.tx_channels_per_worker_def; i++) {
			struct mlx5e_rl_channel *channel = rlw->channels + i;
			struct mlx5e_sq *sq = channel->sq;

			/* un-allocated rate-limit channels are skipped */
			if (sq == NULL)
				continue;

			sq_stats = &sq->stats;

			tso_packets += sq_stats->tso_packets;
			tso_bytes += sq_stats->tso_bytes;
			tx_queue_dropped += sq_stats->dropped;
			tx_queue_dropped += sq_stats->enobuf;
			tx_defragged += sq_stats->defragged;
			tx_offload_none += sq_stats->csum_offload_none;
		}
	}
#endif

	/* publish the accumulated software counters */
	s->tso_packets = tso_packets;
	s->tso_bytes = tso_bytes;
	s->tx_queue_dropped = tx_queue_dropped;
	s->tx_defragged = tx_defragged;
	s->lro_packets = lro_packets;
	s->lro_bytes = lro_bytes;
	s->sw_lro_queued = sw_lro_queued;
	s->sw_lro_flushed = sw_lro_flushed;
	s->rx_csum_none = rx_csum_none;
	s->rx_wqe_err = rx_wqe_err;
	s->rx_packets = rx_packets;
	s->rx_bytes = rx_bytes;
	s->rx_decrypted_error_packets = rx_decrypted_error;
	s->rx_decrypted_ok_packets = rx_decrypted_ok;

	mlx5e_grp_vnic_env_update_stats(priv);

	/* HW counters */
	memset(in, 0, sizeof(in));

	MLX5_SET(query_vport_counter_in, in, opcode,
	    MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	memset(out, 0, outlen);

	/* get number of out-of-buffer drops first */
	if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0 &&
	    mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id,
	    &rx_out_of_buffer) == 0) {
		s->rx_out_of_buffer = rx_out_of_buffer;
	}

	/* get port statistics */
	if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen) == 0) {
#define	MLX5_GET_CTR(out, x) \
	MLX5_GET64(query_vport_counter_out, out, x)

		s->rx_error_packets =
		    MLX5_GET_CTR(out, received_errors.packets);
		s->rx_error_bytes =
		    MLX5_GET_CTR(out, received_errors.octets);
		s->tx_error_packets =
		    MLX5_GET_CTR(out, transmit_errors.packets);
		s->tx_error_bytes =
		    MLX5_GET_CTR(out, transmit_errors.octets);
		s->rx_unicast_packets =
		    MLX5_GET_CTR(out, received_eth_unicast.packets);
		s->rx_unicast_bytes =
		    MLX5_GET_CTR(out, received_eth_unicast.octets);
		s->tx_unicast_packets =
		    MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
		s->tx_unicast_bytes =
		    MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
		s->rx_multicast_packets =
		    MLX5_GET_CTR(out, received_eth_multicast.packets);
		s->rx_multicast_bytes =
		    MLX5_GET_CTR(out, received_eth_multicast.octets);
		s->tx_multicast_packets =
		    MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
		s->tx_multicast_bytes =
		    MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
		s->rx_broadcast_packets =
		    MLX5_GET_CTR(out, received_eth_broadcast.packets);
		s->rx_broadcast_bytes =
		    MLX5_GET_CTR(out, received_eth_broadcast.octets);
		s->tx_broadcast_packets =
		    MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
		s->tx_broadcast_bytes =
		    MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

		/* totals derived from the unicast/multicast/broadcast sums */
		s->tx_packets = s->tx_unicast_packets +
		    s->tx_multicast_packets + s->tx_broadcast_packets;
		s->tx_bytes = s->tx_unicast_bytes + s->tx_multicast_bytes +
		    s->tx_broadcast_bytes;

		/* Update calculated offload counters */
		s->tx_csum_offload = s->tx_packets - tx_offload_none;
		s->rx_csum_good = s->rx_packets - s->rx_csum_none;
	}

	/* Get physical port counters */
	mlx5e_update_pport_counters(priv);

	s->tx_jumbo_packets =
	    priv->stats.port_stats_debug.tx_stat_p1519to2047octets +
	    priv->stats.port_stats_debug.tx_stat_p2048to4095octets +
	    priv->stats.port_stats_debug.tx_stat_p4096to8191octets +
	    priv->stats.port_stats_debug.tx_stat_p8192to10239octets;

free_out:
	kvfree(out);

	/* Update diagnostics, if any */
	if (priv->params_ethtool.diag_pci_enable ||
	    priv->params_ethtool.diag_general_enable) {
		error = mlx5_core_get_diagnostics_full(mdev,
		    priv->params_ethtool.diag_pci_enable ? &priv->params_pci : NULL,
		    priv->params_ethtool.diag_general_enable ? &priv->params_general : NULL);
		if (error != 0)
			mlx5_en_err(priv->ifp,
			    "Failed reading diagnostics: %d\n", error);
	}

	/* Update FEC, if any */
	error = mlx5e_fec_update(priv);
	if (error != 0 && error != EOPNOTSUPP) {
		mlx5_en_err(priv->ifp,
		    "Updating FEC failed: %d\n", error);
	}

	/* Update temperature, if any */
	if (priv->params_ethtool.hw_num_temp != 0) {
		error = mlx5e_hw_temperature_update(priv);
		if (error != 0 && error != EOPNOTSUPP) {
			mlx5_en_err(priv->ifp,
			    "Updating temperature failed: %d\n", error);
		}
	}
}
/*
 * Deferred-work handler driving mlx5e_update_stats_locked(). Only
 * acts while the interface is opened and the device is not being
 * torn down.
 */
static void
mlx5e_update_stats_work(struct work_struct *work)
{
	struct mlx5e_priv *ep =
	    container_of(work, struct mlx5e_priv, update_stats_work);

	PRIV_LOCK(ep);
	if (test_bit(MLX5E_STATE_OPENED, &ep->state) != 0 &&
	    test_bit(MLX5_INTERFACE_STATE_TEARDOWN, &ep->mdev->intf_state) == 0)
		mlx5e_update_stats_locked(ep);
	PRIV_UNLOCK(ep);
}
/*
 * Periodic watchdog callout: queue the statistics update work and
 * re-arm itself to fire again in a quarter of a second.
 */
static void
mlx5e_update_stats(void *arg)
{
	struct mlx5e_priv *ep = arg;

	queue_work(ep->wq, &ep->update_stats_work);
	callout_reset(&ep->watchdog, hz / 4, mlx5e_update_stats, ep);
}
/*
 * Dispatch one asynchronous device event. Port up/down events
 * schedule a carrier update; all other events are ignored here.
 */
static void
mlx5e_async_event_sub(struct mlx5e_priv *priv,
    enum mlx5_dev_event event)
{
	if (event == MLX5_DEV_EVENT_PORT_UP ||
	    event == MLX5_DEV_EVENT_PORT_DOWN)
		queue_work(priv->wq, &priv->update_carrier_work);
}
/*
 * Asynchronous event callback registered with the mlx5 core. Events
 * are forwarded to mlx5e_async_event_sub() only while the
 * ASYNC_EVENTS_ENABLE bit is set; the check and the dispatch are
 * serialized by "async_events_mtx".
 */
static void
mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
    enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *ep = vpriv;

	mtx_lock(&ep->async_events_mtx);
	if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &ep->state) != 0)
		mlx5e_async_event_sub(ep, event);
	mtx_unlock(&ep->async_events_mtx);
}
/*
 * Start forwarding asynchronous device events. No mutex is taken
 * here; mlx5e_async_event() re-checks the bit under
 * "async_events_mtx" before dispatching.
 */
static void
mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}
/*
 * Stop forwarding asynchronous device events. Clearing the bit under
 * "async_events_mtx" ensures no mlx5e_async_event() dispatch is in
 * flight once this function returns.
 */
static void
mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	mtx_lock(&priv->async_events_mtx);
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
	mtx_unlock(&priv->async_events_mtx);
}
static void mlx5e_calibration_callout(void *arg);

/* number of calibration rounds in the initial (fast) phase */
static int mlx5e_calibration_duration = 20;
/* recalibration interval, in seconds, during the initial phase */
static int mlx5e_fast_calibration = 1;
/* recalibration interval, in seconds, after initial calibration */
static int mlx5e_normal_calibration = 30;

static SYSCTL_NODE(_hw_mlx5, OID_AUTO, calibr, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "MLX5 timestamp calibration parameters");

SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, duration, CTLFLAG_RWTUN,
    &mlx5e_calibration_duration, 0,
    "Duration of initial calibration");
SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, fast, CTLFLAG_RWTUN,
    &mlx5e_fast_calibration, 0,
    "Recalibration interval during initial calibration");
SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, normal, CTLFLAG_RWTUN,
    &mlx5e_normal_calibration, 0,
    "Recalibration interval during normal operations");
/*
* Ignites the calibration process.
*/
/*
 * (Re-)arm the timestamp calibration callout. While "clbr_done" is
 * below mlx5e_calibration_duration the fast interval is used,
 * afterwards the normal one. When calibration is disabled
 * ("clbr_done" == 0) the callout body is invoked directly to restart
 * the process immediately.
 */
static void
mlx5e_reset_calibration_callout(struct mlx5e_priv *priv)
{
	if (priv->clbr_done == 0)
		mlx5e_calibration_callout(priv);
	else
		callout_reset_sbt_curcpu(&priv->tstmp_clbr, (priv->clbr_done <
		    mlx5e_calibration_duration ? mlx5e_fast_calibration :
		    mlx5e_normal_calibration) * SBT_1S, 0,
		    mlx5e_calibration_callout, priv, C_DIRECT_EXEC);
}
/*
 * Convert a timespec into a 64-bit scalar time value.
 * NOTE(review): despite the "usec" in the name, this returns
 * NANOseconds (tv_sec * 10^9 + tv_nsec); keep the name for the sake
 * of existing callers, which depend on the nanosecond scale.
 */
static uint64_t
mlx5e_timespec2usec(const struct timespec *ts)
{
	uint64_t ns;

	ns = (uint64_t)ts->tv_sec * 1000000000ULL;
	ns += (uint64_t)ts->tv_nsec;
	return (ns);
}
/*
 * Read the 64-bit free-running internal timer from the device's
 * initialization segment. The high word is read before and after the
 * low word and the sequence is retried until both high reads agree,
 * which guards against a low-word wrap-around between the two 32-bit
 * register reads.
 */
static uint64_t
mlx5e_hw_clock(struct mlx5e_priv *priv)
{
	struct mlx5_init_seg *iseg;
	uint32_t hw_h, hw_h1, hw_l;

	iseg = priv->mdev->iseg;
	do {
		hw_h = ioread32be(&iseg->internal_timer_h);
		hw_l = ioread32be(&iseg->internal_timer_l);
		hw_h1 = ioread32be(&iseg->internal_timer_h);
	} while (hw_h1 != hw_h);
	return (((uint64_t)hw_h << 32) | hw_l);
}
/*
* The calibration callout, it runs either in the context of the
* thread which enables calibration, or in callout. It takes the
* snapshot of system and adapter clocks, then advances the pointers to
* the calibration point to allow rx path to read the consistent data
* lockless.
*/
static void
mlx5e_calibration_callout(void *arg)
{
	struct mlx5e_priv *priv;
	struct mlx5e_clbr_point *next, *curr;
	struct timespec ts;
	int clbr_curr_next;

	priv = arg;
	curr = &priv->clbr_points[priv->clbr_curr];
	/* advance to the next slot of the circular point array */
	clbr_curr_next = priv->clbr_curr + 1;
	if (clbr_curr_next >= nitems(priv->clbr_points))
		clbr_curr_next = 0;
	next = &priv->clbr_points[clbr_curr_next];

	/* carry over the previous interval's end as the new start */
	next->base_prev = curr->base_curr;
	next->clbr_hw_prev = curr->clbr_hw_curr;

	next->clbr_hw_curr = mlx5e_hw_clock(priv);
	/*
	 * If the hardware clock did not advance by at least
	 * 2^MLX5E_TSTMP_PREC ticks, treat the timer as frozen and
	 * disable calibration (clbr_gen = 0 marks the point invalid).
	 */
	if (((next->clbr_hw_curr - curr->clbr_hw_curr) >> MLX5E_TSTMP_PREC) ==
	    0) {
		if (priv->clbr_done != 0) {
			mlx5_en_err(priv->ifp,
			    "HW failed tstmp frozen %#jx %#jx, disabling\n",
			    next->clbr_hw_curr, curr->clbr_hw_prev);
			priv->clbr_done = 0;
		}
		atomic_store_rel_int(&curr->clbr_gen, 0);
		return;
	}

	nanouptime(&ts);
	next->base_curr = mlx5e_timespec2usec(&ts);

	/*
	 * Publish the new point for lockless readers: invalidate the
	 * current point, fence, switch the index, then mark the next
	 * point valid with a fresh generation number.
	 */
	curr->clbr_gen = 0;
	atomic_thread_fence_rel();
	priv->clbr_curr = clbr_curr_next;
	atomic_store_rel_int(&next->clbr_gen, ++(priv->clbr_gen));

	if (priv->clbr_done < mlx5e_calibration_duration)
		priv->clbr_done++;
	mlx5e_reset_calibration_callout(priv);
}
/* sysctl name/description table for the per-RQ statistics counters */
static const char *mlx5e_rq_stats_desc[] = {
	MLX5E_RQ_STATS(MLX5E_STATS_DESC)
};
/*
 * Allocate the software state of a receive queue: DMA tag and maps,
 * the low-level work queue, the LRO context, the per-entry mbuf
 * tracking array and the per-RQ sysctl statistics node. The firmware
 * object is created separately by mlx5e_enable_rq(). Returns 0 or a
 * negative errno; on error all partially created resources are
 * unwound via the goto labels at the bottom.
 */
static int
mlx5e_create_rq(struct mlx5e_channel *c,
    struct mlx5e_rq_param *param,
    struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	char buffer[16];	/* sysctl node name, "rxstat%d" */
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int wq_sz;
	int err;
	int i;
	u32 nsegs, wqe_sz;

	err = mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
	if (err != 0)
		goto done;

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsize */
	    nsegs,			/* nsegments */
	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &rq->dma_tag)))
		goto done;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
	    &rq->wq_ctrl);
	if (err)
		goto err_free_dma_tag;

	/* select the receive doorbell record */
	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	err = mlx5e_get_wqe_sz(priv, &rq->wqe_sz, &rq->nsegs);
	if (err != 0)
		goto err_rq_wq_destroy;

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);

	err = -tcp_lro_init_args(&rq->lro, priv->ifp, TCP_LRO_ENTRIES, wq_sz);
	if (err)
		goto err_rq_wq_destroy;

	/* one mbuf tracking slot per WQE, allocated near the device */
	rq->mbuf = malloc_domainset(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN,
	    mlx5_dev_domainset(mdev), M_WAITOK | M_ZERO);
	for (i = 0; i != wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
		int j;

		err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map);
		if (err != 0) {
			/* destroy the maps created so far, then unwind */
			while (i--)
				bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
			goto err_rq_mbuf_free;
		}

		/* set value for constant fields */
		for (j = 0; j < rq->nsegs; j++)
			wqe->data[j].lkey = cpu_to_be32(priv->mr.key);
	}

	INIT_WORK(&rq->dim.work, mlx5e_dim_work);

	/*
	 * Select the dynamic interrupt moderation mode; modes below 2
	 * disable DIM, otherwise it follows the CQ period mode chosen
	 * in the channel's RX CQ parameters.
	 */
	if (priv->params.rx_cq_moderation_mode < 2) {
		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
	} else {
		void *cqc = container_of(param,
		    struct mlx5e_channel_param, rq)->rx_cq.cqc;

		switch (MLX5_GET(cqc, cqc, cq_period_mode)) {
		case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
			rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
			break;
		case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
			rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
			break;
		default:
			rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
			break;
		}
	}

	rq->ifp = priv->ifp;
	rq->channel = c;
	rq->ix = c->ix;

	snprintf(buffer, sizeof(buffer), "rxstat%d", c->ix);
	mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM,
	    rq->stats.arg);
	return (0);

err_rq_mbuf_free:
	free(rq->mbuf, M_MLX5EN);
	tcp_lro_free(&rq->lro);
err_rq_wq_destroy:
	mlx5_wq_destroy(&rq->wq_ctrl);
err_free_dma_tag:
	bus_dma_tag_destroy(rq->dma_tag);
done:
	return (err);
}
/*
 * Free all software state created by mlx5e_create_rq(): sysctl
 * nodes, LRO context, any mbufs still attached to WQEs (with their
 * DMA maps unloaded first), the mbuf tracking array, the work queue
 * and finally the DMA tag.
 */
static void
mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	int wq_sz;
	int i;

	/* destroy all sysctl nodes */
	sysctl_ctx_free(&rq->stats.ctx);

	/* free leftover LRO packets, if any */
	tcp_lro_free(&rq->lro);

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	for (i = 0; i != wq_sz; i++) {
		if (rq->mbuf[i].mbuf != NULL) {
			/* unload the map before freeing the mbuf */
			bus_dmamap_unload(rq->dma_tag, rq->mbuf[i].dma_map);
			m_freem(rq->mbuf[i].mbuf);
		}
		bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
	}
	free(rq->mbuf, M_MLX5EN);
	mlx5_wq_destroy(&rq->wq_ctrl);
	bus_dma_tag_destroy(rq->dma_tag);
}
/*
 * Create the firmware RQ object matching the software state built by
 * mlx5e_create_rq(). The RQ is created in the RST state; a
 * subsequent mlx5e_modify_rq() call moves it to RDY. Returns 0 or a
 * negative errno.
 */
static int
mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;
	u8 ts_format;

	/* the command payload carries one 64-bit PAS entry per page */
	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
	    sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	ts_format = mlx5_get_rq_default_ts(mdev);
	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq = MLX5_ADDR_OF(rqc, rqc, wq);

	/* start from the caller-prepared RQ context template */
	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, ts_format, ts_format);
	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
	if (priv->counter_set_id >= 0)
		MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id);
	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
	    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return (err);
}
/*
 * Move the firmware RQ object from "curr_state" to "next_state"
 * (RST -> RDY -> ERR transitions). Returns 0 or a negative errno.
 */
static int
mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rqn, rq->rqn);
	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, in, inlen);

	kvfree(in);

	return (err);
}
/*
 * Destroy the firmware RQ object; counterpart of mlx5e_enable_rq().
 */
static void
mlx5e_disable_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->channel->priv->mdev, rq->rqn);
}
/*
 * Bring up a receive queue end-to-end: create the software state,
 * bind it to the channel's RX CQ, create the firmware object and
 * move it to the RDY state. On failure each step is unwound in
 * reverse order. Returns 0 or a negative errno.
 */
static int
mlx5e_open_rq(struct mlx5e_channel *c,
    struct mlx5e_rq_param *param,
    struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_create_rq(c, param, rq);
	if (err)
		return (err);

	/* set CQN in RQ parameters */
	MLX5_SET(rqc, param->rqc, cqn, c->rq.cq.mcq.cqn);

	err = mlx5e_enable_rq(rq, param);
	if (err)
		goto err_destroy_rq;

	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_disable_rq;

	c->rq.enabled = 1;

	return (0);

err_disable_rq:
	mlx5e_disable_rq(rq);
err_destroy_rq:
	mlx5e_destroy_rq(rq);

	return (err);
}
/*
 * First phase of RQ shutdown: mark the RQ disabled and stop its
 * watchdog under the RQ mutex, then move the firmware object to the
 * ERR state so outstanding WQEs complete with errors. The final
 * teardown happens in mlx5e_close_rq_wait().
 */
static void
mlx5e_close_rq(struct mlx5e_rq *rq)
{
	mtx_lock(&rq->mtx);
	rq->enabled = 0;
	callout_stop(&rq->watchdog);
	mtx_unlock(&rq->mtx);

	mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
}
/*
 * Second phase of RQ shutdown: destroy the firmware object, close
 * the completion queue, cancel pending DIM work and release the
 * software state. Must follow a mlx5e_close_rq() call.
 */
static void
mlx5e_close_rq_wait(struct mlx5e_rq *rq)
{
	mlx5e_disable_rq(rq);
	mlx5e_close_cq(&rq->cq);
	cancel_work_sync(&rq->dim.work);
	mlx5e_destroy_rq(rq);
}
/*
* What is a drop RQ and why is it needed?
*
* The RSS indirection table, also called the RQT, selects the
* destination RQ based on the receive queue number, RQN. The RQT is
* frequently referred to by flow steering rules to distribute traffic
* among multiple RQs. The problem is that the RQs cannot be destroyed
* before the RQT referring them is destroyed too. Further, TLS RX
* rules may still be referring to the RQT even if the link went
* down. Because there is no magic RQN for dropping packets, we create
* a dummy RQ, also called drop RQ, which sole purpose is to drop all
* received packets. When the link goes down this RQN is filled in all
* RQT entries, of the main RQT, so the real RQs which are about to be
* destroyed can be released and the TLS RX rules can be sustained.
*/
/*
 * Completion handler for the drop RQ's CQ. The drop RQ is never
 * polled, so completions are intentionally ignored.
 */
static void
mlx5e_open_drop_rq_comp(struct mlx5_core_cq *mcq __unused, struct mlx5_eqe *eqe __unused)
{
}
/*
 * Create the drop RQ (see the explanatory comment above): a minimal
 * one-entry RQ with its own CQ whose RQN can be placed into RQT
 * entries to discard traffic while the real RQs are torn down.
 * Returns 0 or a negative errno; on failure all partially created
 * resources are unwound.
 */
static int
mlx5e_open_drop_rq(struct mlx5e_priv *priv,
    struct mlx5e_rq *drop_rq)
{
	struct mlx5e_cq_param param_cq = {};
	struct mlx5e_rq_param param_rq = {};
	void *rqc_wq = MLX5_ADDR_OF(rqc, param_rq.rqc, wq);
	int err;

	/* set channel pointer */
	drop_rq->channel = priv->channel;

	/* set basic CQ parameters needed; log_cq_size 0 = one entry */
	MLX5_SET(cqc, param_cq.cqc, log_cq_size, 0);
	MLX5_SET(cqc, param_cq.cqc, uar_page, priv->mdev->priv.uar->index);

	/* open receive completion queue */
	err = mlx5e_open_cq(priv, &param_cq, &drop_rq->cq,
	    &mlx5e_open_drop_rq_comp, 0);
	if (err)
		goto err_done;

	/* set basic WQ parameters needed; log_wq_sz 0 = one WQE */
	MLX5_SET(wq, rqc_wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, rqc_wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, rqc_wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe) + sizeof(struct mlx5_wqe_data_seg)));
	MLX5_SET(wq, rqc_wq, log_wq_sz, 0);
	MLX5_SET(wq, rqc_wq, pd, priv->pdn);

	param_rq.wq.linear = 1;

	err = mlx5_wq_ll_create(priv->mdev, &param_rq.wq, rqc_wq, &drop_rq->wq,
	    &drop_rq->wq_ctrl);
	if (err)
		goto err_close_cq;

	/* set CQN in RQ parameters */
	MLX5_SET(rqc, param_rq.rqc, cqn, drop_rq->cq.mcq.cqn);

	err = mlx5e_enable_rq(drop_rq, &param_rq);
	if (err)
		goto err_wq_destroy;

	err = mlx5e_modify_rq(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_disable_rq;

	return (err);

err_disable_rq:
	mlx5e_disable_rq(drop_rq);
err_wq_destroy:
	mlx5_wq_destroy(&drop_rq->wq_ctrl);
err_close_cq:
	mlx5e_close_cq(&drop_rq->cq);
err_done:
	return (err);
}
/*
 * Tear down the drop RQ created by mlx5e_open_drop_rq(): move it to
 * the ERR state, destroy the firmware object, then free the work
 * queue and close the CQ.
 */
static void
mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
{
	mlx5e_modify_rq(drop_rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
	mlx5e_disable_rq(drop_rq);
	mlx5_wq_destroy(&drop_rq->wq_ctrl);
	mlx5e_close_cq(&drop_rq->cq);
}
/*
 * Release the per-WQE bookkeeping of a send queue: free any mbufs
 * still in flight (unloading their DMA maps first), drop held send
 * tag references and destroy the DMA maps, then free the tracking
 * array itself. Counterpart of mlx5e_alloc_sq_db().
 */
void
mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int x;

	for (x = 0; x != wq_sz; x++) {
		if (sq->mbuf[x].mbuf != NULL) {
			bus_dmamap_unload(sq->dma_tag, sq->mbuf[x].dma_map);
			m_freem(sq->mbuf[x].mbuf);
		}
		if (sq->mbuf[x].mst != NULL) {
			/* release the mbuf send tag reference */
			m_snd_tag_rele(sq->mbuf[x].mst);
			sq->mbuf[x].mst = NULL;
		}
		bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
	}
	free(sq->mbuf, M_MLX5EN);
}
/*
 * Allocate the per-WQE bookkeeping array of a send queue and create
 * one DMA map per entry. On DMA map creation failure the maps
 * created so far and the array are freed and a negative errno is
 * returned; on success returns 0.
 */
int
mlx5e_alloc_sq_db(struct mlx5e_sq *sq)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int err;
	int x;

	sq->mbuf = malloc_domainset(wq_sz * sizeof(sq->mbuf[0]), M_MLX5EN,
	    mlx5_dev_domainset(sq->priv->mdev), M_WAITOK | M_ZERO);

	/* Create DMA descriptor MAPs */
	for (x = 0; x != wq_sz; x++) {
		err = -bus_dmamap_create(sq->dma_tag, 0, &sq->mbuf[x].dma_map);
		if (err != 0) {
			/* destroy the maps created so far */
			while (x--)
				bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
			free(sq->mbuf, M_MLX5EN);
			return (err);
		}
	}
	return (0);
}
/* sysctl name/description table for the per-SQ statistics counters */
static const char *mlx5e_sq_stats_desc[] = {
	MLX5E_SQ_STATS(MLX5E_STATS_DESC)
};
void
mlx5e_update_sq_inline(struct mlx5e_sq *sq)
{
sq->max_inline = sq->priv->params.tx_max_inline;
sq->min_inline_mode = sq->priv->params.tx_min_inline_mode;
/*
* Check if trust state is DSCP or if inline mode is NONE which
* indicates CX-5 or newer hardware.
*/
if (sq->priv->params_ethtool.trust_state != MLX5_QPTS_TRUST_PCP ||
sq->min_inline_mode == MLX5_INLINE_MODE_NONE) {
if (MLX5_CAP_ETH(sq->priv->mdev, wqe_vlan_insert))
sq->min_insert_caps = MLX5E_INSERT_VLAN | MLX5E_INSERT_NON_VLAN;
else
sq->min_insert_caps = MLX5E_INSERT_NON_VLAN;
} else {
sq->min_insert_caps = 0;
}
}
static void
mlx5e_refresh_sq_inline_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
{
int i;
for (i = 0; i != priv->num_tc; i++) {
mtx_lock(&c->sq[i].lock);
mlx5e_update_sq_inline(&c->sq[i]);
mtx_unlock(&c->sq[i].lock);
}
}
/*
 * Refresh the inline settings of all SQs of all channels. Does
 * nothing while the channels are closed.
 */
void
mlx5e_refresh_sq_inline(struct mlx5e_priv *priv)
{
	int ch;

	/* check if channels are closed */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return;

	for (ch = 0; ch != priv->params.num_channels; ch++)
		mlx5e_refresh_sq_inline_sub(priv, &priv->channel[ch]);
}
/*
 * Allocate the software state of a send queue for traffic class
 * "tc": DMA tag, cyclic work queue, per-WQE bookkeeping, inline
 * settings and the per-SQ sysctl statistics node. The firmware
 * object is created separately by mlx5e_enable_sq(). Returns 0 or a
 * negative errno; partial state is unwound via the labels below.
 */
static int
mlx5e_create_sq(struct mlx5e_channel *c,
    int tc,
    struct mlx5e_sq_param *param,
    struct mlx5e_sq *sq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	char buffer[16];	/* sysctl node name, "txstat%dtc%d" */
	void *sqc = param->sqc;
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
	int err;

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MLX5E_MAX_TX_PAYLOAD_SIZE,	/* maxsize */
	    MLX5E_MAX_TX_MBUF_FRAGS,	/* nsegments */
	    MLX5E_MAX_TX_MBUF_SIZE,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sq->dma_tag)))
		goto done;

	sq->mkey_be = cpu_to_be32(priv->mr.key);
	sq->ifp = priv->ifp;
	sq->priv = priv;
	sq->tc = tc;

	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
	    &sq->wq_ctrl);
	if (err)
		goto err_free_dma_tag;

	/* select the send doorbell record */
	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_sq_db(sq);
	if (err)
		goto err_sq_wq_destroy;

	mlx5e_update_sq_inline(sq);

	snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc);
	mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM,
	    sq->stats.arg);

	return (0);

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);
err_free_dma_tag:
	bus_dma_tag_destroy(sq->dma_tag);
done:
	return (err);
}
/*
 * Free all software state created by mlx5e_create_sq(): sysctl
 * nodes, per-WQE bookkeeping, the work queue and the DMA tag.
 */
static void
mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
	/* destroy all sysctl nodes */
	sysctl_ctx_free(&sq->stats.ctx);

	mlx5e_free_sq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
	bus_dma_tag_destroy(sq->dma_tag);
}
/*
 * Create the firmware SQ object bound to the given blue-flame
 * register and TIS number. The SQ is created in the RST state; a
 * subsequent mlx5e_modify_sq() call moves it to RDY. Returns 0 or a
 * negative errno.
 */
int
mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param,
    const struct mlx5_sq_bfreg *bfreg, int tis_num)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;
	u8 ts_format;

	/* the command payload carries one 64-bit PAS entry per page */
	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
	    sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	sq->uar_map = bfreg->map;

	ts_format = mlx5_get_sq_default_ts(sq->priv->mdev);
	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	/* start from the caller-prepared SQ context template */
	memcpy(sqc, param->sqc, sizeof(param->sqc));

	MLX5_SET(sqc, sqc, tis_num_0, tis_num);
	MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, ts_format, ts_format);
	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
	MLX5_SET(sqc, sqc, allow_swp, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, bfreg->index);
	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
	    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(sq->priv->mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return (err);
}
/*
 * Move the firmware SQ object from "curr_state" to "next_state"
 * (RST -> RDY -> ERR transitions). Returns 0 or a negative errno.
 */
int
mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
{
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sqn, sq->sqn);
	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
	MLX5_SET(sqc, sqc, state, next_state);

	err = mlx5_core_modify_sq(sq->priv->mdev, in, inlen);

	kvfree(in);

	return (err);
}
/*
 * Destroy the firmware SQ object; counterpart of mlx5e_enable_sq().
 */
void
mlx5e_disable_sq(struct mlx5e_sq *sq)
{
	mlx5_core_destroy_sq(sq->priv->mdev, sq->sqn);
}
/*
 * Bring up a send queue end-to-end: create the software state,
 * create the firmware object and move it to the RDY state, then mark
 * the SQ running. On failure each step is unwound in reverse order.
 * Returns 0 or a negative errno.
 */
static int
mlx5e_open_sq(struct mlx5e_channel *c,
    int tc,
    struct mlx5e_sq_param *param,
    struct mlx5e_sq *sq)
{
	int err;

	sq->cev_factor = c->priv->params_ethtool.tx_completion_fact;

	/* ensure the TX completion event factor is not zero */
	if (sq->cev_factor == 0)
		sq->cev_factor = 1;

	err = mlx5e_create_sq(c, tc, param, sq);
	if (err)
		return (err);

	err = mlx5e_enable_sq(sq, param, &c->bfreg, c->priv->tisn[tc]);
	if (err)
		goto err_destroy_sq;

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
	if (err)
		goto err_disable_sq;

	WRITE_ONCE(sq->running, 1);

	return (0);

err_disable_sq:
	mlx5e_disable_sq(sq);
err_destroy_sq:
	mlx5e_destroy_sq(sq);

	return (err);
}
/*
 * Fill the remainder of the current completion-event interval with
 * NOP WQEs so a completion is generated. Called with the SQ lock
 * held. When "can_sleep" is non-zero the routine waits for ring
 * space (dropping and re-taking the SQ lock); otherwise it stops as
 * soon as the ring is full. Always rings the doorbell at the end.
 */
static void
mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep)
{
	/* fill up remainder with NOPs */
	while (sq->cev_counter != 0) {
		while (!mlx5e_sq_has_room_for(sq, 1)) {
			if (can_sleep != 0) {
				mtx_unlock(&sq->lock);
				msleep(4);
				mtx_lock(&sq->lock);
			} else {
				goto done;
			}
		}
		/* send a single NOP */
		mlx5e_send_nop(sq, 1);
		atomic_thread_fence_rel();
	}
done:
	mlx5e_tx_notify_hw(sq, false);
}
/*
 * Completion-event factor timer. On the first expiry it only arms
 * the NOP-sending state; on the next expiry it pads the ring with
 * NOPs (without sleeping) and stops re-arming once the interval
 * counter has drained to zero. Called with the SQ lock held.
 */
void
mlx5e_sq_cev_timeout(void *arg)
{
	struct mlx5e_sq *sq = arg;

	mtx_assert(&sq->lock, MA_OWNED);

	/* check next state */
	switch (sq->cev_next_state) {
	case MLX5E_CEV_STATE_SEND_NOPS:
		/* fill TX ring with NOPs, if any */
		mlx5e_sq_send_nops_locked(sq, 0);

		/* check if completed */
		if (sq->cev_counter == 0) {
			sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
			return;
		}
		break;
	default:
		/* send NOPs on next timeout */
		sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS;
		break;
	}

	/* restart timer */
	callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq);
}
/*
 * Stop a send queue and wait until all posted work has completed:
 * 1) mark the SQ not running so no new packets are queued,
 * 2) flush the ring with NOPs so pending WQEs complete,
 * 3) wait for consumer == producer (or link down / device error),
 * 4) move the SQ to the ERR state to fail remaining requests,
 * 5) wait again until the ring is fully drained.
 */
void
mlx5e_drain_sq(struct mlx5e_sq *sq)
{
        int error;
        struct mlx5_core_dev *mdev = sq->priv->mdev;

        /*
         * Check if already stopped.
         *
         * NOTE: Serialization of this function is managed by the
         * caller ensuring the priv's state lock is locked or in case
         * of rate limit support, a single thread manages drain and
         * resume of SQs. The "running" variable can therefore safely
         * be read without any locks.
         */
        if (READ_ONCE(sq->running) == 0)
                return;

        /* don't put more packets into the SQ */
        WRITE_ONCE(sq->running, 0);

        /* serialize access to DMA rings */
        mtx_lock(&sq->lock);

        /* teardown event factor timer, if any */
        sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
        callout_stop(&sq->cev_callout);

        /* send dummy NOPs in order to flush the transmit ring */
        mlx5e_sq_send_nops_locked(sq, 1);
        mtx_unlock(&sq->lock);

        /* wait till SQ is empty or link is down */
        mtx_lock(&sq->lock);
        while (sq->cc != sq->pc &&
            (sq->priv->media_status_last & IFM_ACTIVE) != 0 &&
            mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR &&
            pci_channel_offline(mdev->pdev) == 0) {
                mtx_unlock(&sq->lock);
                msleep(1);
                /* manually poll the completion queue */
                sq->cq.mcq.comp(&sq->cq.mcq, NULL);
                mtx_lock(&sq->lock);
        }
        mtx_unlock(&sq->lock);

        /* error out remaining requests */
        error = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
        if (error != 0) {
                mlx5_en_err(sq->ifp,
                    "mlx5e_modify_sq() from RDY to ERR failed: %d\n", error);
        }

        /* wait till SQ is empty */
        mtx_lock(&sq->lock);
        while (sq->cc != sq->pc &&
            mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR &&
            pci_channel_offline(mdev->pdev) == 0) {
                mtx_unlock(&sq->lock);
                msleep(1);
                sq->cq.mcq.comp(&sq->cq.mcq, NULL);
                mtx_lock(&sq->lock);
        }
        mtx_unlock(&sq->lock);
}
/*
 * Fully tear down a send queue: drain outstanding work first, then
 * destroy the firmware object, then free the software state.  The
 * order is mandatory.
 */
static void
mlx5e_close_sq_wait(struct mlx5e_sq *sq)
{

        mlx5e_drain_sq(sq);
        mlx5e_disable_sq(sq);
        mlx5e_destroy_sq(sq);
}
/*
 * Allocate the software side of a completion queue: work queue memory,
 * doorbell records and CQE ownership initialization.  The firmware
 * object is created later by mlx5e_enable_cq().
 *
 * "comp" is the completion handler, "eq_ix" selects the event queue
 * (interrupt vector) the CQ is bound to.
 */
static int
mlx5e_create_cq(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param,
    struct mlx5e_cq *cq,
    mlx5e_cq_comp_t *comp,
    int eq_ix)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
        int eqn_not_used;
        int irqn;
        int err;
        u32 i;

        /* lookup the IRQ number backing this event queue index */
        err = mlx5_vector2eqn(mdev, eq_ix, &eqn_not_used, &irqn);
        if (err)
                return (err);

        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
            &cq->wq_ctrl);
        if (err)
                return (err);

        mcq->cqe_sz = 64;
        /* the doorbell record holds both CI and arm counters */
        mcq->set_ci_db = cq->wq_ctrl.db.db;
        mcq->arm_db = cq->wq_ctrl.db.db + 1;
        *mcq->set_ci_db = 0;
        *mcq->arm_db = 0;
        mcq->vector = eq_ix;
        mcq->comp = comp;
        mcq->event = mlx5e_cq_error_event;
        mcq->irqn = irqn;

        /* mark all CQEs as hardware-owned (invalid opcode/ownership) */
        for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

                cqe->op_own = 0xf1;
        }

        cq->priv = priv;

        return (0);
}
/*
 * Release the CQ work-queue memory.  Counterpart of mlx5e_create_cq().
 */
static void
mlx5e_destroy_cq(struct mlx5e_cq *cq)
{

        mlx5_wq_destroy(&cq->wq_ctrl);
}
/*
 * Create the firmware CQ object for an already software-initialized
 * CQ and arm it for the first interrupt.
 */
static int
mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param, int eq_ix)
{
        struct mlx5_core_cq *mcq = &cq->mcq;
        u32 out[MLX5_ST_SZ_DW(create_cq_out)];
        void *in;
        void *cqc;
        int inlen;
        int irqn_not_used;
        int eqn;
        int err;

        /* command payload: context plus one PAS entry per page */
        inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
            sizeof(u64) * cq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (in == NULL)
                return (-ENOMEM);

        cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

        memcpy(cqc, param->cqc, sizeof(param->cqc));

        mlx5_fill_page_array(&cq->wq_ctrl.buf,
            (__be64 *) MLX5_ADDR_OF(create_cq_in, in, pas));

        /*
         * NOTE(review): return value ignored; eq_ix was already
         * validated by mlx5e_create_cq() via the same lookup.
         */
        mlx5_vector2eqn(cq->priv->mdev, eq_ix, &eqn, &irqn_not_used);

        MLX5_SET(cqc, cqc, c_eqn, eqn);
        MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
            MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

        err = mlx5_core_create_cq(cq->priv->mdev, mcq, in, inlen, out, sizeof(out));

        kvfree(in);

        if (err)
                return (err);

        /* request the first completion interrupt */
        mlx5e_cq_arm(cq, MLX5_GET_DOORBELL_LOCK(&cq->priv->doorbell_lock));

        return (0);
}
/*
 * Destroy the firmware CQ object.  Counterpart of mlx5e_enable_cq().
 */
static void
mlx5e_disable_cq(struct mlx5e_cq *cq)
{

        mlx5_core_destroy_cq(cq->priv->mdev, &cq->mcq);
}
/*
 * Create and enable a completion queue in one step.  On failure the
 * partially-created software state is destroyed before returning.
 */
int
mlx5e_open_cq(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param,
    struct mlx5e_cq *cq,
    mlx5e_cq_comp_t *comp,
    int eq_ix)
{
        int err;

        err = mlx5e_create_cq(priv, param, cq, comp, eq_ix);
        if (err != 0)
                return (err);

        err = mlx5e_enable_cq(cq, param, eq_ix);
        if (err != 0)
                mlx5e_destroy_cq(cq);
        return (err);
}
/*
 * Tear down a CQ: destroy the firmware object first, then free the
 * software state.  The order is mandatory.
 */
void
mlx5e_close_cq(struct mlx5e_cq *cq)
{

        mlx5e_disable_cq(cq);
        mlx5e_destroy_cq(cq);
}
/*
 * Open one TX completion queue per traffic class of this channel.
 * On failure the CQs opened so far are closed again in reverse order.
 */
static int
mlx5e_open_tx_cqs(struct mlx5e_channel *c,
    struct mlx5e_channel_param *cparam)
{
        int err = 0;
        int i;

        for (i = 0; i != c->priv->num_tc; i++) {
                /* open completion queue */
                err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[i].cq,
                    &mlx5e_tx_cq_comp, c->ix);
                if (err != 0)
                        break;
        }
        if (err != 0) {
                /* unwind the CQs opened before the failure */
                while (i-- != 0)
                        mlx5e_close_cq(&c->sq[i].cq);
        }
        return (err);
}
/*
 * Close all TX completion queues of this channel, one per traffic
 * class.
 */
static void
mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
        int i;

        for (i = 0; i != c->priv->num_tc; i++)
                mlx5e_close_cq(&c->sq[i].cq);
}
/*
 * Open one send queue per traffic class of this channel.  On failure
 * the SQs opened so far are torn down again in reverse order.
 */
static int
mlx5e_open_sqs(struct mlx5e_channel *c,
    struct mlx5e_channel_param *cparam)
{
        int err = 0;
        int i;

        for (i = 0; i != c->priv->num_tc; i++) {
                err = mlx5e_open_sq(c, i, &cparam->sq, &c->sq[i]);
                if (err != 0)
                        break;
        }
        if (err != 0) {
                /* unwind the SQs opened before the failure */
                while (i-- != 0)
                        mlx5e_close_sq_wait(&c->sq[i]);
        }
        return (err);
}
/*
 * Drain and tear down all send queues of this channel, one per
 * traffic class.
 */
static void
mlx5e_close_sqs_wait(struct mlx5e_channel *c)
{
        int i;

        for (i = 0; i != c->priv->num_tc; i++)
                mlx5e_close_sq_wait(&c->sq[i]);
}
/*
 * One-time initialization of the persistent (non-reset) parts of a
 * channel: locks, callouts, send tag and refcount completion.  The
 * counterpart is mlx5e_chan_static_destroy().
 */
static void
mlx5e_chan_static_init(struct mlx5e_priv *priv, struct mlx5e_channel *c, int ix)
{
        int tc;

        /* setup priv and channel number */
        c->priv = priv;
        c->ix = ix;

        /* setup send tag */
        m_snd_tag_init(&c->tag, c->priv->ifp, &mlx5e_ul_snd_tag_sw);

        init_completion(&c->completion);

        mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF);

        callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0);

        /* initialize locks for the maximum number of TCs, not just the active ones */
        for (tc = 0; tc != MLX5E_MAX_TX_NUM_TC; tc++) {
                struct mlx5e_sq *sq = c->sq + tc;

                mtx_init(&sq->lock, "mlx5tx",
                    MTX_NETWORK_LOCK " TX", MTX_DEF);
                mtx_init(&sq->comp_lock, "mlx5comp",
                    MTX_NETWORK_LOCK " TX", MTX_DEF);

                callout_init_mtx(&sq->cev_callout, &sq->lock, 0);
        }

        mlx5e_iq_static_init(&c->iq);
}
/*
 * Drop our send-tag reference and block until all outstanding send
 * tag references on the channel have been released.
 */
static void
mlx5e_chan_wait_for_completion(struct mlx5e_channel *c)
{

        m_snd_tag_rele(&c->tag);
        wait_for_completion(&c->completion);
}
/*
 * Wait for the send-tag references of the first "channels" channels
 * to drain.
 */
static void
mlx5e_priv_wait_for_completion(struct mlx5e_priv *priv, const uint32_t channels)
{
        uint32_t i;

        for (i = 0; i != channels; i++)
                mlx5e_chan_wait_for_completion(priv->channel + i);
}
/*
 * Destroy the persistent channel state set up by
 * mlx5e_chan_static_init(): drain callouts before destroying the
 * mutexes they are bound to.
 */
static void
mlx5e_chan_static_destroy(struct mlx5e_channel *c)
{
        int tc;

        /* drain the watchdog before destroying its mutex */
        callout_drain(&c->rq.watchdog);

        mtx_destroy(&c->rq.mtx);

        for (tc = 0; tc != MLX5E_MAX_TX_NUM_TC; tc++) {
                callout_drain(&c->sq[tc].cev_callout);
                mtx_destroy(&c->sq[tc].lock);
                mtx_destroy(&c->sq[tc].comp_lock);
        }

        mlx5e_iq_static_destroy(&c->iq);
}
/*
 * Bring up one channel: TX CQs, RX CQ, SQs, internal queue and RQ, in
 * that order.  On failure everything created so far is unwound in
 * reverse order.  After a successful open, the RX CQ is polled once
 * to process any completions that arrived before interrupts were
 * armed.
 */
static int
mlx5e_open_channel(struct mlx5e_priv *priv,
    struct mlx5e_channel_param *cparam,
    struct mlx5e_channel *c)
{
        struct epoch_tracker et;
        int i, err;

        /* zero non-persistent data */
        MLX5E_ZERO(&c->rq, mlx5e_rq_zero_start);
        for (i = 0; i != priv->num_tc; i++)
                MLX5E_ZERO(&c->sq[i], mlx5e_sq_zero_start);
        MLX5E_ZERO(&c->iq, mlx5e_iq_zero_start);

        /* open transmit completion queue */
        err = mlx5e_open_tx_cqs(c, cparam);
        if (err)
                goto err_free;

        /* open receive completion queue */
        err = mlx5e_open_cq(c->priv, &cparam->rx_cq, &c->rq.cq,
            &mlx5e_rx_cq_comp, c->ix);
        if (err)
                goto err_close_tx_cqs;

        err = mlx5e_open_sqs(c, cparam);
        if (err)
                goto err_close_rx_cq;

        err = mlx5e_iq_open(c, &cparam->sq, &cparam->tx_cq, &c->iq);
        if (err)
                goto err_close_sqs;

        err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
        if (err)
                goto err_close_iq;

        /* poll receive queue initially */
        NET_EPOCH_ENTER(et);
        c->rq.cq.mcq.comp(&c->rq.cq.mcq, NULL);
        NET_EPOCH_EXIT(et);

        return (0);

err_close_iq:
        mlx5e_iq_close(&c->iq);

err_close_sqs:
        mlx5e_close_sqs_wait(c);

err_close_rx_cq:
        mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
        mlx5e_close_tx_cqs(c);

err_free:
        return (err);
}
/*
 * First phase of channel teardown: initiate closing of the receive
 * queue.  The remaining teardown happens in
 * mlx5e_close_channel_wait().
 */
static void
mlx5e_close_channel(struct mlx5e_channel *c)
{

        mlx5e_close_rq(&c->rq);
}
/*
 * Second phase of channel teardown: wait for the RQ to finish, then
 * close the internal queue, SQs and TX CQs.  Must run after
 * mlx5e_close_channel() for the same channel.
 */
static void
mlx5e_close_channel_wait(struct mlx5e_channel *c)
{

        mlx5e_close_rq_wait(&c->rq);
        mlx5e_iq_close(&c->iq);
        mlx5e_close_sqs_wait(c);
        mlx5e_close_tx_cqs(c);
}
/*
 * Compute the receive buffer size (*wqe_sz) and the number of busdma
 * segments per WQE (*nsegs) for the current MTU / LRO configuration.
 * The buffer size is rounded up to the next mbuf cluster size.
 *
 * Returns 0 on success or -ENOMEM when the required size exceeds the
 * largest cluster or segment count; the outputs are NOT written in
 * that case.
 */
static int
mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs)
{
        u32 r, n;

        r = priv->params.hw_lro_en ? priv->params.lro_wqe_sz :
            MLX5E_SW2MB_MTU(if_getmtu(priv->ifp));
        if (r > MJUM16BYTES)
                return (-ENOMEM);

        /* round up to the next supported mbuf cluster size */
        if (r > MJUM9BYTES)
                r = MJUM16BYTES;
        else if (r > MJUMPAGESIZE)
                r = MJUM9BYTES;
        else if (r > MCLBYTES)
                r = MJUMPAGESIZE;
        else
                r = MCLBYTES;

        /*
         * n + 1 must be a power of two, because stride size must be.
         * Stride size is 16 * (n + 1), as the first segment is
         * control.
         */
        n = roundup_pow_of_two(1 + howmany(r, MLX5E_MAX_RX_BYTES)) - 1;
        if (n > MLX5E_MAX_BUSDMA_RX_SEGS)
                return (-ENOMEM);

        *wqe_sz = r;
        *nsegs = n;
        return (0);
}
static void
mlx5e_build_rq_param(struct mlx5e_priv *priv,
struct mlx5e_rq_param *param)
{
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
u32 wqe_sz, nsegs;
mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe) +
nsegs * sizeof(struct mlx5_wqe_data_seg)));
MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
MLX5_SET(wq, wq, pd, priv->pdn);
param->wq.linear = 1;
}
/*
 * Fill in the SQ creation parameters: ring size, the fixed send WQE
 * basic-block stride and the protection domain.
 */
static void
mlx5e_build_sq_param(struct mlx5e_priv *priv,
    struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
        MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
        MLX5_SET(wq, wq, pd, priv->pdn);

        param->wq.linear = 1;
}
/*
 * Set the CQ context fields shared by RX and TX completion queues
 * (currently only the UAR page).
 */
static void
mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
}
/*
 * Fetch the default dynamic interrupt moderation (DIM) profile for
 * "mode" and clamp its packet budget when hardware LRO is enabled.
 */
static void
mlx5e_get_default_profile(struct mlx5e_priv *priv, int mode, struct net_dim_cq_moder *ptr)
{

        *ptr = net_dim_get_profile(mode, MLX5E_DIM_DEFAULT_PROFILE);

        /* apply LRO restrictions */
        if (priv->params.hw_lro_en &&
            ptr->pkts > MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO) {
                ptr->pkts = MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO;
        }
}
/*
 * Fill in the RX CQ context: optional CQE compression, CQ size and
 * the interrupt moderation settings selected by
 * priv->params.rx_cq_moderation_mode:
 *   0 - static moderation, period counted from EQE
 *   1 - static moderation, period counted from CQE (if supported)
 *   2 - dynamic (DIM) moderation, EQE based defaults
 *   3 - dynamic (DIM) moderation, CQE based defaults (if supported)
 */
static void
mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param)
{
        struct net_dim_cq_moder curr;
        void *cqc = param->cqc;

        /*
         * We use MLX5_CQE_FORMAT_HASH because the RX hash mini CQE
         * format is more beneficial for FreeBSD use case.
         *
         * Adding support for MLX5_CQE_FORMAT_CSUM will require changes
         * in mlx5e_decompress_cqe.
         */
        if (priv->params.cqe_zipping_en) {
                MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_HASH);
                MLX5_SET(cqc, cqc, cqe_compression_en, 1);
        }

        MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);

        switch (priv->params.rx_cq_moderation_mode) {
        case 0:
                MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
                MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);
                MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
                break;
        case 1:
                MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
                MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);
                /* CQE based start is an optional device capability */
                if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
                        MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
                else
                        MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
                break;
        case 2:
                mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE, &curr);
                MLX5_SET(cqc, cqc, cq_period, curr.usec);
                MLX5_SET(cqc, cqc, cq_max_count, curr.pkts);
                MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
                break;
        case 3:
                mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE, &curr);
                MLX5_SET(cqc, cqc, cq_period, curr.usec);
                MLX5_SET(cqc, cqc, cq_max_count, curr.pkts);
                if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
                        MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
                else
                        MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
                break;
        default:
                break;
        }

        mlx5e_dim_build_cq_param(priv, param);

        mlx5e_build_common_cq_param(priv, param);
}
/*
 * Fill in the TX CQ context: size and interrupt moderation.  Mode 0
 * counts the moderation period from the EQE; any other mode prefers
 * CQE based counting when the device supports it.
 */
static void
mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
        MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec);
        MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts);

        switch (priv->params.tx_cq_moderation_mode) {
        case 0:
                MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
                break;
        default:
                if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
                        MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
                else
                        MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
                break;
        }

        mlx5e_build_common_cq_param(priv, param);
}
/*
 * Populate all per-channel creation parameters (RQ, SQ and their
 * completion queues) from the current private configuration.
 */
static void
mlx5e_build_channel_param(struct mlx5e_priv *priv,
    struct mlx5e_channel_param *cparam)
{
        /* start from a clean slate */
        memset(cparam, 0, sizeof(*cparam));

        /* receive side */
        mlx5e_build_rq_param(priv, &cparam->rq);
        mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);

        /* transmit side */
        mlx5e_build_sq_param(priv, &cparam->sq);
        mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
}
/*
 * Open all configured channels and optionally pin each channel's
 * interrupt to a CPU according to the ethtool irq_cpu_base/stride
 * settings.  On failure the channels opened so far are closed again.
 */
static int
mlx5e_open_channels(struct mlx5e_priv *priv)
{
        struct mlx5e_channel_param *cparam;
        int err;
        int i;

        cparam = malloc(sizeof(*cparam), M_MLX5EN, M_WAITOK);

        mlx5e_build_channel_param(priv, cparam);
        for (i = 0; i < priv->params.num_channels; i++) {
                err = mlx5e_open_channel(priv, cparam, &priv->channel[i]);
                if (err)
                        goto err_close_channels;

                /* Bind interrupt vectors, if any. */
                if (priv->params_ethtool.irq_cpu_base > -1) {
                        cpuset_t cpuset;
                        int cpu;
                        int irq;
                        int eqn;
                        int nirq;

                        err = mlx5_vector2eqn(priv->mdev, i,
                            &eqn, &nirq);

                        /* error here is non-fatal */
                        if (err != 0)
                                continue;

                        irq = priv->mdev->priv.msix_arr[nirq].vector;
                        /* wrap the CPU index around the number of CPUs */
                        cpu = (unsigned)(priv->params_ethtool.irq_cpu_base +
                            i * priv->params_ethtool.irq_cpu_stride) % (unsigned)mp_ncpus;

                        CPU_ZERO(&cpuset);
                        CPU_SET(cpu, &cpuset);
                        intr_setaffinity(irq, CPU_WHICH_INTRHANDLER, &cpuset);
                }
        }
        free(cparam, M_MLX5EN);
        return (0);

err_close_channels:
        /* close channels opened before the failure, in reverse order */
        while (i--) {
                mlx5e_close_channel(&priv->channel[i]);
                mlx5e_close_channel_wait(&priv->channel[i]);
        }
        free(cparam, M_MLX5EN);
        return (err);
}
/*
 * Close all channels in two passes: first initiate close on every
 * channel, then wait for each to complete.  Splitting the passes lets
 * the channels drain in parallel.
 */
static void
mlx5e_close_channels(struct mlx5e_priv *priv)
{
        const int nch = priv->params.num_channels;
        int i;

        for (i = 0; i != nch; i++)
                mlx5e_close_channel(&priv->channel[i]);
        for (i = 0; i != nch; i++)
                mlx5e_close_channel_wait(&priv->channel[i]);
}
/*
 * Push the current TX interrupt moderation settings to an existing
 * SQ's completion queue.  Uses the mode-modifying firmware command
 * when the device supports changing the period mode at runtime.
 */
static int
mlx5e_refresh_sq_params(struct mlx5e_priv *priv, struct mlx5e_sq *sq)
{

        if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
                uint8_t cq_mode;

                /* modes 0 and 2 are EQE based; 1 and 3 are CQE based */
                switch (priv->params.tx_cq_moderation_mode) {
                case 0:
                case 2:
                        cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
                        break;
                default:
                        cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
                        break;
                }

                return (mlx5_core_modify_cq_moderation_mode(priv->mdev, &sq->cq.mcq,
                    priv->params.tx_cq_moderation_usec,
                    priv->params.tx_cq_moderation_pkts,
                    cq_mode));
        }

        /* device cannot change the period mode; update values only */
        return (mlx5_core_modify_cq_moderation(priv->mdev, &sq->cq.mcq,
            priv->params.tx_cq_moderation_usec,
            priv->params.tx_cq_moderation_pkts));
}
/*
 * Push the current RX interrupt moderation settings to an existing
 * RQ's completion queue.  For dynamic (DIM) modes the moderation work
 * task is quiesced first and re-enabled with fresh defaults after the
 * firmware command completes.
 */
static int
mlx5e_refresh_rq_params(struct mlx5e_priv *priv, struct mlx5e_rq *rq)
{

        if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
                uint8_t cq_mode;
                uint8_t dim_mode;
                int retval;

                /* modes 0 and 2 are EQE based; 1 and 3 are CQE based */
                switch (priv->params.rx_cq_moderation_mode) {
                case 0:
                case 2:
                        cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
                        dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
                        break;
                default:
                        cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
                        dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
                        break;
                }

                /* tear down dynamic interrupt moderation */
                mtx_lock(&rq->mtx);
                rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
                mtx_unlock(&rq->mtx);

                /* wait for dynamic interrupt moderation work task, if any */
                cancel_work_sync(&rq->dim.work);

                if (priv->params.rx_cq_moderation_mode >= 2) {
                        struct net_dim_cq_moder curr;

                        mlx5e_get_default_profile(priv, dim_mode, &curr);

                        retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq,
                            curr.usec, curr.pkts, cq_mode);

                        /* set dynamic interrupt moderation mode and zero defaults */
                        mtx_lock(&rq->mtx);
                        rq->dim.mode = dim_mode;
                        rq->dim.state = 0;
                        rq->dim.profile_ix = MLX5E_DIM_DEFAULT_PROFILE;
                        mtx_unlock(&rq->mtx);
                } else {
                        retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq,
                            priv->params.rx_cq_moderation_usec,
                            priv->params.rx_cq_moderation_pkts,
                            cq_mode);
                }
                return (retval);
        }

        /* device cannot change the period mode; update values only */
        return (mlx5_core_modify_cq_moderation(priv->mdev, &rq->cq.mcq,
            priv->params.rx_cq_moderation_usec,
            priv->params.rx_cq_moderation_pkts));
}
/*
 * Refresh the moderation parameters of one channel: the RQ first,
 * then every active traffic class' SQ.  Stops at the first error.
 */
static int
mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
{
        int err;
        int i;

        err = mlx5e_refresh_rq_params(priv, &c->rq);
        if (err)
                goto done;

        for (i = 0; i != priv->num_tc; i++) {
                err = mlx5e_refresh_sq_params(priv, &c->sq[i]);
                if (err)
                        goto done;
        }
done:
        return (err);
}
/*
 * Refresh interrupt moderation parameters on all open channels.
 * Returns EINVAL when the interface is not opened, or the first
 * per-channel error encountered.
 */
int
mlx5e_refresh_channel_params(struct mlx5e_priv *priv)
{
        int i;

        /* check if channels are closed */
        if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
                return (EINVAL);

        for (i = 0; i < priv->params.num_channels; i++) {
                int err;

                err = mlx5e_refresh_channel_params_sub(priv, &priv->channel[i]);
                if (err)
                        return (err);
        }
        return (0);
}
/*
 * Create one transport interface send (TIS) object for traffic class
 * "tc".  The resulting TIS number is stored in priv->tisn[tc].
 */
static int
mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(create_tis_in)];
        void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

        memset(in, 0, sizeof(in));

        MLX5_SET(tisc, tisc, prio, tc);
        MLX5_SET(tisc, tisc, transport_domain, priv->tdn);

        return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]));
}
/*
 * Destroy the TIS object of traffic class "tc".
 */
static void
mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
{

        mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc], 0);
}
/*
 * Create one TIS per traffic class.  On failure the TISes created so
 * far are destroyed again in reverse order.
 */
static int
mlx5e_open_tises(struct mlx5e_priv *priv)
{
        int err = 0;
        int i;

        for (i = 0; i != priv->num_tc; i++) {
                err = mlx5e_open_tis(priv, i);
                if (err != 0)
                        break;
        }
        if (err != 0) {
                while (i-- != 0)
                        mlx5e_close_tis(priv, i);
        }
        return (err);
}
/*
 * Destroy the TIS object of every traffic class.
 */
static void
mlx5e_close_tises(struct mlx5e_priv *priv)
{
        int i = 0;

        while (i < priv->num_tc) {
                mlx5e_close_tis(priv, i);
                i++;
        }
}
/*
 * Create a receive queue table of "sz" entries, with every entry
 * initially pointing at the drop RQ.  The RQT number is returned
 * through "prqtn"; real RQ numbers are filled in later by
 * mlx5e_activate_rqt().
 */
static int
mlx5e_open_default_rqt(struct mlx5e_priv *priv, u32 *prqtn, int sz)
{
        u32 *in;
        void *rqtc;
        int inlen;
        int err;
        int i;

        inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (in == NULL)
                return (-ENOMEM);
        rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
        MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

        /* park all entries on the drop RQ until channels are opened */
        for (i = 0; i != sz; i++)
                MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);

        err = mlx5_core_create_rqt(priv->mdev, in, inlen, prqtn);
        kvfree(in);

        return (err);
}
/*
 * Create the main RSS indirection table plus one single-entry RQT per
 * completion vector (used for direct channel dispatch).  On failure
 * the tables created so far are destroyed again.
 */
static int
mlx5e_open_rqts(struct mlx5e_priv *priv)
{
        int err;
        int i;

        err = mlx5e_open_default_rqt(priv, &priv->rqtn,
            1 << priv->params.rx_hash_log_tbl_sz);
        if (err)
                goto err_default;

        for (i = 0; i != priv->mdev->priv.eq_table.num_comp_vectors; i++) {
                err = mlx5e_open_default_rqt(priv, &priv->channel[i].rqtn, 1);
                if (err)
                        goto err_channel;
        }
        return (0);

err_channel:
        /* destroy per-channel tables created before the failure */
        while (i--)
                mlx5_core_destroy_rqt(priv->mdev, priv->channel[i].rqtn, 0);

        mlx5_core_destroy_rqt(priv->mdev, priv->rqtn, 0);

err_default:
        return (err);
}
/*
 * Destroy all per-channel RQTs and finally the main RSS indirection
 * table.
 */
static void
mlx5e_close_rqts(struct mlx5e_priv *priv)
{
        const int num = priv->mdev->priv.eq_table.num_comp_vectors;
        int i;

        for (i = 0; i != num; i++)
                mlx5_core_destroy_rqt(priv->mdev, priv->channel[i].rqtn, 0);

        mlx5_core_destroy_rqt(priv->mdev, priv->rqtn, 0);
}
/*
 * Point the RSS indirection table and the per-channel single-entry
 * tables at the real channel RQs.  The channel index for each table
 * slot comes from the kernel RSS bucket mapping when compiled with
 * RSS, otherwise it is the slot index modulo the channel count.
 *
 * NOTE: on success the code deliberately falls through the
 * "err_modify" label, which is the common free-and-return path.
 */
static int
mlx5e_activate_rqt(struct mlx5e_priv *priv)
{
        u32 *in;
        void *rqtc;
        int inlen;
        int err;
        int sz;
        int i;

        sz = 1 << priv->params.rx_hash_log_tbl_sz;

        inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (in == NULL)
                return (-ENOMEM);

        rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
        MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);

        for (i = 0; i != sz; i++) {
                int ix;
#ifdef RSS
                ix = rss_get_indirection_to_bucket(i);
#else
                ix = i;
#endif
                /* ensure we don't overflow */
                ix %= priv->params.num_channels;

                /* apply receive side scaling stride, if any */
                ix -= ix % (int)priv->params.channels_rsss;

                MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix].rq.rqn);
        }

        err = mlx5_core_modify_rqt(priv->mdev, priv->rqtn, in, inlen);
        if (err)
                goto err_modify;

        /* now update the single-entry per-channel tables */
        inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, 1);

        for (i = 0; i != priv->mdev->priv.eq_table.num_comp_vectors; i++) {
                int ix;
#ifdef RSS
                ix = rss_get_indirection_to_bucket(i);
#else
                ix = i;
#endif
                /* ensure we don't overflow */
                ix %= priv->params.num_channels;

                /* apply receive side scaling stride, if any */
                ix -= ix % (int)priv->params.channels_rsss;

                MLX5_SET(rqtc, rqtc, rq_num[0], priv->channel[ix].rq.rqn);
                err = mlx5_core_modify_rqt(priv->mdev, priv->channel[i].rqtn, in, inlen);
                if (err)
                        goto err_modify;
        }

err_modify:
        kvfree(in);
        return (err);
}
/*
 * Point the RSS indirection table and all per-channel tables back at
 * the drop RQ, so received traffic is discarded while the channels
 * are being closed.  Mirrors mlx5e_activate_rqt(); on success the
 * code falls through the "err_modify" label (common cleanup).
 */
static int
mlx5e_deactivate_rqt(struct mlx5e_priv *priv)
{
        u32 *in;
        void *rqtc;
        int inlen;
        int err;
        int sz;
        int i;

        sz = 1 << priv->params.rx_hash_log_tbl_sz;

        inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (in == NULL)
                return (-ENOMEM);

        rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
        MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);

        for (i = 0; i != sz; i++)
                MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);

        err = mlx5_core_modify_rqt(priv->mdev, priv->rqtn, in, inlen);
        if (err)
                goto err_modify;

        /* now update the single-entry per-channel tables */
        inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, 1);

        for (i = 0; i != priv->mdev->priv.eq_table.num_comp_vectors; i++) {
                MLX5_SET(rqtc, rqtc, rq_num[0], priv->drop_rq.rqn);
                err = mlx5_core_modify_rqt(priv->mdev, priv->channel[i].rqtn, in, inlen);
                if (err)
                        goto err_modify;
        }

err_modify:
        kvfree(in);
        return (err);
}
#define	MLX5E_RSS_KEY_SIZE	(10 * 4)	/* bytes */

/*
 * Copy the 40-byte Toeplitz RSS hash key into "key_ptr".  With kernel
 * RSS support the system-wide key is used; otherwise a fixed built-in
 * key is used (presumably chosen for symmetric hashing together with
 * rx_hash_symmetric - do not change the byte values).
 */
static void
mlx5e_get_rss_key(void *key_ptr)
{
#ifdef	RSS
        rss_getkey(key_ptr);
#else
        static const u32 rsskey[] = {
            cpu_to_be32(0xD181C62C),
            cpu_to_be32(0xF7F4DB5B),
            cpu_to_be32(0x1983A2FC),
            cpu_to_be32(0x943E1ADB),
            cpu_to_be32(0xD9389E6B),
            cpu_to_be32(0xD1039C2C),
            cpu_to_be32(0xA74499AD),
            cpu_to_be32(0x593D56D9),
            cpu_to_be32(0xF3253C06),
            cpu_to_be32(0x2ADC1FFC),
        };
        /* the key must exactly fill the hardware field */
        CTASSERT(sizeof(rsskey) == MLX5E_RSS_KEY_SIZE);
        memcpy(key_ptr, rsskey, MLX5E_RSS_KEY_SIZE);
#endif
}
/*
 * Build the TIR (transport interface receive) context for traffic
 * type "tt".  This selects LRO settings, the indirection table, the
 * Toeplitz hash key and which packet fields are hashed.  When
 * "inner_vxlan" is set the hash fields are configured on the inner
 * (encapsulated) headers and tunneled offloads are enabled.
 */
static void
mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt, bool inner_vxlan)
{
        void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
        void *hfsi = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
        void *hfs = inner_vxlan ? hfsi : hfso;
        __be32 *hkey;

        MLX5_SET(tirc, tirc, transport_domain, priv->tdn);

#define	ROUGH_MAX_L2_L3_HDR_SZ 256

/* hash on source and destination IP addresses only */
#define	MLX5_HASH_IP     (MLX5_HASH_FIELD_SEL_SRC_IP   |\
			  MLX5_HASH_FIELD_SEL_DST_IP)

/* hash on addresses plus L4 ports */
#define	MLX5_HASH_ALL    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
			  MLX5_HASH_FIELD_SEL_DST_IP   |\
			  MLX5_HASH_FIELD_SEL_L4_SPORT |\
			  MLX5_HASH_FIELD_SEL_L4_DPORT)

/* hash on addresses plus the IPSEC SPI */
#define	MLX5_HASH_IP_IPSEC_SPI	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

        if (priv->params.hw_lro_en) {
                MLX5_SET(tirc, tirc, lro_enable_mask,
                    MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
                    MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
                /* leave headroom for L2/L3 headers in the LRO message */
                MLX5_SET(tirc, tirc, lro_max_msg_sz,
                    (priv->params.lro_wqe_sz -
                    ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
                /* TODO: add the option to choose timer value dynamically */
                MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
                    MLX5_CAP_ETH(priv->mdev,
                    lro_timer_supported_periods[2]));
        }

        if (inner_vxlan)
                MLX5_SET(tirc, tirc, tunneled_offload_en, 1);

        /*
         * All packets must go through the indirection table, RQT,
         * because it is not possible to modify the RQN of the TIR
         * for direct dispatchment after it is created, typically
         * when the link goes up and down.
         */
        MLX5_SET(tirc, tirc, disp_type,
            MLX5_TIRC_DISP_TYPE_INDIRECT);
        MLX5_SET(tirc, tirc, indirect_table,
            priv->rqtn);
        MLX5_SET(tirc, tirc, rx_hash_fn,
            MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
        hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);

        /* the hardware key field must be able to hold our key */
        CTASSERT(MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key) >=
            MLX5E_RSS_KEY_SIZE);
#ifdef RSS
        /*
         * The FreeBSD RSS implementation does currently not
         * support symmetric Toeplitz hashes:
         */
        MLX5_SET(tirc, tirc, rx_hash_symmetric, 0);
#else
        MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
#endif
        mlx5e_get_rss_key(hkey);

        /* select the hashed fields according to the traffic type */
        switch (tt) {
        case MLX5E_TT_IPV4_TCP:
                MLX5_SET(rx_hash_field_select, hfs, l3_prot_type,
                    MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfs, l4_prot_type,
                    MLX5_L4_PROT_TYPE_TCP);
#ifdef RSS
                /* fall back to IP-only hashing when 4-tuple is disabled */
                if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) {
                        MLX5_SET(rx_hash_field_select, hfs, selected_fields,
                            MLX5_HASH_IP);
                } else
#endif
                MLX5_SET(rx_hash_field_select, hfs, selected_fields,
                    MLX5_HASH_ALL);
                break;

        case MLX5E_TT_IPV6_TCP:
                MLX5_SET(rx_hash_field_select, hfs, l3_prot_type,
                    MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfs, l4_prot_type,
                    MLX5_L4_PROT_TYPE_TCP);
#ifdef RSS
                if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) {
                        MLX5_SET(rx_hash_field_select, hfs, selected_fields,
                            MLX5_HASH_IP);
                } else
#endif
                MLX5_SET(rx_hash_field_select, hfs, selected_fields,
                    MLX5_HASH_ALL);
                break;

        case MLX5E_TT_IPV4_UDP:
                MLX5_SET(rx_hash_field_select, hfs, l3_prot_type,
                    MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfs, l4_prot_type,
                    MLX5_L4_PROT_TYPE_UDP);
#ifdef RSS
                if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) {
                        MLX5_SET(rx_hash_field_select, hfs, selected_fields,
                            MLX5_HASH_IP);
                } else
#endif
                MLX5_SET(rx_hash_field_select, hfs, selected_fields,
                    MLX5_HASH_ALL);
                break;

        case MLX5E_TT_IPV6_UDP:
                MLX5_SET(rx_hash_field_select, hfs, l3_prot_type,
                    MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfs, l4_prot_type,
                    MLX5_L4_PROT_TYPE_UDP);
#ifdef RSS
                if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) {
                        MLX5_SET(rx_hash_field_select, hfs, selected_fields,
                            MLX5_HASH_IP);
                } else
#endif
                MLX5_SET(rx_hash_field_select, hfs, selected_fields,
                    MLX5_HASH_ALL);
                break;

        case MLX5E_TT_IPV4_IPSEC_AH:
                MLX5_SET(rx_hash_field_select, hfs, l3_prot_type,
                    MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfs, selected_fields,
                    MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV6_IPSEC_AH:
                MLX5_SET(rx_hash_field_select, hfs, l3_prot_type,
                    MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfs, selected_fields,
                    MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV4_IPSEC_ESP:
                MLX5_SET(rx_hash_field_select, hfs, l3_prot_type,
                    MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfs, selected_fields,
                    MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV6_IPSEC_ESP:
                MLX5_SET(rx_hash_field_select, hfs, l3_prot_type,
                    MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfs, selected_fields,
                    MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV4:
                MLX5_SET(rx_hash_field_select, hfs, l3_prot_type,
                    MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfs, selected_fields,
                    MLX5_HASH_IP);
                break;

        case MLX5E_TT_IPV6:
                MLX5_SET(rx_hash_field_select, hfs, l3_prot_type,
                    MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfs, selected_fields,
                    MLX5_HASH_IP);
                break;

        default:
                break;
        }
}
/*
 * Create one TIR for traffic type "tt".  The TIR number is stored in
 * priv->tirn[tt] or, for inner VXLAN dispatch, in
 * priv->tirn_inner_vxlan[tt].
 */
static int
mlx5e_open_tir(struct mlx5e_priv *priv, int tt, bool inner_vxlan)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 *in;
        void *tirc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_tir_in);
        in = mlx5_vzalloc(inlen);
        if (in == NULL)
                return (-ENOMEM);
        tirc = MLX5_ADDR_OF(create_tir_in, in, tir_context);

        mlx5e_build_tir_ctx(priv, tirc, tt, inner_vxlan);

        err = mlx5_core_create_tir(mdev, in, inlen, inner_vxlan ?
            &priv->tirn_inner_vxlan[tt] : &priv->tirn[tt]);

        kvfree(in);

        return (err);
}
/*
 * Destroy the (outer or inner VXLAN) TIR of traffic type "tt".
 */
static void
mlx5e_close_tir(struct mlx5e_priv *priv, int tt, bool inner_vxlan)
{

        mlx5_core_destroy_tir(priv->mdev, inner_vxlan ?
            priv->tirn_inner_vxlan[tt] : priv->tirn[tt], 0);
}
/*
 * Create two TIRs (outer and inner VXLAN) for every traffic type.
 * Even iteration indices are outer TIRs, odd ones are inner VXLAN
 * TIRs.  On failure the TIRs created so far are destroyed again in
 * reverse order.
 */
static int
mlx5e_open_tirs(struct mlx5e_priv *priv)
{
        int err = 0;
        int i;

        for (i = 0; i != 2 * MLX5E_NUM_TT; i++) {
                err = mlx5e_open_tir(priv, i / 2, (i % 2) != 0);
                if (err != 0)
                        break;
        }
        if (err != 0) {
                while (i-- != 0)
                        mlx5e_close_tir(priv, i / 2, (i % 2) != 0);
        }
        return (err);
}
/*
 * Destroy both the outer and inner-VXLAN TIR of every traffic type.
 */
static void
mlx5e_close_tirs(struct mlx5e_priv *priv)
{
        int i;

        for (i = 0; i != 2 * MLX5E_NUM_TT; i++)
                mlx5e_close_tir(priv, i / 2, (i % 2) != 0);
}
/*
 * SW MTU does not include headers,
 * HW MTU includes all headers and checksums.
 *
 * Program the requested software MTU into the port and vport, read
 * back what the hardware accepted and record it (plus its most
 * significant power of two) in the ethtool parameters.  Returns 0 on
 * success or a negative errno.
 */
static int
mlx5e_set_dev_port_mtu(if_t ifp, int sw_mtu)
{
        struct mlx5e_priv *priv = if_getsoftc(ifp);
        struct mlx5_core_dev *mdev = priv->mdev;
        int hw_mtu;
        int err;

        hw_mtu = MLX5E_SW2HW_MTU(sw_mtu);

        err = mlx5_set_port_mtu(mdev, hw_mtu);
        if (err) {
                mlx5_en_err(ifp, "mlx5_set_port_mtu failed setting %d, err=%d\n",
                    sw_mtu, err);
                return (err);
        }

        /* Update vport context MTU; failure here is not fatal. */
        err = mlx5_set_vport_mtu(mdev, hw_mtu);
        if (err) {
                mlx5_en_err(ifp,
                    "Failed updating vport context with MTU size, err=%d\n",
                    err);
        }

        if_setmtu(ifp, sw_mtu);

        /* read back the MTU the hardware actually accepted */
        err = mlx5_query_vport_mtu(mdev, &hw_mtu);
        if (err || !hw_mtu) {
                /* fallback to port oper mtu */
                err = mlx5_query_port_oper_mtu(mdev, &hw_mtu);
        }
        if (err) {
                mlx5_en_err(ifp,
                    "Query port MTU, after setting new MTU value, failed\n");
                return (err);
        } else if (MLX5E_HW2SW_MTU(hw_mtu) < sw_mtu) {
                /* was "err = -E2BIG," (comma operator typo) */
                err = -E2BIG;
                mlx5_en_err(ifp,
                    "Port MTU %d is smaller than ifp mtu %d\n",
                    hw_mtu, sw_mtu);
        } else if (MLX5E_HW2SW_MTU(hw_mtu) > sw_mtu) {
                err = -EINVAL;
                mlx5_en_err(ifp,
                    "Port MTU %d is bigger than ifp mtu %d\n",
                    hw_mtu, sw_mtu);
        }
        priv->params_ethtool.hw_mtu = hw_mtu;

        /* compute MSB: highest power of two not exceeding hw_mtu */
        while (hw_mtu & (hw_mtu - 1))
                hw_mtu &= (hw_mtu - 1);
        priv->params_ethtool.hw_mtu_msb = hw_mtu;

        return (err);
}
/*
 * Bring the interface up: create TISes, allocate the vport queue
 * counter, open all channels and activate the RSS indirection
 * tables.  Called with the priv state lock held.  A no-op when
 * already opened.  On failure all completed steps are undone in
 * reverse order.
 */
int
mlx5e_open_locked(if_t ifp)
{
        struct mlx5e_priv *priv = if_getsoftc(ifp);
        int err;
        u16 set_id;

        /* check if already opened */
        if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
                return (0);

#ifdef RSS
        if (rss_getnumbuckets() > priv->params.num_channels) {
                mlx5_en_info(ifp,
                    "NOTE: There are more RSS buckets(%u) than channels(%u) available\n",
                    rss_getnumbuckets(), priv->params.num_channels);
        }
#endif
        err = mlx5e_open_tises(priv);
        if (err) {
                mlx5_en_err(ifp, "mlx5e_open_tises failed, %d\n", err);
                return (err);
        }
        err = mlx5_vport_alloc_q_counter(priv->mdev,
            MLX5_INTERFACE_PROTOCOL_ETH, &set_id);
        if (err) {
                mlx5_en_err(priv->ifp,
                    "mlx5_vport_alloc_q_counter failed: %d\n", err);
                goto err_close_tises;
        }
        /* store counter set ID */
        priv->counter_set_id = set_id;

        err = mlx5e_open_channels(priv);
        if (err) {
                mlx5_en_err(ifp,
                    "mlx5e_open_channels failed, %d\n", err);
                goto err_dalloc_q_counter;
        }
        err = mlx5e_activate_rqt(priv);
        if (err) {
                mlx5_en_err(ifp, "mlx5e_activate_rqt failed, %d\n", err);
                goto err_close_channels;
        }

        set_bit(MLX5E_STATE_OPENED, &priv->state);

        mlx5e_update_carrier(priv);

        return (0);

err_close_channels:
        mlx5e_close_channels(priv);

err_dalloc_q_counter:
        mlx5_vport_dealloc_q_counter(priv->mdev,
            MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);

err_close_tises:
        mlx5e_close_tises(priv);

        return (err);
}
/*
 * Deferred "interface up" handler: set the physical port
 * administratively up, open the data path and mark the interface
 * running.  Errors from the open path are logged but not propagated
 * (this runs asynchronously with no caller to report to).
 */
static void
mlx5e_open(void *arg)
{
        struct mlx5e_priv *priv = arg;

        PRIV_LOCK(priv);
        if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP))
                mlx5_en_err(priv->ifp,
                    "Setting port status to up failed\n");

        mlx5e_open_locked(priv->ifp);
        if_setdrvflagbits(priv->ifp, IFF_DRV_RUNNING, 0);
        PRIV_UNLOCK(priv);
}
/*
 * Bring the interface down: the exact reverse of mlx5e_open_locked().
 * The RQT is deactivated (traffic redirected to the drop RQ) before
 * the channels are closed.  Called with the priv state lock held; a
 * no-op when already closed.
 */
int
mlx5e_close_locked(if_t ifp)
{
        struct mlx5e_priv *priv = if_getsoftc(ifp);

        /* check if already closed */
        if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
                return (0);

        clear_bit(MLX5E_STATE_OPENED, &priv->state);

        if_link_state_change(priv->ifp, LINK_STATE_DOWN);

        mlx5e_deactivate_rqt(priv);
        mlx5e_close_channels(priv);
        mlx5_vport_dealloc_q_counter(priv->mdev,
            MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
        mlx5e_close_tises(priv);

        return (0);
}
/*
 * if_get_counter() method: map generic interface counters onto the
 * cached vport/pport statistics.  Runs lockless by design (see the
 * commented-out PRIV_LOCK), so values may be slightly stale.
 */
static uint64_t
mlx5e_get_counter(if_t ifp, ift_counter cnt)
{
        struct mlx5e_priv *priv = if_getsoftc(ifp);
        u64 retval;

        /* PRIV_LOCK(priv); XXX not allowed */
        switch (cnt) {
        case IFCOUNTER_IPACKETS:
                retval = priv->stats.vport.rx_packets;
                break;
        case IFCOUNTER_IERRORS:
                /* sum of all physical-port receive error classes */
                retval = priv->stats.pport.in_range_len_errors +
                    priv->stats.pport.out_of_range_len +
                    priv->stats.pport.too_long_errors +
                    priv->stats.pport.check_seq_err +
                    priv->stats.pport.alignment_err;
                break;
        case IFCOUNTER_IQDROPS:
                retval = priv->stats.vport.rx_out_of_buffer;
                break;
        case IFCOUNTER_OPACKETS:
                retval = priv->stats.vport.tx_packets;
                break;
        case IFCOUNTER_OERRORS:
                retval = priv->stats.port_stats_debug.out_discards;
                break;
        case IFCOUNTER_IBYTES:
                retval = priv->stats.vport.rx_bytes;
                break;
        case IFCOUNTER_OBYTES:
                retval = priv->stats.vport.tx_bytes;
                break;
        case IFCOUNTER_IMCASTS:
                retval = priv->stats.vport.rx_multicast_packets;
                break;
        case IFCOUNTER_OMCASTS:
                retval = priv->stats.vport.tx_multicast_packets;
                break;
        case IFCOUNTER_OQDROPS:
                retval = priv->stats.vport.tx_queue_dropped;
                break;
        case IFCOUNTER_COLLISIONS:
                retval = priv->stats.pport.collisions;
                break;
        default:
                retval = if_get_counter_default(ifp, cnt);
                break;
        }
        /* PRIV_UNLOCK(priv); XXX not allowed */
        return (retval);
}
/*
 * Defer RX filter reprogramming to the driver workqueue; the actual
 * work is done by mlx5e_set_rx_mode_work().
 */
static void
mlx5e_set_rx_mode(if_t ifp)
{
	struct mlx5e_priv *priv = if_getsoftc(ifp);

	queue_work(priv->wq, &priv->set_rx_mode_work);
}
/*
 * Main ifnet ioctl handler.  Handles MTU changes, up/down transitions,
 * multicast updates, media, capability toggling (SIOCSIFCAP[NV]),
 * module EEPROM reads (SIOCGI2C), down-reason queries and RSS
 * key/hash reporting.  Returns 0 or an errno value.
 */
static int
mlx5e_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct mlx5e_priv *priv;
	struct ifreq *ifr;
	struct ifdownreason *ifdr;
	struct ifi2creq i2c;
	struct ifrsskey *ifrk;
	struct ifrsshash *ifrh;
	struct siocsifcapnv_driver_data *drv_ioctl_data, drv_ioctl_data_d;
	int error = 0;
	int mask;
	int size_read = 0;
	int module_status;
	int module_num;
	int max_mtu;
	uint8_t read_addr;

	priv = if_getsoftc(ifp);
	/* check if detaching */
	if (priv == NULL || priv->gone != 0)
		return (ENXIO);
	switch (command) {
	case SIOCSIFMTU:
		ifr = (struct ifreq *)data;
		PRIV_LOCK(priv);
		mlx5_query_port_max_mtu(priv->mdev, &max_mtu);
		if (ifr->ifr_mtu >= MLX5E_MTU_MIN &&
		    ifr->ifr_mtu <= MIN(MLX5E_MTU_MAX, max_mtu)) {
			int was_opened;

			/* the data path must be closed around an MTU change */
			was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
			if (was_opened)
				mlx5e_close_locked(ifp);
			/* set new MTU */
			mlx5e_set_dev_port_mtu(ifp, ifr->ifr_mtu);
			if (was_opened)
				mlx5e_open_locked(ifp);
		} else {
			error = EINVAL;
			mlx5_en_err(ifp,
			    "Invalid MTU value. Min val: %d, Max val: %d\n",
			    MLX5E_MTU_MIN, MIN(MLX5E_MTU_MAX, max_mtu));
		}
		PRIV_UNLOCK(priv);
		break;
	case SIOCSIFFLAGS:
		/* already up and running: only the RX filters may need updating */
		if ((if_getflags(ifp) & IFF_UP) &&
		    (if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
			mlx5e_set_rx_mode(ifp);
			break;
		}
		PRIV_LOCK(priv);
		if (if_getflags(ifp) & IFF_UP) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
				if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
					mlx5e_open_locked(ifp);
				if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
				mlx5_set_port_status(priv->mdev, MLX5_PORT_UP);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/* port goes down before closing the data path */
				mlx5_set_port_status(priv->mdev,
				    MLX5_PORT_DOWN);
				if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
					mlx5e_close_locked(ifp);
				mlx5e_update_carrier(priv);
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			}
		}
		PRIV_UNLOCK(priv);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		mlx5e_set_rx_mode(ifp);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
	case SIOCGIFXMEDIA:
		ifr = (struct ifreq *)data;
		error = ifmedia_ioctl(ifp, ifr, &priv->media, command);
		break;
	case SIOCGIFCAPNV:
		error = 0;
		break;
	case SIOCSIFCAP:
		/*
		 * Translate the legacy request into the NV-style
		 * driver data and fall into the common handler below.
		 */
		ifr = (struct ifreq *)data;
		drv_ioctl_data = &drv_ioctl_data_d;
		drv_ioctl_data->reqcap = ifr->ifr_reqcap;
		PRIV_LOCK(priv);
		drv_ioctl_data->reqcap2 = if_getcapenable2(ifp);
		drv_ioctl_data->nvcap = NULL;
		goto siocsifcap_driver;
	case SIOCSIFCAPNV:
		drv_ioctl_data = (struct siocsifcapnv_driver_data *)data;
		PRIV_LOCK(priv);
siocsifcap_driver:
		/* mask holds the capability bits that changed */
		mask = drv_ioctl_data->reqcap ^ if_getcapenable(ifp);
		if (mask & IFCAP_TXCSUM) {
			if_togglecapenable(ifp, IFCAP_TXCSUM);
			if_togglehwassist(ifp, (CSUM_TCP | CSUM_UDP | CSUM_IP));
			/* TSO4 requires TX checksum offload */
			if (IFCAP_TSO4 & if_getcapenable(ifp) &&
			    !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
				mask &= ~IFCAP_TSO4;
				if_setcapenablebit(ifp, 0, IFCAP_TSO4);
				if_sethwassistbits(ifp, 0, CSUM_IP_TSO);
				mlx5_en_err(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
			if_togglehwassist(ifp, (CSUM_UDP_IPV6 | CSUM_TCP_IPV6));
			/* TSO6 requires TX checksum offload */
			if (IFCAP_TSO6 & if_getcapenable(ifp) &&
			    !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
				mask &= ~IFCAP_TSO6;
				if_setcapenablebit(ifp, 0, IFCAP_TSO6);
				if_sethwassistbits(ifp, 0, CSUM_IP6_TSO);
				mlx5_en_err(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_MEXTPG)
			if_togglecapenable(ifp, IFCAP_MEXTPG);
		if (mask & IFCAP_TXTLS4)
			if_togglecapenable(ifp, IFCAP_TXTLS4);
		if (mask & IFCAP_TXTLS6)
			if_togglecapenable(ifp, IFCAP_TXTLS6);
#ifdef RATELIMIT
		if (mask & IFCAP_TXTLS_RTLMT)
			if_togglecapenable(ifp, IFCAP_TXTLS_RTLMT);
#endif
		if (mask & IFCAP_RXCSUM)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if (mask & IFCAP_RXCSUM_IPV6)
			if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6);
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & if_getcapenable(ifp)) &&
			    !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
				mlx5_en_err(ifp, "enable txcsum first.\n");
				error = EAGAIN;
				goto out;
			}
			if_togglecapenable(ifp, IFCAP_TSO4);
			if_togglehwassist(ifp, CSUM_IP_TSO);
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & if_getcapenable(ifp)) &&
			    !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
				mlx5_en_err(ifp, "enable txcsum6 first.\n");
				error = EAGAIN;
				goto out;
			}
			if_togglecapenable(ifp, IFCAP_TSO6);
			if_togglehwassist(ifp, CSUM_IP6_TSO);
		}
		if (mask & IFCAP_VLAN_HWTSO)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
		if (mask & IFCAP_VLAN_HWFILTER) {
			if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
				mlx5e_disable_vlan_filter(priv);
			else
				mlx5e_enable_vlan_filter(priv);
			if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER);
		}
		if (mask & IFCAP_VLAN_HWTAGGING)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
		if (mask & IFCAP_WOL_MAGIC)
			if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
		if (mask & IFCAP_VXLAN_HWCSUM) {
			const bool was_enabled =
			    (if_getcapenable(ifp) & IFCAP_VXLAN_HWCSUM) != 0;
			/* flow rules follow the capability state */
			if (was_enabled)
				mlx5e_del_all_vxlan_rules(priv);
			if_togglecapenable(ifp, IFCAP_VXLAN_HWCSUM);
			if_togglehwassist(ifp, CSUM_INNER_IP | CSUM_INNER_IP_UDP |
			    CSUM_INNER_IP_TCP | CSUM_INNER_IP6_UDP |
			    CSUM_INNER_IP6_TCP);
			if (!was_enabled) {
				int err = mlx5e_add_all_vxlan_rules(priv);
				if (err != 0) {
					mlx5_en_err(ifp,
					    "mlx5e_add_all_vxlan_rules() failed, %d (ignored)\n", err);
				}
			}
		}
		if (mask & IFCAP_VXLAN_HWTSO) {
			if_togglecapenable(ifp, IFCAP_VXLAN_HWTSO);
			if_togglehwassist(ifp, CSUM_INNER_IP_TSO |
			    CSUM_INNER_IP6_TSO);
		}
		VLAN_CAPABILITIES(ifp);
		/* turn off LRO means also turn of HW LRO - if it's on */
		if (mask & IFCAP_LRO) {
			int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
			bool need_restart = false;

			if_togglecapenable(ifp, IFCAP_LRO);

			/* figure out if updating HW LRO is needed */
			if (!(if_getcapenable(ifp) & IFCAP_LRO)) {
				if (priv->params.hw_lro_en) {
					priv->params.hw_lro_en = false;
					need_restart = true;
				}
			} else {
				/* only re-enable HW LRO if the ethtool knob allows it */
				if (priv->params.hw_lro_en == false &&
				    priv->params_ethtool.hw_lro != 0) {
					priv->params.hw_lro_en = true;
					need_restart = true;
				}
			}
			if (was_opened && need_restart) {
				mlx5e_close_locked(ifp);
				mlx5e_open_locked(ifp);
			}
		}
		if (mask & IFCAP_HWRXTSTMP) {
			if_togglecapenable(ifp, IFCAP_HWRXTSTMP);
			if (if_getcapenable(ifp) & IFCAP_HWRXTSTMP) {
				/* (re)start the timestamp calibration callout */
				if (priv->clbr_done == 0)
					mlx5e_reset_calibration_callout(priv);
			} else {
				callout_drain(&priv->tstmp_clbr);
				priv->clbr_done = 0;
			}
		}
		/* second capability word (RX TLS offloads) */
		mask = drv_ioctl_data->reqcap2 ^ if_getcapenable2(ifp);
		if ((mask & IFCAP2_BIT(IFCAP2_RXTLS4)) != 0)
			if_togglecapenable2(ifp, IFCAP2_BIT(IFCAP2_RXTLS4));
		if ((mask & IFCAP2_BIT(IFCAP2_RXTLS6)) != 0)
			if_togglecapenable2(ifp, IFCAP2_BIT(IFCAP2_RXTLS6));
out:
		PRIV_UNLOCK(priv);
		break;
	case SIOCGI2C:
		ifr = (struct ifreq *)data;

		/*
		 * Copy from the user-space address ifr_data to the
		 * kernel-space address i2c
		 */
		error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
		if (error)
			break;

		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		PRIV_LOCK(priv);
		/* Get module_num which is required for the query_eeprom */
		error = mlx5_query_module_num(priv->mdev, &module_num);
		if (error) {
			mlx5_en_err(ifp,
			    "Query module num failed, eeprom reading is not supported\n");
			error = EINVAL;
			goto err_i2c;
		}
		/* Check if module is present before doing an access */
		module_status = mlx5_query_module_status(priv->mdev, module_num);
		if (module_status != MLX5_MODULE_STATUS_PLUGGED_ENABLED) {
			mlx5_en_err(ifp,
			    "Query module %d status: not plugged (%d), eeprom reading is not supported\n",
			    module_num, module_status);
			error = EINVAL;
			goto err_i2c;
		}
		/*
		 * Currently 0XA0 and 0xA2 are the only addresses permitted.
		 * The internal conversion is as follows:
		 */
		if (i2c.dev_addr == 0xA0)
			read_addr = MLX5_I2C_ADDR_LOW;
		else if (i2c.dev_addr == 0xA2)
			read_addr = MLX5_I2C_ADDR_HIGH;
		else {
			mlx5_en_err(ifp,
			    "Query eeprom failed, Invalid Address: %X\n",
			    i2c.dev_addr);
			error = EINVAL;
			goto err_i2c;
		}
		error = mlx5_query_eeprom(priv->mdev,
		    read_addr, MLX5_EEPROM_LOW_PAGE,
		    (uint32_t)i2c.offset, (uint32_t)i2c.len, module_num,
		    (uint32_t *)i2c.data, &size_read);
		if (error) {
			mlx5_en_err(ifp,
			    "Query eeprom failed, eeprom reading is not supported\n");
			error = EINVAL;
			goto err_i2c;
		}
		/* a large request may need a second partial read to complete */
		if (i2c.len > MLX5_EEPROM_MAX_BYTES) {
			error = mlx5_query_eeprom(priv->mdev,
			    read_addr, MLX5_EEPROM_LOW_PAGE,
			    (uint32_t)(i2c.offset + size_read),
			    (uint32_t)(i2c.len - size_read), module_num,
			    (uint32_t *)(i2c.data + size_read), &size_read);
		}
		if (error) {
			mlx5_en_err(ifp,
			    "Query eeprom failed, eeprom reading is not supported\n");
			error = EINVAL;
			goto err_i2c;
		}

		error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
err_i2c:
		PRIV_UNLOCK(priv);
		break;
	case SIOCGIFDOWNREASON:
		ifdr = (struct ifdownreason *)data;
		bzero(ifdr->ifdr_msg, sizeof(ifdr->ifdr_msg));
		PRIV_LOCK(priv);
		/* mlx5 returns negative errno conventions; flip the sign */
		error = -mlx5_query_pddr_troubleshooting_info(priv->mdev, NULL,
		    ifdr->ifdr_msg, sizeof(ifdr->ifdr_msg));
		PRIV_UNLOCK(priv);
		if (error == 0)
			ifdr->ifdr_reason = IFDR_REASON_MSG;
		break;
	case SIOCGIFRSSKEY:
		ifrk = (struct ifrsskey *)data;
		ifrk->ifrk_func = RSS_FUNC_TOEPLITZ;
		ifrk->ifrk_keylen = MLX5E_RSS_KEY_SIZE;
		CTASSERT(sizeof(ifrk->ifrk_key) >= MLX5E_RSS_KEY_SIZE);
		mlx5e_get_rss_key(ifrk->ifrk_key);
		break;
	case SIOCGIFRSSHASH:
		ifrh = (struct ifrsshash *)data;
		ifrh->ifrh_func = RSS_FUNC_TOEPLITZ;
		ifrh->ifrh_types =
		    RSS_TYPE_IPV4 |
		    RSS_TYPE_TCP_IPV4 |
		    RSS_TYPE_UDP_IPV4 |
		    RSS_TYPE_IPV6 |
		    RSS_TYPE_TCP_IPV6 |
		    RSS_TYPE_UDP_IPV6;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}
/*
 * Verify that the HCA supports the minimum features this Ethernet
 * driver requires.  Returns 0 on success or -ENODEV when the port is
 * not an Ethernet port.
 */
static int
mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	/*
	 * TODO: uncomment once FW really sets all these bits if
	 * (!mdev->caps.eth.rss_ind_tbl_cap || !mdev->caps.eth.csum_cap ||
	 * !mdev->caps.eth.max_lso_cap || !mdev->caps.eth.vlan_cap ||
	 * !(mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD)) return
	 * -ENOTSUPP;
	 */

	/* TODO: add more must-to-have features */

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return (-ENODEV);

	return (0);
}
/*
 * Compute the maximum number of bytes that may be inlined into a send
 * WQE, derived from half the blue-flame register size minus the WQE
 * control overhead, clamped to the driver's own limits.
 */
static u16
mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
	const int min_size = ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN;
	const int max_size = MLX5E_MAX_TX_INLINE;
	int inline_size;

	/* half a blue-flame register, minus the WQE control segment */
	inline_size = ((1U << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2U) -
	    (sizeof(struct mlx5e_tx_wqe) - 2);

	/* clamp against driver limits */
	if (inline_size > max_size)
		inline_size = max_size;
	else if (inline_size < min_size)
		inline_size = min_size;

	return (inline_size);
}
/*
 * Initialize the default tunables and derived parameters of a freshly
 * allocated priv structure.  Returns 0 on success or an errno from
 * the minimum-inline-mode query.
 */
static int
mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev,
    struct mlx5e_priv *priv,
    int num_comp_vectors)
{
	int err;

	/*
	 * TODO: Consider link speed for setting "log_sq_size",
	 * "log_rq_size" and "cq_moderation_xxx":
	 */
	priv->params.log_sq_size =
	    MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	priv->params.log_rq_size =
	    MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
	/* CQE-based moderation is preferred when the HW supports it */
	priv->params.rx_cq_moderation_usec =
	    MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE :
	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	priv->params.rx_cq_moderation_mode =
	    MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 1 : 0;
	priv->params.rx_cq_moderation_pkts =
	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	priv->params.tx_cq_moderation_usec =
	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	priv->params.tx_cq_moderation_pkts =
	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	/* RSS table must be at least as large as the channel count */
	priv->params.rx_hash_log_tbl_sz =
	    (order_base_2(num_comp_vectors) >
	    MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
	    order_base_2(num_comp_vectors) :
	    MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
	priv->params.num_tc = 1;
	priv->params.default_vlan_prio = 0;
	/* -1 marks "no queue counter allocated yet" */
	priv->counter_set_id = -1;
	priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);

	err = mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
	if (err)
		return (err);

	/*
	 * hw lro is currently defaulted to off. when it won't anymore we
	 * will consider the HW capability: "!!MLX5_CAP_ETH(mdev, lro_cap)"
	 */
	priv->params.hw_lro_en = false;
	priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

	/*
	 * CQE zipping is off, because the per-packet 32-bit Toeplitz hash
	 * is then not supported. The 32-bit Toeplitz hash is needed to
	 * correctly demultiplex incoming traffic into the expected
	 * network queues.
	 */
	priv->params.cqe_zipping_en = false;

	priv->mdev = mdev;
	priv->params.num_channels = num_comp_vectors;
	priv->params.channels_rsss = 1;
	priv->order_base_2_num_channels = order_base_2(num_comp_vectors);
	priv->queue_mapping_channel_mask =
	    roundup_pow_of_two(num_comp_vectors) - 1;
	priv->num_tc = priv->params.num_tc;
	priv->default_vlan_prio = priv->params.default_vlan_prio;

	INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);

	return (0);
}
/*
 * Enable relaxed-ordering bits in a memory key context when both the
 * PCI bus configuration and the device capabilities allow it.
 */
static void
mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
{
	bool ro_pci_enable =
	    pci_get_relaxed_ordering_enabled(mdev->pdev->dev.bsddev);
	bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
	bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read);

	/* both the bus and the HCA must support it */
	MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read);
	MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write);
}
/*
 * Create a physical-address memory key covering the whole address
 * space for the given protection domain.  Returns 0 or a negative
 * errno from the firmware command.
 */
static int
mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
    struct mlx5_core_mkey *mkey)
{
	if_t ifp = priv->ifp;
	struct mlx5_core_dev *mdev = priv->mdev;
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = mlx5_vzalloc(inlen);
	if (in == NULL) {
		mlx5_en_err(ifp, "failed to allocate inbox\n");
		return (-ENOMEM);
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, umr_en, 1);	/* used by HW TLS */
	MLX5_SET(mkc, mkc, lw, 1);	/* local write */
	MLX5_SET(mkc, mkc, lr, 1);	/* local read */
	mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
	MLX5_SET(mkc, mkc, pd, pdn);
	MLX5_SET(mkc, mkc, length64, 1);	/* cover the full 64-bit range */
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
	if (err)
		mlx5_en_err(ifp, "mlx5_core_create_mkey failed, %d\n",
		    err);

	kvfree(in);
	return (err);
}
/* Description strings for the vport counters, generated by macro expansion. */
static const char *mlx5e_vport_stats_desc[] = {
	MLX5E_VPORT_STATS(MLX5E_STATS_DESC)
};

/* Description strings for the physical-port counters. */
static const char *mlx5e_pport_stats_desc[] = {
	MLX5E_PPORT_STATS(MLX5E_STATS_DESC)
};
/*
 * One-time initialization of the locks, callouts and per-channel
 * static state of a priv structure, plus one blue-flame register per
 * channel.  On failure all partially acquired resources are released
 * and an errno is returned.
 */
static int
mlx5e_priv_static_init(struct mlx5e_priv *priv, struct mlx5_core_dev *mdev,
    const uint32_t channels)
{
	uint32_t x;
	int err;

	mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF);
	sx_init(&priv->state_lock, "mlx5state");
	callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0);
	MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
	for (x = 0; x != channels; x++)
		mlx5e_chan_static_init(priv, &priv->channel[x], x);

	for (x = 0; x != channels; x++) {
		err = mlx5_alloc_bfreg(mdev, &priv->channel[x].bfreg, false, false);
		if (err)
			goto err_alloc_bfreg;
	}
	return (0);

err_alloc_bfreg:
	/* free only the bfregs that were successfully allocated */
	while (x--)
		mlx5_free_bfreg(mdev, &priv->channel[x].bfreg);

	for (x = 0; x != channels; x++)
		mlx5e_chan_static_destroy(&priv->channel[x]);
	callout_drain(&priv->watchdog);
	mtx_destroy(&priv->async_events_mtx);
	sx_destroy(&priv->state_lock);
	return (err);
}
/*
 * Counterpart of mlx5e_priv_static_init(): release the per-channel
 * blue-flame registers and static state, then the locks and callout.
 */
static void
mlx5e_priv_static_destroy(struct mlx5e_priv *priv, struct mlx5_core_dev *mdev,
    const uint32_t channels)
{
	uint32_t x;

	for (x = 0; x != channels; x++)
		mlx5_free_bfreg(mdev, &priv->channel[x].bfreg);
	for (x = 0; x != channels; x++)
		mlx5e_chan_static_destroy(&priv->channel[x]);
	callout_drain(&priv->watchdog);
	mtx_destroy(&priv->async_events_mtx);
	sx_destroy(&priv->state_lock);
}
/*
 * Sysctl handler reporting the HCA firmware revision as a
 * "major.minor.sub" string.
 */
static int
sysctl_firmware(SYSCTL_HANDLER_ARGS)
{
	/*
	 * "%d.%d.%d" is the string format.
	 * fw_rev_{maj,min,sub} return u16, 2^16 = 65536.
	 * We need at most 5 chars to store that.
	 * It also has: two "." and NULL at the end, which means we need 18
	 * (5*3 + 3) chars at most.
	 */
	char fw[18];
	struct mlx5e_priv *priv = arg1;
	int error;

	snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev),
	    fw_rev_sub(priv->mdev));
	error = sysctl_handle_string(oidp, fw, sizeof(fw), req);
	return (error);
}
/*
 * Drain and stop every traffic-class send queue of the given channel.
 */
static void
mlx5e_disable_tx_dma(struct mlx5e_channel *ch)
{
	int tc;

	for (tc = 0; tc != ch->priv->num_tc; tc++)
		mlx5e_drain_sq(&ch->sq[tc]);
}
/*
 * Write a NOP doorbell record for the SQ and ring the doorbell, so the
 * HW doorbell state matches the reset producer/consumer counters.
 */
static void
mlx5e_reset_sq_doorbell_record(struct mlx5e_sq *sq)
{

	sq->doorbell.d32[0] = cpu_to_be32(MLX5_OPCODE_NOP);
	sq->doorbell.d32[1] = cpu_to_be32(sq->sqn << 8);
	mlx5e_tx_notify_hw(sq, true);
}
/*
 * Recover a stopped send queue: move it ERR -> RST, reset its
 * counters and doorbell record, then move it RST -> RDY and mark it
 * running again.  No-op when the SQ is already running.
 */
void
mlx5e_resume_sq(struct mlx5e_sq *sq)
{
	int err;

	/* check if already enabled */
	if (READ_ONCE(sq->running) != 0)
		return;

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_ERR,
	    MLX5_SQC_STATE_RST);
	if (err != 0) {
		mlx5_en_err(sq->ifp,
		    "mlx5e_modify_sq() from ERR to RST failed: %d\n", err);
	}

	sq->cc = 0;
	sq->pc = 0;

	/* reset doorbell prior to moving from RST to RDY */
	mlx5e_reset_sq_doorbell_record(sq);

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST,
	    MLX5_SQC_STATE_RDY);
	if (err != 0) {
		mlx5_en_err(sq->ifp,
		    "mlx5e_modify_sq() from RST to RDY failed: %d\n", err);
	}

	sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
	WRITE_ONCE(sq->running, 1);
}
/*
 * Resume every traffic-class send queue of the given channel.
 */
static void
mlx5e_enable_tx_dma(struct mlx5e_channel *ch)
{
	int tc;

	for (tc = 0; tc != ch->priv->num_tc; tc++)
		mlx5e_resume_sq(&ch->sq[tc]);
}
/*
 * Stop receive DMA on a channel: disable the RQ, move it to error
 * state, flush all outstanding receive WQEs by polling the completion
 * queue, and finally move the RQ to reset state.
 */
static void
mlx5e_disable_rx_dma(struct mlx5e_channel *ch)
{
	struct mlx5e_rq *rq = &ch->rq;
	struct epoch_tracker et;
	int err;

	/* stop posting new WQEs and cancel the watchdog */
	mtx_lock(&rq->mtx);
	rq->enabled = 0;
	callout_stop(&rq->watchdog);
	mtx_unlock(&rq->mtx);

	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
	if (err != 0) {
		/* report the actual transition attempted: RDY -> ERR */
		mlx5_en_err(rq->ifp,
		    "mlx5e_modify_rq() from RDY to ERR failed: %d\n", err);
	}

	/* poll completions until the work queue is fully drained */
	while (!mlx5_wq_ll_is_empty(&rq->wq)) {
		msleep(1);
		NET_EPOCH_ENTER(et);
		rq->cq.mcq.comp(&rq->cq.mcq, NULL);
		NET_EPOCH_EXIT(et);
	}

	/*
	 * Transitioning into RST state will allow the FW to track less ERR state queues,
	 * thus reducing the recv queue flushing time
	 */
	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_ERR, MLX5_RQC_STATE_RST);
	if (err != 0) {
		mlx5_en_err(rq->ifp,
		    "mlx5e_modify_rq() from ERR to RST failed: %d\n", err);
	}
}
/*
 * Restart receive DMA on a channel: reset the WQ counter, update the
 * doorbell record, move the RQ RST -> RDY, mark it enabled and kick
 * the completion handler once to refill it.
 */
static void
mlx5e_enable_rx_dma(struct mlx5e_channel *ch)
{
	struct mlx5e_rq *rq = &ch->rq;
	struct epoch_tracker et;
	int err;

	rq->wq.wqe_ctr = 0;
	mlx5_wq_ll_update_db_record(&rq->wq);
	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err != 0) {
		mlx5_en_err(rq->ifp,
		    "mlx5e_modify_rq() from RST to RDY failed: %d\n", err);
	}

	rq->enabled = 1;

	/* run the completion handler once to repost receive buffers */
	NET_EPOCH_ENTER(et);
	rq->cq.mcq.comp(&rq->cq.mcq, NULL);
	NET_EPOCH_EXIT(et);
}
/*
 * Disable (value != 0) or enable (value == 0) transmit DMA on all
 * channels.  Does nothing while the interface is closed.
 */
void
mlx5e_modify_tx_dma(struct mlx5e_priv *priv, uint8_t value)
{
	int ch;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
		return;

	for (ch = 0; ch != priv->params.num_channels; ch++) {
		if (value != 0)
			mlx5e_disable_tx_dma(&priv->channel[ch]);
		else
			mlx5e_enable_tx_dma(&priv->channel[ch]);
	}
}
/*
 * Disable (value != 0) or enable (value == 0) receive DMA on all
 * channels.  Does nothing while the interface is closed.
 */
void
mlx5e_modify_rx_dma(struct mlx5e_priv *priv, uint8_t value)
{
	int ch;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
		return;

	for (ch = 0; ch != priv->params.num_channels; ch++) {
		if (value != 0)
			mlx5e_disable_rx_dma(&priv->channel[ch]);
		else
			mlx5e_enable_rx_dma(&priv->channel[ch]);
	}
}
/*
 * Register the read-only hardware information sysctls (firmware
 * version and board ID) under the device's hw tree.
 */
static void
mlx5e_add_hw_stats(struct mlx5e_priv *priv)
{

	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    priv, 0, sysctl_firmware, "A", "HCA firmware version");

	SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
	    OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0,
	    "Board ID");
}
/*
 * Sysctl handler for the per-priority TX flow control bitmap.  Exposes
 * the 32-bit bitmap as an array of eight 0/1 bytes; on write, validates
 * the input, pushes the new configuration to the port, and rolls back
 * to the previous value on any error.
 */
static int
mlx5e_sysctl_tx_priority_flow_control(SYSCTL_HANDLER_ARGS)
{
	struct mlx5e_priv *priv = arg1;
	uint8_t temp[MLX5E_MAX_PRIORITY];
	uint32_t tx_pfc;
	int err;
	int i;

	PRIV_LOCK(priv);

	/* expand the bitmap into one byte per priority for userland */
	tx_pfc = priv->params.tx_priority_flow_control;

	for (i = 0; i != MLX5E_MAX_PRIORITY; i++)
		temp[i] = (tx_pfc >> i) & 1;

	err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY);
	if (err || !req->newptr)
		goto done;
	err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY);
	if (err)
		goto done;

	priv->params.tx_priority_flow_control = 0;

	/* range check input value */
	for (i = 0; i != MLX5E_MAX_PRIORITY; i++) {
		if (temp[i] > 1) {
			err = ERANGE;
			goto done;
		}
		priv->params.tx_priority_flow_control |= (temp[i] << i);
	}

	/* check if update is required */
	if (tx_pfc != priv->params.tx_priority_flow_control)
		err = -mlx5e_set_port_pfc(priv);
done:
	/* restore the previous bitmap on any failure */
	if (err != 0)
		priv->params.tx_priority_flow_control= tx_pfc;
	PRIV_UNLOCK(priv);

	return (err);
}
/*
 * Sysctl handler for the per-priority RX flow control bitmap.  Same
 * structure as the TX variant, but additionally updates the receive
 * buffer lossy configuration when this host owns the port buffers.
 */
static int
mlx5e_sysctl_rx_priority_flow_control(SYSCTL_HANDLER_ARGS)
{
	struct mlx5e_priv *priv = arg1;
	uint8_t temp[MLX5E_MAX_PRIORITY];
	uint32_t rx_pfc;
	int err;
	int i;

	PRIV_LOCK(priv);

	/* expand the bitmap into one byte per priority for userland */
	rx_pfc = priv->params.rx_priority_flow_control;

	for (i = 0; i != MLX5E_MAX_PRIORITY; i++)
		temp[i] = (rx_pfc >> i) & 1;

	err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY);
	if (err || !req->newptr)
		goto done;
	err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY);
	if (err)
		goto done;

	priv->params.rx_priority_flow_control = 0;

	/* range check input value */
	for (i = 0; i != MLX5E_MAX_PRIORITY; i++) {
		if (temp[i] > 1) {
			err = ERANGE;
			goto done;
		}
		priv->params.rx_priority_flow_control |= (temp[i] << i);
	}

	/* check if update is required */
	if (rx_pfc != priv->params.rx_priority_flow_control) {
		err = -mlx5e_set_port_pfc(priv);
		if (err == 0 && priv->sw_is_port_buf_owner)
			err = mlx5e_update_buf_lossy(priv);
	}
done:
	/* restore the previous bitmap on any failure */
	if (err != 0)
		priv->params.rx_priority_flow_control= rx_pfc;
	PRIV_UNLOCK(priv);

	return (err);
}
/*
 * Set the pause-frame and PFC defaults, register their sysctl knobs,
 * and push the resulting configuration to the firmware.  Global pause
 * and PFC are mutually exclusive; on an EINVAL conflict PFC is
 * cleared and the configuration retried.
 */
static void
mlx5e_setup_pauseframes(struct mlx5e_priv *priv)
{
	int error;

	/* enable pauseframes by default */
	priv->params.tx_pauseframe_control = 1;
	priv->params.rx_pauseframe_control = 1;

	/* disable ports flow control, PFC, by default */
	priv->params.tx_priority_flow_control = 0;
	priv->params.rx_priority_flow_control = 0;

	/* register pauseframe SYSCTLs */
	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    OID_AUTO, "tx_pauseframe_control", CTLFLAG_RDTUN,
	    &priv->params.tx_pauseframe_control, 0,
	    "Set to enable TX pause frames. Clear to disable.");

	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    OID_AUTO, "rx_pauseframe_control", CTLFLAG_RDTUN,
	    &priv->params.rx_pauseframe_control, 0,
	    "Set to enable RX pause frames. Clear to disable.");

	/* register priority flow control, PFC, SYSCTLs */
	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    OID_AUTO, "tx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN |
	    CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_tx_priority_flow_control, "CU",
	    "Set to enable TX ports flow control frames for priorities 0..7. Clear to disable.");

	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    OID_AUTO, "rx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN |
	    CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_rx_priority_flow_control, "CU",
	    "Set to enable RX ports flow control frames for priorities 0..7. Clear to disable.");

	PRIV_LOCK(priv);

	/* range check; tunables may have set arbitrary values */
	priv->params.tx_pauseframe_control =
	    priv->params.tx_pauseframe_control ? 1 : 0;
	priv->params.rx_pauseframe_control =
	    priv->params.rx_pauseframe_control ? 1 : 0;

	/* update firmware */
	error = mlx5e_set_port_pause_and_pfc(priv);
	if (error == -EINVAL) {
		mlx5_en_err(priv->ifp,
		    "Global pauseframes must be disabled before enabling PFC.\n");
		priv->params.rx_priority_flow_control = 0;
		priv->params.tx_priority_flow_control = 0;

		/* update firmware */
		(void) mlx5e_set_port_pause_and_pfc(priv);
	}
	PRIV_UNLOCK(priv);
}
/*
 * Allocate an "unlimited" (non-rate-limited) send tag.  The tag is a
 * reference to the channel selected from the mbuf flow ID, using the
 * same hash-to-channel mapping as mlx5e_select_queue().
 */
static int
mlx5e_ul_snd_tag_alloc(if_t ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	struct mlx5e_priv *priv;
	struct mlx5e_channel *pch;

	priv = if_getsoftc(ifp);

	/* a flow type is required to pick a channel deterministically */
	if (unlikely(priv->gone || params->hdr.flowtype == M_HASHTYPE_NONE)) {
		return (EOPNOTSUPP);
	} else {
		/* keep this code synced with mlx5e_select_queue() */
		u32 ch = priv->params.num_channels;
#ifdef RSS
		u32 temp;

		if (rss_hash2bucket(params->hdr.flowid,
		    params->hdr.flowtype, &temp) == 0)
			ch = temp % ch;
		else
#endif
			ch = (params->hdr.flowid % 128) % ch;

		/*
		 * NOTE: The channels array is only freed at detach
		 * and it safe to return a pointer to the send tag
		 * inside the channels structure as long as we
		 * reference the priv.
		 */
		pch = priv->channel + ch;

		/* check if send queue is not running */
		if (unlikely(pch->sq[0].running == 0))
			return (ENXIO);
		m_snd_tag_ref(&pch->tag);
		*ppmt = &pch->tag;
		return (0);
	}
}
/*
 * Report the parameters of an unlimited send tag: no rate cap and the
 * current fill level of the channel's first send queue.
 */
static int
mlx5e_ul_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params)
{
	struct mlx5e_channel *pch =
	    container_of(pmt, struct mlx5e_channel, tag);

	params->unlimited.max_rate = -1ULL;
	params->unlimited.queue_level = mlx5e_sq_queue_level(&pch->sq[0]);
	return (0);
}
/*
 * Last reference to an unlimited send tag was dropped: wake up any
 * thread waiting on the channel's completion.
 */
static void
mlx5e_ul_snd_tag_free(struct m_snd_tag *pmt)
{
	struct mlx5e_channel *pch =
	    container_of(pmt, struct mlx5e_channel, tag);

	complete(&pch->completion);
}
/*
 * Top-level send tag allocator: dispatch to the rate-limit, TLS or
 * unlimited allocator based on the requested tag type.  Types compiled
 * out by RATELIMIT/KERN_TLS fall through to EOPNOTSUPP.
 */
static int
mlx5e_snd_tag_alloc(if_t ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{

	switch (params->hdr.type) {
#ifdef RATELIMIT
	case IF_SND_TAG_TYPE_RATE_LIMIT:
		return (mlx5e_rl_snd_tag_alloc(ifp, params, ppmt));
#ifdef KERN_TLS
	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
		return (mlx5e_tls_snd_tag_alloc(ifp, params, ppmt));
#endif
#endif
	case IF_SND_TAG_TYPE_UNLIMITED:
		return (mlx5e_ul_snd_tag_alloc(ifp, params, ppmt));
#ifdef KERN_TLS
	case IF_SND_TAG_TYPE_TLS:
		return (mlx5e_tls_snd_tag_alloc(ifp, params, ppmt));
	case IF_SND_TAG_TYPE_TLS_RX:
		return (mlx5e_tls_rx_snd_tag_alloc(ifp, params, ppmt));
#endif
	default:
		return (EOPNOTSUPP);
	}
}
#ifdef RATELIMIT
#define	NUM_HDWR_RATES_MLX 13
/*
 * Fixed hardware pacing rates in bytes per second; the bracketed
 * comments give the equivalent in bits per second (value * 8).
 */
static const uint64_t adapter_rates_mlx[NUM_HDWR_RATES_MLX] = {
	135375,			/* 1,083,000 */
	180500,			/* 1,444,000 */
	270750,			/* 2,166,000 */
	361000,			/* 2,888,000 */
	541500,			/* 4,332,000 */
	721875,			/* 5,775,000 */
	1082875,		/* 8,663,000 */
	1443875,		/* 11,551,000 */
	2165750,		/* 17,326,000 */
	2887750,		/* 23,102,000 */
	4331625,		/* 34,653,000 */
	5775500,		/* 46,204,000 */
	8663125			/* 69,305,000 */
};
/*
 * if_ratelimit_query callback: describe the hardware pacing
 * capabilities (fixed rate table, no flow limit) to the stack.
 */
static void
mlx5e_ratelimit_query(if_t ifp __unused, struct if_ratelimit_query_results *q)
{
	/*
	 * This function needs updating by the driver maintainer!
	 * For the MLX card there are currently (ConnectX-4?) 13
	 * pre-set rates and others i.e. ConnectX-5, 6, 7??
	 *
	 * This will change based on later adapters
	 * and this code should be updated to look at ifp
	 * and figure out the specific adapter type
	 * settings i.e. how many rates as well
	 * as if they are fixed (as is shown here) or
	 * if they are dynamic (example chelsio t4). Also if there
	 * is a maximum number of flows that the adapter
	 * can handle that too needs to be updated in
	 * the max_flows field.
	 */
	q->rate_table = adapter_rates_mlx;
	q->flags = RT_IS_FIXED_TABLE;
	q->max_flows = 0;	/* mlx has no limit */
	q->number_of_rates = NUM_HDWR_RATES_MLX;
	q->min_segment_burst = 1;
}
#endif
/*
 * Register all eight media entries for the given media type: with and
 * without full duplex, for every combination of the RX/TX pause flags.
 * The registration order matches the original hand-unrolled version.
 */
static void
mlx5e_ifm_add(struct mlx5e_priv *priv, int type)
{
	/* pause-flag combinations without IFM_FDX, in registration order */
	static const int hdx_pause[4] = {
		0,
		IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE,
		IFM_ETH_RXPAUSE,
		IFM_ETH_TXPAUSE,
	};
	/* pause-flag combinations with IFM_FDX, in registration order */
	static const int fdx_pause[4] = {
		0,
		IFM_ETH_RXPAUSE,
		IFM_ETH_TXPAUSE,
		IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE,
	};
	unsigned i;

	for (i = 0; i != 4; i++)
		ifmedia_add(&priv->media, type | IFM_ETHER |
		    hdx_pause[i], 0, NULL);
	for (i = 0; i != 4; i++)
		ifmedia_add(&priv->media, type | IFM_ETHER | IFM_FDX |
		    fdx_pause[i], 0, NULL);
}
static void *
mlx5e_create_ifp(struct mlx5_core_dev *mdev)
{
if_t ifp;
struct mlx5e_priv *priv;
u8 dev_addr[ETHER_ADDR_LEN] __aligned(4);
struct sysctl_oid_list *child;
int ncv = mdev->priv.eq_table.num_comp_vectors;
char unit[16];
struct pfil_head_args pa;
int err;
u32 eth_proto_cap;
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
bool ext;
struct media media_entry = {};
if (mlx5e_check_required_hca_cap(mdev)) {
mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n");
return (NULL);
}
/*
* Try to allocate the priv and make room for worst-case
* number of channel structures:
*/
priv = malloc_domainset(sizeof(*priv) +
(sizeof(priv->channel[0]) * mdev->priv.eq_table.num_comp_vectors),
M_MLX5EN, mlx5_dev_domainset(mdev), M_WAITOK | M_ZERO);
ifp = priv->ifp = if_alloc_dev(IFT_ETHER, mdev->pdev->dev.bsddev);
- if (ifp == NULL) {
- mlx5_core_err(mdev, "if_alloc() failed\n");
- goto err_free_priv;
- }
/* setup all static fields */
if (mlx5e_priv_static_init(priv, mdev, mdev->priv.eq_table.num_comp_vectors)) {
mlx5_core_err(mdev, "mlx5e_priv_static_init() failed\n");
goto err_free_ifp;
}
if_setsoftc(ifp, priv);
if_initname(ifp, "mce", device_get_unit(mdev->pdev->dev.bsddev));
if_setmtu(ifp, ETHERMTU);
if_setinitfn(ifp, mlx5e_open);
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setioctlfn(ifp, mlx5e_ioctl);
if_settransmitfn(ifp, mlx5e_xmit);
if_setqflushfn(ifp, if_qflush);
if_setgetcounterfn(ifp, mlx5e_get_counter);
if_setsendqlen(ifp, ifqmaxlen);
/*
* Set driver features
*/
if_setcapabilities(ifp, IFCAP_NV);
if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6, 0);
if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING, 0);
if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER, 0);
if_setcapabilitiesbit(ifp, IFCAP_LINKSTATE | IFCAP_JUMBO_MTU, 0);
if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
if_setcapabilitiesbit(ifp, IFCAP_TSO | IFCAP_VLAN_HWTSO, 0);
if_setcapabilitiesbit(ifp, IFCAP_HWSTATS | IFCAP_HWRXTSTMP, 0);
if_setcapabilitiesbit(ifp, IFCAP_MEXTPG, 0);
if_setcapabilitiesbit(ifp, IFCAP_TXTLS4 | IFCAP_TXTLS6, 0);
#ifdef RATELIMIT
if_setcapabilitiesbit(ifp, IFCAP_TXRTLMT | IFCAP_TXTLS_RTLMT, 0);
#endif
if_setcapabilitiesbit(ifp, IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO, 0);
if_setcapabilities2bit(ifp, IFCAP2_BIT(IFCAP2_RXTLS4) |
IFCAP2_BIT(IFCAP2_RXTLS6), 0);
if_setsndtagallocfn(ifp, mlx5e_snd_tag_alloc);
#ifdef RATELIMIT
if_setratelimitqueryfn(ifp, mlx5e_ratelimit_query);
#endif
/* set TSO limits so that we don't have to drop TX packets */
if_sethwtsomax(ifp, MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
if_sethwtsomaxsegcount(ifp, MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */);
if_sethwtsomaxsegsize(ifp, MLX5E_MAX_TX_MBUF_SIZE);
if_setcapenable(ifp, if_getcapabilities(ifp));
if_setcapenable2(ifp, if_getcapabilities2(ifp));
if_sethwassist(ifp, 0);
if (if_getcapenable(ifp) & IFCAP_TSO)
if_sethwassistbits(ifp, CSUM_TSO, 0);
if (if_getcapenable(ifp) & IFCAP_TXCSUM)
if_sethwassistbits(ifp, (CSUM_TCP | CSUM_UDP | CSUM_IP), 0);
if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
if_sethwassistbits(ifp, (CSUM_UDP_IPV6 | CSUM_TCP_IPV6), 0);
if (if_getcapabilities(ifp) & IFCAP_VXLAN_HWCSUM)
if_sethwassistbits(ifp, CSUM_INNER_IP6_UDP | CSUM_INNER_IP6_TCP |
CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP_TCP |
CSUM_ENCAP_VXLAN, 0);
if (if_getcapabilities(ifp) & IFCAP_VXLAN_HWTSO)
if_sethwassistbits(ifp, CSUM_INNER_IP6_TSO | CSUM_INNER_IP_TSO, 0);
/* ifnet sysctl tree */
sysctl_ctx_init(&priv->sysctl_ctx);
priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev),
OID_AUTO, if_getdname(ifp), CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"MLX5 ethernet - interface name");
if (priv->sysctl_ifnet == NULL) {
mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
goto err_free_sysctl;
}
snprintf(unit, sizeof(unit), "%d", if_getdunit(ifp));
priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
OID_AUTO, unit, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"MLX5 ethernet - interface unit");
if (priv->sysctl_ifnet == NULL) {
mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
goto err_free_sysctl;
}
/* HW sysctl tree */
child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev));
priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child,
OID_AUTO, "hw", CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"MLX5 ethernet dev hw");
if (priv->sysctl_hw == NULL) {
mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
goto err_free_sysctl;
}
err = mlx5e_build_ifp_priv(mdev, priv, ncv);
if (err) {
mlx5_core_err(mdev, "mlx5e_build_ifp_priv() failed (%d)\n", err);
goto err_free_sysctl;
}
/* reuse mlx5core's watchdog workqueue */
priv->wq = mdev->priv.health.wq_watchdog;
err = mlx5_core_alloc_pd(mdev, &priv->pdn, 0);
if (err) {
mlx5_en_err(ifp, "mlx5_core_alloc_pd failed, %d\n", err);
goto err_free_wq;
}
err = mlx5_alloc_transport_domain(mdev, &priv->tdn, 0);
if (err) {
mlx5_en_err(ifp,
"mlx5_alloc_transport_domain failed, %d\n", err);
goto err_dealloc_pd;
}
err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
if (err) {
mlx5_en_err(ifp, "mlx5e_create_mkey failed, %d\n", err);
goto err_dealloc_transport_domain;
}
mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr);
/* check if we should generate a random MAC address */
if (MLX5_CAP_GEN(priv->mdev, vport_group_manager) == 0 &&
is_zero_ether_addr(dev_addr)) {
random_ether_addr(dev_addr);
mlx5_en_err(ifp, "Assigned random MAC address\n");
}
err = mlx5e_rl_init(priv);
if (err) {
mlx5_en_err(ifp, "mlx5e_rl_init failed, %d\n", err);
goto err_create_mkey;
}
err = mlx5e_tls_init(priv);
if (err) {
if_printf(ifp, "%s: mlx5e_tls_init failed\n", __func__);
goto err_rl_init;
}
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
if (err) {
if_printf(ifp, "%s: mlx5e_open_drop_rq failed (%d)\n", __func__, err);
goto err_tls_init;
}
err = mlx5e_open_rqts(priv);
if (err) {
if_printf(ifp, "%s: mlx5e_open_rqts failed (%d)\n", __func__, err);
goto err_open_drop_rq;
}
err = mlx5e_open_tirs(priv);
if (err) {
mlx5_en_err(ifp, "mlx5e_open_tirs() failed, %d\n", err);
goto err_open_rqts;
}
err = mlx5e_open_flow_tables(priv);
if (err) {
if_printf(ifp, "%s: mlx5e_open_flow_tables failed (%d)\n", __func__, err);
goto err_open_tirs;
}
err = mlx5e_tls_rx_init(priv);
if (err) {
if_printf(ifp, "%s: mlx5e_tls_rx_init() failed, %d\n", __func__, err);
goto err_open_flow_tables;
}
/* set default MTU */
mlx5e_set_dev_port_mtu(ifp, if_getmtu(ifp));
/* Set default media status */
priv->media_status_last = IFM_AVALID;
priv->media_active_last = IFM_ETHER | IFM_AUTO | IFM_FDX;
/* setup default pauseframes configuration */
mlx5e_setup_pauseframes(priv);
/* Setup supported medias */
if (!mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1)) {
ext = MLX5_CAP_PCAM_FEATURE(mdev,
ptys_extended_ethernet);
eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
eth_proto_capability);
} else {
ext = false;
eth_proto_cap = 0;
mlx5_en_err(ifp, "Query port media capability failed, %d\n", err);
}
ifmedia_init(&priv->media, IFM_IMASK,
mlx5e_media_change, mlx5e_media_status);
if (ext) {
for (unsigned i = 0; i != MLX5E_EXT_LINK_SPEEDS_NUMBER; i++) {
/* check if hardware has the right capability */
if (MLX5E_PROT_MASK(i) & ~eth_proto_cap)
continue;
for (unsigned j = 0; j != MLX5E_CABLE_TYPE_NUMBER; j++) {
media_entry = mlx5e_ext_mode_table[i][j];
if (media_entry.subtype == 0)
continue;
/* check if this subtype was already added */
for (unsigned k = 0; k != i; k++) {
/* check if hardware has the right capability */
if (MLX5E_PROT_MASK(k) & ~eth_proto_cap)
continue;
for (unsigned m = 0; m != MLX5E_CABLE_TYPE_NUMBER; m++) {
if (media_entry.subtype == mlx5e_ext_mode_table[k][m].subtype)
goto skip_ext_media;
}
}
mlx5e_ifm_add(priv, media_entry.subtype);
skip_ext_media:;
}
}
} else {
for (unsigned i = 0; i != MLX5E_LINK_SPEEDS_NUMBER; i++) {
media_entry = mlx5e_mode_table[i];
if (media_entry.subtype == 0)
continue;
if (MLX5E_PROT_MASK(i) & ~eth_proto_cap)
continue;
/* check if this subtype was already added */
for (unsigned k = 0; k != i; k++) {
if (media_entry.subtype == mlx5e_mode_table[k].subtype)
goto skip_media;
}
mlx5e_ifm_add(priv, media_entry.subtype);
/* NOTE: 10G ER and LR shares the same entry */
if (media_entry.subtype == IFM_10G_ER)
mlx5e_ifm_add(priv, IFM_10G_LR);
skip_media:;
}
}
mlx5e_ifm_add(priv, IFM_AUTO);
/* Set autoselect by default */
ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
DEBUGNET_SET(ifp, mlx5_en);
ether_ifattach(ifp, dev_addr);
/* Register for VLAN events */
priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
mlx5e_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);
/* Register for VxLAN events */
priv->vxlan_start = EVENTHANDLER_REGISTER(vxlan_start,
mlx5e_vxlan_start, priv, EVENTHANDLER_PRI_ANY);
priv->vxlan_stop = EVENTHANDLER_REGISTER(vxlan_stop,
mlx5e_vxlan_stop, priv, EVENTHANDLER_PRI_ANY);
/* Link is down by default */
if_link_state_change(ifp, LINK_STATE_DOWN);
mlx5e_enable_async_events(priv);
mlx5e_add_hw_stats(priv);
mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
"vstats", mlx5e_vport_stats_desc, MLX5E_VPORT_STATS_NUM,
priv->stats.vport.arg);
mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
"pstats", mlx5e_pport_stats_desc, MLX5E_PPORT_STATS_NUM,
priv->stats.pport.arg);
mlx5e_create_ethtool(priv);
mtx_lock(&priv->async_events_mtx);
mlx5e_update_stats(priv);
mtx_unlock(&priv->async_events_mtx);
SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
OID_AUTO, "rx_clbr_done", CTLFLAG_RD,
&priv->clbr_done, 0,
"RX timestamps calibration state");
callout_init(&priv->tstmp_clbr, 1);
/* Pull out the frequency of the clock in hz */
priv->cclk = (uint64_t)MLX5_CAP_GEN(mdev, device_frequency_khz) * 1000ULL;
mlx5e_reset_calibration_callout(priv);
pa.pa_version = PFIL_VERSION;
pa.pa_flags = PFIL_IN;
pa.pa_type = PFIL_TYPE_ETHERNET;
pa.pa_headname = if_name(ifp);
priv->pfil = pfil_head_register(&pa);
PRIV_LOCK(priv);
err = mlx5e_open_flow_rules(priv);
if (err) {
mlx5_en_err(ifp,
"mlx5e_open_flow_rules() failed, %d (ignored)\n", err);
}
PRIV_UNLOCK(priv);
return (priv);
err_open_flow_tables:
mlx5e_close_flow_tables(priv);
err_open_tirs:
mlx5e_close_tirs(priv);
err_open_rqts:
mlx5e_close_rqts(priv);
err_open_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
err_tls_init:
mlx5e_tls_cleanup(priv);
err_rl_init:
mlx5e_rl_cleanup(priv);
err_create_mkey:
mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
err_dealloc_transport_domain:
mlx5_dealloc_transport_domain(mdev, priv->tdn, 0);
err_dealloc_pd:
mlx5_core_dealloc_pd(mdev, priv->pdn, 0);
err_free_wq:
flush_workqueue(priv->wq);
err_free_sysctl:
sysctl_ctx_free(&priv->sysctl_ctx);
if (priv->sysctl_debug)
sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
mlx5e_priv_static_destroy(priv, mdev, mdev->priv.eq_table.num_comp_vectors);
err_free_ifp:
if_free(ifp);
-
-err_free_priv:
free(priv, M_MLX5EN);
return (NULL);
}
/*
 * Tear down an mlx5en interface instance.  This is the inverse of
 * mlx5e_create_ifp(); resources are released in strict reverse order
 * of their creation.  Blocks until all outstanding send tags and TLS
 * connections referencing this instance have drained.
 */
static void
mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	if_t ifp = priv->ifp;

	/* don't allow more IOCTLs */
	priv->gone = 1;

	/* XXX wait a bit to allow IOCTL handlers to complete */
	pause("W", hz);

#ifdef RATELIMIT
	/*
	 * The kernel can have reference(s) via the m_snd_tag's into
	 * the ratelimit channels, and these must go away before
	 * detaching:
	 */
	while (READ_ONCE(priv->rl.stats.tx_active_connections) != 0) {
		mlx5_en_err(priv->ifp,
		    "Waiting for all ratelimit connections to terminate\n");
		pause("W", hz);
	}
#endif

#ifdef KERN_TLS
	/* wait for all TLS tags to get freed */
	while (priv->tls.init != 0 &&
	    uma_zone_get_cur(priv->tls.zone) != 0) {
		mlx5_en_err(priv->ifp,
		    "Waiting for all TLS connections to terminate\n");
		pause("W", hz);
	}

	/* wait for all TLS RX tags to get freed */
	while (priv->tls_rx.init != 0 &&
	    uma_zone_get_cur(priv->tls_rx.zone) != 0) {
		mlx5_en_err(priv->ifp,
		    "Waiting for all TLS RX connections to terminate\n");
		pause("W", hz);
	}
#endif
	/* wait for all unlimited send tags to complete */
	mlx5e_priv_wait_for_completion(priv, mdev->priv.eq_table.num_comp_vectors);

	/* stop watchdog timer */
	callout_drain(&priv->watchdog);

	/* stop RX timestamp calibration callout */
	callout_drain(&priv->tstmp_clbr);

	/* Unhook VLAN and VxLAN event handlers registered at attach. */
	if (priv->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
	if (priv->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);
	if (priv->vxlan_start != NULL)
		EVENTHANDLER_DEREGISTER(vxlan_start, priv->vxlan_start);
	if (priv->vxlan_stop != NULL)
		EVENTHANDLER_DEREGISTER(vxlan_stop, priv->vxlan_stop);

	/* make sure device gets closed */
	PRIV_LOCK(priv);
	mlx5e_close_locked(ifp);
	mlx5e_close_flow_rules(priv);
	PRIV_UNLOCK(priv);

	/* deregister pfil */
	if (priv->pfil != NULL) {
		pfil_head_unregister(priv->pfil);
		priv->pfil = NULL;
	}

	/* unregister device */
	ifmedia_removeall(&priv->media);
	ether_ifdetach(ifp);

	/* Release datapath objects in reverse order of creation. */
	mlx5e_tls_rx_cleanup(priv);
	mlx5e_close_flow_tables(priv);
	mlx5e_close_tirs(priv);
	mlx5e_close_rqts(priv);
	mlx5e_close_drop_rq(&priv->drop_rq);
	mlx5e_tls_cleanup(priv);
	mlx5e_rl_cleanup(priv);

	/* destroy all remaining sysctl nodes */
	sysctl_ctx_free(&priv->stats.vport.ctx);
	sysctl_ctx_free(&priv->stats.pport.ctx);
	if (priv->sysctl_debug)
		sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
	sysctl_ctx_free(&priv->sysctl_ctx);

	/* Release HW objects: memory key, transport domain, PD. */
	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
	mlx5_dealloc_transport_domain(priv->mdev, priv->tdn, 0);
	mlx5_core_dealloc_pd(priv->mdev, priv->pdn, 0);
	mlx5e_disable_async_events(priv);
	/* priv->wq is mlx5core's watchdog workqueue; flush, don't destroy. */
	flush_workqueue(priv->wq);
	mlx5e_priv_static_destroy(priv, mdev, mdev->priv.eq_table.num_comp_vectors);
	if_free(ifp);
	free(priv, M_MLX5EN);
}
#ifdef DEBUGNET
/*
 * Debugnet setup hook: report the number of RX rings, in-flight
 * cluster count, and cluster size the debugnet code should use.
 */
static void
mlx5_en_debugnet_init(if_t dev, int *nrxr, int *ncl, int *clsize)
{
	struct mlx5e_priv *priv;

	priv = if_getsoftc(dev);
	PRIV_LOCK(priv);
	*clsize = MLX5E_MAX_RX_BYTES;
	*ncl = DEBUGNET_MAX_IN_FLIGHT;
	*nrxr = priv->params.num_channels;
	PRIV_UNLOCK(priv);
}
/*
 * Debugnet event hook.  Intentionally empty: this driver needs no
 * special handling on debugnet start/finish events.
 */
static void
mlx5_en_debugnet_event(if_t dev, enum debugnet_ev event)
{
}
/*
 * Debugnet transmit hook: push a single mbuf out of the first send
 * queue of channel zero.  Returns ENOENT when the interface or link
 * is down, ENOBUFS when the queue rejected the packet, 0 on success.
 * The mbuf is consumed (freed on failure) except when the interface
 * itself is not running.
 */
static int
mlx5_en_debugnet_transmit(if_t dev, struct mbuf *m)
{
	struct mlx5e_priv *priv;
	struct mlx5e_sq *txq;
	int rc;

	priv = if_getsoftc(dev);
	if ((if_getdrvflags(dev) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (priv->media_status_last & IFM_ACTIVE) == 0)
		return (ENOENT);

	txq = &priv->channel[0].sq[0];
	if (txq->running == 0) {
		m_freem(m);
		return (ENOENT);
	}

	rc = 0;
	if (mlx5e_sq_xmit(txq, &m) != 0) {
		m_freem(m);
		rc = ENOBUFS;
	}
	/* Ring the doorbell unconditionally, as the original code does. */
	mlx5e_tx_notify_hw(txq, true);
	return (rc);
}
/*
 * Debugnet poll hook: service pending interrupts by hand while the
 * system is in the debugnet (crash dump / netdump) path.
 */
static int
mlx5_en_debugnet_poll(if_t dev, int count)
{
	struct mlx5e_priv *priv;

	priv = if_getsoftc(dev);
	if ((if_getdrvflags(dev) & IFF_DRV_RUNNING) == 0 ||
	    (priv->media_status_last & IFM_ACTIVE) == 0)
		return (ENOENT);

	mlx5_poll_interrupts(priv->mdev);
	return (0);
}
#endif /* DEBUGNET */
/*
 * mlx5 interface callback: return the ifnet backing this instance.
 */
static void *
mlx5e_get_ifp(void *vpriv)
{

	return (((struct mlx5e_priv *)vpriv)->ifp);
}
/*
 * Registration record handed to the mlx5 core driver; wires the
 * create/destroy/event callbacks for the Ethernet protocol.
 */
static struct mlx5_interface mlx5e_interface = {
	.add = mlx5e_create_ifp,
	.remove = mlx5e_destroy_ifp,
	.event = mlx5e_async_event,
	.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev = mlx5e_get_ifp,
};
/*
 * Module load handler: register the Ethernet protocol interface with
 * the mlx5 core driver.
 */
void
mlx5e_init(void)
{
	mlx5_register_interface(&mlx5e_interface);
}
/*
 * Module unload handler: deregister from the mlx5 core driver.
 */
void
mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}
/* Module init/exit ordering and dependency declarations. */
module_init_order(mlx5e_init, SI_ORDER_SIXTH);
module_exit_order(mlx5e_cleanup, SI_ORDER_SIXTH);
MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1);
MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1);
MODULE_VERSION(mlx5en, 1);
diff --git a/sys/dev/msk/if_msk.c b/sys/dev/msk/if_msk.c
index 2165cb73f7cd..3c1c31cded26 100644
--- a/sys/dev/msk/if_msk.c
+++ b/sys/dev/msk/if_msk.c
@@ -1,4597 +1,4592 @@
/******************************************************************************
*
* Name : sky2.c
* Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
* Version: $Revision: 1.23 $
* Date : $Date: 2005/12/22 09:04:11 $
* Purpose: Main driver source file
*
*****************************************************************************/
/******************************************************************************
*
* LICENSE:
* Copyright (C) Marvell International Ltd. and/or its affiliates
*
* The computer program files contained in this folder ("Files")
* are provided to you under the BSD-type license terms provided
* below, and any use of such Files and any derivative works
* thereof created by you shall be governed by the following terms
* and conditions:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* - Neither the name of Marvell nor the names of its contributors
* may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* /LICENSE
*
*****************************************************************************/
/*-
* SPDX-License-Identifier: BSD-4-Clause AND BSD-3-Clause
*
* Copyright (c) 1997, 1998, 1999, 2000
* Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
/*-
* Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
* Device driver for the Marvell Yukon II Ethernet controller.
* Due to lack of documentation, this driver is based on the code from
* sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/msk/if_mskreg.h>
MODULE_DEPEND(msk, pci, 1, 1, 1);
MODULE_DEPEND(msk, ether, 1, 1, 1);
MODULE_DEPEND(msk, miibus, 1, 1, 1);
/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
/* Tunables. */
static int msi_disable = 0;		/* set to disable MSI interrupts */
TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
static int legacy_intr = 0;		/* set to force legacy INTx handling */
TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr);
static int jumbo_disable = 0;		/* set to disable jumbo frame support */
TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable);

/* Checksum offload features the hardware can assist with. */
#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
/*
 * Devices supported by this driver.
 */
static const struct msk_product {
	uint16_t	msk_vendorid;	/* PCI vendor ID */
	uint16_t	msk_deviceid;	/* PCI device ID */
	const char	*msk_name;	/* probe description string */
} msk_products[] = {
	{ VENDORID_SK, DEVICEID_SK_YUKON2,
	    "SK-9Sxx Gigabit Ethernet" },
	{ VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
	    "SK-9Exx Gigabit Ethernet"},
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
	    "Marvell Yukon 88E8021CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021X,
	    "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
	    "Marvell Yukon 88E8022CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022X,
	    "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
	    "Marvell Yukon 88E8061CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061X,
	    "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
	    "Marvell Yukon 88E8062CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062X,
	    "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8035,
	    "Marvell Yukon 88E8035 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
	    "Marvell Yukon 88E8036 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
	    "Marvell Yukon 88E8038 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8039,
	    "Marvell Yukon 88E8039 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8040,
	    "Marvell Yukon 88E8040 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8040T,
	    "Marvell Yukon 88E8040T Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8042,
	    "Marvell Yukon 88E8042 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8048,
	    "Marvell Yukon 88E8048 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4361,
	    "Marvell Yukon 88E8050 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4360,
	    "Marvell Yukon 88E8052 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4362,
	    "Marvell Yukon 88E8053 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4363,
	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4364,
	    "Marvell Yukon 88E8056 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4365,
	    "Marvell Yukon 88E8070 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436A,
	    "Marvell Yukon 88E8058 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436B,
	    "Marvell Yukon 88E8071 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436C,
	    "Marvell Yukon 88E8072 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436D,
	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4370,
	    "Marvell Yukon 88E8075 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4380,
	    "Marvell Yukon 88E8057 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4381,
	    "Marvell Yukon 88E8059 Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
	    "D-Link 550SX Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560SX,
	    "D-Link 560SX Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
	    "D-Link 560T Gigabit Ethernet" }
};
/*
 * Human-readable Yukon II model names, indexed by the controller's
 * detected hardware type; used for attach-time identification output.
 */
static const char *model_name[] = {
	"Yukon XL",
	"Yukon EC Ultra",
	"Yukon EX",
	"Yukon EC",
	"Yukon FE",
	"Yukon FE+",
	"Yukon Supreme",
	"Yukon Ultra 2",
	"Yukon Unknown",
	"Yukon Optima",
};
static int mskc_probe(device_t);
static int mskc_attach(device_t);
static int mskc_detach(device_t);
static int mskc_shutdown(device_t);
static int mskc_setup_rambuffer(struct msk_softc *);
static int mskc_suspend(device_t);
static int mskc_resume(device_t);
static bus_dma_tag_t mskc_get_dma_tag(device_t, device_t);
static void mskc_reset(struct msk_softc *);
static int msk_probe(device_t);
static int msk_attach(device_t);
static int msk_detach(device_t);
static void msk_tick(void *);
static void msk_intr(void *);
static void msk_intr_phy(struct msk_if_softc *);
static void msk_intr_gmac(struct msk_if_softc *);
static __inline void msk_rxput(struct msk_if_softc *);
static int msk_handle_events(struct msk_softc *);
static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
static void msk_intr_hwerr(struct msk_softc *);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void msk_fixup_rx(struct mbuf *);
#endif
static __inline void msk_rxcsum(struct msk_if_softc *, uint32_t, struct mbuf *);
static void msk_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
static void msk_txeof(struct msk_if_softc *, int);
static int msk_encap(struct msk_if_softc *, struct mbuf **);
static void msk_start(if_t);
static void msk_start_locked(if_t);
static int msk_ioctl(if_t, u_long, caddr_t);
static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
static void msk_set_rambuffer(struct msk_if_softc *);
static void msk_set_tx_stfwd(struct msk_if_softc *);
static void msk_init(void *);
static void msk_init_locked(struct msk_if_softc *);
static void msk_stop(struct msk_if_softc *);
static void msk_watchdog(struct msk_if_softc *);
static int msk_mediachange(if_t);
static void msk_mediastatus(if_t, struct ifmediareq *);
static void msk_phy_power(struct msk_softc *, int);
static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int msk_status_dma_alloc(struct msk_softc *);
static void msk_status_dma_free(struct msk_softc *);
static int msk_txrx_dma_alloc(struct msk_if_softc *);
static int msk_rx_dma_jalloc(struct msk_if_softc *);
static void msk_txrx_dma_free(struct msk_if_softc *);
static void msk_rx_dma_jfree(struct msk_if_softc *);
static int msk_rx_fill(struct msk_if_softc *, int);
static int msk_init_rx_ring(struct msk_if_softc *);
static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
static void msk_init_tx_ring(struct msk_if_softc *);
static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
static int msk_newbuf(struct msk_if_softc *, int);
static int msk_jumbo_newbuf(struct msk_if_softc *, int);
static int msk_phy_readreg(struct msk_if_softc *, int, int);
static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
static int msk_miibus_readreg(device_t, int, int);
static int msk_miibus_writereg(device_t, int, int, int);
static void msk_miibus_statchg(device_t);
static void msk_rxfilter(struct msk_if_softc *);
static void msk_setvlan(struct msk_if_softc *, if_t);
static void msk_stats_clear(struct msk_if_softc *);
static void msk_stats_update(struct msk_if_softc *);
static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS);
static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS);
static void msk_sysctl_node(struct msk_if_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);
/* newbus method table for the mskc controller (parent) device. */
static device_method_t mskc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mskc_probe),
	DEVMETHOD(device_attach,	mskc_attach),
	DEVMETHOD(device_detach,	mskc_detach),
	DEVMETHOD(device_suspend,	mskc_suspend),
	DEVMETHOD(device_resume,	mskc_resume),
	DEVMETHOD(device_shutdown,	mskc_shutdown),

	DEVMETHOD(bus_get_dma_tag,	mskc_get_dma_tag),

	DEVMETHOD_END
};

static driver_t mskc_driver = {
	"mskc",
	mskc_methods,
	sizeof(struct msk_softc)
};

/* newbus method table for the per-port msk (child) device. */
static device_method_t msk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		msk_probe),
	DEVMETHOD(device_attach,	msk_attach),
	DEVMETHOD(device_detach,	msk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),

	DEVMETHOD_END
};

static driver_t msk_driver = {
	"msk",
	msk_methods,
	sizeof(struct msk_if_softc)
};

/* Attach mskc under pci, msk under mskc, and miibus under msk. */
DRIVER_MODULE(mskc, pci, mskc_driver, NULL, NULL);
DRIVER_MODULE(msk, mskc, msk_driver, NULL, NULL);
DRIVER_MODULE(miibus, msk, miibus_driver, NULL, NULL);

/* Resource layouts: I/O port vs. memory-mapped BARs, legacy vs. MSI IRQ. */
static struct resource_spec msk_res_spec_io[] = {
	{ SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE },
	{ -1, 0, 0 }
};

static struct resource_spec msk_res_spec_mem[] = {
	{ SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
	{ -1, 0, 0 }
};

static struct resource_spec msk_irq_spec_legacy[] = {
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0, 0 }
};

static struct resource_spec msk_irq_spec_msi[] = {
	{ SYS_RES_IRQ, 1, RF_ACTIVE },
	{ -1, 0, 0 }
};
/*
 * miibus read method: forward to the PHY register read helper.
 */
static int
msk_miibus_readreg(device_t dev, int phy, int reg)
{

	return (msk_phy_readreg(device_get_softc(dev), phy, reg));
}
/*
 * Read a PHY register through the GMAC SMI interface.
 * Starts an SMI read cycle and busy-waits (up to MSK_TIMEOUT
 * iterations, 1us apart) for the read-valid bit before latching the
 * data register.  Returns 0 (and logs) if the PHY never became ready.
 */
static int
msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
{
	struct msk_softc *sc;
	int i, val;

	sc = sc_if->msk_softc;

	/* Kick off the SMI read for the given PHY/register pair. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
		if ((val & GM_SMI_CT_RD_VAL) != 0) {
			/* Data ready; fetch it from the SMI data register. */
			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
			break;
		}
	}

	if (i == MSK_TIMEOUT) {
		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
		val = 0;
	}

	return (val);
}
/*
 * miibus write method: forward to the PHY register write helper.
 */
static int
msk_miibus_writereg(device_t dev, int phy, int reg, int val)
{

	return (msk_phy_writereg(device_get_softc(dev), phy, reg, val));
}
/*
 * Write a PHY register through the GMAC SMI interface.
 * Loads the data register, starts the write cycle, then busy-waits
 * (up to MSK_TIMEOUT iterations, 1us apart) for the busy bit to
 * clear.  A timeout is logged but not returned; always returns 0.
 */
static int
msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
{
	struct msk_softc *sc;
	int i;

	sc = sc_if->msk_softc;

	/* Data must be staged before the control write starts the cycle. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
		    GM_SMI_CT_BUSY) == 0)
			break;
	}
	if (i == MSK_TIMEOUT)
		if_printf(sc_if->msk_ifp, "phy write timeout\n");

	return (0);
}
/*
 * miibus status-change method: called by mii(4) when the PHY link
 * state changes.  Reprograms the GMAC speed/duplex/flow-control bits
 * to match the negotiated media, enables the MAC on link up, and
 * quiesces the MAC and PHY interrupts on link down.  Must be called
 * with the per-interface lock held.
 */
static void
msk_miibus_statchg(device_t dev)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	struct mii_data *mii;
	if_t ifp;
	uint32_t gmac;

	sc_if = device_get_softc(dev);
	sc = sc_if->msk_softc;

	MSK_IF_LOCK_ASSERT(sc_if);

	mii = device_get_softc(sc_if->msk_miibus);
	ifp = sc_if->msk_ifp;
	if (mii == NULL || ifp == NULL ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	/* Recompute the link flag from the current media status. */
	sc_if->msk_flags &= ~MSK_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc_if->msk_flags |= MSK_FLAG_LINK;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
			/* Gigabit media is only valid on non-FE parts. */
			if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
				sc_if->msk_flags |= MSK_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	if ((sc_if->msk_flags & MSK_FLAG_LINK) != 0) {
		/* Enable Tx FIFO Underrun. */
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
		/*
		 * Because mii(4) notify msk(4) that it detected link status
		 * change, there is no need to enable automatic
		 * speed/flow-control/duplex updates.
		 */
		gmac = GM_GPCR_AU_ALL_DIS;
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_T:
			gmac |= GM_GPCR_SPEED_1000;
			break;
		case IFM_100_TX:
			gmac |= GM_GPCR_SPEED_100;
			break;
		case IFM_10_T:
			/* 10Mbps is the register's default speed. */
			break;
		}
		/* Disable the pause directions the link did not negotiate. */
		if ((IFM_OPTIONS(mii->mii_media_active) &
		    IFM_ETH_RXPAUSE) == 0)
			gmac |= GM_GPCR_FC_RX_DIS;
		if ((IFM_OPTIONS(mii->mii_media_active) &
		    IFM_ETH_TXPAUSE) == 0)
			gmac |= GM_GPCR_FC_TX_DIS;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
			gmac |= GM_GPCR_DUP_FULL;
		else
			/* Half duplex: flow control is not applicable. */
			gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS;
		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read again to ensure writing. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		gmac = GMC_PAUSE_OFF;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_RXPAUSE) != 0)
				gmac = GMC_PAUSE_ON;
		}
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);
		/* Enable PHY interrupt for FIFO underrun/overflow. */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV,
		    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
	} else {
		/*
		 * Link state changed to down.
		 * Disable PHY interrupts.
		 */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
		/* Disable Rx/Tx MAC. */
		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		if ((gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) != 0) {
			gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
			GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
			/* Read again to ensure writing. */
			GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		}
	}
}
/*
 * if_foreach_llmaddr() callback: fold one link-level multicast
 * address into the 64-bit GMAC hash filter (passed via arg).
 * Always returns 1 so the caller counts every address.
 */
static u_int
msk_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *mchash = arg;
	uint32_t bit;

	/* Hash is the low 6 bits of the big-endian CRC of the address. */
	bit = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) & 0x3f;
	/* Set the corresponding bit in the two-word hash table. */
	mchash[bit >> 5] |= 1 << (bit & 0x1f);

	return (1);
}
/*
 * Program the GMAC receive filter for this port.
 *
 * Promiscuous mode disables both the unicast and multicast filters;
 * ALLMULTI enables them but opens the multicast hash; otherwise a
 * 64-bit hash is built from the interface's link-level multicast list
 * via msk_hash_maddr().  The hash is then loaded into the four 16-bit
 * GM_MC_ADDR_H* registers.  Caller must hold the per-interface lock
 * (asserted below).
 */
static void
msk_rxfilter(struct msk_if_softc *sc_if)
{
struct msk_softc *sc;
if_t ifp;
uint32_t mchash[2];
uint16_t mode;
sc = sc_if->msk_softc;
MSK_IF_LOCK_ASSERT(sc_if);
ifp = sc_if->msk_ifp;
bzero(mchash, sizeof(mchash));
mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
if ((if_getflags(ifp) & IFF_PROMISC) != 0)
mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
else if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
/*
 * NOTE(review): only the low 16 bits of each 32-bit hash word
 * are set here, so the GM_MC_ADDR_H2/H4 writes below program 0.
 * Confirm whether 0xffffffff was intended for a true
 * accept-all-multicast hash.
 */
mchash[0] = 0xffff;
mchash[1] = 0xffff;
} else {
mode |= GM_RXCR_UCF_ENA;
if_foreach_llmaddr(ifp, msk_hash_maddr, mchash);
if (mchash[0] != 0 || mchash[1] != 0)
mode |= GM_RXCR_MCF_ENA;
}
/* Split the 64-bit hash across the four 16-bit hash registers. */
GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
mchash[0] & 0xffff);
GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
(mchash[0] >> 16) & 0xffff);
GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
mchash[1] & 0xffff);
GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
(mchash[1] >> 16) & 0xffff);
GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}
static void
msk_setvlan(struct msk_if_softc *sc_if, if_t ifp)
{
	struct msk_softc *sc;
	uint32_t rxcmd, txcmd;

	sc = sc_if->msk_softc;

	/*
	 * Enable or disable hardware VLAN tag stripping (Rx side) and
	 * tag insertion (Tx side) to match the interface's
	 * IFCAP_VLAN_HWTAGGING capability setting.
	 */
	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
		rxcmd = RX_VLAN_STRIP_ON;
		txcmd = TX_VLAN_TAG_ON;
	} else {
		rxcmd = RX_VLAN_STRIP_OFF;
		txcmd = TX_VLAN_TAG_OFF;
	}
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), rxcmd);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), txcmd);
}
/*
 * Complete Rx ring initialization for the old descriptor format with
 * Rx checksum offload: msk_init_(jumbo_)rx_ring() queued an
 * OP_TCPSTART control list element, so wait for the prefetch unit to
 * consume it, then replace the consumed LE with a real receive buffer
 * to keep the ring full.  'jumbo' selects the jumbo ring.
 *
 * Returns 0 on success, ETIMEDOUT if the prefetch unit never advanced,
 * or ENOBUFS if no mbuf could be allocated.
 */
static int
msk_rx_fill(struct msk_if_softc *sc_if, int jumbo)
{
uint16_t idx;
int i;
if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
(if_getcapenable(sc_if->msk_ifp) & IFCAP_RXCSUM) != 0) {
/* Wait until controller executes OP_TCPSTART command. */
for (i = 100; i > 0; i--) {
DELAY(100);
idx = CSR_READ_2(sc_if->msk_softc,
Y2_PREF_Q_ADDR(sc_if->msk_rxq,
PREF_UNIT_GET_IDX_REG));
if (idx != 0)
break;
}
if (i == 0) {
device_printf(sc_if->msk_if_dev,
"prefetch unit stuck?\n");
return (ETIMEDOUT);
}
/*
 * Fill consumed LE with free buffer. This can be done
 * in Rx handler but we don't want to add special code
 * in fast handler.
 */
if (jumbo > 0) {
if (msk_jumbo_newbuf(sc_if, 0) != 0)
return (ENOBUFS);
bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
sc_if->msk_cdata.msk_jumbo_rx_ring_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
} else {
if (msk_newbuf(sc_if, 0) != 0)
return (ENOBUFS);
bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
sc_if->msk_cdata.msk_rx_ring_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/* Hand the refilled slot 0 to the prefetch unit. */
sc_if->msk_cdata.msk_rx_prod = 0;
CSR_WRITE_2(sc_if->msk_softc,
Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
sc_if->msk_cdata.msk_rx_prod);
}
return (0);
}
/*
 * Initialize the standard (non-jumbo) Rx descriptor ring: reset
 * producer/consumer state, link every soft descriptor to its list
 * element, optionally queue OP_TCPSTART control LEs so the chip knows
 * where TCP checksumming begins (old descriptor format only), and
 * populate the ring with fresh mbufs.  Returns 0 or ENOBUFS.
 * Caller must hold the per-interface lock (asserted below).
 */
static int
msk_init_rx_ring(struct msk_if_softc *sc_if)
{
struct msk_ring_data *rd;
struct msk_rxdesc *rxd;
int i, nbuf, prod;
MSK_IF_LOCK_ASSERT(sc_if);
sc_if->msk_cdata.msk_rx_cons = 0;
sc_if->msk_cdata.msk_rx_prod = 0;
sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
rd = &sc_if->msk_rdata;
bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
for (i = prod = 0; i < MSK_RX_RING_CNT; i++) {
rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
rxd->rx_m = NULL;
rxd->rx_le = &rd->msk_rx_ring[prod];
MSK_INC(prod, MSK_RX_RING_CNT);
}
nbuf = MSK_RX_BUF_CNT;
prod = 0;
/* Have controller know how to compute Rx checksum. */
if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
(if_getcapenable(sc_if->msk_ifp) & IFCAP_RXCSUM) != 0) {
/*
 * With 64-bit DMA each buffer occupies two LEs, so an extra
 * OP_TCPSTART element is queued and the consumer index is
 * advanced twice.
 */
#ifdef MSK_64BIT_DMA
rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
rxd->rx_m = NULL;
rxd->rx_le = &rd->msk_rx_ring[prod];
rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
ETHER_HDR_LEN);
rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
MSK_INC(prod, MSK_RX_RING_CNT);
MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
#endif
rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
rxd->rx_m = NULL;
rxd->rx_le = &rd->msk_rx_ring[prod];
rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
ETHER_HDR_LEN);
rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
MSK_INC(prod, MSK_RX_RING_CNT);
MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
nbuf--;
}
/* Fill the remaining slots with freshly allocated receive mbufs. */
for (i = 0; i < nbuf; i++) {
if (msk_newbuf(sc_if, prod) != 0)
return (ENOBUFS);
MSK_RX_INC(prod, MSK_RX_RING_CNT);
}
bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
sc_if->msk_cdata.msk_rx_ring_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Update prefetch unit. */
sc_if->msk_cdata.msk_rx_prod = prod;
CSR_WRITE_2(sc_if->msk_softc,
Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
(sc_if->msk_cdata.msk_rx_prod + MSK_RX_RING_CNT - 1) %
MSK_RX_RING_CNT);
/* Let msk_rx_fill() swap the TCPSTART LE for a real buffer. */
if (msk_rx_fill(sc_if, 0) != 0)
return (ENOBUFS);
return (0);
}
/*
 * Jumbo-frame counterpart of msk_init_rx_ring(): identical logic but
 * operates on the jumbo descriptor ring, tags, and buffer allocator.
 * Returns 0 or ENOBUFS.  Caller must hold the per-interface lock.
 */
static int
msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
{
struct msk_ring_data *rd;
struct msk_rxdesc *rxd;
int i, nbuf, prod;
MSK_IF_LOCK_ASSERT(sc_if);
sc_if->msk_cdata.msk_rx_cons = 0;
sc_if->msk_cdata.msk_rx_prod = 0;
sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
rd = &sc_if->msk_rdata;
bzero(rd->msk_jumbo_rx_ring,
sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
for (i = prod = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
rxd->rx_m = NULL;
rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
}
nbuf = MSK_RX_BUF_CNT;
prod = 0;
/* Have controller know how to compute Rx checksum. */
if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
(if_getcapenable(sc_if->msk_ifp) & IFCAP_RXCSUM) != 0) {
/* With 64-bit DMA, two LEs per buffer: queue an extra one. */
#ifdef MSK_64BIT_DMA
rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
rxd->rx_m = NULL;
rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
ETHER_HDR_LEN);
rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
#endif
rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
rxd->rx_m = NULL;
rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
ETHER_HDR_LEN);
rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
nbuf--;
}
/* Populate the remaining slots with jumbo receive mbufs. */
for (i = 0; i < nbuf; i++) {
if (msk_jumbo_newbuf(sc_if, prod) != 0)
return (ENOBUFS);
MSK_RX_INC(prod, MSK_JUMBO_RX_RING_CNT);
}
bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
sc_if->msk_cdata.msk_jumbo_rx_ring_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Update prefetch unit. */
sc_if->msk_cdata.msk_rx_prod = prod;
CSR_WRITE_2(sc_if->msk_softc,
Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
(sc_if->msk_cdata.msk_rx_prod + MSK_JUMBO_RX_RING_CNT - 1) %
MSK_JUMBO_RX_RING_CNT);
/* Let msk_rx_fill() swap the TCPSTART LE for a real buffer. */
if (msk_rx_fill(sc_if, 1) != 0)
return (ENOBUFS);
return (0);
}
static void
msk_init_tx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_txdesc *txd;
	int idx;

	/* Reset all software Tx bookkeeping for a fresh ring. */
	sc_if->msk_cdata.msk_tso_mtu = 0;
	sc_if->msk_cdata.msk_last_csum = 0;
	sc_if->msk_cdata.msk_tx_prod = 0;
	sc_if->msk_cdata.msk_tx_cons = 0;
	sc_if->msk_cdata.msk_tx_cnt = 0;
	sc_if->msk_cdata.msk_tx_high_addr = 0;

	/* Clear the descriptor ring and link each soft descriptor to it. */
	rd = &sc_if->msk_rdata;
	memset(rd->msk_tx_ring, 0,
	    sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
	for (idx = 0; idx < MSK_TX_RING_CNT; idx++) {
		txd = &sc_if->msk_cdata.msk_txdesc[idx];
		txd->tx_m = NULL;
		txd->tx_le = &rd->msk_tx_ring[idx];
	}

	/* Make the cleared ring visible to the device. */
	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_cdata.msk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * Re-arm the Rx list element at 'idx' with the mbuf it already owns,
 * handing the buffer straight back to the hardware (presumably used
 * when the received frame is dropped — e.g. allocation failure or
 * frame error in the Rx path; callers not visible here).  With 64-bit
 * DMA the leading OP_ADDR64 element is re-armed first.
 */
static __inline void
msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
{
struct msk_rx_desc *rx_le;
struct msk_rxdesc *rxd;
struct mbuf *m;
#ifdef MSK_64BIT_DMA
rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
rx_le = rxd->rx_le;
rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
MSK_INC(idx, MSK_RX_RING_CNT);
#endif
rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
m = rxd->rx_m;
rx_le = rxd->rx_le;
/* Give ownership back to the chip with the existing buffer length. */
rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}
/*
 * Jumbo-ring counterpart of msk_discard_rxbuf(): re-arm the jumbo Rx
 * list element at 'idx' with its existing mbuf instead of replacing it.
 */
static __inline void
msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
{
struct msk_rx_desc *rx_le;
struct msk_rxdesc *rxd;
struct mbuf *m;
#ifdef MSK_64BIT_DMA
rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
rx_le = rxd->rx_le;
rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
MSK_INC(idx, MSK_JUMBO_RX_RING_CNT);
#endif
rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
m = rxd->rx_m;
rx_le = rxd->rx_le;
/* Give ownership back to the chip with the existing buffer length. */
rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}
/*
 * Allocate a standard mbuf cluster (MCLBYTES), DMA-map it via the
 * spare map, and install it in Rx slot 'idx'.  The slot's old map is
 * unloaded and becomes the new spare, so a mapping failure never
 * leaves the ring without a buffer.  Returns 0 or ENOBUFS.
 */
static int
msk_newbuf(struct msk_if_softc *sc_if, int idx)
{
struct msk_rx_desc *rx_le;
struct msk_rxdesc *rxd;
struct mbuf *m;
bus_dma_segment_t segs[1];
bus_dmamap_t map;
int nsegs;
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MCLBYTES;
/* Align the payload unless the RAM buffer requires its own align. */
if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
m_adj(m, ETHER_ALIGN);
#ifndef __NO_STRICT_ALIGNMENT
else
m_adj(m, MSK_RX_BUF_ALIGN);
#endif
if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
BUS_DMA_NOWAIT) != 0) {
m_freem(m);
return (ENOBUFS);
}
KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
#ifdef MSK_64BIT_DMA
/* First LE of the pair carries the high 32 address bits. */
rx_le = rxd->rx_le;
rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr));
rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
MSK_INC(idx, MSK_RX_RING_CNT);
rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
#endif
/* Release any previously mapped buffer in this slot. */
if (rxd->rx_m != NULL) {
bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
rxd->rx_m = NULL;
}
/* Swap the freshly loaded spare map into the slot. */
map = rxd->rx_dmamap;
rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
sc_if->msk_cdata.msk_rx_sparemap = map;
bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
BUS_DMASYNC_PREREAD);
rxd->rx_m = m;
rx_le = rxd->rx_le;
rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
rx_le->msk_control =
htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
return (0);
}
/*
 * Jumbo counterpart of msk_newbuf(): allocate a 9KB cluster
 * (MJUM9BYTES), map it via the jumbo spare map, and install it in
 * jumbo Rx slot 'idx'.  Returns 0 or ENOBUFS.
 */
static int
msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
{
struct msk_rx_desc *rx_le;
struct msk_rxdesc *rxd;
struct mbuf *m;
bus_dma_segment_t segs[1];
bus_dmamap_t map;
int nsegs;
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MJUM9BYTES;
/* Align the payload unless the RAM buffer requires its own align. */
if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
m_adj(m, ETHER_ALIGN);
#ifndef __NO_STRICT_ALIGNMENT
else
m_adj(m, MSK_RX_BUF_ALIGN);
#endif
if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
BUS_DMA_NOWAIT) != 0) {
m_freem(m);
return (ENOBUFS);
}
KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
#ifdef MSK_64BIT_DMA
/* First LE of the pair carries the high 32 address bits. */
rx_le = rxd->rx_le;
rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr));
rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
MSK_INC(idx, MSK_JUMBO_RX_RING_CNT);
rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
#endif
/* Release any previously mapped buffer in this slot. */
if (rxd->rx_m != NULL) {
bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
rxd->rx_dmamap);
rxd->rx_m = NULL;
}
/* Swap the freshly loaded spare map into the slot. */
map = rxd->rx_dmamap;
rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
BUS_DMASYNC_PREREAD);
rxd->rx_m = m;
rx_le = rxd->rx_le;
rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
rx_le->msk_control =
htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
return (0);
}
/*
 * Set media options: hand the request to the PHY driver.
 */
static int
msk_mediachange(if_t ifp)
{
	struct msk_if_softc *sc_if;
	struct mii_data *mii;
	int rc;

	sc_if = if_getsoftc(ifp);
	MSK_IF_LOCK(sc_if);
	mii = device_get_softc(sc_if->msk_miibus);
	rc = mii_mediachg(mii);
	MSK_IF_UNLOCK(sc_if);

	return (rc);
}
/*
 * Report current media status from the PHY.
 */
static void
msk_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
	struct msk_if_softc *sc_if;
	struct mii_data *mii;

	sc_if = if_getsoftc(ifp);
	MSK_IF_LOCK(sc_if);
	/* Nothing to report while the interface is down. */
	if ((if_getflags(ifp) & IFF_UP) == 0) {
		MSK_IF_UNLOCK(sc_if);
		return;
	}

	mii = device_get_softc(sc_if->msk_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	MSK_IF_UNLOCK(sc_if);
}
/*
 * Interface ioctl handler: MTU changes (with jumbo-frame restrictions),
 * interface flag changes (up/down, promiscuous/allmulti), multicast
 * list updates, media selection, and capability toggles.  Everything
 * else is delegated to ether_ioctl().
 */
static int
msk_ioctl(if_t ifp, u_long command, caddr_t data)
{
struct msk_if_softc *sc_if;
struct ifreq *ifr;
struct mii_data *mii;
int error, mask, reinit;
sc_if = if_getsoftc(ifp);
ifr = (struct ifreq *)data;
error = 0;
switch(command) {
case SIOCSIFMTU:
/*
 * Validate the requested MTU.  Jumbo MTUs require jumbo
 * support, and chips flagged MSK_FLAG_JUMBO_NOCSUM lose Tx
 * checksum/TSO offload at jumbo sizes.  A running interface
 * is reinitialized to apply the change.
 */
MSK_IF_LOCK(sc_if);
if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
error = EINVAL;
else if (if_getmtu(ifp) != ifr->ifr_mtu) {
if (ifr->ifr_mtu > ETHERMTU) {
if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
error = EINVAL;
MSK_IF_UNLOCK(sc_if);
break;
}
if ((sc_if->msk_flags &
MSK_FLAG_JUMBO_NOCSUM) != 0) {
if_sethwassistbits(ifp, 0,
MSK_CSUM_FEATURES | CSUM_TSO);
if_setcapenablebit(ifp, 0,
IFCAP_TSO4 | IFCAP_TXCSUM);
VLAN_CAPABILITIES(ifp);
}
}
if_setmtu(ifp, ifr->ifr_mtu);
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
msk_init_locked(sc_if);
}
}
MSK_IF_UNLOCK(sc_if);
break;
case SIOCSIFFLAGS:
/*
 * Only reprogram the Rx filter when a filter-relevant flag
 * changed; otherwise bring the interface up or down.
 */
MSK_IF_LOCK(sc_if);
if ((if_getflags(ifp) & IFF_UP) != 0) {
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
((if_getflags(ifp) ^ sc_if->msk_if_flags) &
(IFF_PROMISC | IFF_ALLMULTI)) != 0)
msk_rxfilter(sc_if);
else if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0)
msk_init_locked(sc_if);
} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
msk_stop(sc_if);
sc_if->msk_if_flags = if_getflags(ifp);
MSK_IF_UNLOCK(sc_if);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
MSK_IF_LOCK(sc_if);
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
msk_rxfilter(sc_if);
MSK_IF_UNLOCK(sc_if);
break;
case SIOCGIFMEDIA:
case SIOCSIFMEDIA:
mii = device_get_softc(sc_if->msk_miibus);
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
break;
case SIOCSIFCAP:
/*
 * Toggle each requested-and-supported capability; adjust the
 * hardware-assist bits to match.  Toggling RXCSUM on the old
 * descriptor format requires a reinit (handled at the end).
 */
reinit = 0;
MSK_IF_LOCK(sc_if);
mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
if ((mask & IFCAP_TXCSUM) != 0 &&
(IFCAP_TXCSUM & if_getcapabilities(ifp)) != 0) {
if_togglecapenable(ifp, IFCAP_TXCSUM);
if ((IFCAP_TXCSUM & if_getcapenable(ifp)) != 0)
if_sethwassistbits(ifp, MSK_CSUM_FEATURES, 0);
else
if_sethwassistbits(ifp, 0, MSK_CSUM_FEATURES);
}
if ((mask & IFCAP_RXCSUM) != 0 &&
(IFCAP_RXCSUM & if_getcapabilities(ifp)) != 0) {
if_togglecapenable(ifp, IFCAP_RXCSUM);
if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0)
reinit = 1;
}
if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
(IFCAP_VLAN_HWCSUM & if_getcapabilities(ifp)) != 0)
if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
if ((mask & IFCAP_TSO4) != 0 &&
(IFCAP_TSO4 & if_getcapabilities(ifp)) != 0) {
if_togglecapenable(ifp, IFCAP_TSO4);
if ((IFCAP_TSO4 & if_getcapenable(ifp)) != 0)
if_sethwassistbits(ifp, CSUM_TSO, 0);
else
if_sethwassistbits(ifp, 0, CSUM_TSO);
}
if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
(IFCAP_VLAN_HWTSO & if_getcapabilities(ifp)) != 0)
if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
(IFCAP_VLAN_HWTAGGING & if_getcapabilities(ifp)) != 0) {
if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
if ((IFCAP_VLAN_HWTAGGING & if_getcapenable(ifp)) == 0)
if_setcapenablebit(ifp, 0,
IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
msk_setvlan(sc_if, ifp);
}
/* Jumbo MTU on NOCSUM chips forces Tx offloads back off. */
if (if_getmtu(ifp) > ETHERMTU &&
(sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
if_sethwassistbits(ifp, 0, (MSK_CSUM_FEATURES | CSUM_TSO));
if_setcapenablebit(ifp, 0, (IFCAP_TSO4 | IFCAP_TXCSUM));
}
VLAN_CAPABILITIES(ifp);
if (reinit > 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
msk_init_locked(sc_if);
}
MSK_IF_UNLOCK(sc_if);
break;
default:
error = ether_ioctl(ifp, command, data);
break;
}
return (error);
}
static int
mskc_probe(device_t dev)
{
	const struct msk_product *mp;
	uint16_t devid, vendor;
	size_t i;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);

	/* Scan the product table for a matching vendor/device pair. */
	for (i = 0; i < nitems(msk_products); i++) {
		mp = &msk_products[i];
		if (mp->msk_vendorid == vendor &&
		    mp->msk_deviceid == devid) {
			device_set_desc(dev, mp->msk_name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}
static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
	int off, port;

	/* Get adapter SRAM size (the register counts 4KB units). */
	sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
	if (bootverbose)
		device_printf(sc->msk_dev,
		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
	if (sc->msk_ramsize == 0)
		return (0);

	sc->msk_pflags |= MSK_FLAG_RAMBUF;
	/*
	 * Give receiver 2/3 of memory and round down to the multiple
	 * of 1024. Tx/Rx RAM buffer size of Yukon II should be multiple
	 * of 1024.
	 */
	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;

	/* Carve contiguous Rx-then-Tx regions out of SRAM, per port. */
	off = 0;
	for (port = 0; port < sc->msk_num_port; port++) {
		sc->msk_rxqstart[port] = off;
		off += sc->msk_rxqsize;
		sc->msk_rxqend[port] = off - 1;
		sc->msk_txqstart[port] = off;
		off += sc->msk_txqsize;
		sc->msk_txqend[port] = off - 1;
		if (bootverbose) {
			device_printf(sc->msk_dev,
			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", port,
			    sc->msk_rxqsize / 1024, sc->msk_rxqstart[port],
			    sc->msk_rxqend[port]);
			device_printf(sc->msk_dev,
			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", port,
			    sc->msk_txqsize / 1024, sc->msk_txqstart[port],
			    sc->msk_txqend[port]);
		}
	}
	return (0);
}
/*
 * Power the PHY(s) and associated clocks up or down.  'mode' is
 * MSK_PHY_POWERUP or MSK_PHY_POWERDOWN; other values are ignored.
 * The register write sequence is order-sensitive hardware
 * initialization — do not reorder.
 */
static void
msk_phy_power(struct msk_softc *sc, int mode)
{
uint32_t our, val;
int i;
switch (mode) {
case MSK_PHY_POWERUP:
/* Switch power to VCC (WA for VAUX problem). */
CSR_WRITE_1(sc, B0_POWER_CTRL,
PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
/* Disable Core Clock Division, set Clock Select to 0. */
CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
val = 0;
if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
/* Enable bits are inverted. */
val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
}
/*
* Enable PCI & Core Clock, enable clock gating for both Links.
*/
CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
our = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
our &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
/* Deassert Low Power for 1st PHY. */
our |= PCI_Y2_PHY1_COMA;
if (sc->msk_num_port > 1)
our |= PCI_Y2_PHY2_COMA;
}
}
/* ASPM/clock-run workarounds for EC Ultra, EX and FE+ chips. */
if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U ||
sc->msk_hw_id == CHIP_ID_YUKON_EX ||
sc->msk_hw_id >= CHIP_ID_YUKON_FE_P) {
val = CSR_PCI_READ_4(sc, PCI_OUR_REG_4);
val &= (PCI_FORCE_ASPM_REQUEST |
PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY |
PCI_ASPM_CLKRUN_REQUEST);
/* Set all bits to 0 except bits 15..12. */
CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, val);
val = CSR_PCI_READ_4(sc, PCI_OUR_REG_5);
val &= PCI_CTL_TIM_VMAIN_AV_MSK;
CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, val);
CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0);
CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
/*
* Disable status race, workaround for
* Yukon EC Ultra & Yukon EX.
*/
val = CSR_READ_4(sc, B2_GP_IO);
val |= GLB_GPIO_STAT_RACE_DIS;
CSR_WRITE_4(sc, B2_GP_IO, val);
CSR_READ_4(sc, B2_GP_IO);
}
/* Release PHY from PowerDown/COMA mode. */
CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, our);
/* Pulse the GMAC link reset on every port. */
for (i = 0; i < sc->msk_num_port; i++) {
CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
GMLC_RST_SET);
CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
GMLC_RST_CLR);
}
break;
case MSK_PHY_POWERDOWN:
/* Put PHYs into power-down; handle inverted COMA bits on XL. */
val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
val &= ~PCI_Y2_PHY1_COMA;
if (sc->msk_num_port > 1)
val &= ~PCI_Y2_PHY2_COMA;
}
CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);
val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
/* Enable bits are inverted. */
val = 0;
}
/*
* Disable PCI & Core Clock, disable clock gating for
* both Links.
*/
CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
CSR_WRITE_1(sc, B0_POWER_CTRL,
PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
break;
default:
break;
}
}
/*
 * Bring the controller to a known state: disable ASF firmware, clear
 * PCI/PEX error state, power up the PHYs, reset the GMACs, program
 * RAM-buffer timeouts on chips that need it, apply PCI-X/PCIe bus
 * workarounds, and (re)initialize the status list unit.  The sequence
 * of register accesses is order-sensitive hardware initialization.
 */
static void
mskc_reset(struct msk_softc *sc)
{
bus_addr_t addr;
uint16_t status;
uint32_t val;
int i, initram;
/* Disable ASF. */
if (sc->msk_hw_id >= CHIP_ID_YUKON_XL &&
sc->msk_hw_id <= CHIP_ID_YUKON_SUPR) {
if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
/* Clear AHB bridge & microcontroller reset. */
status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
Y2_ASF_HCU_CCSR_CPU_RST_MODE);
/* Clear ASF microcontroller state. */
status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK;
CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
} else
CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
/*
* Since we disabled ASF, S/W reset is required for
* Power Management.
*/
CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
}
/* Clear all error bits in the PCI status register. */
status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
pci_write_config(sc->msk_dev, PCIR_STATUS, status |
PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
/* Bus-specific error cleanup and tuning. */
switch (sc->msk_bustype) {
case MSK_PEX_BUS:
/* Clear all PEX errors. */
CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
if ((val & PEX_RX_OV) != 0) {
sc->msk_intrmask &= ~Y2_IS_HW_ERR;
sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
}
break;
case MSK_PCI_BUS:
case MSK_PCIX_BUS:
/* Set Cache Line Size to 2(8bytes) if configured to 0. */
val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
if (val == 0)
pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
if (sc->msk_bustype == MSK_PCIX_BUS) {
/* Set Cache Line Size opt. */
val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
val |= PCI_CLS_OPT;
pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
}
break;
}
/* Set PHY power state. */
msk_phy_power(sc, MSK_PHY_POWERUP);
/* Reset GPHY/GMAC Control */
for (i = 0; i < sc->msk_num_port; i++) {
/* GPHY Control reset. */
CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
/* GMAC Control reset. */
CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
GMC_BYP_RETR_ON);
}
if (sc->msk_hw_id == CHIP_ID_YUKON_SUPR &&
sc->msk_hw_rev > CHIP_REV_YU_SU_B0)
CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, PCI_CLK_MACSEC_DIS);
if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) {
/* Disable PCIe PHY powerdown(reg 0x80, bit7). */
CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080);
}
CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
/* LED On. */
CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
/* Clear TWSI IRQ. */
CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
/* Turn off hardware timer. */
CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
/* Turn off descriptor polling. */
CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
/* Turn off time stamps. */
CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
/* Only XL, EC and FE chips get their RAM interface reprogrammed. */
initram = 0;
if (sc->msk_hw_id == CHIP_ID_YUKON_XL ||
sc->msk_hw_id == CHIP_ID_YUKON_EC ||
sc->msk_hw_id == CHIP_ID_YUKON_FE)
initram++;
/* Configure timeout values. */
for (i = 0; initram > 0 && i < sc->msk_num_port; i++) {
CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
MSK_RI_TO_53);
CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
MSK_RI_TO_53);
CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
MSK_RI_TO_53);
CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
MSK_RI_TO_53);
CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
MSK_RI_TO_53);
CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
MSK_RI_TO_53);
CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
MSK_RI_TO_53);
CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
MSK_RI_TO_53);
CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
MSK_RI_TO_53);
CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
MSK_RI_TO_53);
CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
MSK_RI_TO_53);
CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
MSK_RI_TO_53);
}
/* Disable all interrupts. */
CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
CSR_READ_4(sc, B0_HWE_IMSK);
CSR_WRITE_4(sc, B0_IMSK, 0);
CSR_READ_4(sc, B0_IMSK);
/*
* On dual port PCI-X card, there is an problem where status
* can be received out of order due to split transactions.
*/
if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) {
uint16_t pcix_cmd;
pcix_cmd = pci_read_config(sc->msk_dev,
sc->msk_pcixcap + PCIXR_COMMAND, 2);
/* Clear Max Outstanding Split Transactions. */
pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS;
CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
pci_write_config(sc->msk_dev,
sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2);
CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
if (sc->msk_expcap != 0) {
/* Change Max. Read Request Size to 2048 bytes. */
if (pci_get_max_read_req(sc->msk_dev) == 512)
pci_set_max_read_req(sc->msk_dev, 2048);
}
/* Clear status list. */
bzero(sc->msk_stat_ring,
sizeof(struct msk_stat_desc) * sc->msk_stat_count);
sc->msk_stat_cons = 0;
bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
/* Set the status list base address. */
addr = sc->msk_stat_ring_paddr;
CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
/* Set the status list last index. */
CSR_WRITE_2(sc, STAT_LAST_IDX, sc->msk_stat_count - 1);
if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
/* WA for dev. #4.3 */
CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
/* WA for dev. #4.18 */
CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
} else {
CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
else
CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
}
/*
* Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
*/
CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
/* Enable status unit. */
CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
}
static int
msk_probe(device_t dev)
{
	struct msk_softc *sc;

	sc = device_get_softc(device_get_parent(dev));

	/*
	 * Not much to do here. We always know there will be
	 * at least one GMAC present, and if there are two,
	 * mskc_attach() will create a second device instance
	 * for us.
	 */
	device_set_descf(dev,
	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
	    sc->msk_hw_rev);

	return (BUS_PROBE_DEFAULT);
}
/*
 * Per-port (GMAC) attach: wire up the per-interface softc, allocate
 * Tx/Rx DMA areas, create and configure the ifnet (capabilities,
 * ioctl/start/init callbacks), read the station address from the
 * chip, then attach ethernet and the PHY via miibus.  On failure the
 * partially set up port is torn down through msk_detach().
 */
static int
msk_attach(device_t dev)
{
struct msk_softc *sc;
struct msk_if_softc *sc_if;
if_t ifp;
struct msk_mii_data *mmd;
int i, port, error;
uint8_t eaddr[6];
if (dev == NULL)
return (EINVAL);
error = 0;
sc_if = device_get_softc(dev);
sc = device_get_softc(device_get_parent(dev));
mmd = device_get_ivars(dev);
port = mmd->port;
sc_if->msk_if_dev = dev;
sc_if->msk_port = port;
sc_if->msk_softc = sc;
sc_if->msk_flags = sc->msk_pflags;
sc->msk_if[port] = sc_if;
/* Setup Tx/Rx queue register offsets. */
if (port == MSK_PORT_A) {
sc_if->msk_txq = Q_XA1;
sc_if->msk_txsq = Q_XS1;
sc_if->msk_rxq = Q_R1;
} else {
sc_if->msk_txq = Q_XA2;
sc_if->msk_txsq = Q_XS2;
sc_if->msk_rxq = Q_R2;
}
callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
msk_sysctl_node(sc_if);
if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
goto fail;
msk_rx_dma_jalloc(sc_if);
ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
- error = ENOSPC;
- goto fail;
- }
if_setsoftc(ifp, sc_if);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setcapabilities(ifp, IFCAP_TXCSUM | IFCAP_TSO4);
/*
* Enable Rx checksum offloading if controller supports
* new descriptor formant and controller is not Yukon XL.
*/
if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
sc->msk_hw_id != CHIP_ID_YUKON_XL)
if_setcapabilitiesbit(ifp, IFCAP_RXCSUM, 0);
if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
(sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
if_setcapabilitiesbit(ifp, IFCAP_RXCSUM, 0);
if_sethwassist(ifp, MSK_CSUM_FEATURES | CSUM_TSO);
if_setcapenable(ifp, if_getcapabilities(ifp));
if_setioctlfn(ifp, msk_ioctl);
if_setstartfn(ifp, msk_start);
if_setinitfn(ifp, msk_init);
if_setsendqlen(ifp, MSK_TX_RING_CNT - 1);
if_setsendqready(ifp);
/*
* Get station address for this interface. Note that
* dual port cards actually come with three station
* addresses: one for each port, plus an extra. The
* extra one is used by the SysKonnect driver software
* as a 'virtual' station address for when both ports
* are operating in failover mode. Currently we don't
* use this extra address.
*/
MSK_IF_LOCK(sc_if);
for (i = 0; i < ETHER_ADDR_LEN; i++)
eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
/*
* Call MI attach routine. Can't hold locks when calling into ether_*.
*/
MSK_IF_UNLOCK(sc_if);
ether_ifattach(ifp, eaddr);
MSK_IF_LOCK(sc_if);
/* VLAN capability setup */
if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
if ((sc_if->msk_flags & MSK_FLAG_NOHWVLAN) == 0) {
/*
* Due to Tx checksum offload hardware bugs, msk(4) manually
* computes checksum for short frames. For VLAN tagged frames
* this workaround does not work so disable checksum offload
* for VLAN interface.
*/
if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO, 0);
/*
* Enable Rx checksum offloading for VLAN tagged frames
* if controller support new descriptor format.
*/
if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
(sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
}
if_setcapenable(ifp, if_getcapabilities(ifp));
/*
* Disable RX checksum offloading on controllers that don't use
* new descriptor format but give chance to enable it.
*/
if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0)
if_setcapenablebit(ifp, 0, IFCAP_RXCSUM);
/*
* Tell the upper layer(s) we support long frames.
* Must appear after the call to ether_ifattach() because
* ether_ifattach() sets ifi_hdrlen to the default value.
*/
if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
/*
* Do miibus setup.
*/
MSK_IF_UNLOCK(sc_if);
error = mii_attach(dev, &sc_if->msk_miibus, ifp, msk_mediachange,
msk_mediastatus, BMSR_DEFCAPMASK, PHY_ADDR_MARV, MII_OFFSET_ANY,
mmd->mii_flags);
if (error != 0) {
device_printf(sc_if->msk_if_dev, "attaching PHYs failed\n");
ether_ifdetach(ifp);
error = ENXIO;
goto fail;
}
fail:
if (error != 0) {
/* Access should be ok even though lock has been dropped */
sc->msk_if[port] = NULL;
msk_detach(dev);
}
return (error);
}
/*
* Attach the interface. Allocate softc structures, do ifmedia
* setup and ethernet/BPF attach.
*/
static int
mskc_attach(device_t dev)
{
	struct msk_softc *sc;
	struct msk_mii_data *mmd;
	int error, msic, msir, reg;

	sc = device_get_softc(dev);
	sc->msk_dev = dev;
	mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);
	/* Allocate I/O resource */
#ifdef MSK_USEIOSPACE
	sc->msk_res_spec = msk_res_spec_io;
#else
	sc->msk_res_spec = msk_res_spec_mem;
#endif
	sc->msk_irq_spec = msk_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
	if (error) {
		/* Fall back to the other resource type before giving up. */
		if (sc->msk_res_spec == msk_res_spec_mem)
			sc->msk_res_spec = msk_res_spec_io;
		else
			sc->msk_res_spec = msk_res_spec_mem;
		error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
		if (error) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->msk_res_spec == msk_res_spec_mem ? "memory" :
			    "I/O");
			mtx_destroy(&sc->msk_mtx);
			return (ENXIO);
		}
	}
	/* Enable all clocks before accessing any registers. */
	CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
	/* Bail out if chip is not recognized. */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id > CHIP_ID_YUKON_OPT ||
	    sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) {
		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
		    sc->msk_hw_id, sc->msk_hw_rev);
		mtx_destroy(&sc->msk_mtx);
		return (ENXIO);
	}

	/*
	 * Rx event processing limit, exported as a sysctl and
	 * overridable via a device hint of the same name.
	 */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "process_limit",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
	    "max number of Rx events to process");

	sc->msk_process_limit = MSK_PROC_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "process_limit", &sc->msk_process_limit);
	if (error == 0) {
		/* Clamp out-of-range hint values back to the default. */
		if (sc->msk_process_limit < MSK_PROC_MIN ||
		    sc->msk_process_limit > MSK_PROC_MAX) {
			device_printf(dev, "process_limit value out of range; "
			    "using default: %d\n", MSK_PROC_DEFAULT);
			sc->msk_process_limit = MSK_PROC_DEFAULT;
		}
	}

	sc->msk_int_holdoff = MSK_INT_HOLDOFF_DEFAULT;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "int_holdoff", CTLFLAG_RW, &sc->msk_int_holdoff, 0,
	    "Maximum number of time to delay interrupts");
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "int_holdoff", &sc->msk_int_holdoff);

	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
	/* Check number of MACs. */
	sc->msk_num_port = 1;
	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
	    CFG_DUAL_MAC_MSK) {
		/* Dual-MAC part; second port counts only if not inactive. */
		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			sc->msk_num_port++;
	}

	/* Check bus type. */
	if (pci_find_cap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0) {
		sc->msk_bustype = MSK_PEX_BUS;
		sc->msk_expcap = reg;
	} else if (pci_find_cap(sc->msk_dev, PCIY_PCIX, &reg) == 0) {
		sc->msk_bustype = MSK_PCIX_BUS;
		sc->msk_pcixcap = reg;
	} else
		sc->msk_bustype = MSK_PCI_BUS;

	/* Per-variant core clock and feature/workaround flags. */
	switch (sc->msk_hw_id) {
	case CHIP_ID_YUKON_EC:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO;
		break;
	case CHIP_ID_YUKON_EC_U:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM;
		break;
	case CHIP_ID_YUKON_EX:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
		    MSK_FLAG_AUTOTX_CSUM;
		/*
		 * Yukon Extreme seems to have silicon bug for
		 * automatic Tx checksum calculation capability.
		 */
		if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
			sc->msk_pflags &= ~MSK_FLAG_AUTOTX_CSUM;
		/*
		 * Yukon Extreme A0 could not use store-and-forward
		 * for jumbo frames, so disable Tx checksum
		 * offloading for jumbo frames.
		 */
		if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0)
			sc->msk_pflags |= MSK_FLAG_JUMBO_NOCSUM;
		break;
	case CHIP_ID_YUKON_FE:
		sc->msk_clock = 100;	/* 100 MHz */
		sc->msk_pflags |= MSK_FLAG_FASTETHER;
		break;
	case CHIP_ID_YUKON_FE_P:
		sc->msk_clock = 50;	/* 50 MHz */
		sc->msk_pflags |= MSK_FLAG_FASTETHER | MSK_FLAG_DESCV2 |
		    MSK_FLAG_AUTOTX_CSUM;
		if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
			/*
			 * XXX
			 * FE+ A0 has status LE writeback bug so msk(4)
			 * does not rely on status word of received frame
			 * in msk_rxeof() which in turn disables all
			 * hardware assistance bits reported by the status
			 * word as well as validity of the received frame.
			 * Just pass received frames to upper stack with
			 * minimal test and let upper stack handle them.
			 */
			sc->msk_pflags |= MSK_FLAG_NOHWVLAN |
			    MSK_FLAG_NORXCHK | MSK_FLAG_NORX_CSUM;
		}
		break;
	case CHIP_ID_YUKON_XL:
		sc->msk_clock = 156;	/* 156 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO;
		break;
	case CHIP_ID_YUKON_SUPR:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
		    MSK_FLAG_AUTOTX_CSUM;
		break;
	case CHIP_ID_YUKON_UL_2:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO;
		break;
	case CHIP_ID_YUKON_OPT:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2;
		break;
	default:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	}

	/* Allocate IRQ resources. */
	msic = pci_msi_count(dev);
	if (bootverbose)
		device_printf(dev, "MSI count : %d\n", msic);
	if (legacy_intr != 0)
		msi_disable = 1;
	if (msi_disable == 0 && msic > 0) {
		/* Prefer a single MSI vector over the legacy INTx line. */
		msir = 1;
		if (pci_alloc_msi(dev, &msir) == 0) {
			if (msir == 1) {
				sc->msk_pflags |= MSK_FLAG_MSI;
				sc->msk_irq_spec = msk_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
	}

	error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
	if (error) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		goto fail;
	}

	if ((error = msk_status_dma_alloc(sc)) != 0)
		goto fail;

	/* Set base interrupt mask. */
	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;

	/* Reset the adapter. */
	mskc_reset(sc);

	if ((error = mskc_setup_rambuffer(sc)) != 0)
		goto fail;

	/* Add one child msk(4) device per detected port. */
	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
	if (sc->msk_devs[MSK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO);
	mmd->port = MSK_PORT_A;
	mmd->pmd = sc->msk_pmd;
	mmd->mii_flags |= MIIF_DOPAUSE;
	/* 'L'/'S'/'P' PMD types (from B2_PMD_TYP) select fiber media. */
	if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
		mmd->mii_flags |= MIIF_HAVEFIBER;
	if (sc->msk_pmd == 'P')
		mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
	device_set_ivars(sc->msk_devs[MSK_PORT_A], mmd);

	if (sc->msk_num_port > 1) {
		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
		if (sc->msk_devs[MSK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK |
		    M_ZERO);
		mmd->port = MSK_PORT_B;
		mmd->pmd = sc->msk_pmd;
		if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
			mmd->mii_flags |= MIIF_HAVEFIBER;
		if (sc->msk_pmd == 'P')
			mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
		device_set_ivars(sc->msk_devs[MSK_PORT_B], mmd);
	}

	error = bus_generic_attach(dev);
	if (error) {
		device_printf(dev, "failed to attach port(s)\n");
		goto fail;
	}

	/* Hook interrupt last to avoid having to lock softc. */
	error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
	    INTR_MPSAFE, NULL, msk_intr, sc, &sc->msk_intrhand);
	if (error != 0) {
		device_printf(dev, "couldn't set up interrupt handler\n");
		goto fail;
	}

fail:
	/* mskc_detach() is safe to call with partial allocations. */
	if (error != 0)
		mskc_detach(dev);

	return (error);
}
/*
* Shutdown hardware and free up resources. This can be called any
* time after the mutex has been initialized. It is called in both
* the error case in attach and the normal detach case so it needs
* to be careful about only freeing resources that have actually been
* allocated.
*/
static int
msk_detach(device_t dev)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	if_t ifp;

	sc_if = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
	    ("msk mutex not initialized in msk_detach"));
	MSK_IF_LOCK(sc_if);

	ifp = sc_if->msk_ifp;
	if (device_is_attached(dev)) {
		/* XXX */
		/* Mark the port as going away so other paths bail out. */
		sc_if->msk_flags |= MSK_FLAG_DETACH;
		msk_stop(sc_if);
		/* Can't hold locks while calling detach. */
		MSK_IF_UNLOCK(sc_if);
		/* Drain the tick callout unlocked to avoid deadlock. */
		callout_drain(&sc_if->msk_tick_ch);
		if (ifp)
			ether_ifdetach(ifp);
		MSK_IF_LOCK(sc_if);
	}

	/*
	 * We're generally called from mskc_detach() which is using
	 * device_delete_child() to get to here. It's already trashed
	 * miibus for us, so don't do it here or we'll panic.
	 *
	 * if (sc_if->msk_miibus != NULL) {
	 * 	device_delete_child(dev, sc_if->msk_miibus);
	 * 	sc_if->msk_miibus = NULL;
	 * }
	 */

	/* Release per-port DMA resources, then unlink from the parent. */
	msk_rx_dma_jfree(sc_if);
	msk_txrx_dma_free(sc_if);
	bus_generic_detach(dev);

	sc = sc_if->msk_softc;
	sc->msk_if[sc_if->msk_port] = NULL;
	MSK_IF_UNLOCK(sc_if);

	/* Free the ifnet after dropping the lock. */
	if (ifp)
		if_free(ifp);

	return (0);
}
/*
 * Controller detach: delete the child msk(4) ports (freeing their
 * ivars), quiesce and reset the hardware, then release interrupt,
 * DMA, bus and lock resources.  Also used as the error-unwind path
 * of mskc_attach(), so every step tolerates partially-initialized
 * state.
 */
static int
mskc_detach(device_t dev)
{
	struct msk_softc *sc;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));

	if (device_is_alive(dev)) {
		if (sc->msk_devs[MSK_PORT_A] != NULL) {
			/* ivars were malloc'ed in mskc_attach(). */
			free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
			    M_DEVBUF);
			device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
		}
		if (sc->msk_devs[MSK_PORT_B] != NULL) {
			free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
			    M_DEVBUF);
			device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
		}
		bus_generic_detach(dev);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);	/* flush the write */
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);

	/* LED Off. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);

	/* Put hardware reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);

	msk_status_dma_free(sc);

	if (sc->msk_intrhand) {
		bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand);
		sc->msk_intrhand = NULL;
	}
	bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
	if ((sc->msk_pflags & MSK_FLAG_MSI) != 0)
		pci_release_msi(dev);
	bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
	mtx_destroy(&sc->msk_mtx);

	return (0);
}
/*
 * Bus method: child msk(4) ports share the controller's DMA tag.
 */
static bus_dma_tag_t
mskc_get_dma_tag(device_t bus, device_t child __unused)
{
	bus_dma_tag_t tag;

	tag = bus_get_dma_tag(bus);
	return (tag);
}
/*
 * Context handed to msk_dmamap_cb() so the caller of bus_dmamap_load()
 * can retrieve the bus address of the loaded segment.
 */
struct msk_dmamap_arg {
	bus_addr_t	msk_busaddr;	/* filled in by msk_dmamap_cb() */
};
/*
 * bus_dmamap_load() callback: record the bus address of the single
 * segment in the caller-supplied msk_dmamap_arg.  On load error the
 * context is left untouched.
 */
static void
msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct msk_dmamap_arg *ctx;

	if (error == 0) {
		ctx = arg;
		ctx->msk_busaddr = segs[0].ds_addr;
	}
}
/* Create status DMA region. */
/*
 * Allocate the shared status list-element (LE) ring: create a DMA
 * tag, allocate coherent zeroed memory and load it to obtain the
 * bus address stored in msk_stat_ring_paddr.  Returns 0 or an errno;
 * partial allocations are released later by msk_status_dma_free().
 */
static int
msk_status_dma_alloc(struct msk_softc *sc)
{
	struct msk_dmamap_arg ctx;
	bus_size_t stat_sz;
	int count, error;

	/*
	 * It seems controller requires number of status LE entries
	 * is power of 2 and the maximum number of status LE entries
	 * is 4096. For dual-port controllers, the number of status
	 * LE entries should be large enough to hold both port's
	 * status updates.
	 */
	count = 3 * MSK_RX_RING_CNT + MSK_TX_RING_CNT;
	count = imin(4096, roundup2(count, 1024));
	sc->msk_stat_count = count;
	stat_sz = count * sizeof(struct msk_stat_desc);
	error = bus_dma_tag_create(
		    bus_get_dma_tag(sc->msk_dev),	/* parent */
		    MSK_STAT_ALIGN, 0,		/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    stat_sz,			/* maxsize */
		    1,				/* nsegments */
		    stat_sz,			/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->msk_stat_tag);
	if (error != 0) {
		device_printf(sc->msk_dev,
		    "failed to create status DMA tag\n");
		return (error);
	}

	/* Allocate DMA'able memory and load the DMA map for status ring. */
	error = bus_dmamem_alloc(sc->msk_stat_tag,
	    (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
	    BUS_DMA_ZERO, &sc->msk_stat_map);
	if (error != 0) {
		device_printf(sc->msk_dev,
		    "failed to allocate DMA'able memory for status ring\n");
		return (error);
	}

	ctx.msk_busaddr = 0;
	error = bus_dmamap_load(sc->msk_stat_tag, sc->msk_stat_map,
	    sc->msk_stat_ring, stat_sz, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->msk_dev,
		    "failed to load DMA'able memory for status ring\n");
		return (error);
	}
	sc->msk_stat_ring_paddr = ctx.msk_busaddr;

	return (0);
}
/*
 * Tear down the status LE ring in the reverse order of
 * msk_status_dma_alloc(): unload the map, free the memory, destroy
 * the tag.  Safe to call with a partially-allocated (or never
 * allocated) status ring.
 */
static void
msk_status_dma_free(struct msk_softc *sc)
{

	/* Nothing was allocated if the tag was never created. */
	if (sc->msk_stat_tag == NULL)
		return;
	if (sc->msk_stat_ring_paddr != 0) {
		bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
		sc->msk_stat_ring_paddr = 0;
	}
	if (sc->msk_stat_ring != NULL) {
		bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring,
		    sc->msk_stat_map);
		sc->msk_stat_ring = NULL;
	}
	bus_dma_tag_destroy(sc->msk_stat_tag);
	sc->msk_stat_tag = NULL;
}
/*
 * Allocate all per-port Tx/Rx DMA resources for standard (non-jumbo)
 * MTU: a parent tag, ring tags/memory/maps for the Tx and Rx
 * descriptor rings, buffer tags, and one dmamap per ring slot plus a
 * spare Rx map.  Returns 0 or an errno; on error the caller unwinds
 * via msk_txrx_dma_free().
 */
static int
msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
{
	struct msk_dmamap_arg ctx;
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
	bus_size_t rxalign;
	int error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
		    bus_get_dma_tag(sc_if->msk_if_dev),	/* parent */
		    1, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
		    0,				/* nsegments */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->msk_cdata.msk_parent_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create parent DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MSK_TX_RING_SZ,		/* maxsize */
		    1,				/* nsegments */
		    MSK_TX_RING_SZ,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->msk_cdata.msk_tx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MSK_RX_RING_SZ,		/* maxsize */
		    1,				/* nsegments */
		    MSK_RX_RING_SZ,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->msk_cdata.msk_rx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
		    1, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MSK_TSO_MAXSIZE,		/* maxsize */
		    MSK_MAXTXSEGS,		/* nsegments */
		    MSK_TSO_MAXSGSIZE,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->msk_cdata.msk_tx_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create Tx DMA tag\n");
		goto fail;
	}

	rxalign = 1;
	/*
	 * Workaround hardware hang which seems to happen when Rx buffer
	 * is not aligned on multiple of FIFO word(8 bytes).
	 */
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
		rxalign = MSK_RX_BUF_ALIGN;
	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
		    rxalign, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MCLBYTES,			/* maxsize */
		    1,				/* nsegments */
		    MCLBYTES,			/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->msk_cdata.msk_rx_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
	    (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.msk_busaddr = 0;
	error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
	    MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
	    (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.msk_busaddr = 0;
	error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
	    sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
	    MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc_if->msk_if_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
	    &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc_if->msk_if_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}
/*
 * Allocate the jumbo-frame Rx DMA resources (ring tag/memory/map,
 * MJUM9BYTES buffer tag, per-slot dmamaps and a spare map).  If
 * jumbo support is disabled or any allocation fails, clears
 * MSK_FLAG_JUMBO and returns; failure here is not fatal to attach.
 */
static int
msk_rx_dma_jalloc(struct msk_if_softc *sc_if)
{
	struct msk_dmamap_arg ctx;
	struct msk_rxdesc *jrxd;
	bus_size_t rxalign;
	int error, i;

	if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
		sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
		device_printf(sc_if->msk_if_dev,
		    "disabling jumbo frame support\n");
		return (0);
	}
	/* Create tag for jumbo Rx ring. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
		    MSK_RING_ALIGN, 0,		/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MSK_JUMBO_RX_RING_SZ,	/* maxsize */
		    1,				/* nsegments */
		    MSK_JUMBO_RX_RING_SZ,	/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create jumbo Rx ring DMA tag\n");
		goto jumbo_fail;
	}

	rxalign = 1;
	/*
	 * Workaround hardware hang which seems to happen when Rx buffer
	 * is not aligned on multiple of FIFO word(8 bytes).
	 */
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
		rxalign = MSK_RX_BUF_ALIGN;
	/* Create tag for jumbo Rx buffers. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
		    rxalign, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MJUM9BYTES,			/* maxsize */
		    1,				/* nsegments */
		    MJUM9BYTES,			/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->msk_cdata.msk_jumbo_rx_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create jumbo Rx DMA tag\n");
		goto jumbo_fail;
	}

	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to allocate DMA'able memory for jumbo Rx ring\n");
		goto jumbo_fail;
	}

	ctx.msk_busaddr = 0;
	error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
	    sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
	    msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to load DMA'able memory for jumbo Rx ring\n");
		goto jumbo_fail;
	}
	sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;

	/* Create DMA maps for jumbo Rx buffers. */
	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
	    &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create spare jumbo Rx dmamap\n");
		goto jumbo_fail;
	}
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
		jrxd->rx_m = NULL;
		jrxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
		    &jrxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc_if->msk_if_dev,
			    "failed to create jumbo Rx dmamap\n");
			goto jumbo_fail;
		}
	}

	return (0);

jumbo_fail:
	/* Fall back to standard MTU operation rather than failing attach. */
	msk_rx_dma_jfree(sc_if);
	device_printf(sc_if->msk_if_dev, "disabling jumbo frame support "
	    "due to resource shortage\n");
	sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
	return (error);
}
static void
msk_txrx_dma_free(struct msk_if_softc *sc_if)
{
struct msk_txdesc *txd;
struct msk_rxdesc *rxd;
int i;
/* Tx ring. */
if (sc_if->msk_cdata.msk_tx_ring_tag) {
if (sc_if->msk_rdata.msk_tx_ring_paddr)
bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
sc_if->msk_cdata.msk_tx_ring_map);
if (sc_if->msk_rdata.msk_tx_ring)
bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
sc_if->msk_rdata.msk_tx_ring,
sc_if->msk_cdata.msk_tx_ring_map);
sc_if->msk_rdata.msk_tx_ring = NULL;
sc_if->msk_rdata.msk_tx_ring_paddr = 0;
bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
sc_if->msk_cdata.msk_tx_ring_tag = NULL;
}
/* Rx ring. */
if (sc_if->msk_cdata.msk_rx_ring_tag) {
if (sc_if->msk_rdata.msk_rx_ring_paddr)
bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
sc_if->msk_cdata.msk_rx_ring_map);
if (sc_if->msk_rdata.msk_rx_ring)
bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
sc_if->msk_rdata.msk_rx_ring,
sc_if->msk_cdata.msk_rx_ring_map);
sc_if->msk_rdata.msk_rx_ring = NULL;
sc_if->msk_rdata.msk_rx_ring_paddr = 0;
bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
sc_if->msk_cdata.msk_rx_ring_tag = NULL;
}
/* Tx buffers. */
if (sc_if->msk_cdata.msk_tx_tag) {
for (i = 0; i < MSK_TX_RING_CNT; i++) {
txd = &sc_if->msk_cdata.msk_txdesc[i];
if (txd->tx_dmamap) {
bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
txd->tx_dmamap);
txd->tx_dmamap = NULL;
}
}
bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
sc_if->msk_cdata.msk_tx_tag = NULL;
}
/* Rx buffers. */
if (sc_if->msk_cdata.msk_rx_tag) {
for (i = 0; i < MSK_RX_RING_CNT; i++) {
rxd = &sc_if->msk_cdata.msk_rxdesc[i];
if (rxd->rx_dmamap) {
bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
rxd->rx_dmamap);
rxd->rx_dmamap = NULL;
}
}
if (sc_if->msk_cdata.msk_rx_sparemap) {
bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
sc_if->msk_cdata.msk_rx_sparemap);
sc_if->msk_cdata.msk_rx_sparemap = 0;
}
bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
sc_if->msk_cdata.msk_rx_tag = NULL;
}
if (sc_if->msk_cdata.msk_parent_tag) {
bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
sc_if->msk_cdata.msk_parent_tag = NULL;
}
}
/*
 * Release the jumbo Rx DMA resources allocated by
 * msk_rx_dma_jalloc(): unload/free the jumbo descriptor ring,
 * destroy per-buffer dmamaps and the spare map, then destroy the
 * tags.  Safe on partially allocated state; also used as the
 * error-unwind path of msk_rx_dma_jalloc().
 */
static void
msk_rx_dma_jfree(struct msk_if_softc *sc_if)
{
	struct msk_rxdesc *jrxd;
	int i;

	/* Jumbo Rx ring. */
	if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
		if (sc_if->msk_rdata.msk_jumbo_rx_ring_paddr)
			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
		if (sc_if->msk_rdata.msk_jumbo_rx_ring)
			bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
			    sc_if->msk_rdata.msk_jumbo_rx_ring,
			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
		sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
		sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
		sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
	}
	/* Jumbo Rx buffers. */
	if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
		for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
			jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
			if (jrxd->rx_dmamap) {
				bus_dmamap_destroy(
				    sc_if->msk_cdata.msk_jumbo_rx_tag,
				    jrxd->rx_dmamap);
				jrxd->rx_dmamap = NULL;
			}
		}
		if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
			bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    sc_if->msk_cdata.msk_jumbo_rx_sparemap);
			/* bus_dmamap_t is a pointer; use NULL, not 0. */
			sc_if->msk_cdata.msk_jumbo_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
		sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
	}
}
/*
 * Encapsulate an mbuf chain into the Tx descriptor ring.  Builds the
 * optional TSO/MSS, VLAN-tag and checksum-control list elements
 * followed by one OP_PACKET/OP_BUFFER element per DMA segment, then
 * hands ownership of the first element to the hardware.  On success
 * the mbuf in *m_head is owned by the driver until the Tx completion
 * path reclaims it; on failure *m_head may have been freed and set
 * to NULL.  Returns 0 or an errno (ENOBUFS on mbuf/ring shortage).
 */
static int
msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
{
	struct msk_txdesc *txd, *txd_last;
	struct msk_tx_desc *tx_le;
	struct mbuf *m;
	bus_dmamap_t map;
	bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
	uint32_t control, csum, prod, si;
	uint16_t offset, tcp_offset, tso_mtu;
	int error, i, nseg, tso;

	MSK_IF_LOCK_ASSERT(sc_if);

	tcp_offset = offset = 0;
	m = *m_head;
	if (((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
	    (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) ||
	    ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
	    (m->m_pkthdr.csum_flags & CSUM_TSO) != 0)) {
		/*
		 * Since mbuf has no protocol specific structure information
		 * in it we have to inspect protocol information here to
		 * setup TSO and checksum offload. I don't know why Marvell
		 * made a such decision in chip design because other GigE
		 * hardwares normally takes care of all these chores in
		 * hardware. However, TSO performance of Yukon II is very
		 * good such that it's worth to implement it.
		 */
		struct ether_header *eh;
		struct ip *ip;
		struct tcphdr *tcp;

		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_NOWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}

		/* Walk the headers: Ethernet, optional VLAN, IP, TCP. */
		offset = sizeof(struct ether_header);
		m = m_pullup(m, offset);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/* Check if hardware VLAN insertion is off. */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			offset = sizeof(struct ether_vlan_header);
			m = m_pullup(m, offset);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, offset + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + offset);
		offset += (ip->ip_hl << 2);
		tcp_offset = offset;
		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
			m = m_pullup(m, offset + sizeof(struct tcphdr));
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			tcp = (struct tcphdr *)(mtod(m, char *) + offset);
			offset += (tcp->th_off << 2);
		} else if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
		    (m->m_pkthdr.len < MSK_MIN_FRAMELEN) &&
		    (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
			/*
			 * It seems that Yukon II has Tx checksum offload bug
			 * for small TCP packets that's less than 60 bytes in
			 * size (e.g. TCP window probe packet, pure ACK packet).
			 * Common work around like padding with zeros to make
			 * the frame minimum ethernet frame size didn't work at
			 * all.
			 * Instead of disabling checksum offload completely we
			 * resort to S/W checksum routine when we encounter
			 * short TCP frames.
			 * Short UDP packets appear to be handled correctly by
			 * Yukon II. Also I assume this bug does not happen on
			 * controllers that use newer descriptor format or
			 * automatic Tx checksum calculation.
			 */
			m = m_pullup(m, offset + sizeof(struct tcphdr));
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*(uint16_t *)(m->m_data + offset +
			    m->m_pkthdr.csum_data) = in_cksum_skip(m,
			    m->m_pkthdr.len, offset);
			m->m_pkthdr.csum_flags &= ~CSUM_TCP;
		}
		*m_head = m;
	}

	prod = sc_if->msk_cdata.msk_tx_prod;
	txd = &sc_if->msk_cdata.msk_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;
	error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
	    *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments; collapse the chain and retry once. */
		m = m_collapse(*m_head, M_NOWAIT, MSK_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
		    map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nseg == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
		return (ENOBUFS);
	}

	control = 0;
	tso = 0;
	tx_le = NULL;

	/* Check TSO support. */
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
			tso_mtu = m->m_pkthdr.tso_segsz;
		else
			tso_mtu = offset + m->m_pkthdr.tso_segsz;
		/* Emit an MSS element only when the value changed. */
		if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
			tx_le->msk_addr = htole32(tso_mtu);
			if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
				tx_le->msk_control = htole32(OP_MSS | HW_OWNER);
			else
				tx_le->msk_control =
				    htole32(OP_LRGLEN | HW_OWNER);
			sc_if->msk_cdata.msk_tx_cnt++;
			MSK_INC(prod, MSK_TX_RING_CNT);
			sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
		}
		tso++;
	}
	/* Check if we have a VLAN tag to insert. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		if (tx_le == NULL) {
			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
			tx_le->msk_addr = htole32(0);
			tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
			    htons(m->m_pkthdr.ether_vtag));
			sc_if->msk_cdata.msk_tx_cnt++;
			MSK_INC(prod, MSK_TX_RING_CNT);
		} else {
			/* Piggyback the tag on the TSO/MSS element. */
			tx_le->msk_control |= htole32(OP_VLAN |
			    htons(m->m_pkthdr.ether_vtag));
		}
		control |= INS_VLAN;
	}
	/* Check if we have to handle checksum offload. */
	if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
		if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) != 0)
			control |= CALSUM;
		else {
			control |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
			if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
				control |= UDPTCP;
			/* Checksum write position. */
			csum = (tcp_offset + m->m_pkthdr.csum_data) & 0xffff;
			/* Checksum start position. */
			csum |= (uint32_t)tcp_offset << 16;
			/* Emit a checksum element only when it changed. */
			if (csum != sc_if->msk_cdata.msk_last_csum) {
				tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
				tx_le->msk_addr = htole32(csum);
				tx_le->msk_control = htole32(1 << 16 |
				    (OP_TCPLISW | HW_OWNER));
				sc_if->msk_cdata.msk_tx_cnt++;
				MSK_INC(prod, MSK_TX_RING_CNT);
				sc_if->msk_cdata.msk_last_csum = csum;
			}
		}
	}

#ifdef MSK_64BIT_DMA
	/* Emit an OP_ADDR64 element when the high 32 bits change. */
	if (MSK_ADDR_HI(txsegs[0].ds_addr) !=
	    sc_if->msk_cdata.msk_tx_high_addr) {
		sc_if->msk_cdata.msk_tx_high_addr =
		    MSK_ADDR_HI(txsegs[0].ds_addr);
		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
		tx_le->msk_addr = htole32(MSK_ADDR_HI(txsegs[0].ds_addr));
		tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
		sc_if->msk_cdata.msk_tx_cnt++;
		MSK_INC(prod, MSK_TX_RING_CNT);
	}
#endif
	/*
	 * First data element: HW_OWNER is deliberately withheld here
	 * and set last, after the whole chain is built.
	 */
	si = prod;
	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
	tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
	if (tso == 0)
		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
		    OP_PACKET);
	else
		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
		    OP_LARGESEND);
	sc_if->msk_cdata.msk_tx_cnt++;
	MSK_INC(prod, MSK_TX_RING_CNT);

	for (i = 1; i < nseg; i++) {
		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
#ifdef MSK_64BIT_DMA
		if (MSK_ADDR_HI(txsegs[i].ds_addr) !=
		    sc_if->msk_cdata.msk_tx_high_addr) {
			sc_if->msk_cdata.msk_tx_high_addr =
			    MSK_ADDR_HI(txsegs[i].ds_addr);
			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
			tx_le->msk_addr =
			    htole32(MSK_ADDR_HI(txsegs[i].ds_addr));
			tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
			sc_if->msk_cdata.msk_tx_cnt++;
			MSK_INC(prod, MSK_TX_RING_CNT);
			tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
		}
#endif
		tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
		tx_le->msk_control = htole32(txsegs[i].ds_len | control |
		    OP_BUFFER | HW_OWNER);
		sc_if->msk_cdata.msk_tx_cnt++;
		MSK_INC(prod, MSK_TX_RING_CNT);
	}
	/* Update producer index. */
	sc_if->msk_cdata.msk_tx_prod = prod;

	/* Set EOP on the last descriptor. */
	prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
	tx_le->msk_control |= htole32(EOP);

	/* Turn the first descriptor ownership to hardware. */
	tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
	tx_le->msk_control |= htole32(HW_OWNER);

	/* Swap dmamaps so the last descriptor owns the loaded map. */
	txd = &sc_if->msk_cdata.msk_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_cdata.msk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
/*
 * ifnet transmit-start entry point: take the per-port lock and
 * defer to the locked worker.
 */
static void
msk_start(if_t ifp)
{
	struct msk_if_softc *sc;

	sc = if_getsoftc(ifp);
	MSK_IF_LOCK(sc);
	msk_start_locked(ifp);
	MSK_IF_UNLOCK(sc);
}
/*
 * Locked transmit worker: drain the interface send queue into the Tx
 * ring via msk_encap() while descriptors remain, then kick the
 * prefetch unit's put index once and arm the watchdog.  Caller must
 * hold the per-port lock.
 */
static void
msk_start_locked(if_t ifp)
{
	struct msk_if_softc *sc_if;
	struct mbuf *m_head;
	int enq;

	sc_if = if_getsoftc(ifp);
	MSK_IF_LOCK_ASSERT(sc_if);

	/* Bail unless running, not stalled, and link is up. */
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc_if->msk_flags & MSK_FLAG_LINK) == 0)
		return;

	for (enq = 0; !if_sendq_empty(ifp) &&
	    sc_if->msk_cdata.msk_tx_cnt <
	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (msk_encap(sc_if, &m_head) != 0) {
			/* NULL m_head means msk_encap() already freed it. */
			if (m_head == NULL)
				break;
			if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Transmit */
		CSR_WRITE_2(sc_if->msk_softc,
		    Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
		    sc_if->msk_cdata.msk_tx_prod);

		/* Set a timeout in case the chip goes out to lunch. */
		sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT;
	}
}
/*
 * Tx watchdog, ticked once per second from msk_tick().  The timer is
 * armed in msk_start_locked() and cleared in msk_txeof() when the Tx
 * ring drains; if it expires here the transmitter is considered
 * wedged and the interface is reinitialized.
 */
static void
msk_watchdog(struct msk_if_softc *sc_if)
{
	if_t ifp;

	MSK_IF_LOCK_ASSERT(sc_if);

	/* Timer not armed, or armed and not yet expired. */
	if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer)
		return;
	ifp = sc_if->msk_ifp;
	if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
		/* Timeout with no link: count it, but only log verbosely. */
		if (bootverbose)
			if_printf(sc_if->msk_ifp, "watchdog timeout "
			    "(missed link)\n");
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		/* Clear RUNNING so msk_init_locked() does a full reinit. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		msk_init_locked(sc_if);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	msk_init_locked(sc_if);
	/* Restart transmission of anything still queued. */
	if (!if_sendq_empty(ifp))
		msk_start_locked(ifp);
}
/*
 * Shutdown handler: stop every running port, then hold the chip in
 * reset.
 */
static int
mskc_shutdown(device_t dev)
{
	struct msk_softc *sc = device_get_softc(dev);
	int port;

	MSK_LOCK(sc);
	for (port = 0; port < sc->msk_num_port; port++) {
		struct msk_if_softc *sc_if = sc->msk_if[port];

		if (sc_if == NULL || sc_if->msk_ifp == NULL)
			continue;
		if ((if_getdrvflags(sc_if->msk_ifp) & IFF_DRV_RUNNING) != 0)
			msk_stop(sc_if);
	}
	MSK_UNLOCK(sc);

	/* Put hardware reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	return (0);
}
/*
 * Suspend handler: stop all running ports, mask every interrupt
 * source, power down the PHY and hold the chip in reset until
 * mskc_resume() brings it back.
 */
static int
mskc_suspend(device_t dev)
{
	struct msk_softc *sc = device_get_softc(dev);
	int port;

	MSK_LOCK(sc);
	for (port = 0; port < sc->msk_num_port; port++) {
		struct msk_if_softc *sc_if = sc->msk_if[port];

		if (sc_if == NULL || sc_if->msk_ifp == NULL)
			continue;
		if ((if_getdrvflags(sc_if->msk_ifp) & IFF_DRV_RUNNING) != 0)
			msk_stop(sc_if);
	}

	/* Disable all interrupts; dummy reads ensure the writes land. */
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);

	msk_phy_power(sc, MSK_PHY_POWERDOWN);

	/* Put hardware reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	sc->msk_pflags |= MSK_FLAG_SUSPEND;
	MSK_UNLOCK(sc);

	return (0);
}
/*
 * Resume handler: reset the controller and reinitialize any port
 * that was administratively UP before suspend.
 */
static int
mskc_resume(device_t dev)
{
	struct msk_softc *sc = device_get_softc(dev);
	int port;

	MSK_LOCK(sc);
	CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
	mskc_reset(sc);
	for (port = 0; port < sc->msk_num_port; port++) {
		struct msk_if_softc *sc_if = sc->msk_if[port];

		if (sc_if == NULL || sc_if->msk_ifp == NULL)
			continue;
		if ((if_getflags(sc_if->msk_ifp) & IFF_UP) != 0) {
			/*
			 * Clear RUNNING first: msk_init_locked() returns
			 * early for an already-running interface.
			 */
			if_setdrvflagbits(sc_if->msk_ifp, 0,
			    IFF_DRV_RUNNING);
			msk_init_locked(sc_if);
		}
	}
	sc->msk_pflags &= ~MSK_FLAG_SUSPEND;
	MSK_UNLOCK(sc);

	return (0);
}
#ifndef __NO_STRICT_ALIGNMENT
/*
 * Realign a received frame on strict-alignment machines: slide the
 * payload back by three 16-bit words so the IP header lands on a
 * 32-bit boundary, then adjust m_data by the matching byte count
 * (MSK_RX_BUF_ALIGN - ETHER_ALIGN must equal those 6 bytes).
 */
static __inline void
msk_fixup_rx(struct mbuf *m)
{
	uint16_t *from, *to;
	int nwords;

	from = mtod(m, uint16_t *);
	to = from - 3;
	nwords = m->m_len / sizeof(uint16_t) + 1;
	while (nwords-- > 0)
		*to++ = *from++;
	m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN);
}
#endif
/*
 * Post-process hardware Rx checksum results for one received frame,
 * setting m_pkthdr.csum_flags/csum_data accordingly.  DESCV2
 * controllers report ready-made status bits in `control'; older
 * controllers return raw 16-bit one's-complement sums in
 * sc_if->msk_csum which must be validated and fixed up in software.
 */
static __inline void
msk_rxcsum(struct msk_if_softc *sc_if, uint32_t control, struct mbuf *m)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	int32_t hlen, len, pktlen, temp32;
	uint16_t csum, *opts;

	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) {
		/* Non-fragmented IPv4 only. */
		if ((control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((control & CSS_IPV4_CSUM_OK) != 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
			    (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}
		return;
	}
	/*
	 * Marvell Yukon controllers that support OP_RXCHKS has known
	 * to have various Rx checksum offloading bugs. These
	 * controllers can be configured to compute simple checksum
	 * at two different positions. So we can compute IP and TCP/UDP
	 * checksum at the same time. We intentionally have controller
	 * compute TCP/UDP checksum twice by specifying the same
	 * checksum start position and compare the result. If the value
	 * is different it would indicate the hardware logic was wrong.
	 */
	if ((sc_if->msk_csum & 0xFFFF) != (sc_if->msk_csum >> 16)) {
		if (bootverbose)
			device_printf(sc_if->msk_if_dev,
			    "Rx checksum value mismatch!\n");
		return;
	}
	/*
	 * Software validation: only plain (non-fragmented) IPv4 TCP/UDP
	 * frames with a consistent length can be fixed up below; bail
	 * out silently on anything else.
	 */
	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* can't handle fragmented packet. */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((caddr_t)ip + hlen);
		if (uh->uh_sum == 0)
			return; /* no checksum */
		break;
	default:
		return;
	}
	/* The hardware sum arrives byte-swapped relative to host use. */
	csum = bswap16(sc_if->msk_csum & 0xFFFF);
	/*
	 * Checksum fixup for IP options: subtract each option word from
	 * the one's-complement sum, folding the carry back in.
	 */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (uint16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(uint16_t), opts++) {
			temp32 = csum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			csum = temp32 & 65535;
		}
	}
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = csum;
}
/*
 * Handle one OP_RXSTAT event on the standard Rx ring: validate the
 * frame the chip reported, replace the ring buffer and hand the mbuf
 * to the stack.  `status' holds the GMAC frame status (length in the
 * upper 16 bits); `len' is the length from the status LE.  The
 * do { } while (0) wrapper lets error paths `break' to the common
 * consumer/producer index advance at the bottom.
 */
static void
msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
    int len)
{
	struct mbuf *m;
	if_t ifp;
	struct msk_rxdesc *rxd;
	int cons, rxlen;

	ifp = sc_if->msk_ifp;

	MSK_IF_LOCK_ASSERT(sc_if);

	cons = sc_if->msk_cdata.msk_rx_cons;
	do {
		rxlen = status >> 16;
		/* The chip strips the VLAN tag from the counted length. */
		if ((status & GMR_FS_VLAN) != 0 &&
		    (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
			rxlen -= ETHER_VLAN_ENCAP_LEN;
		if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) {
			/*
			 * For controllers that returns bogus status code
			 * just do minimal check and let upper stack
			 * handle this frame.
			 */
			if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				msk_discard_rxbuf(sc_if, cons);
				break;
			}
		} else if (len > sc_if->msk_framesize ||
		    ((status & GMR_FS_ANY_ERR) != 0) ||
		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
			/* Don't count flow-control packet as errors. */
			if ((status & GMR_FS_GOOD_FC) == 0)
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			msk_discard_rxbuf(sc_if, cons);
			break;
		}
#ifdef MSK_64BIT_DMA
		/*
		 * NOTE(review): with 64-bit DMA the software descriptor
		 * holding the mbuf apparently sits at cons + 1 (each
		 * buffer consumes an extra address LE) — confirm against
		 * the ring-setup code.
		 */
		rxd = &sc_if->msk_cdata.msk_rxdesc[(cons + 1) %
		    MSK_RX_RING_CNT];
#else
		rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
#endif
		m = rxd->rx_m;
		if (msk_newbuf(sc_if, cons) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			/* Reuse old buffer. */
			msk_discard_rxbuf(sc_if, cons);
			break;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
#ifndef __NO_STRICT_ALIGNMENT
		if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
			msk_fixup_rx(m);
#endif
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
			msk_rxcsum(sc_if, control, m);
		/* Check for VLAN tagged packets. */
		if ((status & GMR_FS_VLAN) != 0 &&
		    (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
			m->m_flags |= M_VLANTAG;
		}
		/* Drop the driver lock across the stack input call. */
		MSK_IF_UNLOCK(sc_if);
		if_input(ifp, m);
		MSK_IF_LOCK(sc_if);
	} while (0);

	MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
	MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
}
/*
 * Jumbo-ring counterpart of msk_rxeof(): handle one OP_RXSTAT event,
 * validate the frame, refill the ring and pass the mbuf up.  Same
 * break-to-common-exit structure as msk_rxeof().
 */
static void
msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
    int len)
{
	struct mbuf *m;
	if_t ifp;
	struct msk_rxdesc *jrxd;
	int cons, rxlen;

	ifp = sc_if->msk_ifp;

	MSK_IF_LOCK_ASSERT(sc_if);

	cons = sc_if->msk_cdata.msk_rx_cons;
	do {
		rxlen = status >> 16;
		/* The chip strips the VLAN tag from the counted length. */
		if ((status & GMR_FS_VLAN) != 0 &&
		    (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
			rxlen -= ETHER_VLAN_ENCAP_LEN;
		if (len > sc_if->msk_framesize ||
		    ((status & GMR_FS_ANY_ERR) != 0) ||
		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
			/* Don't count flow-control packet as errors. */
			if ((status & GMR_FS_GOOD_FC) == 0)
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			msk_discard_jumbo_rxbuf(sc_if, cons);
			break;
		}
#ifdef MSK_64BIT_DMA
		/*
		 * NOTE(review): with 64-bit DMA the software descriptor
		 * apparently sits at cons + 1 (extra address LE per
		 * buffer) — confirm against the ring-setup code.
		 */
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[(cons + 1) %
		    MSK_JUMBO_RX_RING_CNT];
#else
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
#endif
		m = jrxd->rx_m;
		if (msk_jumbo_newbuf(sc_if, cons) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			/* Reuse old buffer. */
			msk_discard_jumbo_rxbuf(sc_if, cons);
			break;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
#ifndef __NO_STRICT_ALIGNMENT
		if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
			msk_fixup_rx(m);
#endif
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
			msk_rxcsum(sc_if, control, m);
		/* Check for VLAN tagged packets. */
		if ((status & GMR_FS_VLAN) != 0 &&
		    (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
			m->m_flags |= M_VLANTAG;
		}
		/* Drop the driver lock across the stack input call. */
		MSK_IF_UNLOCK(sc_if);
		if_input(ifp, m);
		MSK_IF_LOCK(sc_if);
	} while (0);

	MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
	MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
}
/*
 * Reclaim transmit descriptors up to `idx', the consumer index the
 * chip reported via an OP_TXINDEXLE status LE.  Frees the mbuf of
 * every completed frame (attached only to the EOP descriptor) and
 * disarms the watchdog when the ring is fully drained.
 */
static void
msk_txeof(struct msk_if_softc *sc_if, int idx)
{
	struct msk_txdesc *txd;
	struct msk_tx_desc *cur_tx;
	if_t ifp;
	uint32_t control;
	int cons, prog;

	MSK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->msk_ifp;

	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_cdata.msk_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	cons = sc_if->msk_cdata.msk_tx_cons;
	prog = 0;
	for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
		if (sc_if->msk_cdata.msk_tx_cnt <= 0)
			break;
		prog++;
		cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
		control = le32toh(cur_tx->msk_control);
		sc_if->msk_cdata.msk_tx_cnt--;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		/* Only the EOP descriptor of a frame carries the mbuf. */
		if ((control & EOP) == 0)
			continue;
		txd = &sc_if->msk_cdata.msk_txdesc[cons];
		bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
		    __func__));
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	if (prog > 0) {
		sc_if->msk_cdata.msk_tx_cons = cons;
		if (sc_if->msk_cdata.msk_tx_cnt == 0)
			sc_if->msk_watchdog_timer = 0;
		/* No need to sync LEs as we didn't update LEs. */
	}
}
/*
 * Once-per-second callout: drive the MII state machine, re-check link
 * while it is down, process pending status LEs and run the Tx
 * watchdog.  The lock assertion implies the callout runs with the
 * per-interface lock held.
 */
static void
msk_tick(void *xsc_if)
{
	struct epoch_tracker et;
	struct msk_if_softc *sc_if;
	struct mii_data *mii;

	sc_if = xsc_if;
	MSK_IF_LOCK_ASSERT(sc_if);

	mii = device_get_softc(sc_if->msk_miibus);
	mii_tick(mii);
	/* While no link is recorded, poll for a link-state change. */
	if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0)
		msk_miibus_statchg(sc_if->msk_if_dev);
	/* Event processing may call if_input(), hence the net epoch. */
	NET_EPOCH_ENTER(et);
	msk_handle_events(sc_if->msk_softc);
	NET_EPOCH_EXIT(et);
	msk_watchdog(sc_if);
	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
}
/*
 * PHY interrupt: read the Marvell PHY interrupt status register and
 * report FIFO underrun/overflow.  The register is read twice; the
 * first read presumably clears latched status — TODO confirm against
 * the PHY datasheet.
 */
static void
msk_intr_phy(struct msk_if_softc *sc_if)
{
	uint16_t phystat;

	msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
	phystat = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);

	if ((phystat & PHY_M_IS_FIFO_ERROR) != 0)
		device_printf(sc_if->msk_if_dev,
		    "PHY FIFO underrun/overflow.\n");
}
/*
 * GMAC (MAC-level) interrupt for one port: acknowledge Rx FIFO
 * overruns and Tx FIFO underruns.
 */
static void
msk_intr_gmac(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	uint8_t status;

	sc = sc_if->msk_softc;
	status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));

	/* GMAC Rx FIFO overrun. */
	if ((status & GM_IS_RX_FF_OR) != 0)
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    GMF_CLI_RX_FO);
	/* GMAC Tx FIFO underrun. */
	if ((status & GM_IS_TX_FF_UR) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    GMF_CLI_TX_FU);
		device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
		/*
		 * XXX
		 * In case of Tx underrun, we may need to flush/reset
		 * Tx MAC but that would also require resynchronization
		 * with status LEs. Reinitializing status LEs would
		 * affect other port in dual MAC configuration so it
		 * should be avoided as possible as we can.
		 * Due to lack of documentation it's all vague guess but
		 * it needs more investigation.
		 */
	}
}
/*
 * Decode and acknowledge per-port hardware error bits.  For port B
 * the caller shifts `status' right by 8 first, so the same Y2_IS_*1
 * masks apply to either port here.
 */
static void
msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((status & Y2_IS_PAR_RD1) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "RAM buffer read parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
		    RI_CLR_RD_PERR);
	}
	if ((status & Y2_IS_PAR_WR1) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "RAM buffer write parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
		    RI_CLR_WR_PERR);
	}
	if ((status & Y2_IS_PAR_MAC1) != 0) {
		device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    GMF_CLI_TX_PE);
	}
	if ((status & Y2_IS_PAR_RX1) != 0) {
		device_printf(sc_if->msk_if_dev, "Rx parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
	}
	if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
		device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
	}
}
/*
 * Top-level hardware-error interrupt: handle chip-wide sources
 * (timestamp overflow, PCI/PCIe errors) then dispatch per-port error
 * bits to msk_handle_hwerr().
 */
static void
msk_intr_hwerr(struct msk_softc *sc)
{
	uint32_t status;
	uint32_t tlphead[4];

	status = CSR_READ_4(sc, B0_HWE_ISRC);
	/* Time Stamp timer overflow. */
	if ((status & Y2_IS_TIST_OV) != 0)
		CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
	if ((status & Y2_IS_PCI_NEXP) != 0) {
		/*
		 * PCI Express Error occurred which is not described in PEX
		 * spec.
		 * This error is also mapped either to Master Abort(
		 * Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
		 * can only be cleared there.
		 */
		device_printf(sc->msk_dev,
		    "PCI Express protocol violation error\n");
	}

	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
		uint16_t v16;

		if ((status & Y2_IS_MST_ERR) != 0)
			device_printf(sc->msk_dev,
			    "unexpected IRQ Status error\n");
		else
			device_printf(sc->msk_dev,
			    "unexpected IRQ Master error\n");
		/* Reset all bits in the PCI status register. */
		v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
		/* Config writes must be bracketed by TST_CFG_WRITE_ON/OFF. */
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
		    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
		    PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	/* Check for PCI Express Uncorrectable Error. */
	if ((status & Y2_IS_PCI_EXP) != 0) {
		uint32_t v32;

		/*
		 * On PCI Express bus bridges are called root complexes (RC).
		 * PCI Express errors are recognized by the root complex too,
		 * which requests the system to handle the problem. After
		 * error occurrence it may be that no access to the adapter
		 * may be performed any longer.
		 */

		v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((v32 & PEX_UNSUP_REQ) != 0) {
			/* Ignore unsupported request error. */
			device_printf(sc->msk_dev,
			    "Uncorrectable PCI Express error\n");
		}
		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
			int i;

			/* Get TLP header form Log Registers. */
			for (i = 0; i < 4; i++)
				tlphead[i] = CSR_PCI_READ_4(sc,
				    PEX_HEADER_LOG + i * 4);
			/* Check for vendor defined broadcast message. */
			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
				/*
				 * Real fatal error: stop listening for
				 * further PCIe error interrupts.
				 */
				sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
				CSR_WRITE_4(sc, B0_HWE_IMSK,
				    sc->msk_intrhwemask);
				CSR_READ_4(sc, B0_HWE_IMSK);
			}
		}
		/* Clear the interrupt. */
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	/* Per-port errors: port B's bits sit 8 positions higher. */
	if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
		msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
	if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
		msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
}
static __inline void
msk_rxput(struct msk_if_softc *sc_if)
{
struct msk_softc *sc;
sc = sc_if->msk_softc;
if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN))
bus_dmamap_sync(
sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
sc_if->msk_cdata.msk_jumbo_rx_ring_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
else
bus_dmamap_sync(
sc_if->msk_cdata.msk_rx_ring_tag,
sc_if->msk_cdata.msk_rx_ring_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
}
/*
 * Process the shared status LE ring.  Each LE carries an opcode
 * (Rx status/checksum/VLAN or Tx consumer index) and a port bit.
 * Returns nonzero if the chip queued more LEs while we were working,
 * in which case the caller skips clearing the status-unit IRQ.
 */
static int
msk_handle_events(struct msk_softc *sc)
{
	struct msk_if_softc *sc_if;
	int rxput[2];
	struct msk_stat_desc *sd;
	uint32_t control, status;
	int cons, len, port, rxprog;

	if (sc->msk_stat_cons == CSR_READ_2(sc, STAT_PUT_IDX))
		return (0);

	/* Sync status LEs. */
	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
	rxprog = 0;
	cons = sc->msk_stat_cons;
	for (;;) {
		sd = &sc->msk_stat_ring[cons];
		control = le32toh(sd->msk_control);
		if ((control & HW_OWNER) == 0)
			break;
		/* Return ownership of this LE to the hardware. */
		control &= ~HW_OWNER;
		sd->msk_control = htole32(control);
		status = le32toh(sd->msk_status);
		len = control & STLE_LEN_MASK;
		port = (control >> 16) & 0x01;
		sc_if = sc->msk_if[port];
		if (sc_if == NULL) {
			device_printf(sc->msk_dev, "invalid port opcode "
			    "0x%08x\n", control & STLE_OP_MASK);
			/*
			 * NOTE(review): this `continue' does not advance
			 * `cons'; since HW_OWNER was cleared above, the
			 * next iteration breaks out, so processing simply
			 * stops at this LE.
			 */
			continue;
		}

		switch (control & STLE_OP_MASK) {
		case OP_RXVLAN:
			/* VLAN tag arrives network-order in the len field. */
			sc_if->msk_vtag = ntohs(len);
			break;
		case OP_RXCHKSVLAN:
			sc_if->msk_vtag = ntohs(len);
			/* FALLTHROUGH */
		case OP_RXCHKS:
			/* Stash the raw sums for msk_rxcsum(). */
			sc_if->msk_csum = status;
			break;
		case OP_RXSTAT:
			if (!(if_getdrvflags(sc_if->msk_ifp) & IFF_DRV_RUNNING))
				break;
			if (sc_if->msk_framesize >
			    (MCLBYTES - MSK_RX_BUF_ALIGN))
				msk_jumbo_rxeof(sc_if, status, control, len);
			else
				msk_rxeof(sc_if, status, control, len);
			rxprog++;
			/*
			 * Because there is no way to sync single Rx LE
			 * put the DMA sync operation off until the end of
			 * event processing.
			 */
			rxput[port]++;
			/* Update prefetch unit if we've passed water mark. */
			if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
				msk_rxput(sc_if);
				rxput[port] = 0;
			}
			break;
		case OP_TXINDEXLE:
			/* One LE reports Tx consumer indices for both ports. */
			if (sc->msk_if[MSK_PORT_A] != NULL)
				msk_txeof(sc->msk_if[MSK_PORT_A],
				    status & STLE_TXA1_MSKL);
			if (sc->msk_if[MSK_PORT_B] != NULL)
				msk_txeof(sc->msk_if[MSK_PORT_B],
				    ((status & STLE_TXA2_MSKL) >>
				    STLE_TXA2_SHIFTL) |
				    ((len & STLE_TXA2_MSKH) <<
				    STLE_TXA2_SHIFTH));
			break;
		default:
			device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
			    control & STLE_OP_MASK);
			break;
		}
		MSK_INC(cons, sc->msk_stat_count);
		/* Bound the work done per invocation. */
		if (rxprog > sc->msk_process_limit)
			break;
	}

	sc->msk_stat_cons = cons;
	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Deferred prefetch-unit updates for any remaining Rx work. */
	if (rxput[MSK_PORT_A] > 0)
		msk_rxput(sc->msk_if[MSK_PORT_A]);
	if (rxput[MSK_PORT_B] > 0)
		msk_rxput(sc->msk_if[MSK_PORT_B]);

	return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
}
/*
 * Interrupt handler.  Reading B0_Y2_SP_ISRC2 masks further chip
 * interrupts until B0_Y2_SP_ICR is written with 2, so every exit
 * path performs that write.  A status of 0xffffffff suggests the
 * device has disappeared (e.g. hot unplug).
 */
static void
msk_intr(void *xsc)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if0, *sc_if1;
	if_t ifp0, ifp1;
	uint32_t status;
	int domore;

	sc = xsc;
	MSK_LOCK(sc);

	/* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
	status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
	if (status == 0 || status == 0xffffffff ||
	    (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
	    (status & sc->msk_intrmask) == 0) {
		/* Not ours, device gone, or suspended: unmask and leave. */
		CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
		MSK_UNLOCK(sc);
		return;
	}

	sc_if0 = sc->msk_if[MSK_PORT_A];
	sc_if1 = sc->msk_if[MSK_PORT_B];
	ifp0 = ifp1 = NULL;
	if (sc_if0 != NULL)
		ifp0 = sc_if0->msk_ifp;
	if (sc_if1 != NULL)
		ifp1 = sc_if1->msk_ifp;

	/* Per-port PHY and MAC interrupt sources. */
	if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
		msk_intr_phy(sc_if0);
	if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
		msk_intr_phy(sc_if1);
	if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
		msk_intr_gmac(sc_if0);
	if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
		msk_intr_gmac(sc_if1);
	/* Descriptor errors are fatal for the queue: mask them off. */
	if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
		device_printf(sc->msk_dev, "Rx descriptor error\n");
		sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
		CSR_READ_4(sc, B0_IMSK);
	}
	if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
		device_printf(sc->msk_dev, "Tx descriptor error\n");
		sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
		CSR_READ_4(sc, B0_IMSK);
	}
	if ((status & Y2_IS_HW_ERR) != 0)
		msk_intr_hwerr(sc);

	domore = msk_handle_events(sc);
	/* Only clear the status-unit IRQ when no events remain. */
	if ((status & Y2_IS_STAT_BMU) != 0 && domore == 0)
		CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);

	/* Reenable interrupts. */
	CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);

	/* Tx reclaim may have freed ring space: resume transmission. */
	if (ifp0 != NULL && (if_getdrvflags(ifp0) & IFF_DRV_RUNNING) != 0 &&
	    !if_sendq_empty(ifp0))
		msk_start_locked(ifp0);
	if (ifp1 != NULL && (if_getdrvflags(ifp1) & IFF_DRV_RUNNING) != 0 &&
	    !if_sendq_empty(ifp1))
		msk_start_locked(ifp1);

	MSK_UNLOCK(sc);
}
/*
 * Configure Tx GMAC FIFO store-and-forward for this port.  Yukon
 * Extreme (except rev A0) and newer chips always run with
 * store-and-forward enabled; other chips must disable it for jumbo
 * MTUs and fall back to an almost-empty threshold instead.
 */
static void
msk_set_tx_stfwd(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	if_t ifp;
	int stfwd_always;

	ifp = sc_if->msk_ifp;
	sc = sc_if->msk_softc;
	stfwd_always = (sc->msk_hw_id == CHIP_ID_YUKON_EX &&
	    sc->msk_hw_rev != CHIP_REV_YU_EX_A0) ||
	    sc->msk_hw_id >= CHIP_ID_YUKON_SUPR;
	if (stfwd_always || if_getmtu(ifp) <= ETHERMTU) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_STFW_ENA);
	} else {
		/* Set Tx GMAC FIFO Almost Empty Threshold. */
		CSR_WRITE_4(sc,
		    MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
		    MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
		/* Disable Store & Forward mode for Tx. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_STFW_DIS);
	}
}
/*
 * if_init entry point: acquire the per-interface lock around the
 * real initialization in msk_init_locked().
 */
static void
msk_init(void *xsc)
{
	struct msk_if_softc *sc = xsc;

	MSK_IF_LOCK(sc);
	msk_init_locked(sc);
	MSK_IF_UNLOCK(sc);
}
/*
 * Bring the interface up: reset and program the GMAC, MAC FIFOs, RAM
 * buffers, BMUs and prefetch units, load the station address and Rx
 * filter, initialize the Tx and Rx rings, and unmask this port's
 * interrupt sources.  No-op if the interface is already RUNNING.
 * Caller holds the per-interface lock.
 */
static void
msk_init_locked(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	if_t ifp;
	struct mii_data *mii;
	uint8_t *eaddr;
	uint16_t gmac;
	uint32_t reg;
	int error;

	MSK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->msk_ifp;
	sc = sc_if->msk_softc;
	mii = device_get_softc(sc_if->msk_miibus);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;

	error = 0;
	/* Cancel pending I/O and free all Rx/Tx buffers. */
	msk_stop(sc_if);

	/* Frame size: at least a full Ethernet MTU, plus L2 headers. */
	if (if_getmtu(ifp) < ETHERMTU)
		sc_if->msk_framesize = ETHERMTU;
	else
		sc_if->msk_framesize = if_getmtu(ifp);
	sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	if (if_getmtu(ifp) > ETHERMTU &&
	    (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
		/* This chip cannot offload checksum/TSO on jumbo frames. */
		if_sethwassistbits(ifp, 0, (MSK_CSUM_FEATURES | CSUM_TSO));
		if_setcapenablebit(ifp, 0, (IFCAP_TSO4 | IFCAP_TXCSUM));
	}

	/* GMAC Control reset. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
	    sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL),
		    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
		    GMC_BYP_RETR_ON);

	/*
	 * Initialize GMAC first such that speed/duplex/flow-control
	 * parameters are renegotiated when interface is brought up.
	 */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0);

	/* Dummy read the Interrupt Source Register. */
	CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));

	/* Clear MIB stats. */
	msk_stats_clear(sc_if);

	/* Disable FCS. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);

	/* Setup Transmit Control Register. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* Setup Transmit Flow Control Register. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);

	/* Setup Transmit Parameter Register. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
	    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
	    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));

	/* Serial mode: enable jumbo support only for jumbo MTUs. */
	gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
	if (if_getmtu(ifp) > ETHERMTU)
		gmac |= GM_SMOD_JUMBO_ENA;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);

	/* Set station address; the same MAC goes into both address slots. */
	eaddr = if_getlladdr(ifp);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L,
	    eaddr[0] | (eaddr[1] << 8));
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1M,
	    eaddr[2] | (eaddr[3] << 8));
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1H,
	    eaddr[4] | (eaddr[5] << 8));
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L,
	    eaddr[0] | (eaddr[1] << 8));
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2M,
	    eaddr[2] | (eaddr[3] << 8));
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2H,
	    eaddr[4] | (eaddr[5] << 8));

	/* Disable interrupts for counter overflows. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);

	/* Configure Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
	    sc->msk_hw_id == CHIP_ID_YUKON_EX)
		reg |= GMF_RX_OVER_ON;
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);

	/* Set receive filter. */
	msk_rxfilter(sc_if);

	if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
		/* Clear flush mask - HW bug. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0);
	} else {
		/* Flush Rx MAC FIFO on any flow control or error. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
		    GMR_FS_ANY_ERR);
	}

	/*
	 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
	 * due to hardware hang on receipt of pause frames.
	 */
	reg = RX_GMF_FL_THR_DEF + 1;
	/* Another magic for Yukon FE+ - From Linux. */
	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
		reg = 0x178;
	CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);

	/* Configure Tx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);

	/* Configure hardware VLAN tag insertion/stripping. */
	msk_setvlan(sc_if, ifp);

	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
		/* Set Rx Pause threshold. */
		CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
		    MSK_ECU_LLPP);
		CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
		    MSK_ECU_ULPP);
		/* Configure store-and-forward for Tx. */
		msk_set_tx_stfwd(sc_if);
	}

	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
		/* Disable dynamic watermark - from Linux. */
		reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
		reg &= ~0x03;
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
	}

	/*
	 * Disable Force Sync bit and Alloc bit in Tx RAM interface
	 * arbiter as we don't use Sync Tx queue.
	 */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
	    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
	/* Enable the RAM Interface Arbiter. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);

	/* Setup RAM buffer. */
	msk_set_rambuffer(sc_if);

	/* Disable Tx sync Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);

	/* Setup Tx Queue Bus Memory Interface. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
	switch (sc->msk_hw_id) {
	case CHIP_ID_YUKON_EC_U:
		if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
			/* Fix for Yukon-EC Ultra: set BMU FIFO level */
			CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL),
			    MSK_ECU_TXFF_LEV);
		}
		break;
	case CHIP_ID_YUKON_EX:
		/*
		 * Yukon Extreme seems to have silicon bug for
		 * automatic Tx checksum calculation capability.
		 */
		if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F),
			    F_TX_CHK_AUTO_OFF);
		break;
	}

	/* Setup Rx Queue Bus Memory Interface. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
	    sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
		/* MAC Rx RAM Read is controlled by hardware. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
	}

	/* Point the Tx prefetch unit at the ring; init software state. */
	msk_set_prefetch(sc, sc_if->msk_txq,
	    sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
	msk_init_tx_ring(sc_if);

	/*
	 * RSS hash is always disabled; the legacy Rx checksum unit is
	 * enabled only for non-DESCV2 chips with RXCSUM turned on.
	 */
	reg = BMU_DIS_RX_RSS_HASH;
	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
	    (if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
		reg |= BMU_ENA_RX_CHKSUM;
	else
		reg |= BMU_DIS_RX_CHKSUM;
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), reg);

	/* Jumbo or standard Rx ring, selected by the frame size. */
	if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) {
		msk_set_prefetch(sc, sc_if->msk_rxq,
		    sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
		    MSK_JUMBO_RX_RING_CNT - 1);
		error = msk_init_jumbo_rx_ring(sc_if);
	} else {
		msk_set_prefetch(sc, sc_if->msk_rxq,
		    sc_if->msk_rdata.msk_rx_ring_paddr,
		    MSK_RX_RING_CNT - 1);
		error = msk_init_rx_ring(sc_if);
	}
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "initialization failed: no memory for Rx buffers\n");
		msk_stop(sc_if);
		return;
	}
	if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
	    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
		/* Disable flushing of non-ASF packets. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    GMF_RX_MACSEC_FLUSH_OFF);
	}

	/* Configure interrupt handling. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask |= Y2_IS_PORT_A;
		sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask |= Y2_IS_PORT_B;
		sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
	}
	/* Configure IRQ moderation mask. */
	CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask);
	if (sc->msk_int_holdoff > 0) {
		/* Configure initial IRQ moderation timer value. */
		CSR_WRITE_4(sc, B2_IRQM_INI,
		    MSK_USECS(sc, sc->msk_int_holdoff));
		CSR_WRITE_4(sc, B2_IRQM_VAL,
		    MSK_USECS(sc, sc->msk_int_holdoff));
		/* Start IRQ moderation. */
		CSR_WRITE_1(sc, B2_IRQM_CTRL, TIM_START);
	}
	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
	CSR_READ_4(sc, B0_IMSK);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	/* Link state will be re-learned after mii_mediachg(). */
	sc_if->msk_flags &= ~MSK_FLAG_LINK;
	mii_mediachg(mii);

	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
}
/*
 * Program the RAM buffer (on-chip packet FIFO) boundaries and
 * flow-control thresholds for this port's Rx and Tx queues.  All
 * addresses are divided by 8 before being written (the hardware
 * apparently addresses RAM in 8-byte units).  No-op for chips
 * without a RAM buffer (MSK_FLAG_RAMBUF clear).
 */
static void
msk_set_rambuffer(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	int ltpp, utpp;

	sc = sc_if->msk_softc;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		return;

	/* Setup Rx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
	    sc->msk_rxqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	/*
	 * Upper/lower pause thresholds, measured back from the end of
	 * the queue region.
	 */
	utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
	ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
	if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
		ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
	/* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds? */

	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));

	/* Setup Tx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
	    sc->msk_txqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	/* Enable Store & Forward for Tx side. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
}
/*
 * Initialize and start a Yukon-2 list-element prefetch unit for the
 * given queue: reset it, point it at the DMA descriptor ring at 'addr'
 * whose last valid index is 'count', then turn it on.
 */
static void
msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
    uint32_t count)
{

	/* Reset the prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_CLR);
	/* Set LE base address. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
	    MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
	    MSK_ADDR_HI(addr));
	/* Set the list last index. */
	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
	    count);
	/* Turn on prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_OP_ON);
	/* Dummy read to ensure write. */
	CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
}
/*
 * Stop all traffic on one port: mask interrupts, halt the MAC and the
 * Tx/Rx BMUs, reset the FIFOs/prefetch units/RAM buffers, free any
 * mbufs still held by the DMA rings, and mark the interface down.
 * The per-interface lock must be held; the ordering of the register
 * operations below is mandated by the hardware (see the Rx-stop
 * comment mid-function) and must not be rearranged.
 */
static void
msk_stop(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
	struct msk_rxdesc *jrxd;
	if_t ifp;
	uint32_t val;
	int i;

	MSK_IF_LOCK_ASSERT(sc_if);
	sc = sc_if->msk_softc;
	ifp = sc_if->msk_ifp;

	/* Cancel the periodic tick callout and pending watchdog. */
	callout_stop(&sc_if->msk_tick_ch);
	sc_if->msk_watchdog_timer = 0;

	/* Disable interrupts. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask &= ~Y2_IS_PORT_A;
		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask &= ~Y2_IS_PORT_B;
		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
	CSR_READ_4(sc, B0_IMSK);
	/* Disable Tx/Rx MAC. */
	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
	/* Read again to ensure writing. */
	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	/* Update stats and clear counters. */
	msk_stats_update(sc_if);

	/* Stop Tx BMU. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
	/* Re-issue BMU_STOP until the unit reports stopped or idle. */
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
			    BMU_STOP);
			val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
		} else
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
	    RB_RST_SET | RB_DIS_OP_MD);

	/* Disable all GMAC interrupt. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
	/* Disable PHY interrupt. */
	msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);

	/* Disable the RAM Interface Arbiter. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);

	/* Reset the PCI FIFO of the async Tx queue */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch units. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);

	/* Reset the RAM Buffer async Tx queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);

	/* Reset Tx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
	/* Set Pause Off. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);

	/*
	 * The Rx Stop command will not work for Yukon-2 if the BMU does not
	 * reach the end of packet and since we can't make sure that we have
	 * incoming data, we must reset the BMU while it is not during a DMA
	 * transfer. Since it is possible that the Rx path is still active,
	 * the Rx RAM buffer will be stopped first, so any possible incoming
	 * data will not trigger a DMA. After the RAM buffer is stopped, the
	 * BMU is polled until any DMA in progress is ended and only then it
	 * will be reset.
	 */

	/* Disable the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
	/* Wait until the read and read-level pointers agree (DMA drained). */
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);
	/* Reset the Rx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	/* Reset the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
	/* Reset Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);

	/* Free Rx and Tx mbufs still in the queues. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
		if (jrxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap);
			m_freem(jrxd->rx_m);
			jrxd->rx_m = NULL;
		}
	}
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/*
	 * Mark the interface down.
	 */
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc_if->msk_flags &= ~MSK_FLAG_LINK;
}
/*
 * When GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading lower
 * counter clears high 16 bits of the counter such that accessing
 * lower 16 bits should be the last operation.
 *
 * Each 32-bit MIB counter is exposed as two 16-bit GMAC registers
 * (high half at y + 4, low half at y); a 64-bit counter is two such
 * 32-bit pairs (high 32 bits at y + 8).
 */
#define	MSK_READ_MIB32(x, y)					\
	((((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) +	\
	(uint32_t)GMAC_READ_2(sc, x, y))
#define	MSK_READ_MIB64(x, y)					\
	((((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) +	\
	(uint64_t)MSK_READ_MIB32(x, y))
/*
 * Zero all hardware MIB counters for this port by reading every
 * counter with the clear-on-read mode (GM_PAR_MIB_CLR) enabled.
 * Called with the per-interface lock held.
 */
static void
msk_stats_clear(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	uint16_t gmac;
	int i;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc = sc_if->msk_softc;
	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
	/* Read all MIB Counters with Clear Mode set. */
	for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += sizeof(uint32_t))
		(void)MSK_READ_MIB32(sc_if->msk_port, i);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
}
/*
 * Harvest all hardware MIB counters into the software msk_hw_stats
 * accumulators.  Counters are read with clear-on-read mode enabled,
 * so each hardware counter is zeroed as it is folded into the running
 * software total.  Called with the per-interface lock held; a no-op
 * unless the interface is running.
 */
static void
msk_stats_update(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	if_t ifp;
	struct msk_hw_stats *stats;
	uint16_t gmac;

	MSK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->msk_ifp;
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;
	sc = sc_if->msk_softc;
	stats = &sc_if->msk_stats;
	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);

	/* Rx stats. */
	stats->rx_ucast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK);
	stats->rx_bcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK);
	stats->rx_pause_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE);
	stats->rx_mcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK);
	stats->rx_crc_errs +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR);
	stats->rx_good_octets +=
	    MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO);
	stats->rx_bad_octets +=
	    MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO);
	stats->rx_runts +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT);
	stats->rx_runt_errs +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG);
	stats->rx_pkts_64 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B);
	stats->rx_pkts_65_127 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B);
	stats->rx_pkts_128_255 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B);
	stats->rx_pkts_256_511 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B);
	stats->rx_pkts_512_1023 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B);
	stats->rx_pkts_1024_1518 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B);
	stats->rx_pkts_1519_max +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ);
	stats->rx_pkts_too_long +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR);
	stats->rx_pkts_jabbers +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT);
	stats->rx_fifo_oflows +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV);

	/* Tx stats. */
	stats->tx_ucast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK);
	stats->tx_bcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK);
	stats->tx_pause_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE);
	stats->tx_mcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK);
	stats->tx_octets +=
	    MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO);
	stats->tx_pkts_64 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B);
	stats->tx_pkts_65_127 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B);
	stats->tx_pkts_128_255 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B);
	stats->tx_pkts_256_511 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B);
	stats->tx_pkts_512_1023 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B);
	stats->tx_pkts_1024_1518 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B);
	stats->tx_pkts_1519_max +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ);
	stats->tx_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL);
	stats->tx_late_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL);
	stats->tx_excess_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL);
	stats->tx_multi_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL);
	stats->tx_single_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL);
	stats->tx_underflows +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
}
/*
 * Sysctl handler for a 32-bit MAC statistic.  arg2 is the byte offset
 * of the accumulator field inside struct msk_hw_stats; the reported
 * value is the software accumulator plus the counter still pending in
 * hardware.  The "off * 2" maps the 4-byte struct offset onto the
 * 8-byte register stride of each 32-bit MIB counter (two 16-bit
 * registers per half — see MSK_READ_MIB32).
 */
static int
msk_sysctl_stat32(SYSCTL_HANDLER_ARGS)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	uint32_t result, *stat;
	int off;

	sc_if = (struct msk_if_softc *)arg1;
	sc = sc_if->msk_softc;
	off = arg2;
	stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off);

	MSK_IF_LOCK(sc_if);
	result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
	result += *stat;
	MSK_IF_UNLOCK(sc_if);

	return (sysctl_handle_int(oidp, &result, 0, req));
}
/*
 * Sysctl handler for a 64-bit MAC statistic; same scheme as
 * msk_sysctl_stat32 (software accumulator plus live hardware count,
 * with off * 2 translating the struct offset into the MIB register
 * offset).
 */
static int
msk_sysctl_stat64(SYSCTL_HANDLER_ARGS)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	uint64_t result, *stat;
	int off;

	sc_if = (struct msk_if_softc *)arg1;
	sc = sc_if->msk_softc;
	off = arg2;
	stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off);

	MSK_IF_LOCK(sc_if);
	result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
	result += *stat;
	MSK_IF_UNLOCK(sc_if);

	return (sysctl_handle_64(oidp, &result, 0, req));
}
#undef MSK_READ_MIB32
#undef MSK_READ_MIB64

/*
 * Helpers to register one statistic sysctl node: arg1 is the
 * msk_if_softc, arg2 the field's offset within struct msk_hw_stats,
 * dispatched to the 32- or 64-bit handler above.
 */
#define MSK_SYSCTL_STAT32(sc, c, o, p, n, d) 			\
	SYSCTL_ADD_PROC(c, p, OID_AUTO, o, 			\
	    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 	\
	    sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32,	\
	    "IU", d)
#define MSK_SYSCTL_STAT64(sc, c, o, p, n, d) 			\
	SYSCTL_ADD_PROC(c, p, OID_AUTO, o, 			\
	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 	\
	    sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64,	\
	    "QU", d)
/*
 * Build the per-interface statistics sysctl tree:
 * dev.msk.N.stats.{rx,tx}.* with one read-only node per MAC counter.
 */
static void
msk_sysctl_node(struct msk_if_softc *sc_if)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *schild;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc_if->msk_if_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev));

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MSK Statistics");
	schild = SYSCTL_CHILDREN(tree);
	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MSK RX Statistics");
	child = SYSCTL_CHILDREN(tree);
	/* Rx counters. */
	MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
	    child, rx_ucast_frames, "Good unicast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
	    child, rx_bcast_frames, "Good broadcast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
	    child, rx_pause_frames, "Pause frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
	    child, rx_mcast_frames, "Multicast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs",
	    child, rx_crc_errs, "CRC errors");
	MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets",
	    child, rx_good_octets, "Good octets");
	MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets",
	    child, rx_bad_octets, "Bad octets");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
	    child, rx_pkts_64, "64 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
	    child, rx_pkts_65_127, "65 to 127 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
	    child, rx_pkts_128_255, "128 to 255 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
	    child, rx_pkts_256_511, "256 to 511 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
	    child, rx_pkts_512_1023, "512 to 1023 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
	    child, rx_pkts_1024_1518, "1024 to 1518 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
	    child, rx_pkts_1519_max, "1519 to max frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long",
	    child, rx_pkts_too_long, "frames too long");
	MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers",
	    child, rx_pkts_jabbers, "Jabber errors");
	MSK_SYSCTL_STAT32(sc_if, ctx, "overflows",
	    child, rx_fifo_oflows, "FIFO overflows");

	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MSK TX Statistics");
	child = SYSCTL_CHILDREN(tree);
	/* Tx counters. */
	MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
	    child, tx_ucast_frames, "Unicast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
	    child, tx_bcast_frames, "Broadcast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
	    child, tx_pause_frames, "Pause frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
	    child, tx_mcast_frames, "Multicast frames");
	MSK_SYSCTL_STAT64(sc_if, ctx, "octets",
	    child, tx_octets, "Octets");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
	    child, tx_pkts_64, "64 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
	    child, tx_pkts_65_127, "65 to 127 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
	    child, tx_pkts_128_255, "128 to 255 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
	    child, tx_pkts_256_511, "256 to 511 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
	    child, tx_pkts_512_1023, "512 to 1023 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
	    child, tx_pkts_1024_1518, "1024 to 1518 bytes frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
	    child, tx_pkts_1519_max, "1519 to max frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "colls",
	    child, tx_colls, "Collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls",
	    child, tx_late_colls, "Late collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls",
	    child, tx_excess_colls, "Excessive collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls",
	    child, tx_multi_colls, "Multiple collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls",
	    child, tx_single_colls, "Single collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "underflows",
	    child, tx_underflows, "FIFO underflows");
}
/* The registration helpers are only needed by msk_sysctl_node(). */
#undef MSK_SYSCTL_STAT32
#undef MSK_SYSCTL_STAT64
/*
 * Generic sysctl handler for an integer tunable that must stay within
 * [low, high].  Out-of-range updates are rejected with EINVAL and the
 * stored value is left untouched.
 */
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int *valp = arg1;
	int err, val;

	if (valp == NULL)
		return (EINVAL);
	val = *valp;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);
	if (val >= low && val <= high) {
		*valp = val;
		return (0);
	}
	return (EINVAL);
}
/*
 * Sysctl handler for the interrupt-processing limit tunable; clamps
 * new values to [MSK_PROC_MIN, MSK_PROC_MAX].
 */
static int
sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
	    MSK_PROC_MAX));
}
diff --git a/sys/dev/mxge/if_mxge.c b/sys/dev/mxge/if_mxge.c
index 4036a46645db..47793232d8d5 100644
--- a/sys/dev/mxge/if_mxge.c
+++ b/sys/dev/mxge/if_mxge.c
@@ -1,4881 +1,4875 @@
/******************************************************************************
SPDX-License-Identifier: BSD-2-Clause
Copyright (c) 2006-2013, Myricom Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Myricom Inc, nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/endian.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <contrib/zlib/zlib.h>
#include <dev/zlib/zcalloc.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet6/ip6_var.h>
#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h> /* XXX for pci_cfg_restore */
#include <vm/vm.h> /* for pmap_mapdev() */
#include <vm/pmap.h>
#if defined(__i386) || defined(__amd64)
#include <machine/specialreg.h>
#endif
#include <dev/mxge/mxge_mcp.h>
#include <dev/mxge/mcp_gen_header.h>
/*#define MXGE_FAKE_IFP*/
#include <dev/mxge/if_mxge_var.h>
#include <sys/buf_ring.h>
#include "opt_inet.h"
#include "opt_inet6.h"
/*
 * Tunable parameters (module defaults; several are exported as
 * sysctls/tunables elsewhere in the driver).
 */
static int mxge_nvidia_ecrc_enable = 1;
static int mxge_force_firmware = 0;
static int mxge_intr_coal_delay = 30;
static int mxge_deassert_wait = 1;
static int mxge_flow_control = 1;
static int mxge_verbose = 0;
static int mxge_ticks;
static int mxge_max_slices = 1;
static int mxge_rss_hash_type = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
static int mxge_always_promisc = 0;
static int mxge_initial_mtu = ETHERMTU_JUMBO;
static int mxge_throttle = 0;
/* Firmware image names: aligned vs. unaligned-PCIe-completion builds,
 * each with a single-slice and an RSS (multi-slice) variant. */
static char *mxge_fw_unaligned = "mxge_ethp_z8e";
static char *mxge_fw_aligned = "mxge_eth_z8e";
static char *mxge_fw_rss_aligned = "mxge_rss_eth_z8e";
static char *mxge_fw_rss_unaligned = "mxge_rss_ethp_z8e";

/* Forward declarations for the device methods below. */
static int mxge_probe(device_t dev);
static int mxge_attach(device_t dev);
static int mxge_detach(device_t dev);
static int mxge_shutdown(device_t dev);
static void mxge_intr(void *arg);

static device_method_t mxge_methods[] =
{
  /* Device interface */
  DEVMETHOD(device_probe, mxge_probe),
  DEVMETHOD(device_attach, mxge_attach),
  DEVMETHOD(device_detach, mxge_detach),
  DEVMETHOD(device_shutdown, mxge_shutdown),

  DEVMETHOD_END
};

static driver_t mxge_driver =
{
  "mxge",
  mxge_methods,
  sizeof(mxge_softc_t),
};

/* Declare ourselves to be a child of the PCI bus.*/
DRIVER_MODULE(mxge, pci, mxge_driver, 0, 0);
/* The driver needs the firmware(9) loader and zlib to decompress images. */
MODULE_DEPEND(mxge, firmware, 1, 1, 1);
MODULE_DEPEND(mxge, zlib, 1, 1, 1);
/*
 * PCI probe: claim Myricom Z8E / Z8E_9 devices and set a
 * revision-specific description.
 */
static int
mxge_probe(device_t dev)
{
	int devid, rev;

	if (pci_get_vendor(dev) != MXGE_PCI_VENDOR_MYRICOM)
		return ENXIO;
	devid = pci_get_device(dev);
	if (devid != MXGE_PCI_DEVICE_Z8E && devid != MXGE_PCI_DEVICE_Z8E_9)
		return ENXIO;

	rev = pci_get_revid(dev);
	switch (rev) {
	case MXGE_PCI_REV_Z8E:
		device_set_desc(dev, "Myri10G-PCIE-8A");
		break;
	case MXGE_PCI_REV_Z8ES:
		device_set_desc(dev, "Myri10G-PCIE-8B");
		break;
	default:
		device_set_desc(dev, "Myri10G-PCIE-8??");
		device_printf(dev, "Unrecognized rev %d NIC\n",
			      rev);
		break;
	}
	return 0;
}
/*
 * Mark the mapped NIC SRAM as write-combining on x86/amd64 to speed up
 * the PIO copies into the transmit/command windows.  On failure (or on
 * other architectures) sc->wc stays 0 and the driver falls back to
 * uncached access.
 */
static void
mxge_enable_wc(mxge_softc_t *sc)
{
#if defined(__i386) || defined(__amd64)
	vm_offset_t len;
	int err;

	sc->wc = 1;
	len = rman_get_size(sc->mem_res);
	err = pmap_change_attr((vm_offset_t) sc->sram,
			       len, PAT_WRITE_COMBINING);
	if (err != 0) {
		device_printf(sc->dev, "pmap_change_attr failed, %d\n",
			      err);
		sc->wc = 0;
	}
#endif
}
/*
 * bus_dmamap_load() callback: stash the single segment's bus address
 * into the caller-supplied bus_addr_t.  On error the destination is
 * left untouched.
 */
static void
mxge_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
		     int error)
{
	bus_addr_t *busaddrp = arg;

	if (error != 0)
		return;
	*busaddrp = segs[0].ds_addr;
}
/*
 * Allocate, zero, and bus-map a contiguous DMA buffer of 'bytes'
 * bytes with the given alignment; fills in dma->dmat/addr/map/bus_addr.
 * Returns 0 on success or a bus_dma errno, unwinding any partially
 * acquired resources via the goto-cleanup chain.
 */
static int
mxge_dma_alloc(mxge_softc_t *sc, mxge_dma_t *dma, size_t bytes,
	       bus_size_t alignment)
{
	int err;
	device_t dev = sc->dev;
	bus_size_t boundary, maxsegsize;

	/*
	 * A 4KB-aligned allocation larger than 4KB cannot also honor a
	 * 4KB boundary in a single segment, so drop the boundary
	 * restriction for that case.
	 */
	if (bytes > 4096 && alignment == 4096) {
		boundary = 0;
		maxsegsize = bytes;
	} else {
		boundary = 4096;
		maxsegsize = 4096;
	}

	/* allocate DMAable memory tags */
	err = bus_dma_tag_create(sc->parent_dmat,	/* parent */
				 alignment,		/* alignment */
				 boundary,		/* boundary */
				 BUS_SPACE_MAXADDR,	/* low */
				 BUS_SPACE_MAXADDR,	/* high */
				 NULL, NULL,		/* filter */
				 bytes,			/* maxsize */
				 1,			/* num segs */
				 maxsegsize,		/* maxsegsize */
				 BUS_DMA_COHERENT,	/* flags */
				 NULL, NULL,		/* lock */
				 &dma->dmat);		/* tag */
	if (err != 0) {
		device_printf(dev, "couldn't alloc tag (err = %d)\n", err);
		return err;
	}

	/* allocate DMAable memory & map */
	err = bus_dmamem_alloc(dma->dmat, &dma->addr,
			       (BUS_DMA_WAITOK | BUS_DMA_COHERENT
				| BUS_DMA_ZERO),  &dma->map);
	if (err != 0) {
		device_printf(dev, "couldn't alloc mem (err = %d)\n", err);
		goto abort_with_dmat;
	}

	/* load the memory */
	err = bus_dmamap_load(dma->dmat, dma->map, dma->addr, bytes,
			      mxge_dmamap_callback,
			      (void *)&dma->bus_addr, 0);
	if (err != 0) {
		device_printf(dev, "couldn't load map (err = %d)\n", err);
		goto abort_with_mem;
	}
	return 0;

abort_with_mem:
	bus_dmamem_free(dma->dmat, dma->addr, dma->map);
abort_with_dmat:
	(void)bus_dma_tag_destroy(dma->dmat);
	return err;
}
/*
 * Release a buffer obtained from mxge_dma_alloc(): unload the map,
 * free the memory, then destroy the tag — strictly in that order.
 */
static void
mxge_dma_free(mxge_dma_t *dma)
{
	bus_dmamap_unload(dma->dmat, dma->map);
	bus_dmamem_free(dma->dmat, dma->addr, dma->map);
	(void)bus_dma_tag_destroy(dma->dmat);
}
/*
 * The eeprom strings on the lanaiX have the format
 * SN=x\0
 * MAC=x:x:x:x:x:x\0
 * PC=text\0
 */

/*
 * Walk the NUL-separated EEPROM string list and extract the MAC
 * address, product code, and serial number (SN2 overrides SN).
 * Returns 0 if a MAC address was found, ENXIO otherwise.
 */
static int
mxge_parse_strings(mxge_softc_t *sc)
{
	char *p, *end;
	int octet, have_mac, have_sn2;

	p = sc->eeprom_strings;
	have_mac = 0;
	have_sn2 = 0;
	while (*p != '\0') {
		if (strncmp(p, "MAC=", 4) == 0) {
			/* Parse six colon-separated two-digit hex octets. */
			p += 4;
			octet = 0;
			for (;;) {
				sc->mac_addr[octet] = strtoul(p, &end, 16);
				if (end - p != 2)
					goto abort;
				p = end;
				if (++octet == 6)
					break;
				if (*p++ != ':')
					goto abort;
			}
			have_mac = 1;
		} else if (strncmp(p, "PC=", 3) == 0) {
			strlcpy(sc->product_code_string, p + 3,
				sizeof(sc->product_code_string));
		} else if (strncmp(p, "SN2=", 4) == 0) {
			/* SN2 takes precedence over SN */
			have_sn2 = 1;
			strlcpy(sc->serial_number_string, p + 4,
				sizeof(sc->serial_number_string));
		} else if (!have_sn2 && (strncmp(p, "SN=", 3) == 0)) {
			strlcpy(sc->serial_number_string, p + 3,
				sizeof(sc->serial_number_string));
		}
		/* Advance past this string's terminating NUL. */
		while (*p++ != '\0')
			;
	}

	if (have_mac)
		return 0;

abort:
	device_printf(sc->dev, "failed to parse eeprom_strings\n");
	return ENXIO;
}
/*
 * Enable ECRC generation on an upstream Nvidia (CK804/MCP55) PCIe
 * bridge so that completion packets arrive 8-byte aligned, which lets
 * the driver use the faster "aligned" firmware.  The required bit
 * lives in extended (>0xFF) config space, which is reached here by
 * mapping the chipset's memory-mapped config window directly — see
 * the XXXX comment below.  Best-effort: any failure just returns.
 */
#if defined __i386 || defined i386 || defined __i386__ || defined __x86_64__
static void
mxge_enable_nvidia_ecrc(mxge_softc_t *sc)
{
	uint32_t val;
	unsigned long base, off;
	char *va, *cfgptr;
	device_t pdev, mcp55;
	uint16_t vendor_id, device_id, word;
	uintptr_t bus, slot, func, ivend, idev;
	uint32_t *ptr32;

	if (!mxge_nvidia_ecrc_enable)
		return;

	/* The bridge of interest is the grandparent of the NIC. */
	pdev = device_get_parent(device_get_parent(sc->dev));
	if (pdev == NULL) {
		device_printf(sc->dev, "could not find parent?\n");
		return;
	}
	vendor_id = pci_read_config(pdev, PCIR_VENDOR, 2);
	device_id = pci_read_config(pdev, PCIR_DEVICE, 2);

	if (vendor_id != 0x10de)
		return;

	base = 0;

	if (device_id == 0x005d) {
		/* ck804, base address is magic */
		base = 0xe0000000UL;
	} else if (device_id >= 0x0374 && device_id <= 0x378) {
		/* mcp55, base address stored in chipset */
		mcp55 = pci_find_bsf(0, 0, 0);
		if (mcp55 &&
		    0x10de == pci_read_config(mcp55, PCIR_VENDOR, 2) &&
		    0x0369 == pci_read_config(mcp55, PCIR_DEVICE, 2)) {
			word = pci_read_config(mcp55, 0x90, 2);
			base = ((unsigned long)word & 0x7ffeU) << 25;
		}
	}
	if (!base)
		return;

	/* XXXX
	   Test below is commented because it is believed that doing
	   config read/write beyond 0xff will access the config space
	   for the next larger function.  Uncomment this and remove
	   the hacky pmap_mapdev() way of accessing config space when
	   FreeBSD grows support for extended pcie config space access
	*/
#if 0
	/* See if we can, by some miracle, access the extended
	   config space */
	val = pci_read_config(pdev, 0x178, 4);
	if (val != 0xffffffff) {
		val |= 0x40;
		pci_write_config(pdev, 0x178, val, 4);
		return;
	}
#endif
	/* Rather than using normal pci config space writes, we must
	 * map the Nvidia config space ourselves.  This is because on
	 * opteron/nvidia class machine the 0xe000000 mapping is
	 * handled by the nvidia chipset, that means the internal PCI
	 * device (the on-chip northbridge), or the amd-8131 bridge
	 * and things behind them are not visible by this method.
	 */

	BUS_READ_IVAR(device_get_parent(pdev), pdev,
		      PCI_IVAR_BUS, &bus);
	BUS_READ_IVAR(device_get_parent(pdev), pdev,
		      PCI_IVAR_SLOT, &slot);
	BUS_READ_IVAR(device_get_parent(pdev), pdev,
		      PCI_IVAR_FUNCTION, &func);
	BUS_READ_IVAR(device_get_parent(pdev), pdev,
		      PCI_IVAR_VENDOR, &ivend);
	BUS_READ_IVAR(device_get_parent(pdev), pdev,
		      PCI_IVAR_DEVICE, &idev);

	/* Offset of this function's 4KB config region in the window. */
	off =  base
		+ 0x00100000UL * (unsigned long)bus
		+ 0x00001000UL * (unsigned long)(func
						 + 8 * slot);

	/* map it into the kernel */
	va = pmap_mapdev(trunc_page((vm_paddr_t)off), PAGE_SIZE);

	if (va == NULL) {
		device_printf(sc->dev, "pmap_kenter_temporary didn't\n");
		return;
	}
	/* get a pointer to the config space mapped into the kernel */
	cfgptr = va + (off & PAGE_MASK);

	/* make sure that we can really access it */
	vendor_id = *(uint16_t *)(cfgptr + PCIR_VENDOR);
	device_id = *(uint16_t *)(cfgptr + PCIR_DEVICE);
	if (! (vendor_id == ivend && device_id == idev)) {
		device_printf(sc->dev, "mapping failed: 0x%x:0x%x\n",
			      vendor_id, device_id);
		pmap_unmapdev(va, PAGE_SIZE);
		return;
	}

	ptr32 = (uint32_t*)(cfgptr + 0x178);
	val = *ptr32;

	if (val == 0xffffffff) {
		device_printf(sc->dev, "extended mapping failed\n");
		pmap_unmapdev(va, PAGE_SIZE);
		return;
	}
	/* Bit 0x40 at extended config offset 0x178 enables ECRC. */
	*ptr32 = val | 0x40;
	pmap_unmapdev(va, PAGE_SIZE);
	if (mxge_verbose)
		device_printf(sc->dev,
			      "Enabled ECRC on upstream Nvidia bridge "
			      "at %d:%d:%d\n",
			      (int)bus, (int)slot, (int)func);
	return;
}
#else
static void
mxge_enable_nvidia_ecrc(mxge_softc_t *sc)
{
	device_printf(sc->dev,
		      "Nforce 4 chipset on non-x86/amd64!?!?!\n");
	return;
}
#endif
/*
 * Run the firmware's DMA benchmark three times (read, write,
 * read+write) against the dmabench buffer and record the measured
 * bandwidths (MB/s) in sc->{read,write,read_write}_dma.  With
 * test_type == MXGEFW_CMD_UNALIGNED_TEST the firmware instead aborts
 * on the first unaligned PCIe completion, so a non-zero return there
 * is an expected probe result rather than an error worth printing.
 */
static int
mxge_dma_test(mxge_softc_t *sc, int test_type)
{
	mxge_cmd_t cmd;
	bus_addr_t dmatest_bus = sc->dmabench_dma.bus_addr;
	int status;
	uint32_t len;
	char *test = " ";

	/* Run a small DMA test.
	 * The magic multipliers to the length tell the firmware
	 * to do DMA read, write, or read+write tests.  The
	 * results are returned in cmd.data0.  The upper 16
	 * bits of the return is the number of transfers completed.
	 * The lower 16 bits is the time in 0.5us ticks that the
	 * transfers took to complete.
	 */

	len = sc->tx_boundary;

	cmd.data0 = MXGE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MXGE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x10000;
	status = mxge_send_cmd(sc, test_type, &cmd);
	if (status != 0) {
		test = "read";
		goto abort;
	}
	/* bandwidth = bytes transferred / elapsed time. */
	sc->read_dma = ((cmd.data0>>16) * len * 2) /
		(cmd.data0 & 0xffff);
	cmd.data0 = MXGE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MXGE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x1;
	status = mxge_send_cmd(sc, test_type, &cmd);
	if (status != 0) {
		test = "write";
		goto abort;
	}
	sc->write_dma = ((cmd.data0>>16) * len * 2) /
		(cmd.data0 & 0xffff);

	cmd.data0 = MXGE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MXGE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x10001;
	status = mxge_send_cmd(sc, test_type, &cmd);
	if (status != 0) {
		test = "read/write";
		goto abort;
	}
	/* read+write moves data both directions, hence the extra * 2. */
	sc->read_write_dma = ((cmd.data0>>16) * len * 2 * 2) /
		(cmd.data0 & 0xffff);

abort:
	if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST)
		device_printf(sc->dev, "DMA %s benchmark failed: %d\n",
			      test, status);

	return status;
}
/*
* The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput
* when the PCI-E Completion packets are aligned on an 8-byte
* boundary. Some PCI-E chip sets always align Completion packets; on
* the ones that do not, the alignment can be enforced by enabling
* ECRC generation (if supported).
*
* When PCI-E Completion packets are not aligned, it is actually more
* efficient to limit Read-DMA transactions to 2KB, rather than 4KB.
*
* If the driver can neither enable ECRC nor verify that it has
* already been enabled, then it must use a firmware image which works
* around unaligned completion packets (ethp_z8e.dat), and it should
* also ensure that it never gives the device a Read-DMA which is
* larger than 2KB by setting the tx_boundary to 2KB. If ECRC is
* enabled, then the driver should use the aligned (eth_z8e.dat)
* firmware image, and set tx_boundary to 4KB.
*/
/*
 * Try the aligned firmware on this host: load it, attempt to enable
 * ECRC on a supporting Nvidia bridge, and (on pre-Z8ES parts) run the
 * unaligned-completion DMA test.  Returns 0 if the aligned image is
 * usable; a non-zero status tells the caller to fall back to the
 * unaligned image.  See the long comment above for the background.
 */
static int
mxge_firmware_probe(mxge_softc_t *sc)
{
	device_t dev = sc->dev;
	int reg, status;
	uint16_t pectl;

	sc->tx_boundary = 4096;
	/*
	 * Verify the max read request size was set to 4KB
	 * before trying the test with 4KB.
	 */
	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
		/* Device Control register: MaxReadReq is bits 14:12. */
		pectl = pci_read_config(dev, reg + 0x8, 2);
		if ((pectl & (5 << 12)) != (5 << 12)) {
			device_printf(dev, "Max Read Req. size != 4k (0x%x\n",
				      pectl);
			sc->tx_boundary = 2048;
		}
	}

	/*
	 * load the optimized firmware (which assumes aligned PCIe
	 * completions) in order to see if it works on this host.
	 */
	sc->fw_name = mxge_fw_aligned;
	status = mxge_load_firmware(sc, 1);
	if (status != 0) {
		return status;
	}

	/*
	 * Enable ECRC if possible
	 */
	mxge_enable_nvidia_ecrc(sc);

	/*
	 * Run a DMA test which watches for unaligned completions and
	 * aborts on the first one seen.  Not required on Z8ES or newer.
	 */
	if (pci_get_revid(sc->dev) >= MXGE_PCI_REV_Z8ES)
		return 0;
	status = mxge_dma_test(sc, MXGEFW_CMD_UNALIGNED_TEST);
	if (status == 0)
		return 0; /* keep the aligned firmware */

	if (status != E2BIG)
		device_printf(dev, "DMA test failed: %d\n", status);
	if (status == ENOSYS)
		device_printf(dev, "Falling back to ethp! "
			      "Please install up to date fw\n");
	return status;
}
/*
 * Decide between the aligned and unaligned firmware images.  The
 * choice can be forced by tunable (or implied by throttling); a
 * narrow (<= x4) PCIe link always takes the aligned image; otherwise
 * the aligned image is probed and the unaligned one is the fallback.
 * Ends by loading whichever image was selected.
 */
static int
mxge_select_firmware(mxge_softc_t *sc)
{
	int aligned;
	int force;

	force = mxge_force_firmware;
	if (sc->throttle)
		force = sc->throttle;

	if (force != 0) {
		/* force == 1 means aligned; anything else unaligned. */
		aligned = (force == 1);
		if (mxge_verbose)
			device_printf(sc->dev,
				      "Assuming %s completions (forced)\n",
				      aligned ? "aligned" : "unaligned");
		goto abort;
	}

	/* if the PCIe link width is 4 or less, we can use the aligned
	   firmware and skip any checks */
	if (sc->link_width != 0 && sc->link_width <= 4) {
		device_printf(sc->dev,
			      "PCIe x%d Link, expect reduced performance\n",
			      sc->link_width);
		aligned = 1;
		goto abort;
	}

	if (mxge_firmware_probe(sc) == 0)
		return 0;

	/* Probe failed: fall back to the unaligned image. */
	aligned = 0;

abort:
	if (aligned) {
		sc->fw_name = mxge_fw_aligned;
		sc->tx_boundary = 4096;
	} else {
		sc->fw_name = mxge_fw_unaligned;
		sc->tx_boundary = 2048;
	}
	return (mxge_load_firmware(sc, 0));
}
/*
 * Sanity-check a firmware header: verify it is an ethernet MCP image
 * and that its major.minor version matches what this driver requires.
 * As a side effect, records the version string and parsed version
 * numbers in the softc for the sysctl tree.  Returns 0, EIO on a bad
 * image type, or EINVAL on a version mismatch.
 */
static int
mxge_validate_firmware(mxge_softc_t *sc, const mcp_gen_header_t *hdr)
{
	uint32_t mcp_type;

	mcp_type = be32toh(hdr->mcp_type);
	if (mcp_type != MCP_TYPE_ETH) {
		device_printf(sc->dev, "Bad firmware type: 0x%x\n",
		    mcp_type);
		return EIO;
	}

	/* save firmware version for sysctl */
	strlcpy(sc->fw_version, hdr->version, sizeof(sc->fw_version));
	if (mxge_verbose)
		device_printf(sc->dev, "firmware id: %s\n", hdr->version);

	sscanf(sc->fw_version, "%d.%d.%d", &sc->fw_ver_major,
	    &sc->fw_ver_minor, &sc->fw_ver_tiny);

	if (sc->fw_ver_major != MXGEFW_VERSION_MAJOR ||
	    sc->fw_ver_minor != MXGEFW_VERSION_MINOR) {
		device_printf(sc->dev, "Found firmware version %s\n",
		    sc->fw_version);
		device_printf(sc->dev, "Driver needs %d.%d\n",
		    MXGEFW_VERSION_MAJOR, MXGEFW_VERSION_MINOR);
		return EINVAL;
	}
	return 0;
}
/*
 * Fetch the compressed firmware image via firmware(9), inflate it,
 * validate its embedded header, and copy it into the NIC's SRAM at
 * MXGE_FW_OFFSET.  On success, *limit is set to the uncompressed
 * firmware length.  Returns 0 or an errno.
 */
static int
mxge_load_firmware_helper(mxge_softc_t *sc, uint32_t *limit)
{
	z_stream zs;
	char *inflate_buffer;
	const struct firmware *fw;
	const mcp_gen_header_t *hdr;
	unsigned hdr_offset;
	int status;
	unsigned int i;
	size_t fw_len;

	fw = firmware_get(sc->fw_name);
	if (fw == NULL) {
		device_printf(sc->dev, "Could not find firmware image %s\n",
			      sc->fw_name);
		return ENOENT;
	}

	/* setup zlib and decompress f/w */
	bzero(&zs, sizeof (zs));
	zs.zalloc = zcalloc_nowait;
	zs.zfree = zcfree;
	status = inflateInit(&zs);
	if (status != Z_OK) {
		status = EIO;
		goto abort_with_fw;
	}

	/* the uncompressed size is stored as the firmware version,
	   which would otherwise go unused */
	fw_len = (size_t) fw->version;
	inflate_buffer = malloc(fw_len, M_TEMP, M_NOWAIT);
	if (inflate_buffer == NULL) {
		/*
		 * Fix: report the allocation failure.  Previously status
		 * was left at Z_OK (0) here, so a failed malloc() was
		 * returned to the caller as a successful firmware load.
		 */
		status = ENOMEM;
		goto abort_with_zs;
	}
	zs.avail_in = fw->datasize;
	zs.next_in = __DECONST(char *, fw->data);
	zs.avail_out = fw_len;
	zs.next_out = inflate_buffer;
	status = inflate(&zs, Z_FINISH);
	if (status != Z_STREAM_END) {
		device_printf(sc->dev, "zlib %d\n", status);
		status = EIO;
		goto abort_with_buffer;
	}

	/* check id */
	hdr_offset = htobe32(*(const uint32_t *)
			     (inflate_buffer + MCP_HEADER_PTR_OFFSET));
	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw_len) {
		/* Fix: terminate the log line with a newline */
		device_printf(sc->dev, "Bad firmware file\n");
		status = EIO;
		goto abort_with_buffer;
	}
	hdr = (const void*)(inflate_buffer + hdr_offset);

	status = mxge_validate_firmware(sc, hdr);
	if (status != 0)
		goto abort_with_buffer;

	/* Copy the inflated firmware to NIC SRAM, 256 bytes at a time. */
	for (i = 0; i < fw_len; i += 256) {
		mxge_pio_copy(sc->sram + MXGE_FW_OFFSET + i,
			      inflate_buffer + i,
			      min(256U, (unsigned)(fw_len - i)));
		wmb();
		/* read back to flush posted PCI writes */
		(void)*sc->sram;
		wmb();
	}

	*limit = fw_len;
	status = 0;
abort_with_buffer:
	free(inflate_buffer, M_TEMP);
abort_with_zs:
	inflateEnd(&zs);
abort_with_fw:
	firmware_put(fw, FIRMWARE_UNLOAD);
	return status;
}
/*
* Enable or disable periodic RDMAs from the host to make certain
* chipsets resend dropped PCIe messages
*/
/*
 * Enable or disable the firmware's periodic dummy RDMA reads (used to
 * make certain chipsets resend dropped PCIe messages).  Builds a
 * 6-word request in an 8-byte-aligned scratch buffer, PIO-copies it to
 * the MXGEFW_BOOT_DUMMY_RDMA doorbell, and polls the DMA'ed
 * confirmation word for the firmware's all-ones acknowledgement.
 */
static void
mxge_dummy_rdma(mxge_softc_t *sc, int enable)
{
	char buf_bytes[72];
	volatile uint32_t *confirm;
	volatile char *submit;
	uint32_t *buf, dma_low, dma_high;
	int i;

	/* scratch buffer must be 64-bit aligned for mxge_pio_copy() */
	buf = (uint32_t *)((uintptr_t)(buf_bytes + 7) & ~7UL);

	/* clear confirmation addr */
	confirm = (volatile uint32_t *)sc->cmd;
	*confirm = 0;
	wmb();

	/* send an rdma command to the PCIe engine, and wait for the
	   response in the confirmation address.  The firmware should
	   write a -1 there to indicate it is alive and well
	*/
	dma_low = MXGE_LOWPART_TO_U32(sc->cmd_dma.bus_addr);
	dma_high = MXGE_HIGHPART_TO_U32(sc->cmd_dma.bus_addr);
	buf[0] = htobe32(dma_high);		/* confirm addr MSW */
	buf[1] = htobe32(dma_low);		/* confirm addr LSW */
	buf[2] = htobe32(0xffffffff);		/* confirm data */
	dma_low = MXGE_LOWPART_TO_U32(sc->zeropad_dma.bus_addr);
	dma_high = MXGE_HIGHPART_TO_U32(sc->zeropad_dma.bus_addr);
	buf[3] = htobe32(dma_high);		/* dummy addr MSW */
	buf[4] = htobe32(dma_low);		/* dummy addr LSW */
	buf[5] = htobe32(enable);		/* enable? */

	submit = (volatile char *)(sc->sram + MXGEFW_BOOT_DUMMY_RDMA);

	mxge_pio_copy(submit, buf, 64);
	wmb();
	DELAY(1000);
	wmb();
	/* poll for up to ~20ms (20 x 1ms) for the -1 acknowledgement */
	i = 0;
	while (*confirm != 0xffffffff && i < 20) {
		DELAY(1000);
		i++;
	}
	if (*confirm != 0xffffffff) {
		device_printf(sc->dev, "dummy rdma %s failed (%p = 0x%x)",
			      (enable ? "enable" : "disable"), confirm,
			      *confirm);
	}
	return;
}
/*
 * Issue a single command to the firmware through the MXGEFW_ETH_CMD
 * window and poll (up to ~20ms) for the response the firmware DMAs
 * back into sc->cmd.  On success the response payload is returned in
 * data->data0.  Returns 0, EAGAIN on timeout, or an errno mapped from
 * the firmware status code.  Serialized by sc->cmd_mtx.
 */
static int
mxge_send_cmd(mxge_softc_t *sc, uint32_t cmd, mxge_cmd_t *data)
{
	mcp_cmd_t *buf;
	char buf_bytes[sizeof(*buf) + 8];
	volatile mcp_cmd_response_t *response = sc->cmd;
	volatile char *cmd_addr = sc->sram + MXGEFW_ETH_CMD;
	uint32_t dma_low, dma_high;
	int err, sleep_total = 0;

	/* ensure buf is aligned to 8 bytes */
	buf = (mcp_cmd_t *)((uintptr_t)(buf_bytes + 7) & ~7UL);

	buf->data0 = htobe32(data->data0);
	buf->data1 = htobe32(data->data1);
	buf->data2 = htobe32(data->data2);
	buf->cmd = htobe32(cmd);
	dma_low = MXGE_LOWPART_TO_U32(sc->cmd_dma.bus_addr);
	dma_high = MXGE_HIGHPART_TO_U32(sc->cmd_dma.bus_addr);

	buf->response_addr.low = htobe32(dma_low);
	buf->response_addr.high = htobe32(dma_high);
	mtx_lock(&sc->cmd_mtx);
	/* sentinel: firmware overwrites this when the command completes */
	response->result = 0xffffffff;
	wmb();
	mxge_pio_copy((volatile void *)cmd_addr, buf, sizeof (*buf));

	/* wait up to 20ms */
	err = EAGAIN;
	for (sleep_total = 0; sleep_total < 20; sleep_total++) {
		bus_dmamap_sync(sc->cmd_dma.dmat,
				sc->cmd_dma.map, BUS_DMASYNC_POSTREAD);
		wmb();
		switch (be32toh(response->result)) {
		case 0:
			data->data0 = be32toh(response->data);
			err = 0;
			break;
		case 0xffffffff:
			/* still pending: sleep 1ms and re-poll */
			DELAY(1000);
			break;
		case MXGEFW_CMD_UNKNOWN:
			err = ENOSYS;
			break;
		case MXGEFW_CMD_ERROR_UNALIGNED:
			err = E2BIG;
			break;
		case MXGEFW_CMD_ERROR_BUSY:
			err = EBUSY;
			break;
		case MXGEFW_CMD_ERROR_I2C_ABSENT:
			err = ENXIO;
			break;
		default:
			device_printf(sc->dev,
				      "mxge: command %d "
				      "failed, result = %d\n",
				      cmd, be32toh(response->result));
			err = ENXIO;
			break;
		}
		if (err != EAGAIN)
			break;
	}
	if (err == EAGAIN)
		/*
		 * Fix: the two adjacent literals previously concatenated
		 * to "timed outresult"; add the missing separator.
		 */
		device_printf(sc->dev, "mxge: command %d timed out, "
			      "result = %d\n",
			      cmd, be32toh(response->result));
	mtx_unlock(&sc->cmd_mtx);
	return err;
}
/*
 * Instead of loading a new image, validate and adopt the firmware
 * already running on the NIC (e.g. the eeprom image or one left by a
 * previous driver).  Copies the in-SRAM generation header to host
 * memory, validates it, and flags the known rx-filter bug present in
 * adopted firmware versions 1.4.4 through 1.4.11.
 * Returns 0 or an errno.
 */
static int
mxge_adopt_running_firmware(mxge_softc_t *sc)
{
	struct mcp_gen_header *hdr;
	const size_t bytes = sizeof (struct mcp_gen_header);
	size_t hdr_offset;
	int status;

	/* find running firmware header */
	hdr_offset = htobe32(*(volatile uint32_t *)
			     (sc->sram + MCP_HEADER_PTR_OFFSET));

	/* header pointer must be 4-byte aligned and inside SRAM */
	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > sc->sram_size) {
		device_printf(sc->dev,
			      "Running firmware has bad header offset (%d)\n",
			      (int)hdr_offset);
		return EIO;
	}

	/* copy header of running firmware from SRAM to host memory to
	 * validate firmware */
	hdr = malloc(bytes, M_DEVBUF, M_NOWAIT);
	if (hdr == NULL) {
		device_printf(sc->dev, "could not malloc firmware hdr\n");
		return ENOMEM;
	}
	bus_space_read_region_1(rman_get_bustag(sc->mem_res),
				rman_get_bushandle(sc->mem_res),
				hdr_offset, (char *)hdr, bytes);
	status = mxge_validate_firmware(sc, hdr);
	free(hdr, M_DEVBUF);

	/*
	 * check to see if adopted firmware has bug where adopting
	 * it will cause broadcasts to be filtered unless the NIC
	 * is kept in ALLMULTI mode
	 */
	if (sc->fw_ver_major == 1 && sc->fw_ver_minor == 4 &&
	    sc->fw_ver_tiny >= 4 && sc->fw_ver_tiny <= 11) {
		sc->adopted_rx_filter_bug = 1;
		device_printf(sc->dev, "Adopting fw %d.%d.%d: "
			      "working around rx filter bug\n",
			      sc->fw_ver_major, sc->fw_ver_minor,
			      sc->fw_ver_tiny);
	}
	return status;
}
/*
 * Load firmware onto the NIC.  First try the image selected in
 * sc->fw_name via mxge_load_firmware_helper(); if that fails and
 * 'adopt' is set, fall back to adopting the firmware already running
 * on the NIC (which forces the unaligned / 2KB tx_boundary mode).
 * After a successful SRAM copy, hand control to the new image through
 * the MXGEFW_BOOT_HANDOFF doorbell and poll for its alive
 * acknowledgement.  Returns 0 or an errno.
 */
static int
mxge_load_firmware(mxge_softc_t *sc, int adopt)
{
	volatile uint32_t *confirm;
	volatile char *submit;
	char buf_bytes[72];
	uint32_t *buf, size, dma_low, dma_high;
	int status, i;

	/* scratch buffer must be 64-bit aligned for mxge_pio_copy() */
	buf = (uint32_t *)((uintptr_t)(buf_bytes + 7) & ~7UL);

	size = sc->sram_size;
	status = mxge_load_firmware_helper(sc, &size);
	if (status) {
		if (!adopt)
			return status;
		/* Try to use the currently running firmware, if
		   it is new enough */
		status = mxge_adopt_running_firmware(sc);
		if (status) {
			device_printf(sc->dev,
				      "failed to adopt running firmware\n");
			return status;
		}
		device_printf(sc->dev,
			      "Successfully adopted running firmware\n");
		if (sc->tx_boundary == 4096) {
			device_printf(sc->dev,
				      "Using firmware currently running on NIC"
				      ". For optimal\n");
			device_printf(sc->dev,
				      "performance consider loading optimized "
				      "firmware\n");
		}
		/* adopted firmware cannot be assumed alignment-safe */
		sc->fw_name = mxge_fw_unaligned;
		sc->tx_boundary = 2048;
		return 0;
	}
	/* clear confirmation addr */
	confirm = (volatile uint32_t *)sc->cmd;
	*confirm = 0;
	wmb();
	/* send a reload command to the bootstrap MCP, and wait for the
	   response in the confirmation address.  The firmware should
	   write a -1 there to indicate it is alive and well
	*/
	dma_low = MXGE_LOWPART_TO_U32(sc->cmd_dma.bus_addr);
	dma_high = MXGE_HIGHPART_TO_U32(sc->cmd_dma.bus_addr);

	buf[0] = htobe32(dma_high);	/* confirm addr MSW */
	buf[1] = htobe32(dma_low);	/* confirm addr LSW */
	buf[2] = htobe32(0xffffffff);	/* confirm data */

	/* FIX: All newest firmware should un-protect the bottom of
	   the sram before handoff. However, the very first interfaces
	   do not. Therefore the handoff copy must skip the first 8 bytes
	*/
	/* where the code starts*/
	buf[3] = htobe32(MXGE_FW_OFFSET + 8);
	buf[4] = htobe32(size - 8); 	/* length of code */
	buf[5] = htobe32(8);		/* where to copy to */
	buf[6] = htobe32(0);		/* where to jump to */

	submit = (volatile char *)(sc->sram + MXGEFW_BOOT_HANDOFF);
	mxge_pio_copy(submit, buf, 64);
	wmb();
	DELAY(1000);
	wmb();
	i = 0;
	/* poll for up to ~200ms (20 x 10ms) for the -1 acknowledgement */
	while (*confirm != 0xffffffff && i < 20) {
		DELAY(1000*10);
		i++;
		bus_dmamap_sync(sc->cmd_dma.dmat,
				sc->cmd_dma.map, BUS_DMASYNC_POSTREAD);
	}
	if (*confirm != 0xffffffff) {
		device_printf(sc->dev,"handoff failed (%p = 0x%x)",
			confirm, *confirm);
		return ENXIO;
	}
	return 0;
}
/*
 * Push the station address stored in sc->mac_addr down to the
 * firmware.  Returns the mxge_send_cmd() status.
 */
static int
mxge_update_mac_address(mxge_softc_t *sc)
{
	mxge_cmd_t cmd;
	const uint8_t *mac = sc->mac_addr;

	/* pack the 6 address bytes big-endian into data0/data1 */
	cmd.data0 = ((uint32_t)mac[0] << 24) | ((uint32_t)mac[1] << 16) |
	    ((uint32_t)mac[2] << 8) | mac[3];
	cmd.data1 = ((uint32_t)mac[4] << 8) | mac[5];
	return (mxge_send_cmd(sc, MXGEFW_SET_MAC_ADDRESS, &cmd));
}
/*
 * Enable or disable link-level flow control (pause frames) and record
 * the new setting in the softc.  Returns 0 or ENXIO.
 */
static int
mxge_change_pause(mxge_softc_t *sc, int pause)
{
	mxge_cmd_t cmd;
	uint32_t fw_cmd;
	int err;

	fw_cmd = pause ? MXGEFW_ENABLE_FLOW_CONTROL :
	    MXGEFW_DISABLE_FLOW_CONTROL;
	err = mxge_send_cmd(sc, fw_cmd, &cmd);
	if (err != 0) {
		device_printf(sc->dev, "Failed to set flow control mode\n");
		return ENXIO;
	}
	sc->pause = pause;
	return 0;
}
/*
 * Set the firmware's promiscuous-mode flag.  The mxge_always_promisc
 * tunable forces promiscuous mode on regardless of the request.
 * Failures are logged but not reported to the caller.
 */
static void
mxge_change_promisc(mxge_softc_t *sc, int promisc)
{
	mxge_cmd_t cmd;
	uint32_t fw_cmd;
	int err;

	if (mxge_always_promisc)
		promisc = 1;

	fw_cmd = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
	err = mxge_send_cmd(sc, fw_cmd, &cmd);
	if (err != 0)
		device_printf(sc->dev, "Failed to set promisc mode\n");
}
/*
 * Context passed from mxge_set_multicast_list() to the
 * if_foreach_llmaddr() callback mxge_add_maddr().
 */
struct mxge_add_maddr_ctx {
	mxge_softc_t *sc;
	int error;	/* latches the first mxge_send_cmd() failure, or 0 */
};
/*
 * if_foreach_llmaddr() callback: ask the firmware to join one
 * link-level multicast group.  Once a previous iteration has recorded
 * an error, remaining addresses are skipped.  Returns 1 when an
 * address was processed, 0 when skipped.
 */
static u_int
mxge_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct mxge_add_maddr_ctx *ctx = arg;
	mxge_cmd_t cmd;

	if (ctx->error == 0) {
		/* pack the 6-byte address: 4 bytes in data0, 2 in data1 */
		bcopy(LLADDR(sdl), &cmd.data0, 4);
		bcopy(LLADDR(sdl) + 4, &cmd.data1, 2);
		cmd.data0 = htonl(cmd.data0);
		cmd.data1 = htonl(cmd.data1);
		ctx->error = mxge_send_cmd(ctx->sc,
		    MXGEFW_JOIN_MULTICAST_GROUP, &cmd);
		return (1);
	}
	return (0);
}
/*
 * Program the firmware's multicast filter from the interface's
 * link-level multicast address list.  Filtering is disabled (ALLMULTI)
 * while the list is rebuilt, and is left disabled when the firmware
 * lacks multicast support, the adopted firmware has the rx filter bug,
 * IFF_ALLMULTI is requested, or any firmware command fails.
 */
static void
mxge_set_multicast_list(mxge_softc_t *sc)
{
	struct mxge_add_maddr_ctx ctx;
	if_t ifp = sc->ifp;
	mxge_cmd_t cmd;
	int err;

	/* This firmware is known to not support multicast */
	if (!sc->fw_multicast_support)
		return;

	/* Disable multicast filtering while we play with the lists*/
	err = mxge_send_cmd(sc, MXGEFW_ENABLE_ALLMULTI, &cmd);
	if (err != 0) {
		device_printf(sc->dev, "Failed MXGEFW_ENABLE_ALLMULTI,"
			      " error status: %d\n", err);
		return;
	}

	/* adopted 1.4.4-1.4.11 firmware must stay in ALLMULTI mode */
	if (sc->adopted_rx_filter_bug)
		return;

	if (if_getflags(ifp) & IFF_ALLMULTI)
		/* request to disable multicast filtering, so quit here */
		return;

	/* Flush all the filters */
	err = mxge_send_cmd(sc, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, &cmd);
	if (err != 0) {
		device_printf(sc->dev,
			      "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS"
			      ", error status: %d\n", err);
		return;
	}

	/* Walk the multicast list, and add each address */
	ctx.sc = sc;
	ctx.error = 0;
	if_foreach_llmaddr(ifp, mxge_add_maddr, &ctx);
	if (ctx.error != 0) {
		/*
		 * Fix: this log line previously read "error status:%d\t",
		 * ending with a stray tab instead of a newline.
		 */
		device_printf(sc->dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, "
			      "error status: %d\n", ctx.error);
		/* abort, leaving multicast filtering off */
		return;
	}

	/* Enable multicast filtering */
	err = mxge_send_cmd(sc, MXGEFW_DISABLE_ALLMULTI, &cmd);
	if (err != 0) {
		device_printf(sc->dev, "Failed MXGEFW_DISABLE_ALLMULTI"
			      ", error status: %d\n", err);
	}
}
/*
 * Determine the largest MTU usable on this NIC.  If a page-sized jumbo
 * cluster already covers the firmware maximum, or the firmware accepts
 * scattering one frame over several big buffers, the firmware maximum
 * applies; otherwise the MTU is capped by MJUMPAGESIZE.
 */
static int
mxge_max_mtu(mxge_softc_t *sc)
{
	mxge_cmd_t cmd;

	if (MJUMPAGESIZE - MXGEFW_PAD > MXGEFW_MAX_MTU)
		return MXGEFW_MAX_MTU - MXGEFW_PAD;

	/* try to set nbufs to see if we can
	   use virtually contiguous jumbos */
	cmd.data0 = 0;
	if (mxge_send_cmd(sc, MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS,
	    &cmd) == 0)
		return MXGEFW_MAX_MTU - MXGEFW_PAD;

	/* otherwise, we're limited to MJUMPAGESIZE */
	return MJUMPAGESIZE - MXGEFW_PAD;
}
/*
 * Bring the firmware to a known-good state: reset it, enable dummy
 * RDMAs, size the interrupt queues (optionally re-exchanging their DMA
 * addresses), fetch the irq-ack / irq-deassert / intr-coalescing
 * register offsets published by the firmware, zero all per-slice
 * driver/firmware shared counters, and re-apply MAC address, promisc,
 * pause, multicast and throttle settings.  Returns 0 or an errno.
 */
static int
mxge_reset(mxge_softc_t *sc, int interrupts_setup)
{
	struct mxge_slice_state *ss;
	mxge_rx_done_t *rx_done;
	volatile uint32_t *irq_claim;
	mxge_cmd_t cmd;
	int slice, status;

	/* try to send a reset command to the card to see if it
	   is alive */
	memset(&cmd, 0, sizeof (cmd));
	status = mxge_send_cmd(sc, MXGEFW_CMD_RESET, &cmd);
	if (status != 0) {
		device_printf(sc->dev, "failed reset\n");
		return ENXIO;
	}

	mxge_dummy_rdma(sc, 1);

	/* set the intrq size */
	cmd.data0 = sc->rx_ring_size;
	status = mxge_send_cmd(sc, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd);

	/*
	 * Even though we already know how many slices are supported
	 * via mxge_slice_probe(), MXGEFW_CMD_GET_MAX_RSS_QUEUES
	 * has magic side effects, and must be called after a reset.
	 * It must be called prior to calling any RSS related cmds,
	 * including assigning an interrupt queue for anything but
	 * slice 0.  It must also be called *after*
	 * MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by
	 * the firmware to compute offsets.
	 */
	if (sc->num_slices > 1) {
		/* ask the maximum number of slices it supports */
		status = mxge_send_cmd(sc, MXGEFW_CMD_GET_MAX_RSS_QUEUES,
				       &cmd);
		if (status != 0) {
			device_printf(sc->dev,
				      "failed to get number of slices\n");
			return status;
		}
		/*
		 * MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior
		 * to setting up the interrupt queue DMA
		 */
		cmd.data0 = sc->num_slices;
		cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
		cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
		status = mxge_send_cmd(sc, MXGEFW_CMD_ENABLE_RSS_QUEUES,
				       &cmd);
		if (status != 0) {
			device_printf(sc->dev,
				      "failed to set number of slices\n");
			return status;
		}
	}

	if (interrupts_setup) {
		/* Now exchange information about interrupts */
		for (slice = 0; slice < sc->num_slices; slice++) {
			rx_done = &sc->ss[slice].rx_done;
			memset(rx_done->entry, 0, sc->rx_ring_size);
			cmd.data0 = MXGE_LOWPART_TO_U32(rx_done->dma.bus_addr);
			cmd.data1 = MXGE_HIGHPART_TO_U32(rx_done->dma.bus_addr);
			cmd.data2 = slice;
			status |= mxge_send_cmd(sc,
						MXGEFW_CMD_SET_INTRQ_DMA,
						&cmd);
		}
	}

	/* fetch the SRAM offsets of the firmware's interrupt registers */
	status |= mxge_send_cmd(sc,
				MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd);
	sc->intr_coal_delay_ptr = (volatile uint32_t *)(sc->sram + cmd.data0);
	status |= mxge_send_cmd(sc, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd);
	irq_claim = (volatile uint32_t *)(sc->sram + cmd.data0);
	status |= mxge_send_cmd(sc, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
				&cmd);
	sc->irq_deassert = (volatile uint32_t *)(sc->sram + cmd.data0);
	if (status != 0) {
		device_printf(sc->dev, "failed set interrupt parameters\n");
		return status;
	}

	*sc->intr_coal_delay_ptr = htobe32(sc->intr_coal_delay);

	/* run a DMA benchmark */
	(void) mxge_dma_test(sc, MXGEFW_DMA_TEST);

	for (slice = 0; slice < sc->num_slices; slice++) {
		ss = &sc->ss[slice];

		/* claim registers are spaced two 32-bit words apart */
		ss->irq_claim = irq_claim + (2 * slice);
		/* reset mcp/driver shared state back to 0 */
		ss->rx_done.idx = 0;
		ss->rx_done.cnt = 0;
		ss->tx.req = 0;
		ss->tx.done = 0;
		ss->tx.pkt_done = 0;
		ss->tx.queue_active = 0;
		ss->tx.activate = 0;
		ss->tx.deactivate = 0;
		ss->tx.wake = 0;
		ss->tx.defrag = 0;
		ss->tx.stall = 0;
		ss->rx_big.cnt = 0;
		ss->rx_small.cnt = 0;
		ss->lc.lro_bad_csum = 0;
		ss->lc.lro_queued = 0;
		ss->lc.lro_flushed = 0;
		if (ss->fw_stats != NULL) {
			bzero(ss->fw_stats, sizeof *ss->fw_stats);
		}
	}
	sc->rdma_tags_available = 15;

	/* re-apply the host-side configuration to the fresh firmware */
	status = mxge_update_mac_address(sc);
	mxge_change_promisc(sc, if_getflags(sc->ifp) & IFF_PROMISC);
	mxge_change_pause(sc, sc->pause);
	mxge_set_multicast_list(sc);
	if (sc->throttle) {
		cmd.data0 = sc->throttle;
		if (mxge_send_cmd(sc, MXGEFW_CMD_SET_THROTTLE_FACTOR,
				  &cmd)) {
			device_printf(sc->dev,
				      "can't enable throttle\n");
		}
	}
	return status;
}
/*
 * Sysctl handler for the transmit throttle factor: validates the
 * requested value against MXGE_MIN/MAX_THROTTLE and pushes it to the
 * firmware under the driver lock.
 */
static int
mxge_change_throttle(SYSCTL_HANDLER_ARGS)
{
	mxge_cmd_t cmd;
	mxge_softc_t *sc = arg1;
	unsigned int new_throttle;
	int err;

	new_throttle = sc->throttle;
	err = sysctl_handle_int(oidp, &new_throttle, arg2, req);
	if (err != 0)
		return err;

	if (new_throttle == sc->throttle)
		return 0;	/* unchanged: nothing to do */

	if (new_throttle < MXGE_MIN_THROTTLE ||
	    new_throttle > MXGE_MAX_THROTTLE)
		return EINVAL;

	mtx_lock(&sc->driver_mtx);
	cmd.data0 = new_throttle;
	err = mxge_send_cmd(sc, MXGEFW_CMD_SET_THROTTLE_FACTOR, &cmd);
	if (err == 0)
		sc->throttle = new_throttle;
	mtx_unlock(&sc->driver_mtx);
	return err;
}
/*
 * Sysctl handler for the interrupt coalescing delay: validates the
 * value (1..1000000 usecs) and writes it to the NIC's coalescing
 * register under the driver lock.
 */
static int
mxge_change_intr_coal(SYSCTL_HANDLER_ARGS)
{
	mxge_softc_t *sc = arg1;
	unsigned int delay;
	int err;

	delay = sc->intr_coal_delay;
	err = sysctl_handle_int(oidp, &delay, arg2, req);
	if (err != 0)
		return err;

	if (delay == sc->intr_coal_delay)
		return 0;	/* unchanged: nothing to do */

	if (delay == 0 || delay > 1000*1000)
		return EINVAL;

	mtx_lock(&sc->driver_mtx);
	*sc->intr_coal_delay_ptr = htobe32(delay);
	sc->intr_coal_delay = delay;
	mtx_unlock(&sc->driver_mtx);
	return err;
}
/*
 * Sysctl handler for flow control: forwards a changed setting to
 * mxge_change_pause() under the driver lock.
 */
static int
mxge_change_flow_control(SYSCTL_HANDLER_ARGS)
{
	mxge_softc_t *sc = arg1;
	unsigned int enabled;
	int err;

	enabled = sc->pause;
	err = sysctl_handle_int(oidp, &enabled, arg2, req);
	if (err != 0)
		return err;

	if (enabled == sc->pause)
		return 0;	/* unchanged: nothing to do */

	mtx_lock(&sc->driver_mtx);
	err = mxge_change_pause(sc, enabled);
	mtx_unlock(&sc->driver_mtx);
	return err;
}
/*
 * Read-only sysctl handler for firmware counters that are kept in
 * network (big-endian) byte order: byte-swap the counter pointed to by
 * arg1 and export it as a plain host-order int via arg2.
 */
static int
mxge_handle_be32(SYSCTL_HANDLER_ARGS)
{
	if (arg1 == NULL)
		return EFAULT;
	/* pass the swapped value by arg2; no writable backing store */
	arg2 = be32toh(*(int *)arg1);
	return sysctl_handle_int(oidp, NULL, arg2, req);
}
/*
 * Tear down the per-slice sysctl subtrees created by
 * mxge_add_sysctls().  Safe to call when they were never created.
 */
static void
mxge_rem_sysctls(mxge_softc_t *sc)
{
	struct mxge_slice_state *ss;
	int i;

	if (sc->slice_sysctl_tree == NULL)
		return;

	for (i = 0; i < sc->num_slices; i++) {
		ss = &sc->ss[i];
		if (ss == NULL || ss->sysctl_tree == NULL)
			continue;
		sysctl_ctx_free(&ss->sysctl_ctx);
		ss->sysctl_tree = NULL;
	}
	sysctl_ctx_free(&sc->slice_sysctl_ctx);
	sc->slice_sysctl_tree = NULL;
}
/*
 * Create the device's sysctl tree: static identification strings,
 * DMA benchmark results, tunables (coalescing, throttle, flow
 * control), byte-swapped firmware drop counters, and a per-slice
 * subtree of tx/rx/LRO debug counters.
 *
 * Fixes several copy-pasted sysctl descriptions that described the
 * wrong node (pcie_link_width, flow_control_enabled, rx_big_cnt,
 * tx_pkt_done) and a description string that concatenated to
 * "mergequeues".
 */
static void
mxge_add_sysctls(mxge_softc_t *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	mcp_irq_data_t *fw;
	struct mxge_slice_state *ss;
	int slice;
	char slice_num[8];

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
	fw = sc->ss[0].fw_stats;

	/* random information */
	SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
		       "firmware_version",
		       CTLFLAG_RD, sc->fw_version,
		       0, "firmware version");
	SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
		       "serial_number",
		       CTLFLAG_RD, sc->serial_number_string,
		       0, "serial number");
	SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
		       "product_code",
		       CTLFLAG_RD, sc->product_code_string,
		       0, "product_code");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		       "pcie_link_width",
		       CTLFLAG_RD, &sc->link_width,
		       0, "PCIe link width");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		       "tx_boundary",
		       CTLFLAG_RD, &sc->tx_boundary,
		       0, "tx_boundary");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		       "write_combine",
		       CTLFLAG_RD, &sc->wc,
		       0, "write combining PIO?");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		       "read_dma_MBs",
		       CTLFLAG_RD, &sc->read_dma,
		       0, "DMA Read speed in MB/s");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		       "write_dma_MBs",
		       CTLFLAG_RD, &sc->write_dma,
		       0, "DMA Write speed in MB/s");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		       "read_write_dma_MBs",
		       CTLFLAG_RD, &sc->read_write_dma,
		       0, "DMA concurrent Read/Write speed in MB/s");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		       "watchdog_resets",
		       CTLFLAG_RD, &sc->watchdog_resets,
		       0, "Number of times NIC was reset");

	/* performance related tunables */
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "intr_coal_delay", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    sc, 0, mxge_change_intr_coal, "I",
	    "interrupt coalescing delay in usecs");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "throttle", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    mxge_change_throttle, "I", "transmit throttling");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "flow_control_enabled",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    mxge_change_flow_control, "I",
	    "link-level flow control enabled");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		       "deassert_wait",
		       CTLFLAG_RW, &mxge_deassert_wait,
		       0, "Wait for IRQ line to go low in ihandler");

	/* stats block from firmware is in network byte order.
	   Need to swap it */
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "link_up", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &fw->link_up, 0, mxge_handle_be32, "I", "link up");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "rdma_tags_available", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &fw->rdma_tags_available, 0, mxge_handle_be32, "I",
	    "rdma_tags_available");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "dropped_bad_crc32", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &fw->dropped_bad_crc32, 0, mxge_handle_be32, "I",
	    "dropped_bad_crc32");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "dropped_bad_phy", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &fw->dropped_bad_phy, 0, mxge_handle_be32, "I", "dropped_bad_phy");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "dropped_link_error_or_filtered",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &fw->dropped_link_error_or_filtered, 0, mxge_handle_be32, "I",
	    "dropped_link_error_or_filtered");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "dropped_link_overflow",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &fw->dropped_link_overflow, 0, mxge_handle_be32, "I",
	    "dropped_link_overflow");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "dropped_multicast_filtered",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &fw->dropped_multicast_filtered, 0, mxge_handle_be32, "I",
	    "dropped_multicast_filtered");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "dropped_no_big_buffer",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &fw->dropped_no_big_buffer, 0, mxge_handle_be32, "I",
	    "dropped_no_big_buffer");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "dropped_no_small_buffer",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &fw->dropped_no_small_buffer, 0, mxge_handle_be32, "I",
	    "dropped_no_small_buffer");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "dropped_overrun",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &fw->dropped_overrun, 0, mxge_handle_be32, "I",
	    "dropped_overrun");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "dropped_pause", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &fw->dropped_pause, 0, mxge_handle_be32, "I", "dropped_pause");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "dropped_runt", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &fw->dropped_runt, 0, mxge_handle_be32, "I", "dropped_runt");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "dropped_unicast_filtered",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &fw->dropped_unicast_filtered, 0, mxge_handle_be32, "I",
	    "dropped_unicast_filtered");

	/* verbose printing? */
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		       "verbose",
		       CTLFLAG_RW, &mxge_verbose,
		       0, "verbose printing");

	/* add counters exported for debugging from all slices */
	sysctl_ctx_init(&sc->slice_sysctl_ctx);
	sc->slice_sysctl_tree =
		SYSCTL_ADD_NODE(&sc->slice_sysctl_ctx, children, OID_AUTO,
		    "slice", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");

	for (slice = 0; slice < sc->num_slices; slice++) {
		ss = &sc->ss[slice];
		sysctl_ctx_init(&ss->sysctl_ctx);
		ctx = &ss->sysctl_ctx;
		children = SYSCTL_CHILDREN(sc->slice_sysctl_tree);
		sprintf(slice_num, "%d", slice);
		ss->sysctl_tree =
			SYSCTL_ADD_NODE(ctx, children, OID_AUTO, slice_num,
			    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
		children = SYSCTL_CHILDREN(ss->sysctl_tree);
		SYSCTL_ADD_INT(ctx, children, OID_AUTO,
			       "rx_small_cnt",
			       CTLFLAG_RD, &ss->rx_small.cnt,
			       0, "rx_small_cnt");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO,
			       "rx_big_cnt",
			       CTLFLAG_RD, &ss->rx_big.cnt,
			       0, "rx_big_cnt");
		SYSCTL_ADD_U64(ctx, children, OID_AUTO,
			       "lro_flushed", CTLFLAG_RD, &ss->lc.lro_flushed,
			       0, "number of lro merge queues flushed");
		SYSCTL_ADD_U64(ctx, children, OID_AUTO,
			       "lro_bad_csum", CTLFLAG_RD, &ss->lc.lro_bad_csum,
			       0, "number of bad csums preventing LRO");
		SYSCTL_ADD_U64(ctx, children, OID_AUTO,
			       "lro_queued", CTLFLAG_RD, &ss->lc.lro_queued,
			       0, "number of frames appended to lro merge "
			       "queues");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO,
			       "tx_req",
			       CTLFLAG_RD, &ss->tx.req,
			       0, "tx_req");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO,
			       "tx_done",
			       CTLFLAG_RD, &ss->tx.done,
			       0, "tx_done");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO,
			       "tx_pkt_done",
			       CTLFLAG_RD, &ss->tx.pkt_done,
			       0, "tx_pkt_done");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO,
			       "tx_stall",
			       CTLFLAG_RD, &ss->tx.stall,
			       0, "tx_stall");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO,
			       "tx_wake",
			       CTLFLAG_RD, &ss->tx.wake,
			       0, "tx_wake");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO,
			       "tx_defrag",
			       CTLFLAG_RD, &ss->tx.defrag,
			       0, "tx_defrag");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO,
			       "tx_queue_active",
			       CTLFLAG_RD, &ss->tx.queue_active,
			       0, "tx_queue_active");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO,
			       "tx_activate",
			       CTLFLAG_RD, &ss->tx.activate,
			       0, "tx_activate");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO,
			       "tx_deactivate",
			       CTLFLAG_RD, &ss->tx.deactivate,
			       0, "tx_deactivate");
	}
}
/* copy an array of mcp_kreq_ether_send_t's to the mcp.  Copy
   backwards one at a time and handle ring wraps */
static inline void
mxge_submit_req_backwards(mxge_tx_ring_t *tx,
			  mcp_kreq_ether_send_t *src, int cnt)
{
	int idx, starting_slot;

	/*
	 * Write descriptors last-to-first so a later descriptor is never
	 * visible before an earlier one.  Slot 0 (the first request) is
	 * deliberately not written here; the caller submits it last to
	 * make the whole chain valid atomically.
	 */
	starting_slot = tx->req;
	while (cnt > 1) {
		cnt--;
		idx = (starting_slot + cnt) & tx->mask;
		mxge_pio_copy(&tx->lanai[idx],
			      &src[cnt], sizeof(*src));
		wmb();
	}
}
/*
 * copy an array of mcp_kreq_ether_send_t's to the mcp.  Copy
 * at most 32 bytes at a time, so as to avoid involving the software
 * pio handler in the nic.  We re-write the first segment's flags
 * to mark them valid only after writing the entire chain
 */
static inline void
mxge_submit_req(mxge_tx_ring_t *tx, mcp_kreq_ether_send_t *src,
		int cnt)
{
	int idx, i;
	uint32_t *src_ints;
	volatile uint32_t *dst_ints;
	mcp_kreq_ether_send_t *srcp;
	volatile mcp_kreq_ether_send_t *dstp, *dst;
	uint8_t last_flags;

	idx = tx->req & tx->mask;

	/*
	 * Clear the first descriptor's flags so the NIC ignores the
	 * chain until the valid flags are rewritten at the very end.
	 */
	last_flags = src->flags;
	src->flags = 0;
	wmb();
	dst = dstp = &tx->lanai[idx];
	srcp = src;

	if ((idx + cnt) < tx->mask) {
		/* no ring wrap: copy two descriptors (32 bytes) per PIO */
		for (i = 0; i < (cnt - 1); i += 2) {
			mxge_pio_copy(dstp, srcp, 2 * sizeof(*src));
			wmb(); /* force write every 32 bytes */
			srcp += 2;
			dstp += 2;
		}
	} else {
		/* submit all but the first request, and ensure
		   that it is submitted below */
		mxge_submit_req_backwards(tx, src, cnt);
		i = 0;
	}
	if (i < cnt) {
		/* submit the first request */
		mxge_pio_copy(dstp, srcp, sizeof(*src));
		wmb(); /* barrier before setting valid flag */
	}

	/* re-write the last 32-bits with the valid flags */
	src->flags = last_flags;
	src_ints = (uint32_t *)src;
	src_ints+=3;
	dst_ints = (volatile uint32_t *)dst;
	dst_ints+=3;
	*dst_ints = *src_ints;
	tx->req += cnt;
	wmb();
}
/*
 * Locate the IP (v4 or v6) and TCP headers of an outbound frame and
 * record their offsets/pointers in *pi for checksum and TSO offload.
 * Headers that are not contained in the first mbuf are copied into
 * ss->scratch so they can be read contiguously.  Returns 0 on success
 * or EINVAL for ethertypes / IPv6 next-headers that cannot be
 * offloaded, or for IPv6 TSO headers longer than max_tso6_hlen.
 */
static int
mxge_parse_tx(struct mxge_slice_state *ss, struct mbuf *m,
	      struct mxge_pkt_info *pi)
{
	struct ether_vlan_header *eh;
	uint16_t etype;
	int tso = m->m_pkthdr.csum_flags & (CSUM_TSO);
#if IFCAP_TSO6 && defined(INET6)
	int nxt;
#endif

	/* account for an 802.1Q tag, if present, in the IP offset */
	eh = mtod(m, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		pi->ip_off = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		pi->ip_off = ETHER_HDR_LEN;
	}

	switch (etype) {
	case ETHERTYPE_IP:
		/*
		 * ensure ip header is in first mbuf, copy it to a
		 * scratch buffer if not
		 */
		pi->ip = (struct ip *)(m->m_data + pi->ip_off);
		pi->ip6 = NULL;
		if (__predict_false(m->m_len < pi->ip_off + sizeof(*pi->ip))) {
			m_copydata(m, 0, pi->ip_off + sizeof(*pi->ip),
				   ss->scratch);
			pi->ip = (struct ip *)(ss->scratch + pi->ip_off);
		}
		pi->ip_hlen = pi->ip->ip_hl << 2;
		if (!tso)
			return 0;

		/* for TSO the TCP header must be readable as well */
		if (__predict_false(m->m_len < pi->ip_off + pi->ip_hlen +
		    sizeof(struct tcphdr))) {
			m_copydata(m, 0, pi->ip_off + pi->ip_hlen +
				   sizeof(struct tcphdr), ss->scratch);
			pi->ip = (struct ip *)(ss->scratch + pi->ip_off);
		}
		pi->tcp = (struct tcphdr *)((char *)pi->ip + pi->ip_hlen);
		break;
#if IFCAP_TSO6 && defined(INET6)
	case ETHERTYPE_IPV6:
		pi->ip6 = (struct ip6_hdr *)(m->m_data + pi->ip_off);
		if (__predict_false(m->m_len < pi->ip_off + sizeof(*pi->ip6))) {
			m_copydata(m, 0, pi->ip_off + sizeof(*pi->ip6),
				   ss->scratch);
			pi->ip6 = (struct ip6_hdr *)(ss->scratch + pi->ip_off);
		}
		/* walk the extension-header chain to the transport header */
		nxt = 0;
		pi->ip_hlen = ip6_lasthdr(m, pi->ip_off, IPPROTO_IPV6, &nxt);
		pi->ip_hlen -= pi->ip_off;
		if (nxt != IPPROTO_TCP && nxt != IPPROTO_UDP)
			return EINVAL;
		if (!tso)
			return 0;

		if (pi->ip_off + pi->ip_hlen > ss->sc->max_tso6_hlen)
			return EINVAL;
		if (__predict_false(m->m_len < pi->ip_off + pi->ip_hlen +
		    sizeof(struct tcphdr))) {
			m_copydata(m, 0, pi->ip_off + pi->ip_hlen +
				   sizeof(struct tcphdr), ss->scratch);
			pi->ip6 = (struct ip6_hdr *)(ss->scratch + pi->ip_off);
		}
		pi->tcp = (struct tcphdr *)((char *)pi->ip6 + pi->ip_hlen);
		break;
#endif
	default:
		return EINVAL;
	}
	return 0;
}
#if IFCAP_TSO4

/*
 * Build and submit the send-descriptor chain for one TSO packet whose
 * busdma segments are already in tx->seg_list.  Fixes up the TCP
 * checksum to the pseudo-header sum the NIC expects, then walks each
 * DMA segment, splitting it at MSS boundaries ("cuts") and maintaining
 * the per-request rdma_count bookkeeping the firmware requires.
 * Drops the mbuf (and logs once) if the chain would exceed
 * tx->max_desc.
 */
static void
mxge_encap_tso(struct mxge_slice_state *ss, struct mbuf *m,
	       int busdma_seg_cnt, struct mxge_pkt_info *pi)
{
	mxge_tx_ring_t *tx;
	mcp_kreq_ether_send_t *req;
	bus_dma_segment_t *seg;
	uint32_t low, high_swapped;
	int len, seglen, cum_len, cum_len_next;
	int next_is_first, chop, cnt, rdma_count, small;
	uint16_t pseudo_hdr_offset, cksum_offset, mss, sum;
	uint8_t flags, flags_next;
	static int once;

	mss = m->m_pkthdr.tso_segsz;

	/* negative cum_len signifies to the
	 * send loop that we are still in the
	 * header portion of the TSO packet.
	 */

	cksum_offset = pi->ip_off + pi->ip_hlen;
	cum_len = -(cksum_offset + (pi->tcp->th_off << 2));

	/* TSO implies checksum offload on this hardware */
	if (__predict_false((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_TCP_IPV6)) == 0)) {
		/*
		 * If packet has full TCP csum, replace it with pseudo hdr
		 * sum that the NIC expects, otherwise the NIC will emit
		 * packets with bad TCP checksums.
		 */
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		if (pi->ip6) {
#if (CSUM_TCP_IPV6 != 0) && defined(INET6)
			m->m_pkthdr.csum_flags |= CSUM_TCP_IPV6;
			sum = in6_cksum_pseudo(pi->ip6,
					       m->m_pkthdr.len - cksum_offset,
					       IPPROTO_TCP, 0);
#endif
		} else {
#ifdef INET
			m->m_pkthdr.csum_flags |= CSUM_TCP;
			sum = in_pseudo(pi->ip->ip_src.s_addr,
					pi->ip->ip_dst.s_addr,
					htons(IPPROTO_TCP + (m->m_pkthdr.len -
							     cksum_offset)));
#endif
		}
		m_copyback(m, offsetof(struct tcphdr, th_sum) +
			   cksum_offset, sizeof(sum), (caddr_t)&sum);
	}
	flags = MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST;

	/* for TSO, pseudo_hdr_offset holds mss.
	 * The firmware figures out where to put
	 * the checksum by parsing the header. */
	pseudo_hdr_offset = htobe16(mss);

	if (pi->ip6) {
		/*
		 * for IPv6 TSO, the "checksum offset" is re-purposed
		 * to store the TCP header len
		 */
		cksum_offset = (pi->tcp->th_off << 2);
	}

	tx = &ss->tx;
	req = tx->req_list;
	seg = tx->seg_list;
	cnt = 0;
	rdma_count = 0;
	/* "rdma_count" is the number of RDMAs belonging to the
	 * current packet BEFORE the current send request. For
	 * non-TSO packets, this is equal to "count".
	 * For TSO packets, rdma_count needs to be reset
	 * to 0 after a segment cut.
	 *
	 * The rdma_count field of the send request is
	 * the number of RDMAs of the packet starting at
	 * that request. For TSO send requests with one ore more cuts
	 * in the middle, this is the number of RDMAs starting
	 * after the last cut in the request. All previous
	 * segments before the last cut implicitly have 1 RDMA.
	 *
	 * Since the number of RDMAs is not known beforehand,
	 * it must be filled-in retroactively - after each
	 * segmentation cut or at the end of the entire packet.
	 */

	while (busdma_seg_cnt) {
		/* Break the busdma segment up into pieces*/
		low = MXGE_LOWPART_TO_U32(seg->ds_addr);
		high_swapped = 	htobe32(MXGE_HIGHPART_TO_U32(seg->ds_addr));
		len = seg->ds_len;

		while (len) {
			flags_next = flags & ~MXGEFW_FLAGS_FIRST;
			seglen = len;
			cum_len_next = cum_len + seglen;
			/* retroactively fill in the RDMA count of the
			   run that ends at the previous request */
			(req-rdma_count)->rdma_count = rdma_count + 1;
			if (__predict_true(cum_len >= 0)) {
				/* payload */
				chop = (cum_len_next > mss);
				cum_len_next = cum_len_next % mss;
				next_is_first = (cum_len_next == 0);
				flags |= chop * MXGEFW_FLAGS_TSO_CHOP;
				flags_next |= next_is_first *
					MXGEFW_FLAGS_FIRST;
				rdma_count |= -(chop | next_is_first);
				rdma_count += chop & !next_is_first;
			} else if (cum_len_next >= 0) {
				/* header ends */
				rdma_count = -1;
				cum_len_next = 0;
				seglen = -cum_len;
				small = (mss <= MXGEFW_SEND_SMALL_SIZE);
				flags_next = MXGEFW_FLAGS_TSO_PLD |
					MXGEFW_FLAGS_FIRST |
					(small * MXGEFW_FLAGS_SMALL);
			    }

			req->addr_high = high_swapped;
			req->addr_low = htobe32(low);
			req->pseudo_hdr_offset = pseudo_hdr_offset;
			req->pad = 0;
			req->rdma_count = 1;
			req->length = htobe16(seglen);
			req->cksum_offset = cksum_offset;
			req->flags = flags | ((cum_len & 1) *
					      MXGEFW_FLAGS_ALIGN_ODD);
			low += seglen;
			len -= seglen;
			cum_len = cum_len_next;
			flags = flags_next;
			req++;
			cnt++;
			rdma_count++;
			/* IPv4: cksum_offset counts down as header bytes
			   are consumed (IPv6 re-purposes it, see above) */
			if (cksum_offset != 0 && !pi->ip6) {
				if (__predict_false(cksum_offset > seglen))
					cksum_offset -= seglen;
				else
					cksum_offset = 0;
			}
			if (__predict_false(cnt > tx->max_desc))
				goto drop;
		}
		busdma_seg_cnt--;
		seg++;
	}
	(req-rdma_count)->rdma_count = rdma_count;

	/* walk back, marking the tail of the final segment */
	do {
		req--;
		req->flags |= MXGEFW_FLAGS_TSO_LAST;
	} while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP | MXGEFW_FLAGS_FIRST)));

	tx->info[((cnt - 1) + tx->req) & tx->mask].flag = 1;
	mxge_submit_req(tx, tx->req_list, cnt);

	if ((ss->sc->num_slices > 1) && tx->queue_active == 0) {
		/* tell the NIC to start polling this slice */
		*tx->send_go = 1;
		tx->queue_active = 1;
		tx->activate++;
		wmb();
	}

	return;

drop:
	bus_dmamap_unload(tx->dmat, tx->info[tx->req & tx->mask].map);
	m_freem(m);
	ss->oerrors++;
	if (!once) {
		printf("tx->max_desc exceeded via TSO!\n");
		printf("mss = %d, %ld, %d!\n", mss,
		       (long)seg - (long)tx->seg_list, tx->max_desc);
		once = 1;
	}
	return;

}
#endif /* IFCAP_TSO4 */
#ifdef MXGE_NEW_VLAN_API
/*
* We reproduce the software vlan tag insertion from
* net/if_vlan.c:vlan_start() here so that we can advertise "hardware"
* vlan tag insertion. We need to advertise this in order to have the
* vlan interface respect our csum offload flags.
*/
/*
 * Software 802.1Q tag insertion, cloned from net/if_vlan.c:vlan_start(),
 * so that we can claim "hardware" vlan tag insertion and thereby have the
 * vlan interface honor our checksum-offload flags.  Returns the (possibly
 * reallocated) mbuf, or NULL if memory could not be obtained; on NULL the
 * chain has already been freed by M_PREPEND/m_pullup.
 */
static struct mbuf *
mxge_vlan_tag_insert(struct mbuf *m)
{
	struct ether_vlan_header *evh;

	/* make room for the 4-byte 802.1Q shim */
	M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
	if (m != NULL && m->m_len < sizeof(*evh))
		m = m_pullup(m, sizeof(*evh));
	if (__predict_false(m == NULL))
		return NULL;

	/*
	 * Slide the Ethernet addresses down and fill in the
	 * encapsulation header; the inner ethertype is already in place.
	 */
	evh = mtod(m, struct ether_vlan_header *);
	bcopy((char *)evh + ETHER_VLAN_ENCAP_LEN,
	    (char *)evh, ETHER_HDR_LEN - ETHER_TYPE_LEN);
	evh->evl_encap_proto = htons(ETHERTYPE_VLAN);
	evh->evl_tag = htons(m->m_pkthdr.ether_vtag);
	m->m_flags &= ~M_VLANTAG;
	return m;
}
#endif /* MXGE_NEW_VLAN_API */
/*
 * Map an outbound mbuf chain for DMA and convert it into a list of
 * firmware send descriptors, then submit them to the NIC.  TSO frames
 * are diverted to mxge_encap_tso().  Runs with the slice tx mutex held.
 * On any failure the mbuf is freed and ss->oerrors is bumped.
 */
static void
mxge_encap(struct mxge_slice_state *ss, struct mbuf *m)
{
	struct mxge_pkt_info pi = {0,0,0,0};
	mxge_softc_t *sc;
	mcp_kreq_ether_send_t *req;
	bus_dma_segment_t *seg;
	struct mbuf *m_tmp;
	mxge_tx_ring_t *tx;
	int cnt, cum_len, err, i, idx, odd_flag;
	uint16_t pseudo_hdr_offset;
	uint8_t flags, cksum_offset;

	sc = ss->sc;
	tx = &ss->tx;

#ifdef MXGE_NEW_VLAN_API
	/* software-insert the vlan tag so csum offload flags are honored */
	if (m->m_flags & M_VLANTAG) {
		m = mxge_vlan_tag_insert(m);
		if (__predict_false(m == NULL))
			goto drop_without_m;
	}
#endif
	/* locate IP/L4 header offsets needed for checksum/TSO offload */
	if (m->m_pkthdr.csum_flags &
	    (CSUM_TSO | CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)) {
		if (mxge_parse_tx(ss, m, &pi))
			goto drop;
	}

	/* (try to) map the frame for DMA */
	idx = tx->req & tx->mask;
	err = bus_dmamap_load_mbuf_sg(tx->dmat, tx->info[idx].map,
	    m, tx->seg_list, &cnt,
	    BUS_DMA_NOWAIT);
	if (__predict_false(err == EFBIG)) {
		/* Too many segments in the chain.  Try
		   to defrag */
		m_tmp = m_defrag(m, M_NOWAIT);
		if (m_tmp == NULL) {
			goto drop;
		}
		ss->tx.defrag++;
		m = m_tmp;
		err = bus_dmamap_load_mbuf_sg(tx->dmat,
		    tx->info[idx].map,
		    m, tx->seg_list, &cnt,
		    BUS_DMA_NOWAIT);
	}
	if (__predict_false(err != 0)) {
		device_printf(sc->dev, "bus_dmamap_load_mbuf_sg returned %d"
		    " packet len = %d\n", err, m->m_pkthdr.len);
		goto drop;
	}
	bus_dmamap_sync(tx->dmat, tx->info[idx].map,
	    BUS_DMASYNC_PREWRITE);
	tx->info[idx].m = m;

#if IFCAP_TSO4
	/* TSO is different enough, we handle it in another routine */
	if (m->m_pkthdr.csum_flags & (CSUM_TSO)) {
		mxge_encap_tso(ss, m, cnt, &pi);
		return;
	}
#endif

	req = tx->req_list;
	cksum_offset = 0;
	pseudo_hdr_offset = 0;
	flags = MXGEFW_FLAGS_NO_TSO;

	/* checksum offloading? */
	if (m->m_pkthdr.csum_flags &
	    (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)) {
		/* tell the firmware where the L4 checksum field lives */
		cksum_offset = pi.ip_off + pi.ip_hlen;
		pseudo_hdr_offset = cksum_offset + m->m_pkthdr.csum_data;
		pseudo_hdr_offset = htobe16(pseudo_hdr_offset);
		req->cksum_offset = cksum_offset;
		flags |= MXGEFW_FLAGS_CKSUM;
		odd_flag = MXGEFW_FLAGS_ALIGN_ODD;
	} else {
		odd_flag = 0;
	}
	if (m->m_pkthdr.len < MXGEFW_SEND_SMALL_SIZE)
		flags |= MXGEFW_FLAGS_SMALL;

	/* convert segments into a request list */
	cum_len = 0;
	seg = tx->seg_list;
	req->flags = MXGEFW_FLAGS_FIRST;
	for (i = 0; i < cnt; i++) {
		req->addr_low =
		    htobe32(MXGE_LOWPART_TO_U32(seg->ds_addr));
		req->addr_high =
		    htobe32(MXGE_HIGHPART_TO_U32(seg->ds_addr));
		req->length = htobe16(seg->ds_len);
		req->cksum_offset = cksum_offset;
		/* cksum_offset only applies until the csum field is passed */
		if (cksum_offset > seg->ds_len)
			cksum_offset -= seg->ds_len;
		else
			cksum_offset = 0;
		req->pseudo_hdr_offset = pseudo_hdr_offset;
		req->pad = 0; /* complete solid 16-byte block */
		req->rdma_count = 1;
		req->flags |= flags | ((cum_len & 1) * odd_flag);
		cum_len += seg->ds_len;
		seg++;
		req++;
		req->flags = 0;
	}
	req--;
	/* pad runts to 60 bytes using the shared zero-filled DMA buffer */
	if (cum_len < 60) {
		req++;
		req->addr_low =
		    htobe32(MXGE_LOWPART_TO_U32(sc->zeropad_dma.bus_addr));
		req->addr_high =
		    htobe32(MXGE_HIGHPART_TO_U32(sc->zeropad_dma.bus_addr));
		req->length = htobe16(60 - cum_len);
		req->cksum_offset = 0;
		req->pseudo_hdr_offset = pseudo_hdr_offset;
		req->pad = 0; /* complete solid 16-byte block */
		req->rdma_count = 1;
		req->flags |= flags | ((cum_len & 1) * odd_flag);
		cnt++;
	}

	tx->req_list[0].rdma_count = cnt;
#if 0
	/* print what the firmware will see */
	for (i = 0; i < cnt; i++) {
		printf("%d: addr: 0x%x 0x%x len:%d pso%d,"
		    "cso:%d, flags:0x%x, rdma:%d\n",
		    i, (int)ntohl(tx->req_list[i].addr_high),
		    (int)ntohl(tx->req_list[i].addr_low),
		    (int)ntohs(tx->req_list[i].length),
		    (int)ntohs(tx->req_list[i].pseudo_hdr_offset),
		    tx->req_list[i].cksum_offset, tx->req_list[i].flags,
		    tx->req_list[i].rdma_count);
	}
	printf("--------------\n");
#endif
	/* mark the last descriptor so mxge_tx_done() can count packets */
	tx->info[((cnt - 1) + tx->req) & tx->mask].flag = 1;
	mxge_submit_req(tx, tx->req_list, cnt);

	if ((ss->sc->num_slices > 1) && tx->queue_active == 0) {
		/* tell the NIC to start polling this slice */
		*tx->send_go = 1;
		tx->queue_active = 1;
		tx->activate++;
		wmb();
	}
	return;

drop:
	m_freem(m);
drop_without_m:
	ss->oerrors++;
	return;
}
/*
 * if_qflush handler: discard every packet queued for transmit, both in
 * the per-slice buf_rings and in the legacy interface send queue.
 */
static void
mxge_qflush(if_t ifp)
{
	mxge_softc_t *sc = if_getsoftc(ifp);
	struct mbuf *m;
	int s;

	for (s = 0; s < sc->num_slices; s++) {
		mxge_tx_ring_t *tx = &sc->ss[s].tx;

		mtx_lock(&tx->mtx);
		for (;;) {
			m = buf_ring_dequeue_sc(tx->br);
			if (m == NULL)
				break;
			m_freem(m);
		}
		mtx_unlock(&tx->mtx);
	}
	if_qflush(ifp);
}
/*
 * Pull packets off the slice's buf_ring and hand them to the NIC while
 * transmit descriptors remain.  If we fill the ring with work still
 * queued, set IFF_DRV_OACTIVE so mxge_tx_done() restarts us later.
 * Caller holds the slice tx mutex.
 */
static inline void
mxge_start_locked(struct mxge_slice_state *ss)
{
	mxge_tx_ring_t *tx = &ss->tx;
	if_t ifp = ss->sc->ifp;
	struct mbuf *m;

	while (tx->mask - (tx->req - tx->done) > tx->max_desc) {
		if ((m = drbr_dequeue(ifp, tx->br)) == NULL)
			return;
		/* let BPF see it, then hand it to the NIC */
		BPF_MTAP(ifp, m);
		mxge_encap(ss, m);
	}

	/* ran out of transmit slots with packets still pending */
	if ((ss->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
	    !drbr_empty(ifp, tx->br)) {
		ss->if_drv_flags |= IFF_DRV_OACTIVE;
		tx->stall++;
	}
}
/*
 * Transmit one mbuf on this slice, bypassing the buf_ring when the ring
 * is empty and descriptors are available; otherwise queue it.  Caller
 * holds the slice tx mutex.  Returns 0 or a drbr_enqueue() error.
 */
static int
mxge_transmit_locked(struct mxge_slice_state *ss, struct mbuf *m)
{
	mxge_tx_ring_t *tx = &ss->tx;
	if_t ifp = ss->sc->ifp;
	int avail, err;

	/* interface down or stalled: just queue the packet */
	if ((ss->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (drbr_enqueue(ifp, tx->br, m));

	avail = tx->mask - (tx->req - tx->done);
	if (!drbr_needs_enqueue(ifp, tx->br) && avail > tx->max_desc) {
		/* fast path: show it to BPF and hand it to the NIC */
		BPF_MTAP(ifp, m);
		mxge_encap(ss, m);
	} else {
		err = drbr_enqueue(ifp, tx->br, m);
		if (err != 0)
			return (err);
	}
	if (!drbr_empty(ifp, tx->br))
		mxge_start_locked(ss);
	return (0);
}
/*
 * if_transmit handler.  Selects a slice from the mbuf's flowid and
 * transmits under the slice lock when it can be taken without blocking;
 * otherwise the packet is simply queued on that slice's buf_ring.
 */
static int
mxge_transmit(if_t ifp, struct mbuf *m)
{
	mxge_softc_t *sc = if_getsoftc(ifp);
	struct mxge_slice_state *ss;
	mxge_tx_ring_t *tx;
	int err, slice;

	/* num_slices is always a power of 2, so a mask selects the slice */
	slice = m->m_pkthdr.flowid & (sc->num_slices - 1);
	ss = &sc->ss[slice];
	tx = &ss->tx;

	if (!mtx_trylock(&tx->mtx))
		return (drbr_enqueue(ifp, tx->br, m));
	err = mxge_transmit_locked(ss, m);
	mtx_unlock(&tx->mtx);
	return (err);
}
/*
 * Legacy if_start handler; drives slice 0 only.
 */
static void
mxge_start(if_t ifp)
{
	mxge_softc_t *sc = if_getsoftc(ifp);
	struct mxge_slice_state *ss = &sc->ss[0];

	mtx_lock(&ss->tx.mtx);
	mxge_start_locked(ss);
	mtx_unlock(&ss->tx.mtx);
}
/*
 * copy an array of mcp_kreq_ether_recv_t's to the mcp.  Copy
 * at most 32 bytes at a time, so as to avoid involving the software
 * pio handler in the nic.  We re-write the first segment's low
 * DMA address to mark it valid only after we write the entire chunk
 * in a burst
 */
static inline void
mxge_submit_8rx(volatile mcp_kreq_ether_recv_t *dst,
    mcp_kreq_ether_recv_t *src)
{
	uint32_t low;

	/* temporarily poison slot 0's address so the NIC ignores the
	   chunk until every descriptor has landed */
	low = src->addr_low;
	src->addr_low = 0xffffffff;
	mxge_pio_copy(dst, src, 4 * sizeof (*src));
	wmb();
	mxge_pio_copy(dst + 4, src + 4, 4 * sizeof (*src));
	wmb();
	/* restore the real address, and write it last to validate */
	src->addr_low = low;
	dst->addr_low = low;
	wmb();
}
/*
 * Allocate and DMA-map a small (MHLEN) receive mbuf for ring slot idx,
 * recording its bus address in the shadow ring.  Returns 0 or an errno.
 * Note: descriptors are pushed to the NIC in batches of 8, so the
 * submit happens even on the failure path once slot (idx & 7) == 7.
 */
static int
mxge_get_buf_small(struct mxge_slice_state *ss, bus_dmamap_t map, int idx)
{
	bus_dma_segment_t seg;
	struct mbuf *m;
	mxge_rx_ring_t *rx = &ss->rx_small;
	int cnt, err;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		rx->alloc_fail++;
		err = ENOBUFS;
		goto done;
	}
	m->m_len = MHLEN;
	err = bus_dmamap_load_mbuf_sg(rx->dmat, map, m,
	    &seg, &cnt, BUS_DMA_NOWAIT);
	if (err != 0) {
		m_free(m);
		goto done;
	}
	rx->info[idx].m = m;
	/* stash the 64-bit bus address, big-endian, in the shadow ring */
	rx->shadow[idx].addr_low =
	    htobe32(MXGE_LOWPART_TO_U32(seg.ds_addr));
	rx->shadow[idx].addr_high =
	    htobe32(MXGE_HIGHPART_TO_U32(seg.ds_addr));

done:
	/* hand descriptors to the NIC eight at a time */
	if ((idx & 7) == 7)
		mxge_submit_8rx(&rx->lanai[idx - 7], &rx->shadow[idx - 7]);
	return err;
}
/*
 * Allocate and DMA-map a big (cluster-sized) receive mbuf for ring slot
 * idx, recording its bus address in the shadow ring.  Returns 0 or an
 * errno.  The ring advances by rx->nbufs slots per frame, hence the
 * loop over nbufs when deciding whether a batch of 8 is ready to push.
 */
static int
mxge_get_buf_big(struct mxge_slice_state *ss, bus_dmamap_t map, int idx)
{
	bus_dma_segment_t seg[3];
	struct mbuf *m;
	mxge_rx_ring_t *rx = &ss->rx_big;
	int cnt, err, i;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx->cl_size);
	if (m == NULL) {
		rx->alloc_fail++;
		err = ENOBUFS;
		goto done;
	}
	m->m_len = rx->mlen;
	err = bus_dmamap_load_mbuf_sg(rx->dmat, map, m,
	    seg, &cnt, BUS_DMA_NOWAIT);
	if (err != 0) {
		m_free(m);
		goto done;
	}
	rx->info[idx].m = m;
	/* only the first segment's address is recorded here */
	rx->shadow[idx].addr_low =
	    htobe32(MXGE_LOWPART_TO_U32(seg->ds_addr));
	rx->shadow[idx].addr_high =
	    htobe32(MXGE_HIGHPART_TO_U32(seg->ds_addr));

done:
	/* hand descriptors to the NIC eight at a time */
	for (i = 0; i < rx->nbufs; i++) {
		if ((idx & 7) == 7) {
			mxge_submit_8rx(&rx->lanai[idx - 7],
			    &rx->shadow[idx - 7]);
		}
		idx++;
	}
	return err;
}
#ifdef INET6
/*
 * Plain ones-complement style 16-bit word sum over len bytes (len is
 * expected to be even; an odd len reads one byte past the data).  The
 * 32-bit accumulator is folded twice so all carries land in the low
 * 16 bits.  The result is NOT complemented.
 */
static uint16_t
mxge_csum_generic(uint16_t *raw, int len)
{
	uint32_t sum;
	int off;

	sum = 0;
	for (off = 0; off < len; off += 2)
		sum += raw[off / 2];
	sum = (sum >> 16) + (sum & 0xffff);
	sum = (sum >> 16) + (sum & 0xffff);
	return (uint16_t)sum;
}
/*
 * Validate the firmware's partial checksum for an IPv6 frame.  Returns
 * 0 when the TCP/UDP checksum verifies, non-zero otherwise.  p points
 * at the IPv6 header (just past the Ethernet header).
 */
static inline uint16_t
mxge_rx_csum6(void *p, struct mbuf *m, uint32_t csum)
{
	uint32_t partial;
	int nxt, cksum_offset;
	struct ip6_hdr *ip6 = p;
	uint16_t c;

	nxt = ip6->ip6_nxt;
	cksum_offset = sizeof (*ip6) + ETHER_HDR_LEN;
	/* walk extension headers to find the transport header, if any */
	if (nxt != IPPROTO_TCP && nxt != IPPROTO_UDP) {
		cksum_offset = ip6_lasthdr(m, ETHER_HDR_LEN,
		    IPPROTO_IPV6, &nxt);
		if (nxt != IPPROTO_TCP && nxt != IPPROTO_UDP)
			return (1);
	}

	/*
	 * IPv6 headers do not contain a checksum, and hence
	 * do not checksum to zero, so they don't "fall out"
	 * of the partial checksum calculation like IPv4
	 * headers do.  We need to fix the partial checksum by
	 * subtracting the checksum of the IPv6 header.
	 */
	partial = mxge_csum_generic((uint16_t *)ip6, cksum_offset -
	    ETHER_HDR_LEN);
	/* ones-complement subtract: add ~partial plus end-around carry */
	csum += ~partial;
	csum += (csum < ~partial);
	csum = (csum >> 16) + (csum & 0xFFFF);
	csum = (csum >> 16) + (csum & 0xFFFF);
	c = in6_cksum_pseudo(ip6, m->m_pkthdr.len - cksum_offset, nxt,
	    csum);
	c ^= 0xffff;
	return (c);
}
#endif /* INET6 */
/*
 * Myri10GE hardware checksums are not valid if the sender
 * padded the frame with non-zero padding.  This is because
 * the firmware just does a simple 16-bit 1s complement
 * checksum across the entire frame, excluding the first 14
 * bytes.  It is best to simply to check the checksum and
 * tell the stack about it only if the checksum is good.
 * Returns 0 when the checksum verifies, non-zero otherwise.
 */
static inline uint16_t
mxge_rx_csum(struct mbuf *m, int csum)
{
	struct ether_header *eh;
#ifdef INET
	struct ip *ip;
#endif
#if defined(INET) || defined(INET6)
	int cap = if_getcapenable(m->m_pkthdr.rcvif);
#endif
	uint16_t c, etype;

	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);
	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		if ((cap & IFCAP_RXCSUM) == 0)
			return (1);
		ip = (struct ip *)(eh + 1);
		if (ip->ip_p != IPPROTO_TCP && ip->ip_p != IPPROTO_UDP)
			return (1);
		/* fold the pseudo-header into the firmware's partial sum;
		   a correct packet yields 0 after the final complement */
		c = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htonl(ntohs(csum) + ntohs(ip->ip_len) -
			(ip->ip_hl << 2) + ip->ip_p));
		c ^= 0xffff;
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		if ((cap & IFCAP_RXCSUM_IPV6) == 0)
			return (1);
		c = mxge_rx_csum6((eh + 1), m, csum);
		break;
#endif
	default:
		/* non-IP frames cannot be validated */
		c = 1;
	}
	return (c);
}
/*
 * Strip an 802.1Q header from a received frame, moving the tag into
 * mbuf metadata, and repair the firmware's partial checksum (which
 * covered the 4-byte shim) so later validation still works.
 */
static void
mxge_vlan_tag_remove(struct mbuf *m, uint32_t *csum)
{
	struct ether_vlan_header *evl;
	uint32_t partial;

	evl = mtod(m, struct ether_vlan_header *);

	/*
	 * fix checksum by subtracting ETHER_VLAN_ENCAP_LEN bytes
	 * after what the firmware thought was the end of the ethernet
	 * header.
	 */

	/* put checksum into host byte order */
	*csum = ntohs(*csum);
	partial = ntohl(*(uint32_t *)(mtod(m, char *) + ETHER_HDR_LEN));
	/* ones-complement subtract with end-around carry, folded twice */
	(*csum) += ~partial;
	(*csum) += ((*csum) < ~partial);
	(*csum) = ((*csum) >> 16) + ((*csum) & 0xFFFF);
	(*csum) = ((*csum) >> 16) + ((*csum) & 0xFFFF);

	/* restore checksum to network byte order;
	   later consumers expect this */
	*csum = htons(*csum);

	/* save the tag */
#ifdef MXGE_NEW_VLAN_API
	m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag);
#else
	{
		struct m_tag *mtag;
		mtag = m_tag_alloc(MTAG_VLAN, MTAG_VLAN_TAG, sizeof(u_int),
		    M_NOWAIT);
		if (mtag == NULL)
			return;
		VLAN_TAG_VALUE(mtag) = ntohs(evl->evl_tag);
		m_tag_prepend(m, mtag);
	}
#endif
	m->m_flags |= M_VLANTAG;

	/*
	 * Remove the 802.1q header by copying the Ethernet
	 * addresses over it and adjusting the beginning of
	 * the data in the mbuf.  The encapsulated Ethernet
	 * type field is already in place.
	 */
	bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
	    ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}
/*
 * Process one received frame that landed in a big (cluster) receive
 * buffer: refill the ring slot, strip vlan tags, validate the checksum,
 * and pass the frame to LRO or directly up the stack.
 */
static inline void
mxge_rx_done_big(struct mxge_slice_state *ss, uint32_t len,
    uint32_t csum, int lro)
{
	mxge_softc_t *sc;
	if_t ifp;
	struct mbuf *m;
	struct ether_header *eh;
	mxge_rx_ring_t *rx;
	bus_dmamap_t old_map;
	int idx;

	sc = ss->sc;
	ifp = sc->ifp;
	rx = &ss->rx_big;
	idx = rx->cnt & rx->mask;
	rx->cnt += rx->nbufs;

	/* save a pointer to the received mbuf */
	m = rx->info[idx].m;
	/* try to replace the received mbuf */
	if (mxge_get_buf_big(ss, rx->extra_map, idx)) {
		/* drop the frame -- the old mbuf is re-cycled */
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		return;
	}

	/* unmap the received buffer */
	old_map = rx->info[idx].map;
	bus_dmamap_sync(rx->dmat, old_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(rx->dmat, old_map);

	/* swap the bus_dmamap_t's */
	rx->info[idx].map = rx->extra_map;
	rx->extra_map = old_map;

	/* mcp implicitly skips 1st 2 bytes so that packet is properly
	 * aligned */
	m->m_data += MXGEFW_PAD;

	m->m_pkthdr.rcvif = ifp;
	m->m_len = m->m_pkthdr.len = len;
	ss->ipackets++;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
		mxge_vlan_tag_remove(m, &csum);
	}
	/* flowid only valid if RSS hashing is enabled */
	if (sc->num_slices > 1) {
		m->m_pkthdr.flowid = (ss - sc->ss);
		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
	}
	/* if the checksum is valid, mark it in the mbuf header */
	if ((if_getcapenable(ifp) & (IFCAP_RXCSUM_IPV6 | IFCAP_RXCSUM)) &&
	    (0 == mxge_rx_csum(m, csum))) {
		/* Tell the stack that the checksum is good */
		m->m_pkthdr.csum_data = 0xffff;
		m->m_pkthdr.csum_flags = CSUM_PSEUDO_HDR |
		    CSUM_DATA_VALID;

#if defined(INET) || defined (INET6)
		if (lro && (0 == tcp_lro_rx(&ss->lc, m, 0)))
			return;
#endif
	}
	/* pass the frame up the stack */
	if_input(ifp, m);
}
/*
 * Process one received frame that fit in a small (MHLEN) receive
 * buffer.  Mirrors mxge_rx_done_big(); the ring advances one slot per
 * frame here.  NOTE(review): this path passes `csum` to tcp_lro_rx()
 * while the big path passes 0 — presumably intentional, but worth
 * confirming against the LRO API.
 */
static inline void
mxge_rx_done_small(struct mxge_slice_state *ss, uint32_t len,
    uint32_t csum, int lro)
{
	mxge_softc_t *sc;
	if_t ifp;
	struct ether_header *eh;
	struct mbuf *m;
	mxge_rx_ring_t *rx;
	bus_dmamap_t old_map;
	int idx;

	sc = ss->sc;
	ifp = sc->ifp;
	rx = &ss->rx_small;
	idx = rx->cnt & rx->mask;
	rx->cnt++;

	/* save a pointer to the received mbuf */
	m = rx->info[idx].m;
	/* try to replace the received mbuf */
	if (mxge_get_buf_small(ss, rx->extra_map, idx)) {
		/* drop the frame -- the old mbuf is re-cycled */
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		return;
	}

	/* unmap the received buffer */
	old_map = rx->info[idx].map;
	bus_dmamap_sync(rx->dmat, old_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(rx->dmat, old_map);

	/* swap the bus_dmamap_t's */
	rx->info[idx].map = rx->extra_map;
	rx->extra_map = old_map;

	/* mcp implicitly skips 1st 2 bytes so that packet is properly
	 * aligned */
	m->m_data += MXGEFW_PAD;

	m->m_pkthdr.rcvif = ifp;
	m->m_len = m->m_pkthdr.len = len;
	ss->ipackets++;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
		mxge_vlan_tag_remove(m, &csum);
	}
	/* flowid only valid if RSS hashing is enabled */
	if (sc->num_slices > 1) {
		m->m_pkthdr.flowid = (ss - sc->ss);
		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
	}
	/* if the checksum is valid, mark it in the mbuf header */
	if ((if_getcapenable(ifp) & (IFCAP_RXCSUM_IPV6 | IFCAP_RXCSUM)) &&
	    (0 == mxge_rx_csum(m, csum))) {
		/* Tell the stack that the checksum is good */
		m->m_pkthdr.csum_data = 0xffff;
		m->m_pkthdr.csum_flags = CSUM_PSEUDO_HDR |
		    CSUM_DATA_VALID;

#if defined(INET) || defined (INET6)
		if (lro && (0 == tcp_lro_rx(&ss->lc, m, csum)))
			return;
#endif
	}
	/* pass the frame up the stack */
	if_input(ifp, m);
}
/*
 * Drain the slice's receive-completion ring, dispatching each entry to
 * the small- or big-buffer handler, then flush any pending LRO state.
 * Processing is capped at half the ring per call to bound livelock.
 */
static inline void
mxge_clean_rx_done(struct mxge_slice_state *ss)
{
	mxge_rx_done_t *rx_done = &ss->rx_done;
	uint16_t len, csum;
	int processed, lro;

	lro = if_getcapenable(ss->sc->ifp) & IFCAP_LRO;
	processed = 0;
	while (rx_done->entry[rx_done->idx].length != 0) {
		len = ntohs(rx_done->entry[rx_done->idx].length);
		/* zero the length field to mark the slot consumed */
		rx_done->entry[rx_done->idx].length = 0;
		csum = rx_done->entry[rx_done->idx].checksum;
		if (len > (MHLEN - MXGEFW_PAD))
			mxge_rx_done_big(ss, len, csum, lro);
		else
			mxge_rx_done_small(ss, len, csum, lro);
		rx_done->cnt++;
		rx_done->idx = rx_done->cnt & rx_done->mask;
		/* limit potential for livelock */
		if (__predict_false(++processed > rx_done->mask / 2))
			break;
	}
#if defined(INET) || defined (INET6)
	tcp_lro_flush_all(&ss->lc);
#endif
}
/*
 * Reclaim completed transmit descriptors up to the firmware's reported
 * index (mcp_idx), freeing mbufs and unloading DMA maps, then restart
 * transmission if the ring had stalled.  Only the first descriptor of
 * each packet carries the mbuf/map; the per-packet "flag" marker set by
 * mxge_encap() advances pkt_done.
 *
 * Fixes vs. original: removed the unused `ifp` local and the redundant
 * nested `tx->req == tx->done` re-test (it was already established by
 * the enclosing condition).
 */
static inline void
mxge_tx_done(struct mxge_slice_state *ss, uint32_t mcp_idx)
{
	mxge_tx_ring_t *tx;
	struct mbuf *m;
	bus_dmamap_t map;
	int idx;
	int *flags;

	tx = &ss->tx;
	while (tx->pkt_done != mcp_idx) {
		idx = tx->done & tx->mask;
		tx->done++;
		m = tx->info[idx].m;
		/* mbuf and DMA map only attached to the first
		   segment per-mbuf */
		if (m != NULL) {
			ss->obytes += m->m_pkthdr.len;
			if (m->m_flags & M_MCAST)
				ss->omcasts++;
			ss->opackets++;
			tx->info[idx].m = NULL;
			map = tx->info[idx].map;
			bus_dmamap_unload(tx->dmat, map);
			m_freem(m);
		}
		if (tx->info[idx].flag) {
			tx->info[idx].flag = 0;
			tx->pkt_done++;
		}
	}

	/* If we have space, clear IFF_OACTIVE to tell the stack that
	   its OK to send packets */
	flags = &ss->if_drv_flags;

	mtx_lock(&ss->tx.mtx);
	if ((*flags) & IFF_DRV_OACTIVE &&
	    tx->req - tx->done < (tx->mask + 1)/4) {
		*(flags) &= ~IFF_DRV_OACTIVE;
		ss->tx.wake++;
		mxge_start_locked(ss);
	}
	if ((ss->sc->num_slices > 1) && (tx->req == tx->done)) {
		/* let the NIC stop polling this queue, since there
		 * are no more transmits pending */
		*tx->send_stop = 1;
		tx->queue_active = 0;
		tx->deactivate++;
		wmb();
	}
	mtx_unlock(&ss->tx.mtx);
}
/*
 * XFP compliance-byte bitmask -> ifmedia type mapping.  A flag of 0
 * means FreeBSD has no matching IFM_* media word; only the name is
 * reported.  The first entry's mask (0x7f) is compared for equality,
 * not ANDed, in mxge_media_probe().
 */
static struct mxge_media_type mxge_xfp_media_types[] =
{
	{IFM_10G_CX4,	0x7f,		"10GBASE-CX4 (module)"},
	{IFM_10G_SR,	(1 << 7),	"10GBASE-SR"},
	{IFM_10G_LR,	(1 << 6),	"10GBASE-LR"},
	{0,		(1 << 5),	"10GBASE-ER"},
	{IFM_10G_LRM,	(1 << 4),	"10GBASE-LRM"},
	{0,		(1 << 3),	"10GBASE-SW"},
	{0,		(1 << 2),	"10GBASE-LW"},
	{0,		(1 << 1),	"10GBASE-EW"},
	{0,		(1 << 0),	"Reserved"}
};

/*
 * SFP+ identification-byte bitmask -> ifmedia type mapping.  The first
 * entry (bitmask 0) is the default matched by equality when no
 * compliance bit is set, i.e. a passive Twinax cable.
 */
static struct mxge_media_type mxge_sfp_media_types[] =
{
	{IFM_10G_TWINAX,      0,	"10GBASE-Twinax"},
	{0,		(1 << 7),	"Reserved"},
	{IFM_10G_LRM,	(1 << 6),	"10GBASE-LRM"},
	{IFM_10G_LR, 	(1 << 5),	"10GBASE-LR"},
	{IFM_10G_SR,	(1 << 4),	"10GBASE-SR"},
	{IFM_10G_TWINAX,(1 << 0),	"10GBASE-Twinax"}
};
/*
 * Register media_type (full duplex assumed) with ifmedia, make it the
 * active selection, and remember it in sc->current_media so probe code
 * can tell when the cage contents change.
 */
static void
mxge_media_set(mxge_softc_t *sc, int media_type)
{
	int media = IFM_ETHER | IFM_FDX | media_type;

	ifmedia_add(&sc->media, media, 0, NULL);
	ifmedia_set(&sc->media, media);
	sc->current_media = media_type;
	sc->media.ifm_media = sc->media.ifm_cur->ifm_media;
}
/*
 * Reset the ifmedia list to IFM_AUTO and classify the connector type
 * (CX4/QRF/XFP/SFP+) from the EEPROM product-code string so that
 * mxge_media_probe() knows whether and how to interrogate the cage.
 */
static void
mxge_media_init(mxge_softc_t *sc)
{
	char *ptr;
	int i;

	ifmedia_removeall(&sc->media);
	mxge_media_set(sc, IFM_AUTO);

	/*
	 * parse the product code to determine the interface type
	 * (CX4, XFP, Quad Ribbon Fiber) by looking at the character
	 * after the 3rd dash in the driver's cached copy of the
	 * EEPROM's product code string.
	 */
	ptr = sc->product_code_string;
	if (ptr == NULL) {
		device_printf(sc->dev, "Missing product code\n");
		return;
	}

	/* advance past the third '-'; ptr++ skips each dash found */
	for (i = 0; i < 3; i++, ptr++) {
		ptr = strchr(ptr, '-');
		if (ptr == NULL) {
			device_printf(sc->dev,
			    "only %d dashes in PC?!?\n", i);
			return;
		}
	}
	/* NB: match order matters; 'C'/'S' also check the 2nd char */
	if (*ptr == 'C' || *(ptr +1) == 'C') {
		/* -C is CX4 */
		sc->connector = MXGE_CX4;
		mxge_media_set(sc, IFM_10G_CX4);
	} else if (*ptr == 'Q') {
		/* -Q is Quad Ribbon Fiber */
		sc->connector = MXGE_QRF;
		device_printf(sc->dev, "Quad Ribbon Fiber Media\n");
		/* FreeBSD has no media type for Quad ribbon fiber */
	} else if (*ptr == 'R') {
		/* -R is XFP */
		sc->connector = MXGE_XFP;
	} else if (*ptr == 'S' || *(ptr +1) == 'S') {
		/* -S or -2S is SFP+ */
		sc->connector = MXGE_SFP;
	} else {
		device_printf(sc->dev, "Unknown media type: %c\n", *ptr);
	}
}
/*
 * Determine the media type for a NIC.  Some XFPs will identify
 * themselves only when their link is up, so this is initiated via a
 * link up interrupt.  However, this can potentially take up to
 * several milliseconds, so it is run via the watchdog routine, rather
 * than in the interrupt handler itself.
 */
static void
mxge_media_probe(mxge_softc_t *sc)
{
	mxge_cmd_t cmd;
	char *cage_type;

	struct mxge_media_type *mxge_media_types = NULL;
	int i, err, ms, mxge_media_type_entries;
	uint32_t byte;

	sc->need_media_probe = 0;

	/* pick the table and I2C byte offset for this connector type */
	if (sc->connector == MXGE_XFP) {
		/* -R is XFP */
		mxge_media_types = mxge_xfp_media_types;
		mxge_media_type_entries =
		    nitems(mxge_xfp_media_types);
		byte = MXGE_XFP_COMPLIANCE_BYTE;
		cage_type = "XFP";
	} else 	if (sc->connector == MXGE_SFP) {
		/* -S or -2S is SFP+ */
		mxge_media_types = mxge_sfp_media_types;
		mxge_media_type_entries =
		    nitems(mxge_sfp_media_types);
		cage_type = "SFP+";
		byte = 3;
	} else {
		/* nothing to do; media type cannot change */
		return;
	}

	/*
	 * At this point we know the NIC has an XFP cage, so now we
	 * try to determine what is in the cage by using the
	 * firmware's XFP I2C commands to read the XFP 10GbE compilance
	 * register.  We read just one byte, which may take over
	 * a millisecond
	 */

	cmd.data0 = 0;	 /* just fetch 1 byte, not all 256 */
	cmd.data1 = byte;
	err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_READ, &cmd);
	if (err == MXGEFW_CMD_ERROR_I2C_FAILURE) {
		device_printf(sc->dev, "failed to read XFP\n");
	}
	if (err == MXGEFW_CMD_ERROR_I2C_ABSENT) {
		device_printf(sc->dev, "Type R/S with no XFP!?!?\n");
	}
	if (err != MXGEFW_CMD_OK) {
		return;
	}

	/* now we wait for the data to be cached */
	cmd.data0 = byte;
	err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_BYTE, &cmd);
	/* poll up to 50ms, 1ms at a time, for the firmware's I2C cache */
	for (ms = 0; (err == EBUSY) && (ms < 50); ms++) {
		DELAY(1000);
		cmd.data0 = byte;
		err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_BYTE, &cmd);
	}
	if (err != MXGEFW_CMD_OK) {
		device_printf(sc->dev, "failed to read %s (%d, %dms)\n",
		    cage_type, err, ms);
		return;
	}

	/* table entry 0 is matched by equality, the rest by bit test */
	if (cmd.data0 == mxge_media_types[0].bitmask) {
		if (mxge_verbose)
			device_printf(sc->dev, "%s:%s\n", cage_type,
			    mxge_media_types[0].name);
		if (sc->current_media != mxge_media_types[0].flag) {
			mxge_media_init(sc);
			mxge_media_set(sc, mxge_media_types[0].flag);
		}
		return;
	}
	for (i = 1; i < mxge_media_type_entries; i++) {
		if (cmd.data0 & mxge_media_types[i].bitmask) {
			if (mxge_verbose)
				device_printf(sc->dev, "%s:%s\n",
				    cage_type,
				    mxge_media_types[i].name);

			if (sc->current_media != mxge_media_types[i].flag) {
				mxge_media_init(sc);
				mxge_media_set(sc, mxge_media_types[i].flag);
			}
			return;
		}
	}
	if (mxge_verbose)
		device_printf(sc->dev, "%s media 0x%x unknown\n",
		    cage_type, cmd.data0);

	return;
}
/*
 * Per-slice interrupt handler.  Reaps transmit completions and receive
 * events until the firmware's DMA'ed stats block says the interrupt is
 * deasserted, then (on the first slice only) processes link and error
 * status changes, and finally returns the irq credit to the NIC.
 */
static void
mxge_intr(void *arg)
{
	struct mxge_slice_state *ss = arg;
	mxge_softc_t *sc = ss->sc;
	mcp_irq_data_t *stats = ss->fw_stats;
	mxge_tx_ring_t *tx = &ss->tx;
	mxge_rx_done_t *rx_done = &ss->rx_done;
	uint32_t send_done_count;
	uint8_t valid;

	/* make sure the DMA has finished */
	if (!stats->valid) {
		return;
	}
	valid = stats->valid;

	if (sc->legacy_irq) {
		/* lower legacy IRQ */
		*sc->irq_deassert = 0;
		if (!mxge_deassert_wait)
			/* don't wait for conf. that irq is low */
			stats->valid = 0;
	} else {
		stats->valid = 0;
	}

	/* loop while waiting for legacy irq deassertion */
	do {
		/* check for transmit completes and receives */
		send_done_count = be32toh(stats->send_done_count);
		while ((send_done_count != tx->pkt_done) ||
		    (rx_done->entry[rx_done->idx].length != 0)) {
			if (send_done_count != tx->pkt_done)
				mxge_tx_done(ss, (int)send_done_count);
			mxge_clean_rx_done(ss);
			send_done_count = be32toh(stats->send_done_count);
		}
		if (sc->legacy_irq && mxge_deassert_wait)
			wmb();
	} while (*((volatile uint8_t *) &stats->valid));

	/* fw link & error stats meaningful only on the first slice */
	if (__predict_false((ss == sc->ss) && stats->stats_updated)) {
		if (sc->link_state != stats->link_up) {
			sc->link_state = stats->link_up;
			if (sc->link_state) {
				if_link_state_change(sc->ifp, LINK_STATE_UP);
				if (mxge_verbose)
					device_printf(sc->dev, "link up\n");
			} else {
				if_link_state_change(sc->ifp, LINK_STATE_DOWN);
				if (mxge_verbose)
					device_printf(sc->dev, "link down\n");
			}
			sc->need_media_probe = 1;
		}
		if (sc->rdma_tags_available !=
		    be32toh(stats->rdma_tags_available)) {
			sc->rdma_tags_available =
			    be32toh(stats->rdma_tags_available);
			device_printf(sc->dev, "RDMA timed out! %d tags "
			    "left\n", sc->rdma_tags_available);
		}

		if (stats->link_down) {
			sc->down_cnt += stats->link_down;
			sc->link_state = 0;
			if_link_state_change(sc->ifp, LINK_STATE_DOWN);
		}
	}

	/* check to see if we have rx token to pass back */
	if (valid & 0x1)
	    *ss->irq_claim = be32toh(3);
	*(ss->irq_claim + 1) = be32toh(3);
}
/*
 * if_init handler: bring the interface up if it is not already running.
 */
static void
mxge_init(void *arg)
{
	mxge_softc_t *sc = arg;

	mtx_lock(&sc->driver_mtx);
	if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING))
		(void)mxge_open(sc);
	mtx_unlock(&sc->driver_mtx);
}
static void
mxge_free_slice_mbufs(struct mxge_slice_state *ss)
{
int i;
#if defined(INET) || defined(INET6)
tcp_lro_free(&ss->lc);
#endif
for (i = 0; i <= ss->rx_big.mask; i++) {
if (ss->rx_big.info[i].m == NULL)
continue;
bus_dmamap_unload(ss->rx_big.dmat,
ss->rx_big.info[i].map);
m_freem(ss->rx_big.info[i].m);
ss->rx_big.info[i].m = NULL;
}
for (i = 0; i <= ss->rx_small.mask; i++) {
if (ss->rx_small.info[i].m == NULL)
continue;
bus_dmamap_unload(ss->rx_small.dmat,
ss->rx_small.info[i].map);
m_freem(ss->rx_small.info[i].m);
ss->rx_small.info[i].m = NULL;
}
/* transmit ring used only on the first slice */
if (ss->tx.info == NULL)
return;
for (i = 0; i <= ss->tx.mask; i++) {
ss->tx.info[i].flag = 0;
if (ss->tx.info[i].m == NULL)
continue;
bus_dmamap_unload(ss->tx.dmat,
ss->tx.info[i].map);
m_freem(ss->tx.info[i].m);
ss->tx.info[i].m = NULL;
}
}
/*
 * Free ring-attached mbufs on every slice.
 */
static void
mxge_free_mbufs(mxge_softc_t *sc)
{
	int s;

	for (s = 0; s < sc->num_slices; s++)
		mxge_free_slice_mbufs(&sc->ss[s]);
}
static void
mxge_free_slice_rings(struct mxge_slice_state *ss)
{
int i;
if (ss->rx_done.entry != NULL)
mxge_dma_free(&ss->rx_done.dma);
ss->rx_done.entry = NULL;
if (ss->tx.req_bytes != NULL)
free(ss->tx.req_bytes, M_DEVBUF);
ss->tx.req_bytes = NULL;
if (ss->tx.seg_list != NULL)
free(ss->tx.seg_list, M_DEVBUF);
ss->tx.seg_list = NULL;
if (ss->rx_small.shadow != NULL)
free(ss->rx_small.shadow, M_DEVBUF);
ss->rx_small.shadow = NULL;
if (ss->rx_big.shadow != NULL)
free(ss->rx_big.shadow, M_DEVBUF);
ss->rx_big.shadow = NULL;
if (ss->tx.info != NULL) {
if (ss->tx.dmat != NULL) {
for (i = 0; i <= ss->tx.mask; i++) {
bus_dmamap_destroy(ss->tx.dmat,
ss->tx.info[i].map);
}
bus_dma_tag_destroy(ss->tx.dmat);
}
free(ss->tx.info, M_DEVBUF);
}
ss->tx.info = NULL;
if (ss->rx_small.info != NULL) {
if (ss->rx_small.dmat != NULL) {
for (i = 0; i <= ss->rx_small.mask; i++) {
bus_dmamap_destroy(ss->rx_small.dmat,
ss->rx_small.info[i].map);
}
bus_dmamap_destroy(ss->rx_small.dmat,
ss->rx_small.extra_map);
bus_dma_tag_destroy(ss->rx_small.dmat);
}
free(ss->rx_small.info, M_DEVBUF);
}
ss->rx_small.info = NULL;
if (ss->rx_big.info != NULL) {
if (ss->rx_big.dmat != NULL) {
for (i = 0; i <= ss->rx_big.mask; i++) {
bus_dmamap_destroy(ss->rx_big.dmat,
ss->rx_big.info[i].map);
}
bus_dmamap_destroy(ss->rx_big.dmat,
ss->rx_big.extra_map);
bus_dma_tag_destroy(ss->rx_big.dmat);
}
free(ss->rx_big.info, M_DEVBUF);
}
ss->rx_big.info = NULL;
}
/*
 * Free ring resources for every slice.
 */
static void
mxge_free_rings(mxge_softc_t *sc)
{
	int s;

	for (s = 0; s < sc->num_slices; s++)
		mxge_free_slice_rings(&sc->ss[s]);
}
/*
 * Allocate all per-slice ring state: shadow and host-info rings for the
 * small and big receive rings, the transmit request/segment scratch
 * areas, and busdma tags plus one map per ring slot (plus an "extra"
 * map per rx ring used for lock-free buffer replacement).  Returns 0
 * or an errno; on failure the caller (mxge_alloc_rings) is expected to
 * clean up via mxge_free_rings(), so partial allocations are not freed
 * here.
 */
static int
mxge_alloc_slice_rings(struct mxge_slice_state *ss, int rx_ring_entries,
    int tx_ring_entries)
{
	mxge_softc_t *sc = ss->sc;
	size_t bytes;
	int err, i;

	/* allocate per-slice receive resources */

	ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1;
	/* rx_done holds completions for both rx rings, hence 2x */
	ss->rx_done.mask = (2 * rx_ring_entries) - 1;

	/* allocate the rx shadow rings */
	bytes = rx_ring_entries * sizeof (*ss->rx_small.shadow);
	ss->rx_small.shadow = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK);

	bytes = rx_ring_entries * sizeof (*ss->rx_big.shadow);
	ss->rx_big.shadow = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK);

	/* allocate the rx host info rings */
	bytes = rx_ring_entries * sizeof (*ss->rx_small.info);
	ss->rx_small.info = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK);

	bytes = rx_ring_entries * sizeof (*ss->rx_big.info);
	ss->rx_big.info = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK);

	/* allocate the rx busdma resources */
	err = bus_dma_tag_create(sc->parent_dmat,	/* parent */
				 1,			/* alignment */
				 4096,			/* boundary */
				 BUS_SPACE_MAXADDR,	/* low */
				 BUS_SPACE_MAXADDR,	/* high */
				 NULL, NULL,		/* filter */
				 MHLEN,			/* maxsize */
				 1,			/* num segs */
				 MHLEN,			/* maxsegsize */
				 BUS_DMA_ALLOCNOW,	/* flags */
				 NULL, NULL,		/* lock */
				 &ss->rx_small.dmat);	/* tag */
	if (err != 0) {
		device_printf(sc->dev, "Err %d allocating rx_small dmat\n",
		    err);
		return err;
	}

	err = bus_dma_tag_create(sc->parent_dmat,	/* parent */
				 1,			/* alignment */
				 0,			/* boundary */
				 BUS_SPACE_MAXADDR,	/* low */
				 BUS_SPACE_MAXADDR,	/* high */
				 NULL, NULL,		/* filter */
				 3*4096,		/* maxsize */
				 1,			/* num segs */
				 MJUM9BYTES,		/* maxsegsize*/
				 BUS_DMA_ALLOCNOW,	/* flags */
				 NULL, NULL,		/* lock */
				 &ss->rx_big.dmat);	/* tag */
	if (err != 0) {
		device_printf(sc->dev, "Err %d allocating rx_big dmat\n",
		    err);
		return err;
	}
	for (i = 0; i <= ss->rx_small.mask; i++) {
		err = bus_dmamap_create(ss->rx_small.dmat, 0,
		    &ss->rx_small.info[i].map);
		if (err != 0) {
			device_printf(sc->dev, "Err %d  rx_small dmamap\n",
			    err);
			return err;
		}
	}
	err = bus_dmamap_create(ss->rx_small.dmat, 0,
	    &ss->rx_small.extra_map);
	if (err != 0) {
		device_printf(sc->dev, "Err %d extra rx_small dmamap\n",
		    err);
		return err;
	}

	for (i = 0; i <= ss->rx_big.mask; i++) {
		err = bus_dmamap_create(ss->rx_big.dmat, 0,
		    &ss->rx_big.info[i].map);
		if (err != 0) {
			device_printf(sc->dev, "Err %d  rx_big dmamap\n",
			    err);
			return err;
		}
	}
	err = bus_dmamap_create(ss->rx_big.dmat, 0,
	    &ss->rx_big.extra_map);
	if (err != 0) {
		device_printf(sc->dev, "Err %d extra rx_big dmamap\n",
		    err);
		return err;
	}

	/* now allocate TX resources */

	ss->tx.mask = tx_ring_entries - 1;
	ss->tx.max_desc = MIN(MXGE_MAX_SEND_DESC, tx_ring_entries / 4);

	/* allocate the tx request copy block */
	bytes = 8 +
	    sizeof (*ss->tx.req_list) * (ss->tx.max_desc + 4);
	ss->tx.req_bytes = malloc(bytes, M_DEVBUF, M_WAITOK);
	/* ensure req_list entries are aligned to 8 bytes */
	ss->tx.req_list = (mcp_kreq_ether_send_t *)
	    ((uintptr_t)(ss->tx.req_bytes + 7) & ~7UL);

	/* allocate the tx busdma segment list */
	bytes = sizeof (*ss->tx.seg_list) * ss->tx.max_desc;
	ss->tx.seg_list = (bus_dma_segment_t *)
	    malloc(bytes, M_DEVBUF, M_WAITOK);

	/* allocate the tx host info ring */
	bytes = tx_ring_entries * sizeof (*ss->tx.info);
	ss->tx.info = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK);

	/* allocate the tx busdma resources */
	err = bus_dma_tag_create(sc->parent_dmat,	/* parent */
				 1,			/* alignment */
				 sc->tx_boundary,	/* boundary */
				 BUS_SPACE_MAXADDR,	/* low */
				 BUS_SPACE_MAXADDR,	/* high */
				 NULL, NULL,		/* filter */
				 65536 + 256,		/* maxsize */
				 ss->tx.max_desc - 2,	/* num segs */
				 sc->tx_boundary,	/* maxsegsz */
				 BUS_DMA_ALLOCNOW,	/* flags */
				 NULL, NULL,		/* lock */
				 &ss->tx.dmat);		/* tag */

	if (err != 0) {
		device_printf(sc->dev, "Err %d allocating tx dmat\n",
		    err);
		return err;
	}

	/* now use these tags to setup dmamaps for each slot
	   in the ring */
	for (i = 0; i <= ss->tx.mask; i++) {
		err = bus_dmamap_create(ss->tx.dmat, 0,
		    &ss->tx.info[i].map);
		if (err != 0) {
			device_printf(sc->dev, "Err %d  tx dmamap\n",
			    err);
			return err;
		}
	}
	return 0;
}
/*
 * Query the firmware for the transmit ring size, size the interface
 * send queue accordingly, and allocate ring resources for every slice.
 * On any failure all partially-allocated rings are freed.  Returns 0
 * or an errno/firmware error code.
 *
 * Fix vs. original: cmd.data0 was read into tx_ring_size before the
 * command's error status was checked, so a failed command could leave
 * tx_ring_size holding indeterminate data (harmless only by accident
 * of the goto).  The status is now checked first.
 */
static int
mxge_alloc_rings(mxge_softc_t *sc)
{
	mxge_cmd_t cmd;
	int tx_ring_size;
	int tx_ring_entries, rx_ring_entries;
	int err, slice;

	/* get ring sizes */
	err = mxge_send_cmd(sc, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd);
	if (err != 0) {
		device_printf(sc->dev, "Cannot determine tx ring sizes\n");
		goto abort;
	}
	/* cmd.data0 is only valid once the command has succeeded */
	tx_ring_size = cmd.data0;

	tx_ring_entries = tx_ring_size / sizeof (mcp_kreq_ether_send_t);
	rx_ring_entries = sc->rx_ring_size / sizeof (mcp_dma_addr_t);
	if_setsendqlen(sc->ifp, tx_ring_entries - 1);
	if_setsendqready(sc->ifp);

	for (slice = 0; slice < sc->num_slices; slice++) {
		err = mxge_alloc_slice_rings(&sc->ss[slice],
		    rx_ring_entries, tx_ring_entries);
		if (err != 0)
			goto abort;
	}
	return 0;

abort:
	mxge_free_rings(sc);
	return err;
}
/*
 * Pick receive-buffer parameters for a given MTU: the cluster size
 * to allocate, the "big buffer" size to tell the firmware, and how
 * many buffers each received frame spans (always 1 here, since the
 * chosen cluster always holds a whole frame).
 */
static void
mxge_choose_params(int mtu, int *big_buf_size, int *cl_size, int *nbufs)
{
	int frame_len;

	frame_len = mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + MXGEFW_PAD;
	*nbufs = 1;
	if (frame_len < MCLBYTES) {
		/* a regular cluster holds the whole frame */
		*big_buf_size = *cl_size = MCLBYTES;
	} else if (frame_len < MJUMPAGESIZE) {
		/* a page-sized jumbo cluster is enough */
		*big_buf_size = *cl_size = MJUMPAGESIZE;
	} else {
		/* fall back to 9KB jumbo clusters */
		*big_buf_size = *cl_size = MJUM9BYTES;
	}
}
/*
 * Bring up one slice: initialize LRO, fetch the firmware's ring
 * locations from NIC SRAM, and stock both receive rings with mbufs.
 *
 * Returns 0 on success, EIO if the firmware ring queries fail, or
 * ENOMEM if receive buffers cannot be allocated.
 */
static int
mxge_slice_open(struct mxge_slice_state *ss, int nbufs, int cl_size)
{
	mxge_softc_t *sc;
	mxge_cmd_t cmd;
	bus_dmamap_t map;
	int err, i, slice;

	sc = ss->sc;
	slice = ss - sc->ss;	/* slice index via pointer arithmetic */

#if defined(INET) || defined(INET6)
	(void)tcp_lro_init(&ss->lc);
#endif
	ss->lc.ifp = sc->ifp;

	/* get the lanai pointers to the send and receive rings */
	err = 0;
	cmd.data0 = slice;
	err = mxge_send_cmd(sc, MXGEFW_CMD_GET_SEND_OFFSET, &cmd);
	ss->tx.lanai =
	    (volatile mcp_kreq_ether_send_t *)(sc->sram + cmd.data0);
	/* per-slice TX doorbells sit at 64-byte strides in SRAM */
	ss->tx.send_go = (volatile uint32_t *)
	    (sc->sram + MXGEFW_ETH_SEND_GO + 64 * slice);
	ss->tx.send_stop = (volatile uint32_t *)
	    (sc->sram + MXGEFW_ETH_SEND_STOP + 64 * slice);

	cmd.data0 = slice;
	err |= mxge_send_cmd(sc,
	    MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd);
	ss->rx_small.lanai =
	    (volatile mcp_kreq_ether_recv_t *)(sc->sram + cmd.data0);
	cmd.data0 = slice;
	err |= mxge_send_cmd(sc, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd);
	ss->rx_big.lanai =
	    (volatile mcp_kreq_ether_recv_t *)(sc->sram + cmd.data0);

	if (err != 0) {
		device_printf(sc->dev,
		    "failed to get ring sizes or locations\n");
		return EIO;
	}

	/* stock receive rings */
	for (i = 0; i <= ss->rx_small.mask; i++) {
		map = ss->rx_small.info[i].map;
		err = mxge_get_buf_small(ss, map, i);
		if (err) {
			device_printf(sc->dev, "alloced %d/%d smalls\n",
			    i, ss->rx_small.mask + 1);
			return ENOMEM;
		}
	}
	/* poison the big-ring shadow so unfilled slots are obvious */
	for (i = 0; i <= ss->rx_big.mask; i++) {
		ss->rx_big.shadow[i].addr_low = 0xffffffff;
		ss->rx_big.shadow[i].addr_high = 0xffffffff;
	}
	ss->rx_big.nbufs = nbufs;
	ss->rx_big.cl_size = cl_size;
	/* largest frame the big ring must accommodate */
	ss->rx_big.mlen = if_getmtu(ss->sc->ifp) + ETHER_HDR_LEN +
	    ETHER_VLAN_ENCAP_LEN + MXGEFW_PAD;
	/* the big ring advances nbufs slots per cluster */
	for (i = 0; i <= ss->rx_big.mask; i += ss->rx_big.nbufs) {
		map = ss->rx_big.info[i].map;
		err = mxge_get_buf_big(ss, map, i);
		if (err) {
			device_printf(sc->dev, "alloced %d/%d bigs\n",
			    i, ss->rx_big.mask + 1);
			return ENOMEM;
		}
	}
	return 0;
}
/*
 * Bring the interface up: reset the NIC, configure RSS when multiple
 * slices are in use, program MTU and buffer sizes into the firmware,
 * register the stats DMA block, open every slice and finally start
 * the firmware (ETHERNET_UP).  Called with the driver lock held (all
 * callers in this file hold sc->driver_mtx).  Returns 0 or an errno;
 * on failure past parameter setup, mbufs are freed via
 * mxge_free_mbufs().
 */
static int
mxge_open(mxge_softc_t *sc)
{
	mxge_cmd_t cmd;
	int err, big_bytes, nbufs, slice, cl_size, i;
	bus_addr_t bus;
	volatile uint8_t *itable;
	struct mxge_slice_state *ss;

	/* Copy the MAC address in case it was overridden */
	bcopy(if_getlladdr(sc->ifp), sc->mac_addr, ETHER_ADDR_LEN);
	err = mxge_reset(sc, 1);
	if (err != 0) {
		device_printf(sc->dev, "failed to reset\n");
		return EIO;
	}

	if (sc->num_slices > 1) {
		/* setup the indirection table */
		cmd.data0 = sc->num_slices;
		err = mxge_send_cmd(sc, MXGEFW_CMD_SET_RSS_TABLE_SIZE,
		    &cmd);
		err |= mxge_send_cmd(sc, MXGEFW_CMD_GET_RSS_TABLE_OFFSET,
		    &cmd);
		if (err != 0) {
			device_printf(sc->dev,
			    "failed to setup rss tables\n");
			return err;
		}

		/* just enable an identity mapping */
		itable = sc->sram + cmd.data0;
		for (i = 0; i < sc->num_slices; i++)
			itable[i] = (uint8_t)i;

		cmd.data0 = 1;
		cmd.data1 = mxge_rss_hash_type;
		err = mxge_send_cmd(sc, MXGEFW_CMD_SET_RSS_ENABLE, &cmd);
		if (err != 0) {
			device_printf(sc->dev, "failed to enable slices\n");
			return err;
		}
	}

	mxge_choose_params(if_getmtu(sc->ifp), &big_bytes, &cl_size, &nbufs);

	cmd.data0 = nbufs;
	err = mxge_send_cmd(sc, MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS,
	    &cmd);
	/* error is only meaningful if we're trying to set
	   MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS > 1 */
	if (err && nbufs > 1) {
		device_printf(sc->dev,
		    "Failed to set alway-use-n to %d\n",
		    nbufs);
		return EIO;
	}
	/* Give the firmware the mtu and the big and small buffer
	   sizes.  The firmware wants the big buf size to be a power
	   of two. Luckily, FreeBSD's clusters are powers of two */
	cmd.data0 = if_getmtu(sc->ifp) + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	err = mxge_send_cmd(sc, MXGEFW_CMD_SET_MTU, &cmd);
	cmd.data0 = MHLEN - MXGEFW_PAD;
	err |= mxge_send_cmd(sc, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE,
	    &cmd);
	cmd.data0 = big_bytes;
	err |= mxge_send_cmd(sc, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd);

	if (err != 0) {
		device_printf(sc->dev, "failed to setup params\n");
		goto abort;
	}

	/* Now give him the pointer to the stats block */
	for (slice = 0; slice < sc->num_slices; slice++) {
		ss = &sc->ss[slice];
		cmd.data0 =
		    MXGE_LOWPART_TO_U32(ss->fw_stats_dma.bus_addr);
		cmd.data1 =
		    MXGE_HIGHPART_TO_U32(ss->fw_stats_dma.bus_addr);
		cmd.data2 = sizeof(struct mcp_irq_data);
		cmd.data2 |= (slice << 16);	/* slice id in the high half */
		err |= mxge_send_cmd(sc, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd);
	}

	if (err != 0) {
		/* older firmware: fall back to the obsolete stats DMA,
		   which only carries the send_done_count */
		bus = sc->ss->fw_stats_dma.bus_addr;
		bus += offsetof(struct mcp_irq_data, send_done_count);
		cmd.data0 = MXGE_LOWPART_TO_U32(bus);
		cmd.data1 = MXGE_HIGHPART_TO_U32(bus);
		err = mxge_send_cmd(sc,
		    MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
		    &cmd);
		/* Firmware cannot support multicast without STATS_DMA_V2 */
		sc->fw_multicast_support = 0;
	} else {
		sc->fw_multicast_support = 1;
	}

	if (err != 0) {
		device_printf(sc->dev, "failed to setup params\n");
		goto abort;
	}

	for (slice = 0; slice < sc->num_slices; slice++) {
		err = mxge_slice_open(&sc->ss[slice], nbufs, cl_size);
		if (err != 0) {
			device_printf(sc->dev, "couldn't open slice %d\n",
			    slice);
			goto abort;
		}
	}

	/* Finally, start the firmware running */
	err = mxge_send_cmd(sc, MXGEFW_CMD_ETHERNET_UP, &cmd);
	if (err) {
		device_printf(sc->dev, "Couldn't bring up link\n");
		goto abort;
	}
	/* mark every slice, and then the ifnet itself, as running */
	for (slice = 0; slice < sc->num_slices; slice++) {
		ss = &sc->ss[slice];
		ss->if_drv_flags |= IFF_DRV_RUNNING;
		ss->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}
	if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);

	return 0;

abort:
	mxge_free_mbufs(sc);
	return err;
}
/*
 * Stop the interface.  Clears IFF_DRV_RUNNING on every slice and on
 * the ifnet, asks the firmware to bring the link down (skipped when
 * 'down' is nonzero, i.e. the NIC is known to be down/rebooted, as
 * in mxge_watchdog_reset()), then frees all rx/tx mbufs.  Always
 * returns 0.
 */
static int
mxge_close(mxge_softc_t *sc, int down)
{
	mxge_cmd_t cmd;
	int err, old_down_cnt;
	struct mxge_slice_state *ss;
	int slice;

	for (slice = 0; slice < sc->num_slices; slice++) {
		ss = &sc->ss[slice];
		ss->if_drv_flags &= ~IFF_DRV_RUNNING;
	}
	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
	if (!down) {
		old_down_cnt = sc->down_cnt;
		wmb();	/* flag updates must be visible before the command */
		err = mxge_send_cmd(sc, MXGEFW_CMD_ETHERNET_DOWN, &cmd);
		if (err) {
			device_printf(sc->dev,
			    "Couldn't bring down link\n");
		}
		if (old_down_cnt == sc->down_cnt) {
			/* wait for down irq */
			DELAY(10 * sc->intr_coal_delay);
		}
		wmb();
		/* down_cnt unchanged => the firmware's DOWN confirmation
		   interrupt never arrived (NOTE(review): presumed bumped
		   by the interrupt handler -- confirm against mxge_intr) */
		if (old_down_cnt == sc->down_cnt) {
			device_printf(sc->dev, "never got down irq\n");
		}
	}
	mxge_free_mbufs(sc);
	return 0;
}
/*
 * (Re)program the parts of PCI config space this driver cares about:
 * record the negotiated PCIe link width, set the max read request
 * size to 4KB (or, after a watchdog reset, restore the previously
 * saved device-control value), and enable bus mastering.
 */
static void
mxge_setup_cfg_space(mxge_softc_t *sc)
{
	device_t dev = sc->dev;
	int reg;
	uint16_t lnk, pectl;

	/* find the PCIe link width and set max read request to 4KB*/
	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
		/* cap + 0x12 = PCIe Link Status; width in bits 9:4 */
		lnk = pci_read_config(dev, reg + 0x12, 2);
		sc->link_width = (lnk >> 4) & 0x3f;

		if (sc->pectl == 0) {
			/* cap + 0x8 = Device Control; max read request
			   is bits 14:12, value 5 => 4096 bytes */
			pectl = pci_read_config(dev, reg + 0x8, 2);
			pectl = (pectl & ~0x7000) | (5 << 12);
			pci_write_config(dev, reg + 0x8, pectl, 2);
			sc->pectl = pectl;
		} else {
			/* restore saved pectl after watchdog reset */
			pci_write_config(dev, reg + 0x8, sc->pectl, 2);
		}
	}

	/* Enable DMA and Memory space access */
	pci_enable_busmaster(dev);
}
/*
 * Read the NIC's reboot status register through the vendor-specific
 * PCI capability (this works even when the BARs are unusable after a
 * firmware crash).  Returns the status value, or (uint32_t)-1 if the
 * vendor-specific capability cannot be located.
 */
static uint32_t
mxge_read_reboot(mxge_softc_t *sc)
{
	device_t dev = sc->dev;
	uint32_t vs;

	/* find the vendor specific offset */
	if (pci_find_cap(dev, PCIY_VENDOR, &vs) != 0) {
		device_printf(sc->dev,
		    "could not find vendor specific offset\n");
		return (uint32_t)-1;
	}
	/* enable read32 mode */
	pci_write_config(dev, vs + 0x10, 0x3, 1);
	/* tell NIC which register to read */
	pci_write_config(dev, vs + 0x18, 0xfffffff0, 4);
	/* the cached value appears at vs + 0x14 */
	return (pci_read_config(dev, vs + 0x14, 4));
}
/*
 * Recover from a watchdog-detected hang.  If the NIC firmware has
 * rebooted (detected by the busmaster bit having been wiped from PCI
 * config space), quiesce TX, restore config space, reload firmware
 * and reopen the interface.  Reschedules the tick callout on
 * success.  Runs from mxge_watchdog_task() with the driver lock
 * held.
 */
static void
mxge_watchdog_reset(mxge_softc_t *sc)
{
	struct pci_devinfo *dinfo;
	struct mxge_slice_state *ss;
	int err, running, s, num_tx_slices = 1;
	uint32_t reboot;
	uint16_t cmd;

	err = ENXIO;

	device_printf(sc->dev, "Watchdog reset!\n");

	/*
	 * check to see if the NIC rebooted.  If it did, then all of
	 * PCI config space has been reset, and things like the
	 * busmaster bit will be zero.  If this is the case, then we
	 * must restore PCI config space before the NIC can be used
	 * again
	 */
	cmd = pci_read_config(sc->dev, PCIR_COMMAND, 2);
	if (cmd == 0xffff) {
		/*
		 * maybe the watchdog caught the NIC rebooting; wait
		 * up to 100ms for it to finish.  If it does not come
		 * back, then give up
		 */
		DELAY(1000*100);
		cmd = pci_read_config(sc->dev, PCIR_COMMAND, 2);
		if (cmd == 0xffff) {
			device_printf(sc->dev, "NIC disappeared!\n");
		}
	}
	if ((cmd & PCIM_CMD_BUSMASTEREN) == 0) {
		/* print the reboot status */
		reboot = mxge_read_reboot(sc);
		device_printf(sc->dev, "NIC rebooted, status = 0x%x\n",
		    reboot);
		running = if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING;
		if (running) {
			/*
			 * quiesce NIC so that TX routines will not try to
			 * xmit after restoration of BAR
			 */

			/* Mark the link as down */
			if (sc->link_state) {
				sc->link_state = 0;
				if_link_state_change(sc->ifp,
				    LINK_STATE_DOWN);
			}

			num_tx_slices = sc->num_slices;

			/* grab all TX locks to ensure no tx  */
			for (s = 0; s < num_tx_slices; s++) {
				ss = &sc->ss[s];
				mtx_lock(&ss->tx.mtx);
			}
			mxge_close(sc, 1);	/* NIC already down; skip cmd */
		}
		/* restore PCI configuration space */
		dinfo = device_get_ivars(sc->dev);
		pci_cfg_restore(sc->dev, dinfo);

		/* and redo any changes we made to our config space */
		mxge_setup_cfg_space(sc);

		/* reload f/w */
		err = mxge_load_firmware(sc, 0);
		if (err) {
			device_printf(sc->dev,
			    "Unable to re-load f/w\n");
		}
		if (running) {
			if (!err)
				err = mxge_open(sc);
			/* restart TX and release all TX locks */
			for (s = 0; s < num_tx_slices; s++) {
				ss = &sc->ss[s];
				mxge_start_locked(ss);
				mtx_unlock(&ss->tx.mtx);
			}
		}
		sc->watchdog_resets++;
	} else {
		device_printf(sc->dev,
		    "NIC did not reboot, not resetting\n");
		err = 0;
	}
	if (err) {
		device_printf(sc->dev, "watchdog reset failed\n");
	} else {
		if (sc->dying == 2)
			sc->dying = 0;	/* recovered from idle h/w fault */
		callout_reset(&sc->co_hdl, mxge_ticks, mxge_tick, sc);
	}
}
static void
mxge_watchdog_task(void *arg, int pending)
{
mxge_softc_t *sc = arg;
mtx_lock(&sc->driver_mtx);
mxge_watchdog_reset(sc);
mtx_unlock(&sc->driver_mtx);
}
/*
 * Log diagnostic state for a TX ring that appears to be stuck.
 * Purely informational; does not modify ring state.
 *
 * Note: the 'tx' argument is ignored and re-derived from the slice
 * index, so the correct ring is reported even if the caller passed a
 * stale pointer.
 */
static void
mxge_warn_stuck(mxge_softc_t *sc, mxge_tx_ring_t *tx, int slice)
{
	tx = &sc->ss[slice].tx;
	/* fixed typo: "struck?" -> "stuck?" */
	device_printf(sc->dev, "slice %d stuck? ring state:\n", slice);
	device_printf(sc->dev,
	    "tx.req=%d tx.done=%d, tx.queue_active=%d\n",
	    tx->req, tx->done, tx->queue_active);
	device_printf(sc->dev, "tx.activate=%d tx.deactivate=%d\n",
	    tx->activate, tx->deactivate);
	device_printf(sc->dev, "pkt_done=%d fw=%d\n",
	    tx->pkt_done,
	    be32toh(sc->ss->fw_stats->send_done_count));
}
/*
 * Periodic TX-hang detector, called from mxge_tick().  A ring is
 * deemed stuck when it had pending transmits at the previous check
 * and no completions have arrived since (tx->done unchanged).  If
 * inbound pause frames are not the explanation, a watchdog reset is
 * scheduled and ENXIO is returned; otherwise only a warning is
 * printed.  Returns 0 when nothing is stuck.
 */
static int
mxge_watchdog(mxge_softc_t *sc)
{
	mxge_tx_ring_t *tx;
	uint32_t rx_pause = be32toh(sc->ss->fw_stats->dropped_pause);
	int i, err = 0;

	/* see if we have outstanding transmits, which
	   have been pending for more than mxge_ticks */
	for (i = 0; (i < sc->num_slices) && (err == 0); i++) {
		tx = &sc->ss[i].tx;
		if (tx->req != tx->done &&
		    tx->watchdog_req != tx->watchdog_done &&
		    tx->done == tx->watchdog_done) {
			/* check for pause blocking before resetting */
			if (tx->watchdog_rx_pause == rx_pause) {
				mxge_warn_stuck(sc, tx, i);
				taskqueue_enqueue(sc->tq, &sc->watchdog_task);
				return (ENXIO);
			}
			else
				device_printf(sc->dev, "Flow control blocking "
				    "xmits, check link partner\n");
		}

		/* snapshot ring state for the next check */
		tx->watchdog_req = tx->req;
		tx->watchdog_done = tx->done;
		tx->watchdog_rx_pause = rx_pause;
	}

	if (sc->need_media_probe)
		mxge_media_probe(sc);
	return (err);
}
/*
 * ifnet get-counter method: sum the requested statistic across all
 * slices; anything we do not track per-slice is delegated to the
 * stack's default accounting.
 */
static uint64_t
mxge_get_counter(if_t ifp, ift_counter cnt)
{
	struct mxge_softc *sc = if_getsoftc(ifp);
	uint64_t total = 0;
	int s;

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		for (s = 0; s < sc->num_slices; s++)
			total += sc->ss[s].ipackets;
		break;
	case IFCOUNTER_OPACKETS:
		for (s = 0; s < sc->num_slices; s++)
			total += sc->ss[s].opackets;
		break;
	case IFCOUNTER_OERRORS:
		for (s = 0; s < sc->num_slices; s++)
			total += sc->ss[s].oerrors;
		break;
	case IFCOUNTER_OBYTES:
		for (s = 0; s < sc->num_slices; s++)
			total += sc->ss[s].obytes;
		break;
	case IFCOUNTER_OMCASTS:
		for (s = 0; s < sc->num_slices; s++)
			total += sc->ss[s].omcasts;
		break;
	case IFCOUNTER_OQDROPS:
		/* drops recorded by each slice's transmit buf_ring */
		for (s = 0; s < sc->num_slices; s++)
			total += sc->ss[s].tx.br->br_drops;
		break;
	default:
		return (if_get_counter_default(ifp, cnt));
	}
	return (total);
}
/*
 * Periodic housekeeping callout (rearmed every mxge_ticks; runs with
 * the driver lock held via callout_init_mtx in attach).  Runs the TX
 * watchdog on every 4th tick while the interface is up, and checks
 * PCI config space for a hardware fault.
 *
 * NOTE(review): 'pkts' is initialized to 0 and never updated, so the
 * "idle" branch always executes and the interval is always stretched
 * 4x -- looks like a leftover from removed statistics code; confirm.
 */
static void
mxge_tick(void *arg)
{
	mxge_softc_t *sc = arg;
	u_long pkts = 0;
	int err = 0;
	int running, ticks;
	uint16_t cmd;

	ticks = mxge_ticks;
	running = if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING;
	if (running) {
		if (!sc->watchdog_countdown) {
			err = mxge_watchdog(sc);
			sc->watchdog_countdown = 4;
		}
		sc->watchdog_countdown--;
	}
	if (pkts == 0) {
		/* ensure NIC did not suffer h/w fault while idle */
		cmd = pci_read_config(sc->dev, PCIR_COMMAND, 2);
		if ((cmd & PCIM_CMD_BUSMASTEREN) == 0) {
			/* busmaster bit gone: schedule recovery */
			sc->dying = 2;
			taskqueue_enqueue(sc->tq, &sc->watchdog_task);
			err = ENXIO;
		}

		/* look less often if NIC is idle */
		ticks *= 4;
	}

	/* on error the watchdog task will rearm the callout itself */
	if (err == 0)
		callout_reset(&sc->co_hdl, ticks, mxge_tick, sc);
}
/*
 * ifmedia change handler.  The media on this NIC is fixed, so any
 * request to change it is rejected.
 */
static int
mxge_media_change(if_t ifp)
{

	return (EINVAL);
}
/*
 * Change the interface MTU.  Rejects frame sizes outside the range
 * the NIC supports.  If the interface is running, it is closed and
 * reopened with the new MTU; should the reopen fail, the previous
 * MTU is restored and the interface is brought back up with it.
 */
static int
mxge_change_mtu(mxge_softc_t *sc, int mtu)
{
	if_t ifp = sc->ifp;
	int frame_len, prev_mtu, rc = 0;

	frame_len = mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	if (frame_len > sc->max_mtu || frame_len < 60)
		return EINVAL;

	mtx_lock(&sc->driver_mtx);
	prev_mtu = if_getmtu(ifp);
	if_setmtu(ifp, mtu);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		mxge_close(sc, 0);
		rc = mxge_open(sc);
		if (rc != 0) {
			/* reopen failed: roll back to the old MTU */
			if_setmtu(ifp, prev_mtu);
			mxge_close(sc, 0);
			(void) mxge_open(sc);
		}
	}
	mtx_unlock(&sc->driver_mtx);
	return rc;
}
/*
 * ifmedia status handler: report link validity/activity and the
 * currently detected media type (always full duplex Ethernet).
 */
static void
mxge_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	mxge_softc_t *sc;

	sc = if_getsoftc(ifp);
	if (sc == NULL)
		return;
	ifmr->ifm_status = IFM_AVALID;
	if (sc->link_state)
		ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER | IFM_FDX | sc->current_media;
}
/*
 * Read i2c->len bytes from the transceiver module (I2C device 0xA0 =
 * ID/EEPROM, 0xA2 = diagnostics) via firmware I2C commands.  Each
 * byte needs a read request followed by polling (up to ~50ms) until
 * the firmware has cached the value.  Returns 0 on success, EINVAL
 * for a bad address or length, or EIO on a firmware error.
 */
static int
mxge_fetch_i2c(mxge_softc_t *sc, struct ifi2creq *i2c)
{
	mxge_cmd_t cmd;
	uint32_t i2c_args;
	int i, ms, err;

	if (i2c->dev_addr != 0xA0 &&
	    i2c->dev_addr != 0xA2)
		return (EINVAL);
	if (i2c->len > sizeof(i2c->data))
		return (EINVAL);

	for (i = 0; i < i2c->len; i++) {
		/* device address in bits 15:8, register offset in 7:0 */
		i2c_args = i2c->dev_addr << 0x8;
		i2c_args |= i2c->offset + i;
		cmd.data0 = 0;	 /* just fetch 1 byte, not all 256 */
		cmd.data1 = i2c_args;
		err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_READ, &cmd);

		if (err != MXGEFW_CMD_OK)
			return (EIO);

		/* now we wait for the data to be cached */
		cmd.data0 = i2c_args & 0xff;
		err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_BYTE, &cmd);
		for (ms = 0; (err == EBUSY) && (ms < 50); ms++) {
			cmd.data0 = i2c_args & 0xff;
			err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_BYTE, &cmd);
			if (err == EBUSY)
				DELAY(1000);	/* retry after 1ms */
		}
		if (err != MXGEFW_CMD_OK)
			return (EIO);
		i2c->data[i] = cmd.data0;
	}
	return (0);
}
/*
 * Ioctl handler for the mxge interface.
 *
 * Handles MTU changes, up/down and promiscuous/multicast flag
 * changes, capability (checksum/TSO/LRO/VLAN) toggling, media
 * queries and transceiver I2C reads; everything else is passed to
 * ether_ioctl().  Paths that touch driver state take sc->driver_mtx
 * and refuse to run when the device is dying.
 *
 * Fix: when disabling IFCAP_TXCSUM_IPV6, the hwassist bits cleared
 * were (CSUM_TCP_IPV6 | CSUM_UDP) -- clobbering the IPv4 UDP assist
 * while leaving CSUM_UDP_IPV6 set.  Now clears CSUM_UDP_IPV6,
 * mirroring the enable path.
 */
static int
mxge_ioctl(if_t ifp, u_long command, caddr_t data)
{
	mxge_softc_t *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifi2creq i2c;
	int err, mask;

	err = 0;
	switch (command) {
	case SIOCSIFMTU:
		err = mxge_change_mtu(sc, ifr->ifr_mtu);
		break;

	case SIOCSIFFLAGS:
		mtx_lock(&sc->driver_mtx);
		if (sc->dying) {
			mtx_unlock(&sc->driver_mtx);
			return EINVAL;
		}
		if (if_getflags(ifp) & IFF_UP) {
			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
				err = mxge_open(sc);
			} else {
				/* take care of promisc and allmulti
				   flag changes */
				mxge_change_promisc(sc,
				    if_getflags(ifp) & IFF_PROMISC);
				mxge_set_multicast_list(sc);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				mxge_close(sc, 0);
			}
		}
		mtx_unlock(&sc->driver_mtx);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		mtx_lock(&sc->driver_mtx);
		if (sc->dying) {
			mtx_unlock(&sc->driver_mtx);
			return (EINVAL);
		}
		mxge_set_multicast_list(sc);
		mtx_unlock(&sc->driver_mtx);
		break;

	case SIOCSIFCAP:
		mtx_lock(&sc->driver_mtx);
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		if (mask & IFCAP_TXCSUM) {
			if (IFCAP_TXCSUM & if_getcapenable(ifp)) {
				/* TSO depends on tx csum; drop both */
				mask &= ~IFCAP_TSO4;
				if_setcapenablebit(ifp, 0,
				    (IFCAP_TXCSUM | IFCAP_TSO4));
				if_sethwassistbits(ifp, 0,
				    (CSUM_TCP | CSUM_UDP));
			} else {
				if_setcapenablebit(ifp, IFCAP_TXCSUM, 0);
				if_sethwassistbits(ifp,
				    (CSUM_TCP | CSUM_UDP), 0);
			}
		}
		if (mask & IFCAP_RXCSUM) {
			if (IFCAP_RXCSUM & if_getcapenable(ifp)) {
				if_setcapenablebit(ifp, 0, IFCAP_RXCSUM);
			} else {
				if_setcapenablebit(ifp, IFCAP_RXCSUM, 0);
			}
		}
		if (mask & IFCAP_TSO4) {
			if (IFCAP_TSO4 & if_getcapenable(ifp)) {
				if_setcapenablebit(ifp, 0, IFCAP_TSO4);
			} else if (IFCAP_TXCSUM & if_getcapenable(ifp)) {
				if_setcapenablebit(ifp, IFCAP_TSO4, 0);
				if_sethwassistbits(ifp, CSUM_TSO, 0);
			} else {
				printf("mxge requires tx checksum offload"
				    " be enabled to use TSO\n");
				err = EINVAL;
			}
		}
#if IFCAP_TSO6
		if (mask & IFCAP_TXCSUM_IPV6) {
			if (IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp)) {
				/* IPv6 TSO depends on IPv6 tx csum */
				mask &= ~IFCAP_TSO6;
				if_setcapenablebit(ifp, 0,
				    IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
				/* was CSUM_UDP; clear the IPv6 bit instead */
				if_sethwassistbits(ifp, 0,
				    CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
			} else {
				if_setcapenablebit(ifp, IFCAP_TXCSUM_IPV6, 0);
				if_sethwassistbits(ifp,
				    CSUM_TCP_IPV6 | CSUM_UDP_IPV6, 0);
			}
		}
		if (mask & IFCAP_RXCSUM_IPV6) {
			if (IFCAP_RXCSUM_IPV6 & if_getcapenable(ifp)) {
				if_setcapenablebit(ifp, 0, IFCAP_RXCSUM_IPV6);
			} else {
				if_setcapenablebit(ifp, IFCAP_RXCSUM_IPV6, 0);
			}
		}
		if (mask & IFCAP_TSO6) {
			if (IFCAP_TSO6 & if_getcapenable(ifp)) {
				if_setcapenablebit(ifp, 0, IFCAP_TSO6);
			} else if (IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp)) {
				if_setcapenablebit(ifp, IFCAP_TSO6, 0);
				if_sethwassistbits(ifp, CSUM_TSO, 0);
			} else {
				printf("mxge requires tx checksum offload"
				    " be enabled to use TSO\n");
				err = EINVAL;
			}
		}
#endif /*IFCAP_TSO6 */

		if (mask & IFCAP_LRO)
			if_togglecapenable(ifp, IFCAP_LRO);
		if (mask & IFCAP_VLAN_HWTAGGING)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
		if (mask & IFCAP_VLAN_HWTSO)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);

		/* VLAN TSO needs hw support and hw tagging enabled */
		if (!(if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) ||
		    !(if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING))
			if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO);

		mtx_unlock(&sc->driver_mtx);
		VLAN_CAPABILITIES(ifp);

		break;

	case SIOCGIFMEDIA:
		mtx_lock(&sc->driver_mtx);
		if (sc->dying) {
			mtx_unlock(&sc->driver_mtx);
			return (EINVAL);
		}
		mxge_media_probe(sc);
		mtx_unlock(&sc->driver_mtx);
		err = ifmedia_ioctl(ifp, (struct ifreq *)data,
		    &sc->media, command);
		break;

	case SIOCGI2C:
		if (sc->connector != MXGE_XFP &&
		    sc->connector != MXGE_SFP) {
			err = ENXIO;
			break;
		}
		err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
		if (err != 0)
			break;
		mtx_lock(&sc->driver_mtx);
		if (sc->dying) {
			mtx_unlock(&sc->driver_mtx);
			return (EINVAL);
		}
		err = mxge_fetch_i2c(sc, &i2c);
		mtx_unlock(&sc->driver_mtx);
		if (err == 0)
			err = copyout(&i2c, ifr_data_get_ptr(ifr),
			    sizeof(i2c));
		break;
	default:
		err = ether_ioctl(ifp, command, data);
		break;
	}
	return err;
}
/*
 * Load the hw.mxge.* loader tunables and clamp them to sane ranges.
 * Called once, early in attach.
 */
static void
mxge_fetch_tunables(mxge_softc_t *sc)
{

	TUNABLE_INT_FETCH("hw.mxge.max_slices", &mxge_max_slices);
	TUNABLE_INT_FETCH("hw.mxge.flow_control_enabled",
	    &mxge_flow_control);
	TUNABLE_INT_FETCH("hw.mxge.intr_coal_delay",
	    &mxge_intr_coal_delay);
	TUNABLE_INT_FETCH("hw.mxge.nvidia_ecrc_enable",
	    &mxge_nvidia_ecrc_enable);
	TUNABLE_INT_FETCH("hw.mxge.force_firmware",
	    &mxge_force_firmware);
	TUNABLE_INT_FETCH("hw.mxge.deassert_wait",
	    &mxge_deassert_wait);
	TUNABLE_INT_FETCH("hw.mxge.verbose",
	    &mxge_verbose);
	TUNABLE_INT_FETCH("hw.mxge.ticks", &mxge_ticks);
	TUNABLE_INT_FETCH("hw.mxge.always_promisc", &mxge_always_promisc);
	/* both spellings accepted for the RSS hash type --
	   NOTE(review): presumably for backward compatibility; confirm */
	TUNABLE_INT_FETCH("hw.mxge.rss_hash_type", &mxge_rss_hash_type);
	TUNABLE_INT_FETCH("hw.mxge.rss_hashtype", &mxge_rss_hash_type);
	TUNABLE_INT_FETCH("hw.mxge.initial_mtu", &mxge_initial_mtu);
	TUNABLE_INT_FETCH("hw.mxge.throttle", &mxge_throttle);

	if (bootverbose)
		mxge_verbose = 1;
	/* coalescing delay: sane range is 0..10ms; default to 30us */
	if (mxge_intr_coal_delay < 0 || mxge_intr_coal_delay > 10*1000)
		mxge_intr_coal_delay = 30;
	if (mxge_ticks == 0)
		mxge_ticks = hz / 2;
	sc->pause = mxge_flow_control;
	if (mxge_rss_hash_type < MXGEFW_RSS_HASH_TYPE_IPV4
	    || mxge_rss_hash_type > MXGEFW_RSS_HASH_TYPE_MAX) {
		mxge_rss_hash_type = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
	}
	if (mxge_initial_mtu > ETHERMTU_JUMBO ||
	    mxge_initial_mtu < ETHER_MIN_LEN)
		mxge_initial_mtu = ETHERMTU_JUMBO;

	/* clamp a nonzero throttle into [MIN, MAX] */
	if (mxge_throttle && mxge_throttle > MXGE_MAX_THROTTLE)
		mxge_throttle = MXGE_MAX_THROTTLE;
	if (mxge_throttle && mxge_throttle < MXGE_MIN_THROTTLE)
		mxge_throttle = MXGE_MIN_THROTTLE;
	sc->throttle = mxge_throttle;
}
/*
 * Free all per-slice state allocated by mxge_alloc_slices().  Safe
 * on a partially-initialized array: each resource is checked before
 * release, and tx.mtx is only destroyed when fw_stats was allocated,
 * since mtx_init happens after that allocation in the alloc path.
 */
static void
mxge_free_slices(mxge_softc_t *sc)
{
	struct mxge_slice_state *ss;
	int i;

	if (sc->ss == NULL)
		return;

	for (i = 0; i < sc->num_slices; i++) {
		ss = &sc->ss[i];
		if (ss->fw_stats != NULL) {
			mxge_dma_free(&ss->fw_stats_dma);
			ss->fw_stats = NULL;
			if (ss->tx.br != NULL) {
				drbr_free(ss->tx.br, M_DEVBUF);
				ss->tx.br = NULL;
			}
			mtx_destroy(&ss->tx.mtx);
		}
		if (ss->rx_done.entry != NULL) {
			mxge_dma_free(&ss->rx_done.dma);
			ss->rx_done.entry = NULL;
		}
	}
	free(sc->ss, M_DEVBUF);
	sc->ss = NULL;
}
/*
 * Allocate the per-slice state array and, for each slice, its RX
 * completion (interrupt) queue, firmware stats DMA block, TX mutex
 * and transmit buf_ring.  Also caches the firmware's RX ring size in
 * sc->rx_ring_size.  On failure everything is unwound via
 * mxge_free_slices() and ENOMEM is returned.
 */
static int
mxge_alloc_slices(mxge_softc_t *sc)
{
	mxge_cmd_t cmd;
	struct mxge_slice_state *ss;
	size_t bytes;
	int err, i, max_intr_slots;

	err = mxge_send_cmd(sc, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd);
	if (err != 0) {
		device_printf(sc->dev, "Cannot determine rx ring size\n");
		return err;
	}
	sc->rx_ring_size = cmd.data0;
	/* 2x the RX ring entry count -- NOTE(review): presumably to
	   cover both the small and big receive rings; confirm */
	max_intr_slots = 2 * (sc->rx_ring_size / sizeof (mcp_dma_addr_t));

	bytes = sizeof (*sc->ss) * sc->num_slices;
	sc->ss = malloc(bytes, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->ss == NULL)
		return (ENOMEM);
	for (i = 0; i < sc->num_slices; i++) {
		ss = &sc->ss[i];

		ss->sc = sc;

		/* allocate per-slice rx interrupt queues */

		bytes = max_intr_slots * sizeof (*ss->rx_done.entry);
		err = mxge_dma_alloc(sc, &ss->rx_done.dma, bytes, 4096);
		if (err != 0)
			goto abort;
		ss->rx_done.entry = ss->rx_done.dma.addr;
		bzero(ss->rx_done.entry, bytes);

		/*
		 * allocate the per-slice firmware stats; stats
		 * (including tx) are used only on the first
		 * slice for now
		 */

		bytes = sizeof (*ss->fw_stats);
		err = mxge_dma_alloc(sc, &ss->fw_stats_dma,
		    sizeof (*ss->fw_stats), 64);
		if (err != 0)
			goto abort;
		ss->fw_stats = (mcp_irq_data_t *)ss->fw_stats_dma.addr;
		snprintf(ss->tx.mtx_name, sizeof(ss->tx.mtx_name),
		    "%s:tx(%d)", device_get_nameunit(sc->dev), i);
		mtx_init(&ss->tx.mtx, ss->tx.mtx_name, NULL, MTX_DEF);
		ss->tx.br = buf_ring_alloc(2048, M_DEVBUF, M_WAITOK,
		    &ss->tx.mtx);
	}

	return (0);

abort:
	mxge_free_slices(sc);
	return (ENOMEM);
}
/*
 * Decide how many slices (RSS queues) to use; result is left in
 * sc->num_slices.  Defaults to 1.  Multiple slices are attempted
 * only when allowed by the tunable, the system is SMP, and at least
 * 2 MSI-X vectors are available; the final count is a power of two
 * capped by the firmware's queue limit, the MSI-X vector count, and
 * the CPU count or hw.mxge.max_slices.  On any firmware failure the
 * original (non-RSS) firmware is restored and one slice is used.
 */
static void
mxge_slice_probe(mxge_softc_t *sc)
{
	mxge_cmd_t cmd;
	char *old_fw;
	int msix_cnt, status, max_intr_slots;

	sc->num_slices = 1;
	/*
	 * don't enable multiple slices if they are not enabled,
	 * or if this is not an SMP system
	 */
	if (mxge_max_slices == 0 || mxge_max_slices == 1 || mp_ncpus < 2)
		return;

	/* see how many MSI-X interrupts are available */
	msix_cnt = pci_msix_count(sc->dev);
	if (msix_cnt < 2)
		return;

	/* now load the slice aware firmware see what it supports */
	old_fw = sc->fw_name;
	if (old_fw == mxge_fw_aligned)
		sc->fw_name = mxge_fw_rss_aligned;
	else
		sc->fw_name = mxge_fw_rss_unaligned;
	status = mxge_load_firmware(sc, 0);
	if (status != 0) {
		device_printf(sc->dev, "Falling back to a single slice\n");
		return;
	}

	/* try to send a reset command to the card to see if it
	   is alive */
	memset(&cmd, 0, sizeof (cmd));
	status = mxge_send_cmd(sc, MXGEFW_CMD_RESET, &cmd);
	if (status != 0) {
		device_printf(sc->dev, "failed reset\n");
		goto abort_with_fw;
	}

	/* get rx ring size */
	status = mxge_send_cmd(sc, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd);
	if (status != 0) {
		device_printf(sc->dev, "Cannot determine rx ring size\n");
		goto abort_with_fw;
	}
	max_intr_slots = 2 * (cmd.data0 / sizeof (mcp_dma_addr_t));

	/* tell it the size of the interrupt queues */
	cmd.data0 = max_intr_slots * sizeof (struct mcp_slot);
	status = mxge_send_cmd(sc, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd);
	if (status != 0) {
		device_printf(sc->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n");
		goto abort_with_fw;
	}

	/* ask the maximum number of slices it supports */
	status = mxge_send_cmd(sc, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd);
	if (status != 0) {
		device_printf(sc->dev,
		    "failed MXGEFW_CMD_GET_MAX_RSS_QUEUES\n");
		goto abort_with_fw;
	}
	sc->num_slices = cmd.data0;
	if (sc->num_slices > msix_cnt)
		sc->num_slices = msix_cnt;

	if (mxge_max_slices == -1) {
		/* cap to number of CPUs in system */
		if (sc->num_slices > mp_ncpus)
			sc->num_slices = mp_ncpus;
	} else {
		if (sc->num_slices > mxge_max_slices)
			sc->num_slices = mxge_max_slices;
	}
	/* make sure it is a power of two */
	while (sc->num_slices & (sc->num_slices - 1))
		sc->num_slices--;

	if (mxge_verbose)
		device_printf(sc->dev, "using %d slices\n",
		    sc->num_slices);

	return;

abort_with_fw:
	/* restore the non-RSS firmware; keep a single slice */
	sc->fw_name = old_fw;
	(void) mxge_load_firmware(sc, 0);
}
/*
 * Allocate one MSI-X vector per slice and hook each one up to
 * mxge_intr() with the matching slice state as its argument.  The
 * MSI-X table lives in BAR 2.  On failure everything allocated so
 * far is released and an errno value is returned.
 *
 * Fix: the M_NOWAIT allocation of sc->msix_ih was previously used
 * without a NULL check, risking a NULL dereference under memory
 * pressure; it now fails cleanly with ENOMEM.
 */
static int
mxge_add_msix_irqs(mxge_softc_t *sc)
{
	size_t bytes;
	int count, err, i, rid;

	rid = PCIR_BAR(2);
	sc->msix_table_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (sc->msix_table_res == NULL) {
		device_printf(sc->dev, "couldn't alloc MSIX table res\n");
		return ENXIO;
	}

	count = sc->num_slices;
	err = pci_alloc_msix(sc->dev, &count);
	if (err != 0) {
		device_printf(sc->dev, "pci_alloc_msix: failed, wanted %d"
		    "err = %d \n", sc->num_slices, err);
		goto abort_with_msix_table;
	}
	if (count < sc->num_slices) {
		device_printf(sc->dev, "pci_alloc_msix: need %d, got %d\n",
		    count, sc->num_slices);
		device_printf(sc->dev,
		    "Try setting hw.mxge.max_slices to %d\n",
		    count);
		err = ENOSPC;
		goto abort_with_msix;
	}

	bytes = sizeof (*sc->msix_irq_res) * sc->num_slices;
	sc->msix_irq_res = malloc(bytes, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (sc->msix_irq_res == NULL) {
		err = ENOMEM;
		goto abort_with_msix;
	}

	for (i = 0; i < sc->num_slices; i++) {
		rid = i + 1;	/* MSI-X IRQ rids are 1-based */
		sc->msix_irq_res[i] = bus_alloc_resource_any(sc->dev,
		    SYS_RES_IRQ, &rid, RF_ACTIVE);
		if (sc->msix_irq_res[i] == NULL) {
			device_printf(sc->dev, "couldn't allocate IRQ res"
			    " for message %d\n", i);
			err = ENXIO;
			goto abort_with_res;
		}
	}

	bytes = sizeof (*sc->msix_ih) * sc->num_slices;
	sc->msix_ih = malloc(bytes, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (sc->msix_ih == NULL) {
		/* previously unchecked; see function comment */
		err = ENOMEM;
		goto abort_with_res;
	}

	for (i = 0; i < sc->num_slices; i++) {
		err = bus_setup_intr(sc->dev, sc->msix_irq_res[i],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    mxge_intr, &sc->ss[i], &sc->msix_ih[i]);
		if (err != 0) {
			device_printf(sc->dev, "couldn't setup intr for "
			    "message %d\n", i);
			goto abort_with_intr;
		}
		bus_describe_intr(sc->dev, sc->msix_irq_res[i],
		    sc->msix_ih[i], "s%d", i);
	}

	if (mxge_verbose) {
		device_printf(sc->dev, "using %d msix IRQs:",
		    sc->num_slices);
		for (i = 0; i < sc->num_slices; i++)
			printf(" %jd", rman_get_start(sc->msix_irq_res[i]));
		printf("\n");
	}
	return (0);

abort_with_intr:
	for (i = 0; i < sc->num_slices; i++) {
		if (sc->msix_ih[i] != NULL) {
			bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
			    sc->msix_ih[i]);
			sc->msix_ih[i] = NULL;
		}
	}
	free(sc->msix_ih, M_DEVBUF);

abort_with_res:
	for (i = 0; i < sc->num_slices; i++) {
		rid = i + 1;
		if (sc->msix_irq_res[i] != NULL)
			bus_release_resource(sc->dev, SYS_RES_IRQ, rid,
			    sc->msix_irq_res[i]);
		sc->msix_irq_res[i] = NULL;
	}
	free(sc->msix_irq_res, M_DEVBUF);

abort_with_msix:
	pci_release_msi(sc->dev);

abort_with_msix_table:
	bus_release_resource(sc->dev, SYS_RES_MEMORY, PCIR_BAR(2),
	    sc->msix_table_res);

	return err;
}
/*
 * Set up a single interrupt: prefer MSI when the device exposes
 * exactly one message and it can be allocated, otherwise fall back
 * to legacy INTx.  The IRQ is wired to mxge_intr() with slice 0 as
 * its argument.  Returns 0 or an errno.
 */
static int
mxge_add_single_irq(mxge_softc_t *sc)
{
	int msi_count, rc, rid;

	msi_count = pci_msi_count(sc->dev);
	rid = 0;	/* legacy INTx rid */
	if (msi_count == 1 && pci_alloc_msi(sc->dev, &msi_count) == 0)
		rid = 1;	/* MSI rid */
	else
		sc->legacy_irq = 1;

	sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(sc->dev, "could not alloc interrupt\n");
		return ENXIO;
	}
	if (mxge_verbose)
		device_printf(sc->dev, "using %s irq %jd\n",
		    sc->legacy_irq ? "INTx" : "MSI",
		    rman_get_start(sc->irq_res));
	rc = bus_setup_intr(sc->dev, sc->irq_res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    mxge_intr, &sc->ss[0], &sc->ih);
	if (rc != 0) {
		/* undo the allocations made above */
		bus_release_resource(sc->dev, SYS_RES_IRQ,
		    sc->legacy_irq ? 0 : 1, sc->irq_res);
		if (!sc->legacy_irq)
			pci_release_msi(sc->dev);
	}
	return rc;
}
/*
 * Tear down everything mxge_add_msix_irqs() set up: interrupt
 * handlers, per-slice IRQ resources, the MSI-X table mapping and the
 * MSI-X allocation itself.
 */
static void
mxge_rem_msix_irqs(mxge_softc_t *sc)
{
	int rid, slice;

	for (slice = 0; slice < sc->num_slices; slice++) {
		if (sc->msix_ih[slice] == NULL)
			continue;
		bus_teardown_intr(sc->dev, sc->msix_irq_res[slice],
		    sc->msix_ih[slice]);
		sc->msix_ih[slice] = NULL;
	}
	free(sc->msix_ih, M_DEVBUF);

	for (slice = 0; slice < sc->num_slices; slice++) {
		rid = slice + 1;	/* rids are 1-based */
		if (sc->msix_irq_res[slice] != NULL)
			bus_release_resource(sc->dev, SYS_RES_IRQ, rid,
			    sc->msix_irq_res[slice]);
		sc->msix_irq_res[slice] = NULL;
	}
	free(sc->msix_irq_res, M_DEVBUF);

	bus_release_resource(sc->dev, SYS_RES_MEMORY, PCIR_BAR(2),
	    sc->msix_table_res);

	pci_release_msi(sc->dev);
}
/*
 * Tear down the single MSI or INTx interrupt created by
 * mxge_add_single_irq().
 */
static void
mxge_rem_single_irq(mxge_softc_t *sc)
{
	int rid;

	rid = sc->legacy_irq ? 0 : 1;	/* matches the allocation rid */
	bus_teardown_intr(sc->dev, sc->irq_res, sc->ih);
	bus_release_resource(sc->dev, SYS_RES_IRQ, rid, sc->irq_res);
	if (sc->legacy_irq == 0)
		pci_release_msi(sc->dev);
}
/*
 * Release the device interrupt(s), dispatching to the single-IRQ or
 * MSI-X teardown path depending on the slice count.
 */
static void
mxge_rem_irq(mxge_softc_t *sc)
{
	if (sc->num_slices <= 1)
		mxge_rem_single_irq(sc);
	else
		mxge_rem_msix_irqs(sc);
}
/*
 * Allocate and set up the device interrupt(s): one MSI-X vector per
 * slice when multiple slices are in use, otherwise a single MSI or
 * INTx interrupt.  Returns 0 on success or an errno value.
 *
 * An unreachable `if (0 && ...)` experiment that re-added the MSI-X
 * IRQs after setup has been removed.
 */
static int
mxge_add_irq(mxge_softc_t *sc)
{
	int err;

	if (sc->num_slices > 1)
		err = mxge_add_msix_irqs(sc);
	else
		err = mxge_add_single_irq(sc);

	return err;
}
static int
mxge_attach(device_t dev)
{
mxge_cmd_t cmd;
mxge_softc_t *sc = device_get_softc(dev);
if_t ifp;
int err, rid;
sc->dev = dev;
mxge_fetch_tunables(sc);
TASK_INIT(&sc->watchdog_task, 1, mxge_watchdog_task, sc);
sc->tq = taskqueue_create("mxge_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->tq);
if (sc->tq == NULL) {
err = ENOMEM;
goto abort_with_nothing;
}
err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1, /* alignment */
0, /* boundary */
BUS_SPACE_MAXADDR, /* low */
BUS_SPACE_MAXADDR, /* high */
NULL, NULL, /* filter */
65536 + 256, /* maxsize */
MXGE_MAX_SEND_DESC, /* num segs */
65536, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lock */
&sc->parent_dmat); /* tag */
if (err != 0) {
device_printf(sc->dev, "Err %d allocating parent dmat\n",
err);
goto abort_with_tq;
}
ifp = sc->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
- err = ENOSPC;
- goto abort_with_parent_dmat;
- }
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
snprintf(sc->cmd_mtx_name, sizeof(sc->cmd_mtx_name), "%s:cmd",
device_get_nameunit(dev));
mtx_init(&sc->cmd_mtx, sc->cmd_mtx_name, NULL, MTX_DEF);
snprintf(sc->driver_mtx_name, sizeof(sc->driver_mtx_name),
"%s:drv", device_get_nameunit(dev));
mtx_init(&sc->driver_mtx, sc->driver_mtx_name,
MTX_NETWORK_LOCK, MTX_DEF);
callout_init_mtx(&sc->co_hdl, &sc->driver_mtx, 0);
mxge_setup_cfg_space(sc);
/* Map the board into the kernel */
rid = PCIR_BARS;
sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (sc->mem_res == NULL) {
device_printf(dev, "could not map memory\n");
err = ENXIO;
goto abort_with_lock;
}
sc->sram = rman_get_virtual(sc->mem_res);
sc->sram_size = 2*1024*1024 - (2*(48*1024)+(32*1024)) - 0x100;
if (sc->sram_size > rman_get_size(sc->mem_res)) {
device_printf(dev, "impossible memory region size %jd\n",
rman_get_size(sc->mem_res));
err = ENXIO;
goto abort_with_mem_res;
}
/* make NULL terminated copy of the EEPROM strings section of
lanai SRAM */
bzero(sc->eeprom_strings, MXGE_EEPROM_STRINGS_SIZE);
bus_space_read_region_1(rman_get_bustag(sc->mem_res),
rman_get_bushandle(sc->mem_res),
sc->sram_size - MXGE_EEPROM_STRINGS_SIZE,
sc->eeprom_strings,
MXGE_EEPROM_STRINGS_SIZE - 2);
err = mxge_parse_strings(sc);
if (err != 0)
goto abort_with_mem_res;
/* Enable write combining for efficient use of PCIe bus */
mxge_enable_wc(sc);
/* Allocate the out of band dma memory */
err = mxge_dma_alloc(sc, &sc->cmd_dma,
sizeof (mxge_cmd_t), 64);
if (err != 0)
goto abort_with_mem_res;
sc->cmd = (mcp_cmd_response_t *) sc->cmd_dma.addr;
err = mxge_dma_alloc(sc, &sc->zeropad_dma, 64, 64);
if (err != 0)
goto abort_with_cmd_dma;
err = mxge_dma_alloc(sc, &sc->dmabench_dma, 4096, 4096);
if (err != 0)
goto abort_with_zeropad_dma;
/* select & load the firmware */
err = mxge_select_firmware(sc);
if (err != 0)
goto abort_with_dmabench;
sc->intr_coal_delay = mxge_intr_coal_delay;
mxge_slice_probe(sc);
err = mxge_alloc_slices(sc);
if (err != 0)
goto abort_with_dmabench;
err = mxge_reset(sc, 0);
if (err != 0)
goto abort_with_slices;
err = mxge_alloc_rings(sc);
if (err != 0) {
device_printf(sc->dev, "failed to allocate rings\n");
goto abort_with_slices;
}
err = mxge_add_irq(sc);
if (err != 0) {
device_printf(sc->dev, "failed to add irq\n");
goto abort_with_rings;
}
if_setbaudrate(ifp, IF_Gbps(10));
if_setcapabilities(ifp, IFCAP_RXCSUM | IFCAP_TXCSUM | IFCAP_TSO4 |
IFCAP_VLAN_MTU | IFCAP_LINKSTATE | IFCAP_TXCSUM_IPV6 |
IFCAP_RXCSUM_IPV6);
#if defined(INET) || defined(INET6)
if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
#endif
#ifdef MXGE_NEW_VLAN_API
if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM, 0);
/* Only FW 1.4.32 and newer can do TSO over vlans */
if (sc->fw_ver_major == 1 && sc->fw_ver_minor == 4 &&
sc->fw_ver_tiny >= 32)
if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTSO, 0);
#endif
sc->max_mtu = mxge_max_mtu(sc);
if (sc->max_mtu >= 9000)
if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0);
else
device_printf(dev, "MTU limited to %d. Install "
"latest firmware for 9000 byte jumbo support\n",
sc->max_mtu - ETHER_HDR_LEN);
if_sethwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_TSO);
if_sethwassistbits(ifp, CSUM_TCP_IPV6 | CSUM_UDP_IPV6, 0);
/* check to see if f/w supports TSO for IPv6 */
if (!mxge_send_cmd(sc, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, &cmd)) {
if (CSUM_TCP_IPV6)
if_setcapabilitiesbit(ifp, IFCAP_TSO6, 0);
sc->max_tso6_hlen = min(cmd.data0,
sizeof (sc->ss[0].scratch));
}
if_setcapenable(ifp, if_getcapabilities(ifp));
if (sc->lro_cnt == 0)
if_setcapenablebit(ifp, 0, IFCAP_LRO);
if_setinitfn(ifp, mxge_init);
if_setsoftc(ifp, sc);
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setioctlfn(ifp, mxge_ioctl);
if_setstartfn(ifp, mxge_start);
if_setgetcounterfn(ifp, mxge_get_counter);
if_sethwtsomax(ifp, IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
if_sethwtsomaxsegcount(ifp, sc->ss[0].tx.max_desc);
if_sethwtsomaxsegsize(ifp, IP_MAXPACKET);
/* Initialise the ifmedia structure */
ifmedia_init(&sc->media, 0, mxge_media_change,
mxge_media_status);
mxge_media_init(sc);
mxge_media_probe(sc);
sc->dying = 0;
ether_ifattach(ifp, sc->mac_addr);
/* ether_ifattach sets mtu to ETHERMTU */
if (mxge_initial_mtu != ETHERMTU)
mxge_change_mtu(sc, mxge_initial_mtu);
mxge_add_sysctls(sc);
if_settransmitfn(ifp, mxge_transmit);
if_setqflushfn(ifp, mxge_qflush);
taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
device_get_nameunit(sc->dev));
callout_reset(&sc->co_hdl, mxge_ticks, mxge_tick, sc);
return 0;
abort_with_rings:
mxge_free_rings(sc);
abort_with_slices:
mxge_free_slices(sc);
abort_with_dmabench:
mxge_dma_free(&sc->dmabench_dma);
abort_with_zeropad_dma:
mxge_dma_free(&sc->zeropad_dma);
abort_with_cmd_dma:
mxge_dma_free(&sc->cmd_dma);
abort_with_mem_res:
bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BARS, sc->mem_res);
abort_with_lock:
pci_disable_busmaster(dev);
mtx_destroy(&sc->cmd_mtx);
mtx_destroy(&sc->driver_mtx);
if_free(ifp);
-abort_with_parent_dmat:
bus_dma_tag_destroy(sc->parent_dmat);
abort_with_tq:
if (sc->tq != NULL) {
taskqueue_drain(sc->tq, &sc->watchdog_task);
taskqueue_free(sc->tq);
sc->tq = NULL;
}
abort_with_nothing:
return err;
}
/*
 * Device detach method: undo everything mxge_attach() set up, in
 * reverse order.  Fails with EBUSY if vlan interfaces are still
 * stacked on top of us.
 */
static int
mxge_detach(device_t dev)
{
	mxge_softc_t *sc = device_get_softc(dev);

	/* Refuse to detach while vlan interfaces still reference us. */
	if (mxge_vlans_active(sc)) {
		device_printf(sc->dev,
		    "Detach vlans before removing module\n");
		return EBUSY;
	}
	/* Mark the device dying and bring the interface down. */
	mtx_lock(&sc->driver_mtx);
	sc->dying = 1;
	if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)
		mxge_close(sc, 0);
	mtx_unlock(&sc->driver_mtx);
	ether_ifdetach(sc->ifp);
	/* Drain and destroy the watchdog taskqueue before freeing state. */
	if (sc->tq != NULL) {
		taskqueue_drain(sc->tq, &sc->watchdog_task);
		taskqueue_free(sc->tq);
		sc->tq = NULL;
	}
	callout_drain(&sc->co_hdl);
	ifmedia_removeall(&sc->media);
	mxge_dummy_rdma(sc, 0);
	mxge_rem_sysctls(sc);
	mxge_rem_irq(sc);
	/* Release rings, slices and DMA areas allocated at attach time. */
	mxge_free_rings(sc);
	mxge_free_slices(sc);
	mxge_dma_free(&sc->dmabench_dma);
	mxge_dma_free(&sc->zeropad_dma);
	mxge_dma_free(&sc->cmd_dma);
	bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BARS, sc->mem_res);
	pci_disable_busmaster(dev);
	mtx_destroy(&sc->cmd_mtx);
	mtx_destroy(&sc->driver_mtx);
	if_free(sc->ifp);
	bus_dma_tag_destroy(sc->parent_dmat);
	return 0;
}
/*
 * Device shutdown method.  The hardware needs no quiescing at system
 * shutdown, so this simply reports success.
 */
static int
mxge_shutdown(device_t dev)
{

	return (0);
}
/*
This file uses Myri10GE driver indentation.
Local Variables:
c-file-style:"linux"
tab-width:8
End:
*/
diff --git a/sys/dev/my/if_my.c b/sys/dev/my/if_my.c
index 4b7e5e711707..f6d407fedade 100644
--- a/sys/dev/my/if_my.c
+++ b/sys/dev/my/if_my.c
@@ -1,1760 +1,1754 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Written by: yen_cw@myson.com.tw
* Copyright (c) 2002 Myson Technology Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification, immediately at the beginning of the file.
* 2. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Myson fast ethernet PCI NIC driver, available at: http://www.myson.com.tw/
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#define NBPFILTER 1
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/bpf.h>
#include <vm/vm.h> /* for vtophys */
#include <vm/pmap.h> /* for vtophys */
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
/*
 * #define MY_USEIOSPACE
 */
/*
 * NOTE(review): the #ifdef below tests the *macro* MY_USEIOSPACE, which
 * is left commented out above, so MY_RES/MY_RID always select memory
 * space at compile time.  The run-time variable of the same name is set
 * in my_attach() but cannot affect this selection -- confirm intended.
 */
static int MY_USEIOSPACE = 1;
#ifdef MY_USEIOSPACE
#define MY_RES SYS_RES_IOPORT
#define MY_RID MY_PCI_LOIO
#else
#define MY_RES SYS_RES_MEMORY
#define MY_RID MY_PCI_LOMEM
#endif
#include <dev/my/if_myreg.h>
/*
 * Various supported device vendors/types and their names.
 */
/* Table entry matched by my_probe(); consumed later by my_attach(). */
struct my_type *my_info_tmp;
static struct my_type my_devs[] = {
	{MYSONVENDORID, MTD800ID, "Myson MTD80X Based Fast Ethernet Card"},
	{MYSONVENDORID, MTD803ID, "Myson MTD80X Based Fast Ethernet Card"},
	{MYSONVENDORID, MTD891ID, "Myson MTD89X Based Giga Ethernet Card"},
	{0, 0, NULL}
};

/*
 * Various supported PHY vendors/types and their names. Note that this driver
 * will work with pretty much any MII-compliant PHY, so failure to positively
 * identify the chip is not a fatal error.
 */
static struct my_type my_phys[] = {
	{MysonPHYID0, MysonPHYID0, "<MYSON MTD981>"},
	{SeeqPHYID0, SeeqPHYID0, "<SEEQ 80225>"},
	{AhdocPHYID0, AhdocPHYID0, "<AHDOC 101>"},
	{MarvellPHYID0, MarvellPHYID0, "<MARVELL 88E1000>"},
	{LevelOnePHYID0, LevelOnePHYID0, "<LevelOne LXT1000>"},
	{0, 0, "<MII-compliant physical interface>"}
};
/* Forward declarations for the driver entry points defined below. */
static int my_probe(device_t);
static int my_attach(device_t);
static int my_detach(device_t);
static int my_newbuf(struct my_softc *, struct my_chain_onefrag *);
static int my_encap(struct my_softc *, struct my_chain *, struct mbuf *);
static void my_rxeof(struct my_softc *);
static void my_txeof(struct my_softc *);
static void my_txeoc(struct my_softc *);
static void my_intr(void *);
static void my_start(if_t);
static void my_start_locked(if_t);
static int my_ioctl(if_t, u_long, caddr_t);
static void my_init(void *);
static void my_init_locked(struct my_softc *);
static void my_stop(struct my_softc *);
static void my_autoneg_timeout(void *);
static void my_watchdog(void *);
static int my_shutdown(device_t);
static int my_ifmedia_upd(if_t);
static void my_ifmedia_sts(if_t, struct ifmediareq *);
static u_int16_t my_phy_readreg(struct my_softc *, int);
static void my_phy_writereg(struct my_softc *, int, int);
static void my_autoneg_xmit(struct my_softc *);
static void my_autoneg_mii(struct my_softc *, int, int);
static void my_setmode_mii(struct my_softc *, int);
static void my_getmode_mii(struct my_softc *);
static void my_setcfg(struct my_softc *, int);
static void my_setmulti(struct my_softc *);
static void my_reset(struct my_softc *);
static int my_list_rx_init(struct my_softc *);
static int my_list_tx_init(struct my_softc *);
static long my_send_cmd_to_phy(struct my_softc *, int, int);

/* Read-modify-write helpers for setting/clearing bits in 32-bit CSRs. */
#define MY_SETBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
#define MY_CLRBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

/* Newbus method table, driver glue and module metadata. */
static device_method_t my_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, my_probe),
	DEVMETHOD(device_attach, my_attach),
	DEVMETHOD(device_detach, my_detach),
	DEVMETHOD(device_shutdown, my_shutdown),
	DEVMETHOD_END
};

static driver_t my_driver = {
	"my",
	my_methods,
	sizeof(struct my_softc)
};

DRIVER_MODULE(my, pci, my_driver, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, my, my_devs,
    nitems(my_devs) - 1);
MODULE_DEPEND(my, pci, 1, 1, 1);
MODULE_DEPEND(my, ether, 1, 1, 1);
/*
 * Bit-bang a MII management frame preamble plus the ST/OP/PHYAD/REGAD
 * fields onto the MDIO wires via the MY_MANAGEMENT register.  Returns
 * the last value written to MY_MANAGEMENT so the caller can continue
 * clocking data bits in (read) or out (write).
 */
static long
my_send_cmd_to_phy(struct my_softc * sc, int opcode, int regad)
{
	long miir;
	int i;
	int mask, data;

	MY_LOCK_ASSERT(sc);
	/* enable MII output */
	miir = CSR_READ_4(sc, MY_MANAGEMENT);
	miir &= 0xfffffff0;
	miir |= MY_MASK_MIIR_MII_WRITE + MY_MASK_MIIR_MII_MDO;
	/* send 32 1's preamble */
	for (i = 0; i < 32; i++) {
		/* low MDC; MDO is already high (miir) */
		miir &= ~MY_MASK_MIIR_MII_MDC;
		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
		/* high MDC */
		miir |= MY_MASK_MIIR_MII_MDC;
		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
	}
	/* calculate ST+OP+PHYAD+REGAD+TA */
	data = opcode | (sc->my_phy_addr << 7) | (regad << 2);
	/* sent out */
	mask = 0x8000;
	while (mask) {
		/* low MDC, prepare MDO */
		miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
		if (mask & data)
			miir |= MY_MASK_MIIR_MII_MDO;
		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
		/* high MDC */
		miir |= MY_MASK_MIIR_MII_MDC;
		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
		DELAY(30);
		/* next */
		mask >>= 1;
		/* For reads, stop driving MDO before the turnaround bits. */
		if (mask == 0x2 && opcode == MY_OP_READ)
			miir &= ~MY_MASK_MIIR_MII_WRITE;
	}
	return miir;
}
/*
 * Read a 16-bit PHY register.  The MTD803 exposes its PHY registers
 * directly in CSR space; other chips require bit-banging the value in
 * over the MDIO management interface.
 */
static u_int16_t
my_phy_readreg(struct my_softc * sc, int reg)
{
	long miir;
	int mask, data;

	MY_LOCK_ASSERT(sc);
	if (sc->my_info->my_did == MTD803ID)
		data = CSR_READ_2(sc, MY_PHYBASE + reg * 2);
	else {
		/* Clock out the read command, then shift in 16 data bits. */
		miir = my_send_cmd_to_phy(sc, MY_OP_READ, reg);
		/* read data */
		mask = 0x8000;
		data = 0;
		while (mask) {
			/* low MDC */
			miir &= ~MY_MASK_MIIR_MII_MDC;
			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
			/* read MDI */
			miir = CSR_READ_4(sc, MY_MANAGEMENT);
			if (miir & MY_MASK_MIIR_MII_MDI)
				data |= mask;
			/* high MDC, and wait */
			miir |= MY_MASK_MIIR_MII_MDC;
			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
			DELAY(30);
			/* next */
			mask >>= 1;
		}
		/* low MDC */
		miir &= ~MY_MASK_MIIR_MII_MDC;
		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
	}
	return (u_int16_t) data;
}
/*
 * Write a 16-bit PHY register.  The MTD803 takes the value directly in
 * CSR space; other chips require bit-banging it out over MDIO.
 */
static void
my_phy_writereg(struct my_softc * sc, int reg, int data)
{
	long miir;
	int mask;

	MY_LOCK_ASSERT(sc);
	if (sc->my_info->my_did == MTD803ID)
		CSR_WRITE_2(sc, MY_PHYBASE + reg * 2, data);
	else {
		/* Clock out the write command, then shift out 16 data bits. */
		miir = my_send_cmd_to_phy(sc, MY_OP_WRITE, reg);
		/* write data */
		mask = 0x8000;
		while (mask) {
			/* low MDC, prepare MDO */
			miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
			if (mask & data)
				miir |= MY_MASK_MIIR_MII_MDO;
			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
			DELAY(1);
			/* high MDC */
			miir |= MY_MASK_MIIR_MII_MDC;
			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
			DELAY(1);
			/* next */
			mask >>= 1;
		}
		/* low MDC */
		miir &= ~MY_MASK_MIIR_MII_MDC;
		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
	}
	return;
}
/*
 * if_foreach_llmaddr() callback: fold one link-level multicast address
 * into the 64-bit hash-filter image pointed to by 'arg'.  Always
 * returns 1 so the caller counts the addresses it visited.
 */
static u_int
my_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *filter = arg;
	int bit;

	/* Top 6 bits of the inverted big-endian CRC select the filter bit. */
	bit = ~ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
	if (bit >= 32)
		filter[1] |= 1U << (bit - 32);
	else
		filter[0] |= 1U << bit;
	return (1);
}
/*
* Program the 64-bit multicast hash filter.
*/
static void
my_setmulti(struct my_softc * sc)
{
	if_t ifp;
	u_int32_t hashes[2] = {0, 0};
	u_int32_t rxfilt;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	rxfilt = CSR_READ_4(sc, MY_TCRRCR);
	/* Promiscuous/allmulti: open the filter completely, no hashing. */
	if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) {
		rxfilt |= MY_AM;
		CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
		CSR_WRITE_4(sc, MY_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, MY_MAR1, 0xFFFFFFFF);
		return;
	}
	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, MY_MAR0, 0);
	CSR_WRITE_4(sc, MY_MAR1, 0);
	/* now program new ones */
	/* Enable multicast matching only if at least one address hashed. */
	if (if_foreach_llmaddr(ifp, my_hash_maddr, hashes) > 0)
		rxfilt |= MY_AM;
	else
		rxfilt &= ~MY_AM;
	CSR_WRITE_4(sc, MY_MAR0, hashes[0]);
	CSR_WRITE_4(sc, MY_MAR1, hashes[1]);
	CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
}
/*
* Initiate an autonegotiation session.
*/
/*
 * Kick off an autonegotiation session: reset the PHY, wait for the
 * reset bit to self-clear, then set the autoneg enable/restart bits.
 */
static void
my_autoneg_xmit(struct my_softc *sc)
{
	u_int16_t bmcr;

	MY_LOCK_ASSERT(sc);

	/* Reset the PHY and spin until it reports the reset complete. */
	my_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
	DELAY(500);
	while (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_RESET)
		continue;

	/* Re-read BMCR and request a (re)negotiation. */
	bmcr = my_phy_readreg(sc, PHY_BMCR);
	my_phy_writereg(sc, PHY_BMCR,
	    bmcr | PHY_BMCR_AUTONEGENBL | PHY_BMCR_AUTONEGRSTR);
}
/*
 * Callout handler fired when a scheduled autonegotiation session has
 * run its five-second course; finish the session via MY_FLAG_DELAYTIMEO.
 */
static void
my_autoneg_timeout(void *arg)
{
	struct my_softc *sc = arg;

	MY_LOCK_ASSERT(sc);
	my_autoneg_mii(sc, MY_FLAG_DELAYTIMEO, 1);
}
/*
* Invoke autonegotiation on a PHY.
*/
/*
 * Drive a PHY autonegotiation session and then program both the PHY
 * and the MAC to the negotiated (or fallback) speed/duplex.  'flag'
 * selects how the session is sequenced (MY_FLAG_FORCEDELAY blocks,
 * MY_FLAG_SCHEDDELAY arms a callout, MY_FLAG_DELAYTIMEO completes a
 * scheduled session); 'verbose' gates the console chatter.
 */
static void
my_autoneg_mii(struct my_softc * sc, int flag, int verbose)
{
	u_int16_t phy_sts = 0, media, advert, ability;
	u_int16_t ability2 = 0;
	if_t ifp;
	struct ifmedia *ifm;

	MY_LOCK_ASSERT(sc);
	ifm = &sc->ifmedia;
	ifp = sc->my_ifp;
	ifm->ifm_media = IFM_ETHER | IFM_AUTO;
#ifndef FORCE_AUTONEG_TFOUR
	/*
	 * First, see if autoneg is supported. If not, there's no point in
	 * continuing.
	 */
	phy_sts = my_phy_readreg(sc, PHY_BMSR);
	if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
		if (verbose)
			device_printf(sc->my_dev,
			    "autonegotiation not supported\n");
		/* Fall back to the lowest common denominator. */
		ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
		return;
	}
#endif
	switch (flag) {
	case MY_FLAG_FORCEDELAY:
		/*
		 * XXX Never use this option anywhere but in the probe
		 * routine: making the kernel stop dead in its tracks for
		 * three whole seconds after we've gone multi-user is really
		 * bad manners.
		 */
		my_autoneg_xmit(sc);
		DELAY(5000000);
		break;
	case MY_FLAG_SCHEDDELAY:
		/*
		 * Wait for the transmitter to go idle before starting an
		 * autoneg session, otherwise my_start() may clobber our
		 * timeout, and we don't want to allow transmission during an
		 * autoneg session since that can screw it up.
		 */
		if (sc->my_cdata.my_tx_head != NULL) {
			sc->my_want_auto = 1;
			/*
			 * NOTE(review): this path returns with the lock
			 * dropped although it was entered held -- confirm
			 * callers expect that asymmetry.
			 */
			MY_UNLOCK(sc);
			return;
		}
		my_autoneg_xmit(sc);
		callout_reset(&sc->my_autoneg_timer, hz * 5, my_autoneg_timeout,
		    sc);
		sc->my_autoneg = 1;
		sc->my_want_auto = 0;
		return;
	case MY_FLAG_DELAYTIMEO:
		/* Timer fired: the session scheduled above has finished. */
		callout_stop(&sc->my_autoneg_timer);
		sc->my_autoneg = 0;
		break;
	default:
		device_printf(sc->my_dev, "invalid autoneg flag: %d\n", flag);
		return;
	}
	if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
		if (verbose)
			device_printf(sc->my_dev, "autoneg complete, ");
		phy_sts = my_phy_readreg(sc, PHY_BMSR);
	} else {
		if (verbose)
			device_printf(sc->my_dev, "autoneg not complete, ");
	}
	media = my_phy_readreg(sc, PHY_BMCR);
	/* Link is good. Report modes and set duplex mode. */
	if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
		if (verbose)
			device_printf(sc->my_dev, "link status good. ");
		advert = my_phy_readreg(sc, PHY_ANAR);
		ability = my_phy_readreg(sc, PHY_LPAR);
		/* Marvell/LevelOne PHYs report gigabit ability separately. */
		if ((sc->my_pinfo->my_vid == MarvellPHYID0) ||
		    (sc->my_pinfo->my_vid == LevelOnePHYID0)) {
			ability2 = my_phy_readreg(sc, PHY_1000SR);
			if (ability2 & PHY_1000SR_1000BTXFULL) {
				advert = 0;
				ability = 0;
				/*
				 * this version did not support 1000M,
				 * ifm->ifm_media =
				 * IFM_ETHER|IFM_1000_T|IFM_FDX;
				 */
				ifm->ifm_media =
				    IFM_ETHER | IFM_100_TX | IFM_FDX;
				media &= ~PHY_BMCR_SPEEDSEL;
				media |= PHY_BMCR_1000;
				media |= PHY_BMCR_DUPLEX;
				printf("(full-duplex, 1000Mbps)\n");
			} else if (ability2 & PHY_1000SR_1000BTXHALF) {
				advert = 0;
				ability = 0;
				/*
				 * this version did not support 1000M,
				 * ifm->ifm_media = IFM_ETHER|IFM_1000_T;
				 */
				ifm->ifm_media = IFM_ETHER | IFM_100_TX;
				media &= ~PHY_BMCR_SPEEDSEL;
				media &= ~PHY_BMCR_DUPLEX;
				media |= PHY_BMCR_1000;
				printf("(half-duplex, 1000Mbps)\n");
			}
		}
		/* Pick the best mode both ends advertise, fastest first. */
		if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
			ifm->ifm_media = IFM_ETHER | IFM_100_T4;
			media |= PHY_BMCR_SPEEDSEL;
			media &= ~PHY_BMCR_DUPLEX;
			printf("(100baseT4)\n");
		} else if (advert & PHY_ANAR_100BTXFULL &&
		    ability & PHY_ANAR_100BTXFULL) {
			ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
			media |= PHY_BMCR_SPEEDSEL;
			media |= PHY_BMCR_DUPLEX;
			printf("(full-duplex, 100Mbps)\n");
		} else if (advert & PHY_ANAR_100BTXHALF &&
		    ability & PHY_ANAR_100BTXHALF) {
			ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
			media |= PHY_BMCR_SPEEDSEL;
			media &= ~PHY_BMCR_DUPLEX;
			printf("(half-duplex, 100Mbps)\n");
		} else if (advert & PHY_ANAR_10BTFULL &&
		    ability & PHY_ANAR_10BTFULL) {
			ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
			media &= ~PHY_BMCR_SPEEDSEL;
			media |= PHY_BMCR_DUPLEX;
			printf("(full-duplex, 10Mbps)\n");
		} else if (advert) {
			ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
			media &= ~PHY_BMCR_SPEEDSEL;
			media &= ~PHY_BMCR_DUPLEX;
			printf("(half-duplex, 10Mbps)\n");
		}
		media &= ~PHY_BMCR_AUTONEGENBL;
		/* Set ASIC's duplex mode to match the PHY. */
		my_phy_writereg(sc, PHY_BMCR, media);
		my_setcfg(sc, media);
	} else {
		if (verbose)
			device_printf(sc->my_dev, "no carrier\n");
	}
	my_init_locked(sc);
	/* Restart any transmission that was deferred during the session. */
	if (sc->my_tx_pend) {
		sc->my_autoneg = 0;
		sc->my_tx_pend = 0;
		my_start_locked(ifp);
	}
	return;
}
/*
* To get PHY ability.
*/
static void
my_getmode_mii(struct my_softc * sc)
{
u_int16_t bmsr;
if_t ifp;
MY_LOCK_ASSERT(sc);
ifp = sc->my_ifp;
bmsr = my_phy_readreg(sc, PHY_BMSR);
if (bootverbose)
device_printf(sc->my_dev, "PHY status word: %x\n", bmsr);
/* fallback */
sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
if (bmsr & PHY_BMSR_10BTHALF) {
if (bootverbose)
device_printf(sc->my_dev,
"10Mbps half-duplex mode supported\n");
ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX,
0, NULL);
ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
}
if (bmsr & PHY_BMSR_10BTFULL) {
if (bootverbose)
device_printf(sc->my_dev,
"10Mbps full-duplex mode supported\n");
ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
0, NULL);
sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
}
if (bmsr & PHY_BMSR_100BTXHALF) {
if (bootverbose)
device_printf(sc->my_dev,
"100Mbps half-duplex mode supported\n");
if_setbaudrate(ifp, 100000000);
ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX,
0, NULL);
sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
}
if (bmsr & PHY_BMSR_100BTXFULL) {
if (bootverbose)
device_printf(sc->my_dev,
"100Mbps full-duplex mode supported\n");
if_setbaudrate(ifp, 100000000);
ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
0, NULL);
sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
}
/* Some also support 100BaseT4. */
if (bmsr & PHY_BMSR_100BT4) {
if (bootverbose)
device_printf(sc->my_dev, "100baseT4 mode supported\n");
if_setbaudrate(ifp, 100000000);
ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_T4, 0, NULL);
sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_T4;
#ifdef FORCE_AUTONEG_TFOUR
if (bootverbose)
device_printf(sc->my_dev,
"forcing on autoneg support for BT4\n");
ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0 NULL):
sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
#endif
}
#if 0 /* this version did not support 1000M, */
if (sc->my_pinfo->my_vid == MarvellPHYID0) {
if (bootverbose)
device_printf(sc->my_dev,
"1000Mbps half-duplex mode supported\n");
if_setbaudrate(ifp, 1000000000);
ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_HDX,
0, NULL);
if (bootverbose)
device_printf(sc->my_dev,
"1000Mbps full-duplex mode supported\n");
if_setbaudrate(ifp, 1000000000);
ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
0, NULL);
sc->ifmedia.ifm_media = IFM_ETHER | IFM_1000_T | IFM_FDX;
}
#endif
if (bmsr & PHY_BMSR_CANAUTONEG) {
if (bootverbose)
device_printf(sc->my_dev, "autoneg supported\n");
ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
}
return;
}
/*
* Set speed and duplex mode.
*/
/*
 * Force the PHY to a caller-selected speed/duplex (cancelling any
 * autonegotiation session in progress) and mirror the setting into
 * the MAC via my_setcfg().
 */
static void
my_setmode_mii(struct my_softc * sc, int media)
{
	u_int16_t bmcr;

	MY_LOCK_ASSERT(sc);
	/*
	 * If an autoneg session is in progress, stop it.
	 */
	if (sc->my_autoneg) {
		device_printf(sc->my_dev, "canceling autoneg session\n");
		callout_stop(&sc->my_autoneg_timer);
		sc->my_autoneg = sc->my_want_auto = 0;
		bmcr = my_phy_readreg(sc, PHY_BMCR);
		bmcr &= ~PHY_BMCR_AUTONEGENBL;
		my_phy_writereg(sc, PHY_BMCR, bmcr);
	}
	device_printf(sc->my_dev, "selecting MII, ");
	bmcr = my_phy_readreg(sc, PHY_BMCR);
	/* Start from a clean slate: clear all speed/duplex control bits. */
	bmcr &= ~(PHY_BMCR_AUTONEGENBL | PHY_BMCR_SPEEDSEL | PHY_BMCR_1000 |
	    PHY_BMCR_DUPLEX | PHY_BMCR_LOOPBK);
#if 0				/* this version did not support 1000M, */
	if (IFM_SUBTYPE(media) == IFM_1000_T) {
		printf("1000Mbps/T4, half-duplex\n");
		bmcr &= ~PHY_BMCR_SPEEDSEL;
		bmcr &= ~PHY_BMCR_DUPLEX;
		bmcr |= PHY_BMCR_1000;
	}
#endif
	if (IFM_SUBTYPE(media) == IFM_100_T4) {
		printf("100Mbps/T4, half-duplex\n");
		bmcr |= PHY_BMCR_SPEEDSEL;
		bmcr &= ~PHY_BMCR_DUPLEX;
	}
	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		printf("100Mbps, ");
		bmcr |= PHY_BMCR_SPEEDSEL;
	}
	if (IFM_SUBTYPE(media) == IFM_10_T) {
		printf("10Mbps, ");
		bmcr &= ~PHY_BMCR_SPEEDSEL;
	}
	if ((media & IFM_GMASK) == IFM_FDX) {
		printf("full duplex\n");
		bmcr |= PHY_BMCR_DUPLEX;
	} else {
		printf("half duplex\n");
		bmcr &= ~PHY_BMCR_DUPLEX;
	}
	my_phy_writereg(sc, PHY_BMCR, bmcr);
	/* Keep the MAC's config register in sync with the PHY. */
	my_setcfg(sc, bmcr);
	return;
}
/*
* The Myson manual states that in order to fiddle with the 'full-duplex' and
* '100Mbps' bits in the netconfig register, we first have to put the
* transmit and/or receive logic in the idle state.
*/
/*
 * Sync the MAC's speed/duplex configuration bits with the given PHY
 * BMCR value, idling the tx/rx logic first as the hardware requires.
 */
static void
my_setcfg(struct my_softc * sc, int bmcr)
{
	int i, restart = 0;

	MY_LOCK_ASSERT(sc);
	/* If tx/rx are enabled, idle them before touching the config bits. */
	if (CSR_READ_4(sc, MY_TCRRCR) & (MY_TE | MY_RE)) {
		restart = 1;
		MY_CLRBIT(sc, MY_TCRRCR, (MY_TE | MY_RE));
		for (i = 0; i < MY_TIMEOUT; i++) {
			DELAY(10);
			if (!(CSR_READ_4(sc, MY_TCRRCR) &
			    (MY_TXRUN | MY_RXRUN)))
				break;
		}
		if (i == MY_TIMEOUT)
			device_printf(sc->my_dev,
			    "failed to force tx and rx to idle \n");
	}
	/* Translate the BMCR speed bits into the MAC's PS1000/PS10 bits. */
	MY_CLRBIT(sc, MY_TCRRCR, MY_PS1000);
	MY_CLRBIT(sc, MY_TCRRCR, MY_PS10);
	if (bmcr & PHY_BMCR_1000)
		MY_SETBIT(sc, MY_TCRRCR, MY_PS1000);
	else if (!(bmcr & PHY_BMCR_SPEEDSEL))
		MY_SETBIT(sc, MY_TCRRCR, MY_PS10);
	if (bmcr & PHY_BMCR_DUPLEX)
		MY_SETBIT(sc, MY_TCRRCR, MY_FD);
	else
		MY_CLRBIT(sc, MY_TCRRCR, MY_FD);
	/* Re-enable tx/rx if we had to stop them above. */
	if (restart)
		MY_SETBIT(sc, MY_TCRRCR, MY_TE | MY_RE);
	return;
}
/*
 * Issue a software reset and poll until the chip clears the SWR bit,
 * complaining (but continuing) if it never does.
 */
static void
my_reset(struct my_softc *sc)
{
	int tries;

	MY_LOCK_ASSERT(sc);
	MY_SETBIT(sc, MY_BCR, MY_SWR);
	for (tries = 0; tries < MY_TIMEOUT; tries++) {
		DELAY(10);
		if ((CSR_READ_4(sc, MY_BCR) & MY_SWR) == 0)
			break;
	}
	if (tries == MY_TIMEOUT)
		device_printf(sc->my_dev, "reset never completed!\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}
/*
* Probe for a Myson chip. Check the PCI vendor and device IDs against our
* list and return a device name if we find a match.
*/
/*
 * Match the PCI vendor/device ID pair against our device table; on a
 * hit, record the entry in my_info_tmp for my_attach() and set the
 * device description.
 */
static int
my_probe(device_t dev)
{
	struct my_type *t;

	for (t = my_devs; t->my_name != NULL; t++) {
		if (pci_get_vendor(dev) == t->my_vid &&
		    pci_get_device(dev) == t->my_did) {
			device_set_desc(dev, t->my_name);
			my_info_tmp = t;
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}
/*
* Attach the interface. Allocate softc structures, do ifmedia setup and
* ethernet/BPF attach.
*/
/*
 * Device attach: map registers, read the station address, allocate the
 * descriptor lists, locate the PHY, run an initial autonegotiation and
 * hook up the interrupt.  Errors unwind via the labels at the bottom.
 */
static int
my_attach(device_t dev)
{
	int i;
	u_char eaddr[ETHER_ADDR_LEN];
	u_int32_t iobase;
	struct my_softc *sc;
	if_t ifp;
	int media = IFM_ETHER | IFM_100_TX | IFM_FDX;
	unsigned int round;
	caddr_t roundptr;
	struct my_type *p;
	u_int16_t phy_vid, phy_did, phy_sts = 0;
	int rid, error = 0;

	sc = device_get_softc(dev);
	sc->my_dev = dev;
	mtx_init(&sc->my_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->my_autoneg_timer, &sc->my_mtx, 0);
	callout_init_mtx(&sc->my_watchdog, &sc->my_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/* NOTE(review): this flag is set but MY_RES/MY_RID are fixed at
	 * compile time -- see the MY_USEIOSPACE block above. */
	if (my_info_tmp->my_did == MTD800ID) {
		iobase = pci_read_config(dev, MY_PCI_LOIO, 4);
		if (iobase & 0x300)
			MY_USEIOSPACE = 0;
	}
	rid = MY_RID;
	sc->my_res = bus_alloc_resource_any(dev, MY_RES, &rid, RF_ACTIVE);
	if (sc->my_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto destroy_mutex;
	}
	sc->my_btag = rman_get_bustag(sc->my_res);
	sc->my_bhandle = rman_get_bushandle(sc->my_res);

	rid = 0;
	sc->my_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->my_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto release_io;
	}
	sc->my_info = my_info_tmp;

	/* Reset the adapter. */
	MY_LOCK(sc);
	my_reset(sc);
	MY_UNLOCK(sc);

	/*
	 * Get station address
	 */
	for (i = 0; i < ETHER_ADDR_LEN; ++i)
		eaddr[i] = CSR_READ_1(sc, MY_PAR0 + i);

	/* Allocate the descriptor lists with slack for alignment below. */
	sc->my_ldata_ptr = malloc(sizeof(struct my_list_data) + 8,
	    M_DEVBUF, M_NOWAIT);
	if (sc->my_ldata_ptr == NULL) {
		device_printf(dev, "no memory for list buffers!\n");
		error = ENXIO;
		goto release_irq;
	}
	sc->my_ldata = (struct my_list_data *) sc->my_ldata_ptr;

	/* Round the list pointer up to an 8-byte boundary. */
	round = (uintptr_t)sc->my_ldata_ptr & 0xF;
	roundptr = sc->my_ldata_ptr;
	for (i = 0; i < 8; i++) {
		if (round % 8) {
			round++;
			roundptr++;
		} else
			break;
	}
	sc->my_ldata = (struct my_list_data *) roundptr;
	bzero(sc->my_ldata, sizeof(struct my_list_data));

	ifp = sc->my_ifp = if_alloc(IFT_ETHER);
-	if (ifp == NULL) {
-		device_printf(dev, "can not if_alloc()\n");
-		error = ENOSPC;
-		goto free_ldata;
-	}
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, my_ioctl);
	if_setstartfn(ifp, my_start);
	if_setinitfn(ifp, my_init);
	if_setbaudrate(ifp, 10000000);
	if_setsendqlen(ifp, ifqmaxlen);
	if_setsendqready(ifp);

	/* MTD803 has no external MII PHY; use the first table entry. */
	if (sc->my_info->my_did == MTD803ID)
		sc->my_pinfo = my_phys;
	else {
		if (bootverbose)
			device_printf(dev, "probing for a PHY\n");
		MY_LOCK(sc);
		/* Scan the MII address range for a responding PHY. */
		for (i = MY_PHYADDR_MIN; i < MY_PHYADDR_MAX + 1; i++) {
			if (bootverbose)
				device_printf(dev, "checking address: %d\n", i);
			sc->my_phy_addr = i;
			phy_sts = my_phy_readreg(sc, PHY_BMSR);
			if ((phy_sts != 0) && (phy_sts != 0xffff))
				break;
			else
				phy_sts = 0;
		}
		if (phy_sts) {
			phy_vid = my_phy_readreg(sc, PHY_VENID);
			phy_did = my_phy_readreg(sc, PHY_DEVID);
			if (bootverbose) {
				device_printf(dev, "found PHY at address %d, ",
				    sc->my_phy_addr);
				printf("vendor id: %x device id: %x\n",
				    phy_vid, phy_did);
			}
			/* Identify the PHY; unknowns get a generic entry. */
			p = my_phys;
			while (p->my_vid) {
				if (phy_vid == p->my_vid) {
					sc->my_pinfo = p;
					break;
				}
				p++;
			}
			if (sc->my_pinfo == NULL)
				sc->my_pinfo = &my_phys[PHY_UNKNOWN];
			if (bootverbose)
				device_printf(dev, "PHY type: %s\n",
				    sc->my_pinfo->my_name);
		} else {
			MY_UNLOCK(sc);
			device_printf(dev, "MII without any phy!\n");
			error = ENXIO;
			goto free_if;
		}
		MY_UNLOCK(sc);
	}

	/* Do ifmedia setup. */
	ifmedia_init(&sc->ifmedia, 0, my_ifmedia_upd, my_ifmedia_sts);
	MY_LOCK(sc);
	my_getmode_mii(sc);
	my_autoneg_mii(sc, MY_FLAG_FORCEDELAY, 1);
	media = sc->ifmedia.ifm_media;
	my_stop(sc);
	MY_UNLOCK(sc);
	ifmedia_set(&sc->ifmedia, media);

	ether_ifattach(ifp, eaddr);

	error = bus_setup_intr(dev, sc->my_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, my_intr, sc, &sc->my_intrhand);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		goto detach_if;
	}
	return (0);

	/* Error unwinding: release in the reverse order of acquisition. */
detach_if:
	ether_ifdetach(ifp);
free_if:
	if_free(ifp);
-free_ldata:
	free(sc->my_ldata_ptr, M_DEVBUF);
release_irq:
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
release_io:
	bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
destroy_mutex:
	mtx_destroy(&sc->my_mtx);
	return (error);
}
/*
 * Device detach: stop the chip, tear down the interrupt, and release
 * every resource acquired in my_attach() in reverse order.
 */
static int
my_detach(device_t dev)
{
	struct my_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	ifp = sc->my_ifp;
	/* Unhook from the network stack first so no new work arrives. */
	ether_ifdetach(ifp);
	MY_LOCK(sc);
	my_stop(sc);
	MY_UNLOCK(sc);
	bus_teardown_intr(dev, sc->my_irq, sc->my_intrhand);
	/* Make sure no callout is still in flight before freeing state. */
	callout_drain(&sc->my_watchdog);
	callout_drain(&sc->my_autoneg_timer);
	if_free(ifp);
	free(sc->my_ldata_ptr, M_DEVBUF);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
	bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
	mtx_destroy(&sc->my_mtx);
	return (0);
}
/*
* Initialize the transmit descriptors.
*/
/*
 * Link the TX software descriptors into a circular list, attach each
 * to its hardware descriptor, and mark the whole ring free.
 */
static int
my_list_tx_init(struct my_softc *sc)
{
	struct my_chain_data *cd;
	struct my_list_data *ld;
	int idx;

	MY_LOCK_ASSERT(sc);
	cd = &sc->my_cdata;
	ld = sc->my_ldata;
	for (idx = 0; idx < MY_TX_LIST_CNT; idx++) {
		cd->my_tx_chain[idx].my_ptr = &ld->my_tx_list[idx];
		/* The final entry wraps back to the first, closing the ring. */
		cd->my_tx_chain[idx].my_nextdesc =
		    &cd->my_tx_chain[(idx + 1) % MY_TX_LIST_CNT];
	}
	cd->my_tx_free = &cd->my_tx_chain[0];
	cd->my_tx_tail = cd->my_tx_head = NULL;
	return (0);
}
/*
* Initialize the RX descriptors and allocate mbufs for them. Note that we
* arrange the descriptors in a closed ring, so that the last descriptor
* points back to the first.
*/
static int
my_list_rx_init(struct my_softc * sc)
{
	struct my_chain_data *cd;
	struct my_list_data *ld;
	int i;

	MY_LOCK_ASSERT(sc);
	cd = &sc->my_cdata;
	ld = sc->my_ldata;
	for (i = 0; i < MY_RX_LIST_CNT; i++) {
		cd->my_rx_chain[i].my_ptr =
		    (struct my_desc *) & ld->my_rx_list[i];
		if (my_newbuf(sc, &cd->my_rx_chain[i]) == ENOBUFS) {
			/*
			 * NOTE(review): this drops the lock on failure even
			 * though the function is entered with it held --
			 * confirm callers expect that asymmetry.
			 */
			MY_UNLOCK(sc);
			return (ENOBUFS);
		}
		/* Close the ring: the last descriptor points at the first. */
		if (i == (MY_RX_LIST_CNT - 1)) {
			cd->my_rx_chain[i].my_nextdesc = &cd->my_rx_chain[0];
			ld->my_rx_list[i].my_next = vtophys(&ld->my_rx_list[0]);
		} else {
			cd->my_rx_chain[i].my_nextdesc =
			    &cd->my_rx_chain[i + 1];
			ld->my_rx_list[i].my_next =
			    vtophys(&ld->my_rx_list[i + 1]);
		}
	}
	cd->my_rx_head = &cd->my_rx_chain[0];
	return (0);
}
/*
* Initialize an RX descriptor and attach an MBUF cluster.
*/
/*
 * Attach a fresh mbuf cluster to the given RX descriptor and hand the
 * descriptor back to the NIC.  Returns ENOBUFS (leaving the descriptor
 * untouched) if no mbuf or cluster could be allocated.
 */
static int
my_newbuf(struct my_softc *sc, struct my_chain_onefrag *c)
{
	struct mbuf *m;

	MY_LOCK_ASSERT(sc);
	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		device_printf(sc->my_dev,
		    "no memory for rx list -- packet dropped!\n");
		return (ENOBUFS);
	}
	if (!(MCLGET(m, M_NOWAIT))) {
		device_printf(sc->my_dev,
		    "no memory for rx list -- packet dropped!\n");
		m_freem(m);
		return (ENOBUFS);
	}

	/* Point the hardware descriptor at the cluster and give it back. */
	c->my_mbuf = m;
	c->my_ptr->my_data = vtophys(mtod(m, caddr_t));
	c->my_ptr->my_ctl = (MCLBYTES - 1) << MY_RBSShift;
	c->my_ptr->my_status = MY_OWNByNIC;
	return (0);
}
/*
* A frame has been uploaded: pass the resulting mbuf chain up to the higher
* level protocols.
*/
/*
 * Harvest completed RX descriptors: for each frame the NIC has handed
 * back, either copy it into a fresh small mbuf or swap in a new
 * cluster, then pass it up the stack with the lock dropped.
 */
static void
my_rxeof(struct my_softc * sc)
{
	struct ether_header *eh;
	struct mbuf *m;
	if_t ifp;
	struct my_chain_onefrag *cur_rx;
	int total_len = 0;
	u_int32_t rxstat;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	/* Walk descriptors until we hit one still owned by the NIC. */
	while (!((rxstat = sc->my_cdata.my_rx_head->my_ptr->my_status)
	    & MY_OWNByNIC)) {
		cur_rx = sc->my_cdata.my_rx_head;
		sc->my_cdata.my_rx_head = cur_rx->my_nextdesc;

		if (rxstat & MY_ES) {	/* error summary: give up this rx pkt */
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			/* Recycle the descriptor straight back to the NIC. */
			cur_rx->my_ptr->my_status = MY_OWNByNIC;
			continue;
		}
		/* No errors; receive the packet. */
		total_len = (rxstat & MY_FLNGMASK) >> MY_FLNGShift;
		/* The hardware leaves the CRC on the frame; trim it off. */
		total_len -= ETHER_CRC_LEN;
		if (total_len < MINCLSIZE) {
			/* Small frame: copy out and recycle the cluster. */
			m = m_devget(mtod(cur_rx->my_mbuf, char *),
			    total_len, 0, ifp, NULL);
			cur_rx->my_ptr->my_status = MY_OWNByNIC;
			if (m == NULL) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				continue;
			}
		} else {
			m = cur_rx->my_mbuf;
			/*
			 * Try to conjure up a new mbuf cluster. If that
			 * fails, it means we have an out of memory condition
			 * and should leave the buffer in place and continue.
			 * This will result in a lost packet, but there's
			 * little else we can do in this situation.
			 */
			if (my_newbuf(sc, cur_rx) == ENOBUFS) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				cur_rx->my_ptr->my_status = MY_OWNByNIC;
				continue;
			}
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet, but
		 * don't pass it up to the ether_input() layer unless it's a
		 * broadcast packet, multicast packet, matches our ethernet
		 * address or the interface is in promiscuous mode.
		 */
		if (bpf_peers_present_if(ifp)) {
			bpf_mtap_if(ifp, m);
			if (if_getflags(ifp) & IFF_PROMISC &&
			    (bcmp(eh->ether_dhost, if_getlladdr(sc->my_ifp),
				ETHER_ADDR_LEN) &&
			     (eh->ether_dhost[0] & 1) == 0)) {
				m_freem(m);
				continue;
			}
		}
#endif
		/* Drop the lock across the network stack's input path. */
		MY_UNLOCK(sc);
		if_input(ifp, m);
		MY_LOCK(sc);
	}
	return;
}
/*
 * A frame was downloaded to the chip. It's safe for us to clean up the list
 * buffers.  Reaps completed TX descriptors, updates error/collision
 * counters and frees the associated mbufs.  Called with the lock held.
 */
static void
my_txeof(struct my_softc * sc)
{
	struct my_chain *cur_tx;
	if_t ifp;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	/* Clear the timeout timer. */
	sc->my_timer = 0;
	if (sc->my_cdata.my_tx_head == NULL) {
		return;
	}
	/*
	 * Go through our tx list and free mbufs for those frames that have
	 * been transmitted.
	 */
	while (sc->my_cdata.my_tx_head->my_mbuf != NULL) {
		u_int32_t txstat;

		cur_tx = sc->my_cdata.my_tx_head;
		txstat = MY_TXSTATUS(cur_tx);
		/* Stop at the first descriptor not yet completed. */
		if ((txstat & MY_OWNByNIC) || txstat == MY_UNSENT)
			break;
		if (!(CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced)) {
			/* Non-enhanced mode: per-descriptor accounting. */
			if (txstat & MY_TXERR) {
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
				if (txstat & MY_EC)	/* excessive collision */
					if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
				if (txstat & MY_LC)	/* late collision */
					if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
			}
			if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
			    (txstat & MY_NCRMASK) >> MY_NCRShift);
		}
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		m_freem(cur_tx->my_mbuf);
		cur_tx->my_mbuf = NULL;
		/* Ring fully drained: reset head/tail and stop. */
		if (sc->my_cdata.my_tx_head == sc->my_cdata.my_tx_tail) {
			sc->my_cdata.my_tx_head = NULL;
			sc->my_cdata.my_tx_tail = NULL;
			break;
		}
		sc->my_cdata.my_tx_head = cur_tx->my_nextdesc;
	}
	if (CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced) {
		/* Enhanced mode: collision count comes from the TSR. */
		if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (CSR_READ_4(sc, MY_TSR) & MY_NCRMask));
	}
	return;
}
/*
 * TX 'end of channel' interrupt handler.  If the ring is empty the
 * interface is marked non-busy (and a pending autoneg is kicked off);
 * otherwise any not-yet-started descriptor chain is handed to the NIC.
 */
static void
my_txeoc(struct my_softc * sc)
{
	if_t ifp;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	sc->my_timer = 0;
	if (sc->my_cdata.my_tx_head != NULL) {
		/* Frames still queued: hand an unsent chain to the NIC. */
		if (MY_TXOWN(sc->my_cdata.my_tx_head) == MY_UNSENT) {
			MY_TXOWN(sc->my_cdata.my_tx_head) = MY_OWNByNIC;
			sc->my_timer = 5;
			CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);
		}
		return;
	}
	/* Ring is empty: clear the busy flag and resume autoneg if wanted. */
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	sc->my_cdata.my_tx_tail = NULL;
	if (sc->my_want_auto)
		my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
}
/*
 * Interrupt handler: mask interrupts, ack and dispatch every pending
 * cause until the ISR reads clear, then unmask and restart transmission
 * if packets queued up meanwhile.
 */
static void
my_intr(void *arg)
{
	struct my_softc *sc;
	if_t ifp;
	u_int32_t status;

	sc = arg;
	MY_LOCK(sc);
	ifp = sc->my_ifp;
	if (!(if_getflags(ifp) & IFF_UP)) {
		/* Interface is down: ignore stray interrupts. */
		MY_UNLOCK(sc);
		return;
	}
	/* Disable interrupts. */
	CSR_WRITE_4(sc, MY_IMR, 0x00000000);
	for (;;) {
		status = CSR_READ_4(sc, MY_ISR);
		status &= MY_INTRS;
		if (status)
			CSR_WRITE_4(sc, MY_ISR, status);	/* ack causes */
		else
			break;

		if (status & MY_RI)	/* receive interrupt */
			my_rxeof(sc);

		if ((status & MY_RBU) || (status & MY_RxErr)) {
			/* rx buffer unavailable or rx error */
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
#ifdef foo
			my_stop(sc);
			my_reset(sc);
			my_init_locked(sc);
#endif
		}
		if (status & MY_TI)	/* tx interrupt */
			my_txeof(sc);
		if (status & MY_ETI)	/* tx early interrupt */
			my_txeof(sc);
		if (status & MY_TBU)	/* tx buffer unavailable */
			my_txeoc(sc);

#if 0				/* 90/1/18 delete */
		if (status & MY_FBE) {
			my_reset(sc);
			my_init_locked(sc);
		}
#endif
	}
	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
	/* Restart output for anything queued while we were servicing. */
	if (!if_sendq_empty(ifp))
		my_start_locked(ifp);
	MY_UNLOCK(sc);
	return;
}
/*
* Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
* pointers to the fragment pointers.
*/
static int
my_encap(struct my_softc * sc, struct my_chain * c, struct mbuf * m_head)
{
struct my_desc *f = NULL;
int total_len;
struct mbuf *m, *m_new = NULL;
MY_LOCK_ASSERT(sc);
/* calculate the total tx pkt length */
total_len = 0;
for (m = m_head; m != NULL; m = m->m_next)
total_len += m->m_len;
/*
* Start packing the mbufs in this chain into the fragment pointers.
* Stop when we run out of fragments or hit the end of the mbuf
* chain.
*/
m = m_head;
MGETHDR(m_new, M_NOWAIT, MT_DATA);
if (m_new == NULL) {
device_printf(sc->my_dev, "no memory for tx list");
return (1);
}
if (m_head->m_pkthdr.len > MHLEN) {
if (!(MCLGET(m_new, M_NOWAIT))) {
m_freem(m_new);
device_printf(sc->my_dev, "no memory for tx list");
return (1);
}
}
m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t));
m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
m_freem(m_head);
m_head = m_new;
f = &c->my_ptr->my_frag[0];
f->my_status = 0;
f->my_data = vtophys(mtod(m_new, caddr_t));
total_len = m_new->m_len;
f->my_ctl = MY_TXFD | MY_TXLD | MY_CRCEnable | MY_PADEnable;
f->my_ctl |= total_len << MY_PKTShift; /* pkt size */
f->my_ctl |= total_len; /* buffer size */
/* 89/12/29 add, for mtd891 *//* [ 89? ] */
if (sc->my_info->my_did == MTD891ID)
f->my_ctl |= MY_ETIControl | MY_RetryTxLC;
c->my_mbuf = m_head;
c->my_lastdesc = 0;
MY_TXNEXT(c) = vtophys(&c->my_nextdesc->my_ptr->my_frag[0]);
return (0);
}
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
my_start(if_t ifp)
{
	struct my_softc *sc = if_getsoftc(ifp);

	/* Serialize with the interrupt handler and run the real worker. */
	MY_LOCK(sc);
	my_start_locked(ifp);
	MY_UNLOCK(sc);
}
/*
 * Locked transmit worker: dequeue packets, encapsulate them into free
 * TX descriptors and kick the chip.  Descriptor ownership of the first
 * packet is flipped last so the NIC never sees a half-built chain.
 *
 * Fix: the return value of my_encap() was previously ignored; on an
 * mbuf shortage the dequeued packet was silently lost and a descriptor
 * with a NULL mbuf was consumed.  We now return the descriptor to the
 * free list, requeue the packet and retry later.
 */
static void
my_start_locked(if_t ifp)
{
	struct my_softc *sc;
	struct mbuf *m_head = NULL;
	struct my_chain *cur_tx = NULL, *start_tx, *c;

	sc = if_getsoftc(ifp);
	MY_LOCK_ASSERT(sc);
	/* Defer transmission while autonegotiation is in progress. */
	if (sc->my_autoneg) {
		sc->my_tx_pend = 1;
		return;
	}
	/*
	 * Check for an available queue slot. If there are none, punt.
	 */
	if (sc->my_cdata.my_tx_free->my_mbuf != NULL) {
		if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
		return;
	}
	start_tx = sc->my_cdata.my_tx_free;
	while (sc->my_cdata.my_tx_free->my_mbuf == NULL) {
		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;
		/* Pick a descriptor off the free list. */
		c = sc->my_cdata.my_tx_free;
		sc->my_cdata.my_tx_free = c->my_nextdesc;
		/* Pack the data into the descriptor. */
		if (my_encap(sc, c, m_head) != 0) {
			/*
			 * Out of mbufs: give the (untouched) descriptor
			 * back, requeue the packet and try again later.
			 */
			sc->my_cdata.my_tx_free = c;
			if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		/* Hand all but the first descriptor to the NIC now. */
		if (c != start_tx)
			MY_TXOWN(c) = MY_OWNByNIC;
#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame to
		 * him.
		 */
		BPF_MTAP(ifp, c->my_mbuf);
#endif
		cur_tx = c;	/* remember the last successfully queued one */
	}
	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL) {
		return;
	}
	/*
	 * Place the request for the upload interrupt in the last descriptor
	 * in the chain. This way, if we're chaining several packets at once,
	 * we'll only get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	MY_TXCTL(cur_tx) |= MY_TXIC;
	cur_tx->my_ptr->my_frag[0].my_ctl |= MY_TXIC;
	sc->my_cdata.my_tx_tail = cur_tx;
	if (sc->my_cdata.my_tx_head == NULL)
		sc->my_cdata.my_tx_head = start_tx;
	/* Release the first descriptor last, then issue a polling demand. */
	MY_TXOWN(start_tx) = MY_OWNByNIC;
	CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);	/* tx polling demand */
	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	sc->my_timer = 5;
}
static void
my_init(void *xsc)
{
	struct my_softc *sc;

	/* if_init callback: take the softc lock, then do the real work. */
	sc = xsc;
	MY_LOCK(sc);
	my_init_locked(sc);
	MY_UNLOCK(sc);
}
/*
 * Bring the hardware up: stop and reset the chip, program DMA/burst
 * parameters, (re)build the RX/TX rings, program the address filters
 * and finally enable the receiver and transmitter.  Called with the
 * softc lock held.
 */
static void
my_init_locked(struct my_softc *sc)
{
	if_t ifp = sc->my_ifp;
	u_int16_t phy_bmcr = 0;

	MY_LOCK_ASSERT(sc);
	/* Autonegotiation in progress; it will re-init when done. */
	if (sc->my_autoneg) {
		return;
	}
	/* Save BMCR so it can be restored after the stop/reset below. */
	if (sc->my_pinfo != NULL)
		phy_bmcr = my_phy_readreg(sc, PHY_BMCR);
	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	my_stop(sc);
	my_reset(sc);
	/*
	 * Set cache alignment and burst length.
	 */
#if 0				/* 89/9/1 modify,  */
	CSR_WRITE_4(sc, MY_BCR, MY_RPBLE512);
	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF);
#endif
	CSR_WRITE_4(sc, MY_BCR, MY_PBL8);
	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF | MY_RBLEN | MY_RPBLE512);
	/*
	 * 89/12/29 add, for mtd891,
	 */
	if (sc->my_info->my_did == MTD891ID) {
		MY_SETBIT(sc, MY_BCR, MY_PROG);
		MY_SETBIT(sc, MY_TCRRCR, MY_Enhanced);
	}
	my_setcfg(sc, phy_bmcr);
	/* Init circular RX list. */
	if (my_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->my_dev, "init failed: no memory for rx buffers\n");
		my_stop(sc);
		return;
	}
	/* Init TX descriptors. */
	my_list_tx_init(sc);
	/* If we want promiscuous mode, set the allframes bit. */
	if (if_getflags(ifp) & IFF_PROMISC)
		MY_SETBIT(sc, MY_TCRRCR, MY_PROM);
	else
		MY_CLRBIT(sc, MY_TCRRCR, MY_PROM);
	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (if_getflags(ifp) & IFF_BROADCAST)
		MY_SETBIT(sc, MY_TCRRCR, MY_AB);
	else
		MY_CLRBIT(sc, MY_TCRRCR, MY_AB);
	/*
	 * Program the multicast filter, if necessary.
	 */
	my_setmulti(sc);
	/*
	 * Load the address of the RX list.  RX is kept disabled while the
	 * list base address is written.
	 */
	MY_CLRBIT(sc, MY_TCRRCR, MY_RE);
	CSR_WRITE_4(sc, MY_RXLBA, vtophys(&sc->my_ldata->my_rx_list[0]));
	/*
	 * Enable interrupts (and ack anything pending).
	 */
	CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
	CSR_WRITE_4(sc, MY_ISR, 0xFFFFFFFF);
	/* Enable receiver and transmitter. */
	MY_SETBIT(sc, MY_TCRRCR, MY_RE);
	MY_CLRBIT(sc, MY_TCRRCR, MY_TE);
	CSR_WRITE_4(sc, MY_TXLBA, vtophys(&sc->my_ldata->my_tx_list[0]));
	MY_SETBIT(sc, MY_TCRRCR, MY_TE);
	/* Restore state of BMCR */
	if (sc->my_pinfo != NULL)
		my_phy_writereg(sc, PHY_BMCR, phy_bmcr);
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	/* Arm the per-second watchdog callout. */
	callout_reset(&sc->my_watchdog, hz, my_watchdog, sc);
	return;
}
/*
 * Set media options (ifmedia change callback).
 */
static int
my_ifmedia_upd(if_t ifp)
{
	struct my_softc *sc = if_getsoftc(ifp);
	struct ifmedia *ifm = &sc->ifmedia;

	MY_LOCK(sc);
	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
		MY_UNLOCK(sc);
		return (EINVAL);
	}
	/* IFM_AUTO kicks off autonegotiation; anything else forces a mode. */
	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
		my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
	else
		my_setmode_mii(sc, ifm->ifm_media);
	MY_UNLOCK(sc);
	return (0);
}
/*
 * Report current media status (ifmedia status callback).
 *
 * Improvement: PHY_BMCR was previously read up to four times; MII
 * register reads are slow, and repeated reads could even yield an
 * inconsistent speed/duplex view.  It is now read once and cached.
 */
static void
my_ifmedia_sts(if_t ifp, struct ifmediareq * ifmr)
{
	struct my_softc *sc;
	u_int16_t advert = 0, ability = 0, bmcr;

	sc = if_getsoftc(ifp);
	MY_LOCK(sc);
	ifmr->ifm_active = IFM_ETHER;
	bmcr = my_phy_readreg(sc, PHY_BMCR);	/* single consistent snapshot */
	if (!(bmcr & PHY_BMCR_AUTONEGENBL)) {
		/* Forced mode: report speed/duplex straight from BMCR. */
#if 0				/* this version did not support 1000M, */
		if (bmcr & PHY_BMCR_1000)
			ifmr->ifm_active = IFM_ETHER | IFM_1000TX;
#endif
		if (bmcr & PHY_BMCR_SPEEDSEL)
			ifmr->ifm_active = IFM_ETHER | IFM_100_TX;
		else
			ifmr->ifm_active = IFM_ETHER | IFM_10_T;
		if (bmcr & PHY_BMCR_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
		MY_UNLOCK(sc);
		return;
	}
	/* Autoneg enabled: intersect our advertisement with the partner's. */
	ability = my_phy_readreg(sc, PHY_LPAR);
	advert = my_phy_readreg(sc, PHY_ANAR);
#if 0				/* this version did not support 1000M, */
	/*
	 * NOTE(review): '=' below looks like it should be '=='; harmless
	 * today because this block is compiled out.
	 */
	if (sc->my_pinfo->my_vid = MarvellPHYID0) {
		ability2 = my_phy_readreg(sc, PHY_1000SR);
		if (ability2 & PHY_1000SR_1000BTXFULL) {
			advert = 0;
			ability = 0;
			ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
		} else if (ability & PHY_1000SR_1000BTXHALF) {
			advert = 0;
			ability = 0;
			ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_HDX;
		}
	}
#endif
	if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4)
		ifmr->ifm_active = IFM_ETHER | IFM_100_T4;
	else if (advert & PHY_ANAR_100BTXFULL && ability & PHY_ANAR_100BTXFULL)
		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	else if (advert & PHY_ANAR_100BTXHALF && ability & PHY_ANAR_100BTXHALF)
		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_HDX;
	else if (advert & PHY_ANAR_10BTFULL && ability & PHY_ANAR_10BTFULL)
		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_FDX;
	else if (advert & PHY_ANAR_10BTHALF && ability & PHY_ANAR_10BTHALF)
		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_HDX;
	MY_UNLOCK(sc);
	return;
}
/*
 * ioctl entry point.  Flag, multicast and media requests are handled
 * here; everything else is punted to ether_ioctl().
 */
static int
my_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct my_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		MY_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP)
			my_init_locked(sc);
		else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			my_stop(sc);
		MY_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Multicast list changed: reprogram the hardware filter. */
		MY_LOCK(sc);
		my_setmulti(sc);
		MY_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}
/*
 * Per-second watchdog callout.  sc->my_timer is armed (set to 5) when a
 * transmit starts and cleared on TX completion; if it counts down to
 * zero here the chip is assumed wedged and is reset and reinitialized.
 */
static void
my_watchdog(void *arg)
{
	struct my_softc *sc;
	if_t ifp;

	sc = arg;
	MY_LOCK_ASSERT(sc);
	callout_reset(&sc->my_watchdog, hz, my_watchdog, sc);
	/* Timer disarmed, or armed but not yet expired: nothing to do. */
	if (sc->my_timer == 0 || --sc->my_timer > 0)
		return;

	ifp = sc->my_ifp;
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_printf(ifp, "watchdog timeout\n");
	if (!(my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
		if_printf(ifp, "no carrier - transceiver cable problem?\n");
	/* Full recovery: stop, reset and reinitialize the chip. */
	my_stop(sc);
	my_reset(sc);
	my_init_locked(sc);
	if (!if_sendq_empty(ifp))
		my_start_locked(ifp);
}
/*
* Stop the adapter and free any mbufs allocated to the RX and TX lists.
*/
static void
my_stop(struct my_softc * sc)
{
int i;
if_t ifp;
MY_LOCK_ASSERT(sc);
ifp = sc->my_ifp;
callout_stop(&sc->my_autoneg_timer);
callout_stop(&sc->my_watchdog);
MY_CLRBIT(sc, MY_TCRRCR, (MY_RE | MY_TE));
CSR_WRITE_4(sc, MY_IMR, 0x00000000);
CSR_WRITE_4(sc, MY_TXLBA, 0x00000000);
CSR_WRITE_4(sc, MY_RXLBA, 0x00000000);
/*
* Free data in the RX lists.
*/
for (i = 0; i < MY_RX_LIST_CNT; i++) {
if (sc->my_cdata.my_rx_chain[i].my_mbuf != NULL) {
m_freem(sc->my_cdata.my_rx_chain[i].my_mbuf);
sc->my_cdata.my_rx_chain[i].my_mbuf = NULL;
}
}
bzero((char *)&sc->my_ldata->my_rx_list,
sizeof(sc->my_ldata->my_rx_list));
/*
* Free the TX list buffers.
*/
for (i = 0; i < MY_TX_LIST_CNT; i++) {
if (sc->my_cdata.my_tx_chain[i].my_mbuf != NULL) {
m_freem(sc->my_cdata.my_tx_chain[i].my_mbuf);
sc->my_cdata.my_tx_chain[i].my_mbuf = NULL;
}
}
bzero((char *)&sc->my_ldata->my_tx_list,
sizeof(sc->my_ldata->my_tx_list));
if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
return;
}
/*
 * Stop all chip I/O so that the kernel's probe routines don't get confused
 * by errant DMAs when rebooting.  Always succeeds.
 */
static int
my_shutdown(device_t dev)
{
	struct my_softc *sc;

	sc = device_get_softc(dev);
	MY_LOCK(sc);
	my_stop(sc);
	MY_UNLOCK(sc);
	/* style(9): parenthesized return, consistent with the rest of the file. */
	return (0);
}
diff --git a/sys/dev/neta/if_mvneta.c b/sys/dev/neta/if_mvneta.c
index 8a2c2ec8512c..053e959c94ac 100644
--- a/sys/dev/neta/if_mvneta.c
+++ b/sys/dev/neta/if_mvneta.c
@@ -1,3624 +1,3619 @@
/*
* Copyright (c) 2017 Stormshield.
* Copyright (c) 2017 Semihalf.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "opt_platform.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#ifdef MVNETA_KTR
#include <sys/ktr.h>
#endif
#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp_lro.h>
#include <sys/sockio.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <dev/clk/clk.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mdio/mdio.h>
#include <arm/mv/mvvar.h>
#if !defined(__aarch64__)
#include <arm/mv/mvreg.h>
#include <arm/mv/mvwin.h>
#endif
#include "if_mvnetareg.h"
#include "if_mvnetavar.h"
#include "miibus_if.h"
#include "mdio_if.h"
#ifdef MVNETA_DEBUG
#define STATIC /* nothing */
#else
#define STATIC static
#endif
#define DASSERT(x) KASSERT((x), (#x))
#define A3700_TCLK_250MHZ 250000000
/* Device Register Initialization */
STATIC int mvneta_initreg(if_t);
/* Descriptor Ring Control for each of queues */
STATIC int mvneta_ring_alloc_rx_queue(struct mvneta_softc *, int);
STATIC int mvneta_ring_alloc_tx_queue(struct mvneta_softc *, int);
STATIC void mvneta_ring_dealloc_rx_queue(struct mvneta_softc *, int);
STATIC void mvneta_ring_dealloc_tx_queue(struct mvneta_softc *, int);
STATIC int mvneta_ring_init_rx_queue(struct mvneta_softc *, int);
STATIC int mvneta_ring_init_tx_queue(struct mvneta_softc *, int);
STATIC void mvneta_ring_flush_rx_queue(struct mvneta_softc *, int);
STATIC void mvneta_ring_flush_tx_queue(struct mvneta_softc *, int);
STATIC void mvneta_dmamap_cb(void *, bus_dma_segment_t *, int, int);
STATIC int mvneta_dma_create(struct mvneta_softc *);
/* Rx/Tx Queue Control */
STATIC int mvneta_rx_queue_init(if_t, int);
STATIC int mvneta_tx_queue_init(if_t, int);
STATIC int mvneta_rx_queue_enable(if_t, int);
STATIC int mvneta_tx_queue_enable(if_t, int);
STATIC void mvneta_rx_lockq(struct mvneta_softc *, int);
STATIC void mvneta_rx_unlockq(struct mvneta_softc *, int);
STATIC void mvneta_tx_lockq(struct mvneta_softc *, int);
STATIC void mvneta_tx_unlockq(struct mvneta_softc *, int);
/* Interrupt Handlers */
STATIC void mvneta_disable_intr(struct mvneta_softc *);
STATIC void mvneta_enable_intr(struct mvneta_softc *);
STATIC void mvneta_rxtxth_intr(void *);
STATIC int mvneta_misc_intr(struct mvneta_softc *);
STATIC void mvneta_tick(void *);
/* struct ifnet and mii callbacks*/
STATIC int mvneta_xmitfast_locked(struct mvneta_softc *, int, struct mbuf **);
STATIC int mvneta_xmit_locked(struct mvneta_softc *, int);
#ifdef MVNETA_MULTIQUEUE
STATIC int mvneta_transmit(if_t, struct mbuf *);
#else /* !MVNETA_MULTIQUEUE */
STATIC void mvneta_start(if_t);
#endif
STATIC void mvneta_qflush(if_t);
STATIC void mvneta_tx_task(void *, int);
STATIC int mvneta_ioctl(if_t, u_long, caddr_t);
STATIC void mvneta_init(void *);
STATIC void mvneta_init_locked(void *);
STATIC void mvneta_stop(struct mvneta_softc *);
STATIC void mvneta_stop_locked(struct mvneta_softc *);
STATIC int mvneta_mediachange(if_t);
STATIC void mvneta_mediastatus(if_t, struct ifmediareq *);
STATIC void mvneta_portup(struct mvneta_softc *);
STATIC void mvneta_portdown(struct mvneta_softc *);
/* Link State Notify */
STATIC void mvneta_update_autoneg(struct mvneta_softc *, int);
STATIC int mvneta_update_media(struct mvneta_softc *, int);
STATIC void mvneta_adjust_link(struct mvneta_softc *);
STATIC void mvneta_update_eee(struct mvneta_softc *);
STATIC void mvneta_update_fc(struct mvneta_softc *);
STATIC void mvneta_link_isr(struct mvneta_softc *);
STATIC void mvneta_linkupdate(struct mvneta_softc *, boolean_t);
STATIC void mvneta_linkup(struct mvneta_softc *);
STATIC void mvneta_linkdown(struct mvneta_softc *);
STATIC void mvneta_linkreset(struct mvneta_softc *);
/* Tx Subroutines */
STATIC int mvneta_tx_queue(struct mvneta_softc *, struct mbuf **, int);
STATIC void mvneta_tx_set_csumflag(if_t,
struct mvneta_tx_desc *, struct mbuf *);
STATIC void mvneta_tx_queue_complete(struct mvneta_softc *, int);
STATIC void mvneta_tx_drain(struct mvneta_softc *);
/* Rx Subroutines */
STATIC int mvneta_rx(struct mvneta_softc *, int, int);
STATIC void mvneta_rx_queue(struct mvneta_softc *, int, int);
STATIC void mvneta_rx_queue_refill(struct mvneta_softc *, int);
STATIC void mvneta_rx_set_csumflag(if_t,
struct mvneta_rx_desc *, struct mbuf *);
STATIC void mvneta_rx_buf_free(struct mvneta_softc *, struct mvneta_buf *);
/* MAC address filter */
STATIC void mvneta_filter_setup(struct mvneta_softc *);
/* sysctl(9) */
STATIC int sysctl_read_mib(SYSCTL_HANDLER_ARGS);
STATIC int sysctl_clear_mib(SYSCTL_HANDLER_ARGS);
STATIC int sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS);
STATIC void sysctl_mvneta_init(struct mvneta_softc *);
/* MIB */
STATIC void mvneta_clear_mib(struct mvneta_softc *);
STATIC uint64_t mvneta_read_mib(struct mvneta_softc *, int);
STATIC void mvneta_update_mib(struct mvneta_softc *);
/* Switch */
STATIC boolean_t mvneta_has_switch(device_t);
#define mvneta_sc_lock(sc) mtx_lock(&sc->mtx)
#define mvneta_sc_unlock(sc) mtx_unlock(&sc->mtx)
STATIC struct mtx mii_mutex;
STATIC int mii_init = 0;
/* Device */
STATIC int mvneta_detach(device_t);
/* MII */
STATIC int mvneta_miibus_readreg(device_t, int, int);
STATIC int mvneta_miibus_writereg(device_t, int, int, int);
/*
 * Device, MII and MDIO method dispatch table for the base mvneta class
 * (registered via DEFINE_CLASS_0 below).  Note the MDIO methods reuse
 * the miibus accessors.
 */
static device_method_t mvneta_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach,	mvneta_detach),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	mvneta_miibus_readreg),
	DEVMETHOD(miibus_writereg,	mvneta_miibus_writereg),
	/* MDIO interface */
	DEVMETHOD(mdio_readreg,		mvneta_miibus_readreg),
	DEVMETHOD(mdio_writereg,	mvneta_miibus_writereg),
	/* End */
	DEVMETHOD_END
};

DEFINE_CLASS_0(mvneta, mvneta_driver, mvneta_methods, sizeof(struct mvneta_softc));

/* Child bus attachments and module dependencies. */
DRIVER_MODULE(miibus, mvneta, miibus_driver, 0, 0);
DRIVER_MODULE(mdio, mvneta, mdio_driver, 0, 0);
MODULE_DEPEND(mvneta, mdio, 1, 1, 1);
MODULE_DEPEND(mvneta, ether, 1, 1, 1);
MODULE_DEPEND(mvneta, miibus, 1, 1, 1);
MODULE_DEPEND(mvneta, mvxpbm, 1, 1, 1);
/*
 * List of MIB register and names
 */
enum mvneta_mib_idx
{
	MVNETA_MIB_RX_GOOD_OCT_IDX,
	MVNETA_MIB_RX_BAD_OCT_IDX,
	MVNETA_MIB_TX_MAC_TRNS_ERR_IDX,
	MVNETA_MIB_RX_GOOD_FRAME_IDX,
	MVNETA_MIB_RX_BAD_FRAME_IDX,
	MVNETA_MIB_RX_BCAST_FRAME_IDX,
	MVNETA_MIB_RX_MCAST_FRAME_IDX,
	MVNETA_MIB_RX_FRAME64_OCT_IDX,
	MVNETA_MIB_RX_FRAME127_OCT_IDX,
	MVNETA_MIB_RX_FRAME255_OCT_IDX,
	MVNETA_MIB_RX_FRAME511_OCT_IDX,
	MVNETA_MIB_RX_FRAME1023_OCT_IDX,
	MVNETA_MIB_RX_FRAMEMAX_OCT_IDX,
	MVNETA_MIB_TX_GOOD_OCT_IDX,
	MVNETA_MIB_TX_GOOD_FRAME_IDX,
	MVNETA_MIB_TX_EXCES_COL_IDX,
	MVNETA_MIB_TX_MCAST_FRAME_IDX,
	MVNETA_MIB_TX_BCAST_FRAME_IDX,
	MVNETA_MIB_TX_MAC_CTL_ERR_IDX,
	MVNETA_MIB_FC_SENT_IDX,
	MVNETA_MIB_FC_GOOD_IDX,
	MVNETA_MIB_FC_BAD_IDX,
	MVNETA_MIB_PKT_UNDERSIZE_IDX,
	MVNETA_MIB_PKT_FRAGMENT_IDX,
	MVNETA_MIB_PKT_OVERSIZE_IDX,
	MVNETA_MIB_PKT_JABBER_IDX,
	MVNETA_MIB_MAC_RX_ERR_IDX,
	MVNETA_MIB_MAC_CRC_ERR_IDX,
	MVNETA_MIB_MAC_COL_IDX,
	MVNETA_MIB_MAC_LATE_COL_IDX,
};

/*
 * Per-counter descriptor: hardware register offset, width flag
 * (reg64 != 0 presumably marks counters spanning two 32-bit registers --
 * TODO(review): confirm against mvneta_read_mib()), sysctl leaf name and
 * human-readable description.
 */
STATIC struct mvneta_mib_def {
	uint32_t regnum;
	int reg64;
	const char *sysctl_name;
	const char *desc;
} mvneta_mib_list[] = {
	[MVNETA_MIB_RX_GOOD_OCT_IDX] = {MVNETA_MIB_RX_GOOD_OCT, 1,
	    "rx_good_oct", "Good Octets Rx"},
	[MVNETA_MIB_RX_BAD_OCT_IDX] = {MVNETA_MIB_RX_BAD_OCT, 0,
	    "rx_bad_oct", "Bad  Octets Rx"},
	[MVNETA_MIB_TX_MAC_TRNS_ERR_IDX] = {MVNETA_MIB_TX_MAC_TRNS_ERR, 0,
	    "tx_mac_err", "MAC Transmit Error"},
	[MVNETA_MIB_RX_GOOD_FRAME_IDX] = {MVNETA_MIB_RX_GOOD_FRAME, 0,
	    "rx_good_frame", "Good Frames Rx"},
	[MVNETA_MIB_RX_BAD_FRAME_IDX] = {MVNETA_MIB_RX_BAD_FRAME, 0,
	    "rx_bad_frame", "Bad Frames Rx"},
	[MVNETA_MIB_RX_BCAST_FRAME_IDX] = {MVNETA_MIB_RX_BCAST_FRAME, 0,
	    "rx_bcast_frame", "Broadcast Frames Rx"},
	[MVNETA_MIB_RX_MCAST_FRAME_IDX] = {MVNETA_MIB_RX_MCAST_FRAME, 0,
	    "rx_mcast_frame", "Multicast Frames Rx"},
	[MVNETA_MIB_RX_FRAME64_OCT_IDX] = {MVNETA_MIB_RX_FRAME64_OCT, 0,
	    "rx_frame_1_64", "Frame Size    1 -   64"},
	[MVNETA_MIB_RX_FRAME127_OCT_IDX] = {MVNETA_MIB_RX_FRAME127_OCT, 0,
	    "rx_frame_65_127", "Frame Size   65 -  127"},
	[MVNETA_MIB_RX_FRAME255_OCT_IDX] = {MVNETA_MIB_RX_FRAME255_OCT, 0,
	    "rx_frame_128_255", "Frame Size  128 -  255"},
	[MVNETA_MIB_RX_FRAME511_OCT_IDX] = {MVNETA_MIB_RX_FRAME511_OCT, 0,
	    "rx_frame_256_511", "Frame Size  256 -  511"},
	[MVNETA_MIB_RX_FRAME1023_OCT_IDX] = {MVNETA_MIB_RX_FRAME1023_OCT, 0,
	    "rx_frame_512_1023", "Frame Size  512 - 1023"},
	/* NOTE(review): "rx_fame" typo kept -- the sysctl name is ABI. */
	[MVNETA_MIB_RX_FRAMEMAX_OCT_IDX] = {MVNETA_MIB_RX_FRAMEMAX_OCT, 0,
	    "rx_fame_1024_max", "Frame Size 1024 -  Max"},
	[MVNETA_MIB_TX_GOOD_OCT_IDX] = {MVNETA_MIB_TX_GOOD_OCT, 1,
	    "tx_good_oct", "Good Octets Tx"},
	[MVNETA_MIB_TX_GOOD_FRAME_IDX] = {MVNETA_MIB_TX_GOOD_FRAME, 0,
	    "tx_good_frame", "Good Frames Tx"},
	[MVNETA_MIB_TX_EXCES_COL_IDX] = {MVNETA_MIB_TX_EXCES_COL, 0,
	    "tx_exces_collision", "Excessive Collision"},
	[MVNETA_MIB_TX_MCAST_FRAME_IDX] = {MVNETA_MIB_TX_MCAST_FRAME, 0,
	    "tx_mcast_frame", "Multicast Frames Tx"},
	[MVNETA_MIB_TX_BCAST_FRAME_IDX] = {MVNETA_MIB_TX_BCAST_FRAME, 0,
	    "tx_bcast_frame", "Broadcast Frames Tx"},
	[MVNETA_MIB_TX_MAC_CTL_ERR_IDX] = {MVNETA_MIB_TX_MAC_CTL_ERR, 0,
	    "tx_mac_ctl_err", "Unknown MAC Control"},
	[MVNETA_MIB_FC_SENT_IDX] = {MVNETA_MIB_FC_SENT, 0,
	    "fc_tx", "Flow Control Tx"},
	[MVNETA_MIB_FC_GOOD_IDX] = {MVNETA_MIB_FC_GOOD, 0,
	    "fc_rx_good", "Good Flow Control Rx"},
	[MVNETA_MIB_FC_BAD_IDX] = {MVNETA_MIB_FC_BAD, 0,
	    "fc_rx_bad", "Bad Flow Control Rx"},
	[MVNETA_MIB_PKT_UNDERSIZE_IDX] = {MVNETA_MIB_PKT_UNDERSIZE, 0,
	    "pkt_undersize", "Undersized Packets Rx"},
	[MVNETA_MIB_PKT_FRAGMENT_IDX] = {MVNETA_MIB_PKT_FRAGMENT, 0,
	    "pkt_fragment", "Fragmented Packets Rx"},
	[MVNETA_MIB_PKT_OVERSIZE_IDX] = {MVNETA_MIB_PKT_OVERSIZE, 0,
	    "pkt_oversize", "Oversized Packets Rx"},
	[MVNETA_MIB_PKT_JABBER_IDX] = {MVNETA_MIB_PKT_JABBER, 0,
	    "pkt_jabber", "Jabber Packets Rx"},
	[MVNETA_MIB_MAC_RX_ERR_IDX] = {MVNETA_MIB_MAC_RX_ERR, 0,
	    "mac_rx_err", "MAC Rx Errors"},
	[MVNETA_MIB_MAC_CRC_ERR_IDX] = {MVNETA_MIB_MAC_CRC_ERR, 0,
	    "mac_crc_err", "MAC CRC Errors"},
	[MVNETA_MIB_MAC_COL_IDX] = {MVNETA_MIB_MAC_COL, 0,
	    "mac_collision", "MAC Collision"},
	[MVNETA_MIB_MAC_LATE_COL_IDX] = {MVNETA_MIB_MAC_LATE_COL, 0,
	    "mac_late_collision", "MAC Late Collision"},
};

/* Bus resources: one memory window and one interrupt line. */
static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE },
	{ -1, 0}
};

/* Interrupt handler table, indexed to match the IRQ resources above. */
static struct {
	driver_intr_t *handler;
	char * description;
} mvneta_intrs[] = {
	{ mvneta_rxtxth_intr, "MVNETA aggregated interrupt" },
};
/*
 * Program the station MAC address: bytes 0-3 go to MACAH (MSB first),
 * bytes 4-5 to MACAL.  Always returns 0.
 */
static int
mvneta_set_mac_address(struct mvneta_softc *sc, uint8_t *addr)
{
	unsigned int high, low;

	low = (addr[4] << 8) | (addr[5]);
	high = (addr[0] << 24) | (addr[1] << 16) |
	    (addr[2] << 8) | (addr[3] << 0);

	MVNETA_WRITE(sc, MVNETA_MACAL, low);
	MVNETA_WRITE(sc, MVNETA_MACAH, high);
	return (0);
}
/*
 * Determine the interface MAC address.  Preference order: the
 * FDT-provided address (mvneta_fdt_mac_address()), then whatever is
 * already programmed in the MACAL/MACAH registers, and as a last resort
 * a randomly generated unicast address carrying the device unit number
 * in its low byte.  Always returns 0.
 */
static int
mvneta_get_mac_address(struct mvneta_softc *sc, uint8_t *addr)
{
	uint32_t mac_l, mac_h;

#ifdef FDT
	if (mvneta_fdt_mac_address(sc, addr) == 0)
		return (0);
#endif
	/*
	 * Fall back -- use the currently programmed address.
	 */
	mac_l = MVNETA_READ(sc, MVNETA_MACAL);
	mac_h = MVNETA_READ(sc, MVNETA_MACAH);
	if (mac_l == 0 && mac_h == 0) {
		/*
		 * Generate pseudo-random MAC.
		 * Set lower part to random number | unit number.
		 */
		mac_l = arc4random() & ~0xff;
		mac_l |= device_get_unit(sc->dev) & 0xff;
		mac_h = arc4random();
		mac_h &= ~(3 << 24);	/* Clear multicast and LAA bits */
		if (bootverbose) {
			device_printf(sc->dev,
			    "Could not acquire MAC address. "
			    "Using randomized one.\n");
		}
	}
	/* Unpack the registers into the 6-byte address, MSB first. */
	addr[0] = (mac_h & 0xff000000) >> 24;
	addr[1] = (mac_h & 0x00ff0000) >> 16;
	addr[2] = (mac_h & 0x0000ff00) >> 8;
	addr[3] = (mac_h & 0x000000ff);
	addr[4] = (mac_l & 0x0000ff00) >> 8;
	addr[5] = (mac_l & 0x000000ff);
	return (0);
}
/*
 * Report whether this port sits behind an Ethernet switch.  Only the
 * FDT glue can detect one; without FDT support the answer is always
 * false (the second return is the non-FDT fallback).
 */
STATIC boolean_t
mvneta_has_switch(device_t self)
{
#ifdef FDT
	return (mvneta_has_switch_fdt(self));
#endif
	return (false);
}
/*
 * Create all DMA tags and allocate ring memory for every Tx and Rx
 * queue.  On any failure the partially constructed state is torn down
 * via mvneta_detach() and the error is returned.
 */
STATIC int
mvneta_dma_create(struct mvneta_softc *sc)
{
	size_t maxsize, maxsegsz;
	size_t q;
	int error;

	/*
	 * Create Tx DMA
	 */
	maxsize = maxsegsz = sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT;

	/* Tag for the Tx descriptor rings (one contiguous segment each). */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),		/* parent */
	    16, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    maxsize,				/* maxsize */
	    1,					/* nsegments */
	    maxsegsz,				/* maxsegsz */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->tx_dtag);			/* dmat */
	if (error != 0) {
		device_printf(sc->dev,
		    "Failed to create DMA tag for Tx descriptors.\n");
		goto fail;
	}
	/* Tag for Tx mbuf mappings (up to MVNETA_TX_SEGLIMIT segments). */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),		/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MVNETA_MAX_FRAME,			/* maxsize */
	    MVNETA_TX_SEGLIMIT,			/* nsegments */
	    MVNETA_MAX_FRAME,			/* maxsegsz */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->txmbuf_dtag);
	if (error != 0) {
		device_printf(sc->dev,
		    "Failed to create DMA tag for Tx mbufs.\n");
		goto fail;
	}
	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		error = mvneta_ring_alloc_tx_queue(sc, q);
		if (error != 0) {
			device_printf(sc->dev,
			    "Failed to allocate DMA safe memory for TxQ: %zu\n", q);
			goto fail;
		}
	}

	/*
	 * Create Rx DMA.
	 */
	/* Create tag for Rx descripors */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),		/* parent */
	    32, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsize */
	    1,					/* nsegments */
	    sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsegsz */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->rx_dtag);			/* dmat */
	if (error != 0) {
		device_printf(sc->dev,
		    "Failed to create DMA tag for Rx descriptors.\n");
		goto fail;
	}

	/* Create tag for Rx buffers */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),		/* parent */
	    32, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MVNETA_MAX_FRAME, 1,		/* maxsize, nsegments */
	    MVNETA_MAX_FRAME,			/* maxsegsz */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->rxbuf_dtag);			/* dmat */
	if (error != 0) {
		device_printf(sc->dev,
		    "Failed to create DMA tag for Rx buffers.\n");
		goto fail;
	}

	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		if (mvneta_ring_alloc_rx_queue(sc, q) != 0) {
			device_printf(sc->dev,
			    "Failed to allocate DMA safe memory for RxQ: %zu\n", q);
			goto fail;
		}
	}

	return (0);
fail:
	/* Unified cleanup: detach releases whatever was created so far. */
	mvneta_detach(sc->dev);

	return (error);
}
/* ARGSUSED */
/*
 * mvneta_attach: bring the NETA port up far enough to register an ifnet.
 * Order is significant: resources -> DMA engines held in reset -> clock ->
 * interrupt -> MAC address -> ifnet setup -> DMA rings -> registers -> MII.
 * Returns 0 on success or an errno; most failure paths call mvneta_detach()
 * which is written to cope with a partially initialized softc.
 */
int
mvneta_attach(device_t self)
{
struct mvneta_softc *sc;
if_t ifp;
device_t child;
int ifm_target;
int q, error;
#if !defined(__aarch64__)
uint32_t reg;
#endif
clk_t clk;
sc = device_get_softc(self);
sc->dev = self;
mtx_init(&sc->mtx, "mvneta_sc", NULL, MTX_DEF);
error = bus_alloc_resources(self, res_spec, sc->res);
if (error) {
device_printf(self, "could not allocate resources\n");
return (ENXIO);
}
sc->version = MVNETA_READ(sc, MVNETA_PV);
device_printf(self, "version is %x\n", sc->version);
callout_init(&sc->tick_ch, 0);
/*
* make sure DMA engines are in reset state
*/
MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);
/* Core clock frequency is needed later for Rx interrupt coalescing. */
error = clk_get_by_ofw_index(sc->dev, ofw_bus_get_node(sc->dev), 0,
&clk);
if (error != 0) {
#if defined(__aarch64__)
device_printf(sc->dev,
"Cannot get clock, using default frequency: %d\n",
A3700_TCLK_250MHZ);
sc->clk_freq = A3700_TCLK_250MHZ;
#else
device_printf(sc->dev,
"Cannot get clock, using get_tclk()\n");
sc->clk_freq = get_tclk();
#endif
} else {
error = clk_get_freq(clk, &sc->clk_freq);
if (error != 0) {
device_printf(sc->dev,
"Cannot obtain frequency from parent clock\n");
bus_release_resources(sc->dev, res_spec, sc->res);
return (error);
}
}
#if !defined(__aarch64__)
/*
* Disable port snoop for buffers and descriptors
* to avoid L2 caching of both without DRAM copy.
* Obtain coherency settings from the first MBUS
* window attribute.
*/
if ((MVNETA_READ(sc, MV_WIN_NETA_BASE(0)) & IO_WIN_COH_ATTR_MASK) == 0) {
reg = MVNETA_READ(sc, MVNETA_PSNPCFG);
reg &= ~MVNETA_PSNPCFG_DESCSNP_MASK;
reg &= ~MVNETA_PSNPCFG_BUFSNP_MASK;
MVNETA_WRITE(sc, MVNETA_PSNPCFG, reg);
}
#endif
error = bus_setup_intr(self, sc->res[1],
INTR_TYPE_NET | INTR_MPSAFE, NULL, mvneta_intrs[0].handler, sc,
&sc->ih_cookie[0]);
if (error) {
device_printf(self, "could not setup %s\n",
mvneta_intrs[0].description);
mvneta_detach(self);
return (error);
}
/*
* MAC address
*/
/*
* NOTE(review): this failure path returns without mvneta_detach(),
* unlike the surrounding ones — the interrupt handler and resources
* set up above appear to leak here; confirm against the full driver.
*/
if (mvneta_get_mac_address(sc, sc->enaddr)) {
device_printf(self, "no mac address.\n");
return (ENXIO);
}
mvneta_set_mac_address(sc, sc->enaddr);
mvneta_disable_intr(sc);
/* Allocate network interface */
/*
* NOTE(review): the "-" prefixed lines below are unified-diff removal
* residue from the rendered view (the if_alloc() NULL check being
* deleted); they are not compilable C and are preserved verbatim.
*/
ifp = sc->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(self, "if_alloc() failed\n");
- mvneta_detach(self);
- return (ENOMEM);
- }
if_initname(ifp, device_get_name(self), device_get_unit(self));
/*
* We can support 802.1Q VLAN-sized frames and jumbo
* Ethernet frames.
*/
if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU, 0);
if_setsoftc(ifp, sc);
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
#ifdef MVNETA_MULTIQUEUE
if_settransmitfn(ifp, mvneta_transmit);
if_setqflushfn(ifp, mvneta_qflush);
#else /* !MVNETA_MULTIQUEUE */
if_setstartfn(ifp, mvneta_start);
if_setsendqlen(ifp, MVNETA_TX_RING_CNT - 1);
if_setsendqready(ifp);
#endif
if_setinitfn(ifp, mvneta_init);
if_setioctlfn(ifp, mvneta_ioctl);
/*
* We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware.
*/
if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
/*
* As VLAN hardware tagging is not supported
* but is necessary to perform VLAN hardware checksums,
* it is done in the driver
*/
if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM, 0);
/*
* Currently IPv6 HW checksum is broken, so make sure it is disabled.
*/
if_setcapabilitiesbit(ifp, 0, IFCAP_HWCSUM_IPV6);
if_setcapenable(ifp, if_getcapabilities(ifp));
/*
* Disabled option(s):
* - Support for Large Receive Offload
*/
if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
if_sethwassist(ifp, CSUM_IP | CSUM_TCP | CSUM_UDP);
sc->rx_frame_size = MCLBYTES; /* ether_ifattach() always sets normal mtu */
/*
* Device DMA Buffer allocation.
* Handles resource deallocation in case of failure.
*/
error = mvneta_dma_create(sc);
if (error != 0) {
mvneta_detach(self);
return (error);
}
/* Initialize queues */
for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
error = mvneta_ring_init_tx_queue(sc, q);
if (error != 0) {
mvneta_detach(self);
return (error);
}
}
for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
error = mvneta_ring_init_rx_queue(sc, q);
if (error != 0) {
mvneta_detach(self);
return (error);
}
}
/*
* Enable DMA engines and Initialize Device Registers.
*/
MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);
mvneta_sc_lock(sc);
mvneta_filter_setup(sc);
mvneta_sc_unlock(sc);
mvneta_initreg(ifp);
/*
* Now MAC is working, setup MII.
*/
if (mii_init == 0) {
/*
* MII bus is shared by all MACs and all PHYs in SoC.
* serializing the bus access should be safe.
*/
mtx_init(&mii_mutex, "mvneta_mii", NULL, MTX_DEF);
mii_init = 1;
}
/* Attach PHY(s): three mutually exclusive link-management modes. */
if ((sc->phy_addr != MII_PHY_ANY) && (!sc->use_inband_status)) {
/* 1) Real SMI-managed PHY: mii(4) drives link state. */
error = mii_attach(self, &sc->miibus, ifp, mvneta_mediachange,
mvneta_mediastatus, BMSR_DEFCAPMASK, sc->phy_addr,
MII_OFFSET_ANY, 0);
if (error != 0) {
device_printf(self, "MII attach failed, error: %d\n",
error);
ether_ifdetach(sc->ifp);
mvneta_detach(self);
return (error);
}
sc->mii = device_get_softc(sc->miibus);
sc->phy_attached = 1;
/* Disable auto-negotiation in MAC - rely on PHY layer */
mvneta_update_autoneg(sc, FALSE);
} else if (sc->use_inband_status == TRUE) {
/* 2) In-band link status: MAC autonegotiates on the SERDES. */
ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,
mvneta_mediastatus);
/* Configure media */
ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
0, NULL);
ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
0, NULL);
ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
0, NULL);
ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO);
/* Enable auto-negotiation */
mvneta_update_autoneg(sc, TRUE);
mvneta_sc_lock(sc);
if (MVNETA_IS_LINKUP(sc))
mvneta_linkup(sc);
else
mvneta_linkdown(sc);
mvneta_sc_unlock(sc);
} else {
/* 3) Fixed-link, use predefined values */
mvneta_update_autoneg(sc, FALSE);
ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,
mvneta_mediastatus);
ifm_target = IFM_ETHER;
switch (sc->phy_speed) {
case 2500:
if (sc->phy_mode != MVNETA_PHY_SGMII &&
sc->phy_mode != MVNETA_PHY_QSGMII) {
device_printf(self,
"2.5G speed can work only in (Q)SGMII mode\n");
ether_ifdetach(sc->ifp);
mvneta_detach(self);
return (ENXIO);
}
ifm_target |= IFM_2500_T;
break;
case 1000:
ifm_target |= IFM_1000_T;
break;
case 100:
ifm_target |= IFM_100_TX;
break;
case 10:
ifm_target |= IFM_10_T;
break;
default:
ether_ifdetach(sc->ifp);
mvneta_detach(self);
return (ENXIO);
}
if (sc->phy_fdx)
ifm_target |= IFM_FDX;
else
ifm_target |= IFM_HDX;
ifmedia_add(&sc->mvneta_ifmedia, ifm_target, 0, NULL);
ifmedia_set(&sc->mvneta_ifmedia, ifm_target);
if_link_state_change(sc->ifp, LINK_STATE_UP);
/* Fixed link to an Ethernet switch: hang an mdio bus below us. */
if (mvneta_has_switch(self)) {
if (bootverbose)
device_printf(self, "This device is attached to a switch\n");
child = device_add_child(sc->dev, "mdio", -1);
if (child == NULL) {
ether_ifdetach(sc->ifp);
mvneta_detach(self);
return (ENXIO);
}
bus_generic_attach(sc->dev);
bus_generic_attach(child);
}
/* Configure MAC media */
mvneta_update_media(sc, ifm_target);
}
/* Publish the interface and start the 1 Hz housekeeping callout. */
ether_ifattach(ifp, sc->enaddr);
callout_reset(&sc->tick_ch, 0, mvneta_tick, sc);
sysctl_mvneta_init(sc);
return (0);
}
/*
 * mvneta_detach: tear down everything mvneta_attach() may have set up.
 * Also used as the error-unwind path during attach, so every step must
 * tolerate a partially initialized softc (hence the NULL/initialized
 * checks before each release).  Always returns 0.
 */
STATIC int
mvneta_detach(device_t dev)
{
struct mvneta_softc *sc;
int q;
sc = device_get_softc(dev);
/* Only quiesce the port if attach completed and the ifnet is live. */
if (device_is_attached(dev)) {
mvneta_stop(sc);
callout_drain(&sc->tick_ch);
ether_ifdetach(sc->ifp);
}
/* Free per-queue descriptor memory and maps before the parent tags. */
for (q = 0; q < MVNETA_RX_QNUM_MAX; q++)
mvneta_ring_dealloc_rx_queue(sc, q);
for (q = 0; q < MVNETA_TX_QNUM_MAX; q++)
mvneta_ring_dealloc_tx_queue(sc, q);
device_delete_children(dev);
if (sc->ih_cookie[0] != NULL)
bus_teardown_intr(dev, sc->res[1], sc->ih_cookie[0]);
if (sc->tx_dtag != NULL)
bus_dma_tag_destroy(sc->tx_dtag);
if (sc->rx_dtag != NULL)
bus_dma_tag_destroy(sc->rx_dtag);
if (sc->txmbuf_dtag != NULL)
bus_dma_tag_destroy(sc->txmbuf_dtag);
if (sc->rxbuf_dtag != NULL)
bus_dma_tag_destroy(sc->rxbuf_dtag);
bus_release_resources(dev, res_spec, sc->res);
if (sc->ifp)
if_free(sc->ifp);
if (mtx_initialized(&sc->mtx))
mtx_destroy(&sc->mtx);
return (0);
}
/*
* MII
*/
/*
 * miibus read method: read PHY register 'reg' on PHY address 'phy' over
 * the shared SMI interface.  The SMI block is shared by all MACs in the
 * SoC, so all access is serialized by the global mii_mutex.
 * Returns the 16-bit register value, or -1 on timeout.
 */
STATIC int
mvneta_miibus_readreg(device_t dev, int phy, int reg)
{
struct mvneta_softc *sc;
if_t ifp;
uint32_t smi, val;
int i;
sc = device_get_softc(dev);
ifp = sc->ifp;
mtx_lock(&mii_mutex);
/* Wait for any previous SMI transaction to finish. */
for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
break;
DELAY(1);
}
if (i == MVNETA_PHY_TIMEOUT) {
if_printf(ifp, "SMI busy timeout\n");
mtx_unlock(&mii_mutex);
return (-1);
}
/* Issue the read command. */
smi = MVNETA_SMI_PHYAD(phy) |
MVNETA_SMI_REGAD(reg) | MVNETA_SMI_OPCODE_READ;
MVNETA_WRITE(sc, MVNETA_SMI, smi);
/* Wait for the command to be accepted (BUSY to clear again). */
for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
break;
DELAY(1);
}
if (i == MVNETA_PHY_TIMEOUT) {
if_printf(ifp, "SMI busy timeout\n");
mtx_unlock(&mii_mutex);
return (-1);
}
/* Wait for read data to become valid; SMI holds the result. */
for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
smi = MVNETA_READ(sc, MVNETA_SMI);
if (smi & MVNETA_SMI_READVALID)
break;
DELAY(1);
}
if (i == MVNETA_PHY_TIMEOUT) {
/* NOTE(review): message says "busy" but this is the READVALID wait. */
if_printf(ifp, "SMI busy timeout\n");
mtx_unlock(&mii_mutex);
return (-1);
}
mtx_unlock(&mii_mutex);
#ifdef MVNETA_KTR
CTR3(KTR_SPARE2, "%s i=%d, timeout=%d\n", if_getname(ifp), i,
MVNETA_PHY_TIMEOUT);
#endif
val = smi & MVNETA_SMI_DATA_MASK;
#ifdef MVNETA_KTR
CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", if_getname(ifp), phy,
reg, val);
#endif
return (val);
}
/*
 * miibus write method: write 'val' to PHY register 'reg' on PHY address
 * 'phy' over the shared SMI interface, serialized by mii_mutex.
 * Always returns 0 (miibus ignores write errors); timeouts are only
 * logged.
 */
STATIC int
mvneta_miibus_writereg(device_t dev, int phy, int reg, int val)
{
struct mvneta_softc *sc;
if_t ifp;
uint32_t smi;
int i;
sc = device_get_softc(dev);
ifp = sc->ifp;
#ifdef MVNETA_KTR
CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", if_name(ifp),
phy, reg, val);
#endif
mtx_lock(&mii_mutex);
/* Wait for any previous SMI transaction to finish. */
for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
break;
DELAY(1);
}
if (i == MVNETA_PHY_TIMEOUT) {
if_printf(ifp, "SMI busy timeout\n");
mtx_unlock(&mii_mutex);
return (0);
}
/* Issue the write command with the data in the low bits. */
smi = MVNETA_SMI_PHYAD(phy) | MVNETA_SMI_REGAD(reg) |
MVNETA_SMI_OPCODE_WRITE | (val & MVNETA_SMI_DATA_MASK);
MVNETA_WRITE(sc, MVNETA_SMI, smi);
/* Wait for the write to complete; report (but don't fail) on timeout. */
for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
break;
DELAY(1);
}
mtx_unlock(&mii_mutex);
if (i == MVNETA_PHY_TIMEOUT)
if_printf(ifp, "phy write timed out\n");
return (0);
}
/*
 * mvneta_portup: enable every Rx and Tx queue of the port.
 * Each queue is enabled under its own ring lock.
 */
STATIC void
mvneta_portup(struct mvneta_softc *sc)
{
	int qidx;

	/* Rx queues first, then Tx queues — same order as the original. */
	for (qidx = 0; qidx < MVNETA_RX_QNUM_MAX; qidx++) {
		mvneta_rx_lockq(sc, qidx);
		mvneta_rx_queue_enable(sc->ifp, qidx);
		mvneta_rx_unlockq(sc, qidx);
	}

	for (qidx = 0; qidx < MVNETA_TX_QNUM_MAX; qidx++) {
		mvneta_tx_lockq(sc, qidx);
		mvneta_tx_queue_enable(sc->ifp, qidx);
		mvneta_tx_unlockq(sc, qidx);
	}
}
/*
 * mvneta_portdown: quiesce the port.  Marks all software queues disabled,
 * then disables the hardware Rx/Tx queues and busy-waits (bounded by the
 * *_TIMEOUT counters) for DMA activity to stop and the Tx FIFO to drain.
 * On timeout it logs and proceeds rather than hanging.
 */
STATIC void
mvneta_portdown(struct mvneta_softc *sc)
{
struct mvneta_rx_ring *rx;
struct mvneta_tx_ring *tx;
int q, cnt;
uint32_t reg;
/* Mark every software queue disabled so no new work is queued. */
for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
rx = MVNETA_RX_RING(sc, q);
mvneta_rx_lockq(sc, q);
rx->queue_status = MVNETA_QUEUE_DISABLED;
mvneta_rx_unlockq(sc, q);
}
for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
tx = MVNETA_TX_RING(sc, q);
mvneta_tx_lockq(sc, q);
tx->queue_status = MVNETA_QUEUE_DISABLED;
mvneta_tx_unlockq(sc, q);
}
/* Wait for all Rx activity to terminate. */
reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
reg = MVNETA_RQC_DIS(reg);
MVNETA_WRITE(sc, MVNETA_RQC, reg);
cnt = 0;
do {
if (cnt >= RX_DISABLE_TIMEOUT) {
if_printf(sc->ifp,
"timeout for RX stopped. rqc 0x%x\n", reg);
break;
}
cnt++;
reg = MVNETA_READ(sc, MVNETA_RQC);
} while ((reg & MVNETA_RQC_EN_MASK) != 0);
/* Wait for all Tx activity to terminate. */
/* Mask Tx packet interrupts before shutting the Tx queues down. */
reg = MVNETA_READ(sc, MVNETA_PIE);
reg &= ~MVNETA_PIE_TXPKTINTRPTENB_MASK;
MVNETA_WRITE(sc, MVNETA_PIE, reg);
reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
reg &= ~MVNETA_PRXTXTI_TBTCQ_MASK;
MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
reg = MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_EN_MASK;
reg = MVNETA_TQC_DIS(reg);
MVNETA_WRITE(sc, MVNETA_TQC, reg);
cnt = 0;
do {
if (cnt >= TX_DISABLE_TIMEOUT) {
if_printf(sc->ifp,
"timeout for TX stopped. tqc 0x%x\n", reg);
break;
}
cnt++;
reg = MVNETA_READ(sc, MVNETA_TQC);
} while ((reg & MVNETA_TQC_EN_MASK) != 0);
/* Wait for all Tx FIFO is empty */
cnt = 0;
do {
if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
if_printf(sc->ifp,
"timeout for TX FIFO drained. ps0 0x%x\n", reg);
break;
}
cnt++;
reg = MVNETA_READ(sc, MVNETA_PS0);
} while (((reg & MVNETA_PS0_TXFIFOEMP) == 0) &&
((reg & MVNETA_PS0_TXINPROG) != 0));
}
/*
* Device Register Initialization
* reset device registers to device driver default value.
* the device is not enabled here.
*/
STATIC int
mvneta_initreg(if_t ifp)
{
struct mvneta_softc *sc;
int q;
uint32_t reg;
sc = if_getsoftc(ifp);
#ifdef MVNETA_KTR
CTR1(KTR_SPARE2, "%s initializing device register", if_name(ifp));
#endif
/* Disable Legacy WRR, Disable EJP, Release from reset. */
MVNETA_WRITE(sc, MVNETA_TQC_1, 0);
/* Enable mbus retry. */
MVNETA_WRITE(sc, MVNETA_MBUS_CONF, MVNETA_MBUS_RETRY_EN);
/* Init TX/RX Queue Registers */
for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
mvneta_rx_lockq(sc, q);
if (mvneta_rx_queue_init(ifp, q) != 0) {
device_printf(sc->dev,
"initialization failed: cannot initialize queue\n");
mvneta_rx_unlockq(sc, q);
return (ENOBUFS);
}
mvneta_rx_unlockq(sc, q);
}
for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
mvneta_tx_lockq(sc, q);
if (mvneta_tx_queue_init(ifp, q) != 0) {
device_printf(sc->dev,
"initialization failed: cannot initialize queue\n");
mvneta_tx_unlockq(sc, q);
return (ENOBUFS);
}
mvneta_tx_unlockq(sc, q);
}
/*
* Ethernet Unit Control - disable automatic PHY management by HW.
* In case the port uses SMI-controlled PHY, poll its status with
* mii_tick() and update MAC settings accordingly.
*/
reg = MVNETA_READ(sc, MVNETA_EUC);
reg &= ~MVNETA_EUC_POLLING;
MVNETA_WRITE(sc, MVNETA_EUC, reg);
/* EEE: Low Power Idle */
reg = MVNETA_LPIC0_LILIMIT(MVNETA_LPI_LI);
reg |= MVNETA_LPIC0_TSLIMIT(MVNETA_LPI_TS);
MVNETA_WRITE(sc, MVNETA_LPIC0, reg);
reg = MVNETA_LPIC1_TWLIMIT(MVNETA_LPI_TW);
MVNETA_WRITE(sc, MVNETA_LPIC1, reg);
reg = MVNETA_LPIC2_MUSTSET;
MVNETA_WRITE(sc, MVNETA_LPIC2, reg);
/* Port MAC Control set 0 */
reg = MVNETA_PMACC0_MUSTSET; /* must write 0x1 */
reg &= ~MVNETA_PMACC0_PORTEN; /* port is still disabled */
/* Frame size limit follows the current MTU plus Ethernet overhead. */
reg |= MVNETA_PMACC0_FRAMESIZELIMIT(if_getmtu(ifp) + MVNETA_ETHER_SIZE);
MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
/* Port MAC Control set 2 */
/* Select SERDES/PCS mode according to the configured PHY mode. */
reg = MVNETA_READ(sc, MVNETA_PMACC2);
switch (sc->phy_mode) {
case MVNETA_PHY_QSGMII:
reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_QSGMII);
break;
case MVNETA_PHY_SGMII:
reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_SGMII);
break;
case MVNETA_PHY_RGMII:
case MVNETA_PHY_RGMII_ID:
reg |= MVNETA_PMACC2_RGMIIEN;
break;
}
reg |= MVNETA_PMACC2_MUSTSET;
reg &= ~MVNETA_PMACC2_PORTMACRESET;
MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
/* Port Configuration Extended: enable Tx CRC generation */
reg = MVNETA_READ(sc, MVNETA_PXCX);
reg &= ~MVNETA_PXCX_TXCRCDIS;
MVNETA_WRITE(sc, MVNETA_PXCX, reg);
/* clear MIB counter registers(clear by read) */
mvneta_sc_lock(sc);
mvneta_clear_mib(sc);
mvneta_sc_unlock(sc);
/* Set SDC register except IPGINT bits */
reg = MVNETA_SDC_RXBSZ_16_64BITWORDS;
reg |= MVNETA_SDC_TXBSZ_16_64BITWORDS;
reg |= MVNETA_SDC_BLMR;
reg |= MVNETA_SDC_BLMT;
MVNETA_WRITE(sc, MVNETA_SDC, reg);
return (0);
}
/*
 * bus_dmamap_load() callback: stash the bus address of the single
 * segment into the caller-provided bus_addr_t.  On error the output is
 * left untouched.
 */
STATIC void
mvneta_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
{
	bus_addr_t *paddr;

	if (error != 0)
		return;

	paddr = arg;
	*paddr = segs[0].ds_addr;
}
/*
 * Allocate DMA-safe memory and maps for Rx queue 'q': the descriptor
 * ring (loaded to obtain its bus address) plus one busdma map per ring
 * slot for the future mbufs.  On any failure the queue is flushed and
 * deallocated before returning the error.
 */
STATIC int
mvneta_ring_alloc_rx_queue(struct mvneta_softc *sc, int q)
{
struct mvneta_rx_ring *rx;
struct mvneta_buf *rxbuf;
bus_dmamap_t dmap;
int i, error;
if (q >= MVNETA_RX_QNUM_MAX)
return (EINVAL);
rx = MVNETA_RX_RING(sc, q);
mtx_init(&rx->ring_mtx, "mvneta_rx", NULL, MTX_DEF);
/* Allocate DMA memory for Rx descriptors */
error = bus_dmamem_alloc(sc->rx_dtag,
(void**)&(rx->desc),
BUS_DMA_NOWAIT | BUS_DMA_ZERO,
&rx->desc_map);
if (error != 0 || rx->desc == NULL)
goto fail;
/* Load the ring to learn its bus address (written via callback). */
error = bus_dmamap_load(sc->rx_dtag, rx->desc_map,
rx->desc,
sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT,
mvneta_dmamap_cb, &rx->desc_pa, BUS_DMA_NOWAIT);
if (error != 0)
goto fail;
/* One map per slot; mbufs themselves are attached at refill time. */
for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
error = bus_dmamap_create(sc->rxbuf_dtag, 0, &dmap);
if (error != 0) {
device_printf(sc->dev,
"Failed to create DMA map for Rx buffer num: %d\n", i);
goto fail;
}
rxbuf = &rx->rxbuf[i];
rxbuf->dmap = dmap;
rxbuf->m = NULL;
}
return (0);
fail:
/* Undo partial setup; dealloc also destroys the ring mutex. */
mvneta_rx_lockq(sc, q);
mvneta_ring_flush_rx_queue(sc, q);
mvneta_rx_unlockq(sc, q);
mvneta_ring_dealloc_rx_queue(sc, q);
device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");
return (error);
}
/*
 * Allocate DMA-safe memory for Tx queue 'q': the descriptor ring (loaded
 * to obtain its bus address) and, in multiqueue builds, the buf_ring used
 * to stage outgoing mbufs.  On failure the queue is flushed and
 * deallocated before returning the error.
 */
STATIC int
mvneta_ring_alloc_tx_queue(struct mvneta_softc *sc, int q)
{
struct mvneta_tx_ring *tx;
int error;
if (q >= MVNETA_TX_QNUM_MAX)
return (EINVAL);
tx = MVNETA_TX_RING(sc, q);
mtx_init(&tx->ring_mtx, "mvneta_tx", NULL, MTX_DEF);
error = bus_dmamem_alloc(sc->tx_dtag,
(void**)&(tx->desc),
BUS_DMA_NOWAIT | BUS_DMA_ZERO,
&tx->desc_map);
if (error != 0 || tx->desc == NULL)
goto fail;
/* Load the ring to learn its bus address (written via callback). */
error = bus_dmamap_load(sc->tx_dtag, tx->desc_map,
tx->desc,
sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT,
mvneta_dmamap_cb, &tx->desc_pa, BUS_DMA_NOWAIT);
if (error != 0)
goto fail;
#ifdef MVNETA_MULTIQUEUE
tx->br = buf_ring_alloc(MVNETA_BUFRING_SIZE, M_DEVBUF, M_NOWAIT,
&tx->ring_mtx);
if (tx->br == NULL) {
device_printf(sc->dev,
"Could not setup buffer ring for TxQ(%d)\n", q);
error = ENOMEM;
goto fail;
}
#endif
return (0);
fail:
/* Undo partial setup; dealloc also destroys the ring mutex. */
mvneta_tx_lockq(sc, q);
mvneta_ring_flush_tx_queue(sc, q);
mvneta_tx_unlockq(sc, q);
mvneta_ring_dealloc_tx_queue(sc, q);
device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");
return (error);
}
/*
 * Release everything mvneta_ring_alloc_tx_queue()/_init_tx_queue() set up
 * for Tx queue 'q': deferred-transmit task, buf_ring, per-slot busdma
 * maps, the descriptor ring, and the ring mutex.  Safe on a partially
 * initialized queue; finishes by zeroing the ring structure.
 */
STATIC void
mvneta_ring_dealloc_tx_queue(struct mvneta_softc *sc, int q)
{
struct mvneta_tx_ring *tx;
struct mvneta_buf *txbuf;
void *kva;
int error;
int i;
if (q >= MVNETA_TX_QNUM_MAX)
return;
tx = MVNETA_TX_RING(sc, q);
if (tx->taskq != NULL) {
/* Remove task */
while (taskqueue_cancel(tx->taskq, &tx->task, NULL) != 0)
taskqueue_drain(tx->taskq, &tx->task);
}
#ifdef MVNETA_MULTIQUEUE
if (tx->br != NULL)
drbr_free(tx->br, M_DEVBUF);
#endif
if (sc->txmbuf_dtag != NULL) {
for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
txbuf = &tx->txbuf[i];
if (txbuf->dmap != NULL) {
/* A busy map here means an mbuf was leaked; that's fatal. */
error = bus_dmamap_destroy(sc->txmbuf_dtag,
txbuf->dmap);
if (error != 0) {
panic("%s: map busy for Tx descriptor (Q%d, %d)",
__func__, q, i);
}
}
}
}
if (tx->desc_pa != 0)
bus_dmamap_unload(sc->tx_dtag, tx->desc_map);
kva = (void *)tx->desc;
if (kva != NULL)
bus_dmamem_free(sc->tx_dtag, tx->desc, tx->desc_map);
/* mtx_name() non-NULL is used as "mutex was initialized" marker. */
if (mtx_name(&tx->ring_mtx) != NULL)
mtx_destroy(&tx->ring_mtx);
memset(tx, 0, sizeof(*tx));
}
/*
 * Release the Rx queue 'q' resources: unload and free the descriptor
 * ring, tear down the LRO state, and destroy the ring mutex.  Safe on a
 * partially initialized queue; finishes by zeroing the ring structure.
 */
STATIC void
mvneta_ring_dealloc_rx_queue(struct mvneta_softc *sc, int q)
{
struct mvneta_rx_ring *rx;
struct lro_ctrl *lro;
void *kva;
if (q >= MVNETA_RX_QNUM_MAX)
return;
rx = MVNETA_RX_RING(sc, q);
if (rx->desc_pa != 0)
bus_dmamap_unload(sc->rx_dtag, rx->desc_map);
kva = (void *)rx->desc;
if (kva != NULL)
bus_dmamem_free(sc->rx_dtag, rx->desc, rx->desc_map);
lro = &rx->lro;
tcp_lro_free(lro);
/* mtx_name() non-NULL is used as "mutex was initialized" marker. */
if (mtx_name(&rx->ring_mtx) != NULL)
mtx_destroy(&rx->ring_mtx);
memset(rx, 0, sizeof(*rx));
}
/*
 * Software-side initialization of Rx queue 'q': reset ring indices, set
 * the interrupt-coalescing thresholds, and set up LRO when the interface
 * has it enabled.  Always returns 0; an LRO init failure only disables
 * LRO for this queue.
 */
STATIC int
mvneta_ring_init_rx_queue(struct mvneta_softc *sc, int q)
{
	struct mvneta_rx_ring *ring;

	if (q >= MVNETA_RX_QNUM_MAX)
		return (0);

	ring = MVNETA_RX_RING(sc, q);
	ring->dma = 0;
	ring->cpu = 0;

	/* Coalescing: interrupt after this many frames ... */
	ring->queue_th_received = MVNETA_RXTH_COUNT;
	/* ... or after 0.1 ms worth of core-clock cycles. */
	ring->queue_th_time = (sc->clk_freq / 1000) / 10;

	/* LRO is off unless the interface capability is enabled. */
	ring->lro_enabled = FALSE;
	if ((if_getcapenable(sc->ifp) & IFCAP_LRO) != 0) {
		if (tcp_lro_init(&ring->lro) != 0) {
			device_printf(sc->dev, "LRO Initialization failed!\n");
		} else {
			ring->lro.ifp = sc->ifp;
			ring->lro_enabled = TRUE;
		}
	}

	return (0);
}
/*
 * Software-side initialization of Tx queue 'q': create a busdma map per
 * ring slot, reset ring state, and spin up the per-queue deferred
 * transmit taskqueue.  Returns 0 or the bus_dmamap_create() error.
 */
STATIC int
mvneta_ring_init_tx_queue(struct mvneta_softc *sc, int q)
{
struct mvneta_tx_ring *tx;
struct mvneta_buf *txbuf;
int i, error;
if (q >= MVNETA_TX_QNUM_MAX)
return (0);
tx = MVNETA_TX_RING(sc, q);
/* Tx handle */
for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
txbuf = &tx->txbuf[i];
txbuf->m = NULL;
/* Tx handle needs DMA map for busdma_load_mbuf() */
error = bus_dmamap_create(sc->txmbuf_dtag, 0,
&txbuf->dmap);
if (error != 0) {
device_printf(sc->dev,
"can't create dma map (tx ring %d)\n", i);
return (error);
}
}
tx->dma = tx->cpu = 0;
tx->used = 0;
tx->drv_error = 0;
tx->queue_status = MVNETA_QUEUE_DISABLED;
tx->queue_hung = FALSE;
tx->ifp = sc->ifp;
tx->qidx = q;
/* Per-queue task/taskqueue that retries deferred transmits. */
TASK_INIT(&tx->task, 0, mvneta_tx_task, tx);
tx->taskq = taskqueue_create_fast("mvneta_tx_taskq", M_WAITOK,
taskqueue_thread_enqueue, &tx->taskq);
taskqueue_start_threads(&tx->taskq, 1, PI_NET, "%s: tx_taskq(%d)",
device_get_nameunit(sc->dev), q);
return (0);
}
/*
 * Drop every pending mbuf on Tx queue 'q' and reset the ring indices.
 * Caller must hold the queue's ring lock.
 */
STATIC void
mvneta_ring_flush_tx_queue(struct mvneta_softc *sc, int q)
{
	struct mvneta_tx_ring *ring;
	struct mvneta_buf *slot;
	int idx;

	ring = MVNETA_TX_RING(sc, q);
	KASSERT_TX_MTX(sc, q);

	for (idx = 0; idx < MVNETA_TX_RING_CNT; idx++) {
		slot = &ring->txbuf[idx];
		/* Unload is harmless on an already-unloaded map. */
		bus_dmamap_unload(sc->txmbuf_dtag, slot->dmap);
		if (slot->m == NULL)
			continue;
		m_freem(slot->m);
		slot->m = NULL;
	}

	ring->dma = 0;
	ring->cpu = 0;
	ring->used = 0;
}
/*
 * Release every Rx buffer of queue 'q' and reset the ring indices.
 * Caller must hold the queue's ring lock.
 */
STATIC void
mvneta_ring_flush_rx_queue(struct mvneta_softc *sc, int q)
{
	struct mvneta_rx_ring *ring;
	int idx;

	ring = MVNETA_RX_RING(sc, q);
	KASSERT_RX_MTX(sc, q);

	for (idx = 0; idx < MVNETA_RX_RING_CNT; idx++)
		mvneta_rx_buf_free(sc, &ring->rxbuf[idx]);

	ring->dma = 0;
	ring->cpu = 0;
}
/*
* Rx/Tx Queue Control
*/
/*
 * Program the hardware registers of Rx queue 'q': descriptor ring bus
 * address, buffer size, ring size, and packet offset.  Caller holds the
 * queue lock and must have loaded the ring (desc_pa != 0).
 * Always returns 0.
 */
STATIC int
mvneta_rx_queue_init(if_t ifp, int q)
{
struct mvneta_softc *sc;
struct mvneta_rx_ring *rx;
uint32_t reg;
sc = if_getsoftc(ifp);
KASSERT_RX_MTX(sc, q);
rx = MVNETA_RX_RING(sc, q);
DASSERT(rx->desc_pa != 0);
/* descriptor address */
MVNETA_WRITE(sc, MVNETA_PRXDQA(q), rx->desc_pa);
/* Rx buffer size and descriptor ring size */
/* Buffer size is programmed in 8-byte units (hence >> 3). */
reg = MVNETA_PRXDQS_BUFFERSIZE(sc->rx_frame_size >> 3);
reg |= MVNETA_PRXDQS_DESCRIPTORSQUEUESIZE(MVNETA_RX_RING_CNT);
MVNETA_WRITE(sc, MVNETA_PRXDQS(q), reg);
#ifdef MVNETA_KTR
CTR3(KTR_SPARE2, "%s PRXDQS(%d): %#x", if_name(ifp), q,
MVNETA_READ(sc, MVNETA_PRXDQS(q)));
#endif
/* Rx packet offset address */
reg = MVNETA_PRXC_PACKETOFFSET(MVNETA_PACKET_OFFSET >> 3);
MVNETA_WRITE(sc, MVNETA_PRXC(q), reg);
#ifdef MVNETA_KTR
CTR3(KTR_SPARE2, "%s PRXC(%d): %#x", if_name(ifp), q,
MVNETA_READ(sc, MVNETA_PRXC(q)));
#endif
/* if DMA is not working, register is not updated */
DASSERT(MVNETA_READ(sc, MVNETA_PRXDQA(q)) == rx->desc_pa);
return (0);
}
/*
 * Program the hardware registers of Tx queue 'q': descriptor ring bus
 * address and ring size.  Caller holds the queue lock and must have
 * loaded the ring (desc_pa != 0).  Always returns 0.
 */
STATIC int
mvneta_tx_queue_init(if_t ifp, int q)
{
struct mvneta_softc *sc;
struct mvneta_tx_ring *tx;
uint32_t reg;
sc = if_getsoftc(ifp);
KASSERT_TX_MTX(sc, q);
tx = MVNETA_TX_RING(sc, q);
DASSERT(tx->desc_pa != 0);
/* descriptor address */
MVNETA_WRITE(sc, MVNETA_PTXDQA(q), tx->desc_pa);
/* descriptor ring size */
reg = MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT);
MVNETA_WRITE(sc, MVNETA_PTXDQS(q), reg);
/* if DMA is not working, register is not updated */
DASSERT(MVNETA_READ(sc, MVNETA_PTXDQA(q)) == tx->desc_pa);
return (0);
}
/*
 * Enable Rx queue 'q' in hardware: program the interrupt-coalescing
 * thresholds (packet count and time), unmask its RXTX_TH interrupt bit,
 * and set the queue-enable bit.  Caller holds the queue lock.
 * Always returns 0.
 */
STATIC int
mvneta_rx_queue_enable(if_t ifp, int q)
{
struct mvneta_softc *sc;
struct mvneta_rx_ring *rx;
uint32_t reg;
sc = if_getsoftc(ifp);
rx = MVNETA_RX_RING(sc, q);
KASSERT_RX_MTX(sc, q);
/* Set Rx interrupt threshold */
reg = MVNETA_PRXDQTH_ODT(rx->queue_th_received);
MVNETA_WRITE(sc, MVNETA_PRXDQTH(q), reg);
reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
MVNETA_WRITE(sc, MVNETA_PRXITTH(q), reg);
/* Unmask RXTX_TH Intr. */
reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
reg |= MVNETA_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalese */
MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
/* Enable Rx queue */
reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
reg |= MVNETA_RQC_ENQ(q);
MVNETA_WRITE(sc, MVNETA_RQC, reg);
rx->queue_status = MVNETA_QUEUE_WORKING;
return (0);
}
/*
 * Enable Tx queue 'q' in hardware and mark its software state idle.
 * Caller holds the queue lock.  Always returns 0.
 */
STATIC int
mvneta_tx_queue_enable(if_t ifp, int q)
{
	struct mvneta_softc *sc;
	struct mvneta_tx_ring *ring;

	sc = if_getsoftc(ifp);
	ring = MVNETA_TX_RING(sc, q);
	KASSERT_TX_MTX(sc, q);

	/* Set the queue-enable bit for this queue. */
	MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(q));

	ring->queue_hung = FALSE;
	ring->queue_status = MVNETA_QUEUE_IDLE;
	return (0);
}
/* Acquire the ring mutex of Rx queue 'q' (bounds-checked in DEBUG). */
STATIC __inline void
mvneta_rx_lockq(struct mvneta_softc *sc, int q)
{
	struct mtx *mtxp;

	DASSERT(q >= 0);
	DASSERT(q < MVNETA_RX_QNUM_MAX);
	mtxp = &sc->rx_ring[q].ring_mtx;
	mtx_lock(mtxp);
}
/* Release the ring mutex of Rx queue 'q' (bounds-checked in DEBUG). */
STATIC __inline void
mvneta_rx_unlockq(struct mvneta_softc *sc, int q)
{
	struct mtx *mtxp;

	DASSERT(q >= 0);
	DASSERT(q < MVNETA_RX_QNUM_MAX);
	mtxp = &sc->rx_ring[q].ring_mtx;
	mtx_unlock(mtxp);
}
/*
 * Try to acquire the ring mutex of Tx queue 'q' without blocking.
 * Returns non-zero on success, 0 if the lock is contended.
 */
STATIC __inline int __unused
mvneta_tx_trylockq(struct mvneta_softc *sc, int q)
{
	struct mtx *mtxp;

	DASSERT(q >= 0);
	DASSERT(q < MVNETA_TX_QNUM_MAX);
	mtxp = &sc->tx_ring[q].ring_mtx;
	return (mtx_trylock(mtxp));
}
/* Acquire the ring mutex of Tx queue 'q' (bounds-checked in DEBUG). */
STATIC __inline void
mvneta_tx_lockq(struct mvneta_softc *sc, int q)
{
	struct mtx *mtxp;

	DASSERT(q >= 0);
	DASSERT(q < MVNETA_TX_QNUM_MAX);
	mtxp = &sc->tx_ring[q].ring_mtx;
	mtx_lock(mtxp);
}
/* Release the ring mutex of Tx queue 'q' (bounds-checked in DEBUG). */
STATIC __inline void
mvneta_tx_unlockq(struct mvneta_softc *sc, int q)
{
	struct mtx *mtxp;

	DASSERT(q >= 0);
	DASSERT(q < MVNETA_TX_QNUM_MAX);
	mtxp = &sc->tx_ring[q].ring_mtx;
	mtx_unlock(mtxp);
}
/*
* Interrupt Handlers
*/
STATIC void
mvneta_disable_intr(struct mvneta_softc *sc)
{
MVNETA_WRITE(sc, MVNETA_EUIM, 0);
MVNETA_WRITE(sc, MVNETA_EUIC, 0);
MVNETA_WRITE(sc, MVNETA_PRXTXTIM, 0);
MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);
MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);
MVNETA_WRITE(sc, MVNETA_PMIM, 0);
MVNETA_WRITE(sc, MVNETA_PMIC, 0);
MVNETA_WRITE(sc, MVNETA_PIE, 0);
}
/*
 * Enable the interrupt sources the driver uses: the RXTX_TH summary bit,
 * port MISC (link) interrupts when link state is not tracked by an
 * attached PHY, and per-queue Rx/Tx packet interrupts.
 */
STATIC void
mvneta_enable_intr(struct mvneta_softc *sc)
{
uint32_t reg;
/* Enable Summary Bit to check all interrupt cause. */
reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
reg |= MVNETA_PRXTXTI_PMISCICSUMMARY;
MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
if (!sc->phy_attached || sc->use_inband_status) {
/* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */
MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE);
}
/* Enable All Queue Interrupt */
reg = MVNETA_READ(sc, MVNETA_PIE);
reg |= MVNETA_PIE_RXPKTINTRPTENB_MASK;
reg |= MVNETA_PIE_TXPKTINTRPTENB_MASK;
MVNETA_WRITE(sc, MVNETA_PIE, reg);
}
/*
 * RXTX_TH interrupt handler (the driver's single interrupt).  Reads and
 * acks the cause register, dispatches MISC (link) events first, then
 * services Rx on queue 0.  Tx completion is handled from the transmit
 * path, not here.
 */
STATIC void
mvneta_rxtxth_intr(void *arg)
{
struct mvneta_softc *sc;
if_t ifp;
uint32_t ic, queues;
sc = arg;
ifp = sc->ifp;
#ifdef MVNETA_KTR
CTR1(KTR_SPARE2, "%s got RXTX_TH_Intr", if_name(ifp));
#endif
ic = MVNETA_READ(sc, MVNETA_PRXTXTIC);
if (ic == 0)
return;
/* Ack by writing the complement of the cause bits we saw. */
MVNETA_WRITE(sc, MVNETA_PRXTXTIC, ~ic);
/* Ack maintenance interrupt first */
if (__predict_false((ic & MVNETA_PRXTXTI_PMISCICSUMMARY) &&
(!sc->phy_attached || sc->use_inband_status))) {
mvneta_sc_lock(sc);
mvneta_misc_intr(sc);
mvneta_sc_unlock(sc);
}
if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)))
return;
/* RxTxTH interrupt */
queues = MVNETA_PRXTXTI_GET_RBICTAPQ(ic);
if (__predict_true(queues)) {
#ifdef MVNETA_KTR
CTR1(KTR_SPARE2, "%s got PRXTXTIC: +RXEOF", if_name(ifp));
#endif
/* At the moment the driver support only one RX queue. */
DASSERT(MVNETA_IS_QUEUE_SET(queues, 0));
mvneta_rx(sc, 0, 0);
}
}
/*
 * Service port MISC interrupts (PHY status / link / PCS sync changes)
 * until the unmasked cause register reads zero.  Returns 1 if anything
 * was handled, 0 otherwise.  Caller holds the softc lock.
 */
STATIC int
mvneta_misc_intr(struct mvneta_softc *sc)
{
	uint32_t cause;
	int handled;

#ifdef MVNETA_KTR
	CTR1(KTR_SPARE2, "%s got MISC_INTR", if_name(sc->ifp));
#endif
	KASSERT_SC_MTX(sc);

	handled = 0;
	/* Re-read until no unmasked cause bits remain. */
	while ((cause = (MVNETA_READ(sc, MVNETA_PMIC) &
	    MVNETA_READ(sc, MVNETA_PMIM))) != 0) {
		/* Ack exactly the bits we are about to handle. */
		MVNETA_WRITE(sc, MVNETA_PMIC, ~cause);
		handled = 1;
		if ((cause & (MVNETA_PMI_PHYSTATUSCHNG |
		    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE)) != 0)
			mvneta_link_isr(sc);
	}
	return (handled);
}
/*
 * Periodic (1 Hz) housekeeping callout: reap finished Tx mbufs, refresh
 * MIB counters, drive mii_tick() for SMI PHYs, re-kick Rx refill if it
 * previously failed, and run the Tx watchdog.  A hung queue triggers a
 * full stop/init cycle; otherwise the callout reschedules itself.
 */
STATIC void
mvneta_tick(void *arg)
{
struct mvneta_softc *sc;
struct mvneta_tx_ring *tx;
struct mvneta_rx_ring *rx;
int q;
uint32_t fc_prev, fc_curr;
sc = arg;
/*
* This is done before mib update to get the right stats
* for this tick.
*/
mvneta_tx_drain(sc);
/* Extract previous flow-control frame received counter. */
fc_prev = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
/* Read mib registers (clear by read). */
mvneta_update_mib(sc);
/* Extract current flow-control frame received counter. */
fc_curr = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
if (sc->phy_attached && if_getflags(sc->ifp) & IFF_UP) {
mvneta_sc_lock(sc);
mii_tick(sc->mii);
/* Adjust MAC settings */
mvneta_adjust_link(sc);
mvneta_sc_unlock(sc);
}
/*
* We were unable to refill the rx queue and left the rx func, leaving
* the ring without mbuf and no way to call the refill func.
*/
for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
rx = MVNETA_RX_RING(sc, q);
if (rx->needs_refill == TRUE) {
mvneta_rx_lockq(sc, q);
mvneta_rx_queue_refill(sc, q);
mvneta_rx_unlockq(sc, q);
}
}
/*
* Watchdog:
* - check if queue is mark as hung.
* - ignore hung status if we received some pause frame
* as hardware may have paused packet transmit.
*/
for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
/*
* We should take queue lock, but as we only read
* queue status we can do it without lock, we may
* only missdetect queue status for one tick.
*/
tx = MVNETA_TX_RING(sc, q);
if (tx->queue_hung && (fc_curr - fc_prev) == 0)
goto timeout;
}
callout_schedule(&sc->tick_ch, hz);
return;
timeout:
if_printf(sc->ifp, "watchdog timeout\n");
mvneta_sc_lock(sc);
sc->counter_watchdog++;
sc->counter_watchdog_mib++;
/* Trigger reinitialize sequence. */
/* Note: the callout is not rescheduled here; mvneta_init_locked
* restarting the interface is expected to restart it — TODO confirm. */
mvneta_stop_locked(sc);
mvneta_init_locked(sc);
mvneta_sc_unlock(sc);
}
/*
 * if_qflush method: in multiqueue builds, drop every mbuf staged in each
 * queue's buf_ring under its ring lock, then flush the generic ifnet
 * send queue.
 */
STATIC void
mvneta_qflush(if_t ifp)
{
#ifdef MVNETA_MULTIQUEUE
struct mvneta_softc *sc;
struct mvneta_tx_ring *tx;
struct mbuf *m;
size_t q;
sc = if_getsoftc(ifp);
for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
tx = MVNETA_TX_RING(sc, q);
mvneta_tx_lockq(sc, q);
while ((m = buf_ring_dequeue_sc(tx->br)) != NULL)
m_freem(m);
mvneta_tx_unlockq(sc, q);
}
#endif
if_qflush(ifp);
}
/*
 * Deferred-transmit task for one Tx queue: retry mvneta_xmit_locked()
 * and, on a transient failure (anything but ENETDOWN), sleep one tick
 * and requeue itself.
 */
STATIC void
mvneta_tx_task(void *arg, int pending)
{
	struct mvneta_tx_ring *ring;
	struct mvneta_softc *sc;
	int err;

	ring = arg;
	sc = if_getsoftc(ring->ifp);

	mvneta_tx_lockq(sc, ring->qidx);
	err = mvneta_xmit_locked(sc, ring->qidx);
	mvneta_tx_unlockq(sc, ring->qidx);

	if (__predict_false(err != 0 && err != ENETDOWN)) {
		/* Transient failure: back off briefly and try again. */
		pause("mvneta_tx_task_sleep", 1);
		taskqueue_enqueue(ring->taskq, &ring->task);
	}
}
/*
 * Fast-path enqueue of one mbuf onto Tx queue 'q'.  Caller holds the
 * queue lock.  On ENETDOWN the mbuf is freed and *m is set to NULL;
 * on other errors the caller keeps ownership of *m.  Returns 0 on
 * success, ENETDOWN, ENOBUFS (ring too full), or a mvneta_tx_queue()
 * error.
 */
STATIC int
mvneta_xmitfast_locked(struct mvneta_softc *sc, int q, struct mbuf **m)
{
struct mvneta_tx_ring *tx;
if_t ifp;
int error;
KASSERT_TX_MTX(sc, q);
tx = MVNETA_TX_RING(sc, q);
error = 0;
ifp = sc->ifp;
/* Dont enqueue packet if the queue is disabled. */
if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) {
m_freem(*m);
*m = NULL;
return (ENETDOWN);
}
/* Reclaim mbuf if above threshold. */
if (__predict_true(tx->used > MVNETA_TX_RECLAIM_COUNT))
mvneta_tx_queue_complete(sc, q);
/* Do not call transmit path if queue is already too full. */
/* Leave room for a maximally fragmented frame. */
if (__predict_false(tx->used >
MVNETA_TX_RING_CNT - MVNETA_TX_SEGLIMIT))
return (ENOBUFS);
error = mvneta_tx_queue(sc, m, q);
if (__predict_false(error != 0))
return (error);
/* Send a copy of the frame to the BPF listener */
ETHER_BPF_MTAP(ifp, *m);
/* Set watchdog on */
tx->watchdog_time = ticks;
tx->queue_status = MVNETA_QUEUE_WORKING;
return (error);
}
#ifdef MVNETA_MULTIQUEUE
/*
 * if_transmit method (multiqueue build): pick a Tx queue from the mbuf
 * flow id, try the lock-free fast path when the buf_ring is empty, and
 * otherwise stage the mbuf in the buf_ring and let the per-queue task
 * push it out.  Returns 0 or a drbr/fast-path error.
 */
STATIC int
mvneta_transmit(if_t ifp, struct mbuf *m)
{
struct mvneta_softc *sc;
struct mvneta_tx_ring *tx;
int error;
int q;
sc = if_getsoftc(ifp);
/* Use default queue if there is no flow id as thread can migrate. */
if (__predict_true(M_HASHTYPE_GET(m) != M_HASHTYPE_NONE))
q = m->m_pkthdr.flowid % MVNETA_TX_QNUM_MAX;
else
q = 0;
tx = MVNETA_TX_RING(sc, q);
/* If buf_ring is full start transmit immediately. */
if (buf_ring_full(tx->br)) {
mvneta_tx_lockq(sc, q);
mvneta_xmit_locked(sc, q);
mvneta_tx_unlockq(sc, q);
}
/*
* If the buf_ring is empty we will not reorder packets.
* If the lock is available transmit without using buf_ring.
*/
if (buf_ring_empty(tx->br) && mvneta_tx_trylockq(sc, q) != 0) {
error = mvneta_xmitfast_locked(sc, q, &m);
mvneta_tx_unlockq(sc, q);
if (__predict_true(error == 0))
return (0);
/* Transmit can fail in fastpath. */
/* m == NULL means the mbuf was consumed (freed) by the fast path. */
if (__predict_false(m == NULL))
return (error);
}
/* Enqueue then schedule taskqueue. */
error = drbr_enqueue(ifp, tx->br, m);
if (__predict_false(error != 0))
return (error);
taskqueue_enqueue(tx->taskq, &tx->task);
return (0);
}
/*
 * Drain the buf_ring of Tx queue 'q' through the fast path (multiqueue
 * build).  Caller holds the queue lock.  Uses peek/advance so a
 * temporarily failed mbuf can be put back at the head.  Returns the
 * first fast-path error, or 0.
 */
STATIC int
mvneta_xmit_locked(struct mvneta_softc *sc, int q)
{
if_t ifp;
struct mvneta_tx_ring *tx;
struct mbuf *m;
int error;
KASSERT_TX_MTX(sc, q);
ifp = sc->ifp;
tx = MVNETA_TX_RING(sc, q);
error = 0;
while ((m = drbr_peek(ifp, tx->br)) != NULL) {
error = mvneta_xmitfast_locked(sc, q, &m);
if (__predict_false(error != 0)) {
/* m != NULL: retryable later; m == NULL: already freed. */
if (m != NULL)
drbr_putback(ifp, tx->br, m);
else
drbr_advance(ifp, tx->br);
break;
}
drbr_advance(ifp, tx->br);
}
return (error);
}
#else /* !MVNETA_MULTIQUEUE */
/*
 * if_start method (single-queue build): push the ifnet send queue out
 * through queue 0; on a transient failure hand the retry to the
 * deferred-transmit task.
 */
STATIC void
mvneta_start(if_t ifp)
{
	struct mvneta_softc *sc;
	struct mvneta_tx_ring *ring;
	int err;

	sc = if_getsoftc(ifp);
	ring = MVNETA_TX_RING(sc, 0);

	mvneta_tx_lockq(sc, 0);
	err = mvneta_xmit_locked(sc, 0);
	mvneta_tx_unlockq(sc, 0);

	/* Handle retransmit in the background taskq. */
	if (__predict_false(err != 0 && err != ENETDOWN))
		taskqueue_enqueue(ring->taskq, &ring->task);
}
/*
 * Drain the ifnet send queue through the fast path (single-queue build).
 * Caller holds the queue lock.  A failed mbuf that was not consumed is
 * prepended back onto the send queue.  Returns the first fast-path
 * error, or 0.
 */
STATIC int
mvneta_xmit_locked(struct mvneta_softc *sc, int q)
{
	if_t ifp;
	struct mbuf *pkt;
	int err;

	KASSERT_TX_MTX(sc, q);
	ifp = sc->ifp;
	err = 0;

	for (;;) {
		if (if_sendq_empty(ifp))
			break;
		pkt = if_dequeue(ifp);
		if (pkt == NULL)
			break;
		err = mvneta_xmitfast_locked(sc, q, &pkt);
		if (__predict_false(err != 0)) {
			/* NULL means the fast path already freed the mbuf. */
			if (pkt != NULL)
				if_sendq_prepend(ifp, pkt);
			break;
		}
	}
	return (err);
}
#endif
/*
 * Interface ioctl handler.
 *
 * ifp:  interface whose state is being changed/queried
 * cmd:  SIOC* ioctl command
 * data: command-specific argument (usually a struct ifreq)
 *
 * Returns 0 on success or an errno value.
 */
STATIC int
mvneta_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct mvneta_softc *sc;
	struct mvneta_rx_ring *rx;
	struct ifreq *ifr;
	int error, mask;
	uint32_t flags;
	bool reinit;
	int q;

	error = 0;
	reinit = false;
	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;
	switch (cmd) {
	case SIOCSIFFLAGS:
		mvneta_sc_lock(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/*
				 * Already running: only reprogram the RX
				 * filter when a relevant flag (PROMISC)
				 * actually changed.
				 */
				flags = if_getflags(ifp) ^ sc->mvneta_if_flags;
				if (flags != 0)
					sc->mvneta_if_flags = if_getflags(ifp);
				if ((flags & IFF_PROMISC) != 0)
					mvneta_filter_setup(sc);
			} else {
				/* Bring the interface up. */
				mvneta_init_locked(sc);
				sc->mvneta_if_flags = if_getflags(ifp);
				if (sc->phy_attached)
					mii_mediachg(sc->mii);
				mvneta_sc_unlock(sc);
				break;
			}
		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			mvneta_stop_locked(sc);
		sc->mvneta_if_flags = if_getflags(ifp);
		mvneta_sc_unlock(sc);
		break;
	case SIOCSIFCAP:
		/*
		 * TX checksum offload cannot be used above tx_csum_limit
		 * MTU; silently strip the request in that case.
		 */
		if (if_getmtu(ifp) > sc->tx_csum_limit &&
		    ifr->ifr_reqcap & IFCAP_TXCSUM)
			ifr->ifr_reqcap &= ~IFCAP_TXCSUM;
		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
		if (mask & IFCAP_HWCSUM) {
			if_setcapenablebit(ifp, IFCAP_HWCSUM & ifr->ifr_reqcap,
			    IFCAP_HWCSUM);
			if (if_getcapenable(ifp) & IFCAP_TXCSUM)
				if_sethwassist(ifp, CSUM_IP | CSUM_TCP |
				    CSUM_UDP);
			else
				if_sethwassist(ifp, 0);
		}
		if (mask & IFCAP_LRO) {
			mvneta_sc_lock(sc);
			if_togglecapenable(ifp, IFCAP_LRO);
			/* Mirror the new LRO setting into every RX ring. */
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
					rx = MVNETA_RX_RING(sc, q);
					rx->lro_enabled = !rx->lro_enabled;
				}
			}
			mvneta_sc_unlock(sc);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	case SIOCSIFMEDIA:
		/*
		 * Gigabit and 2.5G operation is full-duplex only on this
		 * MAC; reject half-duplex requests, then fall through to
		 * the common ifmedia handling below.
		 */
		if ((IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ||
		    IFM_SUBTYPE(ifr->ifr_media) == IFM_2500_T) &&
		    (ifr->ifr_media & IFM_FDX) == 0) {
			device_printf(sc->dev,
			    "%s half-duplex unsupported\n",
			    IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ?
			    "1000Base-T" :
			    "2500Base-T");
			error = EINVAL;
			break;
		}
	case SIOCGIFMEDIA: /* FALLTHROUGH */
	case SIOCGIFXMEDIA:
		/* Fixed-link (no PHY) uses the driver-owned ifmedia. */
		if (!sc->phy_attached)
			error = ifmedia_ioctl(ifp, ifr, &sc->mvneta_ifmedia,
			    cmd);
		else
			error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media,
			    cmd);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < 68 || ifr->ifr_mtu > MVNETA_MAX_FRAME -
		    MVNETA_ETHER_SIZE) {
			error = EINVAL;
		} else {
			if_setmtu(ifp, ifr->ifr_mtu);
			mvneta_sc_lock(sc);
			/* Pick an RX buffer size large enough for the MTU. */
			if (if_getmtu(ifp) + MVNETA_ETHER_SIZE <= MCLBYTES) {
				sc->rx_frame_size = MCLBYTES;
			} else {
				sc->rx_frame_size = MJUM9BYTES;
			}
			/* Large MTU disables TX checksum offload (HW limit). */
			if (if_getmtu(ifp) > sc->tx_csum_limit) {
				if_setcapenablebit(ifp, 0, IFCAP_TXCSUM);
				if_sethwassist(ifp, 0);
			} else {
				if_setcapenablebit(ifp, IFCAP_TXCSUM, 0);
				if_sethwassist(ifp, CSUM_IP | CSUM_TCP |
				    CSUM_UDP);
			}
			/*
			 * Reinitialize RX queues.
			 * We need to update RX descriptor size.
			 */
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				reinit = true;
				mvneta_stop_locked(sc);
			}
			for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
				mvneta_rx_lockq(sc, q);
				if (mvneta_rx_queue_init(ifp, q) != 0) {
					device_printf(sc->dev,
					    "initialization failed:"
					    " cannot initialize queue\n");
					mvneta_rx_unlockq(sc, q);
					error = ENOBUFS;
					break;
				}
				mvneta_rx_unlockq(sc, q);
			}
			if (reinit)
				mvneta_init_locked(sc);
			mvneta_sc_unlock(sc);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
/*
 * Bring the port up: program MAC address and RX filter, release the DMA
 * engines from reset, enable the port, refill RX rings, and enable
 * interrupts.  Caller holds the softc lock (arg is the softc).
 */
STATIC void
mvneta_init_locked(void *arg)
{
	struct mvneta_softc *sc;
	if_t ifp;
	uint32_t reg;
	int q, cpu;

	sc = arg;
	ifp = sc->ifp;

	/* Nothing to do if the device is detached or already running. */
	if (!device_is_attached(sc->dev) ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;

	mvneta_disable_intr(sc);
	callout_stop(&sc->tick_ch);

	/* Get the latest mac address */
	bcopy(if_getlladdr(ifp), sc->enaddr, ETHER_ADDR_LEN);
	mvneta_set_mac_address(sc, sc->enaddr);
	mvneta_filter_setup(sc);

	/* Start DMA Engine (writing 0 releases the reset state) */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
	MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);

	/* Enable port and set the frame size limit from the current MTU */
	reg  = MVNETA_READ(sc, MVNETA_PMACC0);
	reg |= MVNETA_PMACC0_PORTEN;
	reg &= ~MVNETA_PMACC0_FRAMESIZELIMIT_MASK;
	reg |= MVNETA_PMACC0_FRAMESIZELIMIT(if_getmtu(ifp) + MVNETA_ETHER_SIZE);
	MVNETA_WRITE(sc, MVNETA_PMACC0, reg);

	/* Allow access to each TXQ/RXQ from both CPU's */
	for (cpu = 0; cpu < mp_ncpus; ++cpu)
		MVNETA_WRITE(sc, MVNETA_PCP2Q(cpu),
		    MVNETA_PCP2Q_TXQEN_MASK | MVNETA_PCP2Q_RXQEN_MASK);

	/* Populate every RX ring with fresh buffers. */
	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		mvneta_rx_lockq(sc, q);
		mvneta_rx_queue_refill(sc, q);
		mvneta_rx_unlockq(sc, q);
	}

	/* Without a PHY, force the link up here (fixed-link mode). */
	if (!sc->phy_attached)
		mvneta_linkup(sc);

	/* Enable interrupt */
	mvneta_enable_intr(sc);

	/* Set Counter */
	callout_schedule(&sc->tick_ch, hz);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
}
/*
 * Unlocked wrapper for mvneta_init_locked(); also kicks the PHY media
 * selection when a PHY is attached.  arg is the softc (if_init callback).
 */
STATIC void
mvneta_init(void *arg)
{
	struct mvneta_softc *sc = arg;

	mvneta_sc_lock(sc);
	mvneta_init_locked(sc);
	if (sc->phy_attached)
		mii_mediachg(sc->mii);
	mvneta_sc_unlock(sc);
}
/* ARGSUSED */
/*
 * Bring the port down: disable interrupts, drop the link, disable the
 * MAC, flush all RX/TX rings, and put the DMA engines back into reset.
 * Caller holds the softc lock.  Safe to call when already stopped.
 */
STATIC void
mvneta_stop_locked(struct mvneta_softc *sc)
{
	if_t ifp;
	uint32_t reg;
	int q;

	ifp = sc->ifp;
	/* ifp may be NULL during attach failure / early detach. */
	if (ifp == NULL || (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	mvneta_disable_intr(sc);
	callout_stop(&sc->tick_ch);
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);

	/* Link down */
	if (sc->linkup == TRUE)
		mvneta_linkdown(sc);

	/* Reset the MAC Port Enable bit */
	reg = MVNETA_READ(sc, MVNETA_PMACC0);
	reg &= ~MVNETA_PMACC0_PORTEN;
	MVNETA_WRITE(sc, MVNETA_PMACC0, reg);

	/* Disable each of queue */
	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		mvneta_rx_lockq(sc, q);
		mvneta_ring_flush_rx_queue(sc, q);
		mvneta_rx_unlockq(sc, q);
	}

	/*
	 * Hold Reset state of DMA Engine
	 * (must write 0x0 to restart it)
	 */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);
	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		mvneta_tx_lockq(sc, q);
		mvneta_ring_flush_tx_queue(sc, q);
		mvneta_tx_unlockq(sc, q);
	}
}
/* Unlocked wrapper around mvneta_stop_locked(). */
STATIC void
mvneta_stop(struct mvneta_softc *sc)
{
	mvneta_sc_lock(sc);
	mvneta_stop_locked(sc);
	mvneta_sc_unlock(sc);
}
/*
 * ifmedia change callback.  In fixed-link mode without inband status the
 * media cannot change; with inband status the MAC is reprogrammed
 * directly, otherwise the request is handed to the PHY via mii(4).
 * Always returns 0 (errors are only printed).
 */
STATIC int
mvneta_mediachange(if_t ifp)
{
	struct mvneta_softc *sc;

	sc = if_getsoftc(ifp);

	if (!sc->phy_attached && !sc->use_inband_status) {
		/* We shouldn't be here */
		if_printf(ifp, "Cannot change media in fixed-link mode!\n");
		return (0);
	}

	if (sc->use_inband_status) {
		mvneta_update_media(sc, sc->mvneta_ifmedia.ifm_media);
		return (0);
	}

	mvneta_sc_lock(sc);

	/* Update PHY */
	mii_mediachg(sc->mii);

	mvneta_sc_unlock(sc);
	return (0);
}
/*
 * Fill *ifmr with speed/duplex/link state decoded from the port status
 * register (used for inband status, i.e. no PHY polling).
 *
 * NOTE(review): ifmr->ifm_active is only assigned when one of the speed
 * bits or LINKUP is set; otherwise it keeps the caller-provided value —
 * presumably callers pre-initialize it, verify against callers.
 */
STATIC void
mvneta_get_media(struct mvneta_softc *sc, struct ifmediareq *ifmr)
{
	uint32_t psr;

	psr = MVNETA_READ(sc, MVNETA_PSR);

	/* Speed */
	if (psr & MVNETA_PSR_GMIISPEED)
		ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_1000_T);
	else if (psr & MVNETA_PSR_MIISPEED)
		ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_100_TX);
	else if (psr & MVNETA_PSR_LINKUP)
		ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_10_T);

	/* Duplex */
	if (psr & MVNETA_PSR_FULLDX)
		ifmr->ifm_active |= IFM_FDX;

	/* Link */
	ifmr->ifm_status = IFM_AVALID;
	if (psr & MVNETA_PSR_LINKUP)
		ifmr->ifm_status |= IFM_ACTIVE;
}
/*
 * ifmedia status callback: report current media to *ifmr.
 * Fixed-link without inband status always reports link up; inband
 * status reads the hardware, otherwise mii(4) is polled.
 */
STATIC void
mvneta_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
	struct mvneta_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);

	if (!sc->phy_attached && !sc->use_inband_status) {
		ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
		return;
	}

	mvneta_sc_lock(sc);

	if (sc->use_inband_status) {
		mvneta_get_media(sc, ifmr);
		mvneta_sc_unlock(sc);
		return;
	}

	mii = sc->mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	mvneta_sc_unlock(sc);
}
/*
* Link State Notify
*/
/*
 * Enable or disable inband autonegotiation in the MAC.
 * Programs PANC (negotiation control), PMACC2 (inband mode) and
 * PSOMSCD consistently in the requested direction.
 */
STATIC void
mvneta_update_autoneg(struct mvneta_softc *sc, int enable)
{
	int reg;

	if (enable) {
		/* Clear forced-link bits, enable duplex/speed/inband AN. */
		reg = MVNETA_READ(sc, MVNETA_PANC);
		reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
		    MVNETA_PANC_ANFCEN);
		reg |= MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
		    MVNETA_PANC_INBANDANEN;
		MVNETA_WRITE(sc, MVNETA_PANC, reg);

		reg = MVNETA_READ(sc, MVNETA_PMACC2);
		reg |= MVNETA_PMACC2_INBANDANMODE;
		MVNETA_WRITE(sc, MVNETA_PMACC2, reg);

		reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
		reg |= MVNETA_PSOMSCD_ENABLE;
		MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);
	} else {
		/* Disable all autonegotiation-related controls. */
		reg = MVNETA_READ(sc, MVNETA_PANC);
		reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
		    MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
		    MVNETA_PANC_INBANDANEN);
		MVNETA_WRITE(sc, MVNETA_PANC, reg);

		reg = MVNETA_READ(sc, MVNETA_PMACC2);
		reg &= ~MVNETA_PMACC2_INBANDANMODE;
		MVNETA_WRITE(sc, MVNETA_PMACC2, reg);

		reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
		reg &= ~MVNETA_PSOMSCD_ENABLE;
		MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);
	}
}
/*
 * Apply a new media word to the MAC: reset the link, stop the port if
 * running, reprogram autonegotiation/EEE/flow-control, and for
 * non-AUTO media force speed/duplex in PANC.  Restarts the port if it
 * was running.  Returns 0 or EINVAL (half-duplex gigabit rejected).
 */
STATIC int
mvneta_update_media(struct mvneta_softc *sc, int media)
{
	int reg, err;
	boolean_t running;

	err = 0;

	mvneta_sc_lock(sc);

	mvneta_linkreset(sc);

	running = (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0;
	if (running)
		mvneta_stop_locked(sc);

	sc->autoneg = (IFM_SUBTYPE(media) == IFM_AUTO);

	if (!sc->phy_attached || sc->use_inband_status)
		mvneta_update_autoneg(sc, IFM_SUBTYPE(media) == IFM_AUTO);

	mvneta_update_eee(sc);
	mvneta_update_fc(sc);

	if (IFM_SUBTYPE(media) != IFM_AUTO) {
		/* Force the selected speed/duplex into the MAC. */
		reg = MVNETA_READ(sc, MVNETA_PANC);
		reg &= ~(MVNETA_PANC_SETGMIISPEED |
		    MVNETA_PANC_SETMIISPEED |
		    MVNETA_PANC_SETFULLDX);
		if (IFM_SUBTYPE(media) == IFM_1000_T ||
		    IFM_SUBTYPE(media) == IFM_2500_T) {
			/* Gigabit speeds are full-duplex only on this MAC. */
			if ((media & IFM_FDX) == 0) {
				device_printf(sc->dev,
				    "%s half-duplex unsupported\n",
				    IFM_SUBTYPE(media) == IFM_1000_T ?
				    "1000Base-T" :
				    "2500Base-T");
				err = EINVAL;
				goto out;
			}
			reg |= MVNETA_PANC_SETGMIISPEED;
		} else if (IFM_SUBTYPE(media) == IFM_100_TX)
			reg |= MVNETA_PANC_SETMIISPEED;

		if (media & IFM_FDX)
			reg |= MVNETA_PANC_SETFULLDX;

		MVNETA_WRITE(sc, MVNETA_PANC, reg);
	}
out:
	if (running)
		mvneta_init_locked(sc);
	mvneta_sc_unlock(sc);
	return (err);
}
/*
 * Sync MAC state with the PHY after an mii(4) status change: refresh
 * EEE/flow-control, propagate link up/down transitions, and when the
 * active media changed, force matching speed/duplex bits into PANC.
 */
STATIC void
mvneta_adjust_link(struct mvneta_softc *sc)
{
	boolean_t phy_linkup;
	int reg;

	/* Update eee/fc */
	mvneta_update_eee(sc);
	mvneta_update_fc(sc);

	/* Check for link change */
	phy_linkup = (sc->mii->mii_media_status &
	    (IFM_AVALID | IFM_ACTIVE)) == (IFM_AVALID | IFM_ACTIVE);

	if (sc->linkup != phy_linkup)
		mvneta_linkupdate(sc, phy_linkup);

	/* Don't update media on disabled link */
	if (!phy_linkup)
		return;

	/* Check for media type change */
	if (sc->mvneta_media != sc->mii->mii_media_active) {
		sc->mvneta_media = sc->mii->mii_media_active;

		reg = MVNETA_READ(sc, MVNETA_PANC);
		reg &= ~(MVNETA_PANC_SETGMIISPEED |
		    MVNETA_PANC_SETMIISPEED |
		    MVNETA_PANC_SETFULLDX);
		if (IFM_SUBTYPE(sc->mvneta_media) == IFM_1000_T ||
		    IFM_SUBTYPE(sc->mvneta_media) == IFM_2500_T) {
			reg |= MVNETA_PANC_SETGMIISPEED;
		} else if (IFM_SUBTYPE(sc->mvneta_media) == IFM_100_TX)
			reg |= MVNETA_PANC_SETMIISPEED;

		if (sc->mvneta_media & IFM_FDX)
			reg |= MVNETA_PANC_SETFULLDX;

		MVNETA_WRITE(sc, MVNETA_PANC, reg);
	}
}
/*
 * Link-change interrupt handler (inband status): read the hardware link
 * state and propagate a transition.  Caller holds the softc lock.
 */
STATIC void
mvneta_link_isr(struct mvneta_softc *sc)
{
	int linkup;

	KASSERT_SC_MTX(sc);

	linkup = MVNETA_IS_LINKUP(sc) ? TRUE : FALSE;
	/* No transition: nothing to do. */
	if (sc->linkup == linkup)
		return;

	if (linkup == TRUE)
		mvneta_linkup(sc);
	else
		mvneta_linkdown(sc);

#ifdef DEBUG
	device_printf(sc->dev,
	    "%s: link %s\n", if_name(sc->ifp), linkup ? "up" : "down");
#endif
}
/*
 * Propagate a link transition reported by the PHY (mii path).
 * Caller holds the softc lock.
 */
STATIC void
mvneta_linkupdate(struct mvneta_softc *sc, boolean_t linkup)
{

	KASSERT_SC_MTX(sc);

	if (linkup == TRUE)
		mvneta_linkup(sc);
	else
		mvneta_linkdown(sc);

#ifdef DEBUG
	device_printf(sc->dev,
	    "%s: link %s\n", if_name(sc->ifp), linkup ? "up" : "down");
#endif
}
/*
 * Program the Low Power Idle (EEE) request enable bit in LPIC1
 * according to the sc->cf_lpi sysctl.  Caller holds the softc lock.
 */
STATIC void
mvneta_update_eee(struct mvneta_softc *sc)
{
	uint32_t lpic1;

	KASSERT_SC_MTX(sc);

	/* Clear-then-set keeps all other LPIC1 bits intact. */
	lpic1 = MVNETA_READ(sc, MVNETA_LPIC1) & ~MVNETA_LPIC1_LPIRE;
	if (sc->cf_lpi)
		lpic1 |= MVNETA_LPIC1_LPIRE;
	MVNETA_WRITE(sc, MVNETA_LPIC1, lpic1);
}
/*
 * Program flow-control advertisement/negotiation in PANC according to
 * the sc->cf_fc sysctl.  Caller holds the softc lock.
 */
STATIC void
mvneta_update_fc(struct mvneta_softc *sc)
{
	uint32_t panc;

	KASSERT_SC_MTX(sc);

	/* Start from both FC bits cleared, then enable when requested. */
	panc = MVNETA_READ(sc, MVNETA_PANC) &
	    ~(MVNETA_PANC_PAUSEADV | MVNETA_PANC_ANFCEN);
	if (sc->cf_fc)
		panc |= MVNETA_PANC_PAUSEADV | MVNETA_PANC_ANFCEN;
	MVNETA_WRITE(sc, MVNETA_PANC, panc);
}
/*
 * Bring the link up: force link pass in the MAC (unless a PHY with
 * inband status owns the link), flush stale TX state, enable the port,
 * and notify the network stack.  Caller holds the softc lock.
 */
STATIC void
mvneta_linkup(struct mvneta_softc *sc)
{
	uint32_t reg;

	KASSERT_SC_MTX(sc);

	if (!sc->phy_attached || !sc->use_inband_status) {
		reg  = MVNETA_READ(sc, MVNETA_PANC);
		reg |= MVNETA_PANC_FORCELINKPASS;
		reg &= ~MVNETA_PANC_FORCELINKFAIL;
		MVNETA_WRITE(sc, MVNETA_PANC, reg);
	}

	mvneta_qflush(sc->ifp);
	mvneta_portup(sc);
	sc->linkup = TRUE;
	if_link_state_change(sc->ifp, LINK_STATE_UP);
}
/*
 * Take the link down: force link fail in the MAC (unless a PHY with
 * inband status owns the link), disable the port, flush queued TX, and
 * notify the network stack.  Caller holds the softc lock.
 */
STATIC void
mvneta_linkdown(struct mvneta_softc *sc)
{
	uint32_t reg;

	KASSERT_SC_MTX(sc);

	if (!sc->phy_attached || !sc->use_inband_status) {
		reg  = MVNETA_READ(sc, MVNETA_PANC);
		reg &= ~MVNETA_PANC_FORCELINKPASS;
		reg |= MVNETA_PANC_FORCELINKFAIL;
		MVNETA_WRITE(sc, MVNETA_PANC, reg);
	}

	mvneta_portdown(sc);
	mvneta_qflush(sc->ifp);
	sc->linkup = FALSE;
	if_link_state_change(sc->ifp, LINK_STATE_DOWN);
}
STATIC void
mvneta_linkreset(struct mvneta_softc *sc)
{
struct mii_softc *mii;
if (sc->phy_attached) {
/* Force reset PHY */
mii = LIST_FIRST(&sc->mii->mii_phys);
if (mii)
mii_phy_reset(mii);
}
}
/*
* Tx Subroutines
*/
/*
 * Map one mbuf (chain) onto TX queue q and hand it to the hardware.
 *
 * On success returns 0 and the mbuf is owned by the ring (freed at TX
 * completion).  On unrecoverable failure the mbuf is freed and *mbufp
 * set to NULL; ENOMEM from DMA load is returned with the mbuf intact so
 * the caller may retry.  Caller holds the TX queue lock.
 */
STATIC int
mvneta_tx_queue(struct mvneta_softc *sc, struct mbuf **mbufp, int q)
{
	if_t ifp;
	bus_dma_segment_t txsegs[MVNETA_TX_SEGLIMIT];
	struct mbuf *mtmp, *mbuf;
	struct mvneta_tx_ring *tx;
	struct mvneta_buf *txbuf;
	struct mvneta_tx_desc *t;
	uint32_t ptxsu;
	int used, error, i, txnsegs;

	mbuf = *mbufp;
	tx = MVNETA_TX_RING(sc, q);
	DASSERT(tx->used >= 0);
	DASSERT(tx->used <= MVNETA_TX_RING_CNT);
	t = NULL;
	ifp = sc->ifp;

	/* The hardware has no VLAN tag insertion: encapsulate in software. */
	if (__predict_false(mbuf->m_flags & M_VLANTAG)) {
		mbuf = ether_vlanencap(mbuf, mbuf->m_pkthdr.ether_vtag);
		if (mbuf == NULL) {
			tx->drv_error++;
			*mbufp = NULL;
			return (ENOBUFS);
		}
		mbuf->m_flags &= ~M_VLANTAG;
		*mbufp = mbuf;
	}

	/*
	 * Checksum offload on a multi-fragment chain needs a writable
	 * mbuf; duplicate read-only chains first.
	 */
	if (__predict_false(mbuf->m_next != NULL &&
	    (mbuf->m_pkthdr.csum_flags &
	    (CSUM_IP | CSUM_TCP | CSUM_UDP)) != 0)) {
		if (M_WRITABLE(mbuf) == 0) {
			mtmp = m_dup(mbuf, M_NOWAIT);
			m_freem(mbuf);
			if (mtmp == NULL) {
				tx->drv_error++;
				*mbufp = NULL;
				return (ENOBUFS);
			}
			*mbufp = mbuf = mtmp;
		}
	}

	/* load mbuf using dmamap of 1st descriptor */
	txbuf = &tx->txbuf[tx->cpu];
	error = bus_dmamap_load_mbuf_sg(sc->txmbuf_dtag,
	    txbuf->dmap, mbuf, txsegs, &txnsegs,
	    BUS_DMA_NOWAIT);
	if (__predict_false(error != 0)) {
#ifdef MVNETA_KTR
		CTR3(KTR_SPARE2, "%s:%u bus_dmamap_load_mbuf_sg error=%d", if_name(ifp), q, error);
#endif
		/* This is the only recoverable error (except EFBIG). */
		if (error != ENOMEM) {
			tx->drv_error++;
			m_freem(mbuf);
			*mbufp = NULL;
			return (ENOBUFS);
		}
		return (error);
	}

	/* Reject zero-segment loads and chains exceeding free ring space. */
	if (__predict_false(txnsegs <= 0
	    || (txnsegs + tx->used) > MVNETA_TX_RING_CNT)) {
		/* we have no enough descriptors or mbuf is broken */
#ifdef MVNETA_KTR
		CTR3(KTR_SPARE2, "%s:%u not enough descriptors txnsegs=%d",
		    if_name(ifp), q, txnsegs);
#endif
		bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
		return (ENOBUFS);
	}
	DASSERT(txbuf->m == NULL);

	/* remember mbuf using 1st descriptor */
	txbuf->m = mbuf;
	bus_dmamap_sync(sc->txmbuf_dtag, txbuf->dmap,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* load to tx descriptors: one descriptor per DMA segment */
	used = 0;
	for (i = 0; i < txnsegs; i++) {
		t = &tx->desc[tx->cpu];
		t->command = 0;
		t->l4ichk = 0;
		t->flags = 0;
		if (__predict_true(i == 0)) {
			/* 1st descriptor carries the F(irst) flag and csum setup */
			t->command |= MVNETA_TX_CMD_W_PACKET_OFFSET(0);
			t->command |= MVNETA_TX_CMD_F;
			mvneta_tx_set_csumflag(ifp, t, mbuf);
		}
		t->bufptr_pa = txsegs[i].ds_addr;
		t->bytecnt = txsegs[i].ds_len;
		tx->cpu = tx_counter_adv(tx->cpu, 1);

		tx->used++;
		used++;
	}
	/* t is last descriptor here */
	DASSERT(t != NULL);
	t->command |= MVNETA_TX_CMD_L|MVNETA_TX_CMD_PADDING;

	bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* PTXSU's descriptor-count field holds at most 255 per write. */
	while (__predict_false(used > 255)) {
		ptxsu = MVNETA_PTXSU_NOWD(255);
		MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
		used -= 255;
	}
	if (__predict_true(used > 0)) {
		ptxsu = MVNETA_PTXSU_NOWD(used);
		MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
	}
	return (0);
}
/*
 * Translate the mbuf's checksum-offload request into TX descriptor
 * command bits (L3/L4 type, header length, L3 offset) on descriptor t.
 * Only IPv4 is offloaded; anything else gets CHECKSUM_NONE.
 */
STATIC void
mvneta_tx_set_csumflag(if_t ifp,
    struct mvneta_tx_desc *t, struct mbuf *m)
{
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	int csum_flags;
	uint32_t iphl, ipoff;
	struct ip *ip;

	iphl = ipoff = 0;
	/* Only honor requests the interface currently offloads. */
	csum_flags = if_gethwassist(ifp) & m->m_pkthdr.csum_flags;
	eh = mtod(m, struct ether_header *);

	switch (ntohs(eh->ether_type)) {
	case ETHERTYPE_IP:
		ipoff = ETHER_HDR_LEN;
		break;
	case ETHERTYPE_VLAN:
		ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		/* QinQ: skip a second VLAN tag if present. */
		evh = mtod(m, struct ether_vlan_header *);
		if (ntohs(evh->evl_proto) == ETHERTYPE_VLAN)
			ipoff += ETHER_VLAN_ENCAP_LEN;
		break;
	default:
		csum_flags = 0;
	}

	if (__predict_true(csum_flags & (CSUM_IP|CSUM_IP_TCP|CSUM_IP_UDP))) {
		ip = (struct ip *)(m->m_data + ipoff);
		iphl = ip->ip_hl<<2;
		t->command |= MVNETA_TX_CMD_L3_IP4;
	} else {
		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
		return;
	}

	/* L3 */
	if (csum_flags & CSUM_IP) {
		t->command |= MVNETA_TX_CMD_IP4_CHECKSUM;
	}

	/* L4 */
	if (csum_flags & CSUM_IP_TCP) {
		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
		t->command |= MVNETA_TX_CMD_L4_TCP;
	} else if (csum_flags & CSUM_IP_UDP) {
		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
		t->command |= MVNETA_TX_CMD_L4_UDP;
	} else
		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;

	t->l4ichk = 0;
	t->command |= MVNETA_TX_CMD_IP_HEADER_LEN(iphl >> 2);
	t->command |= MVNETA_TX_CMD_L3_OFFSET(ipoff);
}
/*
 * Reclaim completed TX descriptors on queue q: free transmitted mbufs,
 * advance the ring, acknowledge the count to the hardware (PTXSU), and
 * update the watchdog state.  Caller holds the TX queue lock.
 */
STATIC void
mvneta_tx_queue_complete(struct mvneta_softc *sc, int q)
{
	struct mvneta_tx_ring *tx;
	struct mvneta_buf *txbuf;
	struct mvneta_tx_desc *t __diagused;
	uint32_t ptxs, ptxsu, ndesc;
	int i;

	KASSERT_TX_MTX(sc, q);

	tx = MVNETA_TX_RING(sc, q);
	if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED))
		return;

	ptxs = MVNETA_READ(sc, MVNETA_PTXS(q));
	ndesc = MVNETA_PTXS_GET_TBC(ptxs);

	if (__predict_false(ndesc == 0)) {
		/* Nothing completed: idle if empty, hung if stuck too long. */
		if (tx->used == 0)
			tx->queue_status = MVNETA_QUEUE_IDLE;
		else if (tx->queue_status == MVNETA_QUEUE_WORKING &&
		    ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG))
			tx->queue_hung = TRUE;
		return;
	}

#ifdef MVNETA_KTR
	CTR3(KTR_SPARE2, "%s:%u tx_complete begin ndesc=%u",
	    if_name(sc->ifp), q, ndesc);
#endif

	bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < ndesc; i++) {
		t = &tx->desc[tx->dma];
#ifdef MVNETA_KTR
		if (t->flags & MVNETA_TX_F_ES)
			CTR3(KTR_SPARE2, "%s tx error queue %d desc %d",
			    if_name(sc->ifp), q, tx->dma);
#endif
		txbuf = &tx->txbuf[tx->dma];
		/* Only the first descriptor of a packet carries the mbuf. */
		if (__predict_true(txbuf->m != NULL)) {
			DASSERT((t->command & MVNETA_TX_CMD_F) != 0);
			bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
			m_freem(txbuf->m);
			txbuf->m = NULL;
		}
		else
			/*
			 * NOTE(review): this masks t->flags with a command-bit
			 * constant (MVNETA_TX_CMD_F); the matching assert above
			 * checks t->command — looks like it should be
			 * t->command here too.  Diagnostic only; verify.
			 */
			DASSERT((t->flags & MVNETA_TX_CMD_F) == 0);
		tx->dma = tx_counter_adv(tx->dma, 1);
		tx->used--;
	}
	DASSERT(tx->used >= 0);
	DASSERT(tx->used <= MVNETA_TX_RING_CNT);

	/* PTXSU's released-buffer field holds at most 255 per write. */
	while (__predict_false(ndesc > 255)) {
		ptxsu = MVNETA_PTXSU_NORB(255);
		MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
		ndesc -= 255;
	}
	if (__predict_true(ndesc > 0)) {
		ptxsu = MVNETA_PTXSU_NORB(ndesc);
		MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
	}
#ifdef MVNETA_KTR
	CTR5(KTR_SPARE2, "%s:%u tx_complete tx_cpu=%d tx_dma=%d tx_used=%d",
	    if_name(sc->ifp), q, tx->cpu, tx->dma, tx->used);
#endif

	tx->watchdog_time = ticks;

	if (tx->used == 0)
		tx->queue_status = MVNETA_QUEUE_IDLE;
}
/*
 * Do a final TX complete when TX is idle.
 */
STATIC void
mvneta_tx_drain(struct mvneta_softc *sc)
{
	struct mvneta_tx_ring *tx;
	int q;

	/*
	 * Handle trailing mbuf on TX queue.
	 * Check is done lockless to avoid TX path contention.
	 */
	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		tx = MVNETA_TX_RING(sc, q);
		/* Reclaim only queues quiet past the completion timeout. */
		if ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG_TXCOMP &&
		    tx->used > 0) {
			mvneta_tx_lockq(sc, q);
			mvneta_tx_queue_complete(sc, q);
			mvneta_tx_unlockq(sc, q);
		}
	}
}
/*
* Rx Subroutines
*/
/*
 * Service up to 'count' received frames on queue q (count <= 0 means
 * no limit).  Returns 1 when frames remain beyond the budget so the
 * caller can reschedule, 0 otherwise.
 */
STATIC int
mvneta_rx(struct mvneta_softc *sc, int q, int count)
{
	uint32_t status, avail;
	int again;

	again = 0;

	mvneta_rx_lockq(sc, q);
	status = MVNETA_READ(sc, MVNETA_PRXS(q));
	avail = MVNETA_PRXS_GET_ODC(status);
	if (__predict_true(avail != 0)) {
		/* Clip to the budget and remember there is more work. */
		if (count > 0 && avail > count) {
			again = 1;
			avail = count;
		}
		mvneta_rx_queue(sc, q, avail);
	}
	mvneta_rx_unlockq(sc, q);

	return (again);
}
/*
 * Acknowledge 'processed' RX descriptors to the hardware via PRXSU for
 * queue q.  The register field holds at most 255, so larger counts are
 * written in chunks; a count of zero still performs one (harmless)
 * zero write, matching the hardware-update contract of the callers.
 */
STATIC __inline void
mvneta_prxsu_update(struct mvneta_softc *sc, int q, int processed)
{
	int chunk;

	do {
		chunk = (processed > 255) ? 255 : processed;
		MVNETA_WRITE(sc, MVNETA_PRXSU(q),
		    MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(chunk));
		processed -= chunk;
	} while (processed > 0);
}
/* Hint the CPU to prefetch the cache line at p (performance only). */
static __inline void
mvneta_prefetch(void *p)
{

	__builtin_prefetch(p);
}
/*
 * Process npkt received frames on queue q: unload each buffer, hand
 * good frames to LRO or if_input() (dropping the queue lock around the
 * stack call), drop errored frames, acknowledge/refill the ring
 * periodically, and flush LRO at the end.  Caller holds the RX queue
 * lock; it is released and re-acquired around if_input().
 */
STATIC void
mvneta_rx_queue(struct mvneta_softc *sc, int q, int npkt)
{
	if_t ifp;
	struct mvneta_rx_ring *rx;
	struct mvneta_rx_desc *r;
	struct mvneta_buf *rxbuf;
	struct mbuf *m;
	struct lro_ctrl *lro;
	struct lro_entry *queued;
	void *pktbuf;
	int i, pktlen, processed, ndma;

	KASSERT_RX_MTX(sc, q);

	ifp = sc->ifp;
	rx = MVNETA_RX_RING(sc, q);
	processed = 0;

	if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
		return;

	bus_dmamap_sync(sc->rx_dtag, rx->desc_map,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < npkt; i++) {
		/* Prefetch next desc, rxbuf. */
		ndma = rx_counter_adv(rx->dma, 1);
		mvneta_prefetch(&rx->desc[ndma]);
		mvneta_prefetch(&rx->rxbuf[ndma]);

		/* get descriptor and packet */
		r = &rx->desc[rx->dma];
		rxbuf = &rx->rxbuf[rx->dma];
		m = rxbuf->m;
		rxbuf->m = NULL;
		DASSERT(m != NULL);
		bus_dmamap_sync(sc->rxbuf_dtag, rxbuf->dmap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
		/* Prefetch mbuf header. */
		mvneta_prefetch(m);

		processed++;
		/* Drop desc with error status or not in a single buffer. */
		DASSERT((r->status & (MVNETA_RX_F|MVNETA_RX_L)) ==
		    (MVNETA_RX_F|MVNETA_RX_L));
		if (__predict_false((r->status & MVNETA_RX_ES) ||
		    (r->status & (MVNETA_RX_F|MVNETA_RX_L)) !=
		    (MVNETA_RX_F|MVNETA_RX_L)))
			goto rx_error;

		/*
		 * [ OFF | MH | PKT | CRC ]
		 * bytecnt cover MH, PKT, CRC
		 */
		pktlen = r->bytecnt - ETHER_CRC_LEN - MVNETA_HWHEADER_SIZE;
		pktbuf = (uint8_t *)rx->rxbuf_virt_addr[rx->dma] + MVNETA_PACKET_OFFSET +
		    MVNETA_HWHEADER_SIZE;

		/* Prefetch mbuf data. */
		mvneta_prefetch(pktbuf);

		/* Write value to mbuf (avoid read). */
		m->m_data = pktbuf;
		m->m_len = m->m_pkthdr.len = pktlen;
		m->m_pkthdr.rcvif = ifp;
		mvneta_rx_set_csumflag(ifp, r, m);

		/* Increase rx_dma before releasing the lock. */
		rx->dma = ndma;

		/* Try LRO only for verified TCP/IPv4 frames. */
		if (__predict_false(rx->lro_enabled &&
		    ((r->status & MVNETA_RX_L3_IP) != 0) &&
		    ((r->status & MVNETA_RX_L4_MASK) == MVNETA_RX_L4_TCP) &&
		    (m->m_pkthdr.csum_flags &
		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) {
			if (rx->lro.lro_cnt != 0) {
				if (tcp_lro_rx(&rx->lro, m, 0) == 0)
					goto rx_done;
			}
		}

		/* Drop the queue lock while the stack consumes the mbuf. */
		mvneta_rx_unlockq(sc, q);
		if_input(ifp, m);
		mvneta_rx_lockq(sc, q);
		/*
		 * Check whether this queue has been disabled in the
		 * meantime. If yes, then clear LRO and exit.
		 */
		if(__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
			goto rx_lro;
rx_done:
		/* Refresh receive ring to avoid stall and minimize jitter. */
		if (processed >= MVNETA_RX_REFILL_COUNT) {
			mvneta_prxsu_update(sc, q, processed);
			mvneta_rx_queue_refill(sc, q);
			processed = 0;
		}
		continue;
rx_error:
		m_freem(m);
		rx->dma = ndma;
		/* Refresh receive ring to avoid stall and minimize jitter. */
		if (processed >= MVNETA_RX_REFILL_COUNT) {
			mvneta_prxsu_update(sc, q, processed);
			mvneta_rx_queue_refill(sc, q);
			processed = 0;
		}
	}
#ifdef MVNETA_KTR
	CTR3(KTR_SPARE2, "%s:%u %u packets received", if_name(ifp), q, npkt);
#endif
	/* DMA status update */
	mvneta_prxsu_update(sc, q, processed);
	/* Refill the rest of buffers if there are any to refill */
	mvneta_rx_queue_refill(sc, q);

rx_lro:
	/*
	 * Flush any outstanding LRO work
	 */
	lro = &rx->lro;
	while (__predict_false((queued = LIST_FIRST(&lro->lro_active)) != NULL)) {
		LIST_REMOVE(LIST_FIRST((&lro->lro_active)), next);
		tcp_lro_flush(lro, queued);
	}
}
/* Unload the RX buffer's DMA map and free its mbuf (chain). */
STATIC void
mvneta_rx_buf_free(struct mvneta_softc *sc, struct mvneta_buf *rxbuf)
{

	bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
	/* This will remove all data at once */
	m_freem(rxbuf->m);
}
/*
 * Refill queue q's RX ring with fresh jumbo-cluster mbufs up to ring
 * capacity, then report the newly added descriptors to the hardware
 * (PRXSU, in chunks of at most 255).  If the ring ends up completely
 * empty, needs_refill is set so a later retry can recover.
 * Caller holds the RX queue lock.
 */
STATIC void
mvneta_rx_queue_refill(struct mvneta_softc *sc, int q)
{
	struct mvneta_rx_ring *rx;
	struct mvneta_rx_desc *r;
	struct mvneta_buf *rxbuf;
	bus_dma_segment_t segs;
	struct mbuf *m;
	uint32_t prxs, prxsu, ndesc;
	int npkt, refill, nsegs, error;

	KASSERT_RX_MTX(sc, q);

	rx = MVNETA_RX_RING(sc, q);
	/* Occupied + non-occupied counts give the currently filled total. */
	prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
	ndesc = MVNETA_PRXS_GET_NODC(prxs) + MVNETA_PRXS_GET_ODC(prxs);
	refill = MVNETA_RX_RING_CNT - ndesc;
#ifdef MVNETA_KTR
	CTR3(KTR_SPARE2, "%s:%u refill %u packets", if_name(sc->ifp), q,
	    refill);
#endif
	if (__predict_false(refill <= 0))
		return;

	for (npkt = 0; npkt < refill; npkt++) {
		rxbuf = &rx->rxbuf[rx->cpu];
		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, sc->rx_frame_size);
		if (__predict_false(m == NULL)) {
			error = ENOBUFS;
			break;
		}
		m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

		error = bus_dmamap_load_mbuf_sg(sc->rxbuf_dtag, rxbuf->dmap,
		    m, &segs, &nsegs, BUS_DMA_NOWAIT);
		if (__predict_false(error != 0 || nsegs != 1)) {
			/*
			 * NOTE(review): KASSERT(1, ...) can never fire (the
			 * condition is always true) — presumably intended as a
			 * diagnostic for this failure path; verify intent.
			 */
			KASSERT(1, ("Failed to load Rx mbuf DMA map"));
			m_freem(m);
			break;
		}

		/* Add the packet to the ring */
		rxbuf->m = m;
		r = &rx->desc[rx->cpu];
		r->bufptr_pa = segs.ds_addr;
		rx->rxbuf_virt_addr[rx->cpu] = m->m_data;

		rx->cpu = rx_counter_adv(rx->cpu, 1);
	}
	if (npkt == 0) {
		/* Ring fully drained and no buffer added: flag for retry. */
		if (refill == MVNETA_RX_RING_CNT)
			rx->needs_refill = TRUE;
		return;
	}

	rx->needs_refill = FALSE;
	bus_dmamap_sync(sc->rx_dtag, rx->desc_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* PRXSU's new-descriptor field holds at most 255 per write. */
	while (__predict_false(npkt > 255)) {
		prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(255);
		MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
		npkt -= 255;
	}
	if (__predict_true(npkt > 0)) {
		prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(npkt);
		MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
	}
}
/*
 * Translate RX descriptor checksum status bits into mbuf csum_flags:
 * L3 (IPv4 header) validity, and for verified TCP/UDP an L4 valid
 * pseudo-header checksum.  Non-IP frames are left untouched.
 */
STATIC __inline void
mvneta_rx_set_csumflag(if_t ifp,
    struct mvneta_rx_desc *r, struct mbuf *m)
{
	uint32_t csum_flags;

	csum_flags = 0;
	if (__predict_false((r->status &
	    (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) == 0))
		return; /* not a IP packet */

	/* L3 */
	if (__predict_true((r->status & MVNETA_RX_IP_HEADER_OK) ==
	    MVNETA_RX_IP_HEADER_OK))
		csum_flags |= CSUM_L3_CALC|CSUM_L3_VALID;

	if (__predict_true((r->status & (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) ==
	    (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP))) {
		/* L4 */
		switch (r->status & MVNETA_RX_L4_MASK) {
		case MVNETA_RX_L4_TCP:
		case MVNETA_RX_L4_UDP:
			csum_flags |= CSUM_L4_CALC;
			if (__predict_true((r->status &
			    MVNETA_RX_L4_CHECKSUM_OK) == MVNETA_RX_L4_CHECKSUM_OK)) {
				csum_flags |= CSUM_L4_VALID;
				/* Stack expects 0xffff for a verified csum. */
				m->m_pkthdr.csum_data = htons(0xffff);
			}
			break;
		case MVNETA_RX_L4_OTH:
		default:
			break;
		}
	}
	m->m_pkthdr.csum_flags = csum_flags;
}
/*
* MAC address filter
*/
/*
 * Program the MAC address filter tables (unicast/multicast) and port
 * configuration from the interface flags.  Multicast filtering is not
 * implemented, so IFF_ALLMULTI is forced on.  Caller holds the softc
 * lock.
 */
STATIC void
mvneta_filter_setup(struct mvneta_softc *sc)
{
	if_t ifp;
	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
	uint32_t pxc;
	int i;

	KASSERT_SC_MTX(sc);

	memset(dfut, 0, sizeof(dfut));
	memset(dfsmt, 0, sizeof(dfsmt));
	memset(dfomt, 0, sizeof(dfomt));

	/* No per-group multicast filtering: always run ALLMULTI. */
	if_setflagbits(ifp, IFF_ALLMULTI, 0);
	if (if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) {
		/* Pass all multicast: every table entry passes to queue 0. */
		for (i = 0; i < MVNETA_NDFSMT; i++) {
			dfsmt[i] = dfomt[i] =
			    MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
		}
	}

	/* Route ARP/TCP/UDP/BPDU traffic to the last RX queue. */
	pxc = MVNETA_READ(sc, MVNETA_PXC);
	pxc &= ~(MVNETA_PXC_UPM | MVNETA_PXC_RXQ_MASK | MVNETA_PXC_RXQARP_MASK |
	    MVNETA_PXC_TCPQ_MASK | MVNETA_PXC_UDPQ_MASK | MVNETA_PXC_BPDUQ_MASK);
	pxc |= MVNETA_PXC_RXQ(MVNETA_RX_QNUM_MAX-1);
	pxc |= MVNETA_PXC_RXQARP(MVNETA_RX_QNUM_MAX-1);
	pxc |= MVNETA_PXC_TCPQ(MVNETA_RX_QNUM_MAX-1);
	pxc |= MVNETA_PXC_UDPQ(MVNETA_RX_QNUM_MAX-1);
	pxc |= MVNETA_PXC_BPDUQ(MVNETA_RX_QNUM_MAX-1);
	/* Reject broadcast by default; clear the reject bits if wanted. */
	pxc |= MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP;
	if (if_getflags(ifp) & IFF_BROADCAST) {
		pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP);
	}
	if (if_getflags(ifp) & IFF_PROMISC) {
		pxc |= MVNETA_PXC_UPM;
	}
	MVNETA_WRITE(sc, MVNETA_PXC, pxc);

	/* Set Destination Address Filter Unicast Table */
	if (if_getflags(ifp) & IFF_PROMISC) {
		/* pass all unicast addresses */
		for (i = 0; i < MVNETA_NDFUT; i++) {
			dfut[i] =
			    MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
		}
	} else {
		/* Pass only our own station address. */
		i = sc->enaddr[5] & 0xf;		/* last nibble */
		dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
	}
	MVNETA_WRITE_REGION(sc, MVNETA_DFUT(0), dfut, MVNETA_NDFUT);

	/* Set Destination Address Filter Multicast Tables */
	MVNETA_WRITE_REGION(sc, MVNETA_DFSMT(0), dfsmt, MVNETA_NDFSMT);
	MVNETA_WRITE_REGION(sc, MVNETA_DFOMT(0), dfomt, MVNETA_NDFOMT);
}
/*
* sysctl(9)
*/
/*
 * Sysctl handler: report one cached 64-bit MIB counter.
 * arg1 points at the per-counter mvneta_sysctl_mib registered in
 * sysctl_mvneta_init().  Returns 0 or EINVAL on bad arguments.
 */
STATIC int
sysctl_read_mib(SYSCTL_HANDLER_ARGS)
{
	struct mvneta_sysctl_mib *arg;
	struct mvneta_softc *sc;
	uint64_t val;

	arg = (struct mvneta_sysctl_mib *)arg1;
	if (arg == NULL)
		return (EINVAL);

	sc = arg->sc;
	if (sc == NULL)
		return (EINVAL);
	/*
	 * sysctl_mib[] holds MVNETA_PORTMIB_NOCOUNTER entries, so the
	 * valid index range is [0, NOCOUNTER).  The previous '>' test
	 * was off by one and accepted index == NOCOUNTER.
	 */
	if (arg->index < 0 || arg->index >= MVNETA_PORTMIB_NOCOUNTER)
		return (EINVAL);

	/* Snapshot the counter under the lock, then hand it to sysctl. */
	mvneta_sc_lock(sc);
	val = arg->counter;
	mvneta_sc_unlock(sc);
	return sysctl_handle_64(oidp, &val, 0, req);
}
/*
 * Sysctl handler: writing 1 resets all MIB counters; reads return 0.
 * arg1 is the softc.  Returns 0, or EINVAL for bad input.
 */
STATIC int
sysctl_clear_mib(SYSCTL_HANDLER_ARGS)
{
	struct mvneta_softc *sc;
	int rc, v;

	sc = (struct mvneta_softc *)arg1;
	if (sc == NULL)
		return (EINVAL);

	v = 0;
	rc = sysctl_handle_int(oidp, &v, 0, req);
	if (rc != 0)
		return (rc);
	/* Only 0 (no-op) and 1 (clear) are accepted. */
	if (v < 0 || v > 1)
		return (EINVAL);
	if (v == 1) {
		mvneta_sc_lock(sc);
		mvneta_clear_mib(sc);
		mvneta_sc_unlock(sc);
	}
	return (0);
}
STATIC int
sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS)
{
struct mvneta_sysctl_queue *arg;
struct mvneta_rx_ring *rx;
struct mvneta_softc *sc;
uint32_t reg, time_mvtclk;
int err, time_us;
rx = NULL;
arg = (struct mvneta_sysctl_queue *)arg1;
if (arg == NULL)
return (EINVAL);
if (arg->queue < 0 || arg->queue > MVNETA_RX_RING_CNT)
return (EINVAL);
if (arg->rxtx != MVNETA_SYSCTL_RX)
return (EINVAL);
sc = arg->sc;
if (sc == NULL)
return (EINVAL);
/* read queue length */
mvneta_sc_lock(sc);
mvneta_rx_lockq(sc, arg->queue);
rx = MVNETA_RX_RING(sc, arg->queue);
time_mvtclk = rx->queue_th_time;
time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / sc->clk_freq;
mvneta_rx_unlockq(sc, arg->queue);
mvneta_sc_unlock(sc);
err = sysctl_handle_int(oidp, &time_us, 0, req);
if (err != 0)
return (err);
mvneta_sc_lock(sc);
mvneta_rx_lockq(sc, arg->queue);
/* update queue length (0[sec] - 1[sec]) */
if (time_us < 0 || time_us > (1000 * 1000)) {
mvneta_rx_unlockq(sc, arg->queue);
mvneta_sc_unlock(sc);
return (EINVAL);
}
time_mvtclk = sc->clk_freq * (uint64_t)time_us / (1000ULL * 1000ULL);
rx->queue_th_time = time_mvtclk;
reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
MVNETA_WRITE(sc, MVNETA_PRXITTH(arg->queue), reg);
mvneta_rx_unlockq(sc, arg->queue);
mvneta_sc_unlock(sc);
return (0);
}
/*
 * Register the device's sysctl tree: flow-control/LPI knobs, one node
 * per MIB counter (plus discard/overrun/watchdog and a reset handler),
 * and a per-RX-queue interrupt coalescing timer.
 */
STATIC void
sysctl_mvneta_init(struct mvneta_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid_list *rxchildren;
	struct sysctl_oid_list *qchildren, *mchildren;
	struct sysctl_oid *tree;
	int i, q;
	struct mvneta_sysctl_queue *rxarg;
#define	MVNETA_SYSCTL_NAME(num) "queue" # num
	static const char *sysctl_queue_names[] = {
		MVNETA_SYSCTL_NAME(0), MVNETA_SYSCTL_NAME(1),
		MVNETA_SYSCTL_NAME(2), MVNETA_SYSCTL_NAME(3),
		MVNETA_SYSCTL_NAME(4), MVNETA_SYSCTL_NAME(5),
		MVNETA_SYSCTL_NAME(6), MVNETA_SYSCTL_NAME(7),
	};
#undef MVNETA_SYSCTL_NAME

#ifndef NO_SYSCTL_DESCR
#define	MVNETA_SYSCTL_DESCR(num) "configuration parameters for queue " # num
	static const char *sysctl_queue_descrs[] = {
		MVNETA_SYSCTL_DESCR(0), MVNETA_SYSCTL_DESCR(1),
		MVNETA_SYSCTL_DESCR(2), MVNETA_SYSCTL_DESCR(3),
		MVNETA_SYSCTL_DESCR(4), MVNETA_SYSCTL_DESCR(5),
		MVNETA_SYSCTL_DESCR(6), MVNETA_SYSCTL_DESCR(7),
	};
#undef MVNETA_SYSCTL_DESCR
#endif

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "NETA RX");
	rxchildren = SYSCTL_CHILDREN(tree);
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "mib",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "NETA MIB");
	mchildren = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "flow_control",
	    CTLFLAG_RW, &sc->cf_fc, 0, "flow control");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpi",
	    CTLFLAG_RW, &sc->cf_lpi, 0, "Low Power Idle");

	/*
	 * MIB access
	 */
	/* dev.mvneta.[unit].mib.<mibs> */
	for (i = 0; i < MVNETA_PORTMIB_NOCOUNTER; i++) {
		struct mvneta_sysctl_mib *mib_arg = &sc->sysctl_mib[i];

		mib_arg->sc = sc;
		mib_arg->index = i;
		SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO,
		    mvneta_mib_list[i].sysctl_name,
		    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
		    (void *)mib_arg, 0, sysctl_read_mib, "I",
		    mvneta_mib_list[i].desc);
	}
	/* Counters not covered by the MIB block. */
	SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "rx_discard",
	    CTLFLAG_RD, &sc->counter_pdfc, "Port Rx Discard Frame Counter");
	SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "overrun",
	    CTLFLAG_RD, &sc->counter_pofc, "Port Overrun Frame Counter");
	SYSCTL_ADD_UINT(ctx, mchildren, OID_AUTO, "watchdog",
	    CTLFLAG_RD, &sc->counter_watchdog, 0, "TX Watchdog Counter");

	SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO, "reset",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)sc, 0, sysctl_clear_mib, "I", "Reset MIB counters");

	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		rxarg = &sc->sysctl_rx_queue[q];

		rxarg->sc = sc;
		rxarg->queue = q;
		rxarg->rxtx = MVNETA_SYSCTL_RX;

		/* hw.mvneta.mvneta[unit].rx.[queue] */
		tree = SYSCTL_ADD_NODE(ctx, rxchildren, OID_AUTO,
		    sysctl_queue_names[q], CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
		    sysctl_queue_descrs[q]);
		qchildren = SYSCTL_CHILDREN(tree);

		/* hw.mvneta.mvneta[unit].rx.[queue].threshold_timer_us */
		SYSCTL_ADD_PROC(ctx, qchildren, OID_AUTO, "threshold_timer_us",
		    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, rxarg, 0,
		    sysctl_set_queue_rxthtime, "I",
		    "interrupt coalescing threshold timer [us]");
	}
}
/*
* MIB
*/
/*
 * Read one MIB counter from the hardware.  32-bit counters come from a
 * single register; entries flagged reg64 expose their high half in the
 * register 4 bytes above the base one.
 */
STATIC uint64_t
mvneta_read_mib(struct mvneta_softc *sc, int index)
{
	struct mvneta_mib_def *entry = &mvneta_mib_list[index];
	uint64_t counter;

	counter = MVNETA_READ_MIB(sc, entry->regnum);
	if (entry->reg64) {
		counter |=
		    (uint64_t)MVNETA_READ_MIB(sc, entry->regnum + 4) << 32;
	}
	return (counter);
}
/*
 * Reset all MIB statistics: read (and discard) every hardware MIB
 * counter, then zero the software mirrors, including the out-of-MIB
 * discard/overrun/watchdog counters.  Requires the softc mutex.
 */
STATIC void
mvneta_clear_mib(struct mvneta_softc *sc)
{
	int idx;

	KASSERT_SC_MTX(sc);

	for (idx = 0; idx < nitems(mvneta_mib_list); idx++) {
		/* The read itself consumes the hardware value. */
		(void)mvneta_read_mib(sc, idx);
		sc->sysctl_mib[idx].counter = 0;
	}
	MVNETA_READ(sc, MVNETA_PDFC);
	sc->counter_pdfc = 0;
	MVNETA_READ(sc, MVNETA_POFC);
	sc->counter_pofc = 0;
	sc->counter_watchdog = 0;
}
/*
 * Fold the hardware MIB counters into their software mirrors and into
 * the generic ifnet counters.  Each value read here is a delta since the
 * previous update (the reads appear to be clear-on-read — see
 * mvneta_clear_mib(), which reads purely to discard).
 */
STATIC void
mvneta_update_mib(struct mvneta_softc *sc)
{
	struct mvneta_tx_ring *tx;
	int i;
	uint64_t val;
	uint32_t reg;

	for (i = 0; i < nitems(mvneta_mib_list); i++) {
		/* Zero delta: nothing to accumulate for this counter. */
		val = mvneta_read_mib(sc, i);
		if (val == 0)
			continue;

		sc->sysctl_mib[i].counter += val;

		/* Mirror selected counters into the if_ statistics. */
		switch (mvneta_mib_list[i].regnum) {
		case MVNETA_MIB_RX_GOOD_OCT:
			if_inc_counter(sc->ifp, IFCOUNTER_IBYTES, val);
			break;
		case MVNETA_MIB_RX_BAD_FRAME:
			if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, val);
			break;
		case MVNETA_MIB_RX_GOOD_FRAME:
			if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, val);
			break;
		case MVNETA_MIB_RX_MCAST_FRAME:
			if_inc_counter(sc->ifp, IFCOUNTER_IMCASTS, val);
			break;
		case MVNETA_MIB_TX_GOOD_OCT:
			if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, val);
			break;
		case MVNETA_MIB_TX_GOOD_FRAME:
			if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, val);
			break;
		case MVNETA_MIB_TX_MCAST_FRAME:
			if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, val);
			break;
		case MVNETA_MIB_MAC_COL:
			if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, val);
			break;
		case MVNETA_MIB_TX_MAC_TRNS_ERR:
		case MVNETA_MIB_TX_EXCES_COL:
		case MVNETA_MIB_MAC_LATE_COL:
			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, val);
			break;
		}
	}

	/* Port discard/overrun counters live outside the MIB register block. */
	reg = MVNETA_READ(sc, MVNETA_PDFC);
	sc->counter_pdfc += reg;
	if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
	reg = MVNETA_READ(sc, MVNETA_POFC);
	sc->counter_pofc += reg;
	if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);

	/* TX watchdog. */
	if (sc->counter_watchdog_mib > 0) {
		if_inc_counter(sc->ifp, IFCOUNTER_OERRORS,
		    sc->counter_watchdog_mib);
		sc->counter_watchdog_mib = 0;
	}

	/*
	 * TX driver errors:
	 * We do not take queue locks to not disrupt TX path.
	 * We may only miss one drv error which will be fixed at
	 * next mib update. We may also clear counter when TX path
	 * is incrementing it but we only do it if counter was not zero
	 * thus we may only loose one error.
	 */
	for (i = 0; i < MVNETA_TX_QNUM_MAX; i++) {
		tx = MVNETA_TX_RING(sc, i);

		if (tx->drv_error > 0) {
			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS,
			    tx->drv_error);
			tx->drv_error = 0;
		}
	}
}
diff --git a/sys/dev/netmap/if_ptnet.c b/sys/dev/netmap/if_ptnet.c
index 56d853eb7392..cea62517b2e6 100644
--- a/sys/dev/netmap/if_ptnet.c
+++ b/sys/dev/netmap/if_ptnet.c
@@ -1,1987 +1,1981 @@
/*-
* Copyright (c) 2016, Vincenzo Maffione
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Driver for ptnet paravirtualized network device. */
#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <machine/smp.h>
#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>
#include <net/bpf.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include <dev/netmap/netmap_mem2.h>
#include <dev/virtio/network/virtio_net.h>
#ifdef WITH_PTNETMAP
#ifndef INET
#error "INET not defined, cannot support offloadings"
#endif
static uint64_t ptnet_get_counter(if_t, ift_counter);
//#define PTNETMAP_STATS
//#define DEBUG
#ifdef DEBUG
#define DBG(x) x
#else /* !DEBUG */
#define DBG(x)
#endif /* !DEBUG */
extern int ptnet_vnet_hdr; /* Tunable parameter */
struct ptnet_softc;
/*
 * Per-queue statistics, accumulated by the datapath and aggregated into
 * the ifnet counters by ptnet_get_counter().  The trailing comments name
 * the ifnet counter each field feeds.
 */
struct ptnet_queue_stats {
	uint64_t	packets; /* if_[io]packets */
	uint64_t	bytes;	 /* if_[io]bytes */
	uint64_t	errors;	 /* if_[io]errors */
	uint64_t	iqdrops; /* if_iqdrops */
	uint64_t	mcasts;  /* if_[io]mcasts */
#ifdef PTNETMAP_STATS
	/* Extra instrumentation, reported by ptnet_tick(). */
	uint64_t	intrs;
	uint64_t	kicks;
#endif /* PTNETMAP_STATS */
};
/*
 * Per-ring (TX or RX) state.  Each queue owns one MSI-X vector, one CSB
 * entry pair and one taskqueue for deferred processing.
 */
struct ptnet_queue {
	struct ptnet_softc		*sc;	  /* backpointer to device softc */
	struct				resource *irq; /* per-queue MSI-X IRQ resource */
	void				*cookie;  /* bus_setup_intr() handle */
	int				kring_id; /* netmap kring index (per direction) */
	struct nm_csb_atok		*atok;	  /* CSB guest-to-host entry */
	struct nm_csb_ktoa		*ktoa;	  /* CSB host-to-guest entry */
	unsigned int			kick;	  /* I/O BAR offset of doorbell reg */
	struct mtx			lock;	  /* protects bufring and CSB flags */
	struct buf_ring			*bufring; /* for TX queues */
	struct ptnet_queue_stats	stats;	  /* live counters */
#ifdef PTNETMAP_STATS
	struct ptnet_queue_stats	last_stats; /* snapshot for ptnet_tick() deltas */
#endif /* PTNETMAP_STATS */
	struct taskqueue		*taskq;	  /* deferred TX/RX work */
	struct task			task;
	char				lock_name[16];
};
#define PTNET_Q_LOCK(_pq) mtx_lock(&(_pq)->lock)
#define PTNET_Q_TRYLOCK(_pq) mtx_trylock(&(_pq)->lock)
#define PTNET_Q_UNLOCK(_pq) mtx_unlock(&(_pq)->lock)
/* Per-device softc for the ptnet paravirtualized adapter. */
struct ptnet_softc {
	device_t		dev;
	if_t			ifp;
	struct ifmedia		media;
	struct mtx		lock;		/* core lock (init/stop/ioctl) */
	char			lock_name[16];
	char			hwaddr[ETHER_ADDR_LEN];

	/* Mirror of PTFEAT register. */
	uint32_t		ptfeatures;
	unsigned int		vnet_hdr_len;	/* negotiated virtio-net hdr len */

	/* PCI BARs support. */
	struct resource		*iomem;		/* I/O register BAR */
	struct resource		*msix_mem;	/* MSI-X table BAR */

	unsigned int		num_rings;	/* TX + RX rings */
	unsigned int		num_tx_rings;
	struct ptnet_queue	*queues;	/* TX queues first, then RX */
	struct ptnet_queue	*rxqueues;	/* = queues + num_tx_rings */
	struct nm_csb_atok	*csb_gh;	/* guest-to-host CSB page */
	struct nm_csb_ktoa	*csb_hg;	/* host-to-guest CSB page */

	unsigned int		min_tx_space;	/* slots needed before TX proceeds */

	struct netmap_pt_guest_adapter *ptna;	/* set after netmap_pt_guest_attach() */

	struct callout		tick;		/* PTNETMAP_STATS periodic dump */
#ifdef PTNETMAP_STATS
	struct timeval		last_ts;
#endif /* PTNETMAP_STATS */
};
#define PTNET_CORE_LOCK(_sc) mtx_lock(&(_sc)->lock)
#define PTNET_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->lock)
static int ptnet_probe(device_t);
static int ptnet_attach(device_t);
static int ptnet_detach(device_t);
static int ptnet_suspend(device_t);
static int ptnet_resume(device_t);
static int ptnet_shutdown(device_t);
static void ptnet_init(void *opaque);
static int ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int ptnet_init_locked(struct ptnet_softc *sc);
static int ptnet_stop(struct ptnet_softc *sc);
static int ptnet_transmit(if_t ifp, struct mbuf *m);
static int ptnet_drain_transmit_queue(struct ptnet_queue *pq,
unsigned int budget,
bool may_resched);
static void ptnet_qflush(if_t ifp);
static void ptnet_tx_task(void *context, int pending);
static int ptnet_media_change(if_t ifp);
static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr);
#ifdef PTNETMAP_STATS
static void ptnet_tick(void *opaque);
#endif
static int ptnet_irqs_init(struct ptnet_softc *sc);
static void ptnet_irqs_fini(struct ptnet_softc *sc);
static uint32_t ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd);
static int ptnet_nm_config(struct netmap_adapter *na,
struct nm_config_info *info);
static void ptnet_update_vnet_hdr(struct ptnet_softc *sc);
static int ptnet_nm_register(struct netmap_adapter *na, int onoff);
static int ptnet_nm_txsync(struct netmap_kring *kring, int flags);
static int ptnet_nm_rxsync(struct netmap_kring *kring, int flags);
static void ptnet_nm_intr(struct netmap_adapter *na, int onoff);
static void ptnet_tx_intr(void *opaque);
static void ptnet_rx_intr(void *opaque);
static unsigned ptnet_rx_discard(struct netmap_kring *kring,
unsigned int head);
static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget,
bool may_resched);
static void ptnet_rx_task(void *context, int pending);
#ifdef DEVICE_POLLING
static poll_handler_t ptnet_poll;
#endif
/* newbus device method table. */
static device_method_t ptnet_methods[] = {
	DEVMETHOD(device_probe,		ptnet_probe),
	DEVMETHOD(device_attach,	ptnet_attach),
	DEVMETHOD(device_detach,	ptnet_detach),
	DEVMETHOD(device_suspend,	ptnet_suspend),
	DEVMETHOD(device_resume,	ptnet_resume),
	DEVMETHOD(device_shutdown,	ptnet_shutdown),
	DEVMETHOD_END
};

static driver_t ptnet_driver = {
	"ptnet",
	ptnet_methods,
	sizeof(struct ptnet_softc)
};

/* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. */
DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, NULL, NULL,
    SI_ORDER_MIDDLE + 2);
/*
 * Probe: claim only the ptnetmap vendor/device pair; everything else
 * is rejected with ENXIO.
 */
static int
ptnet_probe(device_t dev)
{
	if (pci_get_vendor(dev) == PTNETMAP_PCI_VENDOR_ID &&
	    pci_get_device(dev) == PTNETMAP_PCI_NETIF_ID) {
		device_set_desc(dev, "ptnet network adapter");
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}
/*
 * Ring the per-queue doorbell: writing to the queue's kick register
 * notifies the backend; the value written (0) carries no meaning.
 */
static inline void ptnet_kick(struct ptnet_queue *pq)
{
#ifdef PTNETMAP_STATS
	pq->stats.kicks ++;
#endif /* PTNETMAP_STATS */
	bus_write_4(pq->sc->iomem, pq->kick, 0);
}
#define PTNET_BUF_RING_SIZE 4096
#define PTNET_RX_BUDGET 512
#define PTNET_RX_BATCH 1
#define PTNET_TX_BUDGET 512
#define PTNET_TX_BATCH 64
#define PTNET_HDR_SIZE sizeof(struct virtio_net_hdr_mrg_rxbuf)
#define PTNET_MAX_PKT_SIZE 65536
#define PTNET_CSUM_OFFLOAD (CSUM_TCP | CSUM_UDP)
#define PTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
#define PTNET_ALL_OFFLOAD (CSUM_TSO | PTNET_CSUM_OFFLOAD |\
PTNET_CSUM_OFFLOAD_IPV6)
/*
 * Attach: map the I/O BAR, negotiate features, allocate and publish the
 * CSB, create per-queue state, set up MSI-X interrupts, attach the
 * Ethernet interface and finally register the netmap passthrough
 * adapter.  Any failure jumps to err_path, which relies on
 * ptnet_detach() tolerating partially-initialized state.
 */
static int
ptnet_attach(device_t dev)
{
	uint32_t ptfeatures = 0;
	unsigned int num_rx_rings, num_tx_rings;
	struct netmap_adapter na_arg;
	unsigned int nifp_offset;
	struct ptnet_softc *sc;
	if_t ifp;
	uint32_t macreg;
	int err, rid;
	int i;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Setup PCI resources. */
	pci_enable_busmaster(dev);
	rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR);
	sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
	    RF_ACTIVE);
	if (sc->iomem == NULL) {
		device_printf(dev, "Failed to map I/O BAR\n");
		return (ENXIO);
	}

	/* Negotiate features with the hypervisor. */
	if (ptnet_vnet_hdr) {
		ptfeatures |= PTNETMAP_F_VNET_HDR;
	}
	bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */
	ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */
	sc->ptfeatures = ptfeatures;

	/* Ring geometry comes from device registers. */
	num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
	num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
	sc->num_rings = num_tx_rings + num_rx_rings;
	sc->num_tx_rings = num_tx_rings;

	/* Each CSB direction is one page; bail out if the rings don't fit. */
	if (sc->num_rings * sizeof(struct nm_csb_atok) > PAGE_SIZE) {
		device_printf(dev, "CSB cannot handle that many rings (%u)\n",
		    sc->num_rings);
		err = ENOMEM;
		goto err_path;
	}

	/* Allocate CSB and carry out CSB allocation protocol. */
	sc->csb_gh = contigmalloc(2*PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO,
	    (size_t)0, -1UL, PAGE_SIZE, 0);
	if (sc->csb_gh == NULL) {
		device_printf(dev, "Failed to allocate CSB\n");
		err = ENOMEM;
		goto err_path;
	}
	/* Second page of the contiguous allocation is the HG section. */
	sc->csb_hg = (struct nm_csb_ktoa *)(((char *)sc->csb_gh) + PAGE_SIZE);

	{
		/*
		 * We use uint64_t rather than vm_paddr_t since we
		 * need 64 bit addresses even on 32 bit platforms.
		 */
		uint64_t paddr = vtophys(sc->csb_gh);

		/* CSB allocation protocol: write to BAH first, then
		 * to BAL (for both GH and HG sections). */
		bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH,
		    (paddr >> 32) & 0xffffffff);
		bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL,
		    paddr & 0xffffffff);
		paddr = vtophys(sc->csb_hg);
		bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH,
		    (paddr >> 32) & 0xffffffff);
		bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL,
		    paddr & 0xffffffff);
	}

	/* Allocate and initialize per-queue data structures. */
	sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->queues == NULL) {
		err = ENOMEM;
		goto err_path;
	}
	sc->rxqueues = sc->queues + num_tx_rings;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		pq->sc = sc;
		pq->kring_id = i;
		/* Doorbell registers are 4 bytes apart, one per ring. */
		pq->kick = PTNET_IO_KICK_BASE + 4 * i;
		pq->atok = sc->csb_gh + i;
		pq->ktoa = sc->csb_hg + i;
		snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF);
		if (i >= num_tx_rings) {
			/* RX queue: fix kring_id. */
			pq->kring_id -= num_tx_rings;
		} else {
			/* TX queue: allocate buf_ring. */
			pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE,
			    M_DEVBUF, M_NOWAIT, &pq->lock);
			if (pq->bufring == NULL) {
				err = ENOMEM;
				goto err_path;
			}
		}
	}

	sc->min_tx_space = 64; /* Safe initial value. */

	err = ptnet_irqs_init(sc);
	if (err) {
		goto err_path;
	}

	/* Setup Ethernet interface. */
	sc->ifp = ifp = if_alloc(IFT_ETHER);
	/* NOTE(review): this revision removes the NULL check below;
	 * presumably because if_alloc() can no longer fail — confirm. */
-	if (ifp == NULL) {
-		device_printf(dev, "Failed to allocate ifnet\n");
-		err = ENOMEM;
-		goto err_path;
-	}
-
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setbaudrate(ifp, IF_Gbps(10));
	if_setsoftc(ifp, sc);
	if_setflags(ifp, IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX);
	if_setinitfn(ifp, ptnet_init);
	if_setioctlfn(ifp, ptnet_ioctl);
	if_setget_counter(ifp, ptnet_get_counter);
	if_settransmitfn(ifp, ptnet_transmit);
	if_setqflushfn(ifp, ptnet_qflush);

	ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change,
	    ptnet_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX);

	/* MAC address is packed into two device registers, high bytes first. */
	macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI);
	sc->hwaddr[0] = (macreg >> 8) & 0xff;
	sc->hwaddr[1] = macreg & 0xff;
	macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO);
	sc->hwaddr[2] = (macreg >> 24) & 0xff;
	sc->hwaddr[3] = (macreg >> 16) & 0xff;
	sc->hwaddr[4] = (macreg >> 8) & 0xff;
	sc->hwaddr[5] = macreg & 0xff;

	ether_ifattach(ifp, sc->hwaddr);

	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
	if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU, 0);

	if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) {
		/* Similarly to what the vtnet driver does, we can emulate
		 * VLAN offloadings by inserting and removing the 802.1Q
		 * header during transmit and receive. We are then able
		 * to do checksum offloading of VLAN frames. */
		if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6
		    | IFCAP_VLAN_HWCSUM
		    | IFCAP_TSO | IFCAP_LRO
		    | IFCAP_VLAN_HWTSO
		    | IFCAP_VLAN_HWTAGGING, 0);
	}
	if_setcapenable(ifp, if_getcapabilities(ifp));
#ifdef DEVICE_POLLING
	/* Don't enable polling by default. */
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif

	snprintf(sc->lock_name, sizeof(sc->lock_name),
	    "%s", device_get_nameunit(dev));
	mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF);
	callout_init_mtx(&sc->tick, &sc->lock, 0);

	/* Prepare a netmap_adapter struct instance to do netmap_attach(). */
	nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS);
	memset(&na_arg, 0, sizeof(na_arg));
	na_arg.ifp = ifp;
	na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
	na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
	na_arg.num_tx_rings = num_tx_rings;
	na_arg.num_rx_rings = num_rx_rings;
	na_arg.nm_config = ptnet_nm_config;
	na_arg.nm_krings_create = ptnet_nm_krings_create;
	na_arg.nm_krings_delete = ptnet_nm_krings_delete;
	na_arg.nm_dtor = ptnet_nm_dtor;
	na_arg.nm_intr = ptnet_nm_intr;
	na_arg.nm_register = ptnet_nm_register;
	na_arg.nm_txsync = ptnet_nm_txsync;
	na_arg.nm_rxsync = ptnet_nm_rxsync;

	netmap_pt_guest_attach(&na_arg, nifp_offset,
	    bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID));

	/* Now a netmap adapter for this ifp has been allocated, and it
	 * can be accessed through NA(ifp). We also have to initialize the CSB
	 * pointer. */
	sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp);

	/* If virtio-net header was negotiated, set the virt_hdr_len field in
	 * the netmap adapter, to inform users that this netmap adapter requires
	 * the application to deal with the headers. */
	ptnet_update_vnet_hdr(sc);

	device_printf(dev, "%s() completed\n", __func__);

	return (0);

err_path:
	ptnet_detach(dev);
	return err;
}
/* Stop host sync-kloop if it was running. */
static void
ptnet_device_shutdown(struct ptnet_softc *sc)
{
	/* Ask the host to tear down its end of the datapath. */
	ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE);
	/* Detach the CSB by zeroing all four base-address registers. */
	bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0);
	bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0);
	bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0);
	bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0);
}
/*
 * Detach: tear down in reverse-attach order.  Also used as the error
 * path of ptnet_attach(), so most steps are guarded by NULL checks to
 * tolerate partially-initialized state.
 */
static int
ptnet_detach(device_t dev)
{
	struct ptnet_softc *sc = device_get_softc(dev);
	int i;

	ptnet_device_shutdown(sc);

#ifdef DEVICE_POLLING
	/* NOTE(review): sc->ifp may be NULL on the attach error path;
	 * this dereference looks unsafe there — confirm. */
	if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
		ether_poll_deregister(sc->ifp);
	}
#endif
	callout_drain(&sc->tick);

	if (sc->queues) {
		/* Drain taskqueues before calling if_detach. */
		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (pq->taskq) {
				taskqueue_drain(pq->taskq, &pq->task);
			}
		}
	}

	if (sc->ifp) {
		ether_ifdetach(sc->ifp);

		/* Uninitialize netmap adapters for this device. */
		netmap_detach(sc->ifp);

		ifmedia_removeall(&sc->media);
		if_free(sc->ifp);
		sc->ifp = NULL;
	}

	ptnet_irqs_fini(sc);

	if (sc->csb_gh) {
		/* One contiguous 2-page allocation backs both CSB halves. */
		contigfree(sc->csb_gh, 2*PAGE_SIZE, M_DEVBUF);
		sc->csb_gh = NULL;
		sc->csb_hg = NULL;
	}

	if (sc->queues) {
		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (mtx_initialized(&pq->lock)) {
				mtx_destroy(&pq->lock);
			}
			if (pq->bufring != NULL) {
				buf_ring_free(pq->bufring, M_DEVBUF);
			}
		}
		free(sc->queues, M_DEVBUF);
		sc->queues = NULL;
	}

	if (sc->iomem) {
		bus_release_resource(dev, SYS_RES_IOPORT,
		    PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem);
		sc->iomem = NULL;
	}

	/* NOTE(review): destroyed unconditionally, but on the attach error
	 * path sc->lock may not have been initialized yet — confirm. */
	mtx_destroy(&sc->lock);

	device_printf(dev, "%s() completed\n", __func__);

	return (0);
}
/* Suspend requires no driver-side work; always succeeds. */
static int
ptnet_suspend(device_t dev)
{
	struct ptnet_softc *softc = device_get_softc(dev);

	(void)softc;
	return (0);
}
/* Resume requires no driver-side work; always succeeds. */
static int
ptnet_resume(device_t dev)
{
	struct ptnet_softc *softc = device_get_softc(dev);

	(void)softc;
	return (0);
}
/* System shutdown: stop the host sync-kloop and detach the CSB. */
static int
ptnet_shutdown(device_t dev)
{
	ptnet_device_shutdown(device_get_softc(dev));
	return (0);
}
/*
 * Allocate one MSI-X vector per ring, wire up the TX/RX interrupt
 * handlers and create a single-threaded taskqueue per queue.  On any
 * failure, everything allocated so far is released via ptnet_irqs_fini().
 */
static int
ptnet_irqs_init(struct ptnet_softc *sc)
{
	int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR);
	int nvecs = sc->num_rings;
	device_t dev = sc->dev;
	int err = ENOSPC;
	int cpu_cur;
	int i;

	if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) {
		device_printf(dev, "Could not find MSI-X capability\n");
		return (ENXIO);
	}

	sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (sc->msix_mem == NULL) {
		device_printf(dev, "Failed to allocate MSIX PCI BAR\n");
		return (ENXIO);
	}

	/* We need exactly one vector per ring; fewer is a hard failure. */
	if (pci_msix_count(dev) < nvecs) {
		device_printf(dev, "Not enough MSI-X vectors\n");
		goto err_path;
	}

	err = pci_alloc_msix(dev, &nvecs);
	if (err) {
		device_printf(dev, "Failed to allocate MSI-X vectors\n");
		goto err_path;
	}

	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		/* IRQ rids are 1-based: queue i uses rid i + 1. */
		rid = i + 1;
		pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_ACTIVE);
		if (pq->irq == NULL) {
			device_printf(dev, "Failed to allocate interrupt "
			    "for queue #%d\n", i);
			err = ENOSPC;
			goto err_path;
		}
	}

	cpu_cur = CPU_FIRST();
	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		void (*handler)(void *) = ptnet_tx_intr;

		/* Queues at index >= num_tx_rings are RX queues. */
		if (i >= sc->num_tx_rings) {
			handler = ptnet_rx_intr;
		}
		err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE,
		    NULL /* intr_filter */, handler,
		    pq, &pq->cookie);
		if (err) {
			device_printf(dev, "Failed to register intr handler "
			    "for queue #%d\n", i);
			goto err_path;
		}

		bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i);
#if 0
		bus_bind_intr(sc->dev, pq->irq, cpu_cur);
#endif
		cpu_cur = CPU_NEXT(cpu_cur);
	}

	device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs);

	cpu_cur = CPU_FIRST();
	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		if (i < sc->num_tx_rings)
			TASK_INIT(&pq->task, 0, ptnet_tx_task, pq);
		else
			NET_TASK_INIT(&pq->task, 0, ptnet_rx_task, pq);

		/* One single-threaded fast taskqueue per queue. */
		pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
		    taskqueue_thread_enqueue, &pq->taskq);
		taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
		    device_get_nameunit(sc->dev), cpu_cur);
		cpu_cur = CPU_NEXT(cpu_cur);
	}

	return 0;
err_path:
	ptnet_irqs_fini(sc);
	return err;
}
/*
 * Release everything ptnet_irqs_init() set up: taskqueues, interrupt
 * handlers, per-queue IRQ resources, the MSI-X allocation and its BAR.
 * Safe on partially-initialized state (NULL checks throughout).
 */
static void
ptnet_irqs_fini(struct ptnet_softc *sc)
{
	device_t dev = sc->dev;
	int i;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		if (pq->taskq) {
			taskqueue_free(pq->taskq);
			pq->taskq = NULL;
		}
		if (pq->cookie) {
			bus_teardown_intr(dev, pq->irq, pq->cookie);
			pq->cookie = NULL;
		}
		if (pq->irq) {
			/* Queue i was allocated with rid i + 1. */
			bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq);
			pq->irq = NULL;
		}
	}

	if (sc->msix_mem) {
		pci_release_msi(dev);
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(PTNETMAP_MSIX_PCI_BAR),
		    sc->msix_mem);
		sc->msix_mem = NULL;
	}
}
/* if_init callback: take the core lock and delegate to the locked path. */
static void
ptnet_init(void *opaque)
{
	struct ptnet_softc *softc = opaque;

	PTNET_CORE_LOCK(softc);
	ptnet_init_locked(softc);
	PTNET_CORE_UNLOCK(softc);
}
/*
 * ifnet ioctl handler: interface up/down (SIOCSIFFLAGS), capability
 * toggling including DEVICE_POLLING transitions (SIOCSIFCAP), MTU and
 * media; everything else is forwarded to ether_ioctl().
 */
static int
ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	device_t dev = sc->dev;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask __unused, err = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		device_printf(dev, "SIOCSIFFLAGS %x\n", if_getflags(ifp));
		PTNET_CORE_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			/* Network stack wants the iff to be up. */
			err = ptnet_init_locked(sc);
		} else {
			/* Network stack wants the iff to be down. */
			err = ptnet_stop(sc);
		}
		/* We don't need to do nothing to support IFF_PROMISC,
		 * since that is managed by the backend port. */
		PTNET_CORE_UNLOCK(sc);
		break;

	case SIOCSIFCAP:
		device_printf(dev, "SIOCSIFCAP %x %x\n",
		    ifr->ifr_reqcap, if_getcapenable(ifp));
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			struct ptnet_queue *pq;
			int i;

			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				err = ether_poll_register(ptnet_poll, ifp);
				if (err) {
					break;
				}
				/* Stop queues and sync with taskqueues. */
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				for (i = 0; i < sc->num_rings; i++) {
					pq = sc-> queues + i;
					/* Make sure the worker sees the
					 * IFF_DRV_RUNNING down. */
					PTNET_Q_LOCK(pq);
					pq->atok->appl_need_kick = 0;
					PTNET_Q_UNLOCK(pq);
					/* Wait for rescheduling to finish. */
					if (pq->taskq) {
						taskqueue_drain(pq->taskq,
						    &pq->task);
					}
				}
				if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
			} else {
				err = ether_poll_deregister(ifp);
				/* Interrupts are back: re-enable kicks. */
				for (i = 0; i < sc->num_rings; i++) {
					pq = sc-> queues + i;
					PTNET_Q_LOCK(pq);
					pq->atok->appl_need_kick = 1;
					PTNET_Q_UNLOCK(pq);
				}
			}
		}
#endif /* DEVICE_POLLING */
		if_setcapenable(ifp, ifr->ifr_reqcap);
		break;

	case SIOCSIFMTU:
		/* We support any reasonable MTU. */
		if (ifr->ifr_mtu < ETHERMIN ||
		    ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) {
			err = EINVAL;
		} else {
			PTNET_CORE_LOCK(sc);
			if_setmtu(ifp, ifr->ifr_mtu);
			PTNET_CORE_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
		break;

	default:
		err = ether_ioctl(ifp, cmd, data);
		break;
	}

	return err;
}
/*
 * Bring the interface up.  Called with the core lock held.  Translates
 * if_capenable into if_hwassist bits, finalizes netmap memory, creates
 * krings/rings if we are the first backend user, and registers with the
 * passthrough adapter.  Errors unwind through the goto chain below.
 */
static int
ptnet_init_locked(struct ptnet_softc *sc)
{
	if_t ifp = sc->ifp;
	struct netmap_adapter *na_dr = &sc->ptna->dr.up;
	struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
	unsigned int nm_buf_size;
	int ret;

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
		return 0; /* nothing to do */
	}

	device_printf(sc->dev, "%s\n", __func__);

	/* Translate offload capabilities according to if_capenable. */
	if_sethwassist(ifp, 0);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
		if_sethwassistbits(ifp, PTNET_CSUM_OFFLOAD, 0);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
		if_sethwassistbits(ifp, PTNET_CSUM_OFFLOAD_IPV6, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO4)
		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO6)
		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);

	/*
	 * Prepare the interface for netmap mode access.
	 */
	netmap_update_config(na_dr);

	ret = netmap_mem_finalize(na_dr->nm_mem, na_dr);
	if (ret) {
		device_printf(sc->dev, "netmap_mem_finalize() failed\n");
		return ret;
	}

	/* First user: krings, rings and the memory lookup table. */
	if (sc->ptna->backend_users == 0) {
		ret = ptnet_nm_krings_create(na_nm);
		if (ret) {
			device_printf(sc->dev, "ptnet_nm_krings_create() "
			    "failed\n");
			goto err_mem_finalize;
		}

		ret = netmap_mem_rings_create(na_dr);
		if (ret) {
			device_printf(sc->dev, "netmap_mem_rings_create() "
			    "failed\n");
			goto err_rings_create;
		}

		ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut);
		if (ret) {
			device_printf(sc->dev, "netmap_mem_get_lut() "
			    "failed\n");
			goto err_get_lut;
		}
	}

	ret = ptnet_nm_register(na_dr, 1 /* on */);
	if (ret) {
		goto err_register;
	}

	nm_buf_size = NETMAP_BUF_SIZE(na_dr);

	KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size"));
	/* Slots a max-sized packet can occupy, plus slack (presumably for
	 * the header slot — TODO confirm). */
	sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2;
	device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__,
	    sc->min_tx_space);

#ifdef PTNETMAP_STATS
	callout_reset(&sc->tick, hz, ptnet_tick, sc);
#endif

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);

	return 0;

	/* Error unwinding, in reverse order of the setup above. */
err_register:
	memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut));
err_get_lut:
	netmap_mem_rings_delete(na_dr);
err_rings_create:
	ptnet_nm_krings_delete(na_nm);
err_mem_finalize:
	netmap_mem_deref(na_dr->nm_mem, na_dr);

	return ret;
}
/* To be called under core lock. */
/*
 * Bring the interface down.  Called with the core lock held.  Clears
 * IFF_DRV_RUNNING, synchronizes with all queues, unregisters from the
 * passthrough adapter and releases netmap resources if we were the last
 * backend user.
 */
static int
ptnet_stop(struct ptnet_softc *sc)
{
	if_t ifp = sc->ifp;
	struct netmap_adapter *na_dr = &sc->ptna->dr.up;
	struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
	int i;

	device_printf(sc->dev, "%s\n", __func__);

	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
		return 0; /* nothing to do */
	}

	/* Clear the driver-ready flag, and synchronize with all the queues,
	 * so that after this loop we are sure nobody is working anymore with
	 * the device. This scheme is taken from the vtnet driver. */
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	callout_stop(&sc->tick);
	for (i = 0; i < sc->num_rings; i++) {
		/* Lock/unlock acts as a barrier against in-flight workers. */
		PTNET_Q_LOCK(sc->queues + i);
		PTNET_Q_UNLOCK(sc->queues + i);
	}

	ptnet_nm_register(na_dr, 0 /* off */);

	if (sc->ptna->backend_users == 0) {
		netmap_mem_rings_delete(na_dr);
		ptnet_nm_krings_delete(na_nm);
	}
	netmap_mem_deref(na_dr->nm_mem, na_dr);

	return 0;
}
/*
 * if_qflush callback: drop every mbuf still parked in the per-queue
 * bufrings, then let the generic layer flush the interface queues.
 */
static void
ptnet_qflush(if_t ifp)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	int qi;

	for (qi = 0; qi < sc->num_rings; qi++) {
		struct ptnet_queue *pq = sc->queues + qi;
		struct mbuf *m;

		PTNET_Q_LOCK(pq);
		if (pq->bufring != NULL) {
			for (;;) {
				m = buf_ring_dequeue_sc(pq->bufring);
				if (m == NULL)
					break;
				m_freem(m);
			}
		}
		PTNET_Q_UNLOCK(pq);
	}

	if_qflush(ifp);
}
/* ifmedia change callback: only Ethernet media is acceptable. */
static int
ptnet_media_change(if_t ifp)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ifmedia *media = &sc->media;

	if (IFM_TYPE(media->ifm_media) == IFM_ETHER)
		return 0;

	return EINVAL;
}
/*
 * ifnet get_counter callback: aggregate per-queue statistics into TX and
 * RX totals (the first num_tx_rings queues are TX, the rest RX) and
 * report the counter matching 'cnt'.
 */
static uint64_t
ptnet_get_counter(if_t ifp, ift_counter cnt)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ptnet_queue_stats txtot, rxtot;
	int qi;

	memset(&txtot, 0, sizeof(txtot));
	memset(&rxtot, 0, sizeof(rxtot));
	for (qi = 0; qi < sc->num_rings; qi++) {
		struct ptnet_queue *pq = sc->queues + qi;
		struct ptnet_queue_stats *tot =
		    (qi < sc->num_tx_rings) ? &txtot : &rxtot;

		tot->packets += pq->stats.packets;
		tot->bytes += pq->stats.bytes;
		tot->errors += pq->stats.errors;
		tot->iqdrops += pq->stats.iqdrops;
		tot->mcasts += pq->stats.mcasts;
	}

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (rxtot.packets);
	case IFCOUNTER_IQDROPS:
		return (rxtot.iqdrops);
	case IFCOUNTER_IERRORS:
		return (rxtot.errors);
	case IFCOUNTER_OPACKETS:
		return (txtot.packets);
	case IFCOUNTER_OBYTES:
		return (txtot.bytes);
	case IFCOUNTER_OMCASTS:
		return (txtot.mcasts);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
#ifdef PTNETMAP_STATS
/* Called under core lock. */
/*
 * Periodic (1 Hz) statistics dump, PTNETMAP_STATS builds only.  Prints
 * per-queue packet/kick/interrupt deltas since the previous tick and
 * re-arms itself via the callout.
 */
static void
ptnet_tick(void *opaque)
{
	struct ptnet_softc *sc = opaque;
	int i;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		struct ptnet_queue_stats cur = pq->stats;
		struct timeval now;
		unsigned int delta;

		/* Milliseconds elapsed since the last snapshot. */
		microtime(&now);
		delta = now.tv_usec - sc->last_ts.tv_usec +
			(now.tv_sec - sc->last_ts.tv_sec) * 1000000;
		delta /= 1000; /* in milliseconds */

		if (delta == 0)
			continue;

		device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, "
			      "intr %lu\n", i, delta,
			      (cur.packets - pq->last_stats.packets),
			      (cur.kicks - pq->last_stats.kicks),
			      (cur.intrs - pq->last_stats.intrs));
		pq->last_stats = cur;
	}
	microtime(&sc->last_ts);
	callout_schedule(&sc->tick, hz);
}
#endif /* PTNETMAP_STATS */
/*
 * ifmedia status callback.  The link is reported as a constant active
 * 10G full-duplex Ethernet link: the backend netmap port is always open
 * in netmap mode, so there is no real link state to query.
 */
static void
ptnet_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX;
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
}
/*
 * Issue a passthrough control command.  The PTCTL register is
 * write-command/read-status: after the write, reading the same register
 * yields the host's error status, with zero meaning success.
 */
static uint32_t
ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd)
{
	uint32_t status;

	bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd);
	status = bus_read_4(sc->iomem, PTNET_IO_PTCTL);
	return status;
}
/*
 * netmap nm_config callback: report the ring/slot geometry, read back
 * from the device registers, plus the maximum RX buffer size.
 */
static int
ptnet_nm_config(struct netmap_adapter *na, struct nm_config_info *info)
{
	struct ptnet_softc *sc = if_getsoftc(na->ifp);

	info->num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
	info->num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
	info->num_tx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
	info->num_rx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
	info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);

	device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u, rxbufsz %u\n",
	    info->num_tx_rings, info->num_rx_rings,
	    info->num_tx_descs, info->num_rx_descs,
	    info->rx_buf_maxsize);

	return 0;
}
/*
 * Align the guest krings and netmap rings (head/cur/hwcur/hwtail) with
 * the state the host published in the CSB.  TX krings come first, then
 * RX krings, matching the queue array layout.
 */
static void
ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na)
{
	int i;

	/* Sync krings from the host, reading from
	 * CSB. */
	for (i = 0; i < sc->num_rings; i++) {
		struct nm_csb_atok *atok = sc->queues[i].atok;
		struct nm_csb_ktoa *ktoa = sc->queues[i].ktoa;
		struct netmap_kring *kring;

		if (i < na->num_tx_rings) {
			kring = na->tx_rings[i];
		} else {
			kring = na->rx_rings[i - na->num_tx_rings];
		}
		kring->rhead = kring->ring->head = atok->head;
		kring->rcur = kring->ring->cur = atok->cur;
		kring->nr_hwcur = ktoa->hwcur;
		kring->nr_hwtail = kring->rtail =
			kring->ring->tail = ktoa->hwtail;

		/* NOTE(review): 't' below is not declared in this function;
		 * harmless as long as nm_prdis() compiles its arguments away,
		 * but it would break a debug build — confirm. */
		nm_prdis("%d,%d: csb {hc %u h %u c %u ht %u}", t, i,
		   ktoa->hwcur, atok->head, atok->cur,
		   ktoa->hwtail);
		nm_prdis("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}",
		   t, i, kring->nr_hwcur, kring->rhead, kring->rcur,
		   kring->ring->head, kring->ring->cur, kring->nr_hwtail,
		   kring->rtail, kring->ring->tail);
	}
}
/*
 * Negotiate the virtio-net header length with the device: request the
 * desired length, read back what the device actually accepted, and
 * propagate the result to the netmap adapter.
 */
static void
ptnet_update_vnet_hdr(struct ptnet_softc *sc)
{
	unsigned int wanted_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0;

	bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len);
	/* The device may refuse the request: trust the read-back value,
	 * not the one we asked for. */
	sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN);
	sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len;
}
/*
 * netmap nm_register callback: move the adapter in or out of netmap
 * mode.  'na' may be either the native (hardware) adapter or another
 * adapter sharing the same device; 'sc->ptna->backend_users' counts how
 * many of them currently have the host backend open, so the CSB setup
 * and the CREATE/DELETE commands are issued only for the first user in
 * and the last user out.
 *
 * Returns 0 on success or the error status of the PTCTL command.
 */
static int
ptnet_nm_register(struct netmap_adapter *na, int onoff)
{
	/* device-specific */
	if_t ifp = na->ifp;
	struct ptnet_softc *sc = if_getsoftc(ifp);
	int native = (na == &sc->ptna->hwup.up);
	struct ptnet_queue *pq;
	int ret = 0;
	int i;

	if (!onoff) {
		sc->ptna->backend_users--;
	}

	/* If this is the last netmap client, guest interrupt enable flags may
	 * be in arbitrary state. Since these flags are going to be used also
	 * by the netdevice driver, we have to make sure to start with
	 * notifications enabled. Also, schedule NAPI to flush pending packets
	 * in the RX rings, since we will not receive further interrupts
	 * until these will be processed. */
	if (native && !onoff && na->active_fds == 0) {
		nm_prinf("Exit netmap mode, re-enable interrupts");
		for (i = 0; i < sc->num_rings; i++) {
			pq = sc->queues + i;
			pq->atok->appl_need_kick = 1;
		}
	}

	if (onoff) {
		if (sc->ptna->backend_users == 0) {
			/* Initialize notification enable fields in the CSB. */
			for (i = 0; i < sc->num_rings; i++) {
				pq = sc->queues + i;
				pq->ktoa->kern_need_kick = 1;
				/* RX rings want kicks unless polling is on;
				 * TX rings start with kicks disabled. */
				pq->atok->appl_need_kick =
					(!(if_getcapenable(ifp) & IFCAP_POLLING)
					&& i >= sc->num_tx_rings);
			}

			/* Set the virtio-net header length. */
			ptnet_update_vnet_hdr(sc);

			/* Make sure the host adapter passed through is ready
			 * for txsync/rxsync. */
			ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_CREATE);
			if (ret) {
				/* Note: backend_users is deliberately not
				 * incremented on this failure path. */
				return ret;
			}

			/* Align the guest krings and rings to the state stored
			 * in the CSB. */
			ptnet_sync_from_csb(sc, na);
		}

		/* If not native, don't call nm_set_native_flags, since we don't want
		 * to replace if_transmit method, nor set NAF_NETMAP_ON */
		if (native) {
			netmap_krings_mode_commit(na, onoff);
			nm_set_native_flags(na);
		}
	} else {
		if (native) {
			nm_clear_native_flags(na);
			netmap_krings_mode_commit(na, onoff);
		}

		if (sc->ptna->backend_users == 0) {
			ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE);
		}
	}

	if (onoff) {
		sc->ptna->backend_users++;
	}

	return ret;
}
/*
 * netmap nm_txsync callback: delegate the TX ring synchronization to
 * the passthrough machinery and kick the host when it requests it.
 */
static int
ptnet_nm_txsync(struct netmap_kring *kring, int flags)
{
	struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
	struct ptnet_queue *pq = sc->queues + kring->ring_id;

	if (netmap_pt_guest_txsync(pq->atok, pq->ktoa, kring, flags))
		ptnet_kick(pq);

	return (0);
}
/*
 * netmap nm_rxsync callback: delegate the RX ring synchronization to
 * the passthrough machinery and kick the host when it requests it.
 * Note: RX queues are indexed through sc->rxqueues.
 */
static int
ptnet_nm_rxsync(struct netmap_kring *kring, int flags)
{
	struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
	struct ptnet_queue *pq = sc->rxqueues + kring->ring_id;

	if (netmap_pt_guest_rxsync(pq->atok, pq->ktoa, kring, flags))
		ptnet_kick(pq);

	return (0);
}
/*
 * netmap nm_intr callback: enable (onoff != 0) or disable guest
 * notifications on every queue, by writing the per-queue CSB flag.
 */
static void
ptnet_nm_intr(struct netmap_adapter *na, int onoff)
{
	struct ptnet_softc *sc = if_getsoftc(na->ifp);
	int i;

	for (i = 0; i < sc->num_rings; i++)
		sc->queues[i].atok->appl_need_kick = onoff;
}
/*
 * TX interrupt handler: hand the event to netmap first; if netmap does
 * not consume it (interface not in netmap mode), defer the flush of
 * pending transmissions to the per-queue taskqueue.
 */
static void
ptnet_tx_intr(void *opaque)
{
	struct ptnet_queue *pq = opaque;
	struct ptnet_softc *sc = pq->sc;

	DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
	pq->stats.intrs ++;
#endif /* PTNETMAP_STATS */

	/* NM_IRQ_PASS means the interrupt was not consumed by a netmap
	 * application, so continue with regular driver processing. */
	if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) {
		return;
	}

	/* Schedule the taskqueue to flush pending transmission requests.
	 * However, vtnet, if_em and if_igb just call ptnet_transmit() here,
	 * at least when using MSI-X interrupts. The if_em driver, instead,
	 * schedules the taskqueue when using legacy interrupts. */
	taskqueue_enqueue(pq->taskq, &pq->task);
}
/*
 * RX interrupt handler: hand the event to netmap first; if netmap does
 * not consume it, process received packets directly in the interrupt
 * context (bounded by PTNET_RX_BUDGET).
 */
static void
ptnet_rx_intr(void *opaque)
{
	struct ptnet_queue *pq = opaque;
	struct ptnet_softc *sc = pq->sc;
	unsigned int unused;

	DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
	pq->stats.intrs ++;
#endif /* PTNETMAP_STATS */

	/* NM_IRQ_PASS means the interrupt was not consumed by a netmap
	 * application, so continue with regular driver processing. */
	if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) {
		return;
	}

	/* Like vtnet, if_igb and if_em drivers when using MSI-X interrupts,
	 * receive-side processing is executed directly in the interrupt
	 * service routine. Alternatively, we may schedule the taskqueue. */
	ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}
/*
 * Strip the 802.1Q tag from an incoming frame, saving it in the mbuf
 * packet header so the stack sees an untagged frame plus out-of-band
 * VLAN information (M_VLANTAG).
 */
static void
ptnet_vlan_tag_remove(struct mbuf *m)
{
	struct ether_vlan_header *evh;

	evh = mtod(m, struct ether_vlan_header *);
	m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
	m->m_flags |= M_VLANTAG;

	/* Strip the 802.1Q header: slide the destination + source MAC
	 * addresses (ETHER_HDR_LEN - ETHER_TYPE_LEN = 12 bytes) forward
	 * over the 4-byte encapsulation, then trim the now-duplicated
	 * leading bytes.  The regions overlap; bcopy() handles that. */
	bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
	    ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}
/*
 * Publish a new 'head' value to the netmap ring, the kring and the CSB,
 * and kick the host if it asked to be notified.  Called with the queue
 * lock held (the ring indices are updated non-atomically).
 */
static void
ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring,
		  unsigned int head, unsigned int sync_flags)
{
	struct netmap_ring *ring = kring->ring;
	struct nm_csb_atok *atok = pq->atok;
	struct nm_csb_ktoa *ktoa = pq->ktoa;

	/* Some packets have been pushed to the netmap ring. We have
	 * to tell the host to process the new packets, updating cur
	 * and head in the CSB. */
	ring->head = ring->cur = head;

	/* Mimic nm_txsync_prologue/nm_rxsync_prologue. */
	kring->rcur = kring->rhead = head;

	nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead);

	/* Kick the host if needed. */
	if (NM_ACCESS_ONCE(ktoa->kern_need_kick)) {
		atok->sync_flags = sync_flags;
		ptnet_kick(pq);
	}
}
/*
 * True when fewer than _min free TX slots remain between _h (the next
 * slot the guest will fill) and the kring's last known tail.  The
 * ternary adds the ring size to account for index wrap-around.
 *
 * The whole expansion is parenthesized so the macro can be safely
 * embedded in larger expressions (e.g. !PTNET_TX_NOSPACE(...) would
 * otherwise parse as (!X) < (_min)).
 */
#define PTNET_TX_NOSPACE(_h, _k, _min)	\
	(((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \
		(_k)->rtail - (_h)) < (_min))
/*
 * Drain the mbufs queued on pq->bufring into the netmap TX ring:
 * each packet (preceded by an optional virtio-net header) is copied
 * into one or more netmap slots, and the host is kicked in batches of
 * PTNET_TX_BATCH slots.  At most 'budget' packets are processed; if
 * 'may_resched' is true, leftover work is deferred to the per-queue
 * taskqueue.  Returns the number of packets processed, or ENETDOWN if
 * the interface is not running.
 *
 * This function may be called by the network stack, or by
 * the taskqueue thread.
 */
static int
ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
			   bool may_resched)
{
	struct ptnet_softc *sc = pq->sc;
	bool have_vnet_hdr = sc->vnet_hdr_len;
	struct netmap_adapter *na = &sc->ptna->dr.up;
	if_t ifp = sc->ifp;
	unsigned int batch_count = 0;
	struct nm_csb_atok *atok;
	struct nm_csb_ktoa *ktoa;
	struct netmap_kring *kring;
	struct netmap_ring *ring;
	struct netmap_slot *slot;
	unsigned int count = 0;
	unsigned int minspace;
	unsigned int head;
	unsigned int lim;
	struct mbuf *mhead;
	struct mbuf *mf;
	int nmbuf_bytes;
	uint8_t *nmbuf;

	if (!PTNET_Q_TRYLOCK(pq)) {
		/* We failed to acquire the lock, schedule the taskqueue. */
		nm_prlim(1, "Deferring TX work");
		if (may_resched) {
			taskqueue_enqueue(pq->taskq, &pq->task);
		}
		return 0;
	}

	if (unlikely(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
		PTNET_Q_UNLOCK(pq);
		nm_prlim(1, "Interface is down");
		return ENETDOWN;
	}

	atok = pq->atok;
	ktoa = pq->ktoa;
	kring = na->tx_rings[pq->kring_id];
	ring = kring->ring;
	lim = kring->nkr_num_slots - 1;
	head = ring->head;
	minspace = sc->min_tx_space;

	while (count < budget) {
		if (PTNET_TX_NOSPACE(head, kring, minspace)) {
			/* We ran out of slot, let's see if the host has
			 * freed up some, by reading hwcur and hwtail from
			 * the CSB. */
			ptnet_sync_tail(ktoa, kring);

			if (PTNET_TX_NOSPACE(head, kring, minspace)) {
				/* Still no slots available. Reactivate the
				 * interrupts so that we can be notified
				 * when some free slots are made available by
				 * the host. */
				atok->appl_need_kick = 1;

				/* Double check. We need a full barrier to
				 * prevent the store to atok->appl_need_kick
				 * to be reordered with the load from
				 * ktoa->hwcur and ktoa->hwtail (store-load
				 * barrier). */
				nm_stld_barrier();

				ptnet_sync_tail(ktoa, kring);
				if (likely(PTNET_TX_NOSPACE(head, kring,
							    minspace))) {
					break;
				}

				nm_prlim(1, "Found more slots by doublecheck");
				/* More slots were freed before reactivating
				 * the interrupts. */
				atok->appl_need_kick = 0;
			}
		}

		mhead = drbr_peek(ifp, pq->bufring);
		if (!mhead) {
			break;
		}

		/* Initialize transmission state variables. */
		slot = ring->slot + head;
		nmbuf = NMB(na, slot);
		nmbuf_bytes = 0;

		/* If needed, prepare the virtio-net header at the beginning
		 * of the first slot. */
		if (have_vnet_hdr) {
			struct virtio_net_hdr *vh =
					(struct virtio_net_hdr *)nmbuf;

			/* For performance, we could replace this memset() with
			 * two 8-bytes-wide writes. */
			memset(nmbuf, 0, PTNET_HDR_SIZE);
			if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) {
				mhead = virtio_net_tx_offload(ifp, mhead, false,
							      vh);
				if (unlikely(!mhead)) {
					/* Packet dropped because errors
					 * occurred while preparing the vnet
					 * header. Let's go ahead with the next
					 * packet. */
					pq->stats.errors ++;
					drbr_advance(ifp, pq->bufring);
					continue;
				}
			}
			nm_prdis(1, "%s: [csum_flags %lX] vnet hdr: flags %x "
			      "csum_start %u csum_ofs %u hdr_len = %u "
			      "gso_size %u gso_type %x", __func__,
			      mhead->m_pkthdr.csum_flags, vh->flags,
			      vh->csum_start, vh->csum_offset, vh->hdr_len,
			      vh->gso_size, vh->gso_type);

			nmbuf += PTNET_HDR_SIZE;
			nmbuf_bytes += PTNET_HDR_SIZE;
		}

		/* Copy the mbuf chain into netmap slots, spilling into the
		 * next slot (with NS_MOREFRAG set) whenever one fills up. */
		for (mf = mhead; mf; mf = mf->m_next) {
			uint8_t *mdata = mf->m_data;
			int mlen = mf->m_len;

			for (;;) {
				int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes;

				if (mlen < copy) {
					copy = mlen;
				}
				memcpy(nmbuf, mdata, copy);

				mdata += copy;
				mlen -= copy;
				nmbuf += copy;
				nmbuf_bytes += copy;

				if (!mlen) {
					break;
				}

				slot->len = nmbuf_bytes;
				slot->flags = NS_MOREFRAG;

				head = nm_next(head, lim);
				KASSERT(head != ring->tail,
					("Unexpectedly run out of TX space"));
				slot = ring->slot + head;
				nmbuf = NMB(na, slot);
				nmbuf_bytes = 0;
			}
		}

		/* Complete last slot and update head. */
		slot->len = nmbuf_bytes;
		slot->flags = 0;
		head = nm_next(head, lim);

		/* Consume the packet just processed. */
		drbr_advance(ifp, pq->bufring);

		/* Copy the packet to listeners. */
		ETHER_BPF_MTAP(ifp, mhead);

		pq->stats.packets ++;
		pq->stats.bytes += mhead->m_pkthdr.len;
		if (mhead->m_flags & M_MCAST) {
			pq->stats.mcasts ++;
		}

		m_freem(mhead);

		count ++;
		if (++batch_count == PTNET_TX_BATCH) {
			ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
			batch_count = 0;
		}
	}

	/* Flush any slots published since the last batch update. */
	if (batch_count) {
		ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
	}

	if (count >= budget && may_resched) {
		DBG(nm_prlim(1, "out of budget: resched, %d mbufs pending\n",
					drbr_inuse(ifp, pq->bufring)));
		taskqueue_enqueue(pq->taskq, &pq->task);
	}

	PTNET_Q_UNLOCK(pq);

	return count;
}
/*
 * if_transmit entry point: tag-encapsulate if needed, enqueue the mbuf
 * on a per-queue buf_ring selected by flow id (or current CPU), and
 * drain the queue unless polling is active.
 */
static int
ptnet_transmit(if_t ifp, struct mbuf *m)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ptnet_queue *pq;
	unsigned int queue_idx;
	int error;

	DBG(device_printf(sc->dev, "transmit %p\n", m));

	/* Insert 802.1Q header if needed. */
	if (m->m_flags & M_VLANTAG) {
		m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
		if (m == NULL)
			return (ENOBUFS);
		m->m_flags &= ~M_VLANTAG;
	}

	/* Get the flow-id if available, otherwise spread by CPU. */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		queue_idx = m->m_pkthdr.flowid;
	else
		queue_idx = curcpu;
	if (unlikely(queue_idx >= sc->num_tx_rings))
		queue_idx %= sc->num_tx_rings;

	pq = sc->queues + queue_idx;

	error = drbr_enqueue(ifp, pq->bufring, m);
	if (error) {
		/* ENOBUFS when the bufring is full */
		nm_prlim(1, "%s: drbr_enqueue() failed %d\n",
			__func__, error);
		pq->stats.errors ++;
		return (error);
	}

	if (if_getcapenable(ifp) & IFCAP_POLLING) {
		/* If polling is on, the transmit queues will be
		 * drained by the poller. */
		return (0);
	}

	error = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);

	return (error < 0 ? error : 0);
}
/*
 * Skip over all the slots of the (multi-fragment) packet starting at
 * 'head', following the NS_MOREFRAG chain, and return the index of the
 * first slot past it (or ring->tail if the chain is truncated).
 */
static unsigned int
ptnet_rx_discard(struct netmap_kring *kring, unsigned int head)
{
	struct netmap_ring *ring = kring->ring;
	unsigned int lim = kring->nkr_num_slots - 1;
	struct netmap_slot *slot = ring->slot + head;

	head = nm_next(head, lim);
	while ((slot->flags & NS_MOREFRAG) && head != ring->tail) {
		slot = ring->slot + head;
		head = nm_next(head, lim);
	}

	return head;
}
/*
 * Append 'nmbuf_len' bytes from the netmap buffer 'nmbuf' to the mbuf
 * chain ending at 'mtail', allocating additional cluster mbufs as the
 * current one fills up.  Returns the (possibly new) chain tail, or
 * NULL if an allocation failed (the caller frees the chain).
 *
 * Note: the do/while shape matters — even with nmbuf_len == 0, a full
 * tail mbuf (m_len == MCLBYTES) still triggers allocation of a fresh
 * cluster.
 */
static inline struct mbuf *
ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len)
{
	uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len;

	do {
		unsigned int copy;

		/* Current cluster is full: chain a new one. */
		if (mtail->m_len == MCLBYTES) {
			struct mbuf *mf;

			mf = m_getcl(M_NOWAIT, MT_DATA, 0);
			if (unlikely(!mf)) {
				return NULL;
			}

			mtail->m_next = mf;
			mtail = mf;
			mdata = mtod(mtail, uint8_t *);
			mtail->m_len = 0;
		}

		/* Copy as much as fits in the current cluster. */
		copy = MCLBYTES - mtail->m_len;
		if (nmbuf_len < copy) {
			copy = nmbuf_len;
		}

		memcpy(mdata, nmbuf, copy);

		nmbuf += copy;
		nmbuf_len -= copy;
		mdata += copy;
		mtail->m_len += copy;
	} while (nmbuf_len);

	return mtail;
}
/*
 * Receive-side processing: walk the netmap RX ring, rebuilding each
 * (possibly multi-slot) packet into an mbuf chain and delivering it to
 * the network stack.  At most 'budget' packets are handled; if
 * 'may_resched' is true, leftover work is deferred to the per-queue
 * taskqueue.  Returns the number of packets processed.
 *
 * Locking: runs under the queue lock, but the lock is DROPPED around
 * if_input(); ring->head is reloaded afterwards since ptnet_ring_update()
 * may have run in the meantime.
 */
static int
ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
{
	struct ptnet_softc *sc = pq->sc;
	bool have_vnet_hdr = sc->vnet_hdr_len;
	struct nm_csb_atok *atok = pq->atok;
	struct nm_csb_ktoa *ktoa = pq->ktoa;
	struct netmap_adapter *na = &sc->ptna->dr.up;
	struct netmap_kring *kring = na->rx_rings[pq->kring_id];
	struct netmap_ring *ring = kring->ring;
	unsigned int const lim = kring->nkr_num_slots - 1;
	unsigned int batch_count = 0;
	if_t ifp = sc->ifp;
	unsigned int count = 0;
	uint32_t head;

	PTNET_Q_LOCK(pq);

	if (unlikely(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
		goto unlock;
	}

	kring->nr_kflags &= ~NKR_PENDINTR;

	head = ring->head;
	while (count < budget) {
		uint32_t prev_head = head;
		struct mbuf *mhead, *mtail;
		struct virtio_net_hdr *vh;
		struct netmap_slot *slot;
		unsigned int nmbuf_len;
		uint8_t *nmbuf;
		int deliver = 1; /* the mbuf to the network stack. */
host_sync:
		if (head == ring->tail) {
			/* We ran out of slot, let's see if the host has
			 * added some, by reading hwcur and hwtail from
			 * the CSB. */
			ptnet_sync_tail(ktoa, kring);

			if (head == ring->tail) {
				/* Still no slots available. Reactivate
				 * interrupts as they were disabled by the
				 * host thread right before issuing the
				 * last interrupt. */
				atok->appl_need_kick = 1;

				/* Double check for more completed RX slots.
				 * We need a full barrier to prevent the store
				 * to atok->appl_need_kick to be reordered with
				 * the load from ktoa->hwcur and ktoa->hwtail
				 * (store-load barrier). */
				nm_stld_barrier();

				ptnet_sync_tail(ktoa, kring);
				if (likely(head == ring->tail)) {
					break;
				}
				atok->appl_need_kick = 0;
			}
		}

		/* Initialize ring state variables, possibly grabbing the
		 * virtio-net header. */
		slot = ring->slot + head;
		nmbuf = NMB(na, slot);
		nmbuf_len = slot->len;

		vh = (struct virtio_net_hdr *)nmbuf;
		if (have_vnet_hdr) {
			if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) {
				/* There is no good reason why host should
				 * put the header in multiple netmap slots.
				 * If this is the case, discard. */
				nm_prlim(1, "Fragmented vnet-hdr: dropping");
				head = ptnet_rx_discard(kring, head);
				pq->stats.iqdrops ++;
				deliver = 0;
				goto skip;
			}
			nm_prdis(1, "%s: vnet hdr: flags %x csum_start %u "
			      "csum_ofs %u hdr_len = %u gso_size %u "
			      "gso_type %x", __func__, vh->flags,
			      vh->csum_start, vh->csum_offset, vh->hdr_len,
			      vh->gso_size, vh->gso_type);
			nmbuf += PTNET_HDR_SIZE;
			nmbuf_len -= PTNET_HDR_SIZE;
		}

		/* Allocate the head of a new mbuf chain.
		 * We use m_getcl() to allocate an mbuf with standard cluster
		 * size (MCLBYTES). In the future we could use m_getjcl()
		 * to choose different sizes. */
		mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (unlikely(mhead == NULL)) {
			device_printf(sc->dev, "%s: failed to allocate mbuf "
				      "head\n", __func__);
			pq->stats.errors ++;
			break;
		}

		/* Initialize the mbuf state variables. */
		mhead->m_pkthdr.len = nmbuf_len;
		mtail->m_len = 0;

		/* Scan all the netmap slots containing the current packet. */
		for (;;) {
			DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag "
					  "len %u, flags %u\n", __func__,
					  head, ring->tail, slot->len,
					  slot->flags));

			mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len);
			if (unlikely(!mtail)) {
				/* Ouch. We ran out of memory while processing
				 * a packet. We have to restore the previous
				 * head position, free the mbuf chain, and
				 * schedule the taskqueue to give the packet
				 * another chance. */
				device_printf(sc->dev, "%s: failed to allocate"
					" mbuf frag, reset head %u --> %u\n",
					__func__, head, prev_head);
				head = prev_head;
				m_freem(mhead);
				pq->stats.errors ++;
				if (may_resched) {
					taskqueue_enqueue(pq->taskq,
							  &pq->task);
				}
				goto escape;
			}

			/* We have to increment head irrespective of the
			 * NS_MOREFRAG being set or not. */
			head = nm_next(head, lim);

			if (!(slot->flags & NS_MOREFRAG)) {
				break;
			}

			if (unlikely(head == ring->tail)) {
				/* The very last slot prepared by the host has
				 * the NS_MOREFRAG set. Drop it and continue
				 * the outer cycle (to do the double-check). */
				nm_prlim(1, "Incomplete packet: dropping");
				m_freem(mhead);
				pq->stats.iqdrops ++;
				goto host_sync;
			}

			slot = ring->slot + head;
			nmbuf = NMB(na, slot);
			nmbuf_len = slot->len;
			mhead->m_pkthdr.len += nmbuf_len;
		}

		mhead->m_pkthdr.rcvif = ifp;
		mhead->m_pkthdr.csum_flags = 0;

		/* Store the queue idx in the packet header. */
		mhead->m_pkthdr.flowid = pq->kring_id;
		M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE);

		if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
			struct ether_header *eh;

			eh = mtod(mhead, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				ptnet_vlan_tag_remove(mhead);
				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly.
				 */
				if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
					vh->csum_start -= ETHER_VLAN_ENCAP_LEN;
			}
		}

		if (unlikely(have_vnet_hdr && virtio_net_rx_csum(mhead, vh))) {
			m_freem(mhead);
			nm_prlim(1, "Csum offload error: dropping");
			pq->stats.iqdrops ++;
			deliver = 0;
		}
skip:
		count ++;
		if (++batch_count >= PTNET_RX_BATCH) {
			/* Some packets have been (or will be) pushed to the network
			 * stack. We need to update the CSB to tell the host about
			 * the new ring->cur and ring->head (RX buffer refill). */
			ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
			batch_count = 0;
		}

		if (likely(deliver)) {
			pq->stats.packets ++;
			pq->stats.bytes += mhead->m_pkthdr.len;

			PTNET_Q_UNLOCK(pq);
			if_input(ifp, mhead);
			PTNET_Q_LOCK(pq);
			/* The ring->head index (and related indices) are
			 * updated under pq lock by ptnet_ring_update().
			 * Since we dropped the lock to call if_input(), we
			 * must reload ring->head and restart processing the
			 * ring from there. */
			head = ring->head;

			if (unlikely(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
				/* The interface has gone down while we didn't
				 * have the lock. Stop any processing and exit. */
				goto unlock;
			}
		}
	}
escape:
	if (batch_count) {
		ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
	}

	if (count >= budget && may_resched) {
		/* If we ran out of budget or the double-check found new
		 * slots to process, schedule the taskqueue. */
		DBG(nm_prlim(1, "out of budget: resched h %u t %u\n",
					head, ring->tail));
		taskqueue_enqueue(pq->taskq, &pq->task);
	}
unlock:
	PTNET_Q_UNLOCK(pq);

	return count;
}
/* Taskqueue handler deferring RX work out of interrupt context. */
static void
ptnet_rx_task(void *context, int pending)
{
	struct ptnet_queue *queue = context;

	DBG(nm_prlim(1, "%s: pq #%u\n", __func__, queue->kring_id));
	ptnet_rx_eof(queue, PTNET_RX_BUDGET, true);
}
/* Taskqueue handler deferring TX drain out of interrupt context. */
static void
ptnet_tx_task(void *context, int pending)
{
	struct ptnet_queue *queue = context;

	DBG(nm_prlim(1, "%s: pq #%u\n", __func__, queue->kring_id));
	ptnet_drain_transmit_queue(queue, PTNET_TX_BUDGET, true);
}
#ifdef DEVICE_POLLING
/* We don't need to handle differently POLL_AND_CHECK_STATUS and
* POLL_ONLY, since we don't have an Interrupt Status Register. */
/*
 * DEVICE_POLLING handler: repeatedly scan all TX and RX queues, giving
 * each an equal share of the budget.  After the first full pass,
 * queues may "borrow" unused budget from the others (the 'borrow'
 * flag), until either the budget is exhausted or a full scan makes no
 * progress.  Returns the number of packets processed.
 */
static int
ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	unsigned int queue_budget;
	unsigned int count = 0;
	bool borrow = false;
	int i;

	KASSERT(sc->num_rings > 0, ("Found no queues in while polling ptnet"));
	/* Split the budget evenly, at least one packet per queue. */
	queue_budget = MAX(budget / sc->num_rings, 1);
	nm_prlim(1, "Per-queue budget is %d", queue_budget);

	while (budget) {
		unsigned int rcnt = 0;

		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (borrow) {
				queue_budget = MIN(queue_budget, budget);
				if (queue_budget == 0) {
					break;
				}
			}

			/* TX queues come first in sc->queues. */
			if (i < sc->num_tx_rings) {
				rcnt += ptnet_drain_transmit_queue(pq,
						   queue_budget, false);
			} else {
				rcnt += ptnet_rx_eof(pq, queue_budget,
						      false);
			}
		}

		if (!rcnt) {
			/* A scan of the queues gave no result, we can
			 * stop here. */
			break;
		}

		if (rcnt > budget) {
			/* This may happen when initial budget < sc->num_rings,
			 * since one packet budget is given to each queue
			 * anyway. Just pretend we didn't eat "so much". */
			rcnt = budget;
		}
		count += rcnt;
		budget -= rcnt;
		borrow = true;
	}

	return count;
}
#endif /* DEVICE_POLLING */
#endif /* WITH_PTNETMAP */
diff --git a/sys/dev/netmap/netmap_freebsd.c b/sys/dev/netmap/netmap_freebsd.c
index cf3578ddc7fe..215b1f7bd09e 100644
--- a/sys/dev/netmap/netmap_freebsd.c
+++ b/sys/dev/netmap/netmap_freebsd.c
@@ -1,1589 +1,1585 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "opt_inet.h"
#include "opt_inet6.h"
#include <sys/param.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/eventhandler.h>
#include <sys/jail.h>
#include <sys/poll.h> /* POLLIN, POLLOUT */
#include <sys/kernel.h> /* types used in module initialization */
#include <sys/conf.h> /* DEV_MODULE_ORDERED */
#include <sys/endian.h>
#include <sys/syscallsubr.h> /* kern_ioctl() */
#include <sys/rwlock.h>
#include <vm/vm.h> /* vtophys */
#include <vm/pmap.h> /* vtophys */
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>
#include <sys/malloc.h>
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <sys/kthread.h> /* kthread_add() */
#include <sys/proc.h> /* PROC_LOCK() */
#include <sys/unistd.h> /* RFNOWAIT */
#include <sys/sched.h> /* sched_bind() */
#include <sys/smp.h> /* mp_maxid */
#include <sys/taskqueue.h> /* taskqueue_enqueue(), taskqueue_create(), ... */
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h> /* IFT_ETHER */
#include <net/ethernet.h> /* ether_ifdetach */
#include <net/if_dl.h> /* LLADDR */
#include <machine/bus.h> /* bus_dmamap_* */
#include <netinet/in.h> /* in6_cksum_pseudo() */
#include <machine/in_cksum.h> /* in_pseudo(), in_cksum_hdr() */
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include <dev/netmap/netmap_mem2.h>
/* ======================== FREEBSD-SPECIFIC ROUTINES ================== */
/*
 * Taskqueue callback that delivers kqueue notifications for a netmap
 * selinfo, out of the netmap hot path.
 */
static void
nm_kqueue_notify(void *opaque, int pending)
{
	struct nm_selinfo *si = opaque;

	/* We use a non-zero hint to distinguish this notification call
	 * from the call done in kqueue_scan(), which uses hint=0.
	 */
	KNOTE_UNLOCKED(&si->si.si_note, /*hint=*/0x100);
}
/*
 * Initialize a netmap selinfo: a private single-thread taskqueue used
 * to deliver kqueue notifications, and the mutex protecting the knote
 * list.  Returns 0 on success.
 *
 * NOTE(review): the failure paths are inconsistent — -ENOMEM (negative)
 * vs the positive error from taskqueue_start_threads(); preserved as-is.
 */
int
nm_os_selinfo_init(NM_SELINFO_T *si, const char *name)
{
	int error;

	TASK_INIT(&si->ntfytask, 0, nm_kqueue_notify, si);
	si->ntfytq = taskqueue_create(name, M_NOWAIT,
	    taskqueue_thread_enqueue, &si->ntfytq);
	if (si->ntfytq == NULL)
		return (-ENOMEM);
	error = taskqueue_start_threads(&si->ntfytq, 1, PI_NET, "tq %s", name);
	if (error) {
		taskqueue_free(si->ntfytq);
		si->ntfytq = NULL;
		return (error);
	}

	snprintf(si->mtxname, sizeof(si->mtxname), "nmkl%s", name);
	mtx_init(&si->m, si->mtxname, NULL, MTX_DEF);
	knlist_init_mtx(&si->si.si_note, &si->m);
	si->kqueue_users = 0;

	return (0);
}
/*
 * Tear down a netmap selinfo initialized by nm_os_selinfo_init().
 * Safe to call on a selinfo whose init failed (ntfytq == NULL).
 * The teardown order matters: drain/free the taskqueue first, then the
 * knote list, and only then the mutex that backed it.
 */
void
nm_os_selinfo_uninit(NM_SELINFO_T *si)
{
	if (si->ntfytq == NULL) {
		return;	/* si was not initialized */
	}
	taskqueue_drain(si->ntfytq, &si->ntfytask);
	taskqueue_free(si->ntfytq);
	si->ntfytq = NULL;
	knlist_delete(&si->si.si_note, curthread, /*islocked=*/0);
	knlist_destroy(&si->si.si_note);
	/* now we don't need the mutex anymore */
	mtx_destroy(&si->m);
}
/* OS shim: non-sleeping, zeroed allocation from the M_DEVBUF pool. */
void *
nm_os_malloc(size_t size)
{
	return (malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO));
}
/* OS shim: non-sleeping, zeroed reallocation; old_size is unused on
 * FreeBSD (the kernel realloc tracks sizes itself). */
void *
nm_os_realloc(void *addr, size_t new_size, size_t old_size __unused)
{
	return (realloc(addr, new_size, M_DEVBUF, M_NOWAIT | M_ZERO));
}
/* OS shim: release memory obtained from nm_os_malloc()/nm_os_realloc(). */
void
nm_os_free(void *addr)
{
	free(addr, M_DEVBUF);
}
/* Acquire the global ifnet list lock (read side). */
void
nm_os_ifnet_lock(void)
{
	IFNET_RLOCK();
}

/* Release the global ifnet list lock (read side). */
void
nm_os_ifnet_unlock(void)
{
	IFNET_RUNLOCK();
}
/* Number of active netmap users of this module.
 * NOTE(review): updated without synchronization — presumably the
 * callers serialize; verify before relying on it. */
static int netmap_use_count = 0;

/* Increment the module use count. */
void
nm_os_get_module(void)
{
	netmap_use_count++;
}

/* Decrement the module use count. */
void
nm_os_put_module(void)
{
	netmap_use_count--;
}
/* ifnet arrival event: a previously-departed interface came back;
 * reconnect any netmap adapter that was zombified for it. */
static void
netmap_ifnet_arrival_handler(void *arg __unused, if_t ifp)
{
	netmap_undo_zombie(ifp);
}

/* ifnet departure event: detach netmap state from a disappearing
 * interface, turning its adapter into a zombie. */
static void
netmap_ifnet_departure_handler(void *arg __unused, if_t ifp)
{
	netmap_make_zombie(ifp);
}

/* Tags returned by EVENTHANDLER_REGISTER, needed for deregistration. */
static eventhandler_tag nm_ifnet_ah_tag;
static eventhandler_tag nm_ifnet_dh_tag;
/* Register the ifnet arrival/departure event handlers. Always 0. */
int
nm_os_ifnet_init(void)
{
	nm_ifnet_ah_tag =
		EVENTHANDLER_REGISTER(ifnet_arrival_event,
				netmap_ifnet_arrival_handler,
				NULL, EVENTHANDLER_PRI_ANY);
	nm_ifnet_dh_tag =
		EVENTHANDLER_REGISTER(ifnet_departure_event,
				netmap_ifnet_departure_handler,
				NULL, EVENTHANDLER_PRI_ANY);
	return 0;
}

/* Deregister the handlers installed by nm_os_ifnet_init(). */
void
nm_os_ifnet_fini(void)
{
	EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
			nm_ifnet_ah_tag);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
			nm_ifnet_dh_tag);
}
/* OS shim: return the interface MTU. */
unsigned
nm_os_ifnet_mtu(if_t ifp)
{
	return if_getmtu(ifp);
}
/*
 * Accumulate a raw Internet-checksum style sum over 'len' bytes,
 * treating the buffer as a sequence of big-endian 16-bit words and
 * starting from 'cur_sum'.  A trailing odd byte is summed in the
 * high-order position.  The unfolded sum is returned in host order
 * (fold with nm_os_csum_fold()).
 *
 * The bytes are read individually rather than through a uint16_t *
 * cast: the buffer may be misaligned (a fault on strict-alignment
 * machines) and the cast also violates strict-aliasing rules.
 * (data[i] << 8) | data[i+1] is exactly be16toh() of the word stored
 * at data + i, so the computed sum is unchanged.
 *
 * TODO XXX please use the FreeBSD implementation for this.
 */
rawsum_t
nm_os_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		cur_sum += ((uint32_t)data[i] << 8) | data[i + 1];
	if (len & 1)
		cur_sum += (uint32_t)data[len - 1] << 8;
	return cur_sum;
}
/* Fold a raw checksum: 'cur_sum' is in host byte order, while the
 * return value is in network byte order.
 */
uint16_t
nm_os_csum_fold(rawsum_t cur_sum)
{
	/* TODO XXX please use the FreeBSD implementation for this. */
	/* Repeatedly add the carries back in until the sum fits in 16
	 * bits, then take the one's complement and convert to big
	 * endian. */
	while (cur_sum >> 16)
		cur_sum = (cur_sum & 0xFFFF) + (cur_sum >> 16);

	return htobe16((~cur_sum) & 0xFFFF);
}
/* Compute the IPv4 header checksum (network byte order).
 * The in_cksum_hdr() fast path is disabled; the generic raw-sum/fold
 * pair is used instead. */
uint16_t nm_os_csum_ipv4(struct nm_iphdr *iph)
{
#if 0
	return in_cksum_hdr((void *)iph);
#else
	return nm_os_csum_fold(nm_os_csum_raw((uint8_t*)iph, sizeof(struct nm_iphdr), 0));
#endif
}
/*
 * Fill in the TCP/UDP checksum field 'check' for an IPv4 packet whose
 * L4 header + payload is [data, data + datalen).  Only available when
 * the kernel is built with INET; otherwise logs once and does nothing.
 */
void
nm_os_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data,
		      size_t datalen, uint16_t *check)
{
#ifdef INET
	/* in_pseudo() takes the (length + protocol) word this way — the
	 * standard FreeBSD idiom for building the pseudo-header sum. */
	uint16_t pseudolen = datalen + iph->protocol;

	/* Compute and insert the pseudo-header checksum. */
	*check = in_pseudo(iph->saddr, iph->daddr,
				 htobe16(pseudolen));
	/* Compute the checksum on TCP/UDP header + payload
	 * (includes the pseudo-header).
	 */
	/* NOTE(review): this relies on *check pointing inside [data,
	 * data+datalen), so the pseudo sum stored above is folded into
	 * the raw sum below — confirm against callers. */
	*check = nm_os_csum_fold(nm_os_csum_raw(data, datalen, 0));
#else
	static int notsupported = 0;
	if (!notsupported) {
		notsupported = 1;
		nm_prerr("inet4 segmentation not supported");
	}
#endif
}
/*
 * IPv6 counterpart of nm_os_csum_tcpudp_ipv4(): seed 'check' with the
 * IPv6 pseudo-header checksum, then fold the raw sum over the L4
 * header + payload.  Only available with INET6; otherwise logs once.
 */
void
nm_os_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data,
		      size_t datalen, uint16_t *check)
{
#ifdef INET6
	*check = in6_cksum_pseudo((void*)ip6h, datalen, ip6h->nexthdr, 0);
	*check = nm_os_csum_fold(nm_os_csum_raw(data, datalen, 0));
#else
	static int notsupported = 0;
	if (!notsupported) {
		notsupported = 1;
		nm_prerr("inet6 segmentation not supported");
	}
#endif
}
/* on FreeBSD we send up one packet at a time */
/* Deliver 'm' to the stack through the saved original if_input; 'prev'
 * (used for packet batching on other platforms) is ignored here, and
 * NULL is returned so the caller does not accumulate a batch. */
void *
nm_os_send_up(if_t ifp, struct mbuf *m, struct mbuf *prev)
{
	NA(ifp)->if_input(ifp, m);
	return NULL;
}
/* Non-zero when the mbuf requests any L4 (TCP/UDP/SCTP, v4 or v6)
 * checksum offload. */
int
nm_os_mbuf_has_csum_offld(struct mbuf *m)
{
	const int csum_mask = CSUM_TCP | CSUM_UDP | CSUM_SCTP |
	    CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_SCTP_IPV6;

	return (m->m_pkthdr.csum_flags & csum_mask);
}
/* Non-zero when the mbuf requests TCP segmentation offload. */
int
nm_os_mbuf_has_seg_offld(struct mbuf *m)
{
	return (m->m_pkthdr.csum_flags & CSUM_TSO);
}
/*
 * Replacement if_input installed by nm_os_catch_rx(): walk the
 * m_nextpkt chain, offering each packet to the emulated netmap adapter;
 * packets it does not steal are passed to the original if_input.
 */
static void
freebsd_generic_rx_handler(if_t ifp, struct mbuf *m)
{
	int stolen;

	if (unlikely(!NM_NA_VALID(ifp))) {
		nm_prlim(1, "Warning: RX packet intercepted, but no"
				" emulated adapter");
		return;
	}

	do {
		struct mbuf *n;

		/* Detach the head before handing it off: the consumer
		 * owns a single packet, not the chain. */
		n = m->m_nextpkt;
		m->m_nextpkt = NULL;
		stolen = generic_rx_handler(ifp, m);
		if (!stolen) {
			NA(ifp)->if_input(ifp, m);
		}
		m = n;
	} while (m != NULL);
}
/*
 * Intercept the rx routine in the standard device driver.
 * Second argument is non-zero to intercept, 0 to restore.
 * Always returns 0; na->if_input holds the saved original handler.
 */
int
nm_os_catch_rx(struct netmap_generic_adapter *gna, int intercept)
{
	struct netmap_adapter *na = &gna->up.up;
	if_t ifp = na->ifp;
	int ret = 0;

	nm_os_ifnet_lock();
	if (intercept) {
		if_setcapenablebit(ifp, IFCAP_NETMAP, 0);
		if_setinputfn(ifp, freebsd_generic_rx_handler);
	} else {
		if_setcapenablebit(ifp, 0, IFCAP_NETMAP);
		if_setinputfn(ifp, na->if_input);
	}
	nm_os_ifnet_unlock();

	return ret;
}
/*
 * Intercept the packet steering routine in the tx path,
 * so that we can decide which queue is used for an mbuf.
 * Second argument is non-zero to intercept, 0 to restore.
 * On freebsd we just intercept if_transmit; the original pointer is
 * saved in na->if_transmit for restoration.
 */
int
nm_os_catch_tx(struct netmap_generic_adapter *gna, int intercept)
{
	struct netmap_adapter *na = &gna->up.up;
	if_t ifp = netmap_generic_getifp(gna);

	nm_os_ifnet_lock();
	if (intercept) {
		na->if_transmit = if_gettransmitfn(ifp);
		if_settransmitfn(ifp, netmap_transmit);
	} else {
		if_settransmitfn(ifp, na->if_transmit);
	}
	nm_os_ifnet_unlock();

	return 0;
}
/*
* Transmit routine used by generic_netmap_txsync(). Returns 0 on success
* and non-zero on error (which may be packet drops or other errors).
* addr and len identify the netmap buffer, m is the (preallocated)
* mbuf to use for transmissions.
*
* Zero-copy transmission is possible if netmap is attached directly to a
* hardware interface: when cleaning we simply wait for the mbuf cluster
* refcount to decrement to 1, indicating that the driver has completed
* transmission and is done with the buffer. However, this approach can
* lead to queue deadlocks when attaching to software interfaces (e.g.,
* if_bridge) since we cannot rely on member ports to promptly reclaim
* transmitted mbufs. Since there is no easy way to distinguish these
* cases, we currently always copy the buffer.
*
* On multiqueue cards, we can force the queue using
* if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
* i = m->m_pkthdr.flowid % adapter->num_queues;
* else
* i = curcpu % adapter->num_queues;
*/
/*
 * Transmit one netmap buffer through the driver's if_transmit (see the
 * long comment above).  The netmap buffer (a->addr, a->len) is copied
 * into the preallocated mbuf 'a->m'.  Returns 0 on success, -1 on
 * error.
 */
int
nm_os_generic_xmit_frame(struct nm_os_gen_arg *a)
{
	int ret;
	u_int len = a->len;
	if_t ifp = a->ifp;
	struct mbuf *m = a->m;

	M_ASSERTPKTHDR(m);
	KASSERT((m->m_flags & M_EXT) != 0,
	    ("%s: mbuf %p has no cluster", __func__, m));

	if (MBUF_REFCNT(m) != 1) {
		nm_prerr("invalid refcnt %d for %p", MBUF_REFCNT(m), m);
		panic("in generic_xmit_frame");
	}
	if (unlikely(m->m_ext.ext_size < len)) {
		nm_prlim(2, "size %d < len %d", m->m_ext.ext_size, len);
		len = m->m_ext.ext_size;
	}

	m_copyback(m, 0, len, a->addr);
	m->m_len = m->m_pkthdr.len = len;
	/* Bump the refcount to 2 so the mbuf survives the driver's free
	 * and can be reused; the extra reference is dropped at reclaim
	 * time. */
	SET_MBUF_REFCNT(m, 2);
	/* Steer the mbuf to the ring's queue via the flow id. */
	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
	m->m_pkthdr.flowid = a->ring_nr;
	m->m_pkthdr.rcvif = ifp; /* used for tx notification */
	CURVNET_SET(if_getvnet(ifp));
	ret = NA(ifp)->if_transmit(ifp, m);
	CURVNET_RESTORE();
	return ret ? -1 : 0;
}
/* Public accessor: the netmap adapter attached to an interface. */
struct netmap_adapter *
netmap_getna(if_t ifp)
{
	return (NA(ifp));
}
/*
 * The following two functions are empty until we have a generic
 * way to extract the info from the ifp
 */
/* Stub: leaves *tx / *rx untouched and reports success. */
int
nm_os_generic_find_num_desc(if_t ifp, unsigned int *tx, unsigned int *rx)
{
	return 0;
}

/* Report the number of TX/RX rings for the emulated adapter: the
 * netmap_generic_rings tunable, defaulting to one ring pair. */
void
nm_os_generic_find_num_queues(if_t ifp, u_int *txq, u_int *rxq)
{
	unsigned num_rings = netmap_generic_rings ? netmap_generic_rings : 1;

	*txq = num_rings;
	*rxq = num_rings;
}
/* Declare which optional features the FreeBSD generic (emulated)
 * adapter supports. */
void
nm_os_generic_set_features(struct netmap_generic_adapter *gna)
{

	gna->rxsg = 1; /* Supported through m_copydata. */
	gna->txqdisc = 0; /* Not supported. */
}
/* Record the ring/adapter this mitigation context belongs to. */
void
nm_os_mitigation_init(struct nm_generic_mit *mit, int idx, struct netmap_adapter *na)
{
	mit->mit_pending = 0;
	mit->mit_ring_idx = idx;
	mit->mit_na = na;
}

/* Interrupt mitigation is not implemented on FreeBSD: the remaining
 * hooks are no-ops, and nm_os_mitigation_active() always reports
 * "not active". */
void
nm_os_mitigation_start(struct nm_generic_mit *mit)
{
}

void
nm_os_mitigation_restart(struct nm_generic_mit *mit)
{
}

int
nm_os_mitigation_active(struct nm_generic_mit *mit)
{

	return 0;
}

void
nm_os_mitigation_cleanup(struct nm_generic_mit *mit)
{
}
/* ioctl handler for persistent virtual interfaces: nothing is
 * configurable, reject everything. */
static int
nm_vi_dummy(if_t ifp, u_long cmd, caddr_t addr)
{

	return EINVAL;
}

/* The legacy if_start path must never be used on these interfaces. */
static void
nm_vi_start(if_t ifp)
{
	panic("nm_vi_start() must not be called");
}
/*
* Index manager of persistent virtual interfaces.
* It is used to decide the lowest byte of the MAC address.
* We use the same algorithm with management of bridge port index.
*/
#define NM_VI_MAX	255
static struct {
	/* Permutation of 0..NM_VI_MAX-1: the first 'active' entries are
	 * in use, the rest are free. */
	uint8_t index[NM_VI_MAX]; /* XXX just for a reasonable number */
	uint8_t active;		/* number of indices currently in use */
	struct mtx lock;	/* protects index[] and active */
} nm_vi_indices;
/* Set up the VI index pool: every index free, mutex initialized. */
void
nm_os_vi_init_index(void)
{
	int idx;

	for (idx = 0; idx < NM_VI_MAX; idx++)
		nm_vi_indices.index[idx] = idx;
	nm_vi_indices.active = 0;
	mtx_init(&nm_vi_indices.lock, "nm_vi_indices_lock", NULL, MTX_DEF);
}
/* return -1 if no index available */
static int
nm_vi_get_index(void)
{
	int ret = -1;

	mtx_lock(&nm_vi_indices.lock);
	if (nm_vi_indices.active != NM_VI_MAX)
		ret = nm_vi_indices.index[nm_vi_indices.active++];
	mtx_unlock(&nm_vi_indices.lock);
	return (ret);
}
/*
 * Return index 'val' to the pool: swap it with the last active slot
 * and shrink the active count.  Logs an error if 'val' is not active.
 */
static void
nm_vi_free_index(uint8_t val)
{
int i, lim;
mtx_lock(&nm_vi_indices.lock);
lim = nm_vi_indices.active;
for (i = 0; i < lim; i++) {
if (nm_vi_indices.index[i] == val) {
/* swap index[lim-1] and index[i] */
int tmp = nm_vi_indices.index[lim-1];
nm_vi_indices.index[lim-1] = val;
nm_vi_indices.index[i] = tmp;
nm_vi_indices.active--;
break;
}
}
/* 'active' unchanged means the loop never matched 'val'. */
if (lim == nm_vi_indices.active)
nm_prerr("Index %u not found", val);
mtx_unlock(&nm_vi_indices.lock);
}
#undef NM_VI_MAX
/*
* Implementation of a netmap-capable virtual interface that
* registered to the system.
* It is based on if_tap.c and ip_fw_log.c in FreeBSD 9.
*
* Note: Linux sets refcount to 0 on allocation of net_device,
* then increments it on registration to the system.
* FreeBSD sets refcount to 1 on if_alloc(), and does not
* increment this refcount on if_attach().
*/
/*
 * Create and ether_ifattach() a persistent netmap virtual interface.
 * The MAC is 00:be:<32-bit ticks>:<unit>, with <unit> taken from the
 * VI index pool.  Returns 0 with *ret set, or EBUSY if no index is free.
 */
int
nm_os_vi_persist(const char *name, if_t *ret)
{
if_t ifp;
u_short macaddr_hi;
uint32_t macaddr_mid;
u_char eaddr[6];
int unit = nm_vi_get_index(); /* just to decide MAC address */
if (unit < 0)
return EBUSY;
/*
 * We use the same MAC address generation method with tap
 * except for the highest octet is 00:be instead of 00:bd
 */
macaddr_hi = htons(0x00be); /* XXX tap + 1 */
macaddr_mid = (uint32_t) ticks;
bcopy(&macaddr_hi, eaddr, sizeof(short));
bcopy(&macaddr_mid, &eaddr[2], sizeof(uint32_t));
eaddr[5] = (uint8_t)unit;
/* if_alloc(9) cannot fail on modern FreeBSD, hence no NULL check. */
ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- nm_prerr("if_alloc failed");
- return ENOMEM;
- }
if_initname(ifp, name, IF_DUNIT_NONE);
if_setflags(ifp, IFF_UP | IFF_SIMPLEX | IFF_MULTICAST);
if_setinitfn(ifp, (void *)nm_vi_dummy);
if_setioctlfn(ifp, nm_vi_dummy);
if_setstartfn(ifp, nm_vi_start);
if_setmtu(ifp, ETHERMTU);
if_setsendqlen(ifp, ifqmaxlen);
if_setcapabilitiesbit(ifp, IFCAP_LINKSTATE, 0);
if_setcapenablebit(ifp, IFCAP_LINKSTATE, 0);
ether_ifattach(ifp, eaddr);
*ret = ifp;
return 0;
}
/* unregister from the system and drop the final refcount */
void
nm_os_vi_detach(if_t ifp)
{
	/* The last MAC octet is the VI index chosen in nm_os_vi_persist(). */
	nm_vi_free_index(((char *)if_getlladdr(ifp))[5]);
	ether_ifdetach(ifp);
	if_free(ifp);
}
#ifdef WITH_EXTMEM
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
/* State for a user-supplied external memory region mapped into the kernel. */
struct nm_os_extmem {
vm_object_t obj; /* referenced vm_object backing the region */
vm_offset_t kva; /* kernel virtual address of the mapping */
vm_offset_t size; /* mapping size in bytes */
uintptr_t scan; /* cursor for nm_os_extmem_nextpage() */
};
void
nm_os_extmem_delete(struct nm_os_extmem *e)
{
nm_prinf("freeing %zx bytes", (size_t)e->size);
vm_map_remove(kernel_map, e->kva, e->kva + e->size);
nm_os_free(e);
}
/* Page iterator: return the next page of the mapping, NULL when done. */
char *
nm_os_extmem_nextpage(struct nm_os_extmem *e)
{
	char *page;

	if (e->scan >= e->kva + e->size)
		return (NULL);
	page = (char *)e->scan;
	e->scan += PAGE_SIZE;
	return (page);
}
/* Two extmem handles are equal iff they wrap the same vm_object. */
int
nm_os_extmem_isequal(struct nm_os_extmem *e1, struct nm_os_extmem *e2)
{
	return (e1->obj == e2->obj);
}
/* Number of whole pages spanned by the mapping. */
int
nm_os_extmem_nr_pages(struct nm_os_extmem *e)
{
	return (e->size >> PAGE_SHIFT);
}
/*
 * Wrap the user mapping at address 'p' into an nm_os_extmem handle:
 * look up the backing vm_object, map it into the kernel map and wire
 * it so the memory survives even if all user processes munmap() it.
 * On failure returns NULL and stores an errno through 'perror'.
 */
struct nm_os_extmem *
nm_os_extmem_create(unsigned long p, struct nmreq_pools_info *pi, int *perror)
{
vm_map_t map;
vm_map_entry_t entry;
vm_object_t obj;
vm_prot_t prot;
vm_pindex_t index;
boolean_t wired;
struct nm_os_extmem *e = NULL;
int rv, error = 0;
e = nm_os_malloc(sizeof(*e));
if (e == NULL) {
error = ENOMEM;
goto out;
}
/* Resolve 'p' in the calling process' address space. */
map = &curthread->td_proc->p_vmspace->vm_map;
rv = vm_map_lookup(&map, p, VM_PROT_RW, &entry,
&obj, &index, &prot, &wired);
if (rv != KERN_SUCCESS) {
nm_prerr("address %lx not found", p);
error = vm_mmap_to_errno(rv);
goto out_free;
}
/* Hold a reference before dropping the lookup lock. */
vm_object_reference(obj);
/* check that we are given the whole vm_object ? */
vm_map_lookup_done(map, entry);
e->obj = obj;
/* Wire the memory and add the vm_object to the kernel map,
 * to make sure that it is not freed even if all the processes
 * that are mmap()ing should munmap() it.
 */
e->kva = vm_map_min(kernel_map);
e->size = obj->size << PAGE_SHIFT;
rv = vm_map_find(kernel_map, obj, 0, &e->kva, e->size, 0,
VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
VM_PROT_READ | VM_PROT_WRITE, 0);
if (rv != KERN_SUCCESS) {
nm_prerr("vm_map_find(%zx) failed", (size_t)e->size);
error = vm_mmap_to_errno(rv);
goto out_rel;
}
rv = vm_map_wire(kernel_map, e->kva, e->kva + e->size,
VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
if (rv != KERN_SUCCESS) {
nm_prerr("vm_map_wire failed");
error = vm_mmap_to_errno(rv);
goto out_rem;
}
e->scan = e->kva;
return e;
/* Unwind in reverse order of acquisition. */
out_rem:
vm_map_remove(kernel_map, e->kva, e->kva + e->size);
out_rel:
vm_object_deallocate(e->obj);
e->obj = NULL;
out_free:
nm_os_free(e);
out:
if (perror)
*perror = error;
return NULL;
}
#endif /* WITH_EXTMEM */
/* ================== PTNETMAP GUEST SUPPORT ==================== */
#ifdef WITH_PTNETMAP
#include <sys/bus.h>
#include <sys/rman.h>
#include <machine/bus.h> /* bus_dmamap_* */
#include <machine/resource.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
/*
 * ptnetmap memory device (memdev) for the FreeBSD guest,
 * used to expose host netmap memory to the guest through a PCI BAR.
 */
/*
* ptnetmap memdev private data structure
*/
struct ptnetmap_memdev {
device_t dev; /* back-pointer to the newbus device */
struct resource *pci_io; /* I/O BAR: memdev registers */
struct resource *pci_mem; /* memory BAR: host netmap memory */
struct netmap_mem_d *nm_mem; /* guest-side memory allocator */
};
static int ptn_memdev_probe(device_t);
static int ptn_memdev_attach(device_t);
static int ptn_memdev_detach(device_t);
static int ptn_memdev_shutdown(device_t);
/* newbus method table for the ptnetmap memory device. */
static device_method_t ptn_memdev_methods[] = {
DEVMETHOD(device_probe, ptn_memdev_probe),
DEVMETHOD(device_attach, ptn_memdev_attach),
DEVMETHOD(device_detach, ptn_memdev_detach),
DEVMETHOD(device_shutdown, ptn_memdev_shutdown),
DEVMETHOD_END
};
static driver_t ptn_memdev_driver = {
PTNETMAP_MEMDEV_NAME,
ptn_memdev_methods,
sizeof(struct ptnetmap_memdev),
};
/* We use (SI_ORDER_MIDDLE+1) here, see DEV_MODULE_ORDERED() invocation
 * below. */
DRIVER_MODULE_ORDERED(ptn_memdev, pci, ptn_memdev_driver, NULL, NULL,
SI_ORDER_MIDDLE + 1);
/*
* Map host netmap memory through PCI-BAR in the guest OS,
* returning physical (nm_paddr) and virtual (nm_addr) addresses
* of the netmap memory mapped in the guest.
*/
int
nm_os_pt_memdev_iomap(struct ptnetmap_memdev *ptn_dev, vm_paddr_t *nm_paddr,
void **nm_addr, uint64_t *mem_size)
{
int rid;
nm_prinf("ptn_memdev_driver iomap");
rid = PCIR_BAR(PTNETMAP_MEM_PCI_BAR);
/* Assemble the 64-bit size from the HI and LO 32-bit registers. */
*mem_size = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMSIZE_HI);
*mem_size = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMSIZE_LO) |
(*mem_size << 32);
/* map memory allocator */
ptn_dev->pci_mem = bus_alloc_resource(ptn_dev->dev, SYS_RES_MEMORY,
&rid, 0, ~0, *mem_size, RF_ACTIVE);
if (ptn_dev->pci_mem == NULL) {
*nm_paddr = 0;
*nm_addr = NULL;
return ENOMEM;
}
*nm_paddr = rman_get_start(ptn_dev->pci_mem);
*nm_addr = rman_get_virtual(ptn_dev->pci_mem);
nm_prinf("=== BAR %d start %lx len %lx mem_size %lx ===",
PTNETMAP_MEM_PCI_BAR,
(unsigned long)(*nm_paddr),
(unsigned long)rman_get_size(ptn_dev->pci_mem),
(unsigned long)*mem_size);
return (0);
}
/* Read a 32-bit register from the memdev I/O BAR. */
uint32_t
nm_os_pt_memdev_ioread(struct ptnetmap_memdev *ptn_dev, unsigned int reg)
{
	return (bus_read_4(ptn_dev->pci_io, reg));
}
/* Unmap host netmap memory. */
void
nm_os_pt_memdev_iounmap(struct ptnetmap_memdev *ptn_dev)
{
	nm_prinf("ptn_memdev_driver iounmap");
	if (ptn_dev->pci_mem != NULL) {
		bus_release_resource(ptn_dev->dev, SYS_RES_MEMORY,
		    PCIR_BAR(PTNETMAP_MEM_PCI_BAR), ptn_dev->pci_mem);
		ptn_dev->pci_mem = NULL;
	}
}
/* Device identification routine, return BUS_PROBE_DEFAULT on success,
 * positive on failure */
static int
ptn_memdev_probe(device_t dev)
{
	if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID ||
	    pci_get_device(dev) != PTNETMAP_PCI_DEVICE_ID)
		return (ENXIO);
	device_set_descf(dev, "%s PCI adapter", PTNETMAP_MEMDEV_NAME);
	return (BUS_PROBE_DEFAULT);
}
/* Device initialization routine: map the I/O BAR, read the host memory
 * id and attach the guest-side netmap memory allocator. */
static int
ptn_memdev_attach(device_t dev)
{
struct ptnetmap_memdev *ptn_dev;
int rid;
uint16_t mem_id;
ptn_dev = device_get_softc(dev);
ptn_dev->dev = dev;
pci_enable_busmaster(dev);
rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR);
ptn_dev->pci_io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
RF_ACTIVE);
if (ptn_dev->pci_io == NULL) {
device_printf(dev, "cannot map I/O space\n");
return (ENXIO);
}
mem_id = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMID);
/* create guest allocator */
ptn_dev->nm_mem = netmap_mem_pt_guest_attach(ptn_dev, mem_id);
if (ptn_dev->nm_mem == NULL) {
/* detach releases the I/O resource acquired above. */
ptn_memdev_detach(dev);
return (ENOMEM);
}
netmap_mem_get(ptn_dev->nm_mem);
nm_prinf("ptnetmap memdev attached, host memid: %u", mem_id);
return (0);
}
/* Device removal routine: drop the allocator reference and release the
 * PCI resources; safe to call with any subset already torn down. */
static int
ptn_memdev_detach(device_t dev)
{
struct ptnetmap_memdev *ptn_dev;
ptn_dev = device_get_softc(dev);
if (ptn_dev->nm_mem) {
nm_prinf("ptnetmap memdev detached, host memid %u",
netmap_mem_get_id(ptn_dev->nm_mem));
netmap_mem_put(ptn_dev->nm_mem);
ptn_dev->nm_mem = NULL;
}
if (ptn_dev->pci_mem) {
bus_release_resource(dev, SYS_RES_MEMORY,
PCIR_BAR(PTNETMAP_MEM_PCI_BAR), ptn_dev->pci_mem);
ptn_dev->pci_mem = NULL;
}
if (ptn_dev->pci_io) {
bus_release_resource(dev, SYS_RES_IOPORT,
PCIR_BAR(PTNETMAP_IO_PCI_BAR), ptn_dev->pci_io);
ptn_dev->pci_io = NULL;
}
return (0);
}
/* Delegate shutdown to the generic bus handler. */
static int
ptn_memdev_shutdown(device_t dev)
{
	return (bus_generic_shutdown(dev));
}
#endif /* WITH_PTNETMAP */
/*
* In order to track whether pages are still mapped, we hook into
* the standard cdev_pager and intercept the constructor and
* destructor.
*/
struct netmap_vm_handle_t {
struct cdev *dev; /* /dev/netmap cdev kept referenced while mapped */
struct netmap_priv_d *priv; /* per-open state the mapping belongs to */
};
/* Pager constructor: take a device reference for the lifetime of the
 * mapping; it is dropped in netmap_dev_pager_dtor(). */
static int
netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
struct netmap_vm_handle_t *vmh = handle;
if (netmap_verbose)
nm_prinf("handle %p size %jd prot %d foff %jd",
handle, (intmax_t)size, prot, (intmax_t)foff);
if (color)
*color = 0;
dev_ref(vmh->dev);
return 0;
}
/* Pager destructor: runs when the last mapping goes away; releases the
 * per-open netmap state and the device reference taken in the ctor. */
static void
netmap_dev_pager_dtor(void *handle)
{
struct netmap_vm_handle_t *vmh = handle;
struct cdev *dev = vmh->dev;
struct netmap_priv_d *priv = vmh->priv;
if (netmap_verbose)
nm_prinf("handle %p", handle);
netmap_dtor(priv);
free(vmh, M_DEVBUF);
dev_rel(dev);
}
/* Page fault handler: translate the file offset into the physical
 * address of the corresponding netmap memory and install a fake page. */
static int
netmap_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
int prot, vm_page_t *mres)
{
struct netmap_vm_handle_t *vmh = object->handle;
struct netmap_priv_d *priv = vmh->priv;
struct netmap_adapter *na = priv->np_na;
vm_paddr_t paddr;
vm_page_t page;
vm_memattr_t memattr;
nm_prdis("object %p offset %jd prot %d mres %p",
object, (intmax_t)offset, prot, mres);
memattr = object->memattr;
paddr = netmap_mem_ofstophys(na->nm_mem, offset);
if (paddr == 0)
return VM_PAGER_FAIL;
if (((*mres)->flags & PG_FICTITIOUS) != 0) {
/*
 * If the passed in result page is a fake page, update it with
 * the new physical address.
 */
page = *mres;
vm_page_updatefake(page, paddr, memattr);
} else {
/*
 * Replace the passed in reqpage page with our own fake page and
 * free up the all of the original pages.
 */
/* vm_page_getfake() may sleep: drop the object lock around it. */
VM_OBJECT_WUNLOCK(object);
page = vm_page_getfake(paddr, memattr);
VM_OBJECT_WLOCK(object);
vm_page_replace(page, object, (*mres)->pindex, *mres);
*mres = page;
}
page->valid = VM_PAGE_BITS_ALL;
return (VM_PAGER_OK);
}
/* cdev pager hooks used to track mappings of /dev/netmap. */
static struct cdev_pager_ops netmap_cdev_pager_ops = {
.cdev_pg_ctor = netmap_dev_pager_ctor,
.cdev_pg_dtor = netmap_dev_pager_dtor,
.cdev_pg_fault = netmap_dev_pager_fault,
};
/* mmap() entry point for /dev/netmap: allocate a device pager object
 * tied to the caller's per-open state, taking an extra reference on it
 * so netmap_dtor() is deferred until the mapping is gone. */
static int
netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *foff,
vm_size_t objsize, vm_object_t *objp, int prot)
{
int error;
struct netmap_vm_handle_t *vmh;
struct netmap_priv_d *priv;
vm_object_t obj;
if (netmap_verbose)
nm_prinf("cdev %p foff %jd size %jd objp %p prot %d", cdev,
(intmax_t )*foff, (intmax_t )objsize, objp, prot);
vmh = malloc(sizeof(struct netmap_vm_handle_t), M_DEVBUF,
M_NOWAIT | M_ZERO);
if (vmh == NULL)
return ENOMEM;
vmh->dev = cdev;
NMG_LOCK();
error = devfs_get_cdevpriv((void**)&priv);
if (error)
goto err_unlock;
/* np_nifp is set only after a successful NIOCREGIF. */
if (priv->np_nifp == NULL) {
error = EINVAL;
goto err_unlock;
}
vmh->priv = priv;
/* Extra reference: dropped by netmap_dev_pager_dtor(). */
priv->np_refs++;
NMG_UNLOCK();
obj = cdev_pager_allocate(vmh, OBJT_DEVICE,
&netmap_cdev_pager_ops, objsize, prot,
*foff, NULL);
if (obj == NULL) {
nm_prerr("cdev_pager_allocate failed");
error = EINVAL;
goto err_deref;
}
*objp = obj;
return 0;
err_deref:
NMG_LOCK();
priv->np_refs--;
err_unlock:
NMG_UNLOCK();
// err:
free(vmh, M_DEVBUF);
return error;
}
/*
* On FreeBSD the close routine is only called on the last close on
* the device (/dev/netmap) so we cannot do anything useful.
* To track close() on individual file descriptors we pass netmap_dtor() to
* devfs_set_cdevpriv() on open(). The FreeBSD kernel will call the destructor
* when the last fd pointing to the device is closed.
*
* Note that FreeBSD does not even munmap() on close() so we also have
* to track mmap() ourselves, and postpone the call to
* netmap_dtor() is called when the process has no open fds and no active
* memory maps on /dev/netmap, as in linux.
*/
/* Last-close hook: per-fd teardown happens via the cdevpriv dtor,
 * so there is nothing useful to do here beyond optional logging. */
static int
netmap_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	if (netmap_verbose)
		nm_prinf("dev %p fflag 0x%x devtype %d td %p",
		    dev, fflag, devtype, td);
	return (0);
}
/*
 * open() on /dev/netmap: allocate per-fd private state and register
 * netmap_dtor() as the cdevpriv destructor so it runs when the last
 * reference to this file descriptor is dropped.
 */
static int
netmap_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct netmap_priv_d *priv;
	int error = 0;

	(void)dev;
	(void)oflags;
	(void)devtype;
	(void)td;
	NMG_LOCK();
	priv = netmap_priv_new();
	if (priv == NULL) {
		error = ENOMEM;
	} else {
		error = devfs_set_cdevpriv(priv, netmap_dtor);
		if (error)
			netmap_priv_delete(priv);
	}
	NMG_UNLOCK();
	return (error);
}
/******************** kthread wrapper ****************/
#include <sys/sysproto.h>
/* Upper bound on the number of CPU ids in the system. */
u_int
nm_os_ncpus(void)
{
	return (mp_maxid + 1);
}
struct nm_kctx_ctx {
/* Userspace thread (kthread creator). */
struct thread *user_td;
/* worker function and parameter */
nm_kctx_worker_fn_t worker_fn;
void *worker_private;
struct nm_kctx *nmk; /* back-pointer to the owning context */
/* integer to manage multiple worker contexts (e.g., RX or TX on ptnetmap) */
long type;
};
/* Kernel-thread wrapper state. */
struct nm_kctx {
struct thread *worker; /* the kthread, NULL when not started */
struct mtx worker_lock;
struct nm_kctx_ctx worker_ctx;
int run; /* used to stop kthread */
int attach_user; /* kthread attached to user_process */
int affinity; /* CPU to bind to, -1 for none */
};
/* Main loop of the worker kthread: optionally bind to a CPU, then call
 * the worker function repeatedly until nmk->run is cleared. */
static void
nm_kctx_worker(void *data)
{
struct nm_kctx *nmk = data;
struct nm_kctx_ctx *ctx = &nmk->worker_ctx;
if (nmk->affinity >= 0) {
thread_lock(curthread);
sched_bind(curthread, nmk->affinity);
thread_unlock(curthread);
}
while (nmk->run) {
/*
 * check if the parent process dies
 * (when kthread is attached to user process)
 */
if (ctx->user_td) {
PROC_LOCK(curproc);
thread_suspend_check(0);
PROC_UNLOCK(curproc);
} else {
kthread_suspend_check();
}
/* Continuously execute worker process. */
ctx->worker_fn(ctx->worker_private); /* worker body */
}
kthread_exit();
}
/* Record the CPU the worker kthread should bind to (-1 means none). */
void
nm_os_kctx_worker_setaff(struct nm_kctx *nmk, int affinity)
{
	nmk->affinity = affinity;
}
/* Allocate and initialize a kernel-context descriptor; the kthread
 * itself is created later by nm_os_kctx_worker_start(). */
struct nm_kctx *
nm_os_kctx_create(struct nm_kctx_cfg *cfg, void *opaque)
{
	struct nm_kctx *nmk;

	nmk = malloc(sizeof(*nmk), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (nmk == NULL)
		return (NULL);
	mtx_init(&nmk->worker_lock, "nm_kthread lock", NULL, MTX_DEF);
	nmk->worker_ctx.worker_fn = cfg->worker_fn;
	nmk->worker_ctx.worker_private = cfg->worker_private;
	nmk->worker_ctx.type = cfg->type;
	nmk->affinity = -1;
	/* attach kthread to user process (ptnetmap) */
	nmk->attach_user = cfg->attach_user;
	return (nmk);
}
/* Start the worker kthread.  NB: currently short-circuited to
 * EOPNOTSUPP (see comment below); the code after the early return is
 * intentionally kept for when the crash is fixed. */
int
nm_os_kctx_worker_start(struct nm_kctx *nmk)
{
struct proc *p = NULL;
int error = 0;
/* Temporarily disable this function as it is currently broken
 * and causes kernel crashes. The failure can be triggered by
 * the "vale_polling_enable_disable" test in ctrl-api-test.c. */
return EOPNOTSUPP;
if (nmk->worker)
return EBUSY;
/* check if we want to attach kthread to user process */
if (nmk->attach_user) {
nmk->worker_ctx.user_td = curthread;
p = curthread->td_proc;
}
/* enable kthread main loop */
nmk->run = 1;
/* create kthread */
if((error = kthread_add(nm_kctx_worker, nmk, p,
&nmk->worker, RFNOWAIT /* to be checked */, 0, "nm-kthread-%ld",
nmk->worker_ctx.type))) {
goto err;
}
nm_prinf("nm_kthread started td %p", nmk->worker);
return 0;
err:
nm_prerr("nm_kthread start failed err %d", error);
nmk->worker = NULL;
return error;
}
/* Ask the worker kthread to leave its main loop, then forget it. */
void
nm_os_kctx_worker_stop(struct nm_kctx *nmk)
{
	if (nmk->worker == NULL)
		return;
	/* tell the kthread to exit from the main loop */
	nmk->run = 0;
	/* wake up the kthread if it sleeps */
	kthread_resume(nmk->worker);
	nmk->worker = NULL;
}
void
nm_os_kctx_destroy(struct nm_kctx *nmk)
{
if (!nmk)
return;
if (nmk->worker)
nm_os_kctx_worker_stop(nmk);
free(nmk, M_DEVBUF);
}
/******************** kqueue support ****************/
/*
* In addition to calling selwakeuppri(), nm_os_selwakeup() also
* needs to call knote() to wake up kqueue listeners.
* This operation is deferred to a taskqueue in order to avoid possible
* lock order reversals; these may happen because knote() grabs a
* private lock associated to the 'si' (see struct selinfo,
* struct nm_selinfo, and nm_os_selinfo_init), and nm_os_selwakeup()
* can be called while holding the lock associated to a different
* 'si'.
* When calling knote() we use a non-zero 'hint' argument to inform
* the netmap_knrw() function that it is being called from
* 'nm_os_selwakeup'; this is necessary because when netmap_knrw() is
* called by the kevent subsystem (i.e. kevent_scan()) we also need to
* call netmap_poll().
*
* The netmap_kqfilter() function registers one or another f_event
* depending on read or write mode. A pointer to the struct
* 'netmap_priv_d' is stored into kn->kn_hook, so that it can later
* be passed to netmap_poll(). We pass NULL as a third argument to
* netmap_poll(), so that the latter only runs the txsync/rxsync
* (if necessary), and skips the nm_os_selrecord() calls.
*/
/* Wake up poll/select sleepers; kqueue listeners are notified via a
 * taskqueue to avoid lock order reversals (see comment above). */
void
nm_os_selwakeup(struct nm_selinfo *si)
{
	selwakeuppri(&si->si, PI_NET);
	if (si->kqueue_users > 0)
		taskqueue_enqueue(si->ntfytq, &si->ntfytask);
}
/* Record the calling thread on the selinfo for later wakeup. */
void
nm_os_selrecord(struct thread *td, struct nm_selinfo *si)
{
	selrecord(td, &si->si);
}
/* Detach a read-filter knote: remove it from the RX knlist and drop
 * the kqueue user count under the global netmap lock. */
static void
netmap_knrdetach(struct knote *kn)
{
struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
struct nm_selinfo *si = priv->np_si[NR_RX];
knlist_remove(&si->si.si_note, kn, /*islocked=*/0);
NMG_LOCK();
KASSERT(si->kqueue_users > 0, ("kqueue_user underflow on %s",
si->mtxname));
si->kqueue_users--;
nm_prinf("kqueue users for %s: %d", si->mtxname, si->kqueue_users);
NMG_UNLOCK();
}
/*
 * Detach a write-filter knote: remove it from the TX knlist and drop
 * the kqueue user count under the global netmap lock.  Mirrors
 * netmap_knrdetach(), including the underflow assertion that was
 * previously missing here.
 */
static void
netmap_knwdetach(struct knote *kn)
{
	struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
	struct nm_selinfo *si = priv->np_si[NR_TX];

	knlist_remove(&si->si.si_note, kn, /*islocked=*/0);
	NMG_LOCK();
	KASSERT(si->kqueue_users > 0, ("kqueue_user underflow on %s",
	    si->mtxname));
	si->kqueue_users--;
	nm_prinf("kqueue users for %s: %d", si->mtxname, si->kqueue_users);
	NMG_UNLOCK();
}
/*
 * Callback triggered by netmap notifications (see netmap_notify()),
 * and by the application calling kevent(). In the former case we
 * just return 1 (events ready), since we are not able to do better.
 * In the latter case we use netmap_poll() to see which events are
 * ready.
 */
static int
netmap_knrw(struct knote *kn, long hint, int events)
{
	struct netmap_priv_d *priv;
	int revents;

	/*
	 * A non-zero hint means netmap_notify() called us, typically
	 * from a thread other than the one issuing kevent(); assume
	 * the events are ready.
	 */
	if (hint != 0)
		return (1);
	/* Called from kevent(): ask netmap_poll() what is ready. */
	priv = kn->kn_hook;
	revents = netmap_poll(priv, events, /*thread=*/NULL);
	return ((events & revents) != 0 ? 1 : 0);
}
/* EVFILT_READ event callback. */
static int
netmap_knread(struct knote *kn, long hint)
{
	return (netmap_knrw(kn, hint, POLLIN));
}
/* EVFILT_WRITE event callback. */
static int
netmap_knwrite(struct knote *kn, long hint)
{
	return (netmap_knrw(kn, hint, POLLOUT));
}
/* kqueue filter operations for read (RX) and write (TX) events. */
static struct filterops netmap_rfiltops = {
.f_isfd = 1,
.f_detach = netmap_knrdetach,
.f_event = netmap_knread,
};
static struct filterops netmap_wfiltops = {
.f_isfd = 1,
.f_detach = netmap_knwdetach,
.f_event = netmap_knwrite,
};
/*
* This is called when a thread invokes kevent() to record
* a change in the configuration of the kqueue().
* The 'priv' is the one associated to the open netmap device.
*/
static int
netmap_kqfilter(struct cdev *dev, struct knote *kn)
{
struct netmap_priv_d *priv;
int error;
struct netmap_adapter *na;
struct nm_selinfo *si;
int ev = kn->kn_filter;
/* Only read and write filters are supported. */
if (ev != EVFILT_READ && ev != EVFILT_WRITE) {
nm_prerr("bad filter request %d", ev);
return 1;
}
error = devfs_get_cdevpriv((void**)&priv);
if (error) {
nm_prerr("device not yet setup");
return 1;
}
na = priv->np_na;
if (na == NULL) {
nm_prerr("no netmap adapter for this file descriptor");
return 1;
}
/* the si is indicated in the priv */
si = priv->np_si[(ev == EVFILT_WRITE) ? NR_TX : NR_RX];
kn->kn_fop = (ev == EVFILT_WRITE) ?
&netmap_wfiltops : &netmap_rfiltops;
/* kn_hook carries the priv so f_event can call netmap_poll(). */
kn->kn_hook = priv;
NMG_LOCK();
si->kqueue_users++;
nm_prinf("kqueue users for %s: %d", si->mtxname, si->kqueue_users);
NMG_UNLOCK();
knlist_add(&si->si.si_note, kn, /*islocked=*/0);
return 0;
}
/* cdevsw poll handler: recover the per-fd state, forward to netmap_poll(). */
static int
freebsd_netmap_poll(struct cdev *cdevi __unused, int events, struct thread *td)
{
	struct netmap_priv_d *priv;

	if (devfs_get_cdevpriv((void **)&priv) != 0)
		return (POLLERR);
	return (netmap_poll(priv, events, td));
}
/* cdevsw ioctl handler: set the vnet context and forward to netmap_ioctl(). */
static int
freebsd_netmap_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t data,
    int ffla __unused, struct thread *td)
{
	struct netmap_priv_d *priv;
	int error;

	CURVNET_SET(TD_TO_VNET(td));
	error = devfs_get_cdevpriv((void **)&priv);
	if (error != 0) {
		/* XXX ENOENT should be impossible, since the priv
		 * is now created in the open */
		if (error == ENOENT)
			error = ENXIO;
	} else {
		error = netmap_ioctl(priv, cmd, data, td, /*nr_body_is_user=*/1);
	}
	CURVNET_RESTORE();
	return (error);
}
/* Advertise netmap capability when an interface attaches. */
void
nm_os_onattach(if_t ifp)
{
	if_setcapabilitiesbit(ifp, IFCAP_NETMAP, 0);
}
/* Enter netmap mode: save the native if_transmit, install ours. */
void
nm_os_onenter(if_t ifp)
{
	struct netmap_adapter *na = NA(ifp);

	/* Saved first so nm_os_onexit() can restore it. */
	na->if_transmit = if_gettransmitfn(ifp);
	if_settransmitfn(ifp, netmap_transmit);
	if_setcapenablebit(ifp, IFCAP_NETMAP, 0);
}
/* Leave netmap mode: restore the native if_transmit. */
void
nm_os_onexit(if_t ifp)
{
	struct netmap_adapter *na = NA(ifp);

	if_settransmitfn(ifp, na->if_transmit);
	if_setcapenablebit(ifp, 0, IFCAP_NETMAP);
}
/* Character device switch for /dev/netmap. */
extern struct cdevsw netmap_cdevsw; /* XXX used in netmap.c, should go elsewhere */
struct cdevsw netmap_cdevsw = {
.d_version = D_VERSION,
.d_name = "netmap",
.d_open = netmap_open,
.d_mmap_single = netmap_mmap_single,
.d_ioctl = freebsd_netmap_ioctl,
.d_poll = freebsd_netmap_poll,
.d_kqfilter = netmap_kqfilter,
.d_close = netmap_close,
};
/*--- end of kqueue support ----*/
/*
* Kernel entry point.
*
* Initialize/finalize the module and return.
*
* Return 0 on success, errno on failure.
*/
/* Module event handler: init on load, refuse unload while in use. */
static int
netmap_loader(__unused struct module *module, int event, __unused void *arg)
{
	int error;

	switch (event) {
	case MOD_LOAD:
		error = netmap_init();
		break;
	case MOD_UNLOAD:
		/*
		 * if some one is still using netmap,
		 * then the module can not be unloaded.
		 */
		if (netmap_use_count) {
			nm_prerr("netmap module can not be unloaded - netmap_use_count: %d",
			    netmap_use_count);
			error = EBUSY;
			break;
		}
		netmap_fini();
		error = 0;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}
#ifdef DEV_MODULE_ORDERED
/*
* The netmap module contains three drivers: (i) the netmap character device
* driver; (ii) the ptnetmap memdev PCI device driver, (iii) the ptnet PCI
* device driver. The attach() routines of both (ii) and (iii) need the
* lock of the global allocator, and such lock is initialized in netmap_init(),
* which is part of (i).
* Therefore, we make sure that (i) is loaded before (ii) and (iii), using
* the 'order' parameter of driver declaration macros. For (i), we specify
* SI_ORDER_MIDDLE, while higher orders are used with the DRIVER_MODULE_ORDERED
* macros for (ii) and (iii).
*/
DEV_MODULE_ORDERED(netmap, netmap_loader, NULL, SI_ORDER_MIDDLE);
#else /* !DEV_MODULE_ORDERED */
DEV_MODULE(netmap, netmap_loader, NULL);
#endif /* DEV_MODULE_ORDERED */
MODULE_DEPEND(netmap, pci, 1, 1, 1);
MODULE_VERSION(netmap, 1);
/* reduce conditional code */
// linux API, use for the knlist in FreeBSD
/* use a private mutex for the knlist */
diff --git a/sys/dev/nfe/if_nfe.c b/sys/dev/nfe/if_nfe.c
index 901bdd5e1e6e..23a743759bdd 100644
--- a/sys/dev/nfe/if_nfe.c
+++ b/sys/dev/nfe/if_nfe.c
@@ -1,3349 +1,3344 @@
/* $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */
/*-
* Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
* Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
* Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
#include <sys/cdefs.h>
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/bpf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/nfe/if_nfereg.h>
#include <dev/nfe/if_nfevar.h>
MODULE_DEPEND(nfe, pci, 1, 1, 1);
MODULE_DEPEND(nfe, ether, 1, 1, 1);
MODULE_DEPEND(nfe, miibus, 1, 1, 1);
/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
static int nfe_probe(device_t);
static int nfe_attach(device_t);
static int nfe_detach(device_t);
static int nfe_suspend(device_t);
static int nfe_resume(device_t);
static int nfe_shutdown(device_t);
static int nfe_can_use_msix(struct nfe_softc *);
static int nfe_detect_msik9(struct nfe_softc *);
static void nfe_power(struct nfe_softc *);
static int nfe_miibus_readreg(device_t, int, int);
static int nfe_miibus_writereg(device_t, int, int, int);
static void nfe_miibus_statchg(device_t);
static void nfe_mac_config(struct nfe_softc *, struct mii_data *);
static void nfe_set_intr(struct nfe_softc *);
static __inline void nfe_enable_intr(struct nfe_softc *);
static __inline void nfe_disable_intr(struct nfe_softc *);
static int nfe_ioctl(if_t, u_long, caddr_t);
static void nfe_alloc_msix(struct nfe_softc *, int);
static int nfe_intr(void *);
static void nfe_int_task(void *, int);
static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
static int nfe_newbuf(struct nfe_softc *, int);
static int nfe_jnewbuf(struct nfe_softc *, int);
static int nfe_rxeof(struct nfe_softc *, int, int *);
static int nfe_jrxeof(struct nfe_softc *, int, int *);
static void nfe_txeof(struct nfe_softc *);
static int nfe_encap(struct nfe_softc *, struct mbuf **);
static void nfe_setmulti(struct nfe_softc *);
static void nfe_start(if_t);
static void nfe_start_locked(if_t);
static void nfe_watchdog(if_t);
static void nfe_init(void *);
static void nfe_init_locked(void *);
static void nfe_stop(if_t);
static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int nfe_ifmedia_upd(if_t);
static void nfe_ifmedia_sts(if_t, struct ifmediareq *);
static void nfe_tick(void *);
static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
static void nfe_sysctl_node(struct nfe_softc *);
static void nfe_stats_clear(struct nfe_softc *);
static void nfe_stats_update(struct nfe_softc *);
static void nfe_set_linkspeed(struct nfe_softc *);
static void nfe_set_wol(struct nfe_softc *);
#ifdef NFE_DEBUG
static int nfedebug = 0;
/* Debug printf, enabled when the nfedebug variable is non-zero. */
#define DPRINTF(sc, ...) do { \
if (nfedebug) \
device_printf((sc)->nfe_dev, __VA_ARGS__); \
} while (0)
/* Leveled debug printf: prints when nfedebug >= n. */
#define DPRINTFN(sc, n, ...) do { \
if (nfedebug >= (n)) \
device_printf((sc)->nfe_dev, __VA_ARGS__); \
} while (0)
#else
#define DPRINTF(sc, ...)
#define DPRINTFN(sc, n, ...)
#endif
/* Softc mutex helpers. */
#define NFE_LOCK(_sc) mtx_lock(&(_sc)->nfe_mtx)
#define NFE_UNLOCK(_sc) mtx_unlock(&(_sc)->nfe_mtx)
#define NFE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
static int jumbo_disable = 0;
TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
/* newbus method table for the nfe(4) driver. */
static device_method_t nfe_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, nfe_probe),
DEVMETHOD(device_attach, nfe_attach),
DEVMETHOD(device_detach, nfe_detach),
DEVMETHOD(device_suspend, nfe_suspend),
DEVMETHOD(device_resume, nfe_resume),
DEVMETHOD(device_shutdown, nfe_shutdown),
/* MII interface */
DEVMETHOD(miibus_readreg, nfe_miibus_readreg),
DEVMETHOD(miibus_writereg, nfe_miibus_writereg),
DEVMETHOD(miibus_statchg, nfe_miibus_statchg),
DEVMETHOD_END
};
static driver_t nfe_driver = {
"nfe",
nfe_methods,
sizeof(struct nfe_softc)
};
/* Attach to the PCI bus, and hang miibus off the driver. */
DRIVER_MODULE(nfe, pci, nfe_driver, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, 0, 0);
/*
 * Supported controller table: PCI vendor/device ID pairs with the
 * probe description string.  Terminated by a NULL-name sentinel.
 */
static struct nfe_type nfe_devs[] = {
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	"NVIDIA nForce MCP Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	"NVIDIA nForce2 MCP2 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
	"NVIDIA nForce2 400 MCP4 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
	"NVIDIA nForce2 400 MCP5 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	"NVIDIA nForce3 MCP3 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
	"NVIDIA nForce3 250 MCP6 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	"NVIDIA nForce3 MCP7 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
	"NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
	"NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	"NVIDIA nForce MCP04 Networking Adapter"},		/* MCP10 */
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	"NVIDIA nForce MCP04 Networking Adapter"},		/* MCP11 */
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
	"NVIDIA nForce 430 MCP12 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
	"NVIDIA nForce 430 MCP13 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	"NVIDIA nForce MCP55 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	"NVIDIA nForce MCP55 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	"NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	"NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	"NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	"NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	"NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	"NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	"NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	"NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	"NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	"NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	"NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	"NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
	"NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
	"NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
	"NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
	"NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
	"NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
	"NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
	"NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
	"NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
	"NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
	"NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
	"NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
	"NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN,
	"NVIDIA nForce MCP89 Networking Adapter"},
	{0, 0, NULL}
};
/*
 * Device probe: match this device's PCI vendor/device ID pair against
 * the nfe_devs[] table of supported NVIDIA MCP Ethernet controllers.
 * Returns BUS_PROBE_DEFAULT on a match, ENXIO otherwise.
 */
static int
nfe_probe(device_t dev)
{
	struct nfe_type *t;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (t = nfe_devs; t->name != NULL; t++) {
		if (vendor == t->vid_id && devid == t->dev_id) {
			device_set_desc(dev, t->name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}
/*
 * Attempt to set the device up for MSI-X.  Maps the MSI-X table
 * (BAR 2) and pending-bit array (BAR 3), then asks the bus for
 * 'count' vectors.  On any shortfall every acquired resource is
 * released and sc->nfe_msix stays 0 so attach falls back to MSI/INTx.
 */
static void
nfe_alloc_msix(struct nfe_softc *sc, int count)
{
	int rid;

	rid = PCIR_BAR(2);
	sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (sc->nfe_msix_res == NULL) {
		device_printf(sc->nfe_dev,
		    "couldn't allocate MSIX table resource\n");
		return;
	}
	rid = PCIR_BAR(3);
	sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->nfe_msix_pba_res == NULL) {
		device_printf(sc->nfe_dev,
		    "couldn't allocate MSIX PBA resource\n");
		/* Undo the BAR 2 mapping before giving up. */
		bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
		    sc->nfe_msix_res);
		sc->nfe_msix_res = NULL;
		return;
	}

	if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
		/* Only the exact expected vector count is acceptable. */
		if (count == NFE_MSI_MESSAGES) {
			if (bootverbose)
				device_printf(sc->nfe_dev,
				    "Using %d MSIX messages\n", count);
			sc->nfe_msix = 1;
		} else {
			if (bootverbose)
				device_printf(sc->nfe_dev,
				    "couldn't allocate MSIX\n");
			pci_release_msi(sc->nfe_dev);
			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
			    PCIR_BAR(3), sc->nfe_msix_pba_res);
			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
			    PCIR_BAR(2), sc->nfe_msix_res);
			sc->nfe_msix_pba_res = NULL;
			sc->nfe_msix_res = NULL;
		}
	}
}
/*
 * Detect the MSI K9N6PGM2-V2 (MS-7309) board via its SMBIOS planar
 * (baseboard) maker/product strings.  Returns 1 on a match, 0
 * otherwise; attach uses this to pin the PHY location.
 */
static int
nfe_detect_msik9(struct nfe_softc *sc)
{
	char *board_maker, *board_product;
	int match;

	match = 0;
	board_maker = kern_getenv("smbios.planar.maker");
	board_product = kern_getenv("smbios.planar.product");
	if (board_maker != NULL && board_product != NULL &&
	    strcmp(board_maker, "MSI") == 0 &&
	    strcmp(board_product, "K9N6PGM2-V2 (MS-7309)") == 0)
		match = 1;
	/* kern_getenv() strings are allocated; always release them. */
	if (board_maker != NULL)
		freeenv(board_maker);
	if (board_product != NULL)
		freeenv(board_product);
	return (match);
}
/*
 * Device attach: initialize the softc mutex and tick callout, map the
 * register BAR, tune the PCIe read-request size, allocate MSI-X/MSI/
 * INTx interrupt resources, derive per-chip feature flags from the
 * PCI device ID, create the parent DMA tag and Tx/Rx(/jumbo) rings,
 * set up the ifnet and PHY, and hook the interrupt handlers.  On any
 * failure past the BAR mapping, jumps to 'fail' where nfe_detach()
 * unwinds all partially-acquired state.
 */
static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc;
	if_t ifp;
	bus_addr_t dma_addr_max;
	int error = 0, i, msic, phyloc, reg, rid;

	sc = device_get_softc(dev);
	sc->nfe_dev = dev;

	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);

	pci_enable_busmaster(dev);

	/* Map the device register window (BAR 0). */
	rid = PCIR_BAR(0);
	sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->nfe_res[0] == NULL) {
		device_printf(dev, "couldn't map memory resources\n");
		mtx_destroy(&sc->nfe_mtx);
		return (ENXIO);
	}
	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
		uint16_t v, width;

		v = pci_read_config(dev, reg + 0x08, 2);
		/* Change max. read request size to 4096. */
		v &= ~(7 << 12);
		v |= (5 << 12);
		pci_write_config(dev, reg + 0x08, v, 2);

		v = pci_read_config(dev, reg + 0x0c, 2);
		/* link capability */
		v = (v >> 4) & 0x0f;
		width = pci_read_config(dev, reg + 0x12, 2);
		/* negotiated link width */
		width = (width >> 4) & 0x3f;
		if (v != width)
			device_printf(sc->nfe_dev,
			    "warning, negotiated width of link(x%d) != "
			    "max. width of link(x%d)\n", width, v);
	}

	/* Some boards have broken MSI/MSI-X; force INTx on those. */
	if (nfe_can_use_msix(sc) == 0) {
		device_printf(sc->nfe_dev,
		    "MSI/MSI-X capability black-listed, will use INTx\n");
		msix_disable = 1;
		msi_disable = 1;
	}

	/* Allocate interrupt */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 &&
		    (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
			nfe_alloc_msix(sc, msic);
		/* Fall back to plain MSI if MSI-X was not set up. */
		if (msi_disable == 0 && sc->nfe_msix == 0 &&
		    (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
		    pci_alloc_msi(dev, &msic) == 0) {
			if (msic == NFE_MSI_MESSAGES) {
				if (bootverbose)
					device_printf(dev,
					    "Using %d MSI messages\n", msic);
				sc->nfe_msi = 1;
			} else
				pci_release_msi(dev);
		}
	}

	if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
		/* Legacy shared INTx line. */
		rid = 0;
		sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (sc->nfe_irq[0] == NULL) {
			device_printf(dev, "couldn't allocate IRQ resources\n");
			error = ENXIO;
			goto fail;
		}
	} else {
		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
			sc->nfe_irq[i] = bus_alloc_resource_any(dev,
			    SYS_RES_IRQ, &rid, RF_ACTIVE);
			if (sc->nfe_irq[i] == NULL) {
				device_printf(dev,
				    "couldn't allocate IRQ resources for "
				    "message %d\n", rid);
				error = ENXIO;
				goto fail;
			}
		}
		/* Map interrupts to vector 0. */
		if (sc->nfe_msix != 0) {
			NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
			NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
		} else if (sc->nfe_msi != 0) {
			NFE_WRITE(sc, NFE_MSI_MAP0, 0);
			NFE_WRITE(sc, NFE_MSI_MAP1, 0);
		}
	}

	/* Set IRQ status/mask register. */
	sc->nfe_irq_status = NFE_IRQ_STATUS;
	sc->nfe_irq_mask = NFE_IRQ_MASK;
	sc->nfe_intrs = NFE_IRQ_WANTED;
	sc->nfe_nointrs = 0;
	if (sc->nfe_msix != 0) {
		sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
		sc->nfe_nointrs = NFE_IRQ_WANTED;
	} else if (sc->nfe_msi != 0) {
		sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
		sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
	}

	/* Derive per-chip feature flags from the PCI device ID. */
	sc->nfe_devid = pci_get_device(dev);
	sc->nfe_revid = pci_get_revid(dev);
	sc->nfe_flags = 0;

	switch (sc->nfe_devid) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_MIB_V1;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
		    NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		/* XXX flow control */
		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
		    NFE_CORRECT_MACADDR | NFE_MIB_V3;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
		/* XXX flow control */
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL |
		    NFE_MIB_V2;
		break;
	}

	nfe_power(sc);
	/* Check for reversed ethernet address */
	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
		sc->nfe_flags |= NFE_CORRECT_MACADDR;
	nfe_get_macaddr(sc, sc->eaddr);
	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
	if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
		dma_addr_max = NFE_DMA_MAXADDR;
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->nfe_dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    dma_addr_max,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT, 0,		/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->nfe_parent_tag);
	if (error)
		goto fail;

	ifp = sc->nfe_ifp = if_gethandle(IFT_ETHER);
	/*
	 * NOTE(review): the "-"-prefixed lines below appear to be patch
	 * residue (a removed if_gethandle() NULL check) left in this
	 * listing; they are not valid C — confirm against the repository
	 * copy of this file.
	 */
- if (ifp == NULL) {
- device_printf(dev, "can not if_gethandle()\n");
- error = ENOSPC;
- goto fail;
- }

	/*
	 * Allocate Tx and Rx rings.
	 */
	if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
		goto fail;

	if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
		goto fail;

	nfe_alloc_jrx_ring(sc, &sc->jrxq);
	/* Create sysctl node. */
	nfe_sysctl_node(sc);

	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, nfe_ioctl);
	if_setstartfn(ifp, nfe_start);
	if_sethwassist(ifp, 0);
	if_setcapabilities(ifp, 0);
	if_setinitfn(ifp, nfe_init);
	if_setsendqlen(ifp, NFE_TX_RING_COUNT - 1);
	if_setsendqready(ifp);

	if (sc->nfe_flags & NFE_HW_CSUM) {
		if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_TSO4, 0);
		if_sethwassistbits(ifp, NFE_CSUM_FEATURES | CSUM_TSO, 0);
	}
	if_setcapenable(ifp, if_getcapabilities(ifp));

	sc->nfe_framesize = if_getmtu(ifp) + NFE_RX_HEADERS;
	/* VLAN capability setup. */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
	if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
		if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0);
		if ((if_getcapabilities(ifp) & IFCAP_HWCSUM) != 0)
			if_setcapabilitiesbit(ifp,
			    (IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO), 0);
	}

	if (pci_find_cap(dev, PCIY_PMG, &reg) == 0)
		if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif

	/* Do MII setup */
	phyloc = MII_PHY_ANY;
	/* The MSI K9N6PGM2-V2 board wires its PHY at address 0. */
	if (sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN1 ||
	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN2 ||
	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN3 ||
	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN4) {
		if (nfe_detect_msik9(sc) != 0)
			phyloc = 0;
	}
	error = mii_attach(dev, &sc->nfe_miibus, ifp,
	    (ifm_change_cb_t)nfe_ifmedia_upd, (ifm_stat_cb_t)nfe_ifmedia_sts,
	    BMSR_DEFCAPMASK, phyloc, MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}
	ether_ifattach(ifp, sc->eaddr);

	NET_TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
	sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->nfe_tq);
	taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->nfe_dev));
	error = 0;
	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
		error = bus_setup_intr(dev, sc->nfe_irq[0],
		    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
		    &sc->nfe_intrhand[0]);
	} else {
		for (i = 0; i < NFE_MSI_MESSAGES; i++) {
			error = bus_setup_intr(dev, sc->nfe_irq[i],
			    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
			    &sc->nfe_intrhand[i]);
			if (error != 0)
				break;
		}
	}
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		taskqueue_free(sc->nfe_tq);
		sc->nfe_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		nfe_detach(dev);

	return (error);
}
/*
 * Device detach: stop the interface and detach the ether/mii layers,
 * restore the hardware MAC address, release the taskqueue, interrupt
 * handlers, IRQ/MSI(-X)/memory resources, free the DMA rings and
 * destroy the softc mutex.  Also serves as the unwind path for a
 * failed attach, so every teardown step is guarded against state
 * that was never set up.
 */
static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc;
	if_t ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int i, rid;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
	ifp = sc->nfe_ifp;

#ifdef DEVICE_POLLING
	if (ifp != NULL && if_getcapenable(ifp) & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	if (device_is_attached(dev)) {
		NFE_LOCK(sc);
		nfe_stop(ifp);
		if_setflagbits(ifp, 0, IFF_UP);
		NFE_UNLOCK(sc);
		callout_drain(&sc->nfe_stat_ch);
		ether_ifdetach(ifp);
	}

	if (ifp) {
		/* restore ethernet address */
		if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
			/* Hardware stores the address byte-reversed. */
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				eaddr[i] = sc->eaddr[5 - i];
			}
		} else
			bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
		nfe_set_macaddr(sc, eaddr);
		if_free(ifp);
	}
	if (sc->nfe_miibus)
		device_delete_child(dev, sc->nfe_miibus);
	bus_generic_detach(dev);
	if (sc->nfe_tq != NULL) {
		taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
		taskqueue_free(sc->nfe_tq);
		sc->nfe_tq = NULL;
	}

	for (i = 0; i < NFE_MSI_MESSAGES; i++) {
		if (sc->nfe_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->nfe_irq[i],
			    sc->nfe_intrhand[i]);
			sc->nfe_intrhand[i] = NULL;
		}
	}

	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
		if (sc->nfe_irq[0] != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, 0,
			    sc->nfe_irq[0]);
	} else {
		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
			if (sc->nfe_irq[i] != NULL) {
				bus_release_resource(dev, SYS_RES_IRQ, rid,
				    sc->nfe_irq[i]);
				sc->nfe_irq[i] = NULL;
			}
		}
		pci_release_msi(dev);
	}
	if (sc->nfe_msix_pba_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
		    sc->nfe_msix_pba_res);
		sc->nfe_msix_pba_res = NULL;
	}
	if (sc->nfe_msix_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
		    sc->nfe_msix_res);
		sc->nfe_msix_res = NULL;
	}
	if (sc->nfe_res[0] != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    sc->nfe_res[0]);
		sc->nfe_res[0] = NULL;
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);
	nfe_free_jrx_ring(sc, &sc->jrxq);

	if (sc->nfe_parent_tag) {
		bus_dma_tag_destroy(sc->nfe_parent_tag);
		sc->nfe_parent_tag = NULL;
	}

	mtx_destroy(&sc->nfe_mtx);

	return (0);
}
/*
 * Suspend: stop the interface, arm wake-on-LAN state, and mark the
 * softc suspended so other paths know the hardware is down.
 */
static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc;

	sc = device_get_softc(dev);

	NFE_LOCK(sc);
	nfe_stop(sc->nfe_ifp);
	nfe_set_wol(sc);
	sc->nfe_suspended = 1;
	NFE_UNLOCK(sc);

	return (0);
}
/*
 * Resume: re-apply the chip power-up sequence and, if the interface
 * was administratively up, reinitialize it; clear the suspended flag.
 */
static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);

	NFE_LOCK(sc);
	nfe_power(sc);
	ifp = sc->nfe_ifp;
	if (if_getflags(ifp) & IFF_UP)
		nfe_init_locked(sc);
	sc->nfe_suspended = 0;
	NFE_UNLOCK(sc);

	return (0);
}
static int
nfe_can_use_msix(struct nfe_softc *sc)
{
static struct msix_blacklist {
char *maker;
char *product;
} msix_blacklists[] = {
{ "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" }
};
struct msix_blacklist *mblp;
char *maker, *product;
int count, n, use_msix;
/*
* Search base board manufacturer and product name table
* to see this system has a known MSI/MSI-X issue.
*/
maker = kern_getenv("smbios.planar.maker");
product = kern_getenv("smbios.planar.product");
use_msix = 1;
if (maker != NULL && product != NULL) {
count = nitems(msix_blacklists);
mblp = msix_blacklists;
for (n = 0; n < count; n++) {
if (strcmp(maker, mblp->maker) == 0 &&
strcmp(product, mblp->product) == 0) {
use_msix = 0;
break;
}
mblp++;
}
}
if (maker != NULL)
freeenv(maker);
if (product != NULL)
freeenv(product);
return (use_msix);
}
/*
 * Take PHY/NIC out of powerdown, from Linux.  No-op on chips without
 * the NFE_PWR_MGMT feature flag.  Pulses the MAC reset, then clears
 * the wakeup bits in the PWR2 control register (with a rev-A3 quirk
 * bit for late-revision nForce 430 parts).
 */
static void
nfe_power(struct nfe_softc *sc)
{
	uint32_t pwr;

	if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
		return;
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
	NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
	DELAY(100);
	NFE_WRITE(sc, NFE_MAC_RESET, 0);
	DELAY(100);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
	pwr = NFE_READ(sc, NFE_PWR2_CTL);
	pwr &= ~NFE_PWR2_WAKEUP_MASK;
	if (sc->nfe_revid >= 0xa3 &&
	    (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
		pwr |= NFE_PWR2_REVA3;
	NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
}
/*
 * miibus link-state change callback.  Records whether a valid
 * 10/100/1000BASE-T link is up, reprograms the MAC for the negotiated
 * media via nfe_mac_config(), and starts or stops the Tx/Rx engines
 * accordingly.
 */
static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	uint32_t rxctl, txctl;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->nfe_miibus);
	ifp = sc->nfe_ifp;

	sc->nfe_link = 0;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->nfe_link = 1;
			break;
		default:
			break;
		}
	}

	nfe_mac_config(sc, mii);
	txctl = NFE_READ(sc, NFE_TX_CTL);
	rxctl = NFE_READ(sc, NFE_RX_CTL);
	if (sc->nfe_link != 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		txctl |= NFE_TX_START;
		rxctl |= NFE_RX_START;
	} else {
		txctl &= ~NFE_TX_START;
		rxctl &= ~NFE_RX_START;
	}
	NFE_WRITE(sc, NFE_TX_CTL, txctl);
	NFE_WRITE(sc, NFE_RX_CTL, rxctl);
}
/*
 * Program the MAC for the currently negotiated media: duplex, speed
 * (PHY interface, random seed and link-speed registers) and Rx/Tx
 * pause-frame handling.  Caller must hold the softc lock.
 */
static void
nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii)
{
	uint32_t link, misc, phy, seed;
	uint32_t val;

	NFE_LOCK_ASSERT(sc);

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	misc = NFE_MISC1_MAGIC;
	link = NFE_MEDIA_SET;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	/* Pick the SETUP_R1 magic based on the PHY interface mode bit. */
	if ((phy & 0x10000000) != 0) {
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
			val = NFE_R1_MAGIC_1000;
		else
			val = NFE_R1_MAGIC_10_100;
	} else
		val = NFE_R1_MAGIC_DEFAULT;
	NFE_WRITE(sc, NFE_SETUP_R1, val);

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		/* It seems all hardwares supports Rx pause frames. */
		val = NFE_READ(sc, NFE_RXFILTER);
		if ((IFM_OPTIONS(mii->mii_media_active) &
		    IFM_ETH_RXPAUSE) != 0)
			val |= NFE_PFF_RX_PAUSE;
		else
			val &= ~NFE_PFF_RX_PAUSE;
		NFE_WRITE(sc, NFE_RXFILTER, val);
		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
			val = NFE_READ(sc, NFE_MISC1);
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_TXPAUSE) != 0) {
				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
				    NFE_TX_PAUSE_FRAME_ENABLE);
				val |= NFE_MISC1_TX_PAUSE;
			} else {
				val &= ~NFE_MISC1_TX_PAUSE;
				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
				    NFE_TX_PAUSE_FRAME_DISABLE);
			}
			NFE_WRITE(sc, NFE_MISC1, val);
		}
	} else {
		/* disable rx/tx pause frames */
		val = NFE_READ(sc, NFE_RXFILTER);
		val &= ~NFE_PFF_RX_PAUSE;
		NFE_WRITE(sc, NFE_RXFILTER, val);
		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
			NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
			    NFE_TX_PAUSE_FRAME_DISABLE);
			val = NFE_READ(sc, NFE_MISC1);
			val &= ~NFE_MISC1_TX_PAUSE;
			NFE_WRITE(sc, NFE_MISC1, val);
		}
	}
}
/*
 * miibus register read.  Clears stale status, cancels any in-flight
 * MDIO cycle, issues the read and busy-waits (up to NFE_TIMEOUT *
 * 100us) for completion.  Returns the register value, or 0 on
 * timeout/PHY error.  As a side effect, remembers the first PHY
 * address that returns plausible data in sc->mii_phyaddr.
 */
static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		/* Writing BUSY aborts the pending MDIO transaction. */
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == NFE_TIMEOUT) {
		DPRINTFN(sc, 2, "timeout waiting for PHY\n");
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY\n");
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return (val);
}
/*
 * miibus register write.  Mirrors nfe_miibus_readreg(): clear status,
 * abort any pending cycle, latch the data, issue the write command and
 * busy-wait for completion.  Always returns 0; a timeout is only
 * reported under NFE_DEBUG.
 */
static int
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
		device_printf(sc->nfe_dev, "could not write to PHY\n");
#endif
	return (0);
}
/* Callback argument for nfe_dma_map_segs(): receives the mapped bus address. */
struct nfe_dmamap_arg {
	bus_addr_t nfe_busaddr;
};
/*
 * Allocate the standard Rx ring: a DMA tag and coherent memory for
 * the descriptor array (32- or 64-bit descriptors depending on
 * NFE_40BIT_ADDR), a DMA tag for mbuf-cluster Rx buffers, a spare
 * map for error recovery, and one map per ring slot.  Returns 0 on
 * success or a bus_dma errno; on failure the caller (attach) unwinds
 * via nfe_detach() -> nfe_free_rx_ring().
 */
static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_dmamap_arg ctx;
	struct nfe_rx_data *data;
	void *desc;
	int i, error, descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_RX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
	    NFE_RX_RING_COUNT * descsize,	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->rx_desc_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
		goto fail;
	}

	/* allocate memory to desc */
	error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
		goto fail;
	}
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		ring->desc64 = desc;
	else
		ring->desc32 = desc;

	/* map desc to device visible address space */
	ctx.nfe_busaddr = 0;
	error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
	    NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
		goto fail;
	}
	ring->physaddr = ctx.nfe_busaddr;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &ring->rx_data_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
		goto fail;
	}

	error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not create Rx DMA spare map\n");
		goto fail;
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		/*
		 * Use the 'ring' argument rather than reaching into
		 * sc->rxq directly: keeps the function decoupled from a
		 * specific softc field and consistent with
		 * nfe_free_rx_ring().  Behavior is unchanged for the
		 * existing caller, which passes &sc->rxq.
		 */
		data = &ring->data[i];
		data->rx_data_map = NULL;
		data->m = NULL;
		error = bus_dmamap_create(ring->rx_data_tag, 0,
		    &data->rx_data_map);
		if (error != 0) {
			device_printf(sc->nfe_dev,
			    "could not create Rx DMA map\n");
			goto fail;
		}
	}

fail:
	return (error);
}
static void
nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
struct nfe_dmamap_arg ctx;
struct nfe_rx_data *data;
void *desc;
int i, error, descsize;
if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
return;
if (jumbo_disable != 0) {
device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
sc->nfe_jumbo_disable = 1;
return;
}
if (sc->nfe_flags & NFE_40BIT_ADDR) {
desc = ring->jdesc64;
descsize = sizeof (struct nfe_desc64);
} else {
desc = ring->jdesc32;
descsize = sizeof (struct nfe_desc32);
}
ring->jcur = ring->jnext = 0;
/* Create DMA tag for jumbo Rx ring. */
error = bus_dma_tag_create(sc->nfe_parent_tag,
NFE_RING_ALIGN, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */
1, /* nsegments */
NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&ring->jrx_desc_tag);
if (error != 0) {
device_printf(sc->nfe_dev,
"could not create jumbo ring DMA tag\n");
goto fail;
}
/* Create DMA tag for jumbo Rx buffers. */
error = bus_dma_tag_create(sc->nfe_parent_tag,
1, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
MJUM9BYTES, /* maxsize */
1, /* nsegments */
MJUM9BYTES, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&ring->jrx_data_tag);
if (error != 0) {
device_printf(sc->nfe_dev,
"could not create jumbo Rx buffer DMA tag\n");
goto fail;
}
/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
if (error != 0) {
device_printf(sc->nfe_dev,
"could not allocate DMA'able memory for jumbo Rx ring\n");
goto fail;
}
if (sc->nfe_flags & NFE_40BIT_ADDR)
ring->jdesc64 = desc;
else
ring->jdesc32 = desc;
ctx.nfe_busaddr = 0;
error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
if (error != 0) {
device_printf(sc->nfe_dev,
"could not load DMA'able memory for jumbo Rx ring\n");
goto fail;
}
ring->jphysaddr = ctx.nfe_busaddr;
/* Create DMA maps for jumbo Rx buffers. */
error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
if (error != 0) {
device_printf(sc->nfe_dev,
"could not create jumbo Rx DMA spare map\n");
goto fail;
}
for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
data = &sc->jrxq.jdata[i];
data->rx_data_map = NULL;
data->m = NULL;
error = bus_dmamap_create(ring->jrx_data_tag, 0,
&data->rx_data_map);
if (error != 0) {
device_printf(sc->nfe_dev,
"could not create jumbo Rx DMA map\n");
goto fail;
}
}
return;
fail:
/*
* Running without jumbo frame support is ok for most cases
* so don't fail on creating dma tag/map for jumbo frame.
*/
nfe_free_jrx_ring(sc, ring);
device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
"resource shortage\n");
sc->nfe_jumbo_disable = 1;
}
/*
 * (Re)initialize the standard Rx ring: zero the descriptors, attach a
 * fresh mbuf to every slot via nfe_newbuf(), and sync the descriptor
 * memory for device use.  Returns 0 or ENOBUFS if mbufs ran out.
 */
static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	void *desc;
	size_t descsize;
	int i;

	ring->cur = ring->next = 0;
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}
	bzero(desc, descsize * NFE_RX_RING_COUNT);
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (nfe_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
/*
 * (Re)initialize the jumbo Rx ring: zero the descriptors, attach a
 * jumbo cluster to every slot via nfe_jnewbuf(), and sync the
 * descriptor memory.  Returns 0 or ENOBUFS if clusters ran out.
 */
static int
nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
	void *desc;
	size_t descsize;
	int i;

	ring->jcur = ring->jnext = 0;
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->jdesc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->jdesc32;
		descsize = sizeof (struct nfe_desc32);
	}
	bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
		if (nfe_jnewbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
/*
 * Tear down the standard Rx ring: destroy per-slot maps, free any
 * attached mbufs, then the buffer tag/spare map, and finally unload
 * and free the descriptor memory and its tag.  Safe to call on a
 * partially-constructed ring (every step is NULL-guarded).
 */
static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i;

	if (sc->nfe_flags & NFE_40BIT_ADDR)
		desc = ring->desc64;
	else
		desc = ring->desc32;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->rx_data_map != NULL) {
			bus_dmamap_destroy(ring->rx_data_tag,
			    data->rx_data_map);
			data->rx_data_map = NULL;
		}
		if (data->m != NULL) {
			m_freem(data->m);
			data->m = NULL;
		}
	}
	if (ring->rx_data_tag != NULL) {
		if (ring->rx_spare_map != NULL) {
			bus_dmamap_destroy(ring->rx_data_tag,
			    ring->rx_spare_map);
			ring->rx_spare_map = NULL;
		}
		bus_dma_tag_destroy(ring->rx_data_tag);
		ring->rx_data_tag = NULL;
	}

	if (desc != NULL) {
		bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
		bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
		ring->desc64 = NULL;
		ring->desc32 = NULL;
	}
	if (ring->rx_desc_tag != NULL) {
		bus_dma_tag_destroy(ring->rx_desc_tag);
		ring->rx_desc_tag = NULL;
	}
}
/*
 * Tear down the jumbo Rx ring; mirror of nfe_free_rx_ring() for the
 * jumbo tags/maps.  No-op on chips without NFE_JUMBO_SUP.  Safe on a
 * partially-constructed ring — it is also the failure path of
 * nfe_alloc_jrx_ring().
 */
static void
nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i;

	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
		return;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->jdesc64;
	} else {
		desc = ring->jdesc32;
	}

	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
		data = &ring->jdata[i];

		if (data->rx_data_map != NULL) {
			bus_dmamap_destroy(ring->jrx_data_tag,
			    data->rx_data_map);
			data->rx_data_map = NULL;
		}
		if (data->m != NULL) {
			m_freem(data->m);
			data->m = NULL;
		}
	}
	if (ring->jrx_data_tag != NULL) {
		if (ring->jrx_spare_map != NULL) {
			bus_dmamap_destroy(ring->jrx_data_tag,
			    ring->jrx_spare_map);
			ring->jrx_spare_map = NULL;
		}
		bus_dma_tag_destroy(ring->jrx_data_tag);
		ring->jrx_data_tag = NULL;
	}

	if (desc != NULL) {
		bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
		bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
		ring->jdesc64 = NULL;
		ring->jdesc32 = NULL;
	}

	if (ring->jrx_desc_tag != NULL) {
		bus_dma_tag_destroy(ring->jrx_desc_tag);
		ring->jrx_desc_tag = NULL;
	}
}
/*
 * Allocate the Tx ring: coherent descriptor memory (32- or 64-bit
 * descriptors), a TSO-capable buffer DMA tag (up to NFE_MAX_SCATTER
 * segments), and one DMA map per ring slot.  Returns 0 or a bus_dma
 * errno; failure is unwound by nfe_detach() -> nfe_free_tx_ring().
 */
static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_dmamap_arg ctx;
	int i, error;
	void *desc;
	int descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_TX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
	    NFE_TX_RING_COUNT * descsize,	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->tx_desc_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
		goto fail;
	}
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		ring->desc64 = desc;
	else
		ring->desc32 = desc;

	ctx.nfe_busaddr = 0;
	error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
	    NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
		goto fail;
	}
	ring->physaddr = ctx.nfe_busaddr;

	/* Buffer tag sized for TSO: large maxsize, many segments. */
	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    NFE_TSO_MAXSIZE,
	    NFE_MAX_SCATTER,
	    NFE_TSO_MAXSGSIZE,
	    0,
	    NULL, NULL,
	    &ring->tx_data_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
		goto fail;
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(ring->tx_data_tag, 0,
		    &ring->data[i].tx_data_map);
		if (error != 0) {
			device_printf(sc->nfe_dev,
			    "could not create Tx DMA map\n");
			goto fail;
		}
	}

fail:
	return (error);
}
/*
 * Reset the Tx ring to its empty state: clear the software indices,
 * zero every hardware descriptor and push the zeroed memory out to
 * the device.
 */
static void
nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
    void *ring_mem;
    size_t elt_size;

    sc->nfe_force_tx = 0;
    ring->queued = 0;
    ring->cur = ring->next = 0;

    if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0) {
        ring_mem = ring->desc64;
        elt_size = sizeof (struct nfe_desc64);
    } else {
        ring_mem = ring->desc32;
        elt_size = sizeof (struct nfe_desc32);
    }
    bzero(ring_mem, elt_size * NFE_TX_RING_COUNT);

    bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * Tear down the Tx ring: unload and free any mbuf chains still queued,
 * destroy the per-slot DMA maps and the data tag, then release the
 * descriptor memory and its tag.
 */
static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
    struct nfe_tx_data *data;
    void *desc;
    int i;

    /* Pick the descriptor array matching the chip's addressing mode. */
    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc = ring->desc64;
    } else {
        desc = ring->desc32;
    }

    for (i = 0; i < NFE_TX_RING_COUNT; i++) {
        data = &ring->data[i];

        if (data->m != NULL) {
            bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
            m_freem(data->m);
            data->m = NULL;
        }
        if (data->tx_data_map != NULL) {
            bus_dmamap_destroy(ring->tx_data_tag,
                data->tx_data_map);
            data->tx_data_map = NULL;
        }
    }

    if (ring->tx_data_tag != NULL) {
        bus_dma_tag_destroy(ring->tx_data_tag);
        ring->tx_data_tag = NULL;
    }

    if (desc != NULL) {
        bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
            BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
        bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
        /* Both aliases refer to the same allocation; clear both. */
        ring->desc64 = NULL;
        ring->desc32 = NULL;
        bus_dma_tag_destroy(ring->tx_desc_tag);
        ring->tx_desc_tag = NULL;
    }
}
#ifdef DEVICE_POLLING
static poll_handler_t nfe_poll;

/*
 * DEVICE_POLLING handler.  Harvests up to "count" Rx descriptors,
 * reclaims Tx descriptors and restarts transmission, and on
 * POLL_AND_CHECK_STATUS also acknowledges pending interrupt causes.
 * Returns the number of packets delivered to the stack.
 */
static int
nfe_poll(if_t ifp, enum poll_cmd cmd, int count)
{
    struct nfe_softc *sc = if_getsoftc(ifp);
    uint32_t r;
    int rx_npkts = 0;

    NFE_LOCK(sc);

    if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
        NFE_UNLOCK(sc);
        return (rx_npkts);
    }

    /*
     * nfe_[j]rxeof() reports the number of received packets through
     * its third argument; its return value is a 0/EAGAIN status.
     * Do not assign that status to rx_npkts — doing so clobbers the
     * packet count the callee just stored there, making the handler
     * return an error code instead of the count polling expects.
     */
    if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
        nfe_jrxeof(sc, count, &rx_npkts);
    else
        nfe_rxeof(sc, count, &rx_npkts);
    nfe_txeof(sc);
    if (!if_sendq_empty(ifp))
        nfe_start_locked(ifp);

    if (cmd == POLL_AND_CHECK_STATUS) {
        if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
            NFE_UNLOCK(sc);
            return (rx_npkts);
        }
        /* Write-back acknowledges the pending causes. */
        NFE_WRITE(sc, sc->nfe_irq_status, r);

        if (r & NFE_IRQ_LINK) {
            NFE_READ(sc, NFE_PHY_STATUS);
            NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
            DPRINTF(sc, "link state changed\n");
        }
    }
    NFE_UNLOCK(sc);
    return (rx_npkts);
}
#endif /* DEVICE_POLLING */
/*
 * Program the set of wanted interrupt causes; only required (and only
 * done) when MSI is in use.
 */
static void
nfe_set_intr(struct nfe_softc *sc)
{
    if (sc->nfe_msi == 0)
        return;
    NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
}
/* In MSI-X mode, a write to the interrupt mask registers behaves as XOR. */
/*
 * Unmask chip interrupts.  With MSI-X the mask register write toggles
 * (XOR semantics), so only write when the mask currently reads zero.
 */
static __inline void
nfe_enable_intr(struct nfe_softc *sc)
{
    if (sc->nfe_msix == 0) {
        NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
        return;
    }
    /* XXX Should have a better way to enable interrupts! */
    if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
        NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
}
/*
 * Mask chip interrupts.  With MSI-X the mask register write toggles
 * (XOR semantics), so only write when the mask currently reads
 * non-zero.
 */
static __inline void
nfe_disable_intr(struct nfe_softc *sc)
{
    if (sc->nfe_msix == 0) {
        NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
        return;
    }
    /* XXX Should have a better way to disable interrupts! */
    if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
        NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
}
/*
 * ifnet ioctl handler: MTU changes (validated against jumbo support),
 * interface flag changes, multicast list updates, media selection and
 * capability toggles (polling, checksum offload, TSO, WOL, VLAN).
 * "init" counts capability changes that require a full re-init.
 */
static int
nfe_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
    struct nfe_softc *sc;
    struct ifreq *ifr;
    struct mii_data *mii;
    int error, init, mask;

    sc = if_getsoftc(ifp);
    ifr = (struct ifreq *) data;
    error = 0;
    init = 0;
    switch (cmd) {
    case SIOCSIFMTU:
        if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
            error = EINVAL;
        else if (if_getmtu(ifp) != ifr->ifr_mtu) {
            /* Reject jumbo MTU when unsupported or disabled. */
            if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
                (sc->nfe_jumbo_disable != 0)) &&
                ifr->ifr_mtu > ETHERMTU)
                error = EINVAL;
            else {
                NFE_LOCK(sc);
                if_setmtu(ifp, ifr->ifr_mtu);
                /* Re-init so the frame size takes effect. */
                if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
                    if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
                    nfe_init_locked(sc);
                }
                NFE_UNLOCK(sc);
            }
        }
        break;
    case SIOCSIFFLAGS:
        NFE_LOCK(sc);
        if (if_getflags(ifp) & IFF_UP) {
            /*
             * If only the PROMISC or ALLMULTI flag changes, then
             * don't do a full re-init of the chip, just update
             * the Rx filter.
             */
            if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) &&
                ((if_getflags(ifp) ^ sc->nfe_if_flags) &
                (IFF_ALLMULTI | IFF_PROMISC)) != 0)
                nfe_setmulti(sc);
            else
                nfe_init_locked(sc);
        } else {
            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
                nfe_stop(ifp);
        }
        sc->nfe_if_flags = if_getflags(ifp);
        NFE_UNLOCK(sc);
        error = 0;
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
            NFE_LOCK(sc);
            nfe_setmulti(sc);
            NFE_UNLOCK(sc);
            error = 0;
        }
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        mii = device_get_softc(sc->nfe_miibus);
        error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
        break;
    case SIOCSIFCAP:
        /* "mask" holds the capability bits being toggled. */
        mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
        if ((mask & IFCAP_POLLING) != 0) {
            if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
                error = ether_poll_register(nfe_poll, ifp);
                if (error)
                    break;
                NFE_LOCK(sc);
                nfe_disable_intr(sc);
                if_setcapenablebit(ifp, IFCAP_POLLING, 0);
                NFE_UNLOCK(sc);
            } else {
                error = ether_poll_deregister(ifp);
                /* Enable interrupt even in error case */
                NFE_LOCK(sc);
                nfe_enable_intr(sc);
                if_setcapenablebit(ifp, 0, IFCAP_POLLING);
                NFE_UNLOCK(sc);
            }
        }
#endif /* DEVICE_POLLING */
        if ((mask & IFCAP_WOL_MAGIC) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0)
            if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
        if ((mask & IFCAP_TXCSUM) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
            if_togglecapenable(ifp, IFCAP_TXCSUM);
            /* Keep the hwassist bits in sync with the toggle. */
            if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
                if_sethwassistbits(ifp, NFE_CSUM_FEATURES, 0);
            else
                if_sethwassistbits(ifp, 0, NFE_CSUM_FEATURES);
        }
        if ((mask & IFCAP_RXCSUM) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) {
            if_togglecapenable(ifp, IFCAP_RXCSUM);
            init++;
        }
        if ((mask & IFCAP_TSO4) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
            if_togglecapenable(ifp, IFCAP_TSO4);
            if ((IFCAP_TSO4 & if_getcapenable(ifp)) != 0)
                if_sethwassistbits(ifp, CSUM_TSO, 0);
            else
                if_sethwassistbits(ifp, 0, CSUM_TSO);
        }
        if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
            if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
        if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
            if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
            /* HWTSO over VLAN requires tagging; drop it together. */
            if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
                if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO);
            init++;
        }
        /*
         * XXX
         * It seems that VLAN stripping requires Rx checksum offload.
         * Unfortunately FreeBSD has no way to disable only Rx side
         * VLAN stripping. So when we know Rx checksum offload is
         * disabled turn entire hardware VLAN assist off.
         */
        if ((if_getcapenable(ifp) & IFCAP_RXCSUM) == 0) {
            if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
                init++;
            if_setcapenablebit(ifp, 0,
                (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO));
        }
        /* Re-init once if any toggled capability requires it. */
        if (init > 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
            if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
            nfe_init(sc);
        }
        if_vlancap(ifp);
        break;
    default:
        error = ether_ioctl(ifp, cmd, data);
        break;
    }

    return (error);
}
/*
 * Interrupt filter: if the chip asserted something, mask interrupts
 * and defer the real work to the taskqueue handler (nfe_int_task).
 */
static int
nfe_intr(void *arg)
{
    struct nfe_softc *sc = arg;
    uint32_t pending;

    pending = NFE_READ(sc, sc->nfe_irq_status);
    /* 0 means not ours; all-ones means the device is gone. */
    if (pending != 0 && pending != 0xffffffff) {
        nfe_disable_intr(sc);
        taskqueue_enqueue(sc->nfe_tq, &sc->nfe_int_task);
        return (FILTER_HANDLED);
    }
    return (FILTER_STRAY);
}
/*
 * Deferred interrupt handler (runs from the taskqueue).  Acknowledges
 * the pending causes, processes link changes, drains the Rx and Tx
 * rings, restarts transmission, and re-enables interrupts — or
 * re-queues itself if more work arrived in the meantime.
 */
static void
nfe_int_task(void *arg, int pending)
{
    struct nfe_softc *sc = arg;
    if_t ifp = sc->nfe_ifp;
    uint32_t r;
    int domore;

    NFE_LOCK(sc);

    if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
        nfe_enable_intr(sc);
        NFE_UNLOCK(sc);
        return;	/* not for us */
    }
    /* Write-back acknowledges the causes we just read. */
    NFE_WRITE(sc, sc->nfe_irq_status, r);

    DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);

#ifdef DEVICE_POLLING
    if (if_getcapenable(ifp) & IFCAP_POLLING) {
        NFE_UNLOCK(sc);
        return;
    }
#endif

    if (r & NFE_IRQ_LINK) {
        NFE_READ(sc, NFE_PHY_STATUS);
        NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
        DPRINTF(sc, "link state changed\n");
    }

    if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
        NFE_UNLOCK(sc);
        nfe_disable_intr(sc);
        return;
    }

    domore = 0;
    /* check Rx ring */
    if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
        domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL);
    else
        domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL);
    /* check Tx ring */
    nfe_txeof(sc);

    if (!if_sendq_empty(ifp))
        nfe_start_locked(ifp);

    NFE_UNLOCK(sc);

    /* Budget exhausted or new causes pending: run again. */
    if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
        taskqueue_enqueue(sc->nfe_tq, &sc->nfe_int_task);
        return;
    }

    /* Reenable interrupts. */
    nfe_enable_intr(sc);
}
/*
 * Return Rx slot "idx" to the hardware unchanged, re-arming the
 * descriptor with the buffer that is already attached to the slot.
 */
static __inline void
nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
{
    struct nfe_rx_data *slot;
    struct mbuf *m;

    slot = &sc->rxq.data[idx];
    m = slot->m;

    if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0) {
        struct nfe_desc64 *d64 = &sc->rxq.desc64[idx];

        /* VLAN packet may have overwritten it. */
        d64->physaddr[0] = htole32(NFE_ADDR_HI(slot->paddr));
        d64->physaddr[1] = htole32(NFE_ADDR_LO(slot->paddr));
        d64->length = htole16(m->m_len);
        d64->flags = htole16(NFE_RX_READY);
    } else {
        struct nfe_desc32 *d32 = &sc->rxq.desc32[idx];

        d32->length = htole16(m->m_len);
        d32->flags = htole16(NFE_RX_READY);
    }
}
/*
 * Return jumbo Rx slot "idx" to the hardware unchanged, re-arming the
 * descriptor with the buffer that is already attached to the slot.
 */
static __inline void
nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
{
    struct nfe_rx_data *slot;
    struct mbuf *m;

    slot = &sc->jrxq.jdata[idx];
    m = slot->m;

    if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0) {
        struct nfe_desc64 *d64 = &sc->jrxq.jdesc64[idx];

        /* VLAN packet may have overwritten it. */
        d64->physaddr[0] = htole32(NFE_ADDR_HI(slot->paddr));
        d64->physaddr[1] = htole32(NFE_ADDR_LO(slot->paddr));
        d64->length = htole16(m->m_len);
        d64->flags = htole16(NFE_RX_READY);
    } else {
        struct nfe_desc32 *d32 = &sc->jrxq.jdesc32[idx];

        d32->length = htole16(m->m_len);
        d32->flags = htole16(NFE_RX_READY);
    }
}
/*
 * Attach a fresh cluster mbuf (MCLBYTES) to Rx slot "idx": load it via
 * the spare DMA map, swap maps with the slot, and rewrite the hardware
 * descriptor.  The old mbuf in data->m is left for the caller to hand
 * up the stack.  Returns 0 on success, ENOBUFS on allocation or DMA
 * load failure (slot left untouched).
 */
static int
nfe_newbuf(struct nfe_softc *sc, int idx)
{
    struct nfe_rx_data *data;
    struct nfe_desc32 *desc32;
    struct nfe_desc64 *desc64;
    struct mbuf *m;
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    int nsegs;

    m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
    if (m == NULL)
        return (ENOBUFS);

    m->m_len = m->m_pkthdr.len = MCLBYTES;
    /* Offset the payload so the IP header ends up aligned. */
    m_adj(m, ETHER_ALIGN);

    /* Load into the spare map first so the slot stays valid on error. */
    if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
        m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
        m_freem(m);
        return (ENOBUFS);
    }
    KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

    if (data->m != NULL) {
        bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
    }
    /* Swap the freshly loaded spare map with the slot's map. */
    map = data->rx_data_map;
    data->rx_data_map = sc->rxq.rx_spare_map;
    sc->rxq.rx_spare_map = map;
    bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
        BUS_DMASYNC_PREREAD);
    data->paddr = segs[0].ds_addr;
    data->m = m;
    /* update mapping address in h/w descriptor */
    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc64 = &sc->rxq.desc64[idx];
        desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
        desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
        desc64->length = htole16(segs[0].ds_len);
        desc64->flags = htole16(NFE_RX_READY);
    } else {
        desc32 = &sc->rxq.desc32[idx];
        desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
        desc32->length = htole16(segs[0].ds_len);
        desc32->flags = htole16(NFE_RX_READY);
    }

    return (0);
}
/*
 * Jumbo-ring counterpart of nfe_newbuf(): attach a fresh 9KB
 * (MJUM9BYTES) mbuf to jumbo Rx slot "idx" via the jumbo spare map,
 * swap maps with the slot, and rewrite the hardware descriptor.
 * Returns 0 on success, ENOBUFS on allocation or DMA load failure.
 */
static int
nfe_jnewbuf(struct nfe_softc *sc, int idx)
{
    struct nfe_rx_data *data;
    struct nfe_desc32 *desc32;
    struct nfe_desc64 *desc64;
    struct mbuf *m;
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    int nsegs;

    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
    if (m == NULL)
        return (ENOBUFS);
    m->m_pkthdr.len = m->m_len = MJUM9BYTES;
    /* Offset the payload so the IP header ends up aligned. */
    m_adj(m, ETHER_ALIGN);

    /* Load into the spare map first so the slot stays valid on error. */
    if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
        sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
        m_freem(m);
        return (ENOBUFS);
    }
    KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

    data = &sc->jrxq.jdata[idx];
    if (data->m != NULL) {
        bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
    }
    /* Swap the freshly loaded spare map with the slot's map. */
    map = data->rx_data_map;
    data->rx_data_map = sc->jrxq.jrx_spare_map;
    sc->jrxq.jrx_spare_map = map;
    bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
        BUS_DMASYNC_PREREAD);
    data->paddr = segs[0].ds_addr;
    data->m = m;
    /* update mapping address in h/w descriptor */
    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc64 = &sc->jrxq.jdesc64[idx];
        desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
        desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
        desc64->length = htole16(segs[0].ds_len);
        desc64->flags = htole16(NFE_RX_READY);
    } else {
        desc32 = &sc->jrxq.jdesc32[idx];
        desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
        desc32->length = htole16(segs[0].ds_len);
        desc32->flags = htole16(NFE_RX_READY);
    }

    return (0);
}
/*
 * Rx handler for the standard (cluster-sized) ring.  Harvests up to
 * "count" completed descriptors, hands the mbufs to the stack and
 * refills the ring.  Stores the number of packets delivered through
 * *rx_npktsp (if non-NULL); returns 0 when the budget was not
 * exhausted, EAGAIN otherwise.  Called with the softc lock held; the
 * lock is dropped and re-taken around if_input().
 */
static int
nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
{
    if_t ifp = sc->nfe_ifp;
    struct nfe_desc32 *desc32;
    struct nfe_desc64 *desc64;
    struct nfe_rx_data *data;
    struct mbuf *m;
    uint16_t flags;
    int len, prog, rx_npkts;
    uint32_t vtag = 0;

    rx_npkts = 0;
    NFE_LOCK_ASSERT(sc);

    bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
        BUS_DMASYNC_POSTREAD);

    for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
        if (count <= 0)
            break;
        count--;

        data = &sc->rxq.data[sc->rxq.cur];

        if (sc->nfe_flags & NFE_40BIT_ADDR) {
            desc64 = &sc->rxq.desc64[sc->rxq.cur];
            /* physaddr[1] is reused by hardware for the VLAN tag. */
            vtag = le32toh(desc64->physaddr[1]);
            flags = le16toh(desc64->flags);
            len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
        } else {
            desc32 = &sc->rxq.desc32[sc->rxq.cur];
            flags = le16toh(desc32->flags);
            len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
        }

        /* NFE_RX_READY still set: hardware owns this slot. */
        if (flags & NFE_RX_READY)
            break;
        prog++;
        if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
            if (!(flags & NFE_RX_VALID_V1)) {
                if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
                nfe_discard_rxbuf(sc, sc->rxq.cur);
                continue;
            }
            if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
                flags &= ~NFE_RX_ERROR;
                len--;	/* fix buffer length */
            }
        } else {
            if (!(flags & NFE_RX_VALID_V2)) {
                if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
                nfe_discard_rxbuf(sc, sc->rxq.cur);
                continue;
            }
            if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
                flags &= ~NFE_RX_ERROR;
                len--;	/* fix buffer length */
            }
        }
        if (flags & NFE_RX_ERROR) {
            if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
            nfe_discard_rxbuf(sc, sc->rxq.cur);
            continue;
        }

        m = data->m;
        /* Refill the slot first; recycle the old mbuf on failure. */
        if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
            if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
            nfe_discard_rxbuf(sc, sc->rxq.cur);
            continue;
        }

        if ((vtag & NFE_RX_VTAG) != 0 &&
            (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
            m->m_pkthdr.ether_vtag = vtag & 0xffff;
            m->m_flags |= M_VLANTAG;
        }

        m->m_pkthdr.len = m->m_len = len;
        m->m_pkthdr.rcvif = ifp;

        if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
            if ((flags & NFE_RX_IP_CSUMOK) != 0) {
                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
                m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
                if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
                    (flags & NFE_RX_UDP_CSUMOK) != 0) {
                    m->m_pkthdr.csum_flags |=
                        CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                    m->m_pkthdr.csum_data = 0xffff;
                }
            }
        }

        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

        /* if_input() may sleep/recurse; call it unlocked. */
        NFE_UNLOCK(sc);
        if_input(ifp, m);
        NFE_LOCK(sc);
        rx_npkts++;
    }

    if (prog > 0)
        bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    if (rx_npktsp != NULL)
        *rx_npktsp = rx_npkts;
    return (count > 0 ? 0 : EAGAIN);
}
/*
 * Jumbo-ring counterpart of nfe_rxeof().  Harvests up to "count"
 * completed jumbo descriptors, hands the mbufs to the stack and
 * refills the ring.  Stores the number of packets delivered through
 * *rx_npktsp (if non-NULL); returns 0 when the budget was not
 * exhausted, EAGAIN otherwise.  Called with the softc lock held; the
 * lock is dropped and re-taken around if_input().
 */
static int
nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
{
    if_t ifp = sc->nfe_ifp;
    struct nfe_desc32 *desc32;
    struct nfe_desc64 *desc64;
    struct nfe_rx_data *data;
    struct mbuf *m;
    uint16_t flags;
    int len, prog, rx_npkts;
    uint32_t vtag = 0;

    rx_npkts = 0;
    NFE_LOCK_ASSERT(sc);

    bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
        BUS_DMASYNC_POSTREAD);

    for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
        vtag = 0) {
        if (count <= 0)
            break;
        count--;

        data = &sc->jrxq.jdata[sc->jrxq.jcur];

        if (sc->nfe_flags & NFE_40BIT_ADDR) {
            desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
            /* physaddr[1] is reused by hardware for the VLAN tag. */
            vtag = le32toh(desc64->physaddr[1]);
            flags = le16toh(desc64->flags);
            len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
        } else {
            desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
            flags = le16toh(desc32->flags);
            len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
        }

        /* NFE_RX_READY still set: hardware owns this slot. */
        if (flags & NFE_RX_READY)
            break;
        prog++;
        if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
            if (!(flags & NFE_RX_VALID_V1)) {
                if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
                nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
                continue;
            }
            if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
                flags &= ~NFE_RX_ERROR;
                len--;	/* fix buffer length */
            }
        } else {
            if (!(flags & NFE_RX_VALID_V2)) {
                if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
                nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
                continue;
            }
            if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
                flags &= ~NFE_RX_ERROR;
                len--;	/* fix buffer length */
            }
        }
        if (flags & NFE_RX_ERROR) {
            if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
            nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
            continue;
        }

        m = data->m;
        /* Refill the slot first; recycle the old mbuf on failure. */
        if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
            if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
            nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
            continue;
        }

        if ((vtag & NFE_RX_VTAG) != 0 &&
            (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
            m->m_pkthdr.ether_vtag = vtag & 0xffff;
            m->m_flags |= M_VLANTAG;
        }

        m->m_pkthdr.len = m->m_len = len;
        m->m_pkthdr.rcvif = ifp;

        if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
            if ((flags & NFE_RX_IP_CSUMOK) != 0) {
                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
                m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
                if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
                    (flags & NFE_RX_UDP_CSUMOK) != 0) {
                    m->m_pkthdr.csum_flags |=
                        CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                    m->m_pkthdr.csum_data = 0xffff;
                }
            }
        }

        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

        /* if_input() may sleep/recurse; call it unlocked. */
        NFE_UNLOCK(sc);
        if_input(ifp, m);
        NFE_LOCK(sc);
        rx_npkts++;
    }

    if (prog > 0)
        bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    if (rx_npktsp != NULL)
        *rx_npktsp = rx_npkts;
    return (count > 0 ? 0 : EAGAIN);
}
/*
 * Reclaim transmitted descriptors: walk from txq.next towards txq.cur,
 * stopping at the first descriptor the hardware still owns
 * (NFE_TX_VALID set).  Statistics are counted and the mbuf freed only
 * on the last fragment of each chain.  Clears OACTIVE and, when the
 * ring drains completely, the watchdog timer.
 */
static void
nfe_txeof(struct nfe_softc *sc)
{
    if_t ifp = sc->nfe_ifp;
    struct nfe_desc32 *desc32;
    struct nfe_desc64 *desc64;
    struct nfe_tx_data *data = NULL;
    uint16_t flags;
    int cons, prog;

    NFE_LOCK_ASSERT(sc);

    bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
        BUS_DMASYNC_POSTREAD);

    prog = 0;
    for (cons = sc->txq.next; cons != sc->txq.cur;
        NFE_INC(cons, NFE_TX_RING_COUNT)) {
        if (sc->nfe_flags & NFE_40BIT_ADDR) {
            desc64 = &sc->txq.desc64[cons];
            flags = le16toh(desc64->flags);
        } else {
            desc32 = &sc->txq.desc32[cons];
            flags = le16toh(desc32->flags);
        }

        /* Still owned by the hardware: nothing more to reclaim. */
        if (flags & NFE_TX_VALID)
            break;

        prog++;
        sc->txq.queued--;
        data = &sc->txq.data[cons];

        if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
            if ((flags & NFE_TX_LASTFRAG_V1) == 0)
                continue;
            if ((flags & NFE_TX_ERROR_V1) != 0) {
                device_printf(sc->nfe_dev,
                    "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);

                if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
            } else
                if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
        } else {
            if ((flags & NFE_TX_LASTFRAG_V2) == 0)
                continue;
            if ((flags & NFE_TX_ERROR_V2) != 0) {
                device_printf(sc->nfe_dev,
                    "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
                if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
            } else
                if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
        }

        /* last fragment of the mbuf chain transmitted */
        KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
        bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
            BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
        m_freem(data->m);
        data->m = NULL;
    }

    if (prog > 0) {
        sc->nfe_force_tx = 0;
        sc->txq.next = cons;
        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
        if (sc->txq.queued == 0)
            sc->nfe_watchdog_timer = 0;
    }
}
/*
 * Map the mbuf chain *m_head for DMA and build the corresponding Tx
 * descriptors.  On EFBIG the chain is compacted with m_collapse() and
 * retried once.  May replace *m_head; frees it and sets *m_head to
 * NULL on unrecoverable mapping errors.  Returns 0 on success,
 * ENOBUFS when the ring is too full or mapping fails, EIO for an
 * empty mapping, or the bus_dma error.
 */
static int
nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
{
    struct nfe_desc32 *desc32 = NULL;
    struct nfe_desc64 *desc64 = NULL;
    bus_dmamap_t map;
    bus_dma_segment_t segs[NFE_MAX_SCATTER];
    int error, i, nsegs, prod, si;
    uint32_t tsosegsz;
    uint16_t cflags, flags;
    struct mbuf *m;

    prod = si = sc->txq.cur;
    map = sc->txq.data[prod].tx_data_map;

    error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
        &nsegs, BUS_DMA_NOWAIT);
    if (error == EFBIG) {
        /* Too many fragments: compact the chain and retry once. */
        m = m_collapse(*m_head, M_NOWAIT, NFE_MAX_SCATTER);
        if (m == NULL) {
            m_freem(*m_head);
            *m_head = NULL;
            return (ENOBUFS);
        }
        *m_head = m;
        error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
            *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
        if (error != 0) {
            m_freem(*m_head);
            *m_head = NULL;
            return (ENOBUFS);
        }
    } else if (error != 0)
        return (error);
    if (nsegs == 0) {
        m_freem(*m_head);
        *m_head = NULL;
        return (EIO);
    }

    /* Keep two descriptors spare so the ring never completely fills. */
    if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
        bus_dmamap_unload(sc->txq.tx_data_tag, map);
        return (ENOBUFS);
    }

    m = *m_head;
    cflags = flags = 0;
    tsosegsz = 0;
    if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
        tsosegsz = (uint32_t)m->m_pkthdr.tso_segsz <<
            NFE_TX_TSO_SHIFT;
        cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
        cflags |= NFE_TX_TSO;
    } else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
        if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
            cflags |= NFE_TX_IP_CSUM;
        if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
            cflags |= NFE_TX_TCP_UDP_CSUM;
        if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
            cflags |= NFE_TX_TCP_UDP_CSUM;
    }

    for (i = 0; i < nsegs; i++) {
        if (sc->nfe_flags & NFE_40BIT_ADDR) {
            desc64 = &sc->txq.desc64[prod];
            desc64->physaddr[0] =
                htole32(NFE_ADDR_HI(segs[i].ds_addr));
            desc64->physaddr[1] =
                htole32(NFE_ADDR_LO(segs[i].ds_addr));
            desc64->vtag = 0;
            desc64->length = htole16(segs[i].ds_len - 1);
            desc64->flags = htole16(flags);
        } else {
            desc32 = &sc->txq.desc32[prod];
            desc32->physaddr =
                htole32(NFE_ADDR_LO(segs[i].ds_addr));
            desc32->length = htole16(segs[i].ds_len - 1);
            desc32->flags = htole16(flags);
        }

        /*
         * Setting of the valid bit in the first descriptor is
         * deferred until the whole chain is fully setup.
         */
        flags |= NFE_TX_VALID;

        sc->txq.queued++;
        NFE_INC(prod, NFE_TX_RING_COUNT);
    }

    /*
     * the whole mbuf chain has been DMA mapped, fix last/first descriptor.
     * csum flags, vtag and TSO belong to the first fragment only.
     */
    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
        desc64 = &sc->txq.desc64[si];
        if ((m->m_flags & M_VLANTAG) != 0)
            desc64->vtag = htole32(NFE_TX_VTAG |
                m->m_pkthdr.ether_vtag);
        if (tsosegsz != 0) {
            /*
             * XXX
             * The following indicates the descriptor element
             * is a 32bit quantity.
             */
            desc64->length |= htole16((uint16_t)tsosegsz);
            desc64->flags |= htole16(tsosegsz >> 16);
        }
        /*
         * finally, set the valid/checksum/TSO bit in the first
         * descriptor.
         */
        desc64->flags |= htole16(NFE_TX_VALID | cflags);
    } else {
        if (sc->nfe_flags & NFE_JUMBO_SUP)
            desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
        else
            desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
        desc32 = &sc->txq.desc32[si];
        if (tsosegsz != 0) {
            /*
             * XXX
             * The following indicates the descriptor element
             * is a 32bit quantity.
             */
            desc32->length |= htole16((uint16_t)tsosegsz);
            desc32->flags |= htole16(tsosegsz >> 16);
        }
        /*
         * finally, set the valid/checksum/TSO bit in the first
         * descriptor.
         */
        desc32->flags |= htole16(NFE_TX_VALID | cflags);
    }

    sc->txq.cur = prod;
    /* Move the loaded DMA map to the slot of the last descriptor. */
    prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
    sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
    sc->txq.data[prod].tx_data_map = map;
    sc->txq.data[prod].m = m;

    bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);

    return (0);
}
/*
 * Accumulator for nfe_hash_maddr(): "addr" holds the bitwise AND of
 * every multicast address visited and "mask" the AND of their
 * complements; nfe_setmulti() combines them into the address/mask
 * filter pair programmed into the chip.
 */
struct nfe_hash_maddr_ctx {
    uint8_t addr[ETHER_ADDR_LEN];
    uint8_t mask[ETHER_ADDR_LEN];
};
/*
 * if_foreach_llmaddr() callback: fold one link-level multicast address
 * into the address/mask accumulator (AND of addresses, AND of their
 * complements).  Always returns 1 so every address is counted.
 */
static u_int
nfe_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
    struct nfe_hash_maddr_ctx *ctx = arg;
    uint8_t *lladdr;
    int i;

    lladdr = LLADDR(sdl);
    for (i = 0; i < ETHER_ADDR_LEN; i++) {
        uint8_t byte = lladdr[i];

        ctx->addr[i] &= byte;
        ctx->mask[i] &= ~byte;
    }

    return (1);
}
/*
 * Program the chip's Rx filter.  In ALLMULTI/PROMISC mode an all-zero
 * address/mask pair is used; otherwise the pair is derived from the
 * subscribed multicast groups via nfe_hash_maddr() (address = common
 * bits, mask = bits on which the groups agree).  Finally updates the
 * RXFILTER register for promiscuous vs. unicast+multicast operation.
 */
static void
nfe_setmulti(struct nfe_softc *sc)
{
    if_t ifp = sc->nfe_ifp;
    struct nfe_hash_maddr_ctx ctx;
    uint32_t filter;
    uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff
    };
    int i;

    NFE_LOCK_ASSERT(sc);

    if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
        bzero(ctx.addr, ETHER_ADDR_LEN);
        bzero(ctx.mask, ETHER_ADDR_LEN);
        goto done;
    }

    /* Start from all-ones so the AND accumulation works. */
    bcopy(etherbroadcastaddr, ctx.addr, ETHER_ADDR_LEN);
    bcopy(etherbroadcastaddr, ctx.mask, ETHER_ADDR_LEN);

    if_foreach_llmaddr(ifp, nfe_hash_maddr, &ctx);

    for (i = 0; i < ETHER_ADDR_LEN; i++) {
        ctx.mask[i] |= ctx.addr[i];
    }

done:
    ctx.addr[0] |= 0x01;	/* make sure multicast bit is set */

    NFE_WRITE(sc, NFE_MULTIADDR_HI, ctx.addr[3] << 24 | ctx.addr[2] << 16 |
        ctx.addr[1] << 8 | ctx.addr[0]);
    NFE_WRITE(sc, NFE_MULTIADDR_LO,
        ctx.addr[5] <<  8 | ctx.addr[4]);
    NFE_WRITE(sc, NFE_MULTIMASK_HI, ctx.mask[3] << 24 | ctx.mask[2] << 16 |
        ctx.mask[1] << 8 | ctx.mask[0]);
    NFE_WRITE(sc, NFE_MULTIMASK_LO,
        ctx.mask[5] <<  8 | ctx.mask[4]);

    filter = NFE_READ(sc, NFE_RXFILTER);
    filter &= NFE_PFF_RX_PAUSE;
    filter |= NFE_RXFILTER_MAGIC;
    filter |= (if_getflags(ifp) & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
    NFE_WRITE(sc, NFE_RXFILTER, filter);
}
/*
 * ifnet transmit-start entry point: take the softc lock and defer to
 * the locked variant.
 */
static void
nfe_start(if_t ifp)
{
    struct nfe_softc *sc;

    sc = if_getsoftc(ifp);
    NFE_LOCK(sc);
    nfe_start_locked(ifp);
    NFE_UNLOCK(sc);
}
/*
 * Drain the interface send queue into the Tx ring via nfe_encap(),
 * then kick the Tx DMA engine once if anything was queued.  No-op
 * unless running, not OACTIVE, and link is up.  Requires the softc
 * lock.
 */
static void
nfe_start_locked(if_t ifp)
{
    struct nfe_softc *sc = if_getsoftc(ifp);
    struct mbuf *m0;
    int enq = 0;

    NFE_LOCK_ASSERT(sc);

    if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING || sc->nfe_link == 0)
        return;

    while (!if_sendq_empty(ifp)) {
        m0 = if_dequeue(ifp);

        if (m0 == NULL)
            break;

        /* Ring full (or mapping failed): requeue and back off. */
        if (nfe_encap(sc, &m0) != 0) {
            if (m0 == NULL)
                break;
            if_sendq_prepend(ifp, m0);
            if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
            break;
        }
        enq++;
        ether_bpf_mtap_if(ifp, m0);
    }

    if (enq > 0) {
        bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        /* kick Tx */
        NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

        /*
         * Set a timeout in case the chip goes out to lunch.
         */
        sc->nfe_watchdog_timer = 5;
    }
}
/*
 * Per-tick Tx watchdog.  When the timer expires, first try to recover
 * from a missed Tx-completion interrupt, then (up to three times) from
 * a lost start-Tx command by re-kicking the engine; only after that
 * declare a timeout and fully reinitialize the chip.
 */
static void
nfe_watchdog(if_t ifp)
{
    struct nfe_softc *sc = if_getsoftc(ifp);

    if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
        return;

    /* Check if we've lost Tx completion interrupt. */
    nfe_txeof(sc);
    if (sc->txq.queued == 0) {
        if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
            "-- recovering\n");
        if (!if_sendq_empty(ifp))
            nfe_start_locked(ifp);
        return;
    }
    /* Check if we've lost start Tx command. */
    sc->nfe_force_tx++;
    if (sc->nfe_force_tx <= 3) {
        /*
         * If this is the case for watchdog timeout, the following
         * code should go to nfe_txeof().
         */
        NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
        return;
    }
    sc->nfe_force_tx = 0;

    if_printf(ifp, "watchdog timeout\n");

    if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
    if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
    nfe_init_locked(sc);
}
/*
 * Locking wrapper around nfe_init_locked(); usable as an ifnet init
 * callback.
 */
static void
nfe_init(void *xsc)
{
    struct nfe_softc *sc;

    sc = xsc;
    NFE_LOCK(sc);
    nfe_init_locked(sc);
    NFE_UNLOCK(sc);
}
/*
 * Bring the chip up; called with the softc lock held and a no-op if
 * already running.  Stops the chip, rebuilds the rings (jumbo or
 * standard depending on MTU), programs descriptor format, offload and
 * VLAN options, ring addresses/sizes, wakes the MAC, configures
 * interrupt moderation and the Rx filter, then starts Rx/Tx and the
 * stats tick.
 */
static void
nfe_init_locked(void *xsc)
{
    struct nfe_softc *sc = xsc;
    if_t ifp = sc->nfe_ifp;
    struct mii_data *mii;
    uint32_t val;
    int error;

    NFE_LOCK_ASSERT(sc);

    mii = device_get_softc(sc->nfe_miibus);

    if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
        return;

    nfe_stop(ifp);

    sc->nfe_framesize = if_getmtu(ifp) + NFE_RX_HEADERS;

    nfe_init_tx_ring(sc, &sc->txq);
    if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
        error = nfe_init_jrx_ring(sc, &sc->jrxq);
    else
        error = nfe_init_rx_ring(sc, &sc->rxq);
    if (error != 0) {
        device_printf(sc->nfe_dev,
            "initialization failed: no memory for rx buffers\n");
        nfe_stop(ifp);
        return;
    }

    val = 0;
    if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
        val |= NFE_MAC_ADDR_INORDER;
    NFE_WRITE(sc, NFE_TX_UNK, val);
    NFE_WRITE(sc, NFE_STATUS, 0);

    if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
        NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);

    /* Select the descriptor format matching the chip generation. */
    sc->rxtxctl = NFE_RXTX_BIT2;
    if (sc->nfe_flags & NFE_40BIT_ADDR)
        sc->rxtxctl |= NFE_RXTX_V3MAGIC;
    else if (sc->nfe_flags & NFE_JUMBO_SUP)
        sc->rxtxctl |= NFE_RXTX_V2MAGIC;

    if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
        sc->rxtxctl |= NFE_RXTX_RXCSUM;
    if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
        sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

    NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
    DELAY(10);
    NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

    if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
        NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
    else
        NFE_WRITE(sc, NFE_VTAG_CTL, 0);

    NFE_WRITE(sc, NFE_SETUP_R6, 0);

    /* set MAC address */
    nfe_set_macaddr(sc, if_getlladdr(ifp));

    /* tell MAC where rings are in memory */
    if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
        NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
            NFE_ADDR_HI(sc->jrxq.jphysaddr));
        NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
            NFE_ADDR_LO(sc->jrxq.jphysaddr));
    } else {
        NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
            NFE_ADDR_HI(sc->rxq.physaddr));
        NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
            NFE_ADDR_LO(sc->rxq.physaddr));
    }
    NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
    NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));

    NFE_WRITE(sc, NFE_RING_SIZE,
        (NFE_RX_RING_COUNT - 1) << 16 |
        (NFE_TX_RING_COUNT - 1));

    NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);

    /* force MAC to wakeup */
    val = NFE_READ(sc, NFE_PWR_STATE);
    if ((val & NFE_PWR_WAKEUP) == 0)
        NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
    DELAY(10);
    val = NFE_READ(sc, NFE_PWR_STATE);
    NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);

#if 1
    /* configure interrupts coalescing/mitigation */
    NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
    /* no interrupt mitigation: one interrupt per packet */
    NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

    NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
    NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
    NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

    /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
    NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

    NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
    /* Disable WOL. */
    NFE_WRITE(sc, NFE_WOL_CTL, 0);

    sc->rxtxctl &= ~NFE_RXTX_BIT2;
    NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
    DELAY(10);
    NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

    /* set Rx filter */
    nfe_setmulti(sc);

    /* enable Rx */
    NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

    /* enable Tx */
    NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

    NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

    /* Clear hardware stats. */
    nfe_stats_clear(sc);

#ifdef DEVICE_POLLING
    if (if_getcapenable(ifp) & IFCAP_POLLING)
        nfe_disable_intr(sc);
    else
#endif
    nfe_set_intr(sc);
    nfe_enable_intr(sc); /* enable interrupts */

    if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
    if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

    sc->nfe_link = 0;
    mii_mediachg(mii);

    callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
}
/*
 * Stop the interface: halt Tx/Rx, mask interrupts, and release every
 * mbuf still attached to the Rx, jumbo Rx and Tx rings.  Called with
 * the softc lock held.
 */
static void
nfe_stop(if_t ifp)
{
	struct nfe_softc *sc = if_getsoftc(ifp);
	struct nfe_rx_ring *rx_ring;
	struct nfe_jrx_ring *jrx_ring;
	struct nfe_tx_ring *tx_ring;
	struct nfe_rx_data *rdata;
	struct nfe_tx_data *tdata;
	int i;

	NFE_LOCK_ASSERT(sc);

	sc->nfe_watchdog_timer = 0;
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));

	callout_stop(&sc->nfe_stat_ch);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	nfe_disable_intr(sc);

	sc->nfe_link = 0;

	/* free Rx and Tx mbufs still in the queues. */
	rx_ring = &sc->rxq;
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		rdata = &rx_ring->data[i];
		if (rdata->m != NULL) {
			bus_dmamap_sync(rx_ring->rx_data_tag,
			    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rx_ring->rx_data_tag,
			    rdata->rx_data_map);
			m_freem(rdata->m);
			rdata->m = NULL;
		}
	}

	if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
		jrx_ring = &sc->jrxq;
		for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
			rdata = &jrx_ring->jdata[i];
			if (rdata->m != NULL) {
				bus_dmamap_sync(jrx_ring->jrx_data_tag,
				    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(jrx_ring->jrx_data_tag,
				    rdata->rx_data_map);
				m_freem(rdata->m);
				rdata->m = NULL;
			}
		}
	}

	tx_ring = &sc->txq;
	/*
	 * Fix: walk the Tx ring with the Tx ring count.  The loop
	 * previously used NFE_RX_RING_COUNT, which only works by
	 * accident when the two counts happen to be equal.
	 */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		tdata = &tx_ring->data[i];
		if (tdata->m != NULL) {
			bus_dmamap_sync(tx_ring->tx_data_tag,
			    tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(tx_ring->tx_data_tag,
			    tdata->tx_data_map);
			m_freem(tdata->m);
			tdata->m = NULL;
		}
	}
	/* Update hardware stats. */
	nfe_stats_update(sc);
}
/*
 * ifmedia "change" callback: kick the MII layer to re-program the
 * PHY for the currently selected media.  Always succeeds.
 */
static int
nfe_ifmedia_upd(if_t ifp)
{
	struct nfe_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	NFE_LOCK(sc);
	mii = device_get_softc(sc->nfe_miibus);
	mii_mediachg(mii);
	NFE_UNLOCK(sc);

	return (0);
}
/*
 * ifmedia "status" callback: poll the PHY and report the active
 * media and link status back to the caller.
 */
static void
nfe_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct mii_data *mii;
	struct nfe_softc *sc = if_getsoftc(ifp);

	NFE_LOCK(sc);
	mii = device_get_softc(sc->nfe_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	NFE_UNLOCK(sc);
}
/*
 * Once-a-second timer: drive the MII state machine, fold in the
 * hardware statistics, check the Tx watchdog and re-arm ourselves.
 * Runs with the softc lock held (asserted below).
 */
void
nfe_tick(void *xsc)
{
	struct nfe_softc *sc = xsc;
	struct mii_data *mii;

	NFE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->nfe_miibus);
	mii_tick(mii);
	nfe_stats_update(sc);
	nfe_watchdog(sc->nfe_ifp);
	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
}
/*
 * Shutdown method: treated identically to suspend, which quiesces
 * the device (see nfe_suspend()).
 */
static int
nfe_shutdown(device_t dev)
{

	return (nfe_suspend(dev));
}
/*
 * Extract the station address from the MACADDR registers into addr[].
 * Chips without NFE_CORRECT_MACADDR store the address in reversed
 * byte order, so the two branches index addr[] in opposite directions.
 */
static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t lo, hi;

	lo = NFE_READ(sc, NFE_MACADDR_LO);
	hi = NFE_READ(sc, NFE_MACADDR_HI);
	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
		/* Address is stored reversed in the registers. */
		addr[0] = (lo >> 8) & 0xff;
		addr[1] = lo & 0xff;
		addr[2] = (hi >> 24) & 0xff;
		addr[3] = (hi >> 16) & 0xff;
		addr[4] = (hi >> 8) & 0xff;
		addr[5] = hi & 0xff;
	} else {
		addr[5] = (lo >> 8) & 0xff;
		addr[4] = lo & 0xff;
		addr[3] = (hi >> 24) & 0xff;
		addr[2] = (hi >> 16) & 0xff;
		addr[1] = (hi >> 8) & 0xff;
		addr[0] = hi & 0xff;
	}
}
/*
 * Program the station address into the MACADDR registers, in the
 * byte order written by nfe_get_macaddr()'s "reversed" branch.
 */
static void
nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
{

	/*
	 * Fix: cast to uint32_t before shifting.  addr[3] promotes to
	 * (signed) int, and `addr[3] << 24' with addr[3] >= 0x80 shifts
	 * into the sign bit, which is undefined behavior.
	 */
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    (uint32_t)addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    (uint32_t)addr[3] << 24 | (uint32_t)addr[2] << 16 |
	    (uint32_t)addr[1] << 8 | addr[0]);
}
/*
 * bus_dma load callback: record the single segment's bus address in
 * the caller-supplied nfe_dmamap_arg.  Does nothing on load error.
 */
static void
nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct nfe_dmamap_arg *ctx = arg;

	if (error != 0)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	ctx->nfe_busaddr = segs[0].ds_addr;
}
/*
 * Generic range-checked integer sysctl handler.  Rejects writes whose
 * value falls outside [low, high] with EINVAL; reads pass through.
 */
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	/* New value supplied: validate before committing. */
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;
	return (0);
}
/*
 * Sysctl handler for dev.nfe.N.process_limit: accepts only values in
 * [NFE_PROC_MIN, NFE_PROC_MAX].
 */
static int
sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
	    NFE_PROC_MAX));
}
#define NFE_SYSCTL_STAT_ADD32(c, h, n, p, d) \
SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define NFE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
/*
 * Create the device sysctl tree: the tunable Rx process limit plus,
 * on chips with MIB counters, a "stats" subtree mirroring the fields
 * of struct nfe_hw_stats accumulated by nfe_stats_update().
 */
static void
nfe_sysctl_node(struct nfe_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct nfe_hw_stats *stats;
	int error;

	stats = &sc->nfe_stats;
	ctx = device_get_sysctl_ctx(sc->nfe_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev));
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
	    "max number of Rx events to process");

	/* Honor a hints(5) override, falling back to the default. */
	sc->nfe_process_limit = NFE_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->nfe_dev),
	    device_get_unit(sc->nfe_dev), "process_limit",
	    &sc->nfe_process_limit);
	if (error == 0) {
		if (sc->nfe_process_limit < NFE_PROC_MIN ||
		    sc->nfe_process_limit > NFE_PROC_MAX) {
			device_printf(sc->nfe_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", NFE_PROC_DEFAULT);
			sc->nfe_process_limit = NFE_PROC_DEFAULT;
		}
	}

	/* No MIB counters on this chip: nothing more to export. */
	if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
		return;

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NFE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);

	NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors",
	    &stats->rx_frame_errors, "Framing Errors");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes",
	    &stats->rx_extra_bytes, "Extra Bytes");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
	    &stats->rx_late_cols, "Late Collisions");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "runts",
	    &stats->rx_runts, "Runts");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos",
	    &stats->rx_jumbos, "Jumbos");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns",
	    &stats->rx_fifo_overuns, "FIFO Overruns");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors",
	    &stats->rx_crc_errors, "CRC Errors");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "fae",
	    &stats->rx_fae, "Frame Alignment Errors");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors",
	    &stats->rx_len_errors, "Length Errors");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
	    &stats->rx_unicast, "Unicast Frames");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
	    &stats->rx_multicast, "Multicast Frames");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
	    &stats->rx_broadcast, "Broadcast Frames");
	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
		NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
		    &stats->rx_octets, "Octets");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
		    &stats->rx_pause, "Pause frames");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "drops",
		    &stats->rx_drops, "Drop frames");
	}

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
	    &stats->tx_octets, "Octets");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits",
	    &stats->tx_zero_rexmits, "Zero Retransmits");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits",
	    &stats->tx_one_rexmits, "One Retransmits");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits",
	    &stats->tx_multi_rexmits, "Multiple Retransmits");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
	    &stats->tx_late_cols, "Late Collisions");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns",
	    &stats->tx_fifo_underuns, "FIFO Underruns");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts",
	    &stats->tx_carrier_losts, "Carrier Losts");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals",
	    &stats->tx_excess_deferals, "Excess Deferrals");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors",
	    &stats->tx_retry_errors, "Retry Errors");
	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
		NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals",
		    &stats->tx_deferals, "Deferrals");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "frames",
		    &stats->tx_frames, "Frames");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
		    &stats->tx_pause, "Pause Frames");
	}
	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
		/*
		 * Fix: export the V3 counters from their own fields.
		 * These previously pointed at tx_deferals/tx_frames/
		 * tx_pause (copy-paste from the V2 branch), so the
		 * sysctls reported the wrong statistics even though
		 * nfe_stats_update() fills tx_unicast/tx_multicast/
		 * tx_broadcast correctly.
		 */
		NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
		    &stats->tx_unicast, "Unicast Frames");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
		    &stats->tx_multicast, "Multicast Frames");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
		    &stats->tx_broadcast, "Broadcast Frames");
	}
}
#undef NFE_SYSCTL_STAT_ADD32
#undef NFE_SYSCTL_STAT_ADD64
/*
 * Clear the hardware MIB counters by reading them all once and
 * discarding the values (nfe_stats_update() treats the registers as
 * accumulate-on-read).  V1 and V2/V3 chips expose different counts.
 */
static void
nfe_stats_clear(struct nfe_softc *sc)
{
	int mib_cnt, reg;

	if ((sc->nfe_flags & NFE_MIB_V1) != 0)
		mib_cnt = NFE_NUM_MIB_STATV1;
	else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0)
		mib_cnt = NFE_NUM_MIB_STATV2;
	else
		return;

	for (reg = 0; reg < mib_cnt; reg++)
		NFE_READ(sc, NFE_TX_OCTET + reg * sizeof(uint32_t));

	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
		/* V3 adds three Tx counters outside the contiguous bank. */
		NFE_READ(sc, NFE_TX_UNICAST);
		NFE_READ(sc, NFE_TX_MULTICAST);
		NFE_READ(sc, NFE_TX_BROADCAST);
	}
}
/*
 * Fold the hardware MIB counters into the softc's running totals.
 * The counters appear to be clear-on-read (nfe_stats_clear() resets
 * them by merely reading), so each register is read exactly once per
 * call and added to the accumulator.  Called with the softc lock held.
 */
static void
nfe_stats_update(struct nfe_softc *sc)
{
	struct nfe_hw_stats *stats;

	NFE_LOCK_ASSERT(sc);

	/* Chips without any MIB block have none of these registers. */
	if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
		return;

	stats = &sc->nfe_stats;
	/* Counters common to all MIB versions. */
	stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET);
	stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT);
	stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT);
	stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT);
	stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL);
	stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN);
	stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST);
	stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL);
	stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR);
	stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR);
	stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES);
	stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL);
	stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT);
	stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO);
	stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN);
	stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR);
	stats->rx_fae += NFE_READ(sc, NFE_RX_FAE);
	stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR);
	stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST);
	stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST);
	stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST);
	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
		/* V2/V3-only counters. */
		stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL);
		stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME);
		stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET);
		stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE);
		stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE);
		stats->rx_drops += NFE_READ(sc, NFE_RX_DROP);
	}
	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
		/* V3-only Tx frame-class counters. */
		stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST);
		stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST);
		stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST);
	}
}
static void
nfe_set_linkspeed(struct nfe_softc *sc)
{
struct mii_softc *miisc;
struct mii_data *mii;
int aneg, i, phyno;
NFE_LOCK_ASSERT(sc);
mii = device_get_softc(sc->nfe_miibus);
mii_pollstat(mii);
aneg = 0;
if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
(IFM_ACTIVE | IFM_AVALID)) {
switch IFM_SUBTYPE(mii->mii_media_active) {
case IFM_10_T:
case IFM_100_TX:
return;
case IFM_1000_T:
aneg++;
break;
default:
break;
}
}
miisc = LIST_FIRST(&mii->mii_phys);
phyno = miisc->mii_phy;
LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
PHY_RESET(miisc);
nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0);
nfe_miibus_writereg(sc->nfe_dev, phyno,
MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
nfe_miibus_writereg(sc->nfe_dev, phyno,
MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
DELAY(1000);
if (aneg != 0) {
/*
* Poll link state until nfe(4) get a 10/100Mbps link.
*/
for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
mii_pollstat(mii);
if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
== (IFM_ACTIVE | IFM_AVALID)) {
switch (IFM_SUBTYPE(mii->mii_media_active)) {
case IFM_10_T:
case IFM_100_TX:
nfe_mac_config(sc, mii);
return;
default:
break;
}
}
NFE_UNLOCK(sc);
pause("nfelnk", hz);
NFE_LOCK(sc);
}
if (i == MII_ANEGTICKS_GIGE)
device_printf(sc->nfe_dev,
"establishing a link failed, WOL may not work!");
}
/*
* No link, force MAC to have 100Mbps, full-duplex link.
* This is the last resort and may/may not work.
*/
mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
nfe_mac_config(sc, mii);
}
/*
 * Arm (or disarm) Wake-on-LAN according to the interface's WOL
 * capability flags, then program PME in PCI power management config
 * space.  When magic-packet wake is enabled the link is first dropped
 * to 10/100 (nfe_set_linkspeed()) and the receiver is left running so
 * the chip can see the magic packet.  Called with the softc lock held.
 */
static void
nfe_set_wol(struct nfe_softc *sc)
{
	if_t ifp;
	uint32_t wolctl;
	int pmc;
	uint16_t pmstat;

	NFE_LOCK_ASSERT(sc);

	/* Without a PCI power-management capability WOL is impossible. */
	if (pci_find_cap(sc->nfe_dev, PCIY_PMG, &pmc) != 0)
		return;
	ifp = sc->nfe_ifp;
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
		wolctl = NFE_WOL_MAGIC;
	else
		wolctl = 0;
	NFE_WRITE(sc, NFE_WOL_CTL, wolctl);
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) {
		nfe_set_linkspeed(sc);
		if ((sc->nfe_flags & NFE_PWR_MGMT) != 0)
			/* Keep the clocks running while suspended. */
			NFE_WRITE(sc, NFE_PWR2_CTL,
			    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS);
		/* Enable RX. */
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0);
		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0);
		NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) |
		    NFE_RX_START);
	}
	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}
diff --git a/sys/dev/nge/if_nge.c b/sys/dev/nge/if_nge.c
index dc3be913f89b..c7c2b3c99d5a 100644
--- a/sys/dev/nge/if_nge.c
+++ b/sys/dev/nge/if_nge.c
@@ -1,2735 +1,2730 @@
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) 2001 Wind River Systems
* Copyright (c) 1997, 1998, 1999, 2000, 2001
* Bill Paul <wpaul@bsdi.com>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
/*
* National Semiconductor DP83820/DP83821 gigabit ethernet driver
* for FreeBSD. Datasheets are available from:
*
* http://www.national.com/ds/DP/DP83820.pdf
* http://www.national.com/ds/DP/DP83821.pdf
*
* These chips are used on several low cost gigabit ethernet NICs
* sold by D-Link, Addtron, SMC and Asante. Both parts are
* virtually the same, except the 83820 is a 64-bit/32-bit part,
* while the 83821 is 32-bit only.
*
* Many cards also use National gigE transceivers, such as the
* DP83891, DP83861 and DP83862 gigPHYTER parts. The DP83861 datasheet
* contains a full register description that applies to all of these
* components:
*
* http://www.national.com/ds/DP/DP83861.pdf
*
* Written by Bill Paul <wpaul@bsdi.com>
* BSDi Open Source Solutions
*/
/*
* The NatSemi DP83820 and 83821 controllers are enhanced versions
* of the NatSemi MacPHYTER 10/100 devices. They support 10, 100
* and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII
* ports. Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
* hardware checksum offload (IPv4 only), VLAN tagging and filtering,
* priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
* matching buffers, one perfect address filter buffer and interrupt
* moderation. The 83820 supports both 64-bit and 32-bit addressing
* and data transfers: the 64-bit support can be toggled on or off
* via software. This affects the size of certain fields in the DMA
* descriptors.
*
* There are two bugs/misfeatures in the 83820/83821 that I have
* discovered so far:
*
* - Receive buffers must be aligned on 64-bit boundaries, which means
* you must resort to copying data in order to fix up the payload
* alignment.
*
* - In order to transmit jumbo frames larger than 8170 bytes, you have
* to turn off transmit checksum offloading, because the chip can't
* compute the checksum on an outgoing frame unless it fits entirely
* within the TX FIFO, which is only 8192 bytes in size. If you have
* TX checksum offload enabled and you transmit attempt to transmit a
* frame larger than 8170 bytes, the transmitter will wedge.
*
* To work around the latter problem, TX checksum offload is disabled
* if the user selects an MTU larger than 8152 (8170 - 18).
*/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <dev/nge/if_ngereg.h>
/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
MODULE_DEPEND(nge, pci, 1, 1, 1);
MODULE_DEPEND(nge, ether, 1, 1, 1);
MODULE_DEPEND(nge, miibus, 1, 1, 1);
#define NGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
/*
* Various supported device vendors/types and their names.
*/
static const struct nge_type nge_devs[] = {
{ NGE_VENDORID, NGE_DEVICEID,
"National Semiconductor Gigabit Ethernet" },
{ 0, 0, NULL }
};
static int nge_probe(device_t);
static int nge_attach(device_t);
static int nge_detach(device_t);
static int nge_shutdown(device_t);
static int nge_suspend(device_t);
static int nge_resume(device_t);
static __inline void nge_discard_rxbuf(struct nge_softc *, int);
static int nge_newbuf(struct nge_softc *, int);
static int nge_encap(struct nge_softc *, struct mbuf **);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void nge_fixup_rx(struct mbuf *);
#endif
static int nge_rxeof(struct nge_softc *);
static void nge_txeof(struct nge_softc *);
static void nge_intr(void *);
static void nge_tick(void *);
static void nge_stats_update(struct nge_softc *);
static void nge_start(if_t);
static void nge_start_locked(if_t);
static int nge_ioctl(if_t, u_long, caddr_t);
static void nge_init(void *);
static void nge_init_locked(struct nge_softc *);
static int nge_stop_mac(struct nge_softc *);
static void nge_stop(struct nge_softc *);
static void nge_wol(struct nge_softc *);
static void nge_watchdog(struct nge_softc *);
static int nge_mediachange(if_t);
static void nge_mediastatus(if_t, struct ifmediareq *);
static void nge_delay(struct nge_softc *);
static void nge_eeprom_idle(struct nge_softc *);
static void nge_eeprom_putbyte(struct nge_softc *, int);
static void nge_eeprom_getword(struct nge_softc *, int, uint16_t *);
static void nge_read_eeprom(struct nge_softc *, caddr_t, int, int);
static int nge_miibus_readreg(device_t, int, int);
static int nge_miibus_writereg(device_t, int, int, int);
static void nge_miibus_statchg(device_t);
static void nge_rxfilter(struct nge_softc *);
static void nge_reset(struct nge_softc *);
static void nge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int nge_dma_alloc(struct nge_softc *);
static void nge_dma_free(struct nge_softc *);
static int nge_list_rx_init(struct nge_softc *);
static int nge_list_tx_init(struct nge_softc *);
static void nge_sysctl_node(struct nge_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_nge_int_holdoff(SYSCTL_HANDLER_ARGS);
/*
* MII bit-bang glue
*/
static uint32_t nge_mii_bitbang_read(device_t);
static void nge_mii_bitbang_write(device_t, uint32_t);
static const struct mii_bitbang_ops nge_mii_bitbang_ops = {
nge_mii_bitbang_read,
nge_mii_bitbang_write,
{
NGE_MEAR_MII_DATA, /* MII_BIT_MDO */
NGE_MEAR_MII_DATA, /* MII_BIT_MDI */
NGE_MEAR_MII_CLK, /* MII_BIT_MDC */
NGE_MEAR_MII_DIR, /* MII_BIT_DIR_HOST_PHY */
0, /* MII_BIT_DIR_PHY_HOST */
}
};
static device_method_t nge_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, nge_probe),
DEVMETHOD(device_attach, nge_attach),
DEVMETHOD(device_detach, nge_detach),
DEVMETHOD(device_shutdown, nge_shutdown),
DEVMETHOD(device_suspend, nge_suspend),
DEVMETHOD(device_resume, nge_resume),
/* MII interface */
DEVMETHOD(miibus_readreg, nge_miibus_readreg),
DEVMETHOD(miibus_writereg, nge_miibus_writereg),
DEVMETHOD(miibus_statchg, nge_miibus_statchg),
DEVMETHOD_END
};
static driver_t nge_driver = {
"nge",
nge_methods,
sizeof(struct nge_softc)
};
DRIVER_MODULE(nge, pci, nge_driver, 0, 0);
DRIVER_MODULE(miibus, nge, miibus_driver, 0, 0);
#define NGE_SETBIT(sc, reg, x) \
CSR_WRITE_4(sc, reg, \
CSR_READ_4(sc, reg) | (x))
#define NGE_CLRBIT(sc, reg, x) \
CSR_WRITE_4(sc, reg, \
CSR_READ_4(sc, reg) & ~(x))
#define SIO_SET(x) \
CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | (x))
#define SIO_CLR(x) \
CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~(x))
/*
 * Short busy-wait implemented as a fixed number of harmless CSR
 * reads; used to pace the EEPROM bit-bang routines.
 */
static void
nge_delay(struct nge_softc *sc)
{
	int n;

	for (n = 0; n < (300 / 33) + 1; n++)
		CSR_READ_4(sc, NGE_CSR);
}
/*
 * Drive the EEPROM interface to its idle state: assert chip select,
 * clock out a train of 25 pulses, then deassert everything and clear
 * the MEAR register.  The exact pulse sequence is order-critical.
 */
static void
nge_eeprom_idle(struct nge_softc *sc)
{
	int i;

	SIO_SET(NGE_MEAR_EE_CSEL);
	nge_delay(sc);
	SIO_SET(NGE_MEAR_EE_CLK);
	nge_delay(sc);

	/* Clock the part until it gives up any in-progress operation. */
	for (i = 0; i < 25; i++) {
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}

	SIO_CLR(NGE_MEAR_EE_CLK);
	nge_delay(sc);
	SIO_CLR(NGE_MEAR_EE_CSEL);
	nge_delay(sc);
	CSR_WRITE_4(sc, NGE_MEAR, 0x00000000);
}
/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
nge_eeprom_putbyte(struct nge_softc *sc, int addr)
{
	int d, i;

	d = addr | NGE_EECMD_READ;

	/*
	 * Feed in each bit and strobe the clock.
	 * (Bits are shifted out MSB first, starting from bit 10.)
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			SIO_SET(NGE_MEAR_EE_DIN);
		} else {
			SIO_CLR(NGE_MEAR_EE_DIN);
		}
		nge_delay(sc);
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}
}
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
nge_eeprom_getword(struct nge_softc *sc, int addr, uint16_t *dest)
{
	int i;
	uint16_t word = 0;

	/* Force EEPROM to idle state. */
	nge_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	nge_delay(sc);
	SIO_CLR(NGE_MEAR_EE_CLK);
	nge_delay(sc);
	SIO_SET(NGE_MEAR_EE_CSEL);
	nge_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	nge_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 * (16 bits are clocked in MSB first; each set DOUT sample
	 * contributes the corresponding bit of 'word'.)
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT)
			word |= i;
		nge_delay(sc);
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	nge_eeprom_idle(sc);

	*dest = word;
}
/*
 * Read 'cnt' consecutive 16-bit words from the EEPROM starting at
 * word offset 'off' into the buffer 'dest'.
 */
static void
nge_read_eeprom(struct nge_softc *sc, caddr_t dest, int off, int cnt)
{
	uint16_t *out;
	int i;

	out = (uint16_t *)dest;
	for (i = 0; i < cnt; i++)
		nge_eeprom_getword(sc, off + i, &out[i]);
}
/*
 * Read the MII serial port for the MII bit-bang module.
 */
static uint32_t
nge_mii_bitbang_read(device_t dev)
{
	struct nge_softc *sc = device_get_softc(dev);
	uint32_t mear;

	mear = CSR_READ_4(sc, NGE_MEAR);
	/* Keep the MEAR access ordered with surrounding bit-bang I/O. */
	CSR_BARRIER_4(sc, NGE_MEAR,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (mear);
}
/*
 * Write the MII serial port for the MII bit-bang module.
 */
static void
nge_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct nge_softc *sc = device_get_softc(dev);

	CSR_WRITE_4(sc, NGE_MEAR, val);
	/* Keep the MEAR access ordered with surrounding bit-bang I/O. */
	CSR_BARRIER_4(sc, NGE_MEAR,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
/*
 * miibus read method.  In TBI (ten-bit interface) mode there is no
 * real MII bus: MII register numbers are translated to the chip's
 * TBI register block, with BMSR synthesized from the chip's own
 * status layout.  Otherwise the access is bit-banged over MEAR.
 */
static int
nge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nge_softc *sc = device_get_softc(dev);
	uint32_t bmsr;
	int val;

	if ((sc->nge_flags & NGE_FLAG_TBI) == 0)
		return (mii_bitbang_readreg(dev, &nge_mii_bitbang_ops, phy,
		    reg));

	/* Pretend PHY is at address 0. */
	if (phy != 0)
		return (0);

	switch (reg) {
	case MII_BMCR:
		reg = NGE_TBI_BMCR;
		break;
	case MII_BMSR:
		/* 83820/83821 has different bit layout for BMSR. */
		val = BMSR_ANEG | BMSR_EXTCAP | BMSR_EXTSTAT;
		bmsr = CSR_READ_4(sc, NGE_TBI_BMSR);
		if ((bmsr & NGE_TBIBMSR_ANEG_DONE) != 0)
			val |= BMSR_ACOMP;
		if ((bmsr & NGE_TBIBMSR_LINKSTAT) != 0)
			val |= BMSR_LINK;
		return (val);
	case MII_ANAR:
		reg = NGE_TBI_ANAR;
		break;
	case MII_ANLPAR:
		reg = NGE_TBI_ANLPAR;
		break;
	case MII_ANER:
		reg = NGE_TBI_ANER;
		break;
	case MII_EXTSR:
		reg = NGE_TBI_ESR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		return (0);
	default:
		device_printf(sc->nge_dev,
		    "bad phy register read : %d\n", reg);
		return (0);
	}
	return (CSR_READ_4(sc, reg));
}
/*
 * miibus write method, mirroring nge_miibus_readreg(): in TBI mode
 * translate the MII register number to the chip's TBI register block
 * and write directly; otherwise bit-bang the write over MEAR.
 */
static int
nge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct nge_softc *sc = device_get_softc(dev);

	if ((sc->nge_flags & NGE_FLAG_TBI) == 0) {
		mii_bitbang_writereg(dev, &nge_mii_bitbang_ops, phy, reg,
		    data);
		return (0);
	}

	/* Pretend PHY is at address 0. */
	if (phy != 0)
		return (0);

	switch (reg) {
	case MII_BMCR:
		reg = NGE_TBI_BMCR;
		break;
	case MII_BMSR:
		/* Status register is read-only. */
		return (0);
	case MII_ANAR:
		reg = NGE_TBI_ANAR;
		break;
	case MII_ANLPAR:
		reg = NGE_TBI_ANLPAR;
		break;
	case MII_ANER:
		reg = NGE_TBI_ANER;
		break;
	case MII_EXTSR:
		reg = NGE_TBI_ESR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		return (0);
	default:
		device_printf(sc->nge_dev,
		    "bad phy register write : %d\n", reg);
		return (0);
	}
	CSR_WRITE_4(sc, reg, data);
	return (0);
}
/*
 * media status/link state change handler.
 *
 * Re-resolves the link flag from the MII layer, drains both rings,
 * then reprograms duplex and the 1000Mbps mode bit and performs a
 * full Tx/Rx MAC reset (which also requires reloading the descriptor
 * list pointers).  The statement order below follows the hardware's
 * required reset sequence — do not reorder.  Called with the softc
 * lock held.
 */
static void
nge_miibus_statchg(device_t dev)
{
	struct nge_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	struct nge_txdesc *txd;
	uint32_t done, reg, status;
	int i;

	sc = device_get_softc(dev);
	NGE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->nge_miibus);
	ifp = sc->nge_ifp;
	/* Nothing to do until the interface is actually running. */
	if (mii == NULL || ifp == NULL ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	/* Recompute the link flag from the freshly resolved media. */
	sc->nge_flags &= ~NGE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
			sc->nge_flags |= NGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop Tx/Rx MACs. */
	if (nge_stop_mac(sc) == ETIMEDOUT)
		device_printf(sc->nge_dev,
		    "%s: unable to stop Tx/Rx MAC\n", __func__);
	/* Reap anything already completed before tearing down. */
	nge_txeof(sc);
	nge_rxeof(sc);
	if (sc->nge_head != NULL) {
		m_freem(sc->nge_head);
		sc->nge_head = sc->nge_tail = NULL;
	}

	/* Release queued frames. */
	for (i = 0; i < NGE_TX_RING_CNT; i++) {
		txd = &sc->nge_cdata.nge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->nge_cdata.nge_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->nge_cdata.nge_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/* Program MAC with resolved speed/duplex. */
	if ((sc->nge_flags & NGE_FLAG_LINK) != 0) {
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
			NGE_SETBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT | NGE_TXCFG_IGN_CARR));
			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
#ifdef notyet
			/* Enable flow-control. */
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) != 0)
				NGE_SETBIT(sc, NGE_PAUSECSR,
				    NGE_PAUSECSR_PAUSE_ENB);
#endif
		} else {
			NGE_CLRBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT | NGE_TXCFG_IGN_CARR));
			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
			NGE_CLRBIT(sc, NGE_PAUSECSR, NGE_PAUSECSR_PAUSE_ENB);
		}

		/* If we have a 1000Mbps link, set the mode_1000 bit. */
		reg = CSR_READ_4(sc, NGE_CFG);
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
		case IFM_1000_T:
			reg |= NGE_CFG_MODE_1000;
			break;
		default:
			reg &= ~NGE_CFG_MODE_1000;
			break;
		}
		CSR_WRITE_4(sc, NGE_CFG, reg);

		/* Reset Tx/Rx MAC. */
		reg = CSR_READ_4(sc, NGE_CSR);
		reg |= NGE_CSR_TX_RESET | NGE_CSR_RX_RESET;
		CSR_WRITE_4(sc, NGE_CSR, reg);
		/* Check the completion of reset. */
		done = 0;
		for (i = 0; i < NGE_TIMEOUT; i++) {
			DELAY(1);
			status = CSR_READ_4(sc, NGE_ISR);
			if ((status & NGE_ISR_RX_RESET_DONE) != 0)
				done |= NGE_ISR_RX_RESET_DONE;
			if ((status & NGE_ISR_TX_RESET_DONE) != 0)
				done |= NGE_ISR_TX_RESET_DONE;
			if (done ==
			    (NGE_ISR_TX_RESET_DONE | NGE_ISR_RX_RESET_DONE))
				break;
		}
		if (i == NGE_TIMEOUT)
			device_printf(sc->nge_dev,
			    "%s: unable to reset Tx/Rx MAC\n", __func__);
		/* Reuse Rx buffer and reset consumer pointer. */
		sc->nge_cdata.nge_rx_cons = 0;
		/*
		 * It seems that resetting Rx/Tx MAC results in
		 * resetting Tx/Rx descriptor pointer registers such
		 * that reloading Tx/Rx lists address are needed.
		 */
		CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI,
		    NGE_ADDR_HI(sc->nge_rdata.nge_rx_ring_paddr));
		CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO,
		    NGE_ADDR_LO(sc->nge_rdata.nge_rx_ring_paddr));
		CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI,
		    NGE_ADDR_HI(sc->nge_rdata.nge_tx_ring_paddr));
		CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO,
		    NGE_ADDR_LO(sc->nge_rdata.nge_tx_ring_paddr));
		/* Reinitialize Tx buffers. */
		nge_list_tx_init(sc);

		/* Restart Rx MAC. */
		reg = CSR_READ_4(sc, NGE_CSR);
		reg |= NGE_CSR_RX_ENABLE;
		CSR_WRITE_4(sc, NGE_CSR, reg);
		for (i = 0; i < NGE_TIMEOUT; i++) {
			if ((CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RX_ENABLE) != 0)
				break;
			DELAY(1);
		}
		if (i == NGE_TIMEOUT)
			device_printf(sc->nge_dev,
			    "%s: unable to restart Rx MAC\n", __func__);
	}

	/* Data LED off for TBI mode */
	if ((sc->nge_flags & NGE_FLAG_TBI) != 0)
		CSR_WRITE_4(sc, NGE_GPIO,
		    CSR_READ_4(sc, NGE_GPIO) & ~NGE_GPIO_GP3_OUT);
}
/*
 * if_foreach_llmaddr() callback: set the hash-table bit for one
 * link-level multicast address.  Returns 1 so the caller counts
 * the addresses processed.
 */
static u_int
nge_write_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct nge_softc *sc = arg;
	uint32_t h;
	int bit, index;

	/*
	 * From the 11 bits returned by the crc routine, the top 7
	 * bits represent the 16-bit word in the mcast hash table
	 * that needs to be updated, and the lower 4 bits represent
	 * which bit within that word needs to be set.
	 */
	h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 21;
	index = (h >> 4) & 0x7F;
	bit = h & 0xF;
	/* Select the filter word via RXFILT_CTL, then set the bit. */
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + (index * 2));
	NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit));

	return (1);
}
static void
nge_rxfilter(struct nge_softc *sc)
{
	if_t ifp;
	uint32_t filt, slot;

	NGE_LOCK_ASSERT(sc);
	ifp = sc->nge_ifp;

	/* Disable the Rx filter while it is being reprogrammed. */
	filt = CSR_READ_4(sc, NGE_RXFILT_CTL);
	filt &= ~NGE_RXFILTCTL_ENABLE;
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, filt);
	CSR_BARRIER_4(sc, NGE_RXFILT_CTL, BUS_SPACE_BARRIER_WRITE);

	/* Start from a clean slate: no promiscuous/allmulti/broadcast. */
	filt &= ~(NGE_RXFILTCTL_ALLMULTI | NGE_RXFILTCTL_ALLPHYS);
	filt &= ~NGE_RXFILTCTL_BROAD;
	/*
	 * Unicast matching goes through the perfect-match filter,
	 * not the hash table.
	 */
	filt &= ~(NGE_RXFILTCTL_MCHASH | NGE_RXFILTCTL_UCHASH);
	/*
	 * The NatSemi chip must be told explicitly to accept ARP
	 * frames and to use the perfect-match filter holding the
	 * station address, or unicasts for this host are dropped.
	 */
	filt |= NGE_RXFILTCTL_ARP | NGE_RXFILTCTL_PERFECT;

	/* Capture broadcast frames when the interface wants them. */
	if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
		filt |= NGE_RXFILTCTL_BROAD;

	if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		filt |= NGE_RXFILTCTL_ALLMULTI;
		if ((if_getflags(ifp) & IFF_PROMISC) != 0)
			filt |= NGE_RXFILTCTL_ALLPHYS;
	} else {
		/*
		 * The multicast hash table must be explicitly enabled
		 * on the NatSemi chip before it is used.
		 */
		filt |= NGE_RXFILTCTL_MCHASH;
		/* Zap every existing hash bit first. */
		for (slot = 0; slot < NGE_MCAST_FILTER_LEN; slot += 2) {
			CSR_WRITE_4(sc, NGE_RXFILT_CTL,
			    NGE_FILTADDR_MCAST_LO + slot);
			CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0);
		}
		if_foreach_llmaddr(ifp, nge_write_maddr, sc);
	}

	/* Load the new configuration, then re-enable the filter. */
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, filt);
	filt |= NGE_RXFILTCTL_ENABLE;
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, filt);
	CSR_BARRIER_4(sc, NGE_RXFILT_CTL, BUS_SPACE_BARRIER_WRITE);
}
static void
nge_reset(struct nge_softc *sc)
{
	uint32_t cfg;
	int n;

	/* Issue a soft reset and wait for the bit to self-clear. */
	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET);
	for (n = 0; n < NGE_TIMEOUT; n++) {
		if ((CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET) == 0)
			break;
		DELAY(1);
	}
	if (n == NGE_TIMEOUT)
		device_printf(sc->nge_dev, "reset never completed\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/*
	 * If this is a NatSemi chip, make sure to clear
	 * PME mode.
	 */
	CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS);
	CSR_WRITE_4(sc, NGE_CLKRUN, 0);
	/* Clear WOL events which may interfere with Rx filter operation. */
	CSR_WRITE_4(sc, NGE_WOLCSR, 0);

	/*
	 * Only DP83820 supports 64-bit addressing/data transfers and
	 * 64-bit addressing requires different descriptor structures.
	 * To keep a single descriptor format, disable both.
	 */
	cfg = CSR_READ_4(sc, NGE_CFG);
	cfg &= ~(NGE_CFG_64BIT_ADDR_ENB | NGE_CFG_64BIT_DATA_ENB);
	CSR_WRITE_4(sc, NGE_CFG, cfg);
}
/*
* Probe for a NatSemi chip. Check the PCI vendor and device
* IDs against our list and return a device name if we find a match.
*/
static int
nge_probe(device_t dev)
{
	const struct nge_type *t;
	uint16_t did, vid;

	/* Match the PCI vendor/device pair against the supported list. */
	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (t = nge_devs; t->nge_name != NULL; t++) {
		if (vid == t->nge_vid && did == t->nge_did) {
			device_set_desc(dev, t->nge_name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}
/*
* Attach the interface. Allocate softc structures, do ifmedia
* setup and ethernet/BPF attach.
*/
static int
nge_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint16_t ea[ETHER_ADDR_LEN/2], ea_temp, reg;
	struct nge_softc *sc;
	if_t ifp;
	int error, i, rid;

	error = 0;
	sc = device_get_softc(dev);
	sc->nge_dev = dev;

	NGE_LOCK_INIT(sc, device_get_nameunit(dev));
	/* Stat/tick callout runs with the softc mutex held. */
	callout_init_mtx(&sc->nge_stat_ch, &sc->nge_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);
#ifdef NGE_USEIOSPACE
	sc->nge_res_type = SYS_RES_IOPORT;
	sc->nge_res_id = PCIR_BAR(0);
#else
	sc->nge_res_type = SYS_RES_MEMORY;
	sc->nge_res_id = PCIR_BAR(1);
#endif
	sc->nge_res = bus_alloc_resource_any(dev, sc->nge_res_type,
	    &sc->nge_res_id, RF_ACTIVE);
	if (sc->nge_res == NULL) {
		/*
		 * The preferred BAR failed; fall back to the other
		 * resource type (memory <-> I/O port) before giving up.
		 */
		if (sc->nge_res_type == SYS_RES_MEMORY) {
			sc->nge_res_type = SYS_RES_IOPORT;
			sc->nge_res_id = PCIR_BAR(0);
		} else {
			sc->nge_res_type = SYS_RES_MEMORY;
			sc->nge_res_id = PCIR_BAR(1);
		}
		sc->nge_res = bus_alloc_resource_any(dev, sc->nge_res_type,
		    &sc->nge_res_id, RF_ACTIVE);
		if (sc->nge_res == NULL) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->nge_res_type == SYS_RES_MEMORY ? "memory" :
			    "I/O");
			NGE_LOCK_DESTROY(sc);
			return (ENXIO);
		}
	}

	/* Allocate interrupt */
	rid = 0;
	sc->nge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->nge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Enable MWI. */
	reg = pci_read_config(dev, PCIR_COMMAND, 2);
	reg |= PCIM_CMD_MWRICEN;
	pci_write_config(dev, PCIR_COMMAND, reg, 2);

	/* Reset the adapter. */
	nge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	nge_read_eeprom(sc, (caddr_t)ea, NGE_EE_NODEADDR, 3);
	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
		ea[i] = le16toh(ea[i]);
	/* The EEPROM stores the 16-bit words in reverse order. */
	ea_temp = ea[0];
	ea[0] = ea[2];
	ea[2] = ea_temp;
	bcopy(ea, eaddr, sizeof(eaddr));

	if (nge_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	nge_sysctl_node(sc);

	ifp = sc->nge_ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, nge_ioctl);
	if_setstartfn(ifp, nge_start);
	if_setinitfn(ifp, nge_init);
	if_setsendqlen(ifp, NGE_TX_RING_CNT - 1);
	if_setsendqready(ifp);
	if_sethwassist(ifp, NGE_CSUM_FEATURES);
	if_setcapabilities(ifp, IFCAP_HWCSUM);
	/*
	 * It seems that some hardwares doesn't provide 3.3V auxiliary
	 * supply(3VAUX) to drive PME such that checking PCI power
	 * management capability is necessary.
	 */
	if (pci_find_cap(sc->nge_dev, PCIY_PMG, &i) == 0)
		if_setcapabilitiesbit(ifp, IFCAP_WOL, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	/* TBI (fiber) configuration is indicated by a strap in NGE_CFG. */
	if ((CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) != 0) {
		sc->nge_flags |= NGE_FLAG_TBI;
		device_printf(dev, "Using TBI\n");
		/* Configure GPIO. */
		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
		    | NGE_GPIO_GP4_OUT
		    | NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB
		    | NGE_GPIO_GP3_OUTENB
		    | NGE_GPIO_GP3_IN | NGE_GPIO_GP4_IN);
	}

	/*
	 * Do MII setup.
	 */
	error = mii_attach(dev, &sc->nge_miibus, ifp, nge_mediachange,
	    nge_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* VLAN capability setup. */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING, 0);
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	/*
	 * Hookup IRQ last.
	 */
	error = bus_setup_intr(dev, sc->nge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, nge_intr, sc, &sc->nge_intrhand);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

fail:
	/* nge_detach() copes with a partially-initialized softc. */
	if (error != 0)
		nge_detach(dev);
	return (error);
}
static int
nge_detach(device_t dev)
{
	struct nge_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	ifp = sc->nge_ifp;

#ifdef DEVICE_POLLING
	/* Stop any polling callbacks before tearing things down. */
	if (ifp != NULL && if_getcapenable(ifp) & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	/*
	 * Also reached from the attach failure path, so the softc may
	 * only be partially initialized; every release below is guarded.
	 */
	if (device_is_attached(dev)) {
		NGE_LOCK(sc);
		/* Flag detach so periodic/reconfig paths stand down. */
		sc->nge_flags |= NGE_FLAG_DETACH;
		nge_stop(sc);
		NGE_UNLOCK(sc);
		callout_drain(&sc->nge_stat_ch);
		if (ifp != NULL)
			ether_ifdetach(ifp);
	}

	if (sc->nge_miibus != NULL) {
		device_delete_child(dev, sc->nge_miibus);
		sc->nge_miibus = NULL;
	}
	bus_generic_detach(dev);
	/* Tear down the interrupt before releasing its resource. */
	if (sc->nge_intrhand != NULL)
		bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
	if (sc->nge_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
	if (sc->nge_res != NULL)
		bus_release_resource(dev, sc->nge_res_type, sc->nge_res_id,
		    sc->nge_res);

	nge_dma_free(sc);
	if (ifp != NULL)
		if_free(ifp);

	NGE_LOCK_DESTROY(sc);

	return (0);
}
struct nge_dmamap_arg {
bus_addr_t nge_busaddr;
};
static void
nge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct nge_dmamap_arg *ctx;

	/* On a failed load, leave the recorded address untouched. */
	if (error != 0)
		return;
	ctx = (struct nge_dmamap_arg *)arg;
	ctx->nge_busaddr = segs[0].ds_addr;
}
/*
 * Create the DMA tags, allocate and load the descriptor rings, and
 * create per-buffer DMA maps.  On error the caller is expected to
 * invoke nge_dma_free(), which handles partially-built state.
 */
static int
nge_dma_alloc(struct nge_softc *sc)
{
	struct nge_dmamap_arg ctx;
	struct nge_txdesc *txd;
	struct nge_rxdesc *rxd;
	int error, i;

	/* Create parent DMA tag; all other tags derive from it. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->nge_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_parent_tag);
	if (error != 0) {
		device_printf(sc->nge_dev, "failed to create parent DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
	    NGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    NGE_TX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    NGE_TX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->nge_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
	    NGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    NGE_RX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    NGE_RX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->nge_dev,
		    "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx buffers (multi-segment mbuf chains). */
	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * NGE_MAXTXSEGS,	/* maxsize */
	    NGE_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_tx_tag);
	if (error != 0) {
		device_printf(sc->nge_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers (one cluster per descriptor). */
	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
	    NGE_RX_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_rx_tag);
	if (error != 0) {
		device_printf(sc->nge_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->nge_cdata.nge_tx_ring_tag,
	    (void **)&sc->nge_rdata.nge_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->nge_cdata.nge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->nge_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}
	ctx.nge_busaddr = 0;
	error = bus_dmamap_load(sc->nge_cdata.nge_tx_ring_tag,
	    sc->nge_cdata.nge_tx_ring_map, sc->nge_rdata.nge_tx_ring,
	    NGE_TX_RING_SIZE, nge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.nge_busaddr == 0) {
		device_printf(sc->nge_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	/* Bus address captured by nge_dmamap_cb(). */
	sc->nge_rdata.nge_tx_ring_paddr = ctx.nge_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->nge_cdata.nge_rx_ring_tag,
	    (void **)&sc->nge_rdata.nge_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->nge_cdata.nge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->nge_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}
	ctx.nge_busaddr = 0;
	error = bus_dmamap_load(sc->nge_cdata.nge_rx_ring_tag,
	    sc->nge_cdata.nge_rx_ring_map, sc->nge_rdata.nge_rx_ring,
	    NGE_RX_RING_SIZE, nge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.nge_busaddr == 0) {
		device_printf(sc->nge_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->nge_rdata.nge_rx_ring_paddr = ctx.nge_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < NGE_TX_RING_CNT; i++) {
		txd = &sc->nge_cdata.nge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->nge_cdata.nge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->nge_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/*
	 * Create DMA maps for Rx buffers; the spare map is used by
	 * nge_newbuf() so a failed buffer replacement loses nothing.
	 */
	if ((error = bus_dmamap_create(sc->nge_cdata.nge_rx_tag, 0,
	    &sc->nge_cdata.nge_rx_sparemap)) != 0) {
		device_printf(sc->nge_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < NGE_RX_RING_CNT; i++) {
		rxd = &sc->nge_cdata.nge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->nge_cdata.nge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->nge_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}
/*
 * Release everything nge_dma_alloc() built.  Each stage is guarded,
 * so this is safe to call with partially-constructed DMA state.
 */
static void
nge_dma_free(struct nge_softc *sc)
{
	struct nge_txdesc *txd;
	struct nge_rxdesc *rxd;
	int i;

	/* Tx ring. */
	if (sc->nge_cdata.nge_tx_ring_tag) {
		if (sc->nge_rdata.nge_tx_ring_paddr)
			bus_dmamap_unload(sc->nge_cdata.nge_tx_ring_tag,
			    sc->nge_cdata.nge_tx_ring_map);
		if (sc->nge_rdata.nge_tx_ring)
			bus_dmamem_free(sc->nge_cdata.nge_tx_ring_tag,
			    sc->nge_rdata.nge_tx_ring,
			    sc->nge_cdata.nge_tx_ring_map);
		sc->nge_rdata.nge_tx_ring = NULL;
		sc->nge_rdata.nge_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->nge_cdata.nge_tx_ring_tag);
		sc->nge_cdata.nge_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->nge_cdata.nge_rx_ring_tag) {
		if (sc->nge_rdata.nge_rx_ring_paddr)
			bus_dmamap_unload(sc->nge_cdata.nge_rx_ring_tag,
			    sc->nge_cdata.nge_rx_ring_map);
		if (sc->nge_rdata.nge_rx_ring)
			bus_dmamem_free(sc->nge_cdata.nge_rx_ring_tag,
			    sc->nge_rdata.nge_rx_ring,
			    sc->nge_cdata.nge_rx_ring_map);
		sc->nge_rdata.nge_rx_ring = NULL;
		sc->nge_rdata.nge_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->nge_cdata.nge_rx_ring_tag);
		sc->nge_cdata.nge_rx_ring_tag = NULL;
	}
	/* Tx buffers: destroy per-descriptor maps before the tag. */
	if (sc->nge_cdata.nge_tx_tag) {
		for (i = 0; i < NGE_TX_RING_CNT; i++) {
			txd = &sc->nge_cdata.nge_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->nge_cdata.nge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->nge_cdata.nge_tx_tag);
		sc->nge_cdata.nge_tx_tag = NULL;
	}
	/* Rx buffers: per-descriptor maps plus the spare map. */
	if (sc->nge_cdata.nge_rx_tag) {
		for (i = 0; i < NGE_RX_RING_CNT; i++) {
			rxd = &sc->nge_cdata.nge_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->nge_cdata.nge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->nge_cdata.nge_rx_sparemap) {
			bus_dmamap_destroy(sc->nge_cdata.nge_rx_tag,
			    sc->nge_cdata.nge_rx_sparemap);
			sc->nge_cdata.nge_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->nge_cdata.nge_rx_tag);
		sc->nge_cdata.nge_rx_tag = NULL;
	}
	/* The parent tag goes last, after all derived tags are gone. */
	if (sc->nge_cdata.nge_parent_tag) {
		bus_dma_tag_destroy(sc->nge_cdata.nge_parent_tag);
		sc->nge_cdata.nge_parent_tag = NULL;
	}
}
/*
* Initialize the transmit descriptors.
*/
static int
nge_list_tx_init(struct nge_softc *sc)
{
	struct nge_ring_data *rd;
	bus_addr_t next;
	int i;

	/* Reset producer/consumer bookkeeping. */
	sc->nge_cdata.nge_tx_prod = 0;
	sc->nge_cdata.nge_tx_cons = 0;
	sc->nge_cdata.nge_tx_cnt = 0;

	rd = &sc->nge_rdata;
	bzero(rd->nge_tx_ring, sizeof(struct nge_desc) * NGE_TX_RING_CNT);
	for (i = 0; i < NGE_TX_RING_CNT; i++) {
		/* Link each descriptor to the next, wrapping at the end. */
		next = NGE_TX_RING_ADDR(sc, (i + 1) % NGE_TX_RING_CNT);
		rd->nge_tx_ring[i].nge_next = htole32(NGE_ADDR_LO(next));
		sc->nge_cdata.nge_txdesc[i].tx_m = NULL;
	}

	bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag,
	    sc->nge_cdata.nge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
/*
* Initialize the RX descriptors and allocate mbufs for them. Note that
* we arrange the descriptors in a closed ring, so that the last descriptor
* points back to the first.
*/
static int
nge_list_rx_init(struct nge_softc *sc)
{
	struct nge_ring_data *rd;
	bus_addr_t next;
	int i;

	sc->nge_cdata.nge_rx_cons = 0;
	sc->nge_head = sc->nge_tail = NULL;

	rd = &sc->nge_rdata;
	bzero(rd->nge_rx_ring, sizeof(struct nge_desc) * NGE_RX_RING_CNT);
	for (i = 0; i < NGE_RX_RING_CNT; i++) {
		/* Attach an mbuf cluster to each descriptor. */
		if (nge_newbuf(sc, i) != 0)
			return (ENOBUFS);
		/* Close the ring: the last descriptor points at the first. */
		next = NGE_RX_RING_ADDR(sc, (i + 1) % NGE_RX_RING_CNT);
		rd->nge_rx_ring[i].nge_next = htole32(NGE_ADDR_LO(next));
	}

	bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag,
	    sc->nge_cdata.nge_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
static __inline void
nge_discard_rxbuf(struct nge_softc *sc, int idx)
{
	struct nge_desc *rxdesc;

	/*
	 * Recycle the descriptor in place: restore the full buffer
	 * length (cluster minus the 64-bit alignment pad) and clear
	 * the extended status so the chip can reuse it.
	 */
	rxdesc = &sc->nge_rdata.nge_rx_ring[idx];
	rxdesc->nge_cmdsts = htole32(MCLBYTES - sizeof(uint64_t));
	rxdesc->nge_extsts = 0;
}
/*
* Initialize an RX descriptor and attach an MBUF cluster.
*/
static int
nge_newbuf(struct nge_softc *sc, int idx)
{
struct nge_desc *desc;
struct nge_rxdesc *rxd;
struct mbuf *m;
bus_dma_segment_t segs[1];
bus_dmamap_t map;
int nsegs;
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MCLBYTES;
m_adj(m, sizeof(uint64_t));
if (bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_rx_tag,
sc->nge_cdata.nge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
m_freem(m);
return (ENOBUFS);
}
KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
rxd = &sc->nge_cdata.nge_rxdesc[idx];
if (rxd->rx_m != NULL) {
bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap);
}
map = rxd->rx_dmamap;
rxd->rx_dmamap = sc->nge_cdata.nge_rx_sparemap;
sc->nge_cdata.nge_rx_sparemap = map;
bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap,
BUS_DMASYNC_PREREAD);
rxd->rx_m = m;
desc = &sc->nge_rdata.nge_rx_ring[idx];
desc->nge_ptr = htole32(NGE_ADDR_LO(segs[0].ds_addr));
desc->nge_cmdsts = htole32(segs[0].ds_len);
desc->nge_extsts = 0;
return (0);
}
#ifndef __NO_STRICT_ALIGNMENT
static __inline void
nge_fixup_rx(struct mbuf *m)
{
	uint16_t *from, *to;
	int n;

	/*
	 * Shift the payload back by ETHER_ALIGN (2 bytes), one 16-bit
	 * word at a time, so the IP header after the Ethernet header
	 * becomes properly aligned on strict-alignment platforms.
	 */
	from = mtod(m, uint16_t *);
	to = from - 1;
	for (n = 0; n < (m->m_len / sizeof(uint16_t) + 1); n++)
		*to++ = *from++;
	m->m_data -= ETHER_ALIGN;
}
#endif
/*
* A frame has been uploaded: pass the resulting mbuf chain up to
* the higher level protocols.
*/
static int
nge_rxeof(struct nge_softc *sc)
{
	struct mbuf *m;
	if_t ifp;
	struct nge_desc *cur_rx;
	struct nge_rxdesc *rxd;
	int cons, prog, rx_npkts, total_len;
	uint32_t cmdsts, extsts;

	NGE_LOCK_ASSERT(sc);

	ifp = sc->nge_ifp;
	cons = sc->nge_cdata.nge_rx_cons;
	rx_npkts = 0;

	/* Pick up descriptor status written by the hardware. */
	bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag,
	    sc->nge_cdata.nge_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Process at most one full ring of completed descriptors. */
	for (prog = 0; prog < NGE_RX_RING_CNT &&
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0;
	    NGE_INC(cons, NGE_RX_RING_CNT)) {
#ifdef DEVICE_POLLING
		/* In polling mode honor the caller-supplied budget. */
		if (if_getcapenable(ifp) & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		cur_rx = &sc->nge_rdata.nge_rx_ring[cons];
		cmdsts = le32toh(cur_rx->nge_cmdsts);
		extsts = le32toh(cur_rx->nge_extsts);
		/* OWN clear means the hardware still owns the descriptor. */
		if ((cmdsts & NGE_CMDSTS_OWN) == 0)
			break;
		prog++;
		rxd = &sc->nge_cdata.nge_rxdesc[cons];
		m = rxd->rx_m;
		total_len = cmdsts & NGE_CMDSTS_BUFLEN;
		/* MORE set: this frame continues in the next descriptor. */
		if ((cmdsts & NGE_CMDSTS_MORE) != 0) {
			if (nge_newbuf(sc, cons) != 0) {
				/* No replacement mbuf: drop the chain. */
				if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
				if (sc->nge_head != NULL) {
					m_freem(sc->nge_head);
					sc->nge_head = sc->nge_tail = NULL;
				}
				nge_discard_rxbuf(sc, cons);
				continue;
			}
			/* Append this fragment to the pending chain. */
			m->m_len = total_len;
			if (sc->nge_head == NULL) {
				m->m_pkthdr.len = total_len;
				sc->nge_head = sc->nge_tail = m;
			} else {
				m->m_flags &= ~M_PKTHDR;
				sc->nge_head->m_pkthdr.len += total_len;
				sc->nge_tail->m_next = m;
				sc->nge_tail = m;
			}
			continue;
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if ((cmdsts & NGE_CMDSTS_PKT_OK) == 0) {
			if ((cmdsts & NGE_RXSTAT_RUNT) &&
			    total_len >= (ETHER_MIN_LEN - ETHER_CRC_LEN - 4)) {
				/*
				 * Work-around hardware bug, accept runt frames
				 * if its length is larger than or equal to 56.
				 */
			} else {
				/*
				 * Input error counters are updated by hardware.
				 */
				if (sc->nge_head != NULL) {
					m_freem(sc->nge_head);
					sc->nge_head = sc->nge_tail = NULL;
				}
				nge_discard_rxbuf(sc, cons);
				continue;
			}
		}

		/* Try conjure up a replacement mbuf. */
		if (nge_newbuf(sc, cons) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			if (sc->nge_head != NULL) {
				m_freem(sc->nge_head);
				sc->nge_head = sc->nge_tail = NULL;
			}
			nge_discard_rxbuf(sc, cons);
			continue;
		}

		/* Chain received mbufs: attach the final fragment. */
		if (sc->nge_head != NULL) {
			m->m_len = total_len;
			m->m_flags &= ~M_PKTHDR;
			sc->nge_tail->m_next = m;
			m = sc->nge_head;
			m->m_pkthdr.len += total_len;
			sc->nge_head = sc->nge_tail = NULL;
		} else
			m->m_pkthdr.len = m->m_len = total_len;

		/*
		 * Ok. NatSemi really screwed up here. This is the
		 * only gigE chip I know of with alignment constraints
		 * on receive buffers. RX buffers must be 64-bit aligned.
		 */
		/*
		 * By popular demand, ignore the alignment problems
		 * on the non-strict alignment platform. The performance hit
		 * incurred due to unaligned accesses is much smaller
		 * than the hit produced by forcing buffer copies all
		 * the time, especially with jumbo frames. We still
		 * need to fix up the alignment everywhere else though.
		 */
#ifndef __NO_STRICT_ALIGNMENT
		nge_fixup_rx(m);
#endif
		m->m_pkthdr.rcvif = ifp;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
			/* Do IP checksum checking. */
			if ((extsts & NGE_RXEXTSTS_IPPKT) != 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((extsts & NGE_RXEXTSTS_IPCSUMERR) == 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			/* TCP/UDP checksum is good when no error bit set. */
			if ((extsts & NGE_RXEXTSTS_TCPPKT &&
			    !(extsts & NGE_RXEXTSTS_TCPCSUMERR)) ||
			    (extsts & NGE_RXEXTSTS_UDPPKT &&
			    !(extsts & NGE_RXEXTSTS_UDPCSUMERR))) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if ((extsts & NGE_RXEXTSTS_VLANPKT) != 0 &&
		    (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag =
			    bswap16(extsts & NGE_RXEXTSTS_VTCI);
			m->m_flags |= M_VLANTAG;
		}
		/* Drop the driver lock across the call into the stack. */
		NGE_UNLOCK(sc);
		if_input(ifp, m);
		NGE_LOCK(sc);
		rx_npkts++;
	}

	if (prog > 0) {
		/* Hand the recycled descriptors back to the hardware. */
		sc->nge_cdata.nge_rx_cons = cons;
		bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag,
		    sc->nge_cdata.nge_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	return (rx_npkts);
}
/*
* A frame was downloaded to the chip. It's safe for us to clean up
* the list buffers.
*/
static void
nge_txeof(struct nge_softc *sc)
{
	struct nge_desc *cur_tx;
	struct nge_txdesc *txd;
	if_t ifp;
	uint32_t cmdsts;
	int cons, prod;

	NGE_LOCK_ASSERT(sc);

	ifp = sc->nge_ifp;
	cons = sc->nge_cdata.nge_tx_cons;
	prod = sc->nge_cdata.nge_tx_prod;
	/* Nothing queued, nothing to reclaim. */
	if (cons == prod)
		return;
	bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag,
	    sc->nge_cdata.nge_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != prod; NGE_INC(cons, NGE_TX_RING_CNT)) {
		cur_tx = &sc->nge_rdata.nge_tx_ring[cons];
		cmdsts = le32toh(cur_tx->nge_cmdsts);
		/* Stop at the first descriptor still owned by the NIC. */
		if ((cmdsts & NGE_CMDSTS_OWN) != 0)
			break;
		sc->nge_cdata.nge_tx_cnt--;
		/* Ring space was freed; allow new transmissions. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		/* Stats and the mbuf live on the frame's last descriptor. */
		if ((cmdsts & NGE_CMDSTS_MORE) != 0)
			continue;
		txd = &sc->nge_cdata.nge_txdesc[cons];
		bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, txd->tx_dmamap);
		if ((cmdsts & NGE_CMDSTS_PKT_OK) == 0) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			if ((cmdsts & NGE_TXSTAT_EXCESSCOLLS) != 0)
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
			if ((cmdsts & NGE_TXSTAT_OUTOFWINCOLL) != 0)
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
		} else
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		/* Per-frame collision count lives in the upper bits. */
		if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (cmdsts & NGE_TXSTAT_COLLCNT) >> 16);
		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n",
		    __func__));
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	sc->nge_cdata.nge_tx_cons = cons;
	/* Cancel the watchdog once the ring has fully drained. */
	if (sc->nge_cdata.nge_tx_cnt == 0)
		sc->nge_watchdog_timer = 0;
}
static void
nge_tick(void *xsc)
{
	struct nge_softc *sc = xsc;
	struct mii_data *mii;

	/* Once-a-second housekeeping; runs with the softc lock held. */
	NGE_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->nge_miibus);
	mii_tick(mii);
	/*
	 * Some PHYs do not reset an established link, so the link-state
	 * callback may never fire even though the link is fine; since
	 * initialization clears the link flag, re-evaluate it here
	 * whenever it is still clear.
	 */
	if ((sc->nge_flags & NGE_FLAG_LINK) == 0)
		nge_miibus_statchg(sc->nge_dev);
	nge_stats_update(sc);
	nge_watchdog(sc);
	callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc);
}
/*
 * Read the hardware MIB counters and fold them into the interface
 * statistics and the driver's running totals.
 */
static void
nge_stats_update(struct nge_softc *sc)
{
	if_t ifp;
	struct nge_stats now, *stats, *nstats;

	NGE_LOCK_ASSERT(sc);

	ifp = sc->nge_ifp;
	/* Snapshot the MIB registers into a local struct first. */
	stats = &now;
	stats->rx_pkts_errs =
	    CSR_READ_4(sc, NGE_MIB_RXERRPKT) & 0xFFFF;
	stats->rx_crc_errs =
	    CSR_READ_4(sc, NGE_MIB_RXERRFCS) & 0xFFFF;
	stats->rx_fifo_oflows =
	    CSR_READ_4(sc, NGE_MIB_RXERRMISSEDPKT) & 0xFFFF;
	stats->rx_align_errs =
	    CSR_READ_4(sc, NGE_MIB_RXERRALIGN) & 0xFFFF;
	stats->rx_sym_errs =
	    CSR_READ_4(sc, NGE_MIB_RXERRSYM) & 0xFFFF;
	stats->rx_pkts_jumbos =
	    CSR_READ_4(sc, NGE_MIB_RXERRGIANT) & 0xFFFF;
	stats->rx_len_errs =
	    CSR_READ_4(sc, NGE_MIB_RXERRRANGLEN) & 0xFFFF;
	stats->rx_unctl_frames =
	    CSR_READ_4(sc, NGE_MIB_RXBADOPCODE) & 0xFFFF;
	stats->rx_pause =
	    CSR_READ_4(sc, NGE_MIB_RXPAUSEPKTS) & 0xFFFF;
	stats->tx_pause =
	    CSR_READ_4(sc, NGE_MIB_TXPAUSEPKTS) & 0xFFFF;
	stats->tx_seq_errs =
	    CSR_READ_4(sc, NGE_MIB_TXERRSQE) & 0xFF;

	/*
	 * Since we accept errored frames, exclude Rx length errors
	 * from the interface input-error count.
	 */
	if_inc_counter(ifp, IFCOUNTER_IERRORS,
	    stats->rx_pkts_errs + stats->rx_crc_errs +
	    stats->rx_fifo_oflows + stats->rx_sym_errs);

	/* Accumulate the snapshot into the driver's running totals. */
	nstats = &sc->nge_stats;
	nstats->rx_pkts_errs += stats->rx_pkts_errs;
	nstats->rx_crc_errs += stats->rx_crc_errs;
	nstats->rx_fifo_oflows += stats->rx_fifo_oflows;
	nstats->rx_align_errs += stats->rx_align_errs;
	nstats->rx_sym_errs += stats->rx_sym_errs;
	nstats->rx_pkts_jumbos += stats->rx_pkts_jumbos;
	nstats->rx_len_errs += stats->rx_len_errs;
	nstats->rx_unctl_frames += stats->rx_unctl_frames;
	nstats->rx_pause += stats->rx_pause;
	nstats->tx_pause += stats->tx_pause;
	nstats->tx_seq_errs += stats->tx_seq_errs;
}
#ifdef DEVICE_POLLING
static poll_handler_t nge_poll;

/* DEVICE_POLLING handler: service Rx/Tx without interrupts. */
static int
nge_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct nge_softc *sc;
	int rx_npkts = 0;

	sc = if_getsoftc(ifp);
	NGE_LOCK(sc);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		NGE_UNLOCK(sc);
		return (rx_npkts);
	}
	/*
	 * On the nge, reading the status register also clears it.
	 * So before returning to intr mode we must make sure that all
	 * possible pending sources of interrupts have been served.
	 * In practice this means run to completion the *eof routines,
	 * and then call the interrupt routine.
	 */
	sc->rxcycles = count;	/* Rx budget consumed by nge_rxeof(). */
	rx_npkts = nge_rxeof(sc);
	nge_txeof(sc);
	if (!if_sendq_empty(ifp))
		nge_start_locked(ifp);

	/* Budget left (or status check requested): emulate the ISR. */
	if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
		uint32_t status;

		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, NGE_ISR);

		if ((status & (NGE_ISR_RX_ERR|NGE_ISR_RX_OFLOW)) != 0)
			rx_npkts += nge_rxeof(sc);

		/* Restart the receiver if it went idle. */
		if ((status & NGE_ISR_RX_IDLE) != 0)
			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

		/* Fatal system error: reinitialize the whole chip. */
		if ((status & NGE_ISR_SYSERR) != 0) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			nge_init_locked(sc);
		}
	}
	NGE_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */
static void
nge_intr(void *arg)
{
	struct nge_softc *sc;
	if_t ifp;
	uint32_t status;

	sc = (struct nge_softc *)arg;
	ifp = sc->nge_ifp;
	NGE_LOCK(sc);
	/* Ignore interrupts while the device is suspended. */
	if ((sc->nge_flags & NGE_FLAG_SUSPENDED) != 0)
		goto done_locked;
	/* Reading the ISR register clears all interrupts. */
	status = CSR_READ_4(sc, NGE_ISR);
	/* All-ones means the hardware is gone; no NGE_INTRS: not ours. */
	if (status == 0xffffffff || (status & NGE_INTRS) == 0)
		goto done_locked;
#ifdef DEVICE_POLLING
	/* In polling mode the poll handler does all the work. */
	if ((if_getcapenable(ifp) & IFCAP_POLLING) != 0)
		goto done_locked;
#endif
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		goto done_locked;
	/* Disable interrupts. */
	CSR_WRITE_4(sc, NGE_IER, 0);
	/* Data LED on for TBI mode */
	if ((sc->nge_flags & NGE_FLAG_TBI) != 0)
		CSR_WRITE_4(sc, NGE_GPIO,
		    CSR_READ_4(sc, NGE_GPIO) | NGE_GPIO_GP3_OUT);
	/* Loop until no interesting interrupt source remains pending. */
	for (; (status & NGE_INTRS) != 0;) {
		if ((status & (NGE_ISR_TX_DESC_OK | NGE_ISR_TX_ERR |
		    NGE_ISR_TX_OK | NGE_ISR_TX_IDLE)) != 0)
			nge_txeof(sc);

		if ((status & (NGE_ISR_RX_DESC_OK | NGE_ISR_RX_ERR |
		    NGE_ISR_RX_OFLOW | NGE_ISR_RX_FIFO_OFLOW |
		    NGE_ISR_RX_IDLE | NGE_ISR_RX_OK)) != 0)
			nge_rxeof(sc);

		/* Restart the receiver if it went idle. */
		if ((status & NGE_ISR_RX_IDLE) != 0)
			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

		/* Fatal system error: reinitialize the whole chip. */
		if ((status & NGE_ISR_SYSERR) != 0) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			nge_init_locked(sc);
		}
		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, NGE_ISR);
	}
	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, NGE_IER, 1);

	/* Push out any frames queued while we were servicing. */
	if (!if_sendq_empty(ifp))
		nge_start_locked(ifp);

	/* Data LED off for TBI mode */
	if ((sc->nge_flags & NGE_FLAG_TBI) != 0)
		CSR_WRITE_4(sc, NGE_GPIO,
		    CSR_READ_4(sc, NGE_GPIO) & ~NGE_GPIO_GP3_OUT);

done_locked:
	NGE_UNLOCK(sc);
}
/*
* Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
* pointers to the fragment pointers.
*/
static int
nge_encap(struct nge_softc *sc, struct mbuf **m_head)
{
	struct nge_txdesc *txd, *txd_last;
	struct nge_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	bus_dma_segment_t txsegs[NGE_MAXTXSEGS];
	int error, i, nsegs, prod, si;

	NGE_LOCK_ASSERT(sc);

	m = *m_head;
	prod = sc->nge_cdata.nge_tx_prod;
	txd = &sc->nge_cdata.nge_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;
	error = bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_tx_tag, map,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many fragments: compact the chain and retry once. */
		m = m_collapse(*m_head, M_NOWAIT, NGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_tx_tag,
		    map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc->nge_cdata.nge_tx_cnt + nsegs >= (NGE_TX_RING_CNT - 1)) {
		bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Fill one descriptor per DMA segment.  OWN is deliberately
	 * withheld from the first descriptor (i == 0) so the hardware
	 * cannot start on a half-built frame; it is set last, below.
	 */
	si = prod;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->nge_rdata.nge_tx_ring[prod];
		desc->nge_ptr = htole32(NGE_ADDR_LO(txsegs[i].ds_addr));
		if (i == 0)
			desc->nge_cmdsts = htole32(txsegs[i].ds_len |
			    NGE_CMDSTS_MORE);
		else
			desc->nge_cmdsts = htole32(txsegs[i].ds_len |
			    NGE_CMDSTS_MORE | NGE_CMDSTS_OWN);
		desc->nge_extsts = 0;
		sc->nge_cdata.nge_tx_cnt++;
		NGE_INC(prod, NGE_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->nge_cdata.nge_tx_prod = prod;

	/* Step back to this frame's last descriptor. */
	prod = (prod + NGE_TX_RING_CNT - 1) % NGE_TX_RING_CNT;
	desc = &sc->nge_rdata.nge_tx_ring[prod];
	/* Check if we have a VLAN tag to insert. */
	if ((m->m_flags & M_VLANTAG) != 0)
		desc->nge_extsts |= htole32(NGE_TXEXTSTS_VLANPKT |
		    bswap16(m->m_pkthdr.ether_vtag));
	/* Set EOP on the last descriptor. */
	desc->nge_cmdsts &= htole32(~NGE_CMDSTS_MORE);

	/* Set checksum offload in the first descriptor. */
	desc = &sc->nge_rdata.nge_tx_ring[si];
	if ((m->m_pkthdr.csum_flags & NGE_CSUM_FEATURES) != 0) {
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			desc->nge_extsts |= htole32(NGE_TXEXTSTS_IPCSUM);
		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			desc->nge_extsts |= htole32(NGE_TXEXTSTS_TCPCSUM);
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			desc->nge_extsts |= htole32(NGE_TXEXTSTS_UDPCSUM);
	}

	/* Lastly, turn the first descriptor ownership to hardware. */
	desc->nge_cmdsts |= htole32(NGE_CMDSTS_OWN);

	/* Swap dmamaps so the loaded map stays with the last descriptor. */
	txd = &sc->nge_cdata.nge_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	return (0);
}
/*
* Main transmit routine. To avoid having to do mbuf copies, we put pointers
* to the mbuf data regions directly in the transmit lists. We also save a
* copy of the pointers since the transmit list fragment pointers are
* physical addresses.
*/
/*
 * ifnet "start" entry point: grab the driver lock and defer to the
 * locked transmit path.
 */
static void
nge_start(if_t ifp)
{
    struct nge_softc *sc = if_getsoftc(ifp);

    NGE_LOCK(sc);
    nge_start_locked(ifp);
    NGE_UNLOCK(sc);
}
/*
 * Locked transmit path: drain the interface send queue into the Tx
 * descriptor ring and kick the chip.  Called with the driver lock held.
 */
static void
nge_start_locked(if_t ifp)
{
    struct nge_softc *sc;
    struct mbuf *m_head;
    int enq;

    sc = if_getsoftc(ifp);
    NGE_LOCK_ASSERT(sc);
    /* Nothing to do unless running, not already stalled, and link is up. */
    if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING || (sc->nge_flags & NGE_FLAG_LINK) == 0)
        return;
    /* Keep 2 descriptors in reserve; stop dequeueing when the ring fills. */
    for (enq = 0; !if_sendq_empty(ifp) &&
        sc->nge_cdata.nge_tx_cnt < NGE_TX_RING_CNT - 2; ) {
        m_head = if_dequeue(ifp);
        if (m_head == NULL)
            break;
        /*
         * Pack the data into the transmit ring. If we
         * don't have room, set the OACTIVE flag and wait
         * for the NIC to drain the ring.
         */
        if (nge_encap(sc, &m_head)) {
            /* nge_encap() may free the mbuf on hard failure. */
            if (m_head == NULL)
                break;
            /* Out of descriptors: requeue and stall until Tx completes. */
            if_sendq_prepend(ifp, m_head);
            if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
            break;
        }
        enq++;
        /*
         * If there's a BPF listener, bounce a copy of this frame
         * to him.
         */
        ETHER_BPF_MTAP(ifp, m_head);
    }
    if (enq > 0) {
        /* Make the new descriptors visible to the device before kicking. */
        bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag,
            sc->nge_cdata.nge_tx_ring_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        /* Transmit */
        NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE);
        /* Set a timeout in case the chip goes out to lunch. */
        sc->nge_watchdog_timer = 5;
    }
}
/*
 * Unlocked wrapper for nge_init_locked(), used as the ifnet init callback.
 */
static void
nge_init(void *xsc)
{
    struct nge_softc *sc;

    sc = xsc;
    NGE_LOCK(sc);
    nge_init_locked(sc);
    NGE_UNLOCK(sc);
}
/*
 * Bring the interface up: reset the chip, program the MAC address and
 * Rx filter, initialize both descriptor rings, configure offload and
 * interrupt moderation, and start the MII autonegotiation.  Called with
 * the driver lock held; a no-op if the interface is already running.
 */
static void
nge_init_locked(struct nge_softc *sc)
{
    if_t ifp = sc->nge_ifp;
    struct mii_data *mii;
    uint8_t *eaddr;
    uint32_t reg;

    NGE_LOCK_ASSERT(sc);

    if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
        return;
    /*
     * Cancel pending I/O and free all RX/TX buffers.
     */
    nge_stop(sc);
    /* Reset the adapter. */
    nge_reset(sc);
    /* Disable Rx filter prior to programming Rx filter. */
    CSR_WRITE_4(sc, NGE_RXFILT_CTL, 0);
    CSR_BARRIER_4(sc, NGE_RXFILT_CTL, BUS_SPACE_BARRIER_WRITE);

    mii = device_get_softc(sc->nge_miibus);

    /* Set MAC address — the filter RAM takes the address 16 bits at a time. */
    eaddr = if_getlladdr(sc->nge_ifp);
    CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0);
    CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[1] << 8) | eaddr[0]);
    CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1);
    CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[3] << 8) | eaddr[2]);
    CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2);
    CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[5] << 8) | eaddr[4]);

    /* Init circular RX list. */
    if (nge_list_rx_init(sc) == ENOBUFS) {
        device_printf(sc->nge_dev, "initialization failed: no "
            "memory for rx buffers\n");
        nge_stop(sc);
        return;
    }

    /*
     * Init tx descriptors.
     */
    nge_list_tx_init(sc);

    /* Set Rx filter. */
    nge_rxfilter(sc);

    /* Disable PRIQ ctl. */
    CSR_WRITE_4(sc, NGE_PRIOQCTL, 0);

    /*
     * Set pause frames parameters.
     *  Rx stat FIFO hi-threshold : 2 or more packets
     *  Rx stat FIFO lo-threshold : less than 2 packets
     *  Rx data FIFO hi-threshold : 2K or more bytes
     *  Rx data FIFO lo-threshold : less than 2K bytes
     *  pause time : (512ns * 0xffff) -> 33.55ms
     */
    CSR_WRITE_4(sc, NGE_PAUSECSR,
        NGE_PAUSECSR_PAUSE_ON_MCAST |
        NGE_PAUSECSR_PAUSE_ON_DA |
        ((1 << 24) & NGE_PAUSECSR_RX_STATFIFO_THR_HI) |
        ((1 << 22) & NGE_PAUSECSR_RX_STATFIFO_THR_LO) |
        ((1 << 20) & NGE_PAUSECSR_RX_DATAFIFO_THR_HI) |
        ((1 << 18) & NGE_PAUSECSR_RX_DATAFIFO_THR_LO) |
        NGE_PAUSECSR_CNT);

    /*
     * Load the address of the RX and TX lists.
     */
    CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI,
        NGE_ADDR_HI(sc->nge_rdata.nge_rx_ring_paddr));
    CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO,
        NGE_ADDR_LO(sc->nge_rdata.nge_rx_ring_paddr));
    CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI,
        NGE_ADDR_HI(sc->nge_rdata.nge_tx_ring_paddr));
    CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO,
        NGE_ADDR_LO(sc->nge_rdata.nge_tx_ring_paddr));

    /* Set RX configuration. */
    CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG);

    CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, 0);
    /*
     * Enable hardware checksum validation for all IPv4
     * packets, do not reject packets with bad checksums.
     */
    if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
        NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB);

    /*
     * Tell the chip to detect and strip VLAN tag info from
     * received frames. The tag will be provided in the extsts
     * field in the RX descriptors.
     */
    NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_TAG_DETECT_ENB);
    if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
        NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_TAG_STRIP_ENB);

    /* Set TX configuration. */
    CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG);

    /*
     * Enable TX IPv4 checksumming on a per-packet basis.
     */
    CSR_WRITE_4(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_CSUM_PER_PKT);

    /*
     * Tell the chip to insert VLAN tags on a per-packet basis as
     * dictated by the code in the frame encapsulation routine.
     */
    NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT);

    /*
     * Enable the delivery of PHY interrupts based on
     * link/speed/duplex status changes. Also enable the
     * extsts field in the DMA descriptors (needed for
     * TCP/IP checksum offload on transmit).
     */
    NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD |
        NGE_CFG_PHYINTR_LNK | NGE_CFG_PHYINTR_DUP | NGE_CFG_EXTSTS_ENB);

    /*
     * Configure interrupt holdoff (moderation). We can
     * have the chip delay interrupt delivery for a certain
     * period. Units are in 100us, and the max setting
     * is 25500us (0xFF x 100us). Default is a 100us holdoff.
     */
    CSR_WRITE_4(sc, NGE_IHR, sc->nge_int_holdoff);

    /*
     * Enable MAC statistics counters and clear.
     */
    reg = CSR_READ_4(sc, NGE_MIBCTL);
    reg &= ~NGE_MIBCTL_FREEZE_CNT;
    reg |= NGE_MIBCTL_CLEAR_CNT;
    CSR_WRITE_4(sc, NGE_MIBCTL, reg);

    /*
     * Enable interrupts.
     */
    CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS);
#ifdef DEVICE_POLLING
    /*
     * ... only enable interrupts if we are not polling, make sure
     * they are off otherwise.
     */
    if ((if_getcapenable(ifp) & IFCAP_POLLING) != 0)
        CSR_WRITE_4(sc, NGE_IER, 0);
    else
#endif
        CSR_WRITE_4(sc, NGE_IER, 1);

    /* Clear the link flag; mii_mediachg()/nge_tick will rediscover it. */
    sc->nge_flags &= ~NGE_FLAG_LINK;
    mii_mediachg(mii);

    sc->nge_watchdog_timer = 0;
    callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc);

    if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
    if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
}
/*
* Set media options.
*/
static int
nge_mediachange(if_t ifp)
{
    struct nge_softc *sc = if_getsoftc(ifp);
    struct mii_data *mii;
    struct mii_softc *miisc;
    int rv;

    NGE_LOCK(sc);
    mii = device_get_softc(sc->nge_miibus);
    /* Reset every attached PHY before reprogramming the media. */
    LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
        PHY_RESET(miisc);
    rv = mii_mediachg(mii);
    NGE_UNLOCK(sc);

    return (rv);
}
/*
* Report current media status.
*/
static void
nge_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
    struct nge_softc *sc = if_getsoftc(ifp);
    struct mii_data *mii;

    NGE_LOCK(sc);
    mii = device_get_softc(sc->nge_miibus);
    /* Refresh PHY state, then report it back to the caller. */
    mii_pollstat(mii);
    ifmr->ifm_status = mii->mii_media_status;
    ifmr->ifm_active = mii->mii_media_active;
    NGE_UNLOCK(sc);
}
/*
 * ioctl entry point.  Handles MTU changes (with a Tx-checksum workaround
 * for jumbo frames), flag/multicast updates, media selection, and
 * capability toggling; everything else falls through to ether_ioctl().
 */
static int
nge_ioctl(if_t ifp, u_long command, caddr_t data)
{
    struct nge_softc *sc = if_getsoftc(ifp);
    struct ifreq *ifr = (struct ifreq *) data;
    struct mii_data *mii;
    int error = 0, mask;

    switch (command) {
    case SIOCSIFMTU:
        if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NGE_JUMBO_MTU)
            error = EINVAL;
        else {
            NGE_LOCK(sc);
            if_setmtu(ifp, ifr->ifr_mtu);
            /*
             * Workaround: if the MTU is larger than
             * 8152 (TX FIFO size minus 64 minus 18), turn off
             * TX checksum offloading.
             */
            if (ifr->ifr_mtu >= 8152) {
                if_setcapenablebit(ifp, 0, IFCAP_TXCSUM);
                if_sethwassistbits(ifp, 0, NGE_CSUM_FEATURES);
            } else {
                if_setcapenablebit(ifp, IFCAP_TXCSUM, 0);
                if_sethwassistbits(ifp, NGE_CSUM_FEATURES, 0);
            }
            NGE_UNLOCK(sc);
            VLAN_CAPABILITIES(ifp);
        }
        break;
    case SIOCSIFFLAGS:
        NGE_LOCK(sc);
        if ((if_getflags(ifp) & IFF_UP) != 0) {
            if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
                /*
                 * Only reprogram the Rx filter when the
                 * PROMISC/ALLMULTI bits actually changed.
                 */
                if ((if_getflags(ifp) ^ sc->nge_if_flags) &
                    (IFF_PROMISC | IFF_ALLMULTI))
                    nge_rxfilter(sc);
            } else {
                if ((sc->nge_flags & NGE_FLAG_DETACH) == 0)
                    nge_init_locked(sc);
            }
        } else {
            if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
                nge_stop(sc);
        }
        sc->nge_if_flags = if_getflags(ifp);
        NGE_UNLOCK(sc);
        error = 0;
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        NGE_LOCK(sc);
        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
            nge_rxfilter(sc);
        NGE_UNLOCK(sc);
        break;
    case SIOCGIFMEDIA:
    case SIOCSIFMEDIA:
        mii = device_get_softc(sc->nge_miibus);
        error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
        break;
    case SIOCSIFCAP:
        NGE_LOCK(sc);
        /* mask holds the capability bits the caller wants changed. */
        mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
        if ((mask & IFCAP_POLLING) != 0 &&
            (IFCAP_POLLING & if_getcapabilities(ifp)) != 0) {
            if_togglecapenable(ifp, IFCAP_POLLING);
            if ((IFCAP_POLLING & if_getcapenable(ifp)) != 0) {
                error = ether_poll_register(nge_poll, ifp);
                if (error != 0) {
                    NGE_UNLOCK(sc);
                    break;
                }
                /* Disable interrupts. */
                CSR_WRITE_4(sc, NGE_IER, 0);
            } else {
                error = ether_poll_deregister(ifp);
                /* Enable interrupts. */
                CSR_WRITE_4(sc, NGE_IER, 1);
            }
        }
#endif /* DEVICE_POLLING */
        if ((mask & IFCAP_TXCSUM) != 0 &&
            (IFCAP_TXCSUM & if_getcapabilities(ifp)) != 0) {
            if_togglecapenable(ifp, IFCAP_TXCSUM);
            /* Keep the hwassist bits in sync with the capability. */
            if ((IFCAP_TXCSUM & if_getcapenable(ifp)) != 0)
                if_sethwassistbits(ifp, NGE_CSUM_FEATURES, 0);
            else
                if_sethwassistbits(ifp, 0, NGE_CSUM_FEATURES);
        }
        if ((mask & IFCAP_RXCSUM) != 0 &&
            (IFCAP_RXCSUM & if_getcapabilities(ifp)) != 0)
            if_togglecapenable(ifp, IFCAP_RXCSUM);
        if ((mask & IFCAP_WOL) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_WOL) != 0) {
            if ((mask & IFCAP_WOL_UCAST) != 0)
                if_togglecapenable(ifp, IFCAP_WOL_UCAST);
            if ((mask & IFCAP_WOL_MCAST) != 0)
                if_togglecapenable(ifp, IFCAP_WOL_MCAST);
            if ((mask & IFCAP_WOL_MAGIC) != 0)
                if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
        }
        if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM) != 0)
            if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
        if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
            if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
            /* Reprogram tag stripping immediately if we are running. */
            if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
                if ((if_getcapenable(ifp) &
                    IFCAP_VLAN_HWTAGGING) != 0)
                    NGE_SETBIT(sc,
                        NGE_VLAN_IP_RXCTL,
                        NGE_VIPRXCTL_TAG_STRIP_ENB);
                else
                    NGE_CLRBIT(sc,
                        NGE_VLAN_IP_RXCTL,
                        NGE_VIPRXCTL_TAG_STRIP_ENB);
            }
        }
        /*
         * Both VLAN hardware tagging and checksum offload is
         * required to do checksum offload on VLAN interface.
         */
        if ((if_getcapenable(ifp) & IFCAP_TXCSUM) == 0)
            if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWCSUM);
        if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
            if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWCSUM);
        NGE_UNLOCK(sc);
        VLAN_CAPABILITIES(ifp);
        break;
    default:
        error = ether_ioctl(ifp, command, data);
        break;
    }

    return (error);
}
/*
 * Transmit watchdog: when the countdown armed by the start path expires
 * without a Tx completion, assume the chip wedged and reinitialize it.
 * Called with the driver lock held (from nge_tick).
 */
static void
nge_watchdog(struct nge_softc *sc)
{
    if_t ifp;

    NGE_LOCK_ASSERT(sc);

    /* Timer not armed, or still counting down. */
    if (sc->nge_watchdog_timer == 0)
        return;
    if (--sc->nge_watchdog_timer != 0)
        return;

    ifp = sc->nge_ifp;
    if_printf(ifp, "watchdog timeout\n");
    if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

    /* Force a full reinit, then restart transmission if work is pending. */
    if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
    nge_init_locked(sc);

    if (!if_sendq_empty(ifp))
        nge_start_locked(ifp);
}
/*
 * Halt the Tx/Rx MAC engines and wait for them to go idle.
 * Returns 0 on success or ETIMEDOUT if they never stop.
 */
static int
nge_stop_mac(struct nge_softc *sc)
{
    uint32_t reg;
    int i;

    NGE_LOCK_ASSERT(sc);

    reg = CSR_READ_4(sc, NGE_CSR);
    if ((reg & (NGE_CSR_TX_ENABLE | NGE_CSR_RX_ENABLE)) == 0)
        return (0);    /* already stopped */

    /* Request a stop of both engines and poll until they report idle. */
    reg &= ~(NGE_CSR_TX_ENABLE | NGE_CSR_RX_ENABLE);
    reg |= NGE_CSR_TX_DISABLE | NGE_CSR_RX_DISABLE;
    CSR_WRITE_4(sc, NGE_CSR, reg);
    for (i = 0; i < NGE_TIMEOUT; i++) {
        DELAY(1);
        if ((CSR_READ_4(sc, NGE_CSR) &
            (NGE_CSR_RX_ENABLE | NGE_CSR_TX_ENABLE)) == 0)
            return (0);
    }

    return (ETIMEDOUT);
}
/*
* Stop the adapter and free any mbufs allocated to the
* RX and TX lists.
*/
static void
nge_stop(struct nge_softc *sc)
{
    struct nge_txdesc *txd;
    struct nge_rxdesc *rxd;
    int i;
    if_t ifp;

    NGE_LOCK_ASSERT(sc);
    ifp = sc->nge_ifp;

    /* Mark the interface down and quiesce the periodic tick/watchdog. */
    if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
    sc->nge_flags &= ~NGE_FLAG_LINK;
    callout_stop(&sc->nge_stat_ch);
    sc->nge_watchdog_timer = 0;

    /* Mask and disable all interrupts before touching the rings. */
    CSR_WRITE_4(sc, NGE_IER, 0);
    CSR_WRITE_4(sc, NGE_IMR, 0);
    if (nge_stop_mac(sc) == ETIMEDOUT)
        device_printf(sc->nge_dev,
            "%s: unable to stop Tx/Rx MAC\n", __func__);
    /* Clear the ring base pointers so the chip cannot DMA any more. */
    CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI, 0);
    CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO, 0);
    CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 0);
    CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 0);
    /* Snapshot the MIB counters before they are lost. */
    nge_stats_update(sc);
    /* Drop any partially reassembled jumbo frame. */
    if (sc->nge_head != NULL) {
        m_freem(sc->nge_head);
        sc->nge_head = sc->nge_tail = NULL;
    }
    /*
     * Free RX and TX mbufs still in the queues.
     */
    for (i = 0; i < NGE_RX_RING_CNT; i++) {
        rxd = &sc->nge_cdata.nge_rxdesc[i];
        if (rxd->rx_m != NULL) {
            bus_dmamap_sync(sc->nge_cdata.nge_rx_tag,
                rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(sc->nge_cdata.nge_rx_tag,
                rxd->rx_dmamap);
            m_freem(rxd->rx_m);
            rxd->rx_m = NULL;
        }
    }
    for (i = 0; i < NGE_TX_RING_CNT; i++) {
        txd = &sc->nge_cdata.nge_txdesc[i];
        if (txd->tx_m != NULL) {
            bus_dmamap_sync(sc->nge_cdata.nge_tx_tag,
                txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(sc->nge_cdata.nge_tx_tag,
                txd->tx_dmamap);
            m_freem(txd->tx_m);
            txd->tx_m = NULL;
        }
    }
}
/*
* Before setting WOL bits, caller should have stopped Receiver.
*/
static void
nge_wol(struct nge_softc *sc)
{
    if_t ifp;
    uint32_t reg;
    uint16_t pmstat;
    int pmc;

    NGE_LOCK_ASSERT(sc);

    /* Nothing to do if the device has no PCI power-management capability. */
    if (pci_find_cap(sc->nge_dev, PCIY_PMG, &pmc) != 0)
        return;

    ifp = sc->nge_ifp;
    if ((if_getcapenable(ifp) & IFCAP_WOL) == 0) {
        /* Disable WOL & disconnect CLKRUN to save power. */
        CSR_WRITE_4(sc, NGE_WOLCSR, 0);
        CSR_WRITE_4(sc, NGE_CLKRUN, 0);
    } else {
        if (nge_stop_mac(sc) == ETIMEDOUT)
            device_printf(sc->nge_dev,
                "%s: unable to stop Tx/Rx MAC\n", __func__);
        /*
         * Make sure wake frames will be buffered in the Rx FIFO.
         * (i.e. Silent Rx mode.)
         */
        CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 0);
        CSR_BARRIER_4(sc, NGE_RX_LISTPTR_HI, BUS_SPACE_BARRIER_WRITE);
        CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 0);
        CSR_BARRIER_4(sc, NGE_RX_LISTPTR_LO, BUS_SPACE_BARRIER_WRITE);
        /* Enable Rx again. */
        NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
        CSR_BARRIER_4(sc, NGE_CSR, BUS_SPACE_BARRIER_WRITE);
        /* Configure WOL events. */
        reg = 0;
        if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) != 0)
            reg |= NGE_WOLCSR_WAKE_ON_UNICAST;
        if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) != 0)
            reg |= NGE_WOLCSR_WAKE_ON_MULTICAST;
        if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
            reg |= NGE_WOLCSR_WAKE_ON_MAGICPKT;
        CSR_WRITE_4(sc, NGE_WOLCSR, reg);
        /* Activate CLKRUN. */
        reg = CSR_READ_4(sc, NGE_CLKRUN);
        reg |= NGE_CLKRUN_PMEENB | NGE_CLNRUN_CLKRUN_ENB;
        CSR_WRITE_4(sc, NGE_CLKRUN, reg);
    }

    /* Request PME — arm PME only when some WOL capability is enabled. */
    pmstat = pci_read_config(sc->nge_dev, pmc + PCIR_POWER_STATUS, 2);
    pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
    if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
        pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
    pci_write_config(sc->nge_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}
/*
* Stop all chip I/O so that the kernel's probe routines don't
* get confused by errant DMAs when rebooting.
*/
static int
nge_shutdown(device_t dev)
{

    /* Suspend already quiesces all I/O and arms WOL — reuse it. */
    return (nge_suspend(dev));
}
static int
nge_suspend(device_t dev)
{
    struct nge_softc *sc = device_get_softc(dev);

    NGE_LOCK(sc);
    nge_stop(sc);
    /* Arm wake-on-LAN (if enabled) before the device loses power. */
    nge_wol(sc);
    sc->nge_flags |= NGE_FLAG_SUSPENDED;
    NGE_UNLOCK(sc);

    return (0);
}
static int
nge_resume(device_t dev)
{
    struct nge_softc *sc = device_get_softc(dev);
    if_t ifp;
    uint16_t pmstat;
    int pmc;

    NGE_LOCK(sc);
    ifp = sc->nge_ifp;
    /* Clear any PME enable bit left armed by nge_wol() at suspend time. */
    if (pci_find_cap(sc->nge_dev, PCIY_PMG, &pmc) == 0) {
        pmstat = pci_read_config(sc->nge_dev,
            pmc + PCIR_POWER_STATUS, 2);
        if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
            pmstat &= ~PCIM_PSTAT_PMEENABLE;
            pci_write_config(sc->nge_dev,
                pmc + PCIR_POWER_STATUS, pmstat, 2);
        }
    }
    /* Restart the interface if it was up when we suspended. */
    if ((if_getflags(ifp) & IFF_UP) != 0) {
        if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
        nge_init_locked(sc);
    }
    sc->nge_flags &= ~NGE_FLAG_SUSPENDED;
    NGE_UNLOCK(sc);

    return (0);
}
#define NGE_SYSCTL_STAT_ADD32(c, h, n, p, d) \
SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
/*
 * Build the per-device sysctl tree: the int_holdoff tunable (pulled in
 * from the hints/loader environment and range-checked) and the Rx/Tx MAC
 * statistics nodes backed by struct nge_stats.
 */
static void
nge_sysctl_node(struct nge_softc *sc)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *child, *parent;
    struct sysctl_oid *tree;
    struct nge_stats *stats;
    int error;

    ctx = device_get_sysctl_ctx(sc->nge_dev);
    child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nge_dev));
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_holdoff",
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->nge_int_holdoff,
        0, sysctl_hw_nge_int_holdoff, "I", "NGE interrupt moderation");

    /* Pull in device tunables; out-of-range values fall back to default. */
    sc->nge_int_holdoff = NGE_INT_HOLDOFF_DEFAULT;
    error = resource_int_value(device_get_name(sc->nge_dev),
        device_get_unit(sc->nge_dev), "int_holdoff", &sc->nge_int_holdoff);
    if (error == 0) {
        if (sc->nge_int_holdoff < NGE_INT_HOLDOFF_MIN ||
            sc->nge_int_holdoff > NGE_INT_HOLDOFF_MAX ) {
            device_printf(sc->nge_dev,
                "int_holdoff value out of range; "
                "using default: %d(%d us)\n",
                NGE_INT_HOLDOFF_DEFAULT,
                NGE_INT_HOLDOFF_DEFAULT * 100);
            sc->nge_int_holdoff = NGE_INT_HOLDOFF_DEFAULT;
        }
    }

    stats = &sc->nge_stats;
    tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NGE statistics");
    parent = SYSCTL_CHILDREN(tree);

    /* Rx statistics. */
    tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
    child = SYSCTL_CHILDREN(tree);
    NGE_SYSCTL_STAT_ADD32(ctx, child, "pkts_errs",
        &stats->rx_pkts_errs,
        "Packet errors including both wire errors and FIFO overruns");
    NGE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
        &stats->rx_crc_errs, "CRC errors");
    NGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
        &stats->rx_fifo_oflows, "FIFO overflows");
    NGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
        &stats->rx_align_errs, "Frame alignment errors");
    NGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs",
        &stats->rx_sym_errs, "One or more symbol errors");
    NGE_SYSCTL_STAT_ADD32(ctx, child, "pkts_jumbos",
        &stats->rx_pkts_jumbos,
        "Packets received with length greater than 1518 bytes");
    NGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
        &stats->rx_len_errs, "In Range Length errors");
    NGE_SYSCTL_STAT_ADD32(ctx, child, "unctl_frames",
        &stats->rx_unctl_frames, "Control frames with unsupported opcode");
    NGE_SYSCTL_STAT_ADD32(ctx, child, "pause",
        &stats->rx_pause, "Pause frames");

    /* Tx statistics. */
    tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
    child = SYSCTL_CHILDREN(tree);
    NGE_SYSCTL_STAT_ADD32(ctx, child, "pause",
        &stats->tx_pause, "Pause frames");
    NGE_SYSCTL_STAT_ADD32(ctx, child, "seq_errs",
        &stats->tx_seq_errs,
        "Loss of collision heartbeat during transmission");
}
#undef NGE_SYSCTL_STAT_ADD32
/*
 * Generic bounded-integer sysctl helper: accept a new value only when it
 * lies within [low, high]; otherwise report EINVAL and leave the target
 * untouched.
 */
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
    int err, val;

    if (arg1 == NULL)
        return (EINVAL);
    val = *(int *)arg1;
    err = sysctl_handle_int(oidp, &val, 0, req);
    if (err != 0 || req->newptr == NULL)
        return (err);    /* read-only access, or handler failure */
    if (val < low || val > high)
        return (EINVAL);
    *(int *)arg1 = val;
    return (0);
}
/*
 * Sysctl handler for the per-device int_holdoff knob: range-checked
 * update of the interrupt moderation value (units of 100us).
 */
static int
sysctl_hw_nge_int_holdoff(SYSCTL_HANDLER_ARGS)
{

    return (sysctl_int_range(oidp, arg1, arg2, req, NGE_INT_HOLDOFF_MIN,
        NGE_INT_HOLDOFF_MAX));
}
diff --git a/sys/dev/ntb/if_ntb/if_ntb.c b/sys/dev/ntb/if_ntb/if_ntb.c
index 5a132baa5f1c..2334265cefa7 100644
--- a/sys/dev/ntb/if_ntb/if_ntb.c
+++ b/sys/dev/ntb/if_ntb/if_ntb.c
@@ -1,509 +1,505 @@
/*-
* Copyright (c) 2016 Alexander Motin <mav@FreeBSD.org>
* Copyright (C) 2013 Intel Corporation
* Copyright (C) 2015 EMC Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* The Non-Transparent Bridge (NTB) is a device that allows you to connect
* two or more systems using a PCI-e links, providing remote memory access.
*
* This module contains a driver for simulated Ethernet device, using
* underlying NTB Transport device.
*
* NOTE: Much of the code in this module is shared with Linux. Any patches may
* be picked up and redistributed in Linux with a dual GPL/BSD license.
*/
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/buf_ring.h>
#include <sys/bus.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <machine/bus.h>
#include "../ntb_transport.h"
#define KTR_NTB KTR_SPARE3
#define NTB_MEDIATYPE (IFM_ETHER | IFM_AUTO | IFM_FDX)
#define NTB_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP)
#define NTB_CSUM_FEATURES6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_SCTP_IPV6)
#define NTB_CSUM_SET (CSUM_DATA_VALID | CSUM_DATA_VALID_IPV6 | \
CSUM_PSEUDO_HDR | \
CSUM_IP_CHECKED | CSUM_IP_VALID | \
CSUM_SCTP_VALID)
static SYSCTL_NODE(_hw, OID_AUTO, if_ntb, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"if_ntb");
static unsigned g_if_ntb_num_queues = UINT_MAX;
SYSCTL_UINT(_hw_if_ntb, OID_AUTO, num_queues, CTLFLAG_RWTUN,
&g_if_ntb_num_queues, 0, "Number of queues per interface");
struct ntb_net_queue {
struct ntb_net_ctx *sc;
if_t ifp;
struct ntb_transport_qp *qp;
struct buf_ring *br;
struct task tx_task;
struct taskqueue *tx_tq;
struct mtx tx_lock;
struct callout queue_full;
};
struct ntb_net_ctx {
if_t ifp;
struct ifmedia media;
u_char eaddr[ETHER_ADDR_LEN];
int num_queues;
struct ntb_net_queue *queues;
int mtu;
};
static int ntb_net_probe(device_t dev);
static int ntb_net_attach(device_t dev);
static int ntb_net_detach(device_t dev);
static void ntb_net_init(void *arg);
static int ntb_ifmedia_upd(struct ifnet *);
static void ntb_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int ntb_ioctl(if_t ifp, u_long command, caddr_t data);
static int ntb_transmit(if_t ifp, struct mbuf *m);
static void ntb_net_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
void *data, int len);
static void ntb_net_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
void *data, int len);
static void ntb_net_event_handler(void *data, enum ntb_link_event status);
static void ntb_handle_tx(void *arg, int pending);
static void ntb_qp_full(void *arg);
static void ntb_qflush(if_t ifp);
static void create_random_local_eui48(u_char *eaddr);
/* Unconditional probe: any ntb_transport child gets this driver. */
static int
ntb_net_probe(device_t dev)
{

    device_set_desc(dev, "NTB Network Interface");
    return (0);
}
/*
 * Attach: allocate an ifnet, create up to g_if_ntb_num_queues transport
 * queues (each with its own buf_ring, taskqueue, and queue-full callout),
 * attach as an Ethernet interface with a random locally-administered MAC,
 * and bring the transport links up.
 */
static int
ntb_net_attach(device_t dev)
{
    struct ntb_net_ctx *sc = device_get_softc(dev);
    struct ntb_net_queue *q;
    if_t ifp;
    struct ntb_queue_handlers handlers = { ntb_net_rx_handler,
        ntb_net_tx_handler, ntb_net_event_handler };
    int i;

    /* if_gethandle() cannot fail on modern FreeBSD (M_WAITOK). */
    ifp = sc->ifp = if_gethandle(IFT_ETHER);
- if (ifp == NULL) {
- printf("ntb: Cannot allocate ifnet structure\n");
- return (ENOMEM);
- }
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    if_setdev(ifp, dev);

    sc->num_queues = min(g_if_ntb_num_queues,
        ntb_transport_queue_count(dev));
    sc->queues = malloc(sc->num_queues * sizeof(struct ntb_net_queue),
        M_DEVBUF, M_WAITOK | M_ZERO);
    /* sc->mtu becomes the smallest max payload across all queues. */
    sc->mtu = INT_MAX;
    for (i = 0; i < sc->num_queues; i++) {
        q = &sc->queues[i];
        q->sc = sc;
        q->ifp = ifp;
        q->qp = ntb_transport_create_queue(dev, i, &handlers, q);
        if (q->qp == NULL)
            break;
        sc->mtu = imin(sc->mtu, ntb_transport_max_size(q->qp));
        mtx_init(&q->tx_lock, "ntb tx", NULL, MTX_DEF);
        q->br = buf_ring_alloc(4096, M_DEVBUF, M_WAITOK, &q->tx_lock);
        TASK_INIT(&q->tx_task, 0, ntb_handle_tx, q);
        q->tx_tq = taskqueue_create_fast("ntb_txq", M_NOWAIT,
            taskqueue_thread_enqueue, &q->tx_tq);
        taskqueue_start_threads(&q->tx_tq, 1, PI_NET, "%s txq%d",
            device_get_nameunit(dev), i);
        callout_init(&q->queue_full, 1);
    }
    /* Shrink to the number of queues actually created. */
    sc->num_queues = i;
    device_printf(dev, "%d queue(s)\n", sc->num_queues);

    if_setinitfn(ifp, ntb_net_init);
    if_setsoftc(ifp, sc);
    if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
    if_setioctlfn(ifp, ntb_ioctl);
    if_settransmitfn(ifp, ntb_transmit);
    if_setqflushfn(ifp, ntb_qflush);
    create_random_local_eui48(sc->eaddr);
    ether_ifattach(ifp, sc->eaddr);
    if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
        IFCAP_JUMBO_MTU | IFCAP_LINKSTATE);
    if_setcapenable(ifp, IFCAP_JUMBO_MTU | IFCAP_LINKSTATE);
    if_setmtu(ifp, sc->mtu - ETHER_HDR_LEN);

    ifmedia_init(&sc->media, IFM_IMASK, ntb_ifmedia_upd,
        ntb_ifmedia_sts);
    ifmedia_add(&sc->media, NTB_MEDIATYPE, 0, NULL);
    ifmedia_set(&sc->media, NTB_MEDIATYPE);

    for (i = 0; i < sc->num_queues; i++)
        ntb_transport_link_up(sc->queues[i].qp);
    return (0);
}
/*
 * Detach: take all transport links down, detach and free the ifnet,
 * then tear down each queue's transport qp, buf_ring, callout, taskqueue,
 * and mutex before freeing the queue array.
 */
static int
ntb_net_detach(device_t dev)
{
    struct ntb_net_ctx *sc = device_get_softc(dev);
    struct ntb_net_queue *q;
    int i;

    for (i = 0; i < sc->num_queues; i++)
        ntb_transport_link_down(sc->queues[i].qp);
    /* Detach the ifnet first so no new transmits can arrive. */
    ether_ifdetach(sc->ifp);
    if_free(sc->ifp);
    ifmedia_removeall(&sc->media);
    for (i = 0; i < sc->num_queues; i++) {
        q = &sc->queues[i];
        ntb_transport_free_queue(q->qp);
        buf_ring_free(q->br, M_DEVBUF);
        callout_drain(&q->queue_full);
        taskqueue_drain_all(q->tx_tq);
        mtx_destroy(&q->tx_lock);
    }
    free(sc->queues, M_DEVBUF);
    return (0);
}
/* Network device interface */
/* ifnet init callback: mark running and publish current link state. */
static void
ntb_net_init(void *arg)
{
    struct ntb_net_ctx *sc = arg;
    if_t ifp = sc->ifp;
    int linkup;

    if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
    if_setbaudrate(ifp, ntb_transport_link_speed(sc->queues[0].qp));
    linkup = ntb_transport_link_query(sc->queues[0].qp);
    if_link_state_change(ifp, linkup ? LINK_STATE_UP : LINK_STATE_DOWN);
}
/*
 * ioctl entry point for the NTB pseudo-Ethernet interface.  MTU is
 * bounded by the transport payload size; checksum capability toggles
 * keep the hwassist bits in sync.
 */
static int
ntb_ioctl(if_t ifp, u_long command, caddr_t data)
{
    struct ntb_net_ctx *sc = if_getsoftc(ifp);
    struct ifreq *ifr = (struct ifreq *)data;
    int error = 0;

    switch (command) {
    case SIOCSIFFLAGS:
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        /* No promiscuous/multicast state to program; accept silently. */
        break;

    case SIOCSIFMTU:
        {
        /* MTU cannot exceed the transport's max payload minus header. */
        if (ifr->ifr_mtu > sc->mtu - ETHER_HDR_LEN) {
            error = EINVAL;
            break;
        }

        if_setmtu(ifp, ifr->ifr_mtu);
        break;
        }

    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
        break;

    case SIOCSIFCAP:
        if (ifr->ifr_reqcap & IFCAP_RXCSUM)
            if_setcapenablebit(ifp, IFCAP_RXCSUM, 0);
        else
            if_setcapenablebit(ifp, 0, IFCAP_RXCSUM);
        /* Tx checksum toggles also update the hwassist feature bits. */
        if (ifr->ifr_reqcap & IFCAP_TXCSUM) {
            if_setcapenablebit(ifp, IFCAP_TXCSUM, 0);
            if_sethwassistbits(ifp, NTB_CSUM_FEATURES, 0);
        } else {
            if_setcapenablebit(ifp, 0, IFCAP_TXCSUM);
            if_sethwassistbits(ifp, 0, NTB_CSUM_FEATURES);
        }
        if (ifr->ifr_reqcap & IFCAP_RXCSUM_IPV6)
            if_setcapenablebit(ifp, IFCAP_RXCSUM_IPV6, 0);
        else
            if_setcapenablebit(ifp, 0, IFCAP_RXCSUM_IPV6);
        if (ifr->ifr_reqcap & IFCAP_TXCSUM_IPV6) {
            if_setcapenablebit(ifp, IFCAP_TXCSUM_IPV6, 0);
            if_sethwassistbits(ifp, NTB_CSUM_FEATURES6, 0);
        } else {
            if_setcapenablebit(ifp, 0, IFCAP_TXCSUM_IPV6);
            if_sethwassistbits(ifp, 0, NTB_CSUM_FEATURES6);
        }
        break;

    default:
        error = ether_ioctl(ifp, command, data);
        break;
    }

    return (error);
}
/* Media change: only the (fixed) Ethernet media type is acceptable. */
static int
ntb_ifmedia_upd(struct ifnet *ifp)
{
    struct ntb_net_ctx *sc = if_getsoftc(ifp);
    struct ifmedia *ifm = &sc->media;

    return (IFM_TYPE(ifm->ifm_media) == IFM_ETHER ? 0 : EINVAL);
}
/* Media status: report the fixed media type plus current link state. */
static void
ntb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct ntb_net_ctx *sc = if_getsoftc(ifp);

    ifmr->ifm_active = NTB_MEDIATYPE;
    ifmr->ifm_status = IFM_AVALID |
        (ntb_transport_link_query(sc->queues[0].qp) ? IFM_ACTIVE : 0);
}
/*
 * Drain the queue's buf_ring into the NTB transport.  Uses the
 * peek/putback/advance protocol so a packet rejected with EAGAIN can be
 * retried later without reordering.  Called with q->tx_lock held.
 */
static void
ntb_transmit_locked(struct ntb_net_queue *q)
{
    if_t ifp = q->ifp;
    struct mbuf *m;
    int rc, len;
    short mflags;

    CTR0(KTR_NTB, "TX: ntb_transmit_locked");
    while ((m = drbr_peek(ifp, q->br)) != NULL) {
        CTR1(KTR_NTB, "TX: start mbuf %p", m);
        ether_bpf_mtap_if(ifp, m);
        /* Cache length/flags: the mbuf is consumed on successful enqueue. */
        len = m->m_pkthdr.len;
        mflags = m->m_flags;
        rc = ntb_transport_tx_enqueue(q->qp, m, m, len);
        if (rc != 0) {
            CTR2(KTR_NTB, "TX: could not tx mbuf %p: %d", m, rc);
            if (rc == EAGAIN) {
                /* Queue full: keep the mbuf and retry from a callout. */
                drbr_putback(ifp, q->br, m);
                callout_reset_sbt(&q->queue_full,
                    SBT_1MS / 4, SBT_1MS / 4,
                    ntb_qp_full, q, 0);
            } else {
                /* Hard error: drop the packet and count it. */
                m_freem(m);
                drbr_advance(ifp, q->br);
                if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
            }
            break;
        }
        drbr_advance(ifp, q->br);
        if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
        if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
        if (mflags & M_MCAST)
            if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
    }
}
/*
 * ifnet transmit callback: pick a queue (by flow id when the mbuf is
 * hashed, otherwise by current CPU), enqueue onto its buf_ring, and
 * drain inline if the queue lock is free, else via the taskqueue.
 */
static int
ntb_transmit(if_t ifp, struct mbuf *m)
{
    struct ntb_net_ctx *sc = if_getsoftc(ifp);
    struct ntb_net_queue *q;
    int error, qi;

    CTR0(KTR_NTB, "TX: ntb_transmit");
    qi = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ?
        m->m_pkthdr.flowid % sc->num_queues :
        curcpu % sc->num_queues;
    q = &sc->queues[qi];

    error = drbr_enqueue(ifp, q->br, m);
    if (error != 0)
        return (error);

    if (mtx_trylock(&q->tx_lock)) {
        ntb_transmit_locked(q);
        mtx_unlock(&q->tx_lock);
    } else
        taskqueue_enqueue(q->tx_tq, &q->tx_task);
    return (0);
}
/*
 * Deferred transmit task: take the queue lock (serializing with direct
 * transmitters in ntb_transmit()) and drain the buf_ring.
 */
static void
ntb_handle_tx(void *arg, int pending)
{
    struct ntb_net_queue *q = arg;

    mtx_lock(&q->tx_lock);
    ntb_transmit_locked(q);
    mtx_unlock(&q->tx_lock);
}
/*
 * Queue-full callout: once transmit slots free up, restart the transmit
 * task; otherwise poll again in ~0.25ms.
 */
static void
ntb_qp_full(void *arg)
{
    struct ntb_net_queue *q = arg;

    CTR0(KTR_NTB, "TX: qp_full callout");
    if (ntb_transport_tx_free_entry(q->qp) <= 0) {
        callout_schedule_sbt(&q->queue_full,
            SBT_1MS / 4, SBT_1MS / 4, 0);
        return;
    }
    taskqueue_enqueue(q->tx_tq, &q->tx_task);
}
/* Discard every packet queued in the per-queue buf_rings. */
static void
ntb_qflush(if_t ifp)
{
    struct ntb_net_ctx *sc = if_getsoftc(ifp);
    struct mbuf *m;
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        struct ntb_net_queue *q = &sc->queues[i];

        mtx_lock(&q->tx_lock);
        while ((m = buf_ring_dequeue_sc(q->br)) != NULL)
            m_freem(m);
        mtx_unlock(&q->tx_lock);
    }
    if_qflush(ifp);
}
/* Network Device Callbacks */
/*
 * Transmit-complete callback from the transport: the mbuf itself was
 * passed as the per-packet cookie, so just free it.
 */
static void
ntb_net_tx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data,
    int len)
{
    struct mbuf *m = data;

    CTR1(KTR_NTB, "TX: tx_handler freeing mbuf %p", m);
    m_freem(m);
}
/*
 * Receive callback from the transport: tag the mbuf with its receiving
 * queue's flow id, fake-validate checksums when RXCSUM is enabled, and
 * hand the packet to the network stack.
 */
static void
ntb_net_rx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data,
    int len)
{
    struct ntb_net_queue *q = qp_data;
    struct ntb_net_ctx *sc = q->sc;
    struct mbuf *m = data;
    if_t ifp = q->ifp;
    uint16_t proto;

    CTR1(KTR_NTB, "RX: rx handler (%d)", len);
    /* A negative length signals a transport-level receive error. */
    if (len < 0) {
        if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
        return;
    }

    m->m_pkthdr.rcvif = ifp;
    /* With multiple queues, tag the flow so the stack preserves ordering. */
    if (sc->num_queues > 1) {
        m->m_pkthdr.flowid = q - sc->queues;
        M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
    }
    /*
     * When Rx checksum offload is enabled, mark IP/L4 checksums as
     * already verified — presumably because the NTB memory channel
     * cannot corrupt data in transit.  Bytes 12-13 of the frame are
     * the Ethernet type field.
     */
    if (if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
        m_copydata(m, 12, 2, (void *)&proto);
        switch (ntohs(proto)) {
        case ETHERTYPE_IP:
            if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
                m->m_pkthdr.csum_data = 0xffff;
                m->m_pkthdr.csum_flags = NTB_CSUM_SET;
            }
            break;
        case ETHERTYPE_IPV6:
            if (if_getcapenable(ifp) & IFCAP_RXCSUM_IPV6) {
                m->m_pkthdr.csum_data = 0xffff;
                m->m_pkthdr.csum_flags = NTB_CSUM_SET;
            }
            break;
        }
    }
    if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
    if_input(ifp, m);
}
/* Link event callback: propagate transport link state to the ifnet. */
static void
ntb_net_event_handler(void *data, enum ntb_link_event status)
{
    struct ntb_net_queue *q = data;
    int state;

    state = (status == NTB_LINK_UP) ? LINK_STATE_UP : LINK_STATE_DOWN;
    if_setbaudrate(q->ifp, ntb_transport_link_speed(q->qp));
    if_link_state_change(q->ifp, state);
}
/* Helper functions */
/* TODO: This too should really be part of the kernel */
/*
 * EUI-48 first-octet flag bits.  Parenthesized so they expand safely
 * inside larger expressions (e.g. "x & EUI48_MULTICAST").
 */
#define EUI48_MULTICAST			(1 << 0)
#define EUI48_LOCALLY_ADMINISTERED	(1 << 1)

/*
 * Generate a locally administered, unicast EUI-48 (MAC) address:
 * first octet carries only the locally-administered bit, the middle
 * four octets are random, and the last octet is a per-boot counter so
 * multiple interfaces created in a row stay distinct.
 */
static void
create_random_local_eui48(u_char *eaddr)
{
	static uint8_t counter = 0;

	eaddr[0] = EUI48_LOCALLY_ADMINISTERED;
	arc4rand(&eaddr[1], 4, 0);
	eaddr[5] = counter++;
}
/* newbus glue: register if_ntb as a child driver of ntb_transport. */
static device_method_t ntb_net_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,     ntb_net_probe),
	DEVMETHOD(device_attach,    ntb_net_attach),
	DEVMETHOD(device_detach,    ntb_net_detach),
	DEVMETHOD_END
};

static DEFINE_CLASS_0(ntb, ntb_net_driver, ntb_net_methods,
    sizeof(struct ntb_net_ctx));
DRIVER_MODULE(if_ntb, ntb_transport, ntb_net_driver, NULL, NULL);
MODULE_DEPEND(if_ntb, ntb_transport, 1, 1, 1);
MODULE_VERSION(if_ntb, 1);
diff --git a/sys/dev/oce/oce_if.c b/sys/dev/oce/oce_if.c
index 2084b9c84fff..bf257b952a03 100644
--- a/sys/dev/oce/oce_if.c
+++ b/sys/dev/oce/oce_if.c
@@ -1,2969 +1,2962 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (C) 2013 Emulex
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Emulex Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* freebsd-drivers@emulex.com
*
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*/
#include "opt_inet6.h"
#include "opt_inet.h"
#include "oce_if.h"
#include "oce_user.h"
/* True when the mbuf is marked for TSO; argument parenthesized so the
 * macro works with any expression passed as m. */
#define is_tso_pkt(m) ((m)->m_pkthdr.csum_flags & CSUM_TSO)
/* UE Status Low CSR */
/* Names for the Unrecoverable Error status low register; entry i
 * labels bit i when a hardware UE is reported. */
static char *ue_status_low_desc[] = {
    "CEV",
    "CTX",
    "DBUF",
    "ERX",
    "Host",
    "MPU",
    "NDMA",
    "PTC ",
    "RDMA ",
    "RXF ",
    "RXIPS ",
    "RXULP0 ",
    "RXULP1 ",
    "RXULP2 ",
    "TIM ",
    "TPOST ",
    "TPRE ",
    "TXIPS ",
    "TXULP0 ",
    "TXULP1 ",
    "UC ",
    "WDMA ",
    "TXULP2 ",
    "HOST1 ",
    "P0_OB_LINK ",
    "P1_OB_LINK ",
    "HOST_GPIO ",
    "MBOX ",
    "AXGMAC0",
    "AXGMAC1",
    "JTAG",
    "MPU_INTPEND"
};
/* UE Status High CSR */
/* Names for the Unrecoverable Error status high register; entry i
 * labels bit i when a hardware UE is reported. */
static char *ue_status_hi_desc[] = {
    "LPCMEMHOST",
    "MGMT_MAC",
    "PCS0ONLINE",
    "MPU_IRAM",
    "PCS1ONLINE",
    "PCTL0",
    "PCTL1",
    "PMEM",
    "RR",
    "TXPB",
    "RXPP",
    "XAUI",
    "TXP",
    "ARM",
    "IPC",
    "HOST2",
    "HOST3",
    "HOST4",
    "HOST5",
    "HOST6",
    "HOST7",
    "HOST8",
    "HOST9",
    "NETC",
    "Unknown",
    "Unknown",
    "Unknown",
    "Unknown",
    "Unknown",
    "Unknown",
    "Unknown",
    "Unknown"
};
/*
 * Normalized receive-completion information shared by the RX paths;
 * presumably decoded from the hardware completion entry — field
 * meanings below inferred from their use in oce_rx_mbuf_chain() and
 * the checksum/VLAN handling; confirm against the CQE layout.
 */
struct oce_common_cqe_info{
    uint8_t vtp:1;           /* VLAN tag present */
    uint8_t l4_cksum_pass:1; /* L4 checksum validated by hardware */
    uint8_t ip_cksum_pass:1; /* IPv4 header checksum validated */
    uint8_t ipv6_frame:1;    /* frame is IPv6 */
    uint8_t qnq:1;           /* stacked (QinQ) VLAN */
    uint8_t rsvd:3;
    uint8_t num_frags;       /* RX ring fragments making up the packet */
    uint16_t pkt_size;       /* total packet length in bytes */
    uint16_t vtag;           /* VLAN tag, meaningful when vtp is set */
};
/* Driver entry points prototypes */
static int oce_probe(device_t dev);
static int oce_attach(device_t dev);
static int oce_detach(device_t dev);
static int oce_shutdown(device_t dev);
static int oce_ioctl(if_t ifp, u_long command, caddr_t data);
static void oce_init(void *xsc);
static int oce_multiq_start(if_t ifp, struct mbuf *m);
static void oce_multiq_flush(if_t ifp);
/* Driver interrupt routines protypes */
static void oce_intr(void *arg, int pending);
static int oce_setup_intr(POCE_SOFTC sc);
static int oce_fast_isr(void *arg);
static int oce_alloc_intr(POCE_SOFTC sc, int vector,
void (*isr) (void *arg, int pending));
/* Media callbacks prototypes */
static void oce_media_status(if_t ifp, struct ifmediareq *req);
static int oce_media_change(if_t ifp);
/* Transmit routines prototypes */
static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
static void oce_process_tx_completion(struct oce_wq *wq);
static int oce_multiq_transmit(if_t ifp, struct mbuf *m,
struct oce_wq *wq);
/* Receive routines prototypes */
static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static void oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
static void oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq);
static uint16_t oce_rq_handler_lro(void *arg);
static void oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2);
static void oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2);
static void oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m);
/* Helper function prototypes in this file */
-static int oce_attach_ifp(POCE_SOFTC sc);
+static void oce_attach_ifp(POCE_SOFTC sc);
static void oce_add_vlan(void *arg, if_t ifp, uint16_t vtag);
static void oce_del_vlan(void *arg, if_t ifp, uint16_t vtag);
static int oce_vid_config(POCE_SOFTC sc);
static void oce_mac_addr_set(POCE_SOFTC sc);
static int oce_handle_passthrough(if_t ifp, caddr_t data);
static void oce_local_timer(void *arg);
static void oce_if_deactivate(POCE_SOFTC sc);
static void oce_if_activate(POCE_SOFTC sc);
static void setup_max_queues_want(POCE_SOFTC sc);
static void update_queues_got(POCE_SOFTC sc);
static void process_link_state(POCE_SOFTC sc,
struct oce_async_cqe_link_state *acqe);
static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
static void oce_get_config(POCE_SOFTC sc);
static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
static void oce_read_env_variables(POCE_SOFTC sc);
/* IP specific */
#if defined(INET6) || defined(INET)
static int oce_init_lro(POCE_SOFTC sc);
static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
#endif
/* Newbus method table for the oce PCI driver. */
static device_method_t oce_dispatch[] = {
    DEVMETHOD(device_probe, oce_probe),
    DEVMETHOD(device_attach, oce_attach),
    DEVMETHOD(device_detach, oce_detach),
    DEVMETHOD(device_shutdown, oce_shutdown),
    DEVMETHOD_END
};
/* Driver declaration: name, method table, per-device softc size. */
static driver_t oce_driver = {
    "oce",
    oce_dispatch,
    sizeof(OCE_SOFTC)
};
/* global vars */
const char component_revision[32] = {"///" COMPONENT_REVISION "///"};

/* Module capabilities and parameters (loader tunables below). */
uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
uint32_t oce_enable_rss = OCE_MODCAP_RSS;
uint32_t oce_rq_buf_size = 2048;

TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);

/* Supported devices table: each entry is (vendor << 16) | device. */
static uint32_t supportedDevices[] = {
    (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
    (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
    (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
    (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
    (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
    (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
};

DRIVER_MODULE(oce, pci, oce_driver, 0, 0);
MODULE_PNP_INFO("W32:vendor/device", pci, oce, supportedDevices,
    nitems(supportedDevices));
MODULE_DEPEND(oce, pci, 1, 1, 1);
MODULE_DEPEND(oce, ether, 1, 1, 1);
MODULE_VERSION(oce, 1);

/* Singly-linked list of all attached adapters (see oce_attach/detach). */
POCE_SOFTC softc_head = NULL;
POCE_SOFTC softc_tail = NULL;

struct oce_rdma_if *oce_rdma_if = NULL;
/*****************************************************************************
* Driver entry points functions *
*****************************************************************************/
/*
 * PCI probe: match the vendor/device pair against the supported-device
 * table, record the ASIC family in the softc and set the device
 * description.  Returns BUS_PROBE_DEFAULT on a match, ENXIO otherwise.
 */
static int
oce_probe(device_t dev)
{
    POCE_SOFTC sc;
    uint32_t id;
    int i;

    sc = device_get_softc(dev);
    bzero(sc, sizeof(OCE_SOFTC));
    sc->dev = dev;

    /* Table entries are (vendor << 16) | device; compare in one shot. */
    id = ((uint32_t)pci_get_vendor(dev) << 16) | pci_get_device(dev);
    for (i = 0; i < nitems(supportedDevices); i++) {
        if (supportedDevices[i] != id)
            continue;
        device_set_descf(dev, "%s:%s", "Emulex CNA NIC function",
            component_revision);
        switch (id & 0xffff) {
        case PCI_PRODUCT_BE2:
            sc->flags |= OCE_FLAGS_BE2;
            break;
        case PCI_PRODUCT_BE3:
            sc->flags |= OCE_FLAGS_BE3;
            break;
        case PCI_PRODUCT_XE201:
        case PCI_PRODUCT_XE201_VF:
            sc->flags |= OCE_FLAGS_XE201;
            break;
        case PCI_PRODUCT_SH:
            sc->flags |= OCE_FLAGS_SH;
            break;
        default:
            return ENXIO;
        }
        return BUS_PROBE_DEFAULT;
    }
    return ENXIO;
}
static int
oce_attach(device_t dev)
{
POCE_SOFTC sc;
int rc = 0;
sc = device_get_softc(dev);
rc = oce_hw_pci_alloc(sc);
if (rc)
return rc;
sc->tx_ring_size = OCE_TX_RING_SIZE;
sc->rx_ring_size = OCE_RX_RING_SIZE;
/* receive fragment size should be multiple of 2K */
sc->rq_frag_size = ((oce_rq_buf_size / 2048) * 2048);
sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
sc->promisc = OCE_DEFAULT_PROMISCUOUS;
LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
LOCK_CREATE(&sc->dev_lock, "Device_lock");
/* initialise the hardware */
rc = oce_hw_init(sc);
if (rc)
goto pci_res_free;
oce_read_env_variables(sc);
oce_get_config(sc);
setup_max_queues_want(sc);
rc = oce_setup_intr(sc);
if (rc)
goto mbox_free;
rc = oce_queue_init_all(sc);
if (rc)
goto intr_free;
- rc = oce_attach_ifp(sc);
- if (rc)
- goto queues_free;
+ oce_attach_ifp(sc);
#if defined(INET6) || defined(INET)
rc = oce_init_lro(sc);
if (rc)
goto ifp_free;
#endif
rc = oce_hw_start(sc);
if (rc)
goto lro_free;
sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
rc = oce_stats_init(sc);
if (rc)
goto vlan_free;
oce_add_sysctls(sc);
callout_init(&sc->timer, CALLOUT_MPSAFE);
rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
if (rc)
goto stats_free;
sc->next =NULL;
if (softc_tail != NULL) {
softc_tail->next = sc;
} else {
softc_head = sc;
}
softc_tail = sc;
gone_in_dev(dev, 15, "relatively uncommon 10GbE NIC");
return 0;
stats_free:
callout_drain(&sc->timer);
oce_stats_free(sc);
vlan_free:
if (sc->vlan_attach)
EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
if (sc->vlan_detach)
EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
oce_hw_intr_disable(sc);
lro_free:
#if defined(INET6) || defined(INET)
oce_free_lro(sc);
ifp_free:
#endif
ether_ifdetach(sc->ifp);
if_free(sc->ifp);
-queues_free:
oce_queue_release_all(sc);
intr_free:
oce_intr_free(sc);
mbox_free:
oce_dma_free(sc, &sc->bsmbx);
pci_res_free:
oce_hw_pci_free(sc);
LOCK_DESTROY(&sc->dev_lock);
LOCK_DESTROY(&sc->bmbx_lock);
return rc;
}
/*
 * Detach: unlink this softc from the global adapter list, quiesce the
 * interface under the device lock, stop the timer, deregister VLAN
 * event handlers and tear down the ifnet and hardware.
 */
static int
oce_detach(device_t dev)
{
    POCE_SOFTC sc = device_get_softc(dev);
    POCE_SOFTC poce_sc_tmp, *ppoce_sc_tmp1, poce_sc_tmp2 = NULL;

    /* Walk the global list; poce_sc_tmp2 trails one node behind so
     * softc_tail can be fixed up when the last element is removed. */
    poce_sc_tmp = softc_head;
    ppoce_sc_tmp1 = &softc_head;
    while (poce_sc_tmp != NULL) {
        if (poce_sc_tmp == sc) {
            *ppoce_sc_tmp1 = sc->next;
            if (sc->next == NULL) {
                softc_tail = poce_sc_tmp2;
            }
            break;
        }
        poce_sc_tmp2 = poce_sc_tmp;
        ppoce_sc_tmp1 = &poce_sc_tmp->next;
        poce_sc_tmp = poce_sc_tmp->next;
    }

    LOCK(&sc->dev_lock);
    oce_if_deactivate(sc);
    UNLOCK(&sc->dev_lock);

    callout_drain(&sc->timer);

    if (sc->vlan_attach != NULL)
        EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
    if (sc->vlan_detach != NULL)
        EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

    ether_ifdetach(sc->ifp);

    if_free(sc->ifp);

    oce_hw_shutdown(sc);

    bus_generic_detach(dev);

    return 0;
}
/* System shutdown is handled as a full detach of the adapter. */
static int
oce_shutdown(device_t dev)
{
    return (oce_detach(dev));
}
/*
 * Interface ioctl handler: media queries, MTU, up/down + promiscuous
 * synchronization, multicast updates, capability toggles, SFP module
 * i2c EEPROM reads and the privileged passthrough used by management
 * tools.  Anything else falls through to ether_ioctl().
 */
static int
oce_ioctl(if_t ifp, u_long command, caddr_t data)
{
    struct ifreq *ifr = (struct ifreq *)data;
    POCE_SOFTC sc = if_getsoftc(ifp);
    struct ifi2creq i2c;
    uint8_t offset = 0;
    int rc = 0;
    uint32_t u;

    switch (command) {
    case SIOCGIFMEDIA:
        rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
        break;

    case SIOCSIFMTU:
        if (ifr->ifr_mtu > OCE_MAX_MTU)
            rc = EINVAL;
        else
            if_setmtu(ifp, ifr->ifr_mtu);
        break;

    case SIOCSIFFLAGS:
        if (if_getflags(ifp) & IFF_UP) {
            if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
                if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
                oce_init(sc);
            }
            device_printf(sc->dev, "Interface Up\n");
        } else {
            LOCK(&sc->dev_lock);
            if_setdrvflagbits(sc->ifp, 0,
                IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
            oce_if_deactivate(sc);
            UNLOCK(&sc->dev_lock);
            device_printf(sc->dev, "Interface Down\n");
        }

        /* Keep hardware promiscuous mode in sync with IFF_PROMISC. */
        if ((if_getflags(ifp) & IFF_PROMISC) && !sc->promisc) {
            if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1))))
                sc->promisc = TRUE;
        } else if (!(if_getflags(ifp) & IFF_PROMISC) && sc->promisc) {
            if (!oce_rxf_set_promiscuous(sc, 0))
                sc->promisc = FALSE;
        }
        break;

    case SIOCADDMULTI:
    case SIOCDELMULTI:
        rc = oce_hw_update_multicast(sc);
        if (rc)
            device_printf(sc->dev,
                "Update multicast address failed\n");
        break;

    case SIOCSIFCAP:
        /* u is the set of capability bits being toggled. */
        u = ifr->ifr_reqcap ^ if_getcapenable(ifp);

        if (u & IFCAP_TXCSUM) {
            if_togglecapenable(ifp, IFCAP_TXCSUM);
            if_togglehwassist(ifp, (CSUM_TCP | CSUM_UDP | CSUM_IP));

            /* TSO depends on TX checksum offload; drop it too. */
            if (IFCAP_TSO & if_getcapenable(ifp) &&
                !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
                u &= ~IFCAP_TSO;
                if_setcapenablebit(ifp, 0, IFCAP_TSO);
                if_sethwassistbits(ifp, 0, CSUM_TSO);
                if_printf(ifp,
                    "TSO disabled due to -txcsum.\n");
            }
        }

        if (u & IFCAP_RXCSUM)
            if_togglecapenable(ifp, IFCAP_RXCSUM);

        if (u & IFCAP_TSO4) {
            if_togglecapenable(ifp, IFCAP_TSO4);

            if (IFCAP_TSO & if_getcapenable(ifp)) {
                if (IFCAP_TXCSUM & if_getcapenable(ifp))
                    if_sethwassistbits(ifp, CSUM_TSO, 0);
                else {
                    /* Refuse TSO without TX checksumming. */
                    if_setcapenablebit(ifp, 0, IFCAP_TSO);
                    if_sethwassistbits(ifp, 0, CSUM_TSO);
                    if_printf(ifp,
                        "Enable txcsum first.\n");
                    rc = EAGAIN;
                }
            } else
                if_sethwassistbits(ifp, 0, CSUM_TSO);
        }

        if (u & IFCAP_VLAN_HWTAGGING)
            if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);

        if (u & IFCAP_VLAN_HWFILTER) {
            if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER);
            oce_vid_config(sc);
        }
#if defined(INET6) || defined(INET)
        if (u & IFCAP_LRO) {
            if_togglecapenable(ifp, IFCAP_LRO);
            if(sc->enable_hwlro) {
                if(if_getcapenable(ifp) & IFCAP_LRO) {
                    rc = oce_mbox_nic_set_iface_lro_config(sc, 1);
                }else {
                    rc = oce_mbox_nic_set_iface_lro_config(sc, 0);
                }
            }
        }
#endif
        break;

    case SIOCGI2C:
        /* Read SFP module EEPROM (page A0 or A2) over i2c. */
        rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
        if (rc)
            break;

        if (i2c.dev_addr == PAGE_NUM_A0) {
            offset = i2c.offset;
        } else if (i2c.dev_addr == PAGE_NUM_A2) {
            offset = TRANSCEIVER_A0_SIZE + i2c.offset;
        } else {
            rc = EINVAL;
            break;
        }

        /* Bounds-check the user-supplied length and offset. */
        if (i2c.len > sizeof(i2c.data) ||
            i2c.len + offset > sizeof(sfp_vpd_dump_buffer)) {
            rc = EINVAL;
            break;
        }

        rc = oce_mbox_read_transrecv_data(sc, i2c.dev_addr);
        if (rc) {
            rc = -rc;
            break;
        }

        memcpy(&i2c.data[0], &sfp_vpd_dump_buffer[offset], i2c.len);

        rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
        break;

    case SIOCGPRIVATE_0:
        rc = priv_check(curthread, PRIV_DRIVER);
        if (rc != 0)
            break;
        rc = oce_handle_passthrough(ifp, data);
        break;
    default:
        rc = ether_ioctl(ifp, command, data);
        break;
    }

    return rc;
}
/*
 * (Re)initialize the interface: under the device lock, bounce the
 * adapter (deactivate, then activate) when the interface is up.
 */
static void
oce_init(void *arg)
{
    POCE_SOFTC sc = arg;

    LOCK(&sc->dev_lock);
    if ((if_getflags(sc->ifp) & IFF_UP) != 0) {
        oce_if_deactivate(sc);
        oce_if_activate(sc);
    }
    UNLOCK(&sc->dev_lock);
}
/*
 * if_transmit entry point: choose a work queue from the mbuf flowid
 * (when hashed) and hand the packet to the multiqueue transmit path
 * under that queue's TX lock.  Fails with ENXIO while link is down.
 */
static int
oce_multiq_start(if_t ifp, struct mbuf *m)
{
    POCE_SOFTC sc = if_getsoftc(ifp);
    struct oce_wq *wq;
    int qidx, status;

    if (!sc->link_status)
        return ENXIO;

    qidx = 0;
    if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
        qidx = m->m_pkthdr.flowid % sc->nwqs;

    wq = sc->wq[qidx];

    LOCK(&wq->tx_lock);
    status = oce_multiq_transmit(ifp, m, wq);
    UNLOCK(&wq->tx_lock);

    return status;
}
/* Drain every per-WQ buf_ring, then flush the interface send queue. */
static void
oce_multiq_flush(if_t ifp)
{
    POCE_SOFTC sc = if_getsoftc(ifp);
    struct mbuf *pkt;
    int q;

    for (q = 0; q < sc->nwqs; q++) {
        while ((pkt = buf_ring_dequeue_sc(sc->wq[q]->br)) != NULL)
            m_freem(pkt);
    }
    if_qflush(ifp);
}
/*****************************************************************************
* Driver interrupt routines functions *
*****************************************************************************/
/*
 * Deferred (taskqueue) interrupt handler for one event queue: consume
 * all pending EQ entries, run each attached CQ handler, then re-arm
 * the CQs and finally the EQ.  Arming order matters: entries are
 * cleared without arming first so no events are lost while handlers
 * run.
 */
static void
oce_intr(void *arg, int pending)
{
    POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
    POCE_SOFTC sc = ii->sc;
    struct oce_eq *eq = ii->eq;
    struct oce_eqe *eqe;
    struct oce_cq *cq = NULL;
    int i, num_eqes = 0;

    bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
        BUS_DMASYNC_POSTWRITE);
    /* Consume valid EQ entries until an unset one is found. */
    do {
        eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
        if (eqe->evnt == 0)
            break;
        eqe->evnt = 0;
        bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
            BUS_DMASYNC_POSTWRITE);
        RING_GET(eq->ring, 1);
        num_eqes++;
    } while (TRUE);

    if (!num_eqes)
        goto eq_arm; /* Spurious */

    /* Clear EQ entries, but dont arm */
    oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);

    /* Process TX, RX and MCC. But dont arm CQ*/
    for (i = 0; i < eq->cq_valid; i++) {
        cq = eq->cq[i];
        (*cq->cq_handler)(cq->cb_arg);
    }

    /* Arm all cqs connected to this EQ */
    for (i = 0; i < eq->cq_valid; i++) {
        cq = eq->cq[i];
        oce_arm_cq(sc, cq->cq_id, 0, TRUE);
    }

eq_arm:
    oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

    return;
}
/*
 * Work out how many interrupt vectors are wanted (one per RSS queue
 * pair plus optional RDMA vectors), try MSI-X first, fall back to a
 * single legacy INTx vector, scale the queue counts to what was
 * granted, then allocate each vector.
 */
static int
oce_setup_intr(POCE_SOFTC sc)
{
    int rc = 0, use_intx = 0;
    int vector = 0, req_vectors = 0;
    int tot_req_vectors, tot_vectors;

    if (is_rss_enabled(sc))
        req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
    else
        req_vectors = 1;

    tot_req_vectors = req_vectors;
    if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
        if (req_vectors > 1) {
            tot_req_vectors += OCE_RDMA_VECTORS;
            sc->roce_intr_count = OCE_RDMA_VECTORS;
        }
    }

    if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
        sc->intr_count = req_vectors;
        tot_vectors = tot_req_vectors;
        rc = pci_alloc_msix(sc->dev, &tot_vectors);
        if (rc != 0) {
            use_intx = 1;
            pci_release_msi(sc->dev);
        } else {
            /* Fewer vectors granted than requested: split what
             * we got between the NIC and the RDMA function. */
            if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
                if (tot_vectors < tot_req_vectors) {
                    if (sc->intr_count < (2 * OCE_RDMA_VECTORS)) {
                        sc->roce_intr_count = (tot_vectors / 2);
                    }
                    sc->intr_count = tot_vectors - sc->roce_intr_count;
                }
            } else {
                sc->intr_count = tot_vectors;
            }
            sc->flags |= OCE_FLAGS_USING_MSIX;
        }
    } else
        use_intx = 1;

    if (use_intx)
        sc->intr_count = 1;

    /* Scale number of queues based on intr we got */
    update_queues_got(sc);

    if (use_intx) {
        device_printf(sc->dev, "Using legacy interrupt\n");
        rc = oce_alloc_intr(sc, vector, oce_intr);
        if (rc)
            goto error;
    } else {
        for (; vector < sc->intr_count; vector++) {
            rc = oce_alloc_intr(sc, vector, oce_intr);
            if (rc)
                goto error;
        }
    }

    return 0;
error:
    oce_intr_free(sc);
    return rc;
}
/*
 * Primary interrupt filter (runs in interrupt context): disarms the
 * EQ via oce_arm_eq(..., FALSE, TRUE) — presumably masking further
 * events until the deferred handler re-arms it; confirm against
 * oce_arm_eq semantics — then enqueues the per-vector task that does
 * the actual completion processing.
 */
static int
oce_fast_isr(void *arg)
{
    POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
    POCE_SOFTC sc = ii->sc;

    if (ii->eq == NULL)
        return FILTER_STRAY;

    oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);

    taskqueue_enqueue(ii->tq, &ii->task);

    /* Per-EQ interrupt statistics counter. */
    ii->eq->intr++;

    return FILTER_HANDLED;
}
/*
 * Allocate and wire one interrupt vector: bus IRQ resource, fast
 * filter handler and a per-vector taskqueue running the deferred EQ
 * work.  On failure the caller (oce_setup_intr) releases any partial
 * allocations via oce_intr_free().
 *
 * Fixes: task_name is now built with snprintf() so the fixed-size
 * buffer cannot overflow, and a failed taskqueue_create_fast() is
 * detected (returning ENOMEM) instead of being dereferenced by
 * taskqueue_start_threads().
 */
static int
oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
{
    POCE_INTR_INFO ii;
    int rc = 0, rr;

    if (vector >= OCE_MAX_EQ)
        return (EINVAL);

    ii = &sc->intrs[vector];

    /* Set the resource id for the interrupt.
     * MSIx is vector + 1 for the resource id,
     * INTx is 0 for the resource id.
     */
    if (sc->flags & OCE_FLAGS_USING_MSIX)
        rr = vector + 1;
    else
        rr = 0;
    ii->intr_res = bus_alloc_resource_any(sc->dev,
        SYS_RES_IRQ,
        &rr, RF_ACTIVE|RF_SHAREABLE);
    ii->irq_rr = rr;
    if (ii->intr_res == NULL) {
        device_printf(sc->dev,
            "Could not allocate interrupt\n");
        rc = ENXIO;
        return rc;
    }

    TASK_INIT(&ii->task, 0, isr, ii);
    ii->vector = vector;
    snprintf(ii->task_name, sizeof(ii->task_name), "oce_task[%d]",
        ii->vector);
    ii->tq = taskqueue_create_fast(ii->task_name,
        M_NOWAIT,
        taskqueue_thread_enqueue,
        &ii->tq);
    if (ii->tq == NULL) {
        device_printf(sc->dev,
            "Could not create taskqueue\n");
        return ENOMEM;
    }
    taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
        device_get_nameunit(sc->dev));

    ii->sc = sc;
    rc = bus_setup_intr(sc->dev,
        ii->intr_res,
        INTR_TYPE_NET,
        oce_fast_isr, NULL, ii, &ii->tag);
    return rc;
}
/*
 * Tear down every allocated interrupt vector — handler, taskqueue and
 * bus IRQ resource — then release MSI-X if it was in use.  Safe to
 * call on partially initialized vectors (each field is NULL-checked).
 */
void
oce_intr_free(POCE_SOFTC sc)
{
    int i = 0;

    for (i = 0; i < sc->intr_count; i++) {
        if (sc->intrs[i].tag != NULL)
            bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
                sc->intrs[i].tag);

        if (sc->intrs[i].tq != NULL)
            taskqueue_free(sc->intrs[i].tq);

        if (sc->intrs[i].intr_res != NULL)
            bus_release_resource(sc->dev, SYS_RES_IRQ,
                sc->intrs[i].irq_rr,
                sc->intrs[i].intr_res);

        sc->intrs[i].tag = NULL;
        sc->intrs[i].intr_res = NULL;
    }

    if (sc->flags & OCE_FLAGS_USING_MSIX)
        pci_release_msi(sc->dev);
}
/******************************************************************************
* Media callbacks functions *
******************************************************************************/
/*
 * ifmedia status callback: report link validity/activity and map the
 * firmware speed code to an ifmedia type, caching the numeric speed
 * in the softc.  Note that 20G and 25G links are reported to the
 * stack as IFM_10G_SR — no closer ifmedia constant is used here.
 */
static void
oce_media_status(if_t ifp, struct ifmediareq *req)
{
    POCE_SOFTC sc = (POCE_SOFTC) if_getsoftc(ifp);

    req->ifm_status = IFM_AVALID;
    req->ifm_active = IFM_ETHER;

    if (sc->link_status == 1)
        req->ifm_status |= IFM_ACTIVE;
    else
        return;

    switch (sc->link_speed) {
    case 1: /* 10 Mbps */
        req->ifm_active |= IFM_10_T | IFM_FDX;
        sc->speed = 10;
        break;
    case 2: /* 100 Mbps */
        req->ifm_active |= IFM_100_TX | IFM_FDX;
        sc->speed = 100;
        break;
    case 3: /* 1 Gbps */
        req->ifm_active |= IFM_1000_T | IFM_FDX;
        sc->speed = 1000;
        break;
    case 4: /* 10 Gbps */
        req->ifm_active |= IFM_10G_SR | IFM_FDX;
        sc->speed = 10000;
        break;
    case 5: /* 20 Gbps */
        req->ifm_active |= IFM_10G_SR | IFM_FDX;
        sc->speed = 20000;
        break;
    case 6: /* 25 Gbps */
        req->ifm_active |= IFM_10G_SR | IFM_FDX;
        sc->speed = 25000;
        break;
    case 7: /* 40 Gbps */
        req->ifm_active |= IFM_40G_SR4 | IFM_FDX;
        sc->speed = 40000;
        break;
    default:
        sc->speed = 0;
        break;
    }

    return;
}
/*
 * ifmedia change callback: the adapter's media is fixed, so this is a
 * no-op that always reports success.
 */
int
oce_media_change(if_t ifp)
{
    return (0);
}
/*
 * Decide whether a transmit packet must also be mirrored to the BMC
 * (OS-to-BMC passthrough) based on the management filters configured
 * on the adapter (broadcast/multicast/ARP, IPv6 ND/RA, DHCP,
 * NetBIOS).  On a positive decision *m_new receives a duplicate mbuf
 * (with a VLAN tag inserted) to be transmitted tagged as management.
 *
 * NOTE(review): the IPPROTO/port checks read via mtod(m, ...), which
 * points at the Ethernet header rather than the L3 header, so the
 * struct ip / ip6_hdr field offsets do not line up with the actual
 * headers — confirm intent against the Linux be2net os2bmc code
 * before changing anything here.
 */
static void oce_is_pkt_dest_bmc(POCE_SOFTC sc,
    struct mbuf *m, boolean_t *os2bmc,
    struct mbuf **m_new)
{
    struct ether_header *eh = NULL;

    eh = mtod(m, struct ether_header *);

    /* Disabled, or already a BMC-tagged copy: nothing to do. */
    if (!is_os2bmc_enabled(sc) || *os2bmc) {
        *os2bmc = FALSE;
        goto done;
    }

    if (!ETHER_IS_MULTICAST(eh->ether_dhost))
        goto done;

    if (is_mc_allowed_on_bmc(sc, eh) ||
        is_bc_allowed_on_bmc(sc, eh) ||
        is_arp_allowed_on_bmc(sc, ntohs(eh->ether_type))) {
        *os2bmc = TRUE;
        goto done;
    }

    /* IPv6 ICMP: router/neighbor advertisements per BMC filter. */
    if (mtod(m, struct ip *)->ip_p == IPPROTO_IPV6) {
        struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
        uint8_t nexthdr = ip6->ip6_nxt;
        if (nexthdr == IPPROTO_ICMPV6) {
            struct icmp6_hdr *icmp6 = (struct icmp6_hdr *)(ip6 + 1);
            switch (icmp6->icmp6_type) {
            case ND_ROUTER_ADVERT:
                *os2bmc = is_ipv6_ra_filt_enabled(sc);
                goto done;
            case ND_NEIGHBOR_ADVERT:
                *os2bmc = is_ipv6_na_filt_enabled(sc);
                goto done;
            default:
                break;
            }
        }
    }

    /* UDP: DHCP, NetBIOS and DHCPv6 RAS ports per BMC filter. */
    if (mtod(m, struct ip *)->ip_p == IPPROTO_UDP) {
        struct ip *ip = mtod(m, struct ip *);
        int iphlen = ip->ip_hl << 2;
        struct udphdr *uh = (struct udphdr *)((caddr_t)ip + iphlen);
        switch (uh->uh_dport) {
        case DHCP_CLIENT_PORT:
            *os2bmc = is_dhcp_client_filt_enabled(sc);
            goto done;
        case DHCP_SERVER_PORT:
            *os2bmc = is_dhcp_srvr_filt_enabled(sc);
            goto done;
        case NET_BIOS_PORT1:
        case NET_BIOS_PORT2:
            *os2bmc = is_nbios_filt_enabled(sc);
            goto done;
        case DHCPV6_RAS_PORT:
            *os2bmc = is_ipv6_ras_filt_enabled(sc);
            goto done;
        default:
            break;
        }
    }

done:
    if (*os2bmc) {
        /* Duplicate the frame for the BMC copy; give up quietly on
         * allocation failure. */
        *m_new = m_dup(m, M_NOWAIT);
        if (!*m_new) {
            *os2bmc = FALSE;
            return;
        }
        *m_new = oce_insert_vlan_tag(sc, *m_new, NULL);
    }
}
/*****************************************************************************
* Transmit routines functions *
*****************************************************************************/
/*
 * Transmit one mbuf on work queue wq_index: validate the frame, apply
 * ASIC workarounds (VLAN insertion for the stall errata, padding of
 * <=32-byte frames on Lancer/Skyhawk), set up TSO when requested,
 * DMA-map the chain, post one header WQE plus one fragment WQE per
 * segment (plus a dummy WQE on BE3 to keep an even count), and ring
 * the doorbell.  May also transmit a BMC duplicate (os2bmc).
 *
 * Returns 0 on success, EBUSY when the ring is full (caller requeues),
 * ENOMEM on a transient mapping shortage; other failures free the
 * mbuf and return the error.
 */
static int
oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
{
    int rc = 0, i, retry_cnt = 0;
    bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
    struct mbuf *m, *m_temp, *m_new = NULL;
    struct oce_wq *wq = sc->wq[wq_index];
    struct oce_packet_desc *pd;
    struct oce_nic_hdr_wqe *nichdr;
    struct oce_nic_frag_wqe *nicfrag;
    struct ether_header *eh = NULL;
    int num_wqes;
    uint32_t reg_value;
    boolean_t complete = TRUE;
    boolean_t os2bmc = FALSE;

    m = *mpp;
    if (!m)
        return EINVAL;

    if (!(m->m_flags & M_PKTHDR)) {
        rc = ENXIO;
        goto free_ret;
    }

    /* Don't allow non-TSO packets longer than MTU */
    /* NOTE(review): this drop path runs with rc still 0, i.e. the
     * oversize frame is freed but success is reported to the caller. */
    if (!is_tso_pkt(m)) {
        eh = mtod(m, struct ether_header *);
        if(m->m_pkthdr.len > ETHER_MAX_FRAME(sc->ifp, eh->ether_type, FALSE))
            goto free_ret;
    }

    /* ASIC stall errata: insert a VLAN tag to change the frame. */
    if(oce_tx_asic_stall_verify(sc, m)) {
        m = oce_insert_vlan_tag(sc, m, &complete);
        if(!m) {
            device_printf(sc->dev, "Insertion unsuccessful\n");
            return 0;
        }
    }

    /* Lancer, SH ASIC has a bug wherein Packets that are 32 bytes or less
     * may cause a transmit stall on that port. So the work-around is to
     * pad short packets (<= 32 bytes) to a 36-byte length.
     */
    if(IS_SH(sc) || IS_XE201(sc) ) {
        if(m->m_pkthdr.len <= 32) {
            char buf[36];
            bzero((void *)buf, 36);
            m_append(m, (36 - m->m_pkthdr.len), buf);
        }
    }

tx_start:
    if (m->m_pkthdr.csum_flags & CSUM_TSO) {
        /* consolidate packet buffers for TSO/LSO segment offload */
#if defined(INET6) || defined(INET)
        m = oce_tso_setup(sc, mpp);
#else
        m = NULL;
#endif
        if (m == NULL) {
            rc = ENXIO;
            goto free_ret;
        }
    }

    pd = &wq->pckts[wq->pkt_desc_head];

retry:
    rc = bus_dmamap_load_mbuf_sg(wq->tag,
        pd->map,
        m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
    if (rc == 0) {
        num_wqes = pd->nsegs + 1;

        if (IS_BE(sc) || IS_SH(sc)) {
            /*Dummy required only for BE3.*/
            if (num_wqes & 1)
                num_wqes++;
        }

        if (num_wqes >= RING_NUM_FREE(wq->ring)) {
            bus_dmamap_unload(wq->tag, pd->map);
            return EBUSY;
        }
        atomic_store_rel_int(&wq->pkt_desc_head,
            (wq->pkt_desc_head + 1) % \
             OCE_WQ_PACKET_ARRAY_SIZE);
        bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
        pd->mbuf = m;

        /* Fill in the header WQE (offload flags, lengths, VLAN). */
        nichdr =
            RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
        nichdr->u0.dw[0] = 0;
        nichdr->u0.dw[1] = 0;
        nichdr->u0.dw[2] = 0;
        nichdr->u0.dw[3] = 0;

        nichdr->u0.s.complete = complete;
        nichdr->u0.s.mgmt = os2bmc;
        nichdr->u0.s.event = 1;
        nichdr->u0.s.crc = 1;
        nichdr->u0.s.forward = 0;
        nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
        nichdr->u0.s.udpcs =
            (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
        nichdr->u0.s.tcpcs =
            (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
        nichdr->u0.s.num_wqe = num_wqes;
        nichdr->u0.s.total_length = m->m_pkthdr.len;

        if (m->m_flags & M_VLANTAG) {
            nichdr->u0.s.vlan = 1; /*Vlan present*/
            nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
        }

        if (m->m_pkthdr.csum_flags & CSUM_TSO) {
            if (m->m_pkthdr.tso_segsz) {
                nichdr->u0.s.lso = 1;
                nichdr->u0.s.lso_mss = m->m_pkthdr.tso_segsz;
            }
            /* NOTE(review): "!IS_BE(sc) || !IS_SH(sc)" is always
             * true (a chip cannot be both families); "&&" looks
             * intended — confirm before changing. */
            if (!IS_BE(sc) || !IS_SH(sc))
                nichdr->u0.s.ipcs = 1;
        }

        RING_PUT(wq->ring, 1);
        atomic_add_int(&wq->ring->num_used, 1);

        /* One fragment WQE per DMA segment. */
        for (i = 0; i < pd->nsegs; i++) {
            nicfrag =
                RING_GET_PRODUCER_ITEM_VA(wq->ring,
                    struct oce_nic_frag_wqe);
            nicfrag->u0.s.rsvd0 = 0;
            nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
            nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
            nicfrag->u0.s.frag_len = segs[i].ds_len;
            pd->wqe_idx = wq->ring->pidx;
            RING_PUT(wq->ring, 1);
            atomic_add_int(&wq->ring->num_used, 1);
        }
        /* Even-count padding WQE for BE3. */
        if (num_wqes > (pd->nsegs + 1)) {
            nicfrag =
                RING_GET_PRODUCER_ITEM_VA(wq->ring,
                    struct oce_nic_frag_wqe);
            nicfrag->u0.dw[0] = 0;
            nicfrag->u0.dw[1] = 0;
            nicfrag->u0.dw[2] = 0;
            nicfrag->u0.dw[3] = 0;
            pd->wqe_idx = wq->ring->pidx;
            RING_PUT(wq->ring, 1);
            atomic_add_int(&wq->ring->num_used, 1);
            pd->nsegs++;
        }

        if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
        wq->tx_stats.tx_reqs++;
        wq->tx_stats.tx_wrbs += num_wqes;
        wq->tx_stats.tx_bytes += m->m_pkthdr.len;
        wq->tx_stats.tx_pkts++;

        bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        reg_value = (num_wqes << 16) | wq->wq_id;

        /* if os2bmc is not enabled or if the pkt is already tagged as
           bmc, do nothing
        */
        oce_is_pkt_dest_bmc(sc, m, &os2bmc, &m_new);

        if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
        if (m->m_flags & M_MCAST)
            if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, 1);
        ETHER_BPF_MTAP(sc->ifp, m);

        /* Ring the doorbell: WQE count in the high half, WQ id low. */
        OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);

    } else if (rc == EFBIG) {
        /* Too many segments: defragment once, then give up. */
        if (retry_cnt == 0) {
            m_temp = m_defrag(m, M_NOWAIT);
            if (m_temp == NULL)
                goto free_ret;
            m = m_temp;
            *mpp = m_temp;
            retry_cnt = retry_cnt + 1;
            goto retry;
        } else
            goto free_ret;
    } else if (rc == ENOMEM)
        return rc;
    else
        goto free_ret;

    /* Send the BMC duplicate through the same path, tagged as mgmt. */
    if (os2bmc) {
        m = m_new;
        goto tx_start;
    }

    return 0;

free_ret:
    m_freem(*mpp);
    *mpp = NULL;
    return rc;
}
/*
 * Reclaim one completed TX packet descriptor: advance the tail index,
 * release the DMA mapping, free the mbuf, and clear IFF_DRV_OACTIVE /
 * restart transmission once the ring has drained below half
 * occupancy.
 */
static void
oce_process_tx_completion(struct oce_wq *wq)
{
    struct oce_packet_desc *pd;
    POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
    struct mbuf *m;

    pd = &wq->pckts[wq->pkt_desc_tail];
    atomic_store_rel_int(&wq->pkt_desc_tail,
        (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
    /* nsegs + 1 covers the header WQE posted alongside the fragments. */
    atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
    bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
    bus_dmamap_unload(wq->tag, pd->map);

    m = pd->mbuf;
    m_freem(m);
    pd->mbuf = NULL;

    if (if_getdrvflags(sc->ifp) & IFF_DRV_OACTIVE) {
        if (wq->ring->num_used < (wq->ring->num_items / 2)) {
            if_setdrvflagbits(sc->ifp, 0, (IFF_DRV_OACTIVE));
            oce_tx_restart(sc, wq);
        }
    }
}
/*
 * Kick the deferred transmit task for a work queue when the interface
 * is running and its buf_ring still holds queued packets.
 */
static void
oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
{
    if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == IFF_DRV_RUNNING &&
        !drbr_empty(sc->ifp, wq->br))
        taskqueue_enqueue(taskqueue_swi, &wq->txtask);
}
#if defined(INET6) || defined(INET)
/*
 * Prepare an mbuf for TSO: ensure it is writable, locate the TCP
 * header for IPv4 or IPv6 frames (VLAN-encap aware), and pull the
 * full Ethernet+IP+TCP header chain into the first mbuf.  Returns the
 * (possibly replaced) mbuf, or NULL for non-TCP frames or on
 * allocation failure.
 * NOTE(review): the IPv6 branch assumes no extension headers sit
 * between the fixed header and TCP — confirm that is acceptable.
 */
static struct mbuf *
oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
{
    struct mbuf *m;
#ifdef INET
    struct ip *ip;
#endif
#ifdef INET6
    struct ip6_hdr *ip6;
#endif
    struct ether_vlan_header *eh;
    struct tcphdr *th;
    uint16_t etype;
    int total_len = 0, ehdrlen = 0;

    m = *mpp;

    /* Work on a private copy when the chain is shared/read-only. */
    if (M_WRITABLE(m) == 0) {
        m = m_dup(*mpp, M_NOWAIT);
        if (!m)
            return NULL;
        m_freem(*mpp);
        *mpp = m;
    }

    eh = mtod(m, struct ether_vlan_header *);
    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
        etype = ntohs(eh->evl_proto);
        ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
    } else {
        etype = ntohs(eh->evl_encap_proto);
        ehdrlen = ETHER_HDR_LEN;
    }

    switch (etype) {
#ifdef INET
    case ETHERTYPE_IP:
        ip = (struct ip *)(m->m_data + ehdrlen);
        if (ip->ip_p != IPPROTO_TCP)
            return NULL;
        th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));

        total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
        break;
#endif
#ifdef INET6
    case ETHERTYPE_IPV6:
        ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
        if (ip6->ip6_nxt != IPPROTO_TCP)
            return NULL;
        th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));

        total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
        break;
#endif
    default:
        return NULL;
    }

    /* Make the whole header chain contiguous in the first mbuf. */
    m = m_pullup(m, total_len);
    *mpp = m;
    return m;
}
#endif /* INET6 || INET */
/*
 * Taskqueue handler that restarts transmission on one work queue by
 * draining its buf_ring through the multiqueue transmit path.
 */
void
oce_tx_task(void *arg, int npending)
{
    struct oce_wq *wq = arg;
    POCE_SOFTC sc = wq->parent;
    int err;

    LOCK(&wq->tx_lock);
    err = oce_multiq_transmit(sc->ifp, NULL, wq);
    if (err != 0)
        device_printf(sc->dev,
            "TX[%d] restart failed\n", wq->queue_index);
    UNLOCK(&wq->tx_lock);
}
/*
 * Legacy if_start entry point: drain the interface send queue onto
 * the default work queue (0).  When oce_tx() fails, the mbuf is
 * prepended back onto the send queue and IFF_DRV_OACTIVE is set so
 * TX completion can restart transmission later.
 */
void
oce_start(if_t ifp)
{
    POCE_SOFTC sc = if_getsoftc(ifp);
    struct mbuf *m;
    int rc = 0;
    int def_q = 0; /* Default tx queue is 0 */

    if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING)
        return;

    if (!sc->link_status)
        return;

    while (true) {
        m = if_dequeue(sc->ifp);
        if (m == NULL)
            break;

        LOCK(&sc->wq[def_q]->tx_lock);
        rc = oce_tx(sc, &m, def_q);
        UNLOCK(&sc->wq[def_q]->tx_lock);
        if (rc) {
            /* oce_tx left the mbuf alive (e.g. EBUSY): requeue. */
            if (m != NULL) {
                sc->wq[def_q]->tx_stats.tx_stops ++;
                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
                if_sendq_prepend(ifp, m);
                m = NULL;
            }
            break;
        }
    }
}
/* Handle the Completion Queue for transmit */
/* Handle the Completion Queue for transmit */
/*
 * Walk the TX completion queue under the completion lock: for each
 * valid CQE, byte-swap it, move the WQ consumer index to just past
 * the completed WQE and reclaim the packet descriptor, then re-arm
 * the CQ with the number of entries consumed.  Returns that count.
 */
uint16_t
oce_wq_handler(void *arg)
{
    struct oce_wq *wq = (struct oce_wq *)arg;
    POCE_SOFTC sc = wq->parent;
    struct oce_cq *cq = wq->cq;
    struct oce_nic_tx_cqe *cqe;
    int num_cqes = 0;

    LOCK(&wq->tx_compl_lock);
    bus_dmamap_sync(cq->ring->dma.tag,
        cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
    cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
    /* dw[3] nonzero marks a valid (hardware-written) CQE. */
    while (cqe->u0.dw[3]) {
        DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
        wq->ring->cidx = cqe->u0.s.wqe_index + 1;
        if (wq->ring->cidx >= wq->ring->num_items)
            wq->ring->cidx -= wq->ring->num_items;

        oce_process_tx_completion(wq);
        wq->tx_stats.tx_compl++;
        cqe->u0.dw[3] = 0;
        RING_GET(cq->ring, 1);
        bus_dmamap_sync(cq->ring->dma.tag,
            cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
        cqe =
            RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
        num_cqes++;
    }

    if (num_cqes)
        oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

    UNLOCK(&wq->tx_compl_lock);
    return num_cqes;
}
/*
 * Multiqueue transmit: enqueue the mbuf (if any) on the WQ's buf_ring
 * and drain the ring through oce_tx().  When oce_tx() fails, either
 * advance past a consumed mbuf or put it back and mark the interface
 * OACTIVE so TX completion restarts the drain.  Called with the WQ
 * tx_lock held.
 */
static int
oce_multiq_transmit(if_t ifp, struct mbuf *m, struct oce_wq *wq)
{
    POCE_SOFTC sc = if_getsoftc(ifp);
    int status = 0, queue_index = 0;
    struct mbuf *next = NULL;
    struct buf_ring *br = NULL;

    br = wq->br;
    queue_index = wq->queue_index;

    /* Interface not running: just queue the packet for later. */
    if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING) {
        if (m != NULL)
            status = drbr_enqueue(ifp, br, m);
        return status;
    }

    if (m != NULL) {
        if ((status = drbr_enqueue(ifp, br, m)) != 0)
            return status;
    }
    while ((next = drbr_peek(ifp, br)) != NULL) {
        if (oce_tx(sc, &next, queue_index)) {
            if (next == NULL) {
                /* oce_tx consumed/freed the mbuf: move on. */
                drbr_advance(ifp, br);
            } else {
                drbr_putback(ifp, br, next);
                wq->tx_stats.tx_stops ++;
                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
            }
            break;
        }
        drbr_advance(ifp, br);
    }

    return 0;
}
/*****************************************************************************
* Receive routines functions *
*****************************************************************************/
/*
 * Fix up the IP and TCP headers of a hardware-LRO coalesced frame so the
 * stack sees one consistent TCP segment: total length, TTL/hop-limit,
 * ACK number, window, PUSH flag and (optionally) the timestamp option
 * are rewritten from the part-1/part-2 CQE contents.
 */
static void
oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2)
{
    uint32_t *p;
    struct ether_header *eh = NULL;
    struct tcphdr *tcp_hdr = NULL;
    struct ip *ip4_hdr = NULL;
    struct ip6_hdr *ip6 = NULL;
    uint32_t payload_len = 0;

    eh = mtod(m, struct ether_header *);
    /* correct IP header */
    if(!cqe2->ipv6_frame) {
        ip4_hdr = (struct ip *)((char*)eh + sizeof(struct ether_header));
        ip4_hdr->ip_ttl = cqe2->frame_lifespan;
        ip4_hdr->ip_len = htons(cqe2->coalesced_size - sizeof(struct ether_header));
        tcp_hdr = (struct tcphdr *)((char*)ip4_hdr + sizeof(struct ip));
    }else {
        ip6 = (struct ip6_hdr *)((char*)eh + sizeof(struct ether_header));
        ip6->ip6_ctlun.ip6_un1.ip6_un1_hlim = cqe2->frame_lifespan;
        payload_len = cqe2->coalesced_size - sizeof(struct ether_header)
                        - sizeof(struct ip6_hdr);
        ip6->ip6_ctlun.ip6_un1.ip6_un1_plen = htons(payload_len);
        tcp_hdr = (struct tcphdr *)((char*)ip6 + sizeof(struct ip6_hdr));
    }
    /* correct tcp header */
    tcp_hdr->th_ack = htonl(cqe2->tcp_ack_num);
    if(cqe2->push) {
        tcp_hdr->th_flags |= TH_PUSH;
    }
    tcp_hdr->th_win = htons(cqe2->tcp_window);
    /* Hardware already verified the checksum; mark it "filled in". */
    tcp_hdr->th_sum = 0xffff;
    if(cqe2->ts_opt) {
        /* NOTE(review): assumes the timestamp is the first TCP option
         * (the +2 skips its kind/length bytes) -- confirm layout. */
        p = (uint32_t *)((char*)tcp_hdr + sizeof(struct tcphdr) + 2);
        *p = cqe1->tcp_timestamp_val;
        *(p+1) = cqe1->tcp_timestamp_ecr;
    }

    return;
}
/*
 * Pull cqe_info->num_frags receive buffers off the RQ and link them into
 * a single mbuf chain of cqe_info->pkt_size bytes; the head is returned
 * through *m (untouched if the RQ ring is unexpectedly empty).  Hardware
 * checksum results are transcribed onto the head mbuf's pkthdr.
 */
static void
oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m)
{
    POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
    uint32_t i = 0, frag_len = 0;
    uint32_t len = cqe_info->pkt_size;
    struct oce_packet_desc *pd;
    struct mbuf *tail = NULL;

    for (i = 0; i < cqe_info->num_frags; i++) {
        if (rq->ring->cidx == rq->ring->pidx) {
            device_printf(sc->dev,
                "oce_rx_mbuf_chain: Invalid RX completion - Queue is empty\n");
            return;
        }
        pd = &rq->pckts[rq->ring->cidx];
        /* NOTE(review): POSTWRITE on a buffer the device wrote into looks
         * like it should be BUS_DMASYNC_POSTREAD -- confirm. */
        bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(rq->tag, pd->map);
        RING_GET(rq->ring, 1);
        rq->pending--;

        /* The last fragment may be shorter than the RQ fragment size. */
        frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
        pd->mbuf->m_len = frag_len;

        if (tail != NULL) {
            /* additional fragments */
            pd->mbuf->m_flags &= ~M_PKTHDR;
            tail->m_next = pd->mbuf;
            if(rq->islro)
                tail->m_nextpkt = NULL;
            tail = pd->mbuf;
        } else {
            /* first fragment, fill out much of the packet header */
            pd->mbuf->m_pkthdr.len = len;
            if(rq->islro)
                pd->mbuf->m_nextpkt = NULL;
            pd->mbuf->m_pkthdr.csum_flags = 0;
            if (IF_CSUM_ENABLED(sc)) {
                if (cqe_info->l4_cksum_pass) {
                    if(!cqe_info->ipv6_frame) { /* IPV4 */
                        pd->mbuf->m_pkthdr.csum_flags |=
                            (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
                    }else { /* IPV6 frame */
                        if(rq->islro) {
                            pd->mbuf->m_pkthdr.csum_flags |=
                                (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
                        }
                    }
                    pd->mbuf->m_pkthdr.csum_data = 0xffff;
                }
                if (cqe_info->ip_cksum_pass) {
                    pd->mbuf->m_pkthdr.csum_flags |=
                        (CSUM_IP_CHECKED|CSUM_IP_VALID);
                }
            }
            *m = tail = pd->mbuf;
        }
        /* Slot is refilled later by oce_alloc_rx_bufs(). */
        pd->mbuf = NULL;
        len -= frag_len;
    }

    return;
}
/*
 * Deliver one hardware-LRO completion to the stack.  Called either with a
 * singleton CQE (cqe2 == NULL) or with the stashed part-1 CQE plus its
 * matching part-2 CQE for a coalesced frame; in the latter case the
 * IP/TCP headers are rewritten via oce_correct_header().
 */
static void
oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2)
{
    POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
    struct nic_hwlro_cqe_part1 *cqe1 = NULL;
    struct mbuf *m = NULL;
    struct oce_common_cqe_info cq_info;

    /* parse cqe */
    if(cqe2 == NULL) {
        cq_info.pkt_size = cqe->pkt_size;
        cq_info.vtag = cqe->vlan_tag;
        cq_info.l4_cksum_pass = cqe->l4_cksum_pass;
        cq_info.ip_cksum_pass = cqe->ip_cksum_pass;
        cq_info.ipv6_frame = cqe->ipv6_frame;
        cq_info.vtp = cqe->vtp;
        cq_info.qnq = cqe->qnq;
    }else {
        cqe1 = (struct nic_hwlro_cqe_part1 *)cqe;
        cq_info.pkt_size = cqe2->coalesced_size;
        cq_info.vtag = cqe2->vlan_tag;
        cq_info.l4_cksum_pass = cqe2->l4_cksum_pass;
        cq_info.ip_cksum_pass = cqe2->ip_cksum_pass;
        cq_info.ipv6_frame = cqe2->ipv6_frame;
        cq_info.vtp = cqe2->vtp;
        cq_info.qnq = cqe1->qnq;
    }

    cq_info.vtag = BSWAP_16(cq_info.vtag);

    /* Round the fragment count up to cover the whole frame. */
    cq_info.num_frags = cq_info.pkt_size / rq->cfg.frag_size;
    if(cq_info.pkt_size % rq->cfg.frag_size)
        cq_info.num_frags++;

    oce_rx_mbuf_chain(rq, &cq_info, &m);
    if (m) {
        if(cqe2) {
            //assert(cqe2->valid != 0);
            //assert(cqe2->cqe_type != 2);
            oce_correct_header(m, cqe1, cqe2);
        }

        m->m_pkthdr.rcvif = sc->ifp;
        /* RSS queues are numbered from 1; queue 0 is the default RQ. */
        if (rq->queue_index)
            m->m_pkthdr.flowid = (rq->queue_index - 1);
        else
            m->m_pkthdr.flowid = rq->queue_index;
        M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);

        /* This determines if the vlan tag is valid */
        if (cq_info.vtp) {
            if (sc->function_mode & FNM_FLEX10_MODE) {
                /* FLEX10. If QnQ is not set, neglect VLAN */
                if (cq_info.qnq) {
                    m->m_pkthdr.ether_vtag = cq_info.vtag;
                    m->m_flags |= M_VLANTAG;
                }
            } else if (sc->pvid != (cq_info.vtag & VLAN_VID_MASK)) {
                /* In UMC mode generally pvid will be striped by
                   hw. But in some cases we have seen it comes
                   with pvid. So if pvid == vlan, neglect vlan.
                 */
                m->m_pkthdr.ether_vtag = cq_info.vtag;
                m->m_flags |= M_VLANTAG;
            }
        }

        if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
        if_input(sc->ifp, m);

        /* Update rx stats per queue */
        rq->rx_stats.rx_pkts++;
        rq->rx_stats.rx_bytes += cq_info.pkt_size;
        rq->rx_stats.rx_frags += cq_info.num_frags;
        rq->rx_stats.rx_ucast_pkts++;
    }
    return;
}
/*
 * Deliver one normal (non-LRO) RX completion: validate the CQE, build the
 * mbuf chain, transcribe VLAN and checksum metadata, then hand the packet
 * to software LRO (when eligible) or directly to the stack.
 */
static void
oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
    POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
    int len;
    struct mbuf *m = NULL;
    struct oce_common_cqe_info cq_info;
    uint16_t vtag = 0;

    /* Is it a flush compl that has no data */
    if(!cqe->u0.s.num_fragments)
        goto exit;

    len = cqe->u0.s.pkt_size;
    if (!len) {
        /*partial DMA workaround for Lancer*/
        oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
        goto exit;
    }

    /* Completion for another port: drop the buffers. */
    if (!oce_cqe_portid_valid(sc, cqe)) {
        oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
        goto exit;
    }

    /* Get vlan_tag value */
    if(IS_BE(sc) || IS_SH(sc))
        vtag = BSWAP_16(cqe->u0.s.vlan_tag);
    else
        vtag = cqe->u0.s.vlan_tag;

    cq_info.l4_cksum_pass = cqe->u0.s.l4_cksum_pass;
    cq_info.ip_cksum_pass = cqe->u0.s.ip_cksum_pass;
    cq_info.ipv6_frame = cqe->u0.s.ip_ver;
    cq_info.num_frags = cqe->u0.s.num_fragments;
    cq_info.pkt_size = cqe->u0.s.pkt_size;

    oce_rx_mbuf_chain(rq, &cq_info, &m);
    if (m) {
        m->m_pkthdr.rcvif = sc->ifp;
        /* RSS queues are numbered from 1; queue 0 is the default RQ. */
        if (rq->queue_index)
            m->m_pkthdr.flowid = (rq->queue_index - 1);
        else
            m->m_pkthdr.flowid = rq->queue_index;
        M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);

        /* This determines if the vlan tag is valid */
        if (oce_cqe_vtp_valid(sc, cqe)) {
            if (sc->function_mode & FNM_FLEX10_MODE) {
                /* FLEX10. If QnQ is not set, neglect VLAN */
                if (cqe->u0.s.qnq) {
                    m->m_pkthdr.ether_vtag = vtag;
                    m->m_flags |= M_VLANTAG;
                }
            } else if (sc->pvid != (vtag & VLAN_VID_MASK)) {
                /* In UMC mode generally pvid will be striped by
                   hw. But in some cases we have seen it comes
                   with pvid. So if pvid == vlan, neglect vlan.
                 */
                m->m_pkthdr.ether_vtag = vtag;
                m->m_flags |= M_VLANTAG;
            }
        }

        if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
#if defined(INET6) || defined(INET)
        /* Try to queue to LRO */
        if (IF_LRO_ENABLED(sc) &&
            (cqe->u0.s.ip_cksum_pass) &&
            (cqe->u0.s.l4_cksum_pass) &&
            (!cqe->u0.s.ip_ver)       &&
            (rq->lro.lro_cnt != 0)) {
            if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
                rq->lro_pkts_queued ++;
                goto post_done;
            }
            /* If LRO posting fails then try to post to STACK */
        }
#endif

        if_input(sc->ifp, m);
#if defined(INET6) || defined(INET)
post_done:
#endif
        /* Update rx stats per queue */
        rq->rx_stats.rx_pkts++;
        rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
        rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
        if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
            rq->rx_stats.rx_mcast_pkts++;
        if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
            rq->rx_stats.rx_ucast_pkts++;
    }
exit:
    return;
}
/*
 * Drop num_frags receive fragments from the RQ without passing them up.
 * Used for flush/error completions and the Lancer partial-DMA workaround.
 */
void
oce_discard_rx_comp(struct oce_rq *rq, int num_frags)
{
    uint32_t i = 0;
    struct oce_packet_desc *pd;
    POCE_SOFTC sc = (POCE_SOFTC) rq->parent;

    for (i = 0; i < num_frags; i++) {
        if (rq->ring->cidx == rq->ring->pidx) {
            device_printf(sc->dev,
                "oce_discard_rx_comp: Invalid RX completion - Queue is empty\n");
            return;
        }
        pd = &rq->pckts[rq->ring->cidx];
        bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(rq->tag, pd->map);
        if (pd->mbuf != NULL) {
            m_freem(pd->mbuf);
            pd->mbuf = NULL;
        }
        RING_GET(rq->ring, 1);
        rq->pending--;
    }
}
/*
 * Report whether this RX completion carries a VLAN tag.  The bit lives in
 * a different CQE layout (v1) on BE3-native adapters.
 */
static int
oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
    if (sc->be3_native)
        return ((struct oce_nic_rx_cqe_v1 *)cqe)->u0.s.vlan_tag_present;

    return cqe->u0.s.vlan_tag_present;
}
/*
 * Check that the completion belongs to our port.  Only BE3-native on
 * BE/Skyhawk encodes a port id in the (v1) CQE; for BE3 legacy and
 * Lancer the check is a no-op and every completion is accepted.
 */
static int
oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
    struct oce_nic_rx_cqe_v1 *v1_cqe;

    if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
        v1_cqe = (struct oce_nic_rx_cqe_v1 *)cqe;
        return (sc->port_id == v1_cqe->u0.s.port) ? 1 : 0;
    }

    return 1;
}
#if defined(INET6) || defined(INET)
/* Push any software-LRO-held packets up the stack and reset the count. */
void
oce_rx_flush_lro(struct oce_rq *rq)
{
    struct lro_ctrl *lro = &rq->lro;
    POCE_SOFTC sc = (POCE_SOFTC) rq->parent;

    if (!IF_LRO_ENABLED(sc))
        return;

    tcp_lro_flush_all(lro);
    rq->lro_pkts_queued = 0;

    return;
}
/*
 * Initialize a software-LRO context for every RQ.  Returns 0 on success
 * or the first tcp_lro_init() error.
 * NOTE(review): contexts initialized before a failure are not torn down
 * here; presumably the caller runs oce_free_lro() on the error path --
 * confirm.
 */
static int
oce_init_lro(POCE_SOFTC sc)
{
    struct lro_ctrl *lro = NULL;
    int i = 0, rc = 0;

    for (i = 0; i < sc->nrqs; i++) {
        lro = &sc->rq[i]->lro;
        rc = tcp_lro_init(lro);
        if (rc != 0) {
            device_printf(sc->dev, "LRO init failed\n");
            return rc;
        }
        lro->ifp = sc->ifp;
    }

    return rc;
}
/* Free the per-RQ software-LRO contexts set up by oce_init_lro(). */
void
oce_free_lro(POCE_SOFTC sc)
{
    struct lro_ctrl *lro = NULL;
    int i = 0;

    for (i = 0; i < sc->nrqs; i++) {
        lro = &sc->rq[i]->lro;
        /* 'lro' is the address of an embedded member and can never be
         * NULL; the check below is redundant but harmless. */
        if (lro)
            tcp_lro_free(lro);
    }
}
#endif
/*
 * Refill the RQ with up to 'count' jumbo-cluster mbufs, DMA-map each one
 * into an RQE, then notify hardware of the newly posted buffers through
 * the doorbell (in chunks limited by OCE_[HWLRO_]MAX_RQ_POSTS).
 * Always returns 0; a failed allocation simply posts fewer buffers.
 */
int
oce_alloc_rx_bufs(struct oce_rq *rq, int count)
{
    POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
    int i, rc;
    struct oce_packet_desc *pd;
    bus_dma_segment_t segs[6];
    int nsegs, added = 0;
    struct oce_nic_rqe *rqe;
    pd_rxulp_db_t rxdb_reg;
    uint32_t val = 0;
    uint32_t oce_max_rq_posts = 64;

    bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
    for (i = 0; i < count; i++) {
        pd = &rq->pckts[rq->ring->pidx];
        pd->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, oce_rq_buf_size);
        if (pd->mbuf == NULL) {
            device_printf(sc->dev, "mbuf allocation failed, size = %d\n",oce_rq_buf_size);
            break;
        }
        pd->mbuf->m_nextpkt = NULL;

        pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = rq->cfg.frag_size;

        rc = bus_dmamap_load_mbuf_sg(rq->tag,
                         pd->map,
                         pd->mbuf,
                         segs, &nsegs, BUS_DMA_NOWAIT);
        if (rc) {
            m_free(pd->mbuf);
            device_printf(sc->dev, "bus_dmamap_load_mbuf_sg failed rc = %d\n", rc);
            break;
        }

        if (nsegs != 1) {
            /* NOTE(review): this path neither frees pd->mbuf nor unloads
             * pd->map, and decrementing i can retry the same slot
             * indefinitely -- looks like a leak/spin hazard; confirm a
             * jumbo cluster can ever map to multiple segments here. */
            i--;
            continue;
        }

        bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);

        rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
        rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
        rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
        DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
        RING_PUT(rq->ring, 1);
        added++;
        rq->pending++;
    }
    oce_max_rq_posts = sc->enable_hwlro ? OCE_HWLRO_MAX_RQ_POSTS : OCE_MAX_RQ_POSTS;
    if (added != 0) {
        /* Ring the doorbell in full-sized batches first... */
        for (i = added / oce_max_rq_posts; i > 0; i--) {
            rxdb_reg.bits.num_posted = oce_max_rq_posts;
            rxdb_reg.bits.qid = rq->rq_id;
            if(rq->islro) {
                val |= rq->rq_id & DB_LRO_RQ_ID_MASK;
                val |= oce_max_rq_posts << 16;
                OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
            }else {
                OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
            }
            added -= oce_max_rq_posts;
        }
        /* ...then post the remainder, if any. */
        if (added > 0) {
            rxdb_reg.bits.qid = rq->rq_id;
            rxdb_reg.bits.num_posted = added;
            if(rq->islro) {
                val |= rq->rq_id & DB_LRO_RQ_ID_MASK;
                val |= added << 16;
                OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
            }else {
                OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
            }
        }
    }

    return 0;
}
/*
 * After processing num_cqes completions, re-arm the RQ's CQ and top the
 * RQ back up with fresh receive buffers (different thresholds for the
 * hardware-LRO and normal paths).
 */
static void
oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq)
{
    if (num_cqes) {
        oce_arm_cq(sc, rq->cq->cq_id, num_cqes, FALSE);
        if(!sc->enable_hwlro) {
            if((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) > 1)
                oce_alloc_rx_bufs(rq, ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) - 1));
        }else {
            if ((OCE_RQ_PACKET_ARRAY_SIZE -1 - rq->pending) > 64)
                oce_alloc_rx_bufs(rq, 64);
        }
    }

    return;
}
/*
 * Completion handler for a hardware-LRO RQ.  Singleton CQEs (type 0) are
 * delivered directly; coalesced frames arrive as a part-1 CQE (type 1,
 * stashed in rq->cqe_firstpart) followed by a part-2 CQE (type 2).
 * Handles at most oce_max_rsp_handled (8 on XE201) CQEs per call, then
 * replenishes RX buffers and re-arms the CQ.  Always returns 0.
 */
uint16_t
oce_rq_handler_lro(void *arg)
{
    struct oce_rq *rq = (struct oce_rq *)arg;
    struct oce_cq *cq = rq->cq;
    POCE_SOFTC sc = rq->parent;
    struct nic_hwlro_singleton_cqe *cqe;
    struct nic_hwlro_cqe_part2 *cqe2;
    int num_cqes = 0;

    LOCK(&rq->rx_lock);
    bus_dmamap_sync(cq->ring->dma.tag,cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
    cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
    while (cqe->valid) {
        if(cqe->cqe_type == 0) { /* singleton cqe */
            /* we should not get singleton cqe after cqe1 on same rq */
            if(rq->cqe_firstpart != NULL) {
                device_printf(sc->dev, "Got singleton cqe after cqe1 \n");
                goto exit_rq_handler_lro;
            }
            if(cqe->error != 0) {
                rq->rx_stats.rxcp_err++;
                if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
            }
            oce_rx_lro(rq, cqe, NULL);
            rq->rx_stats.rx_compl++;
            cqe->valid = 0;   /* return the CQE to the hardware */
            RING_GET(cq->ring, 1);
            num_cqes++;
            if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
                break;
        }else if(cqe->cqe_type == 0x1) { /* first part */
            /* we should not get cqe1 after cqe1 on same rq */
            if(rq->cqe_firstpart != NULL) {
                device_printf(sc->dev, "Got cqe1 after cqe1 \n");
                goto exit_rq_handler_lro;
            }
            /* Stash part 1 until the matching part 2 arrives. */
            rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
            RING_GET(cq->ring, 1);
        }else if(cqe->cqe_type == 0x2) { /* second part */
            cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
            if(cqe2->error != 0) {
                rq->rx_stats.rxcp_err++;
                if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
            }
            /* We should not get cqe2 without cqe1 */
            if(rq->cqe_firstpart == NULL) {
                device_printf(sc->dev, "Got cqe2 without cqe1 \n");
                goto exit_rq_handler_lro;
            }
            oce_rx_lro(rq, (struct nic_hwlro_singleton_cqe *)rq->cqe_firstpart, cqe2);

            rq->rx_stats.rx_compl++;
            rq->cqe_firstpart->valid = 0;
            cqe2->valid = 0;
            rq->cqe_firstpart = NULL;

            RING_GET(cq->ring, 1);
            num_cqes += 2;   /* consumed both part-1 and part-2 entries */
            if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
                break;
        }

        bus_dmamap_sync(cq->ring->dma.tag,cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
        cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
    }
    oce_check_rx_bufs(sc, num_cqes, rq);
exit_rq_handler_lro:
    UNLOCK(&rq->rx_lock);
    return 0;
}
/*
 * Handle the Completion Queue for receive.  Dispatches to the hardware
 * LRO handler when the RQ is in LRO mode; otherwise consumes up to
 * oce_max_rsp_handled (8 on XE201) CQEs, delivering each packet through
 * oce_rx(), flushing software LRO periodically, then replenishing RX
 * buffers and re-arming the CQ.  Always returns 0.
 */
uint16_t
oce_rq_handler(void *arg)
{
    struct epoch_tracker et;
    struct oce_rq *rq = (struct oce_rq *)arg;
    struct oce_cq *cq = rq->cq;
    POCE_SOFTC sc = rq->parent;
    struct oce_nic_rx_cqe *cqe;
    int num_cqes = 0;

    NET_EPOCH_ENTER(et);
    if(rq->islro) {
        oce_rq_handler_lro(arg);
        NET_EPOCH_EXIT(et);
        return 0;
    }
    LOCK(&rq->rx_lock);
    bus_dmamap_sync(cq->ring->dma.tag,
            cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
    cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
    /* A non-zero dw[2] marks a CQE the hardware has filled in. */
    while (cqe->u0.dw[2]) {
        DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));

        if (cqe->u0.s.error == 0) {
            oce_rx(rq, cqe);
        } else {
            rq->rx_stats.rxcp_err++;
            if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
            /* Post L3/L4 errors to stack.*/
            oce_rx(rq, cqe);
        }
        rq->rx_stats.rx_compl++;
        cqe->u0.dw[2] = 0;   /* return the CQE to the hardware */

#if defined(INET6) || defined(INET)
        if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
            oce_rx_flush_lro(rq);
        }
#endif

        RING_GET(cq->ring, 1);
        bus_dmamap_sync(cq->ring->dma.tag,
                cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
        cqe =
            RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
        num_cqes++;
        if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
            break;
    }

#if defined(INET6) || defined(INET)
    if (IF_LRO_ENABLED(sc))
        oce_rx_flush_lro(rq);
#endif

    oce_check_rx_bufs(sc, num_cqes, rq);
    UNLOCK(&rq->rx_lock);
    NET_EPOCH_EXIT(et);
    return 0;
}
/*****************************************************************************
* Helper function prototypes in this file *
*****************************************************************************/
/*
 * Allocate and configure the ifnet for this adapter: media (autoselect),
 * driver callbacks, MTU, send queue, hardware-assist and capability
 * flags (csum/TSO/LRO/VLAN), then attach it to the Ethernet layer.
 * (Diff context: if_alloc() can no longer fail, so the function became
 * void and the ENOMEM path was removed.)
 */
-static int
+static void
oce_attach_ifp(POCE_SOFTC sc)
{
    sc->ifp = if_alloc(IFT_ETHER);
-    if (!sc->ifp)
-        return ENOMEM;

    ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
    ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

    if_setflags(sc->ifp, IFF_BROADCAST | IFF_MULTICAST);
    if_setioctlfn(sc->ifp, oce_ioctl);
    if_setstartfn(sc->ifp, oce_start);
    if_setinitfn(sc->ifp, oce_init);
    if_setmtu(sc->ifp, ETHERMTU);
    if_setsoftc(sc->ifp, sc);
    if_settransmitfn(sc->ifp, oce_multiq_start);
    if_setqflushfn(sc->ifp, oce_multiq_flush);

    if_initname(sc->ifp,
            device_get_name(sc->dev), device_get_unit(sc->dev));

    if_setsendqlen(sc->ifp, OCE_MAX_TX_DESC - 1);
    if_setsendqready(sc->ifp);

    if_sethwassist(sc->ifp, OCE_IF_HWASSIST);
    if_sethwassistbits(sc->ifp, CSUM_TSO, 0);
    if_sethwassistbits(sc->ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP), 0);

    if_setcapabilities(sc->ifp, OCE_IF_CAPABILITIES);
    if_setcapabilitiesbit(sc->ifp, IFCAP_HWCSUM, 0);
    if_setcapabilitiesbit(sc->ifp, IFCAP_VLAN_HWFILTER, 0);

#if defined(INET6) || defined(INET)
    if_setcapabilitiesbit(sc->ifp, IFCAP_TSO, 0);
    if_setcapabilitiesbit(sc->ifp, IFCAP_LRO, 0);
    if_setcapabilitiesbit(sc->ifp, IFCAP_VLAN_HWTSO, 0);
#endif

    if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));
    if_setbaudrate(sc->ifp, IF_Gbps(10));

    if_sethwtsomax(sc->ifp, 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
    if_sethwtsomaxsegcount(sc->ifp, OCE_MAX_TX_ELEMENTS);
    if_sethwtsomaxsegsize(sc->ifp, 4096);

    ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
-
-    return 0;
}
/*
 * VLAN-config registration callback: record a newly configured tag and
 * reprogram the hardware filter unless it has already overflowed.
 */
static void
oce_add_vlan(void *arg, if_t ifp, uint16_t vtag)
{
    POCE_SOFTC sc = if_getsoftc(ifp);

    /* Ignore events for foreign interfaces and out-of-range tags. */
    if (if_getsoftc(ifp) != arg || vtag == 0 || vtag > 4095)
        return;

    sc->vlan_tag[vtag] = 1;
    sc->vlans_added++;
    if (sc->vlans_added <= (sc->max_vlans + 1))
        oce_vid_config(sc);
}
/*
 * VLAN-unconfig registration callback: forget a tag and reprogram the
 * hardware filter.
 */
static void
oce_del_vlan(void *arg, if_t ifp, uint16_t vtag)
{
    POCE_SOFTC sc = if_getsoftc(ifp);

    /* Ignore events for foreign interfaces and out-of-range tags. */
    if (if_getsoftc(ifp) != arg || vtag == 0 || vtag > 4095)
        return;

    sc->vlan_tag[vtag] = 0;
    sc->vlans_added--;
    oce_vid_config(sc);
}
/*
* A max of 64 vlans can be configured in BE. If the user configures
* more, place the card in vlan promiscuous mode.
*/
/*
 * Program the adapter's VLAN filter from sc->vlan_tag[], or fall back to
 * VLAN-promiscuous mode when the filter table would overflow or hardware
 * filtering is disabled.  Returns the mailbox command status.
 */
static int
oce_vid_config(POCE_SOFTC sc)
{
    struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
    uint16_t ntags = 0, i;
    int status = 0;

    if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
            (if_getcapenable(sc->ifp) & IFCAP_VLAN_HWFILTER)) {
        /* Collect the currently configured tags into the filter list. */
        for (i = 0; i < MAX_VLANS; i++) {
            if (sc->vlan_tag[i]) {
                vtags[ntags].vtag = i;
                ntags++;
            }
        }
        if (ntags)
            status = oce_config_vlan(sc, (uint8_t) sc->if_id,
                        vtags, ntags, 1, 0);
    } else
        /* Too many tags or no HW filtering: go VLAN promiscuous. */
        status = oce_config_vlan(sc, (uint8_t) sc->if_id,
                        NULL, 0, 1, 1);
    return status;
}
/*
 * If the interface lladdr differs from the cached MAC, program the new
 * address into the adapter (add the new pmac first, then delete the old
 * one) and update the cached copy on success.
 */
static void
oce_mac_addr_set(POCE_SOFTC sc)
{
    uint32_t old_pmac_id = sc->pmac_id;
    int status = 0;

    /* bcmp() == 0 means the address is unchanged; nothing to do. */
    status = bcmp((if_getlladdr(sc->ifp)), sc->macaddr.mac_addr,
             sc->macaddr.size_of_struct);
    if (!status)
        return;

    status = oce_mbox_macaddr_add(sc, (uint8_t *)(if_getlladdr(sc->ifp)),
                    sc->if_id, &sc->pmac_id);
    if (!status) {
        status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
        bcopy((if_getlladdr(sc->ifp)), sc->macaddr.mac_addr,
             sc->macaddr.size_of_struct);
    }
    if (status)
        device_printf(sc->dev, "Failed update macaddress\n");
}
/*
 * Vendor passthrough ioctl: after validating the user-space cookie, copy
 * the embedded mailbox request into DMA memory, submit it to firmware and
 * copy the response back out.  The driver version string is patched into
 * GET_CNTL_ATTRIBUTES responses since firmware cannot know it.
 */
static int
oce_handle_passthrough(if_t ifp, caddr_t data)
{
    POCE_SOFTC sc = if_getsoftc(ifp);
    struct ifreq *ifr = (struct ifreq *)data;
    int rc = ENXIO;
    char cookie[32] = {0};
    void *priv_data = ifr_data_get_ptr(ifr);
    void *ioctl_ptr;
    uint32_t req_size;
    struct mbx_hdr req;
    OCE_DMA_MEM dma_mem;

    if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
        return EFAULT;

    if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
        return EINVAL;

    ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
    if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
        return EFAULT;

    req_size = le32toh(req.u0.req.request_length);
    /* Cap the untrusted user-supplied length before allocating. */
    if (req_size > 65536)
        return EINVAL;

    req_size += sizeof(struct mbx_hdr);
    rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
    if (rc)
        return ENOMEM;

    if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
        rc = EFAULT;
        goto dma_free;
    }

    rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
    if (rc) {
        rc = EIO;
        goto dma_free;
    }

    if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size)) {
        rc = EFAULT;
        goto dma_free;
    }

    /*
       firmware is filling all the attributes for this ioctl except
       the driver version..so fill it
     */
    if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
        struct mbx_common_get_cntl_attr *fw_cmd =
            (struct mbx_common_get_cntl_attr *)ioctl_ptr;
        _Static_assert(sizeof(COMPONENT_REVISION) <=
             sizeof(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str),
             "driver version string too long");

        rc = copyout(COMPONENT_REVISION,
            fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
            sizeof(COMPONENT_REVISION));
    }

dma_free:
    oce_dma_free(sc, &dma_mem);
    return rc;
}
/*
 * Adaptive interrupt coalescing, run from the 1 Hz local timer: estimate
 * the aggregate RX+TX packet rate served by each EQ and reprogram its
 * delay multiplier so the interrupt rate tracks the load.  A user-set
 * static delay (aic->enable == 0) bypasses the estimation.  Modified EQs
 * are flushed to firmware in batches of at most 8.
 */
static void
oce_eqd_set_periodic(POCE_SOFTC sc)
{
    struct oce_set_eqd set_eqd[OCE_MAX_EQ];
    struct oce_aic_obj *aic;
    struct oce_eq *eqo;
    uint64_t now = 0, delta;
    int eqd, i, num = 0;
    uint32_t tx_reqs = 0, rxpkts = 0, pps;
    struct oce_wq *wq;
    struct oce_rq *rq;

#define ticks_to_msecs(t) (1000 * (t) / hz)

    for (i = 0 ; i < sc->neqs; i++) {
        eqo = sc->eq[i];
        aic = &sc->aic_obj[i];
        /* When setting the static eq delay from the user space */
        if (!aic->enable) {
            if (aic->ticks)
                aic->ticks = 0;
            eqd = aic->et_eqd;
            goto modify_eqd;
        }

        /* EQ0 additionally services the default RQ (rq[0]). */
        if (i == 0) {
            rq = sc->rq[0];
            rxpkts = rq->rx_stats.rx_pkts;
        } else
            rxpkts = 0;
        if (i + 1 < sc->nrqs) {
            rq = sc->rq[i + 1];
            rxpkts += rq->rx_stats.rx_pkts;
        }
        if (i < sc->nwqs) {
            wq = sc->wq[i];
            tx_reqs = wq->tx_stats.tx_reqs;
        } else
            tx_reqs = 0;
        now = ticks;

        /* First sample, tick wrap, or counter reset: just resync. */
        if (!aic->ticks || now < aic->ticks ||
            rxpkts < aic->prev_rxpkts || tx_reqs < aic->prev_txreqs) {
            aic->prev_rxpkts = rxpkts;
            aic->prev_txreqs = tx_reqs;
            aic->ticks = now;
            continue;
        }

        delta = ticks_to_msecs(now - aic->ticks);
        /*
         * BUGFIX: when less than one millisecond has elapsed (e.g.
         * now == aic->ticks) the original code divided by zero below.
         * Defer the rate estimate to the next timer run instead.
         */
        if (delta == 0)
            continue;

        pps = (((uint32_t)(rxpkts - aic->prev_rxpkts) * 1000) / delta) +
              (((uint32_t)(tx_reqs - aic->prev_txreqs) * 1000) / delta);
        eqd = (pps / 15000) << 2;
        if (eqd < 8)
            eqd = 0;

        /* Make sure that the eq delay is in the known range */
        eqd = min(eqd, aic->max_eqd);
        eqd = max(eqd, aic->min_eqd);

        aic->prev_rxpkts = rxpkts;
        aic->prev_txreqs = tx_reqs;
        aic->ticks = now;

modify_eqd:
        if (eqd != aic->cur_eqd) {
            set_eqd[num].delay_multiplier = (eqd * 65)/100;
            set_eqd[num].eq_id = eqo->eq_id;
            aic->cur_eqd = eqd;
            num++;
        }
    }

    /* Is there atleast one eq that needs to be modified? */
    for(i = 0; i < num; i += 8) {
        if((num - i) >= 8)
            oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], 8);
        else
            oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], (num - i));
    }

}
/*
 * Poll the adapter for unrecoverable hardware errors: the SLIPORT status
 * registers on Lancer (XE201), or the UE status registers (masked by
 * their corresponding mask registers) on BE/Skyhawk.  Sets sc->hw_error
 * and logs details; once set, subsequent calls are no-ops.
 */
static void oce_detect_hw_error(POCE_SOFTC sc)
{

    uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0;
    uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
    uint32_t i;

    if (sc->hw_error)
        return;

    if (IS_XE201(sc)) {
        sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET);
        if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
            sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET);
            sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET);
        }
    } else {
        ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW);
        ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH);
        ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK);
        ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK);

        /* Masked bits are expected/spurious; ignore them. */
        ue_low = (ue_low & ~ue_low_mask);
        ue_high = (ue_high & ~ue_high_mask);
    }

    /* On certain platforms BE hardware can indicate spurious UEs.
     * Allow the h/w to stop working completely in case of a real UE.
     * Hence not setting the hw_error for UE detection.
     */
    if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
        sc->hw_error = TRUE;
        device_printf(sc->dev, "Error detected in the card\n");
    }

    /* NOTE: same condition as above; the two blocks could be merged. */
    if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
        device_printf(sc->dev,
                "ERR: sliport status 0x%x\n", sliport_status);
        device_printf(sc->dev,
                "ERR: sliport error1 0x%x\n", sliport_err1);
        device_printf(sc->dev,
                "ERR: sliport error2 0x%x\n", sliport_err2);
    }

    if (ue_low) {
        for (i = 0; ue_low; ue_low >>= 1, i++) {
            if (ue_low & 1)
                device_printf(sc->dev, "UE: %s bit set\n",
                            ue_status_low_desc[i]);
        }
    }

    if (ue_high) {
        for (i = 0; ue_high; ue_high >>= 1, i++) {
            if (ue_high & 1)
                device_printf(sc->dev, "UE: %s bit set\n",
                            ue_status_hi_desc[i]);
        }
    }

}
/*
 * 1 Hz housekeeping callout: check for hardware errors, refresh stats,
 * resync the MAC address, kick stalled TX queues and retune interrupt
 * coalescing, then reschedule itself.
 */
static void
oce_local_timer(void *arg)
{
    POCE_SOFTC sc = arg;
    int i = 0;

    oce_detect_hw_error(sc);
    oce_refresh_nic_stats(sc);
    oce_refresh_queue_stats(sc);
    oce_mac_addr_set(sc);

    /* TX Watch Dog*/
    for (i = 0; i < sc->nwqs; i++)
        oce_tx_restart(sc, sc->wq[i]);

    /* calculate and set the eq delay for optimal interrupt rate */
    if (IS_BE(sc) || IS_SH(sc))
        oce_eqd_set_periodic(sc);

    callout_reset(&sc->timer, hz, oce_local_timer, sc);
}
/*
 * Drain all pending TX completions before tearing the queues down: poll
 * the WQ handlers until the hardware has been silent for ~10 ms, all WQs
 * are empty, or a hardware error is flagged; then forcibly reclaim any
 * WQEs still outstanding.
 */
static void
oce_tx_compl_clean(POCE_SOFTC sc)
{
    struct oce_wq *wq;
    int i = 0, timeo = 0, num_wqes = 0;
    int pending_txqs = sc->nwqs;

    /* Stop polling for compls when HW has been silent for 10ms or
     * hw_error or no outstanding completions expected
     */
    do {
        pending_txqs = sc->nwqs;

        for_all_wq_queues(sc, wq, i) {
            num_wqes = oce_wq_handler(wq);

            if(num_wqes)
                timeo = 0;   /* saw activity: restart the silence timer */
            if(!wq->ring->num_used)
                pending_txqs--;
        }

        if (pending_txqs == 0 || ++timeo > 10 || sc->hw_error)
            break;

        DELAY(1000);
    } while (TRUE);

    /* Forcibly complete whatever the hardware never acknowledged. */
    for_all_wq_queues(sc, wq, i) {
        while(wq->ring->num_used) {
            LOCK(&wq->tx_compl_lock);
            oce_process_tx_completion(wq);
            UNLOCK(&wq->tx_compl_lock);
        }
    }

}
/* NOTE : This should only be called holding
* DEVICE_LOCK.
*/
/*
 * Quiesce the interface: clear RUNNING/OACTIVE, drain TX completions,
 * disable interrupts, drain the interrupt taskqueues, tear down the RX
 * queues and flush all CQ/EQ entries.  Interrupts are re-enabled at the
 * end (with EQ0 armed) so MCC async events keep flowing.
 */
static void
oce_if_deactivate(POCE_SOFTC sc)
{
    int i;
    struct oce_rq *rq;
    struct oce_wq *wq;
    struct oce_eq *eq;

    if_setdrvflagbits(sc->ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));

    oce_tx_compl_clean(sc);

    /* Stop intrs and finish any bottom halves pending */
    oce_hw_intr_disable(sc);

    /* Since taskqueue_drain takes a Giant Lock, We should not acquire
       any other lock. So unlock device lock and require after
       completing taskqueue_drain.
     */
    UNLOCK(&sc->dev_lock);
    for (i = 0; i < sc->intr_count; i++) {
        if (sc->intrs[i].tq != NULL) {
            taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
        }
    }
    LOCK(&sc->dev_lock);

    /* Delete RX queue in card with flush param */
    oce_stop_rx(sc);

    /* Invalidate any pending cq and eq entries*/
    for_all_evnt_queues(sc, eq, i)
        oce_drain_eq(eq);
    for_all_rq_queues(sc, rq, i)
        oce_drain_rq_cq(rq);
    for_all_wq_queues(sc, wq, i)
        oce_drain_wq_cq(wq);

    /* But still we need to get MCC aync events.
       So enable intrs and also arm first EQ
     */
    oce_hw_intr_enable(sc);
    oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);

    DELAY(10);
}
/*
 * Bring the interface back up: mark it RUNNING, recreate and start the
 * RX and TX queues, arm every event queue and re-enable interrupts.
 */
static void
oce_if_activate(POCE_SOFTC sc)
{
    struct oce_eq *eq;
    struct oce_rq *rq;
    struct oce_wq *wq;
    int i, rc = 0;

    if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING , 0);

    /* Keep interrupts off while the queues are being set up. */
    oce_hw_intr_disable(sc);

    oce_start_rx(sc);

    for_all_rq_queues(sc, rq, i) {
        rc = oce_start_rq(rq);
        if (rc)
            device_printf(sc->dev, "Unable to start RX\n");
    }

    for_all_wq_queues(sc, wq, i) {
        rc = oce_start_wq(wq);
        if (rc)
            device_printf(sc->dev, "Unable to start TX\n");
    }

    for_all_evnt_queues(sc, eq, i)
        oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

    oce_hw_intr_enable(sc);

}
/*
 * Translate a link-state async CQE into sc->link_status and notify the
 * network stack.  The "logical link" bit is masked off before comparing.
 */
static void
process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
{
    int link_up;

    link_up = ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
        ASYNC_EVENT_LINK_UP);

    sc->link_status = link_up ? ASYNC_EVENT_LINK_UP : ASYNC_EVENT_LINK_DOWN;
    if_link_state_change(sc->ifp,
        link_up ? LINK_STATE_UP : LINK_STATE_DOWN);
}
/*
 * Parse a GRP5 OS2BMC async event: when management passthrough is
 * enabled, record in sc->bmc_filt_mask which traffic classes must be
 * mirrored to the BMC (one bit per filter type, in the order below).
 */
static void oce_async_grp5_osbmc_process(POCE_SOFTC sc,
                     struct oce_async_evt_grp5_os2bmc *evt)
{
    DW_SWAP(evt, sizeof(struct oce_async_evt_grp5_os2bmc));
    if (evt->u.s.mgmt_enable)
        sc->flags |= OCE_FLAGS_OS2BMC;
    else
        return;   /* management disabled: keep the old filter mask */

    sc->bmc_filt_mask = evt->u.s.arp_filter;
    sc->bmc_filt_mask |= (evt->u.s.dhcp_client_filt << 1);
    sc->bmc_filt_mask |= (evt->u.s.dhcp_server_filt << 2);
    sc->bmc_filt_mask |= (evt->u.s.net_bios_filt << 3);
    sc->bmc_filt_mask |= (evt->u.s.bcast_filt << 4);
    sc->bmc_filt_mask |= (evt->u.s.ipv6_nbr_filt << 5);
    sc->bmc_filt_mask |= (evt->u.s.ipv6_ra_filt << 6);
    sc->bmc_filt_mask |= (evt->u.s.ipv6_ras_filt << 7);
    sc->bmc_filt_mask |= (evt->u.s.mcast_filt << 8);
}
/*
 * Dispatch a GRP5 async completion: PVID state changes update sc->pvid;
 * OS2BMC events update the BMC filter mask.  Other subtypes are ignored.
 */
static void oce_process_grp5_events(POCE_SOFTC sc, struct oce_mq_cqe *cqe)
{
    struct oce_async_event_grp5_pvid_state *gcqe;
    struct oce_async_evt_grp5_os2bmc *bmccqe;

    switch (cqe->u0.s.async_type) {
    case ASYNC_EVENT_PVID_STATE:
        /* GRP5 PVID */
        gcqe = (struct oce_async_event_grp5_pvid_state *)cqe;
        if (gcqe->enabled)
            sc->pvid = gcqe->tag & VLAN_VID_MASK;
        else
            sc->pvid = 0;
        break;
    case ASYNC_EVENT_OS2BMC:
        bmccqe = (struct oce_async_evt_grp5_os2bmc *)cqe;
        oce_async_grp5_osbmc_process(sc, bmccqe);
        break;
    default:
        break;
    }
}
/* Handle the Completion Queue for the Mailbox/Async notifications */
/*
 * Drain the mailbox CQ: async events (link state, GRP5, QnQ debug) are
 * dispatched as they are consumed; the CQ is re-armed with the number of
 * entries processed.  Always returns 0.
 */
uint16_t
oce_mq_handler(void *arg)
{
    struct oce_mq *mq = (struct oce_mq *)arg;
    POCE_SOFTC sc = mq->parent;
    struct oce_cq *cq = mq->cq;
    int num_cqes = 0, evt_type = 0, optype = 0;
    struct oce_mq_cqe *cqe;
    struct oce_async_cqe_link_state *acqe;
    struct oce_async_event_qnq *dbgcqe;

    bus_dmamap_sync(cq->ring->dma.tag,
            cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
    cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);

    /* A non-zero dw[3] marks a CQE the hardware has filled in. */
    while (cqe->u0.dw[3]) {
        DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
        if (cqe->u0.s.async_event) {
            evt_type = cqe->u0.s.event_type;
            optype = cqe->u0.s.async_type;
            if (evt_type == ASYNC_EVENT_CODE_LINK_STATE) {
                /* Link status evt */
                acqe = (struct oce_async_cqe_link_state *)cqe;
                process_link_state(sc, acqe);
            } else if (evt_type == ASYNC_EVENT_GRP5) {
                oce_process_grp5_events(sc, cqe);
            } else if (evt_type == ASYNC_EVENT_CODE_DEBUG &&
                    optype == ASYNC_EVENT_DEBUG_QNQ) {
                dbgcqe = (struct oce_async_event_qnq *)cqe;
                if(dbgcqe->valid)
                    sc->qnqid = dbgcqe->vlan_tag;
                sc->qnq_debug_event = TRUE;
            }
        }
        cqe->u0.dw[3] = 0;   /* return the CQE to the hardware */
        RING_GET(cq->ring, 1);
        bus_dmamap_sync(cq->ring->dma.tag,
                cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
        cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
        num_cqes++;
    }

    if (num_cqes)
        oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

    return 0;
}
/*
 * Decide how many RQs/WQs to request.  Partitioned functions (FLEX10,
 * UMC, VNIC), BE2 adapters and RSS-disabled configurations get a single
 * RQ/WQ pair; otherwise one WQ per CPU (capped by nrssqs) plus a default
 * RQ on top of the RSS RQs.
 */
static void
setup_max_queues_want(POCE_SOFTC sc)
{
    int single_queue;

    /* Check if it is FLEX machine. Is so dont use RSS */
    single_queue = (sc->function_mode & FNM_FLEX10_MODE) ||
        (sc->function_mode & FNM_UMC_MODE) ||
        (sc->function_mode & FNM_VNIC_MODE) ||
        !is_rss_enabled(sc) ||
        IS_BE2(sc);

    if (single_queue) {
        sc->nrqs = 1;
        sc->nwqs = 1;
    } else {
        sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
        sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
    }

    /* BE2 with RSS still gets multiple RQs (default + RSS RQs). */
    if (IS_BE2(sc) && is_rss_enabled(sc))
        sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
}
/*
 * Record the queue counts actually usable after interrupt allocation:
 * one WQ per vector plus a default RQ with RSS, a single pair without.
 * BE2 is always limited to one WQ.
 */
static void
update_queues_got(POCE_SOFTC sc)
{
    if (!is_rss_enabled(sc)) {
        sc->nrqs = 1;
        sc->nwqs = 1;
    } else {
        sc->nrqs = sc->intr_count + 1;
        sc->nwqs = sc->intr_count;
    }

    if (IS_BE2(sc))
        sc->nwqs = 1;
}
/*
 * Return TRUE if the frame is IPv6 with a next-header that is neither
 * TCP nor UDP and whose first extension header has an all-ones length
 * byte -- the pattern that triggers the BE3 A1 TX stall workaround.
 * m_data is restored before returning.
 */
static int
oce_check_ipv6_ext_hdr(struct mbuf *m)
{
    struct ether_header *eh = mtod(m, struct ether_header *);
    caddr_t m_datatemp = m->m_data;

    if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
        m->m_data += sizeof(struct ether_header);
        struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

        if((ip6->ip6_nxt != IPPROTO_TCP) && \
                (ip6->ip6_nxt != IPPROTO_UDP)){
            struct ip6_ext *ip6e = NULL;
            m->m_data += sizeof(struct ip6_hdr);

            ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *);
            if(ip6e->ip6e_len == 0xff) {
                m->m_data = m_datatemp;
                return TRUE;
            }
        }
        m->m_data = m_datatemp;   /* rewind to the Ethernet header */
    }
    return FALSE;
}
/* True for BE3 chips at ASIC revision < 2 (the A1 stepping). */
static int
is_be3_a1(POCE_SOFTC sc)
{
    return ((sc->flags & OCE_FLAGS_BE3) &&
        ((sc->asic_revision & 0xFF) < 2)) ? TRUE : FALSE;
}
/*
 * Embed the VLAN tag (and, in QnQ mode, the outer qnq tag) directly into
 * the frame when hardware insertion cannot be used; *complete is cleared
 * when the hardware must not add its own tag.  Returns the (possibly
 * re-headed) mbuf, or NULL when the chain is not writable.
 * NOTE(review): ether_vlanencap() can return NULL on allocation failure;
 * the second encapsulation would then dereference NULL -- confirm.
 */
static struct mbuf *
oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
{
    uint16_t vlan_tag = 0;

    if(!M_WRITABLE(m))
        return NULL;

    /* Embed vlan tag in the packet if it is not part of it */
    if(m->m_flags & M_VLANTAG) {
        vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
        m->m_flags &= ~M_VLANTAG;
    }

    /* if UMC, ignore vlan tag insertion and instead insert pvid */
    if(sc->pvid) {
        if(!vlan_tag)
            vlan_tag = sc->pvid;
        if (complete)
            *complete = FALSE;
    }

    if(vlan_tag) {
        m = ether_vlanencap(m, vlan_tag);
    }

    if(sc->qnqid) {
        m = ether_vlanencap(m, sc->qnqid);

        if (complete)
            *complete = FALSE;
    }
    return m;
}
/*
 * True when the BE3 A1 TX stall workaround must be applied to this
 * frame: only on A1 silicon in QnQ/UMC mode, and only for IPv6 frames
 * carrying the problematic extension-header pattern.
 */
static int
oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
{
    if (!is_be3_a1(sc) || !IS_QNQ_OR_UMC(sc))
        return FALSE;

    return oce_check_ipv6_ext_hdr(m) ? TRUE : FALSE;
}
/*
 * Determine queue resources.  Non-BE adapters ask firmware for the
 * profile configuration and fall back to driver defaults on failure;
 * BE3 always uses the driver defaults since firmware cannot be relied on.
 */
static void
oce_get_config(POCE_SOFTC sc)
{
    int rc = 0;
    uint32_t max_rss = 0;

    /* Legacy (non-native) BE mode supports fewer RSS queues. */
    if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
        max_rss = OCE_LEGACY_MODE_RSS;
    else
        max_rss = OCE_MAX_RSS;

    if (!IS_BE(sc)) {
        rc = oce_get_profile_config(sc, max_rss);
        if (rc) {
            sc->nwqs = OCE_MAX_WQ;
            sc->nrssqs = max_rss;
            sc->nrqs = sc->nrssqs + 1;
        }
    }
    else { /* For BE3 don't rely on fw for determining the resources */
        sc->nrssqs = max_rss;
        sc->nrqs = sc->nrssqs + 1;
        sc->nwqs = OCE_MAX_WQ;
        sc->max_vlans = MAX_VLANFILTER_SIZE;
    }
}
/* Unregister the RDMA driver interface (idempotent). */
static void
oce_rdma_close(void)
{
    oce_rdma_if = NULL;
}
/* Copy the adapter's cached 6-byte Ethernet address into macaddr. */
static void
oce_get_mac_addr(POCE_SOFTC sc, uint8_t *macaddr)
{
    bcopy(sc->macaddr.mac_addr, macaddr, 6);
}
/*
 * Register an RDMA (RoCE) driver with the NIC driver: validate the
 * info/interface structure sizes, install the callback table, then
 * announce every attached adapter to the RDMA driver via its 'announce'
 * callback.  Returns 0 on success, -EINVAL/-ENXIO on bad arguments.
 */
int
oce_register_rdma(POCE_RDMA_INFO rdma_info, POCE_RDMA_IF rdma_if)
{
    POCE_SOFTC sc;
    struct oce_dev_info di;
    int i;

    if ((rdma_info == NULL) || (rdma_if == NULL)) {
        return -EINVAL;
    }

    if ((rdma_info->size != OCE_RDMA_INFO_SIZE) ||
        (rdma_if->size != OCE_RDMA_IF_SIZE)) {
        return -ENXIO;
    }

    rdma_info->close = oce_rdma_close;
    rdma_info->mbox_post = oce_mbox_post;
    rdma_info->common_req_hdr_init = mbx_common_req_hdr_init;
    rdma_info->get_mac_addr = oce_get_mac_addr;

    oce_rdma_if = rdma_if;

    /*
     * BUGFIX: the original loop advanced 'sc' only inside the
     * "announce != NULL" branch, looping forever when no announce
     * callback was supplied.  Advance unconditionally instead.
     */
    for (sc = softc_head; sc != NULL; sc = sc->next) {
        if (oce_rdma_if->announce == NULL)
            break;

        memset(&di, 0, sizeof(di));
        di.dev = sc->dev;
        di.softc = sc;
        di.ifp = sc->ifp;
        di.db_bhandle = sc->db_bhandle;
        di.db_btag = sc->db_btag;
        di.db_page_size = 4096;
        if (sc->flags & OCE_FLAGS_USING_MSIX) {
            di.intr_mode = OCE_INTERRUPT_MODE_MSIX;
        } else if (sc->flags & OCE_FLAGS_USING_MSI) {
            di.intr_mode = OCE_INTERRUPT_MODE_MSI;
        } else {
            di.intr_mode = OCE_INTERRUPT_MODE_INTX;
        }
        di.dev_family = OCE_GEN2_FAMILY; // fixme: must detect skyhawk
        if (di.intr_mode != OCE_INTERRUPT_MODE_INTX) {
            /* RDMA vectors follow the NIC vectors. */
            di.msix.num_vectors = sc->intr_count + sc->roce_intr_count;
            di.msix.start_vector = sc->intr_count;
            for (i = 0; i < di.msix.num_vectors; i++) {
                di.msix.vector_list[i] = sc->intrs[i].vector;
            }
        }
        memcpy(di.mac_addr, sc->macaddr.mac_addr, 6);
        di.vendor_id = pci_get_vendor(sc->dev);
        di.dev_id = pci_get_device(sc->dev);

        if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
            di.flags |= OCE_RDMA_INFO_RDMA_SUPPORTED;
        }

        rdma_if->announce(&di);
    }

    return 0;
}
/*
 * Read tunables controlling hardware/software LRO and the RQ buffer size.
 *
 * NOTE(review): both getenv() calls are commented out, so `value` is
 * always NULL and every conditional below is dead -- the driver always
 * takes the "sc->enable_hwlro = 0" default and keeps the compiled-in
 * oce_rq_buf_size.  Confirm whether the tunables should be re-enabled
 * (e.g. via kern_getenv/TUNABLE) or this function removed.
 */
static void
oce_read_env_variables( POCE_SOFTC sc )
{
	char *value = NULL;
	int rc = 0;

	/* read if user wants to enable hwlro or swlro */
	//value = getenv("oce_enable_hwlro");
	if(value && IS_SH(sc)) {	/* hw lro is only supported on Skyhawk */
		sc->enable_hwlro = strtol(value, NULL, 10);
		if(sc->enable_hwlro) {
			/* verify the firmware actually supports hw LRO */
			rc = oce_mbox_nic_query_lro_capabilities(sc, NULL, NULL);
			if(rc) {
				device_printf(sc->dev, "no hardware lro support\n");
				device_printf(sc->dev, "software lro enabled\n");
				sc->enable_hwlro = 0;
			}else {
				device_printf(sc->dev, "hardware lro enabled\n");
				oce_max_rsp_handled = 32;
			}
		}else {
			device_printf(sc->dev, "software lro enabled\n");
		}
	}else {
		sc->enable_hwlro = 0;
	}

	/* read mbuf size */
	//value = getenv("oce_rq_buf_size");
	if(value && IS_SH(sc)) {
		oce_rq_buf_size = strtol(value, NULL, 10);
		/* only a fixed set of RX buffer sizes is supported */
		switch(oce_rq_buf_size) {
		case 2048:
		case 4096:
		case 9216:
		case 16384:
			break;
		default:
			device_printf(sc->dev, " Supported oce_rq_buf_size values are 2K, 4K, 9K, 16K \n");
			oce_rq_buf_size = 2048;
		}
	}
	return;
}
diff --git a/sys/dev/ppbus/if_plip.c b/sys/dev/ppbus/if_plip.c
index a9ef8dc016b7..c2adcb602f6b 100644
--- a/sys/dev/ppbus/if_plip.c
+++ b/sys/dev/ppbus/if_plip.c
@@ -1,843 +1,839 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 1997 Poul-Henning Kamp
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* From Id: lpt.c,v 1.55.2.1 1996/11/12 09:08:38 phk Exp
*/
#include <sys/cdefs.h>
/*
* Parallel port TCP/IP interfaces added. I looked at the driver from
* MACH but this is a complete rewrite, and btw. incompatible, and it
* should perform better too. I have never run the MACH driver though.
*
* This driver sends two bytes (0x08, 0x00) in front of each packet,
* to allow us to distinguish another format later.
*
* Now added a Linux/Crynwr compatibility mode which is enabled using
* IF_LINK0 - Tim Wilkinson.
*
* TODO:
* Make HDLC/PPP mode, use IF_LLC1 to enable.
*
* Connect the two computers using a Laplink parallel cable to use this
* feature:
*
* +----------------------------------------+
* |A-name A-End B-End Descr. Port/Bit |
* +----------------------------------------+
* |DATA0 2 15 Data 0/0x01 |
* |-ERROR 15 2 1/0x08 |
* +----------------------------------------+
* |DATA1 3 13 Data 0/0x02 |
* |+SLCT 13 3 1/0x10 |
* +----------------------------------------+
* |DATA2 4 12 Data 0/0x04 |
* |+PE 12 4 1/0x20 |
* +----------------------------------------+
* |DATA3 5 10 Strobe 0/0x08 |
* |-ACK 10 5 1/0x40 |
* +----------------------------------------+
* |DATA4 6 11 Data 0/0x10 |
* |BUSY 11 6 1/~0x80 |
* +----------------------------------------+
* |GND 18-25 18-25 GND - |
* +----------------------------------------+
*
* Expect transfer-rates up to 75 kbyte/sec.
*
* If GCC could correctly grok
* register int port asm("edx")
* the code would be cleaner
*
* Poul-Henning Kamp <phk@freebsd.org>
*/
/*
* Update for ppbus, PLIP support only - Nicolas Souchu
*/
#include "opt_plip.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/netisr.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <net/bpf.h>
#include <dev/ppbus/ppbconf.h>
#include "ppbus_if.h"
#include <dev/ppbus/ppbio.h>
#ifndef LPMTU /* MTU for the lp# interfaces */
#define LPMTU 1500
#endif
#ifndef LPMAXSPIN1 /* DELAY factor for the lp# interfaces */
#define LPMAXSPIN1 8000 /* Spinning for remote intr to happen */
#endif
#ifndef LPMAXSPIN2 /* DELAY factor for the lp# interfaces */
#define LPMAXSPIN2 500 /* Spinning for remote handshake to happen */
#endif
#ifndef LPMAXERRS /* Max errors before !RUNNING */
#define LPMAXERRS 100
#endif
#define CLPIPHDRLEN 14 /* We send dummy ethernet addresses (two) + packet type in front of packet */
#define CLPIP_SHAKE 0x80 /* This bit toggles between nibble reception */
#define MLPIPHDRLEN CLPIPHDRLEN
#define LPIPHDRLEN 2 /* We send 0x08, 0x00 in front of packet */
#define LPIP_SHAKE 0x40 /* This bit toggles between nibble reception */
#if !defined(MLPIPHDRLEN) || LPIPHDRLEN > MLPIPHDRLEN
#define MLPIPHDRLEN LPIPHDRLEN
#endif
#define LPIPTBLSIZE 256 /* Size of octet translation table */
#define lprintf if (lptflag) printf
#ifdef PLIP_DEBUG
static int volatile lptflag = 1;
#else
static int volatile lptflag = 0;
#endif
struct lp_data {
struct ifnet *sc_ifp;
device_t sc_dev;
u_char *sc_ifbuf;
int sc_iferrs;
struct resource *res_irq;
void *sc_intr_cookie;
};
static struct mtx lp_tables_lock;
MTX_SYSINIT(lp_tables, &lp_tables_lock, "plip tables", MTX_DEF);
/* Tables for the lp# interface */
static u_char *txmith;
#define txmitl (txmith + (1 * LPIPTBLSIZE))
#define trecvh (txmith + (2 * LPIPTBLSIZE))
#define trecvl (txmith + (3 * LPIPTBLSIZE))
static u_char *ctxmith;
#define ctxmitl (ctxmith + (1 * LPIPTBLSIZE))
#define ctrecvh (ctxmith + (2 * LPIPTBLSIZE))
#define ctrecvl (ctxmith + (3 * LPIPTBLSIZE))
/* Functions for the lp# interface */
static int lpinittables(void);
static int lpioctl(if_t, u_long, caddr_t);
static int lpoutput(if_t, struct mbuf *, const struct sockaddr *,
struct route *);
static void lpstop(struct lp_data *);
static void lp_intr(void *);
static int lp_module_handler(module_t, int, void *);
#define DEVTOSOFTC(dev) \
((struct lp_data *)device_get_softc(dev))
/*
 * Module event handler.  On unload, release the nibble translation
 * tables built by lpinittables(); load and quiesce are no-ops.
 */
static int
lp_module_handler(module_t mod, int what, void *arg)
{
	int error = 0;

	switch (what) {
	case MOD_UNLOAD:
		mtx_lock(&lp_tables_lock);
		/* free(NULL) is a no-op, so no NULL checks needed */
		free(txmith, M_DEVBUF);
		txmith = NULL;
		free(ctxmith, M_DEVBUF);
		ctxmith = NULL;
		mtx_unlock(&lp_tables_lock);
		break;
	case MOD_LOAD:
	case MOD_QUIESCE:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}
/*
 * Bus identify hook: add a single "plip" child to the ppbus if one does
 * not already exist.
 */
static void
lp_identify(driver_t *driver, device_t parent)
{

	if (device_find_child(parent, "plip", -1) == NULL)
		BUS_ADD_CHILD(parent, 0, "plip", -1);
}
/*
 * Probe: always succeeds (the device was added by lp_identify), just set
 * the human-readable description.
 */
static int
lp_probe(device_t dev)
{
	device_set_desc(dev, "PLIP network interface");
	return (0);
}
/*
 * Attach: reserve the shared parallel-port interrupt, create and attach
 * the network interface, hook it into bpf, and install the interrupt
 * handler (which only fires while we own the ppbus).
 */
static int
lp_attach(device_t dev)
{
	struct lp_data *lp = DEVTOSOFTC(dev);
	if_t ifp;
	int error, rid = 0;

	lp->sc_dev = dev;

	/*
	 * Reserve the interrupt resource. If we don't have one, the
	 * attach fails.
	 */
	lp->res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE);
	if (lp->res_irq == NULL) {
		device_printf(dev, "cannot reserve interrupt, failed.\n");
		return (ENXIO);
	}

	/* if_alloc() cannot fail, hence no NULL check */
	ifp = lp->sc_ifp = if_alloc(IFT_PARA);
-	if (ifp == NULL) {
-		return (ENOSPC);
-	}
-
	if_setsoftc(ifp, lp);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setmtu(ifp, LPMTU);
	if_setflags(ifp, IFF_SIMPLEX | IFF_POINTOPOINT | IFF_MULTICAST);
	if_setioctlfn(ifp, lpioctl);
	if_setoutputfn(ifp, lpoutput);
	if_setsendqlen(ifp, ifqmaxlen);
	if_attach(ifp);

	/* DLT_NULL: packets are tapped with a 4-byte AF_INET header */
	bpfattach(ifp, DLT_NULL, sizeof(u_int32_t));

	/*
	 * Attach our interrupt handler. It is only called while we
	 * own the ppbus.
	 */
	error = bus_setup_intr(dev, lp->res_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, lp_intr, lp, &lp->sc_intr_cookie);
	if (error) {
		/* unwind in reverse order of the setup above */
		bpfdetach(ifp);
		if_detach(ifp);
		bus_release_resource(dev, SYS_RES_IRQ, 0, lp->res_irq);
		device_printf(dev, "Unable to register interrupt handler\n");
		return (error);
	}
	return (0);
}
/*
 * Detach: stop the interface under the ppbus lock, then tear down bpf,
 * the ifnet, the interrupt handler and the IRQ resource.
 */
static int
lp_detach(device_t dev)
{
	struct lp_data *sc = device_get_softc(dev);
	device_t ppbus = device_get_parent(dev);

	/* lpstop() requires the ppbus lock */
	ppb_lock(ppbus);
	lpstop(sc);
	ppb_unlock(ppbus);
	bpfdetach(sc->sc_ifp);
	if_detach(sc->sc_ifp);
	bus_teardown_intr(dev, sc->res_irq, sc->sc_intr_cookie);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->res_irq);
	return (0);
}
/*
* Build the translation tables for the LPIP (BSD unix) protocol.
* We don't want to calculate these nasties in our tight loop, so we
* precalculate them when we initialize.
*/
/*
 * Build the nibble translation tables, once, under lp_tables_lock.
 * Each of txmith/ctxmith is a single 4*LPIPTBLSIZE allocation; the
 * txmitl/trecvh/trecvl (and ctxmitl/ctrecvh/ctrecvl) macros index into
 * successive LPIPTBLSIZE-sized quarters of it.  The "c" tables encode the
 * Linux/Crynwr wire format, the plain tables the BSD LPIP format.
 *
 * Returns 0 on success, 1 if either allocation failed.
 */
static int
lpinittables(void)
{
	int i;

	mtx_lock(&lp_tables_lock);
	if (txmith == NULL)
		txmith = malloc(4 * LPIPTBLSIZE, M_DEVBUF, M_NOWAIT);

	if (txmith == NULL) {
		mtx_unlock(&lp_tables_lock);
		return (1);
	}

	if (ctxmith == NULL)
		ctxmith = malloc(4 * LPIPTBLSIZE, M_DEVBUF, M_NOWAIT);

	if (ctxmith == NULL) {
		mtx_unlock(&lp_tables_lock);
		return (1);
	}

	/* Crynwr-compatible encoding: high/low nibble per byte */
	for (i = 0; i < LPIPTBLSIZE; i++) {
		ctxmith[i] = (i & 0xF0) >> 4;
		ctxmitl[i] = 0x10 | (i & 0x0F);
		ctrecvh[i] = (i & 0x78) << 1;
		ctrecvl[i] = (i & 0x78) >> 3;
	}

	/* BSD LPIP encoding (includes the handshake bit in bit 3) */
	for (i = 0; i < LPIPTBLSIZE; i++) {
		txmith[i] = ((i & 0x80) >> 3) | ((i & 0x70) >> 4) | 0x08;
		txmitl[i] = ((i & 0x08) << 1) | (i & 0x07);
		trecvh[i] = ((~i) & 0x80) | ((i & 0x38) << 1);
		trecvl[i] = (((~i) & 0x80) >> 4) | ((i & 0x38) >> 3);
	}
	mtx_unlock(&lp_tables_lock);
	return (0);
}
/*
 * Take the interface down: quiesce the port, clear the running flags,
 * release the receive buffer and give the ppbus back.  Caller must hold
 * the ppbus lock.
 */
static void
lpstop(struct lp_data *sc)
{
	device_t ppbus = device_get_parent(sc->sc_dev);

	ppb_assert_locked(ppbus);
	/* drop IRQENABLE and all control lines */
	ppb_wctr(ppbus, 0x00);
	if_setdrvflagbits(sc->sc_ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	free(sc->sc_ifbuf, M_DEVBUF);
	sc->sc_ifbuf = NULL;

	/* IFF_UP is not set, try to release the bus anyway */
	ppb_release_bus(ppbus, sc->sc_dev);
}
/*
 * Bring the interface up: claim the ppbus (ownership of the bus is what
 * IFF_UP means for this driver), build the translation tables, allocate
 * the receive buffer sized for the current MTU, and enable the port
 * interrupt.  Caller must hold the ppbus lock.
 *
 * Returns 0 on success, an errno on failure (bus busy or out of memory).
 */
static int
lpinit_locked(if_t ifp)
{
	struct lp_data *sc = if_getsoftc(ifp);
	device_t dev = sc->sc_dev;
	device_t ppbus = device_get_parent(dev);
	int error;

	ppb_assert_locked(ppbus);
	error = ppb_request_bus(ppbus, dev, PPB_DONTWAIT);
	if (error)
		return (error);

	/* Now IFF_UP means that we own the bus */
	ppb_set_mode(ppbus, PPB_COMPATIBLE);

	if (lpinittables()) {
		ppb_release_bus(ppbus, dev);
		return (ENOBUFS);
	}

	/* receive buffer holds a full MTU plus the largest pseudo header */
	sc->sc_ifbuf = malloc(if_getmtu(sc->sc_ifp) + MLPIPHDRLEN,
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_ifbuf == NULL) {
		ppb_release_bus(ppbus, dev);
		return (ENOBUFS);
	}

	ppb_wctr(ppbus, IRQENABLE);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	return (0);
}
/*
* Process an ioctl request.
*/
/*
 * Process an ioctl request.
 *
 * Only AF_INET addressing is supported.  Up/down transitions are driven
 * from SIOCSIFFLAGS (and implicitly from address assignment, which sets
 * IFF_UP); MTU changes re-size the receive buffer while running.
 */
static int
lpioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct lp_data *sc = if_getsoftc(ifp);
	device_t dev = sc->sc_dev;
	device_t ppbus = device_get_parent(dev);
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	u_char *ptr;
	int error;

	switch (cmd) {
	case SIOCAIFADDR:
	case SIOCSIFADDR:
		if (ifa->ifa_addr->sa_family != AF_INET)
			return (EAFNOSUPPORT);

		if_setflagbits(ifp, IFF_UP, 0);
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		error = 0;
		ppb_lock(ppbus);
		/* reconcile IFF_UP (admin state) with IFF_DRV_RUNNING */
		if ((!(if_getflags(ifp) & IFF_UP)) &&
		    (if_getdrvflags(ifp) & IFF_DRV_RUNNING))
			lpstop(sc);
		else if (((if_getflags(ifp) & IFF_UP)) &&
		    (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)))
			error = lpinit_locked(ifp);
		ppb_unlock(ppbus);
		return (error);
	case SIOCSIFMTU:
		ppb_lock(ppbus);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			/* allocate the new buffer before freeing the old */
			ptr = malloc(ifr->ifr_mtu + MLPIPHDRLEN, M_DEVBUF,
			    M_NOWAIT);
			if (ptr == NULL) {
				ppb_unlock(ppbus);
				return (ENOBUFS);
			}
			if (sc->sc_ifbuf)
				free(sc->sc_ifbuf, M_DEVBUF);
			sc->sc_ifbuf = ptr;
		}
		if_setmtu(ifp, ifr->ifr_mtu);
		ppb_unlock(ppbus);
		break;
	case SIOCGIFMTU:
		ifr->ifr_mtu = if_getmtu(sc->sc_ifp);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifr == NULL) {
			return (EAFNOSUPPORT);		/* XXX */
		}
		switch (ifr->ifr_addr.sa_family) {
		case AF_INET:
			break;
		default:
			return (EAFNOSUPPORT);
		}
		break;
	case SIOCGIFMEDIA:
		/*
		 * No ifmedia support at this stage; maybe use it
		 * in future for eg. protocol selection.
		 */
		return (EINVAL);
	default:
		lprintf("LP:ioctl(0x%lx)\n", cmd);
		return (EINVAL);
	}
	return (0);
}
/*
 * Send one byte in Crynwr/Linux mode: write low nibble, wait for the
 * peer to drop the handshake bit, write high nibble, wait for it to rise
 * again.  `spin` bounds each busy-wait.  Returns 0 on success, 1 on
 * handshake timeout.
 */
static __inline int
clpoutbyte(u_char byte, int spin, device_t ppbus)
{
	ppb_wdtr(ppbus, ctxmitl[byte]);
	while (ppb_rstr(ppbus) & CLPIP_SHAKE)
		if (--spin == 0) {
			return (1);
		}
	ppb_wdtr(ppbus, ctxmith[byte]);
	while (!(ppb_rstr(ppbus) & CLPIP_SHAKE))
		if (--spin == 0) {
			return (1);
		}
	return (0);
}
/*
 * Receive one byte in Crynwr/Linux mode: wait for the handshake bit to
 * drop, latch the low nibble, acknowledge (0x10), wait for it to rise,
 * latch the high nibble, acknowledge (0x00).  Returns the decoded byte,
 * or -1 on handshake timeout.
 */
static __inline int
clpinbyte(int spin, device_t ppbus)
{
	u_char c, cl;

	while ((ppb_rstr(ppbus) & CLPIP_SHAKE))
		if (!--spin) {
			return (-1);
		}
	cl = ppb_rstr(ppbus);
	ppb_wdtr(ppbus, 0x10);

	while (!(ppb_rstr(ppbus) & CLPIP_SHAKE))
		if (!--spin) {
			return (-1);
		}
	c = ppb_rstr(ppbus);
	ppb_wdtr(ppbus, 0x00);

	return (ctrecvl[cl] | ctrecvh[c]);
}
/*
 * Tap a packet to bpf, prepending the 4-byte address-family header that
 * DLT_NULL expects.
 */
static void
lptap(if_t ifp, struct mbuf *m)
{
	u_int32_t family;

	family = AF_INET;
	bpf_mtap2_if(ifp, &family, sizeof(family), m);
}
/*
 * Receive interrupt handler, called with the ppbus locked (only while we
 * own the bus).  Two wire formats:
 *
 *  - IFF_LINK0 set: Crynwr/Linux mode.  A 2-byte little-endian length,
 *    `len` payload bytes and a trailing (ignored) checksum, all moved
 *    with clpinbyte().  CLPIPHDRLEN bytes of dummy ethernet header are
 *    stripped before handing the packet to the IP netisr.
 *
 *  - otherwise: BSD LPIP mode.  Nibbles are exchanged inline using the
 *    trecvh/trecvl tables; end-of-packet is detected by the 0xb8 pattern
 *    in the inner handshake loop.
 *
 * On any handshake timeout or oversized/undersized frame we jump to
 * `err`, count an input error, and after LPMAXERRS consecutive errors
 * take the interface off-line.
 */
static void
lp_intr(void *arg)
{
	struct lp_data *sc = arg;
	device_t ppbus = device_get_parent(sc->sc_dev);
	int len, j;
	u_char *bp;
	u_char c, cl;
	struct mbuf *top;

	ppb_assert_locked(ppbus);
	if (if_getflags(sc->sc_ifp) & IFF_LINK0) {
		/* Ack. the request */
		ppb_wdtr(ppbus, 0x01);

		/* Get the packet length (2 bytes, LSB first) */
		j = clpinbyte(LPMAXSPIN2, ppbus);
		if (j == -1)
			goto err;
		len = j;
		j = clpinbyte(LPMAXSPIN2, ppbus);
		if (j == -1)
			goto err;
		len = len + (j << 8);
		/* reject frames that would overflow sc_ifbuf */
		if (len > if_getmtu(sc->sc_ifp) + MLPIPHDRLEN)
			goto err;

		bp = sc->sc_ifbuf;

		while (len--) {
			j = clpinbyte(LPMAXSPIN2, ppbus);
			if (j == -1) {
				goto err;
			}
			*bp++ = j;
		}

		/* Get and ignore checksum */
		j = clpinbyte(LPMAXSPIN2, ppbus);
		if (j == -1) {
			goto err;
		}

		/* must contain more than the dummy ethernet header */
		len = bp - sc->sc_ifbuf;
		if (len <= CLPIPHDRLEN)
			goto err;

		sc->sc_iferrs = 0;	/* good frame resets the error count */

		len -= CLPIPHDRLEN;
		if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1);
		if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, len);
		top = m_devget(sc->sc_ifbuf + CLPIPHDRLEN, len, 0, sc->sc_ifp,
		    0);
		if (top) {
			/* drop the bus lock while in the network stack */
			ppb_unlock(ppbus);
			lptap(sc->sc_ifp, top);

			M_SETFIB(top, if_getfib(sc->sc_ifp));

			/* mbuf is free'd on failure. */
			netisr_queue(NETISR_IP, top);
			ppb_lock(ppbus);
		}
		return;
	}
	/* BSD LPIP mode: loop while the peer keeps the handshake asserted */
	while ((ppb_rstr(ppbus) & LPIP_SHAKE)) {
		len = if_getmtu(sc->sc_ifp) + LPIPHDRLEN;
		bp = sc->sc_ifbuf;

		while (len--) {
			cl = ppb_rstr(ppbus);
			ppb_wdtr(ppbus, 8);

			j = LPMAXSPIN2;
			while ((ppb_rstr(ppbus) & LPIP_SHAKE))
				if (!--j)
					goto err;

			c = ppb_rstr(ppbus);
			ppb_wdtr(ppbus, 0);

			*bp++= trecvh[cl] | trecvl[c];

			j = LPMAXSPIN2;
			while (!((cl = ppb_rstr(ppbus)) & LPIP_SHAKE)) {
				/* end-of-packet marker from the peer */
				if (cl != c &&
				    (((cl = ppb_rstr(ppbus)) ^ 0xb8) & 0xf8) ==
				    (c & 0xf8))
					goto end;
				if (!--j)
					goto err;
			}
		}

	end:
		len = bp - sc->sc_ifbuf;
		if (len <= LPIPHDRLEN)
			goto err;

		sc->sc_iferrs = 0;	/* good frame resets the error count */

		len -= LPIPHDRLEN;	/* strip the 0x08 0x00 marker bytes */
		if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1);
		if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, len);
		top = m_devget(sc->sc_ifbuf + LPIPHDRLEN, len, 0, sc->sc_ifp,
		    0);
		if (top) {
			/* drop the bus lock while in the network stack */
			ppb_unlock(ppbus);
			lptap(sc->sc_ifp, top);

			M_SETFIB(top, if_getfib(sc->sc_ifp));

			/* mbuf is free'd on failure. */
			netisr_queue(NETISR_IP, top);
			ppb_lock(ppbus);
		}
	}
	return;

err:
	ppb_wdtr(ppbus, 0);
	lprintf("R");
	if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
	sc->sc_iferrs++;

	/*
	 * We are not able to send receive anything for now,
	 * so stop wasting our time
	 */
	if (sc->sc_iferrs > LPMAXERRS) {
		if_printf(sc->sc_ifp, "Too many errors, Going off-line.\n");
		ppb_wctr(ppbus, 0x00);
		if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_RUNNING);
		sc->sc_iferrs = 0;
	}
}
/*
 * Send one byte in BSD LPIP mode: write high nibble, wait for the peer's
 * handshake bit to rise, write low nibble, wait for it to fall.  `spin`
 * bounds each busy-wait.  Returns 0 on success, 1 on timeout.
 */
static __inline int
lpoutbyte(u_char byte, int spin, device_t ppbus)
{
	ppb_wdtr(ppbus, txmith[byte]);
	while (!(ppb_rstr(ppbus) & LPIP_SHAKE))
		if (--spin == 0)
			return (1);
	ppb_wdtr(ppbus, txmitl[byte]);
	while (ppb_rstr(ppbus) & LPIP_SHAKE)
		if (--spin == 0)
			return (1);
	return (0);
}
/*
 * Transmit a packet.  Two wire formats, selected by IFF_LINK0:
 *
 *  - LINK0 (Crynwr/Linux) mode: alert the peer, send a 2-byte length, a
 *    14-byte dummy ethernet header, the payload, and a checksum, all via
 *    clpoutbyte().
 *
 *  - BSD LPIP mode: send the 0x08 0x00 marker followed by the payload
 *    via lpoutbyte(); terminate by echoing the last byte's low-nibble
 *    encoding XOR 0x17.
 *
 * Either way the mbuf chain is consumed, counters are updated, and any
 * receive request pending on the wire is serviced before returning.
 * Always returns 0; transmit timeouts only bump IFCOUNTER_OERRORS.
 */
static int
lpoutput(if_t ifp, struct mbuf *m, const struct sockaddr *dst,
    struct route *ro)
{
	struct lp_data *sc = if_getsoftc(ifp);
	device_t dev = sc->sc_dev;
	device_t ppbus = device_get_parent(dev);
	int err;
	struct mbuf *mm;
	u_char *cp = "\0\0";
	u_char chksum = 0;
	int count = 0;
	int i, len, spin;

	/*
	 * We need a sensible value if we abort: `--cp` at the `end:` label
	 * must still point into the literal if no data byte was ever sent.
	 */
	cp++;

	ppb_lock(ppbus);
	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);

	err = 1;		/* assume we're aborting because of an error */

	/* Suspend (on laptops) or receive-errors might have taken us offline */
	ppb_wctr(ppbus, IRQENABLE);

	if (if_getflags(ifp) & IFF_LINK0) {
		/* the peer wants to send us something first -- service it */
		if (!(ppb_rstr(ppbus) & CLPIP_SHAKE)) {
			lprintf("&");
			lp_intr(sc);
		}

		/* Alert other end to pending packet */
		spin = LPMAXSPIN1;
		ppb_wdtr(ppbus, 0x08);
		while ((ppb_rstr(ppbus) & 0x08) == 0)
			if (--spin == 0) {
				goto nend;
			}

		/* Calculate length of packet, then send that */

		count += 14;		/* Ethernet header len */

		mm = m;
		for (mm = m; mm; mm = mm->m_next) {
			count += mm->m_len;
		}
		if (clpoutbyte(count & 0xFF, LPMAXSPIN1, ppbus))
			goto nend;
		if (clpoutbyte((count >> 8) & 0xFF, LPMAXSPIN1, ppbus))
			goto nend;

		/* Send dummy ethernet header */
		for (i = 0; i < 12; i++) {
			if (clpoutbyte(i, LPMAXSPIN1, ppbus))
				goto nend;
			chksum += i;
		}

		if (clpoutbyte(0x08, LPMAXSPIN1, ppbus))
			goto nend;
		if (clpoutbyte(0x00, LPMAXSPIN1, ppbus))
			goto nend;
		chksum += 0x08 + 0x00;		/* Add into checksum */

		mm = m;
		do {
			cp = mtod(mm, u_char *);
			len = mm->m_len;
			while (len--) {
				chksum += *cp;
				if (clpoutbyte(*cp++, LPMAXSPIN2, ppbus))
					goto nend;
			}
		} while ((mm = mm->m_next));

		/* Send checksum */
		if (clpoutbyte(chksum, LPMAXSPIN2, ppbus))
			goto nend;

		/* Go quiescent */
		ppb_wdtr(ppbus, 0);

		err = 0;			/* No errors */

	nend:
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		if (err)  {			/* if we didn't timeout... */
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			lprintf("X");
		} else {
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
			if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
			lptap(ifp, m);
		}

		m_freem(m);

		/* service any receive the peer started meanwhile */
		if (!(ppb_rstr(ppbus) & CLPIP_SHAKE)) {
			lprintf("^");
			lp_intr(sc);
		}
		ppb_unlock(ppbus);
		return (0);
	}

	/* BSD LPIP mode below */

	if (ppb_rstr(ppbus) & LPIP_SHAKE) {
		lprintf("&");
		lp_intr(sc);
	}

	if (lpoutbyte(0x08, LPMAXSPIN1, ppbus))
		goto end;
	if (lpoutbyte(0x00, LPMAXSPIN2, ppbus))
		goto end;

	mm = m;
	do {
		cp = mtod(mm, u_char *);
		len = mm->m_len;
		while (len--)
			if (lpoutbyte(*cp++, LPMAXSPIN2, ppbus))
				goto end;
	} while ((mm = mm->m_next));

	err = 0;			/* no errors were encountered */

end:
	/* terminate: re-send last byte's low nibble with bits flipped */
	--cp;
	ppb_wdtr(ppbus, txmitl[*cp] ^ 0x17);

	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	if (err)  {			/* if we didn't timeout... */
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		lprintf("X");
	} else {
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
		lptap(ifp, m);
	}

	m_freem(m);

	/* service any receive the peer started meanwhile */
	if (ppb_rstr(ppbus) & LPIP_SHAKE) {
		lprintf("^");
		lp_intr(sc);
	}

	ppb_unlock(ppbus);
	return (0);
}
/* newbus glue: device methods for the plip child of ppbus */
static device_method_t lp_methods[] = {
	/* device interface */
	DEVMETHOD(device_identify,	lp_identify),
	DEVMETHOD(device_probe,		lp_probe),
	DEVMETHOD(device_attach,	lp_attach),
	DEVMETHOD(device_detach,	lp_detach),
	{ 0, 0 }
};

static driver_t lp_driver = {
	"plip",
	lp_methods,
	sizeof(struct lp_data),
};

/* lp_module_handler frees the shared translation tables on unload */
DRIVER_MODULE(plip, ppbus, lp_driver, lp_module_handler, NULL);
MODULE_DEPEND(plip, ppbus, 1, 1, 1);
diff --git a/sys/dev/qlnx/qlnxe/qlnx_os.c b/sys/dev/qlnx/qlnxe/qlnx_os.c
index 21d39e809cc8..6665894d058d 100644
--- a/sys/dev/qlnx/qlnxe/qlnx_os.c
+++ b/sys/dev/qlnx/qlnxe/qlnx_os.c
@@ -1,8349 +1,8345 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* File: qlnx_os.c
* Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
*/
#include <sys/cdefs.h>
#include "qlnx_os.h"
#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dbg_fw_funcs.h"
#include "ecore_iov_api.h"
#include "ecore_vf_api.h"
#include "qlnx_ioctl.h"
#include "qlnx_def.h"
#include "qlnx_ver.h"
#ifdef QLNX_ENABLE_IWARP
#include "qlnx_rdma.h"
#endif /* #ifdef QLNX_ENABLE_IWARP */
#ifdef CONFIG_ECORE_SRIOV
#include <sys/nv.h>
#include <sys/iov_schema.h>
#include <dev/pci/pci_iov.h>
#endif /* #ifdef CONFIG_ECORE_SRIOV */
#include <sys/smp.h>
/*
* static functions
*/
/*
* ioctl related functions
*/
static void qlnx_add_sysctls(qlnx_host_t *ha);
/*
* main driver
*/
static void qlnx_release(qlnx_host_t *ha);
static void qlnx_fp_isr(void *arg);
static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
static void qlnx_init(void *arg);
static void qlnx_init_locked(qlnx_host_t *ha);
static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
static int qlnx_set_promisc(qlnx_host_t *ha, int enabled);
static int qlnx_set_allmulti(qlnx_host_t *ha, int enabled);
static int qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int qlnx_media_change(if_t ifp);
static void qlnx_media_status(if_t ifp, struct ifmediareq *ifmr);
static void qlnx_stop(qlnx_host_t *ha);
static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
struct mbuf **m_headp);
static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
static uint32_t qlnx_get_optics(qlnx_host_t *ha,
struct qlnx_link_output *if_link);
static int qlnx_transmit(if_t ifp, struct mbuf *mp);
static int qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp,
struct mbuf *mp);
static void qlnx_qflush(if_t ifp);
static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);
static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);
static int qlnx_nic_setup(struct ecore_dev *cdev,
struct ecore_pf_params *func_params);
static int qlnx_nic_start(struct ecore_dev *cdev);
static int qlnx_slowpath_start(qlnx_host_t *ha);
static int qlnx_slowpath_stop(qlnx_host_t *ha);
static int qlnx_init_hw(qlnx_host_t *ha);
static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
char ver_str[VER_SIZE]);
static void qlnx_unload(qlnx_host_t *ha);
static int qlnx_load(qlnx_host_t *ha);
static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
uint32_t add_mac);
static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
uint32_t len);
static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
struct qlnx_rx_queue *rxq);
static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
int hwfn_index);
static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
int hwfn_index);
static void qlnx_timer(void *arg);
static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_trigger_dump(qlnx_host_t *ha);
static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
struct qlnx_tx_queue *txq);
static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
struct qlnx_tx_queue *txq);
static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
int lro_enable);
static void qlnx_fp_taskqueue(void *context, int pending);
static void qlnx_sample_storm_stats(qlnx_host_t *ha);
static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
struct qlnx_agg_info *tpa);
static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);
static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
/*
* Hooks to the Operating Systems
*/
static int qlnx_pci_probe (device_t);
static int qlnx_pci_attach (device_t);
static int qlnx_pci_detach (device_t);
#ifndef QLNX_VF
#ifdef CONFIG_ECORE_SRIOV
static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params);
static void qlnx_iov_uninit(device_t dev);
static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params);
static void qlnx_initialize_sriov(qlnx_host_t *ha);
static void qlnx_pf_taskqueue(void *context, int pending);
static int qlnx_create_pf_taskqueues(qlnx_host_t *ha);
static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha);
static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha);
#endif /* #ifdef CONFIG_ECORE_SRIOV */
static device_method_t qlnx_pci_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, qlnx_pci_probe),
DEVMETHOD(device_attach, qlnx_pci_attach),
DEVMETHOD(device_detach, qlnx_pci_detach),
#ifdef CONFIG_ECORE_SRIOV
DEVMETHOD(pci_iov_init, qlnx_iov_init),
DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit),
DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf),
#endif /* #ifdef CONFIG_ECORE_SRIOV */
{ 0, 0 }
};
static driver_t qlnx_pci_driver = {
"ql", qlnx_pci_methods, sizeof (qlnx_host_t),
};
MODULE_VERSION(if_qlnxe,1);
DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, 0, 0);
MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);
#else
static device_method_t qlnxv_pci_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, qlnx_pci_probe),
DEVMETHOD(device_attach, qlnx_pci_attach),
DEVMETHOD(device_detach, qlnx_pci_detach),
{ 0, 0 }
};
static driver_t qlnxv_pci_driver = {
"ql", qlnxv_pci_methods, sizeof (qlnx_host_t),
};
MODULE_VERSION(if_qlnxev,1);
DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, 0, 0);
MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1);
#endif /* #ifdef QLNX_VF */
MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");
char qlnx_ver_str[VER_SIZE];
char qlnx_name_str[NAME_SIZE];
/*
* Some PCI Configuration Space Related Defines
*/
#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC 0x1077
#endif
/* 40G Adapter QLE45xxx*/
#ifndef QLOGIC_PCI_DEVICE_ID_1634
#define QLOGIC_PCI_DEVICE_ID_1634 0x1634
#endif
/* 100G Adapter QLE45xxx*/
#ifndef QLOGIC_PCI_DEVICE_ID_1644
#define QLOGIC_PCI_DEVICE_ID_1644 0x1644
#endif
/* 25G Adapter QLE45xxx*/
#ifndef QLOGIC_PCI_DEVICE_ID_1656
#define QLOGIC_PCI_DEVICE_ID_1656 0x1656
#endif
/* 50G Adapter QLE45xxx*/
#ifndef QLOGIC_PCI_DEVICE_ID_1654
#define QLOGIC_PCI_DEVICE_ID_1654 0x1654
#endif
/* 10G/25G/40G Adapter QLE41xxx*/
#ifndef QLOGIC_PCI_DEVICE_ID_8070
#define QLOGIC_PCI_DEVICE_ID_8070 0x8070
#endif
/* SRIOV Device (All Speeds) Adapter QLE41xxx*/
#ifndef QLOGIC_PCI_DEVICE_ID_8090
#define QLOGIC_PCI_DEVICE_ID_8090 0x8090
#endif
SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"qlnxe driver parameters");
/* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */
static int qlnxe_queue_count = QLNX_DEFAULT_RSS;
SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
&qlnxe_queue_count, 0, "Multi-Queue queue count");
/*
* Note on RDMA personality setting
*
* Read the personality configured in NVRAM
* If the personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and
* the configured personality in sysctl is QLNX_PERSONALITY_DEFAULT
* use the personality in NVRAM.
* Otherwise use t the personality configured in sysctl.
*
*/
#define QLNX_PERSONALITY_DEFAULT 0x0 /* use personality in NVRAM */
#define QLNX_PERSONALITY_ETH_ONLY 0x1 /* Override with ETH_ONLY */
#define QLNX_PERSONALITY_ETH_IWARP 0x2 /* Override with ETH_IWARP */
#define QLNX_PERSONALITY_ETH_ROCE 0x3 /* Override with ETH_ROCE */
#define QLNX_PERSONALITY_BITS_PER_FUNC 4
#define QLNX_PERSONALIY_MASK 0xF
/* RDMA configuration; 64bit field allows setting for 16 physical functions*/
static uint64_t qlnxe_rdma_configuration = 0x22222222;
SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
&qlnxe_rdma_configuration, 0, "RDMA Configuration");
/*
 * Return 0 when this adapter is the SRIOV VF device (id 0x8090),
 * -1 otherwise.
 */
int
qlnx_vf_device(qlnx_host_t *ha)
{

	return ((ha->device_id == QLOGIC_PCI_DEVICE_ID_8090) ? 0 : -1);
}
/*
 * Return 0 if the cached device id is one this build of the driver
 * supports (-1 otherwise).  The PF build accepts the QLE45xxx/QLE41xxx
 * physical-function ids; the VF build accepts only the SRIOV VF id.
 */
static int
qlnx_valid_device(qlnx_host_t *ha)
{
	uint16_t device_id;

	device_id = ha->device_id;

#ifndef QLNX_VF
	if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_8070))
		return 0;
#else
	if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
		return 0;
#endif /* #ifndef QLNX_VF */
	return -1;
}
#ifdef QLNX_ENABLE_IWARP
/*
 * Return 0 if this device id supports RDMA (iWARP), -1 otherwise.
 * Note: the 100G part (0x1644) is intentionally absent from the list.
 */
static int
qlnx_rdma_supported(struct qlnx_host *ha)
{
	uint16_t device_id;

	device_id = pci_get_device(ha->pci_dev);

	if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_8070))
		return (0);

	return (-1);
}
#endif /* #ifdef QLNX_ENABLE_IWARP */
/*
 * Name: qlnx_pci_probe
 * Function: Validate the PCI device to be a QLA80XX device
 */
static int
qlnx_pci_probe(device_t dev)
{
	const char *desc;

	snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
		QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
	snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");

	if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC)
		return (ENXIO);

	/* Select the device description for each supported part. */
	switch (pci_get_device(dev)) {
#ifndef QLNX_VF
	case QLOGIC_PCI_DEVICE_ID_1644:
		desc = "Qlogic 100GbE PCI CNA Adapter-Ethernet Function";
		break;

	case QLOGIC_PCI_DEVICE_ID_1634:
		desc = "Qlogic 40GbE PCI CNA Adapter-Ethernet Function";
		break;

	case QLOGIC_PCI_DEVICE_ID_1656:
		desc = "Qlogic 25GbE PCI CNA Adapter-Ethernet Function";
		break;

	case QLOGIC_PCI_DEVICE_ID_1654:
		desc = "Qlogic 50GbE PCI CNA Adapter-Ethernet Function";
		break;

	case QLOGIC_PCI_DEVICE_ID_8070:
		desc = "Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)"
			" Adapter-Ethernet Function";
		break;
#else
	case QLOGIC_PCI_DEVICE_ID_8090:
		desc = "Qlogic SRIOV PCI CNA (AH) "
			"Adapter-Ethernet Function";
		break;
#endif /* #ifndef QLNX_VF */

	default:
		return (ENXIO);
	}

	device_set_descf(dev, "%s v%d.%d.%d", desc,
	    QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);

#ifdef QLNX_ENABLE_IWARP
	qlnx_rdma_init();
#endif /* #ifdef QLNX_ENABLE_IWARP */

	return (BUS_PROBE_DEFAULT);
}
/*
 * qlnx_num_tx_compl
 *	Returns the number of tx BDs the hardware has consumed but the
 *	driver has not yet reclaimed (hw consumer minus chain consumer,
 *	modulo 2^16).
 */
static uint16_t
qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_tx_queue *txq)
{
	uint16_t hw_idx = le16toh(*txq->hw_cons_ptr);
	uint16_t sw_idx = ecore_chain_get_cons_idx(&txq->tx_pbl);

	return (hw_idx - sw_idx);
}
/*
 * qlnx_sp_intr
 *	Slow-path interrupt handler: defers the actual work to the
 *	taskqueue owned by the hw function that raised the interrupt.
 */
static void
qlnx_sp_intr(void *arg)
{
	struct ecore_hwfn *p_hwfn = arg;
	qlnx_host_t *ha;
	int idx;

	if (p_hwfn == NULL) {
		printf("%s: spurious slowpath intr\n", __func__);
		return;
	}

	ha = (qlnx_host_t *)p_hwfn->p_dev;

	QL_DPRINT2(ha, "enter\n");

	/* Enqueue on the taskqueue matching this hw function. */
	for (idx = 0; idx < ha->cdev.num_hwfns; idx++) {
		if (&ha->cdev.hwfns[idx] != p_hwfn)
			continue;
		taskqueue_enqueue(ha->sp_taskqueue[idx], &ha->sp_task[idx]);
		break;
	}

	QL_DPRINT2(ha, "exit\n");
}
/*
 * qlnx_sp_taskqueue
 *	Taskqueue body for slow-path processing; runs qlnx_sp_isr() for
 *	the hw function passed as context.
 */
static void
qlnx_sp_taskqueue(void *context, int pending)
{
	struct ecore_hwfn *p_hwfn = context;

	if (p_hwfn == NULL)
		return;

	qlnx_sp_isr(p_hwfn);
}
/*
 * qlnx_create_sp_taskqueues
 *	Create one slow-path taskqueue, with a single worker thread, per
 *	hw function.  Returns 0 on success, -1 if a taskqueue could not be
 *	allocated (caller unwinds via qlnx_release()).
 */
static int
qlnx_create_sp_taskqueues(qlnx_host_t *ha)
{
	int i;
	/* fix: was uint8_t[]; snprintf()/taskqueue_create() take char *. */
	char tq_name[32];

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

		/* snprintf() NUL-terminates; no bzero needed. */
		snprintf(tq_name, sizeof(tq_name), "ql_sp_tq_%d", i);

		TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);

		ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT,
			taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);

		if (ha->sp_taskqueue[i] == NULL)
			return (-1);

		taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
			tq_name);

		QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
	}

	return (0);
}
/*
 * qlnx_destroy_sp_taskqueues
 *	Drain and free every slow-path taskqueue.  The pointer is cleared
 *	after freeing (matching qlnx_destroy_fp_taskqueues()) so a second
 *	teardown pass cannot double-free.
 */
static void
qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
{
	int i;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sp_taskqueue[i] != NULL) {
			taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
			taskqueue_free(ha->sp_taskqueue[i]);
			ha->sp_taskqueue[i] = NULL;	/* guard re-entry */
		}
	}

	return;
}
/*
 * qlnx_fp_taskqueue
 *	Fast-path taskqueue body: if the interface is running and the
 *	buf_ring has queued packets, try to take the tx mutex and push
 *	them out via qlnx_transmit_locked().  If the mutex is busy the
 *	current holder will drain the ring instead.
 */
static void
qlnx_fp_taskqueue(void *context, int pending)
{
	struct qlnx_fastpath *fp;
	qlnx_host_t *ha;
	if_t ifp;
#ifdef QLNX_TRACE_PERF_DATA
	/* fix: these were referenced below but never declared, breaking
	 * the build when QLNX_TRACE_PERF_DATA is enabled. */
	uint64_t tx_pkts = 0, tx_compl = 0;
#endif

	fp = context;

	if (fp == NULL)
		return;

	ha = (qlnx_host_t *)fp->edev;

	ifp = ha->ifp;

	if(if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
		if (!drbr_empty(ifp, fp->tx_br)) {
			if(mtx_trylock(&fp->tx_mtx)) {
#ifdef QLNX_TRACE_PERF_DATA
				tx_pkts = fp->tx_pkts_transmitted;
				tx_compl = fp->tx_pkts_completed;
#endif

				qlnx_transmit_locked(ifp, fp, NULL);

#ifdef QLNX_TRACE_PERF_DATA
				fp->tx_pkts_trans_fp +=
					(fp->tx_pkts_transmitted - tx_pkts);
				fp->tx_pkts_compl_fp +=
					(fp->tx_pkts_completed - tx_compl);
#endif
				mtx_unlock(&fp->tx_mtx);
			}
		}
	}

	QL_DPRINT2(ha, "exit \n");
	return;
}
/*
 * qlnx_create_fp_taskqueues
 *	Create one fast-path taskqueue, with a single worker thread, per
 *	RSS queue.  Returns 0 on success, -1 on allocation failure
 *	(caller unwinds via qlnx_release()).
 */
static int
qlnx_create_fp_taskqueues(qlnx_host_t *ha)
{
	int i;
	/* fix: was uint8_t[]; snprintf()/taskqueue_create() take char *. */
	char tq_name[32];
	struct qlnx_fastpath *fp;

	for (i = 0; i < ha->num_rss; i++) {
		fp = &ha->fp_array[i];

		/* snprintf() NUL-terminates; no bzero needed. */
		snprintf(tq_name, sizeof(tq_name), "ql_fp_tq_%d", i);

		TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);

		fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
					taskqueue_thread_enqueue,
					&fp->fp_taskqueue);

		if (fp->fp_taskqueue == NULL)
			return (-1);

		taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
			tq_name);

		QL_DPRINT1(ha, "%p\n",fp->fp_taskqueue);
	}

	return (0);
}
/*
 * qlnx_destroy_fp_taskqueues
 *	Drain and free every fast-path taskqueue, clearing each pointer
 *	so teardown is idempotent.
 */
static void
qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
{
	struct qlnx_fastpath *fp;
	int idx;

	for (idx = 0; idx < ha->num_rss; idx++) {
		fp = &ha->fp_array[idx];

		if (fp->fp_taskqueue == NULL)
			continue;

		taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
		taskqueue_free(fp->fp_taskqueue);
		fp->fp_taskqueue = NULL;
	}
}
/*
 * qlnx_drain_fp_taskqueues
 *	Wait for every fast-path task to finish.  Called with the softc
 *	lock held; the lock is dropped around each drain so the task
 *	bodies are not blocked against it.
 */
static void
qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
{
	struct qlnx_fastpath *fp;
	int idx;

	for (idx = 0; idx < ha->num_rss; idx++) {
		fp = &ha->fp_array[idx];

		if (fp->fp_taskqueue == NULL)
			continue;

		/* Release the softc lock while the task drains. */
		QLNX_UNLOCK(ha);
		taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
		QLNX_LOCK(ha);
	}
}
/*
 * qlnx_get_params
 *	Validate the hw.qlnxe.queue_count tunable; values outside
 *	[0, QLNX_MAX_RSS] fall back to 0 (auto-select later in attach).
 */
static void
qlnx_get_params(qlnx_host_t *ha)
{
	if ((qlnxe_queue_count >= 0) && (qlnxe_queue_count <= QLNX_MAX_RSS))
		return;

	device_printf(ha->pci_dev, "invalid queue_count value (%d)\n",
	    qlnxe_queue_count);
	qlnxe_queue_count = 0;
}
/*
 * Error-recovery task, scheduled on ha->err_taskqueue after a fatal
 * hardware/firmware error.  Performs a full stop / slowpath restart /
 * re-init cycle and re-arms the periodic timer.  The ordering below
 * (stop, rdma remove, slowpath bounce, rdma add, init) is deliberate.
 */
static void
qlnx_error_recovery_taskqueue(void *context, int pending)
{
	qlnx_host_t *ha;

	ha = context;

	QL_DPRINT2(ha, "enter\n");

	/* Quiesce the interface under the softc lock. */
	QLNX_LOCK(ha);
	qlnx_stop(ha);
	QLNX_UNLOCK(ha);

#ifdef QLNX_ENABLE_IWARP
	qlnx_rdma_dev_remove(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */

	/* Bounce the slowpath to reset firmware state. */
	qlnx_slowpath_stop(ha);
	qlnx_slowpath_start(ha);

#ifdef QLNX_ENABLE_IWARP
	qlnx_rdma_dev_add(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */

	qlnx_init(ha);

	/* Restart the one-second housekeeping timer. */
	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);

	QL_DPRINT2(ha, "exit\n");

	return;
}
/*
 * qlnx_create_error_recovery_taskqueue
 *	Create the single-threaded taskqueue that runs
 *	qlnx_error_recovery_taskqueue().  Returns 0 on success, -1 on
 *	allocation failure.
 */
static int
qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha)
{
	/* fix: was uint8_t[]; snprintf()/taskqueue_create() take char *. */
	char tq_name[32];

	/* snprintf() NUL-terminates; no bzero needed. */
	snprintf(tq_name, sizeof(tq_name), "ql_err_tq");

	TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha);

	ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
		taskqueue_thread_enqueue, &ha->err_taskqueue);

	if (ha->err_taskqueue == NULL)
		return (-1);

	taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name);

	QL_DPRINT1(ha, "%p\n",ha->err_taskqueue);

	return (0);
}
/*
 * qlnx_destroy_error_recovery_taskqueue
 *	Drain and free the error-recovery taskqueue; the pointer is
 *	always cleared so the call is idempotent.
 */
static void
qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha)
{
	struct taskqueue *tq = ha->err_taskqueue;

	if (tq != NULL) {
		taskqueue_drain(tq, &ha->err_task);
		taskqueue_free(tq);
	}

	ha->err_taskqueue = NULL;
}
/*
 * Name: qlnx_pci_attach
 * Function: attaches the device to the operating system
 *
 * Maps the register, doorbell and MSI-X BARs, prepares the ecore layer,
 * sizes and allocates MSI-X vectors (slowpath + fastpath + optional
 * RDMA), wires up interrupts/taskqueues, starts the slowpath and
 * creates the ifnet, sysctls and ioctl device.  All failures unwind
 * through qlnx_release().
 */
static int
qlnx_pci_attach(device_t dev)
{
	qlnx_host_t *ha = NULL;
	uint32_t rsrc_len_reg __unused = 0;
	uint32_t rsrc_len_dbells = 0;
	uint32_t rsrc_len_msix __unused = 0;
	int i;
	uint32_t mfw_ver;
	uint32_t num_sp_msix = 0;
	uint32_t num_rdma_irqs = 0;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	memset(ha, 0, sizeof (qlnx_host_t));

	ha->device_id = pci_get_device(dev);

	if (qlnx_valid_device(ha) != 0) {
		device_printf(dev, "device is not valid device\n");
		return (ENXIO);
	}
	ha->pci_func = pci_get_function(dev);

	ha->pci_dev = dev;

	sx_init(&ha->hw_lock, "qlnx_hw_lock");

	ha->flags.lock_init = 1;

	pci_enable_busmaster(dev);

	/*
	 * map the PCI BARs
	 */

	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

	if (ha->pci_reg == NULL) {
		device_printf(dev, "unable to map BAR0\n");
		goto qlnx_pci_attach_err;
	}

	rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	/* Doorbell BAR (second 64-bit BAR, register index 2). */
	ha->dbells_rid = PCIR_BAR(2);
	rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev,
					SYS_RES_MEMORY,
					ha->dbells_rid);
	if (rsrc_len_dbells) {
		ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					&ha->dbells_rid, RF_ACTIVE);

		if (ha->pci_dbells == NULL) {
			device_printf(dev, "unable to map BAR1\n");
			goto qlnx_pci_attach_err;
		}
		ha->dbells_phys_addr = (uint64_t)
			bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);

		ha->dbells_size = rsrc_len_dbells;
	} else {
		/* Only the VF may legitimately have no doorbell BAR
		 * (it falls back to the register BAR in qlnx_init_hw()). */
		if (qlnx_vf_device(ha) != 0) {
			device_printf(dev, " BAR1 size is zero\n");
			goto qlnx_pci_attach_err;
		}
	}

	/* MSI-X table BAR (third 64-bit BAR, register index 4). */
	ha->msix_rid = PCIR_BAR(4);
	ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->msix_rid, RF_ACTIVE);

	if (ha->msix_bar == NULL) {
		device_printf(dev, "unable to map BAR2\n");
		goto qlnx_pci_attach_err;
	}

	rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->msix_rid);

	ha->dbg_level = 0x0000;

	QL_DPRINT1(ha, "\n\t\t\t"
		"pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
		"\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
		"\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
		" msix_avail = 0x%x "
		"\n\t\t\t[ncpus = %d]\n",
		ha->pci_dev, ha->pci_reg, rsrc_len_reg,
		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
		mp_ncpus);
	/*
	 * allocate dma tags
	 */

	if (qlnx_alloc_parent_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_alloc_tx_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_alloc_rx_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_init_hw(ha) != 0)
		goto qlnx_pci_attach_err;

	ha->flags.hw_init = 1;

	qlnx_get_params(ha);

	/* 100G part: force the maximum RSS queue count when the tunable
	 * asked for the default. */
	if((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) &&
		(qlnxe_queue_count == QLNX_DEFAULT_RSS)) {
		qlnxe_queue_count = QLNX_MAX_RSS;
	}

	/*
	 * Allocate MSI-x vectors
	 */
	/* PF path (qlnx_vf_device() != 0 means "not the VF"). */
	if (qlnx_vf_device(ha) != 0) {
		if (qlnxe_queue_count == 0)
			ha->num_rss = QLNX_DEFAULT_RSS;
		else
			ha->num_rss = qlnxe_queue_count;

		num_sp_msix = ha->cdev.num_hwfns;
	} else {
		/* VF: queue counts come from the PF via ecore. */
		uint8_t max_rxq;
		uint8_t max_txq;

		ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq);
		/* NOTE(review): this second call also queries rxqs into
		 * max_txq — looks like it was meant to be the txq query;
		 * confirm against the ecore VF API. */
		ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_txq);

		if (max_rxq < max_txq)
			ha->num_rss = max_rxq;
		else
			ha->num_rss = max_txq;

		if (ha->num_rss > QLNX_MAX_VF_RSS)
			ha->num_rss = QLNX_MAX_VF_RSS;

		num_sp_msix = 0;
	}

	if (ha->num_rss > mp_ncpus)
		ha->num_rss = mp_ncpus;

	ha->num_tc = QLNX_MAX_TC;

	ha->msix_count = pci_msix_count(dev);

#ifdef QLNX_ENABLE_IWARP

	num_rdma_irqs = qlnx_rdma_get_num_irqs(ha);

#endif /* #ifdef QLNX_ENABLE_IWARP */

	/* Need at least one fastpath vector on top of slowpath + RDMA. */
	if (!ha->msix_count ||
		(ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qlnx_pci_attach_err;
	}

	/* Either cap the vector count to what we use, or shrink num_rss
	 * to fit the vectors available. */
	if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs))
		ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs;
	else
		ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs);

	QL_DPRINT1(ha, "\n\t\t\t"
		"pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
		"\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
		"\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
		" msix_avail = 0x%x msix_alloc = 0x%x"
		"\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
		 ha->pci_reg, rsrc_len_reg,
		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
		ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qlnx_pci_attach_err;
	}

	/*
	 * Initialize slow path interrupt and task queue
	 */

	if (num_sp_msix) {
		if (qlnx_create_sp_taskqueues(ha) != 0)
			goto qlnx_pci_attach_err;

		for (i = 0; i < ha->cdev.num_hwfns; i++) {
			struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

			/* MSI-X rids are 1-based; slowpath uses 1..num_hwfns. */
			ha->sp_irq_rid[i] = i + 1;
			ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&ha->sp_irq_rid[i],
						(RF_ACTIVE | RF_SHAREABLE));
			if (ha->sp_irq[i] == NULL) {
				device_printf(dev,
					"could not allocate mbx interrupt\n");
				goto qlnx_pci_attach_err;
			}

			if (bus_setup_intr(dev, ha->sp_irq[i],
				(INTR_TYPE_NET | INTR_MPSAFE), NULL,
				qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
				device_printf(dev,
					"could not setup slow path interrupt\n");
				goto qlnx_pci_attach_err;
			}

			QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
				" sp_irq %p sp_handle %p\n", p_hwfn,
				ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
		}
	}

	/*
	 * initialize fast path interrupt
	 */
	if (qlnx_create_fp_taskqueues(ha) != 0)
		goto qlnx_pci_attach_err;

	for (i = 0; i < ha->num_rss; i++) {
		ha->irq_vec[i].rss_idx = i;
		ha->irq_vec[i].ha = ha;
		/* Fastpath rids follow the slowpath ones. */
		ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
					&ha->irq_vec[i].irq_rid,
					(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev,
				"could not allocate interrupt[%d] irq_rid = %d\n",
				i, ha->irq_vec[i].irq_rid);
			goto qlnx_pci_attach_err;
		}

		if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
			device_printf(dev, "could not allocate tx_br[%d]\n", i);
			goto qlnx_pci_attach_err;
		}
	}

	/* PF-only: timer, firmware dump buffers and error recovery. */
	if (qlnx_vf_device(ha) != 0) {
		callout_init(&ha->qlnx_callout, 1);
		ha->flags.callout_init = 1;

		for (i = 0; i < ha->cdev.num_hwfns; i++) {
			if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
				goto qlnx_pci_attach_err;
			if (ha->grcdump_size[i] == 0)
				goto qlnx_pci_attach_err;

			/* sizes are reported in dwords; convert to bytes */
			ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
			QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
				i, ha->grcdump_size[i]);

			ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
			if (ha->grcdump[i] == NULL) {
				device_printf(dev, "grcdump alloc[%d] failed\n", i);
				goto qlnx_pci_attach_err;
			}

			if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
				goto qlnx_pci_attach_err;
			if (ha->idle_chk_size[i] == 0)
				goto qlnx_pci_attach_err;

			ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
			QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
				i, ha->idle_chk_size[i]);

			ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);
			if (ha->idle_chk[i] == NULL) {
				device_printf(dev, "idle_chk alloc failed\n");
				goto qlnx_pci_attach_err;
			}
		}

		if (qlnx_create_error_recovery_taskqueue(ha) != 0)
			goto qlnx_pci_attach_err;
	}

	if (qlnx_slowpath_start(ha) != 0)
		goto qlnx_pci_attach_err;
	else
		ha->flags.slowpath_start = 1;

	if (qlnx_vf_device(ha) != 0) {
		/* On flash/MFW query failure: trigger a dump, then fall
		 * through to err0 below, which still creates the ioctl
		 * device (so the dump can be retrieved) and returns 0. */
		if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
			qlnx_mdelay(__func__, 1000);
			qlnx_trigger_dump(ha);

			goto qlnx_pci_attach_err0;
		}

		if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
			qlnx_mdelay(__func__, 1000);
			qlnx_trigger_dump(ha);

			goto qlnx_pci_attach_err0;
		}
	} else {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
		ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL);
	}

	snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
		((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
		((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
	snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
		FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
		FW_ENGINEERING_VERSION);

	QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
		 ha->stormfw_ver, ha->mfw_ver);

	qlnx_init_ifnet(dev, ha);

	/*
	 * add sysctls
	 */
	qlnx_add_sysctls(ha);

qlnx_pci_attach_err0:
        /*
	 * create ioctl device interface
	 */
	if (qlnx_vf_device(ha) != 0) {
		if (qlnx_make_cdev(ha)) {
			device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
			goto qlnx_pci_attach_err;
		}

#ifdef QLNX_ENABLE_IWARP
		qlnx_rdma_dev_add(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */
	}

#ifndef QLNX_VF
#ifdef CONFIG_ECORE_SRIOV

	if (qlnx_vf_device(ha) != 0)
		qlnx_initialize_sriov(ha);

#endif /* #ifdef CONFIG_ECORE_SRIOV */
#endif /* #ifdef QLNX_VF */

	QL_DPRINT2(ha, "success\n");

        return (0);

qlnx_pci_attach_err:

	qlnx_release(ha);

	return (ENXIO);
}
/*
 * Name: qlnx_pci_detach
 * Function: Unhooks the device from the operating system
 *
 * Refuses to detach while SR-IOV VFs or the RDMA device are still in
 * use; otherwise stops the interface and releases all resources via
 * qlnx_release().
 */
static int
qlnx_pci_detach(device_t dev)
{
	qlnx_host_t *ha = NULL;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (ENOMEM);
	}

	/* PF-only checks (qlnx_vf_device() returns 0 only for the VF). */
	if (qlnx_vf_device(ha) != 0) {
#ifdef CONFIG_ECORE_SRIOV
		int ret;

		/* Fails (and aborts the detach) while VFs are attached. */
		ret = pci_iov_detach(dev);
		if (ret) {
                        device_printf(dev, "%s: SRIOV in use\n", __func__);
			return (ret);
		}

#endif /* #ifdef CONFIG_ECORE_SRIOV */

#ifdef QLNX_ENABLE_IWARP
		if (qlnx_rdma_dev_remove(ha) != 0)
			return (EBUSY);
#endif /* #ifdef QLNX_ENABLE_IWARP */
	}

	QLNX_LOCK(ha);
	qlnx_stop(ha);
	QLNX_UNLOCK(ha);

	qlnx_release(ha);

        return (0);
}
#ifdef QLNX_ENABLE_IWARP
/*
 * qlnx_get_personality
 *	Extract this PCI function's 4-bit personality nibble from the
 *	hw.qlnxe.rdma_configuration tunable.
 */
static uint8_t
qlnx_get_personality(uint8_t pci_func)
{
	uint64_t cfg = qlnxe_rdma_configuration;
	unsigned int shift = pci_func * QLNX_PERSONALITY_BITS_PER_FUNC;

	return ((uint8_t)((cfg >> shift) & QLNX_PERSONALIY_MASK));
}
/*
 * qlnx_set_personality
 *	Translate the configured personality nibble for this function
 *	into the corresponding ECORE_PCI_* value in ha->personality.
 *	Unknown values leave ha->personality untouched.
 */
static void
qlnx_set_personality(qlnx_host_t *ha)
{
	switch (qlnx_get_personality(ha->pci_func)) {
	case QLNX_PERSONALITY_DEFAULT:
		device_printf(ha->pci_dev, "%s: DEFAULT\n",
			__func__);
		ha->personality = ECORE_PCI_DEFAULT;
		break;

	case QLNX_PERSONALITY_ETH_ONLY:
		device_printf(ha->pci_dev, "%s: ETH_ONLY\n",
			__func__);
		ha->personality = ECORE_PCI_ETH;
		break;

	case QLNX_PERSONALITY_ETH_IWARP:
		device_printf(ha->pci_dev, "%s: ETH_IWARP\n",
			__func__);
		ha->personality = ECORE_PCI_ETH_IWARP;
		break;

	case QLNX_PERSONALITY_ETH_ROCE:
		device_printf(ha->pci_dev, "%s: ETH_ROCE\n",
			__func__);
		ha->personality = ECORE_PCI_ETH_ROCE;
		break;

	default:
		/* out-of-range nibble: keep the caller's setting */
		break;
	}
}
#endif /* #ifdef QLNX_ENABLE_IWARP */
/*
 * qlnx_init_hw
 *	Prepare the ecore layer: wire up debug levels, map the register
 *	and doorbell views into the ecore dev, choose the PCI personality
 *	and run ecore_hw_prepare().
 *
 *	Always returns 0 — rval is never modified.  NOTE(review): the
 *	status of ecore_hw_prepare() is discarded here; confirm failures
 *	are caught later (e.g. at slowpath start).
 */
static int
qlnx_init_hw(qlnx_host_t *ha)
{
	int rval = 0;
	struct ecore_hw_prepare_params params;

	ha->cdev.ha = ha;
	ecore_init_struct(&ha->cdev);

	/* Debug-verbosity knobs, kept for bring-up use:
	ha->dp_module = ECORE_MSG_PROBE |
				ECORE_MSG_INTR |
				ECORE_MSG_SP |
				ECORE_MSG_LINK |
				ECORE_MSG_SPQ |
				ECORE_MSG_RDMA;
	ha->dp_level = ECORE_LEVEL_VERBOSE;*/
	//ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2;
	ha->dp_level = ECORE_LEVEL_NOTICE;
	//ha->dp_level = ECORE_LEVEL_VERBOSE;

	ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);

	ha->cdev.regview = ha->pci_reg;

	ha->personality = ECORE_PCI_DEFAULT;

	/* qlnx_vf_device() == 0 means this is the SR-IOV VF. */
	if (qlnx_vf_device(ha) == 0) {
		ha->cdev.b_is_vf = true;

		if (ha->pci_dbells != NULL) {
			ha->cdev.doorbells = ha->pci_dbells;
			ha->cdev.db_phys_addr = ha->dbells_phys_addr;
			ha->cdev.db_size = ha->dbells_size;
		} else {
			/* VF without a doorbell BAR falls back to the
			 * register BAR. */
			ha->pci_dbells = ha->pci_reg;
		}
	} else {
		ha->cdev.doorbells = ha->pci_dbells;
		ha->cdev.db_phys_addr = ha->dbells_phys_addr;
		ha->cdev.db_size = ha->dbells_size;

#ifdef QLNX_ENABLE_IWARP

		/* RDMA-capable parts may override the personality from
		 * the hw.qlnxe.rdma_configuration tunable. */
		if (qlnx_rdma_supported(ha) == 0)
			qlnx_set_personality(ha);

#endif /* #ifdef QLNX_ENABLE_IWARP */
	}

	QL_DPRINT2(ha, "%s: %s\n", __func__,
		(ha->personality == ECORE_PCI_ETH_IWARP ? "iwarp": "ethernet"));

	bzero(&params, sizeof (struct ecore_hw_prepare_params));

	params.personality = ha->personality;

	params.drv_resc_alloc = false;
	params.chk_reg_fifo = false;
	params.initiate_pf_flr = true;
	params.epoch = 0;

	ecore_hw_prepare(&ha->cdev, &params);

	qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);

	QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n",
		ha, &ha->cdev, &ha->cdev.hwfns[0]);

	return (rval);
}
/*
 * qlnx_release
 *	Common teardown for detach and failed attach.  Every step checks
 *	its corresponding flag/pointer so the function is safe to call
 *	from any point of a partially-completed attach.  The order
 *	(dump buffers, callout, slowpath, hw, cdev, ifnet, dma tags,
 *	interrupts, taskqueues, MSI-X, lock, BARs) is deliberate.
 */
static void
qlnx_release(qlnx_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

	QL_DPRINT2(ha, "enter\n");

	/* Free firmware-dump buffers for every hw function slot. */
	for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
		if (ha->idle_chk[i] != NULL) {
			free(ha->idle_chk[i], M_QLNXBUF);
			ha->idle_chk[i] = NULL;
		}

		if (ha->grcdump[i] != NULL) {
			free(ha->grcdump[i], M_QLNXBUF);
			ha->grcdump[i] = NULL;
		}
	}

	if (ha->flags.callout_init)
		callout_drain(&ha->qlnx_callout);

	if (ha->flags.slowpath_start) {
		qlnx_slowpath_stop(ha);
	}

	if (ha->flags.hw_init)
		ecore_hw_remove(&ha->cdev);

	qlnx_del_cdev(ha);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	qlnx_free_tx_dma_tag(ha);

	qlnx_free_rx_dma_tag(ha);

	qlnx_free_parent_dma_tag(ha);

	/* PF-only (the VF never created the error-recovery taskqueue). */
	if (qlnx_vf_device(ha) != 0) {
		qlnx_destroy_error_recovery_taskqueue(ha);
	}

	/* Tear down fast-path interrupts and buf_rings. */
	for (i = 0; i < ha->num_rss; i++) {
		struct qlnx_fastpath *fp = &ha->fp_array[i];

		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
					ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
		}

		qlnx_free_tx_br(ha, fp);
	}
	qlnx_destroy_fp_taskqueues(ha);

	/* Tear down slow-path interrupts. */
	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sp_handle[i])
			(void)bus_teardown_intr(dev, ha->sp_irq[i],
				ha->sp_handle[i]);

		if (ha->sp_irq[i])
			(void) bus_release_resource(dev, SYS_RES_IRQ,
				ha->sp_irq_rid[i], ha->sp_irq[i]);
	}

	qlnx_destroy_sp_taskqueues(ha);

	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->flags.lock_init) {
		sx_destroy(&ha->hw_lock);
	}

	/* Finally, release the mapped BARs. */
	if (ha->pci_reg)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

	if (ha->dbells_size && ha->pci_dbells)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
				ha->pci_dbells);

	if (ha->msix_bar)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
				ha->msix_bar);

	QL_DPRINT2(ha, "exit\n");
	return;
}
/*
 * qlnx_trigger_dump
 *	Mark the interface down, enter error-recovery mode and collect a
 *	GRC dump plus idle check for every hw function.  No-op on the VF
 *	(qlnx_vf_device() == 0), which cannot collect dumps here.
 */
static void
qlnx_trigger_dump(qlnx_host_t *ha)
{
	int idx;

	if (ha->ifp != NULL)
		if_setdrvflagbits(ha->ifp, 0,
		    (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));

	QL_DPRINT2(ha, "enter\n");

	if (qlnx_vf_device(ha) == 0)
		return;

	ha->error_recovery = 1;

	for (idx = 0; idx < ha->cdev.num_hwfns; idx++) {
		qlnx_grc_dump(ha, &ha->grcdump_dwords[idx], idx);
		qlnx_idle_chk(ha, &ha->idle_chk_dwords[idx], idx);
	}

	QL_DPRINT2(ha, "exit\n");
}
static int
qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
{
int err, ret = 0;
qlnx_host_t *ha;
err = sysctl_handle_int(oidp, &ret, 0, req);
if (err || !req->newptr)
return (err);
if (ret == 1) {
ha = (qlnx_host_t *)arg1;
qlnx_trigger_dump(ha);
}
return (err);
}
static int
qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)
{
int err, i, ret = 0, usecs = 0;
qlnx_host_t *ha;
struct ecore_hwfn *p_hwfn;
struct qlnx_fastpath *fp;
err = sysctl_handle_int(oidp, &usecs, 0, req);
if (err || !req->newptr || !usecs || (usecs > 255))
return (err);
ha = (qlnx_host_t *)arg1;
if (qlnx_vf_device(ha) == 0)
return (-1);
for (i = 0; i < ha->num_rss; i++) {
p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
fp = &ha->fp_array[i];
if (fp->txq[0]->handle != NULL) {
ret = ecore_set_queue_coalesce(p_hwfn, 0,
(uint16_t)usecs, fp->txq[0]->handle);
}
}
if (!ret)
ha->tx_coalesce_usecs = (uint8_t)usecs;
return (err);
}
static int
qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)
{
int err, i, ret = 0, usecs = 0;
qlnx_host_t *ha;
struct ecore_hwfn *p_hwfn;
struct qlnx_fastpath *fp;
err = sysctl_handle_int(oidp, &usecs, 0, req);
if (err || !req->newptr || !usecs || (usecs > 255))
return (err);
ha = (qlnx_host_t *)arg1;
if (qlnx_vf_device(ha) == 0)
return (-1);
for (i = 0; i < ha->num_rss; i++) {
p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
fp = &ha->fp_array[i];
if (fp->rxq->handle != NULL) {
ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
0, fp->rxq->handle);
}
}
if (!ret)
ha->rx_coalesce_usecs = (uint8_t)usecs;
return (err);
}
/*
 * qlnx_add_sp_stats_sysctls
 *	Create the dev.ql.N.spstat subtree and publish the slow-path
 *	interrupt counter under it.
 */
static void
qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(ha->pci_dev);
	struct sysctl_oid *node;

	node = SYSCTL_ADD_NODE(ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)),
	    OID_AUTO, "spstat", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "spstat");

	SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(node),
		OID_AUTO, "sp_interrupts",
		CTLFLAG_RD, &ha->sp_interrupts,
		"No. of slowpath interrupts");
}
/*
 * qlnx_add_fp_stats_sysctls
 *	Create the dev.ql.N.fpstat.<queue> subtree for every RSS queue
 *	and publish the per-queue tx, LRO/TPA and rx counters under it.
 */
static void
qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid_list *node_children;
	struct sysctl_oid *ctx_oid;
	int i, j;
	/* fix: was uint8_t[]; snprintf() and the sysctl name argument
	 * take char *.  snprintf() NUL-terminates, so no bzero needed. */
	char name_str[16];

	ctx = device_get_sysctl_ctx(ha->pci_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "fpstat");
	children = SYSCTL_CHILDREN(ctx_oid);

	for (i = 0; i < ha->num_rss; i++) {
		snprintf(name_str, sizeof(name_str), "%d", i);

		ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
		node_children = SYSCTL_CHILDREN(ctx_oid);

		/* Tx Related */

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_processed",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
			"No. of packets processed for transmission");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_freed",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
			"No. of freed packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_transmitted",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
			"No. of transmitted packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_completed",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
			"No. of transmit completions");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_non_tso_pkts",
			CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts,
			"No. of non LSO transmited packets");

#ifdef QLNX_TRACE_PERF_DATA
		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_trans_ctx",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx,
			"No. of transmitted packets in transmit context");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_compl_ctx",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx,
			"No. of transmit completions in transmit context");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_trans_fp",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp,
			"No. of transmitted packets in taskqueue");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_compl_fp",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp,
			"No. of transmit completions in taskqueue");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_compl_intr",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr,
			"No. of transmit completions in interrupt ctx");
#endif

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_tso_pkts",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts,
			"No. of LSO transmited packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_lso_wnd_min_len",
			CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
			"tx_lso_wnd_min_len");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_defrag",
			CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
			"tx_defrag");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_nsegs_gt_elem_left",
			CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
			"tx_nsegs_gt_elem_left");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_max_nsegs",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
			ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_min_nsegs",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
			ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_max_pkt_len",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
			ha->fp_array[i].tx_tso_max_pkt_len,
			"tx_tso_max_pkt_len");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_min_pkt_len",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
			ha->fp_array[i].tx_tso_min_pkt_len,
			"tx_tso_min_pkt_len");

		/* Histogram of segment counts per transmitted packet. */
		for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {
			snprintf(name_str, sizeof(name_str),
				"tx_pkts_nseg_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
				OID_AUTO, name_str, CTLFLAG_RD,
				&ha->fp_array[i].tx_pkts[j], name_str);
		}

#ifdef QLNX_TRACE_PERF_DATA
		for (j = 0; j < 18; j++) {
			snprintf(name_str, sizeof(name_str),
				"tx_pkts_hist_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
				OID_AUTO, name_str, CTLFLAG_RD,
				&ha->fp_array[i].tx_pkts_hist[j], name_str);
		}
		for (j = 0; j < 5; j++) {
			snprintf(name_str, sizeof(name_str),
				"tx_comInt_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
				OID_AUTO, name_str, CTLFLAG_RD,
				&ha->fp_array[i].tx_comInt[j], name_str);
		}
		for (j = 0; j < 18; j++) {
			snprintf(name_str, sizeof(name_str),
				"tx_pkts_q_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
				OID_AUTO, name_str, CTLFLAG_RD,
				&ha->fp_array[i].tx_pkts_q[j], name_str);
		}
#endif

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_nsegs_gt_elem_left",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
			"err_tx_nsegs_gt_elem_left");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_dmamap_create",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
			"err_tx_dmamap_create");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_defrag_dmamap_load",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
			"err_tx_defrag_dmamap_load");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_non_tso_max_seg",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
			"err_tx_non_tso_max_seg");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_dmamap_load",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
			"err_tx_dmamap_load");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_defrag",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
			"err_tx_defrag");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_free_pkt_null",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
			"err_tx_free_pkt_null");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_cons_idx_conflict",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
			"err_tx_cons_idx_conflict");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_64",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
			"lro_cnt_64");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_128",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
			"lro_cnt_128");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_256",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
			"lro_cnt_256");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_512",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
			"lro_cnt_512");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_1024",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
			"lro_cnt_1024");

		/* Rx Related */

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "rx_pkts",
			CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
			"No. of received packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tpa_start",
			CTLFLAG_RD, &ha->fp_array[i].tpa_start,
			"No. of tpa_start packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tpa_cont",
			CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
			"No. of tpa_cont packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tpa_end",
			CTLFLAG_RD, &ha->fp_array[i].tpa_end,
			"No. of tpa_end packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_m_getcl",
			CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
			"err_m_getcl");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_m_getjcl",
			CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
			"err_m_getjcl");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_rx_hw_errors",
			CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
			"err_rx_hw_errors");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_rx_alloc_errors",
			CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
			"err_rx_alloc_errors");
	}

	return;
}
/*
 * qlnx_add_hw_stats_sysctls
 *	Exports the firmware/MAC hardware counters as read-only 64-bit
 *	sysctls under dev.qlnx.<unit>.hwstat.  Table-driven rewrite of the
 *	original open-coded SYSCTL_ADD_QUAD() sequence: same node, same leaf
 *	names, same counters, same descriptions, same registration order.
 */
static void
qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
{
	struct sysctl_ctx_list	*ctx;
	struct sysctl_oid_list	*children;
	struct sysctl_oid	*ctx_oid;
	size_t			i;

	/*
	 * One row per exported counter: sysctl leaf name, address of the
	 * 64-bit counter, and an optional description (NULL => reuse name).
	 * The table lives on the stack (~1.7KB) because the counter
	 * addresses depend on 'ha'; this runs once at attach time only.
	 */
	struct hwstat_row {
		const char	*name;
		void		*counter;
		const char	*desc;
	};

#define	QHW_C(member)	{ #member, &ha->hw_stats.common.member, NULL }
#define	QHW_BB(member)	{ #member, &ha->hw_stats.bb.member, NULL }

	struct hwstat_row rows[] = {
		{ "no_buff_discards", &ha->hw_stats.common.no_buff_discards,
		    "No. of packets discarded due to lack of buffer" },
		{ "packet_too_big_discard",
		    &ha->hw_stats.common.packet_too_big_discard,
		    "No. of packets discarded because packet was too big" },
		QHW_C(ttl0_discard),
		QHW_C(rx_ucast_bytes),
		QHW_C(rx_mcast_bytes),
		QHW_C(rx_bcast_bytes),
		QHW_C(rx_ucast_pkts),
		QHW_C(rx_mcast_pkts),
		QHW_C(rx_bcast_pkts),
		QHW_C(mftag_filter_discards),
		QHW_C(mac_filter_discards),
		QHW_C(tx_ucast_bytes),
		QHW_C(tx_mcast_bytes),
		QHW_C(tx_bcast_bytes),
		QHW_C(tx_ucast_pkts),
		QHW_C(tx_mcast_pkts),
		QHW_C(tx_bcast_pkts),
		QHW_C(tx_err_drop_pkts),
		QHW_C(tpa_coalesced_pkts),
		QHW_C(tpa_coalesced_events),
		QHW_C(tpa_aborts_num),
		QHW_C(tpa_not_coalesced_pkts),
		QHW_C(tpa_coalesced_bytes),
		QHW_C(rx_64_byte_packets),
		QHW_C(rx_65_to_127_byte_packets),
		QHW_C(rx_128_to_255_byte_packets),
		QHW_C(rx_256_to_511_byte_packets),
		QHW_C(rx_512_to_1023_byte_packets),
		QHW_C(rx_1024_to_1518_byte_packets),
		QHW_BB(rx_1519_to_1522_byte_packets),
		/*
		 * NOTE: the leaf name and the counter field intentionally
		 * differ here; this preserves the historical sysctl name.
		 */
		{ "rx_1523_to_2047_byte_packets",
		    &ha->hw_stats.bb.rx_1519_to_2047_byte_packets, NULL },
		QHW_BB(rx_2048_to_4095_byte_packets),
		QHW_BB(rx_4096_to_9216_byte_packets),
		QHW_BB(rx_9217_to_16383_byte_packets),
		QHW_C(rx_crc_errors),
		QHW_C(rx_mac_crtl_frames),
		QHW_C(rx_pause_frames),
		QHW_C(rx_pfc_frames),
		QHW_C(rx_align_errors),
		QHW_C(rx_carrier_errors),
		QHW_C(rx_oversize_packets),
		QHW_C(rx_jabbers),
		QHW_C(rx_undersize_packets),
		QHW_C(rx_fragments),
		QHW_C(tx_64_byte_packets),
		QHW_C(tx_65_to_127_byte_packets),
		QHW_C(tx_128_to_255_byte_packets),
		QHW_C(tx_256_to_511_byte_packets),
		QHW_C(tx_512_to_1023_byte_packets),
		QHW_C(tx_1024_to_1518_byte_packets),
		QHW_BB(tx_1519_to_2047_byte_packets),
		QHW_BB(tx_2048_to_4095_byte_packets),
		QHW_BB(tx_4096_to_9216_byte_packets),
		QHW_BB(tx_9217_to_16383_byte_packets),
		QHW_C(tx_pause_frames),
		QHW_C(tx_pfc_frames),
		QHW_BB(tx_lpi_entry_count),
		QHW_BB(tx_total_collisions),
		QHW_C(brb_truncates),
		QHW_C(brb_discards),
		QHW_C(rx_mac_bytes),
		QHW_C(rx_mac_uc_packets),
		QHW_C(rx_mac_mc_packets),
		QHW_C(rx_mac_bc_packets),
		QHW_C(rx_mac_frames_ok),
		QHW_C(tx_mac_bytes),
		QHW_C(tx_mac_uc_packets),
		QHW_C(tx_mac_mc_packets),
		QHW_C(tx_mac_bc_packets),
		QHW_C(tx_mac_ctrl_frames),
	};

#undef QHW_C
#undef QHW_BB

	ctx = device_get_sysctl_ctx(ha->pci_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
			CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "hwstat");
	children = SYSCTL_CHILDREN(ctx_oid);

	for (i = 0; i < (sizeof(rows) / sizeof(rows[0])); i++)
		SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, rows[i].name,
		    CTLFLAG_RD, rows[i].counter,
		    (rows[i].desc != NULL) ? rows[i].desc : rows[i].name);
}
/*
 * qlnx_add_sysctls
 *	Builds the per-device sysctl tree under dev.qlnx.<unit>: per-queue
 *	and slowpath statistics nodes, hardware statistics, version strings,
 *	debug/trace knobs, interrupt-coalescing controls and error counters.
 *	Also sets the initial values of the tunable fields it exports.
 *	Called once at attach time.
 */
static void
qlnx_add_sysctls(qlnx_host_t *ha)
{
	device_t		dev = ha->pci_dev;
	struct sysctl_ctx_list	*ctx;
	struct sysctl_oid_list	*children;

	ctx = device_get_sysctl_ctx(dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	qlnx_add_fp_stats_sysctls(ha);
	qlnx_add_sp_stats_sysctls(ha);

	/*
	 * Hardware stats are only registered when qlnx_vf_device() is
	 * non-zero (same gate used elsewhere in this file; presumably
	 * excludes the SR-IOV VF case — confirm against qlnx_vf_device()).
	 */
	if (qlnx_vf_device(ha) != 0)
		qlnx_add_hw_stats_sysctls(ha);

	/* Read-only version strings. */
	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
		CTLFLAG_RD, qlnx_ver_str, 0,
		"Driver Version");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
		CTLFLAG_RD, ha->stormfw_ver, 0,
		"STORM Firmware Version");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
		CTLFLAG_RD, ha->mfw_ver, 0,
		"Management Firmware Version");

	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "personality", CTLFLAG_RD,
		&ha->personality, ha->personality,
		"\tpersonality = 0 => Ethernet Only\n"
		"\tpersonality = 3 => Ethernet and RoCE\n"
		"\tpersonality = 4 => Ethernet and iWARP\n"
		"\tpersonality = 6 => Default in Shared Memory\n");

	/* Debug/trace knobs; each field is initialized before export. */
	ha->dbg_level = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "debug", CTLFLAG_RW,
		&ha->dbg_level, ha->dbg_level, "Debug Level");

	ha->dp_level = 0x01;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "dp_level", CTLFLAG_RW,
		&ha->dp_level, ha->dp_level, "DP Level");

	ha->dbg_trace_lro_cnt = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
		&ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
		"Trace LRO Counts");

	ha->dbg_trace_tso_pkt_len = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW,
		&ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
		"Trace TSO packet lengths");

	ha->dp_module = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "dp_module", CTLFLAG_RW,
		&ha->dp_module, ha->dp_module, "DP Module");

	ha->err_inject = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "err_inject", CTLFLAG_RW,
		&ha->err_inject, ha->err_inject, "Error Inject");

	ha->storm_stats_enable = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
		&ha->storm_stats_enable, ha->storm_stats_enable,
		"Enable Storm Statistics Gathering");

	ha->storm_stats_index = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "storm_stats_index", CTLFLAG_RD,
		&ha->storm_stats_index, ha->storm_stats_index,
		"Enable Storm Statistics Gathering Current Index");

	/* Dump bookkeeping flags (read-only to userland). */
	ha->grcdump_taken = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "grcdump_taken", CTLFLAG_RD,
		&ha->grcdump_taken, ha->grcdump_taken,
		"grcdump_taken");

	ha->idle_chk_taken = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
		&ha->idle_chk_taken, ha->idle_chk_taken,
		"idle_chk_taken");

	/*
	 * Current coalescing values are read-only; changes go through the
	 * set_{rx,tx}_coalesce_usecs proc handlers below.
	 */
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
		&ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
		"rx_coalesce_usecs");

	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
		&ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
		"tx_coalesce_usecs");

	SYSCTL_ADD_PROC(ctx, children,
		OID_AUTO, "trigger_dump",
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
		(void *)ha, 0, qlnx_trigger_dump_sysctl, "I", "trigger_dump");

	SYSCTL_ADD_PROC(ctx, children,
		OID_AUTO, "set_rx_coalesce_usecs",
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
		(void *)ha, 0, qlnx_set_rx_coalesce, "I",
		"rx interrupt coalesce period microseconds");

	SYSCTL_ADD_PROC(ctx, children,
		OID_AUTO, "set_tx_coalesce_usecs",
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
		(void *)ha, 0, qlnx_set_tx_coalesce, "I",
		"tx interrupt coalesce period microseconds");

	/* Rx processing tunables. */
	ha->rx_pkt_threshold = 128;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
		&ha->rx_pkt_threshold, ha->rx_pkt_threshold,
		"No. of Rx Pkts to process at a time");

	ha->rx_jumbo_buf_eq_mtu = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
		&ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
		"== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
		"otherwise Rx Jumbo buffers are set to >= MTU size\n");

	/* Driver-level error counters (incremented in the data path). */
	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
		&ha->err_illegal_intr, "err_illegal_intr");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "err_fp_null", CTLFLAG_RD,
		&ha->err_fp_null, "err_fp_null");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
		&ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");

	return;
}
/*****************************************************************************
* Operating System Network Interface Functions
*****************************************************************************/
/*
 * qlnx_init_ifnet
 *	Allocates and configures the network interface for this adapter:
 *	sets baudrate per device ID, installs the driver entry points,
 *	obtains (or synthesizes) the MAC address, attaches the ethernet
 *	layer, advertises capabilities/hwassist and initializes the
 *	supported media list.
 */
static void
qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
{
	uint16_t	device_id;
	if_t		ifp;

	/* if_alloc() cannot fail on current FreeBSD; no NULL check needed. */
	ifp = ha->ifp = if_alloc(IFT_ETHER);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	device_id = pci_get_device(ha->pci_dev);

	/* Link speed is fixed by the device model. */
	if (device_id == QLOGIC_PCI_DEVICE_ID_1634)
		if_setbaudrate(ifp, IF_Gbps(40));
	else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
			(device_id == QLOGIC_PCI_DEVICE_ID_8070))
		if_setbaudrate(ifp, IF_Gbps(25));
	else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
		if_setbaudrate(ifp, IF_Gbps(50));
	else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
		if_setbaudrate(ifp, IF_Gbps(100));

	if_setcapabilities(ifp, IFCAP_LINKSTATE);

	if_setinitfn(ifp, qlnx_init);
	if_setsoftc(ifp, ha);
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, qlnx_ioctl);
	if_settransmitfn(ifp, qlnx_transmit);
	if_setqflushfn(ifp, qlnx_qflush);

	if_setsendqlen(ifp, qlnx_get_ifq_snd_maxlen(ha));
	if_setsendqready(ifp);

	if_setgetcounterfn(ifp, qlnx_get_counter);

	ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;

	memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);

	/*
	 * No MAC address programmed in hardware: synthesize one from a
	 * fixed OUI (00:0e:1e) plus random low bytes.
	 */
	if (!ha->primary_mac[0] && !ha->primary_mac[1] &&
		!ha->primary_mac[2] && !ha->primary_mac[3] &&
		!ha->primary_mac[4] && !ha->primary_mac[5]) {
		uint32_t rnd;

		rnd = arc4random();

		ha->primary_mac[0] = 0x00;
		ha->primary_mac[1] = 0x0e;
		ha->primary_mac[2] = 0x1e;
		ha->primary_mac[3] = rnd & 0xFF;
		ha->primary_mac[4] = (rnd >> 8) & 0xFF;
		ha->primary_mac[5] = (rnd >> 16) & 0xFF;
	}

	ether_ifattach(ifp, ha->primary_mac);
	bcopy(if_getlladdr(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);

	/*
	 * Use the *bit* form here: the previous plain if_setcapabilities()
	 * call overwrote the IFCAP_LINKSTATE capability set above.
	 */
	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
	if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0);

	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0);
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0);
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTSO, 0);
	if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0);
	if_setcapabilitiesbit(ifp, IFCAP_TSO6, 0);
	if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);

	if_sethwtsomax(ifp, QLNX_MAX_TSO_FRAME_SIZE -
		(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
	if_sethwtsomaxsegcount(ifp, QLNX_MAX_SEGMENTS - 1); /* hdr */
	if_sethwtsomaxsegsize(ifp, QLNX_MAX_TX_MBUF_SIZE);

	if_setcapenable(ifp, if_getcapabilities(ifp));

	if_sethwassist(ifp, CSUM_IP);
	if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0);
	if_sethwassistbits(ifp, CSUM_TCP_IPV6 | CSUM_UDP_IPV6, 0);
	if_sethwassistbits(ifp, CSUM_TSO, 0);

	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,
		qlnx_media_status);

	/* Advertise the media types the given device model supports. */
	if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
	} else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
		(device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
		ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
		ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
	} else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
		ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
		ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
	} else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
		ifmedia_add(&ha->media,
			(IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
		ifmedia_add(&ha->media,
			(IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
		ifmedia_add(&ha->media,
			(IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
	}

	ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2(ha, "exit\n");

	return;
}
/*
 * qlnx_init_locked
 *	(Re)starts the adapter with the QLNX lock held: tears down any
 *	running state via qlnx_stop(), reloads via qlnx_load() and, on
 *	success, marks the interface running (and not OACTIVE).
 */
static void
qlnx_init_locked(qlnx_host_t *ha)
{
	if_t ifp = ha->ifp;

	QL_DPRINT1(ha, "Driver Initialization start \n");

	qlnx_stop(ha);

	if (qlnx_load(ha) != 0)
		return;

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

#ifdef QLNX_ENABLE_IWARP
	/* Bring up the RDMA side as well (gated on qlnx_vf_device()). */
	if (qlnx_vf_device(ha) != 0)
		qlnx_rdma_dev_open(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */
}
/*
 * qlnx_init
 *	ifnet init entry point; takes the QLNX lock and delegates to
 *	qlnx_init_locked().
 */
static void
qlnx_init(void *arg)
{
	qlnx_host_t *ha = (qlnx_host_t *)arg;

	QL_DPRINT2(ha, "enter\n");

	QLNX_LOCK(ha);
	qlnx_init_locked(ha);
	QLNX_UNLOCK(ha);

	QL_DPRINT2(ha, "exit\n");
}
/*
 * qlnx_config_mcast_mac_addr
 *	Programs (add_mac != 0) or removes (add_mac == 0) a single multicast
 *	MAC address in the hardware filter via the ecore mcast command.
 *	Returns the ecore status (0 on success).
 */
static int
qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr,
	uint32_t add_mac)
{
	struct ecore_filter_mcast *mcast = &ha->ecore_mcast;

	memset(mcast, 0, sizeof(*mcast));

	mcast->opcode = add_mac ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;
	mcast->num_mc_addrs = 1;
	memcpy(mcast->mac, mac_addr, ETH_ALEN);

	return (ecore_filter_mcast_cmd(&ha->cdev, mcast, ECORE_SPQ_MODE_CB,
	    NULL));
}
/*
 * qlnx_hw_add_mcast
 *	Adds 'mta' to the hardware multicast filter unless it is already
 *	present.  The software shadow table ha->mcast[] tracks programmed
 *	entries; an all-zero entry marks a free slot.  Returns 0 on success
 *	or when the address was already present / no slot is free, -1 if
 *	programming the hardware failed.
 */
static int
qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
{
	int slot, j, free_slot = -1;

	for (slot = 0; slot < QLNX_MAX_NUM_MULTICAST_ADDRS; slot++) {
		if (QL_MAC_CMP(ha->mcast[slot].addr, mta) == 0)
			return (0);	/* already programmed */

		/* Remember the first all-zero (free) slot seen. */
		if (free_slot < 0) {
			for (j = 0; j < ETH_ALEN; j++)
				if (ha->mcast[slot].addr[j] != 0)
					break;
			if (j == ETH_ALEN)
				free_slot = slot;
		}
	}

	if (free_slot < 0)
		return (0);	/* table full; silently ignored, as before */

	if (qlnx_config_mcast_mac_addr(ha, mta, 1))
		return (-1);

	bcopy(mta, ha->mcast[free_slot].addr, ETH_ALEN);
	ha->nmcast++;

	return (0);
}
/*
 * qlnx_hw_del_mcast
 *	Removes 'mta' from the hardware multicast filter and clears its
 *	shadow-table slot.  Returns 0 on success or when the address was
 *	not present, -1 if the hardware removal failed.
 */
static int
qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
{
	int slot;

	for (slot = 0; slot < QLNX_MAX_NUM_MULTICAST_ADDRS; slot++) {
		if (QL_MAC_CMP(ha->mcast[slot].addr, mta) != 0)
			continue;

		if (qlnx_config_mcast_mac_addr(ha, mta, 0))
			return (-1);

		/* Zero the slot to mark it free again. */
		memset(ha->mcast[slot].addr, 0, ETH_ALEN);
		ha->nmcast--;
		return (0);
	}

	return (0);
}
/*
 * Name: qlnx_hw_set_multi
 * Function: Sets the multicast addresses provided by the host O.S. into
 * the hardware (for the given interface)
 */
static void
qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
	uint32_t add_mac)
{
	uint32_t	idx;
	int		rc;

	/*
	 * The table is packed at an ETHER_HDR_LEN-byte stride (see
	 * qlnx_copy_maddr()), not ETHER_ADDR_LEN, so index accordingly.
	 */
	for (idx = 0; idx < mcnt; idx++) {
		if (add_mac)
			rc = qlnx_hw_add_mcast(ha,
				mta + (idx * ETHER_HDR_LEN));
		else
			rc = qlnx_hw_del_mcast(ha,
				mta + (idx * ETHER_HDR_LEN));

		if (rc)
			break;	/* hardware programming failed; stop here */
	}
}
/*
 * if_foreach_llmaddr() callback: append one link-level multicast address
 * to the caller-supplied table.  Entries are packed at an ETHER_HDR_LEN
 * stride (matching qlnx_hw_set_multi()); the table is capped at
 * QLNX_MAX_NUM_MULTICAST_ADDRS entries.
 */
static u_int
qlnx_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
{
	uint8_t *table = arg;

	if (mcnt >= QLNX_MAX_NUM_MULTICAST_ADDRS)
		return (0);	/* table full; do not count this entry */

	bcopy(LLADDR(sdl), table + (mcnt * ETHER_HDR_LEN), ETHER_HDR_LEN);

	return (1);
}
/*
 * qlnx_set_multi
 *	Collects the interface's link-level multicast list and programs
 *	(add_multi != 0) or removes it from the hardware filter.
 *	Always returns 0.
 */
static int
qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
{
	uint8_t	mta[QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN];
	u_int	mcnt;

	/* No-op when qlnx_vf_device() reports 0 (same gate as elsewhere). */
	if (qlnx_vf_device(ha) == 0)
		return (0);

	mcnt = if_foreach_llmaddr(ha->ifp, qlnx_copy_maddr, mta);

	QLNX_LOCK(ha);
	qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
	QLNX_UNLOCK(ha);

	return (0);
}
/*
 * qlnx_set_promisc
 *	Enables or disables promiscuous reception by toggling the
 *	unmatched-unicast/multicast accept bits in the rx filter.
 *	Returns the filter-programming status (0 on success).
 */
static int
qlnx_set_promisc(qlnx_host_t *ha, int enabled)
{
	uint8_t filter;

	if (qlnx_vf_device(ha) == 0)
		return (0);

	filter = ha->filter;
	if (enabled)
		filter |= (ECORE_ACCEPT_MCAST_UNMATCHED |
		    ECORE_ACCEPT_UCAST_UNMATCHED);
	else
		filter &= ~(ECORE_ACCEPT_MCAST_UNMATCHED |
		    ECORE_ACCEPT_UCAST_UNMATCHED);

	return (qlnx_set_rx_accept_filter(ha, filter));
}
/*
 * qlnx_set_allmulti
 *	Enables or disables reception of all multicast frames by toggling
 *	the unmatched-multicast accept bit in the rx filter.
 *	Returns the filter-programming status (0 on success).
 */
static int
qlnx_set_allmulti(qlnx_host_t *ha, int enabled)
{
	uint8_t filter;

	if (qlnx_vf_device(ha) == 0)
		return (0);

	filter = ha->filter;
	if (enabled)
		filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
	else
		filter &= ~ECORE_ACCEPT_MCAST_UNMATCHED;

	return (qlnx_set_rx_accept_filter(ha, filter));
}
/*
 * qlnx_ioctl
 *	ifnet ioctl handler: address/MTU/flags changes, multicast list
 *	updates, media queries, capability toggles and SFP i2c reads.
 *	Returns 0 on success or an errno-style value (SIOCGI2C paths may
 *	return -1 on hardware access failure, preserved from the original).
 */
static int
qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	int		ret = 0, mask;
	struct ifreq	*ifr = (struct ifreq *)data;
#ifdef INET
	struct ifaddr	*ifa = (struct ifaddr *)data;
#endif
	qlnx_host_t	*ha;

	ha = (qlnx_host_t *)if_getsoftc(ifp);

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd);

#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			/* Bring the interface up for an IPv4 address. */
			if_setflagbits(ifp, IFF_UP, 0);
			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
				QLNX_LOCK(ha);
				qlnx_init_locked(ha);
				QLNX_UNLOCK(ha);
			}
			QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				   cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr));

			arp_ifinit(ifp, ifa);
			break;
		}
#endif
		ether_ioctl(ifp, cmd, data);
		break;

	case SIOCSIFMTU:
		QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);

		if (ifr->ifr_mtu > QLNX_MAX_MTU) {
			ret = EINVAL;
		} else {
			QLNX_LOCK(ha);
			if_setmtu(ifp, ifr->ifr_mtu);
			ha->max_frame_size =
				if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
			/* Reinitialize so rx buffers match the new MTU. */
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				qlnx_init_locked(ha);
			}

			QLNX_UNLOCK(ha);
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);

		QLNX_LOCK(ha);

		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/*
				 * Only re-program filters for flags that
				 * actually changed.  Use the if_getflags()
				 * accessor: if_t is opaque, and the rest of
				 * this function never dereferences ifp
				 * directly.
				 */
				if ((if_getflags(ifp) ^ ha->if_flags) &
					IFF_PROMISC) {
					ret = qlnx_set_promisc(ha,
					    if_getflags(ifp) & IFF_PROMISC);
				} else if ((if_getflags(ifp) ^ ha->if_flags) &
					IFF_ALLMULTI) {
					ret = qlnx_set_allmulti(ha,
					    if_getflags(ifp) & IFF_ALLMULTI);
				}
			} else {
				ha->max_frame_size = if_getmtu(ifp) +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
				qlnx_init_locked(ha);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				qlnx_stop(ha);
		}

		ha->if_flags = if_getflags(ifp);
		QLNX_UNLOCK(ha);
		break;

	case SIOCADDMULTI:
		QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd);

		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			if (qlnx_set_multi(ha, 1))
				ret = EINVAL;
		}
		break;

	case SIOCDELMULTI:
		QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd);

		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			if (qlnx_set_multi(ha, 0))
				ret = EINVAL;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:

		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);

		QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);

		if (mask & IFCAP_HWCSUM)
			if_togglecapenable(ifp, IFCAP_HWCSUM);
		if (mask & IFCAP_TSO4)
			if_togglecapenable(ifp, IFCAP_TSO4);
		if (mask & IFCAP_TSO6)
			if_togglecapenable(ifp, IFCAP_TSO6);
		if (mask & IFCAP_VLAN_HWTAGGING)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
		if (mask & IFCAP_VLAN_HWTSO)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
		if (mask & IFCAP_LRO)
			if_togglecapenable(ifp, IFCAP_LRO);

		/* Restart so the data path picks up the new capabilities. */
		QLNX_LOCK(ha);

		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			qlnx_init_locked(ha);

		QLNX_UNLOCK(ha);

		VLAN_CAPABILITIES(ifp);
		break;

	case SIOCGI2C:
	{
		struct ifi2creq i2c;
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
		struct ecore_ptt *p_ptt;

		ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));

		if (ret)
			break;

		/* Only the standard SFP diagnostic addresses are allowed. */
		if ((i2c.len > sizeof (i2c.data)) ||
			(i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
			ret = EINVAL;
			break;
		}

		p_ptt = ecore_ptt_acquire(p_hwfn);

		if (!p_ptt) {
			QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
			ret = -1;
			break;
		}

		ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
			(ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
			i2c.len, &i2c.data[0]);

		ecore_ptt_release(p_hwfn, p_ptt);

		if (ret) {
			ret = -1;
			break;
		}

		ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));

		QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \
			len = %d addr = 0x%02x offset = 0x%04x \
			data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \
			0x%02x 0x%02x 0x%02x\n",
			ret, i2c.len, i2c.dev_addr, i2c.offset,
			i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
			i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]);

		break;
	}

	default:
		QL_DPRINT4(ha, "default (0x%lx)\n", cmd);
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}
/*
 * qlnx_media_change
 *	ifmedia change callback.  Only the IFM_ETHER media type is
 *	accepted; anything else yields EINVAL.  No hardware action taken.
 */
static int
qlnx_media_change(if_t ifp)
{
	qlnx_host_t	*ha = (qlnx_host_t *)if_getsoftc(ifp);
	int		rc;

	QL_DPRINT2(ha, "enter\n");

	rc = (IFM_TYPE(ha->media.ifm_media) == IFM_ETHER) ? 0 : EINVAL;

	QL_DPRINT2(ha, "exit\n");

	return (rc);
}
/*
 * qlnx_media_status
 *	ifmedia status callback: reports link validity/activity, the active
 *	optics type and the negotiated flow-control pause capabilities.
 */
static void
qlnx_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	qlnx_host_t *ha = (qlnx_host_t *)if_getsoftc(ifp);

	QL_DPRINT2(ha, "enter\n");

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (ha->link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= IFM_FDX | qlnx_get_optics(ha, &ha->if_link);

		/* Report pause only if the link partner advertised it. */
		if (ha->if_link.link_partner_caps &
			(QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
			ifmr->ifm_active |=
				IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
	}

	QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
}
/*
 * qlnx_free_tx_pkt
 *	Reclaims one completed tx packet at txq->sw_tx_cons: unloads its DMA
 *	map, frees the mbuf, consumes all of the packet's BDs from the ecore
 *	tx chain and clears the software-ring slot.  The caller
 *	(qlnx_tx_int) advances sw_tx_cons afterwards.  A NULL mbuf in the
 *	slot indicates sw/hw ring-state corruption and triggers a debug
 *	dump instead.
 */
static void
qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_tx_queue *txq)
{
	u16			idx;
	struct mbuf		*mp;
	bus_dmamap_t		map;
	int			i;
//	struct eth_tx_bd	*tx_data_bd;
	struct eth_tx_1st_bd	*first_bd;
	int			nbds = 0;

	idx = txq->sw_tx_cons;
	mp = txq->sw_tx_ring[idx].mp;
	map = txq->sw_tx_ring[idx].map;

	/*
	 * Corruption path (optionally forced by error injection): log the
	 * ring state, count it and take a dump; the slot/chain is left
	 * untouched.
	 */
	if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
		QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);

		QL_DPRINT1(ha, "(mp == NULL) "
			" tx_idx = 0x%x"
			" ecore_prod_idx = 0x%x"
			" ecore_cons_idx = 0x%x"
			" hw_bd_cons = 0x%x"
			" txq_db_last = 0x%x"
			" elem_left = 0x%x\n",
			fp->rss_id,
			ecore_chain_get_prod_idx(&txq->tx_pbl),
			ecore_chain_get_cons_idx(&txq->tx_pbl),
			le16toh(*txq->hw_cons_ptr),
			txq->tx_db.raw,
			ecore_chain_get_elem_left(&txq->tx_pbl));

		fp->err_tx_free_pkt_null++;

		//DEBUG
		qlnx_trigger_dump(ha);

		return;
	} else {
		/* Normal completion: account, sync/unload DMA, free mbuf. */
		QLNX_INC_OPACKETS((ha->ifp));
		QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));

		bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ha->tx_tag, map);

		fp->tx_pkts_freed++;
		fp->tx_pkts_completed++;

		m_freem(mp);
	}

	/*
	 * The first BD records how many BDs the packet used; consume them
	 * all from the chain so producer/consumer stay in step.
	 */
	first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
	nbds = first_bd->data.nbds;

//	BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0);

	for (i = 1; i < nbds; i++) {
		/* tx_data_bd = */ ecore_chain_consume(&txq->tx_pbl);
//		BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0);
	}

	/* Mark the software-ring slot free. */
	txq->sw_tx_ring[idx].flags = 0;
	txq->sw_tx_ring[idx].mp = NULL;
	txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;

	return;
}
/*
 * qlnx_tx_int
 *	Tx completion processing for one queue: frees packets until the
 *	driver's chain consumer index catches up with the hardware's
 *	consumer index (hw_cons_ptr, little-endian in host memory).  A gap
 *	larger than the ring (or injected error) indicates index corruption
 *	and triggers a debug dump, but processing still continues.
 */
static void
qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_tx_queue *txq)
{
	u16		hw_bd_cons;
	u16		ecore_cons_idx;
	uint16_t	diff;
	uint16_t	idx, idx2;

	hw_bd_cons = le16toh(*txq->hw_cons_ptr);

	while (hw_bd_cons !=
		(ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
		/* 16-bit subtraction handles index wrap-around. */
		diff = hw_bd_cons - ecore_cons_idx;
		if ((diff > TX_RING_SIZE) ||
			QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
			QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);

			QL_DPRINT1(ha, "(diff = 0x%x) "
				" tx_idx = 0x%x"
				" ecore_prod_idx = 0x%x"
				" ecore_cons_idx = 0x%x"
				" hw_bd_cons = 0x%x"
				" txq_db_last = 0x%x"
				" elem_left = 0x%x\n",
				diff,
				fp->rss_id,
				ecore_chain_get_prod_idx(&txq->tx_pbl),
				ecore_chain_get_cons_idx(&txq->tx_pbl),
				le16toh(*txq->hw_cons_ptr),
				txq->tx_db.raw,
				ecore_chain_get_elem_left(&txq->tx_pbl));

			fp->err_tx_cons_idx_conflict++;

			//DEBUG
			qlnx_trigger_dump(ha);
		}

		/* Prefetch the next two slots before freeing the current. */
		idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
		idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1);
		prefetch(txq->sw_tx_ring[idx].mp);
		prefetch(txq->sw_tx_ring[idx2].mp);

		qlnx_free_tx_pkt(ha, fp, txq);

		txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
	}
	return;
}
/*
 * qlnx_transmit_locked
 *	Transmit path for one fastpath with fp->tx_mtx held: enqueues 'mp'
 *	(if any) on the buf_ring, then drains the ring via qlnx_send().
 *	Uses the drbr peek/putback/advance protocol so a packet that
 *	qlnx_send() could not take (returned non-zero with *mp preserved)
 *	stays queued.  On exit, runs tx completion when enough completions
 *	are pending or the descriptor chain is running low.  Returns 0 or a
 *	drbr_enqueue() errno.
 */
static int
qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp, struct mbuf *mp)
{
	int			ret = 0;
	struct qlnx_tx_queue	*txq;
	qlnx_host_t		*ha;
	uint16_t		elem_left;

	txq = fp->txq[0];
	ha = (qlnx_host_t *)fp->edev;

	/* Interface down or no link: just queue the packet for later. */
	if ((!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) || (!ha->link_up)) {
		if(mp != NULL)
			ret = drbr_enqueue(ifp, fp->tx_br, mp);
		return (ret);
	}

	if(mp != NULL)
		ret = drbr_enqueue(ifp, fp->tx_br, mp);

	mp = drbr_peek(ifp, fp->tx_br);

	while (mp != NULL) {
		if (qlnx_send(ha, fp, &mp)) {
			/*
			 * Send failed: if the mbuf survived, put it back
			 * for retry; if qlnx_send() consumed/freed it,
			 * drop it from the ring.  Either way stop draining.
			 */
			if (mp != NULL) {
				drbr_putback(ifp, fp->tx_br, mp);
			} else {
				fp->tx_pkts_processed++;
				drbr_advance(ifp, fp->tx_br);
			}
			goto qlnx_transmit_locked_exit;

		} else {
			drbr_advance(ifp, fp->tx_br);
			fp->tx_pkts_transmitted++;
			fp->tx_pkts_processed++;
		}

		mp = drbr_peek(ifp, fp->tx_br);
	}

qlnx_transmit_locked_exit:
	/*
	 * Reap completions opportunistically: when many are pending or the
	 * tx descriptor chain is close to exhaustion.
	 */
	if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) ||
		((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))
			< QLNX_TX_ELEM_MAX_THRESH))
		(void)qlnx_tx_int(ha, fp, fp->txq[0]);

	QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret);
	return ret;
}
/*
 * qlnx_transmit
 *	ifnet transmit entry point.  Selects a fastpath from the mbuf's RSS
 *	flow id (modulo the indirection-table size, then the active rss
 *	count) and tries to transmit immediately under that queue's tx
 *	mutex.  If the mutex is contended, the packet is enqueued on the
 *	queue's buf_ring and the fastpath task is scheduled to drain it.
 *	Returns 0 on success or an errno (EINVAL if the queue has no
 *	buf_ring).
 */
static int
qlnx_transmit(if_t ifp, struct mbuf *mp)
{
	qlnx_host_t		*ha = (qlnx_host_t *)if_getsoftc(ifp);
	struct qlnx_fastpath	*fp;
	int			rss_id = 0, ret = 0;

#ifdef QLNX_TRACEPERF_DATA
	uint64_t		tx_pkts = 0, tx_compl = 0;
#endif

	QL_DPRINT2(ha, "enter\n");

	/* Hash-tagged traffic is spread across the active rss queues. */
	if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
		rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
					ha->num_rss;

	fp = &ha->fp_array[rss_id];

	if (fp->tx_br == NULL) {
		ret = EINVAL;
		goto qlnx_transmit_exit;
	}

	if (mtx_trylock(&fp->tx_mtx)) {
#ifdef QLNX_TRACEPERF_DATA
		tx_pkts = fp->tx_pkts_transmitted;
		tx_compl = fp->tx_pkts_completed;
#endif

		ret = qlnx_transmit_locked(ifp, fp, mp);

#ifdef QLNX_TRACEPERF_DATA
		fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts);
		fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl);
#endif
		mtx_unlock(&fp->tx_mtx);
	} else {
		/* Queue busy: defer the packet to the fastpath taskqueue. */
		if (mp != NULL && (fp->fp_taskqueue != NULL)) {
			ret = drbr_enqueue(ifp, fp->tx_br, mp);
			taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
		}
	}

qlnx_transmit_exit:

	QL_DPRINT2(ha, "exit ret = %d\n", ret);
	return ret;
}
/*
 * qlnx_qflush
 *	ifnet qflush entry point: drains and frees every mbuf queued on
 *	each fastpath's buf_ring, under that queue's tx mutex.
 */
static void
qlnx_qflush(if_t ifp)
{
	int			rss_id;
	struct qlnx_fastpath	*fp;
	struct mbuf		*mp;
	qlnx_host_t		*ha;

	ha = (qlnx_host_t *)if_getsoftc(ifp);

	QL_DPRINT2(ha, "enter\n");

	for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
		fp = &ha->fp_array[rss_id];

		/*
		 * fp points at an fp_array[] element and can never be NULL;
		 * the old "if (fp == NULL) continue;" was dead code.
		 * tx_br, however, may legitimately be unset if the queue
		 * was never fully set up.
		 */
		if (fp->tx_br) {
			mtx_lock(&fp->tx_mtx);

			while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
				fp->tx_pkts_freed++;
				m_freem(mp);
			}

			mtx_unlock(&fp->tx_mtx);
		}
	}
	QL_DPRINT2(ha, "exit\n");

	return;
}
/*
 * Ring a TX doorbell register.  reg_addr is a pointer within the mapped
 * doorbell BAR; convert it back to a resource-relative offset for
 * bus_write_4(), then issue barriers so the write is posted before any
 * subsequent register access.
 */
static void
qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
{
	uint32_t db_offset;

	db_offset =
	    (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells);

	bus_write_4(ha->pci_dbells, db_offset, value);

	/* Flush the posted write on both the register and doorbell BARs. */
	bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ);
	bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ);
}
/*
 * Compute the total L2+L3+L4 header length (ethernet + IP/IPv6 + TCP)
 * of a TSO mbuf; used to locate the TSO payload split point.
 * Returns 0 for frames that are neither IPv4 nor IPv6.
 *
 * NOTE(review): like the original, this assumes a fixed-size IPv4 header
 * (no IP options) and TCP immediately following the IP header — TODO
 * confirm against the TSO capabilities advertised by the driver.
 */
static uint32_t
qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
{
	struct ether_vlan_header	*eh = NULL;
	struct tcphdr			*th = NULL;
	struct tcphdr			th_copy;
	uint32_t			ehdrlen = 0, ip_hlen = 0, offset = 0;
	uint16_t			etype = 0;

	eh = mtod(mp, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = ntohs(eh->evl_proto);
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = ntohs(eh->evl_encap_proto);
	}

	switch (etype) {
	case ETHERTYPE_IP:
		ip_hlen = sizeof(struct ip);
		break;

	case ETHERTYPE_IPV6:
		ip_hlen = sizeof(struct ip6_hdr);
		break;

	default:
		/* Not IP; no TCP header offset to report. */
		return (0);
	}

	/*
	 * Locate the TCP header.  If the first mbuf does not contain the
	 * complete ethernet + IP + TCP header chain, copy the TCP header
	 * out of the mbuf chain instead of reading past m_data.
	 *
	 * (Bug fix: the original code only copied the IP header into an
	 * on-stack buffer and then read th_off from stack memory beyond
	 * the copied bytes — an uninitialized read for IPv4 and an
	 * out-of-bounds stack read for IPv6.)
	 */
	if (mp->m_len >= (ehdrlen + ip_hlen + sizeof(struct tcphdr))) {
		th = (struct tcphdr *)(mp->m_data + ehdrlen + ip_hlen);
	} else {
		m_copydata(mp, ehdrlen + ip_hlen, sizeof(struct tcphdr),
		    (caddr_t)&th_copy);
		th = &th_copy;
	}

	offset = ip_hlen + ehdrlen + (th->th_off << 2);

	return (offset);
}
/*
 * Validate an LSO segment list against the hardware windowing rule:
 * every run of (ETH_TX_LSO_WINDOW_BDS_NUM - header BDs) consecutive
 * payload BDs must carry at least ETH_TX_LSO_WINDOW_MIN_LEN bytes.
 *
 * Returns 0 when the layout is acceptable, -1 when a window is too
 * short (caller will defragment the mbuf).
 */
static __inline int
qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs,
	uint32_t offset)
{
	uint32_t	sum;
	uint32_t	window;
	uint32_t	nbds_in_hdr = 1;
	int		skipped, j;

	/* Packets with few segments always satisfy the window rule. */
	if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM)
		return (0);

	/* Skip the segments wholly consumed by the packet header. */
	skipped = 0;
	while ((skipped < nsegs) && (offset >= segs->ds_len)) {
		offset -= segs->ds_len;
		segs++;
		skipped++;
		nbds_in_hdr++;
	}

	window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr;
	nsegs -= skipped;

	/* Slide the window across the remaining payload segments. */
	while ((uint32_t)nsegs >= window) {
		sum = 0;
		for (j = 0; j < (int)window; j++)
			sum += segs[j].ds_len;

		if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
			fp->tx_lso_wnd_min_len++;
			return (-1);
		}

		nsegs--;
		segs++;
	}

	return (0);
}
/*
 * Map one outgoing mbuf, build its buffer-descriptor (BD) chain on the
 * TX PBL and ring the doorbell.
 *
 * Returns 0 on success.  Returns ENOBUFS when the ring lacks space or
 * m_defrag() fails, or the bus_dma error on a mapping failure; on paths
 * that consume the mbuf, *m_headp is set to NULL so the caller does not
 * free it again.
 *
 * Fixes vs. the original:
 *  - the first TSO packet now initializes tx_tso_max_pkt_len (the
 *    original assigned tx_tso_min_pkt_len twice);
 *  - the QLNX_TRACE_PERF_DATA histogram had a missing parenthesis
 *    (compile error when the macro is defined) and non-contiguous
 *    bucket boundaries (11288/16394/28762/49512 typos), both corrected.
 */
static int
qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
{
	bus_dma_segment_t	*segs;
	bus_dmamap_t		map = 0;
	uint32_t		nsegs = 0;
	int			ret = -1;
	struct mbuf		*m_head = *m_headp;
	uint16_t		idx = 0;
	uint16_t		elem_left;
	uint8_t			nbd = 0;
	struct qlnx_tx_queue	*txq;
	struct eth_tx_1st_bd	*first_bd;
	struct eth_tx_2nd_bd	*second_bd;
	struct eth_tx_3rd_bd	*third_bd;
	struct eth_tx_bd	*tx_data_bd;
	int			seg_idx = 0;
	uint32_t		nbds_in_hdr = 0;
	uint32_t		offset = 0;
#ifdef QLNX_TRACE_PERF_DATA
	uint16_t		bd_used;
#endif

	QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id);

	if (!ha->link_up)
		return (-1);

	first_bd = NULL;
	second_bd = NULL;
	third_bd = NULL;
	tx_data_bd = NULL;

	txq = fp->txq[0];

	/* Bail out early when the ring is nearly exhausted. */
	if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) <
	    QLNX_TX_ELEM_MIN_THRESH) {
		fp->tx_nsegs_gt_elem_left++;
		fp->err_tx_nsegs_gt_elem_left++;
		return (ENOBUFS);
	}

	idx = txq->sw_tx_prod;
	map = txq->sw_tx_ring[idx].map;
	segs = txq->segs;

	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
	    BUS_DMA_NOWAIT);

	if (ha->dbg_trace_tso_pkt_len) {
		if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
			if (!fp->tx_tso_min_pkt_len) {
				fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
				/*
				 * Bug fix: initialize the max too; the
				 * original assigned the min twice, leaving
				 * tx_tso_max_pkt_len zero until a larger
				 * packet arrived.
				 */
				fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
			} else {
				if (fp->tx_tso_min_pkt_len >
				    m_head->m_pkthdr.len)
					fp->tx_tso_min_pkt_len =
					    m_head->m_pkthdr.len;
				if (fp->tx_tso_max_pkt_len <
				    m_head->m_pkthdr.len)
					fp->tx_tso_max_pkt_len =
					    m_head->m_pkthdr.len;
			}
		}
	}

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
		offset = qlnx_tcp_offset(ha, m_head);

	/*
	 * Defragment when the mapping overflowed the segment list, or when
	 * a packet has too many segments and is either non-TSO or violates
	 * the hardware LSO window rule.
	 */
	if ((ret == EFBIG) ||
	    ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
		(!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
		((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
		 qlnx_tso_check(fp, segs, nsegs, offset))))) {
		struct mbuf *m;

		QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);

		fp->tx_defrag++;

		m = m_defrag(m_head, M_NOWAIT);
		if (m == NULL) {
			fp->err_tx_defrag++;
			fp->tx_pkts_freed++;
			m_freem(m_head);
			*m_headp = NULL;
			QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
			return (ENOBUFS);
		}

		m_head = m;
		*m_headp = m_head;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
		    segs, &nsegs, BUS_DMA_NOWAIT))) {
			fp->err_tx_defrag_dmamap_load++;
			QL_DPRINT1(ha,
			    "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n",
			    ret, m_head->m_pkthdr.len);
			fp->tx_pkts_freed++;
			m_freem(m_head);
			*m_headp = NULL;
			return (ret);
		}

		if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
		    !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
			fp->err_tx_non_tso_max_seg++;
			QL_DPRINT1(ha,
			    "(%d) nsegs too many for non-TSO [%d, %d]\n",
			    ret, nsegs, m_head->m_pkthdr.len);
			fp->tx_pkts_freed++;
			m_freem(m_head);
			*m_headp = NULL;
			return (ret);
		}

		/* Recompute the header offset; m_defrag relaid the data. */
		if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
			offset = qlnx_tcp_offset(ha, m_head);
	} else if (ret) {
		fp->err_tx_dmamap_load++;
		QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
		    ret, m_head->m_pkthdr.len);
		fp->tx_pkts_freed++;
		m_freem(m_head);
		*m_headp = NULL;
		return (ret);
	}

	QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));

	if (ha->dbg_trace_tso_pkt_len) {
		if (nsegs < QLNX_FP_MAX_SEGS)
			fp->tx_pkts[(nsegs - 1)]++;
		else
			fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
	}

#ifdef QLNX_TRACE_PERF_DATA
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/*
		 * TSO packet-length histogram (mostly 4K buckets).  The
		 * boundary typos of the original (11288, 16394, 28762,
		 * 49512) are corrected so the buckets are contiguous, and
		 * the missing parenthesis in the 49152-53248 bucket (a
		 * compile error when this macro is enabled) is fixed.
		 */
		if (m_head->m_pkthdr.len <= 2048)
			fp->tx_pkts_hist[0]++;
		else if ((m_head->m_pkthdr.len > 2048) &&
		    (m_head->m_pkthdr.len <= 4096))
			fp->tx_pkts_hist[1]++;
		else if ((m_head->m_pkthdr.len > 4096) &&
		    (m_head->m_pkthdr.len <= 8192))
			fp->tx_pkts_hist[2]++;
		else if ((m_head->m_pkthdr.len > 8192) &&
		    (m_head->m_pkthdr.len <= 12288))
			fp->tx_pkts_hist[3]++;
		else if ((m_head->m_pkthdr.len > 12288) &&
		    (m_head->m_pkthdr.len <= 16384))
			fp->tx_pkts_hist[4]++;
		else if ((m_head->m_pkthdr.len > 16384) &&
		    (m_head->m_pkthdr.len <= 20480))
			fp->tx_pkts_hist[5]++;
		else if ((m_head->m_pkthdr.len > 20480) &&
		    (m_head->m_pkthdr.len <= 24576))
			fp->tx_pkts_hist[6]++;
		else if ((m_head->m_pkthdr.len > 24576) &&
		    (m_head->m_pkthdr.len <= 28672))
			fp->tx_pkts_hist[7]++;
		else if ((m_head->m_pkthdr.len > 28672) &&
		    (m_head->m_pkthdr.len <= 32768))
			fp->tx_pkts_hist[8]++;
		else if ((m_head->m_pkthdr.len > 32768) &&
		    (m_head->m_pkthdr.len <= 36864))
			fp->tx_pkts_hist[9]++;
		else if ((m_head->m_pkthdr.len > 36864) &&
		    (m_head->m_pkthdr.len <= 40960))
			fp->tx_pkts_hist[10]++;
		else if ((m_head->m_pkthdr.len > 40960) &&
		    (m_head->m_pkthdr.len <= 45056))
			fp->tx_pkts_hist[11]++;
		else if ((m_head->m_pkthdr.len > 45056) &&
		    (m_head->m_pkthdr.len <= 49152))
			fp->tx_pkts_hist[12]++;
		else if ((m_head->m_pkthdr.len > 49152) &&
		    (m_head->m_pkthdr.len <= 53248))
			fp->tx_pkts_hist[13]++;
		else if ((m_head->m_pkthdr.len > 53248) &&
		    (m_head->m_pkthdr.len <= 57344))
			fp->tx_pkts_hist[14]++;
		else if ((m_head->m_pkthdr.len > 53248) &&
		    (m_head->m_pkthdr.len <= 57344))
			/*
			 * NOTE(review): duplicate of the previous bucket in
			 * the original; hist[15] is unreachable.  Preserved
			 * pending clarification of the intended boundary.
			 */
			fp->tx_pkts_hist[15]++;
		else if ((m_head->m_pkthdr.len > 57344) &&
		    (m_head->m_pkthdr.len <= 61440))
			fp->tx_pkts_hist[16]++;
		else
			fp->tx_pkts_hist[17]++;
	}

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/* Histogram of ring occupancy at transmit time. */
		elem_left = ecore_chain_get_elem_left(&txq->tx_pbl);
		bd_used = TX_RING_SIZE - elem_left;

		if (bd_used <= 100)
			fp->tx_pkts_q[0]++;
		else if ((bd_used > 100) && (bd_used <= 500))
			fp->tx_pkts_q[1]++;
		else if ((bd_used > 500) && (bd_used <= 1000))
			fp->tx_pkts_q[2]++;
		else if ((bd_used > 1000) && (bd_used <= 2000))
			fp->tx_pkts_q[3]++;
		else if ((bd_used > 3000) && (bd_used <= 4000))
			fp->tx_pkts_q[4]++;
		else if ((bd_used > 4000) && (bd_used <= 5000))
			fp->tx_pkts_q[5]++;
		else if ((bd_used > 6000) && (bd_used <= 7000))
			fp->tx_pkts_q[6]++;
		else if ((bd_used > 7000) && (bd_used <= 8000))
			fp->tx_pkts_q[7]++;
		else if ((bd_used > 8000) && (bd_used <= 9000))
			fp->tx_pkts_q[8]++;
		else if ((bd_used > 9000) && (bd_used <= 10000))
			fp->tx_pkts_q[9]++;
		else if ((bd_used > 10000) && (bd_used <= 11000))
			fp->tx_pkts_q[10]++;
		else if ((bd_used > 11000) && (bd_used <= 12000))
			fp->tx_pkts_q[11]++;
		else if ((bd_used > 12000) && (bd_used <= 13000))
			fp->tx_pkts_q[12]++;
		else if ((bd_used > 13000) && (bd_used <= 14000))
			fp->tx_pkts_q[13]++;
		else if ((bd_used > 14000) && (bd_used <= 15000))
			fp->tx_pkts_q[14]++;
		else if ((bd_used > 15000) && (bd_used <= 16000))
			fp->tx_pkts_q[15]++;
		else
			/*
			 * NOTE(review): the 2000-3000 and 5000-6000 ranges
			 * fall through to here in the original as well.
			 */
			fp->tx_pkts_q[16]++;
	}
#endif /* end of QLNX_TRACE_PERF_DATA */

	/*
	 * Reserve room for the BDs; if short, reap completions once and
	 * re-check before declaring the ring full.
	 */
	if ((nsegs + QLNX_TX_ELEM_RESERVE) >
	    (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
		QL_DPRINT1(ha, "(%d, 0x%x) insuffient BDs"
		    " in chain[%d] trying to free packets\n",
		    nsegs, elem_left, fp->rss_id);

		fp->tx_nsegs_gt_elem_left++;

		(void)qlnx_tx_int(ha, fp, txq);

		if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
		    ecore_chain_get_elem_left(&txq->tx_pbl))) {
			QL_DPRINT1(ha,
			    "(%d, 0x%x) insuffient BDs in chain[%d]\n",
			    nsegs, elem_left, fp->rss_id);

			fp->err_tx_nsegs_gt_elem_left++;
			fp->tx_ring_full = 1;
			if (ha->storm_stats_enable)
				ha->storm_stats_gather = 1;
			return (ENOBUFS);
		}
	}

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

	txq->sw_tx_ring[idx].mp = m_head;

	/* First BD: start-of-packet flags and checksum offload bits. */
	first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);

	memset(first_bd, 0, sizeof(*first_bd));

	first_bd->data.bd_flags.bitfields =
	    1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;

	BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);

	nbd++;

	if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
		first_bd->data.bd_flags.bitfields |=
		    (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
	}

	if (m_head->m_pkthdr.csum_flags &
	    (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
		first_bd->data.bd_flags.bitfields |=
		    (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
	}

	if (m_head->m_flags & M_VLANTAG) {
		first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
		first_bd->data.bd_flags.bitfields |=
		    (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT);
	}

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/*
		 * LSO: the header must be wholly described by the first
		 * nbds_in_hdr BDs; the three cases below split the segment
		 * list depending on where the header/payload boundary
		 * (offset) falls relative to the first DMA segment.
		 */
		first_bd->data.bd_flags.bitfields |=
		    (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);

		first_bd->data.bd_flags.bitfields |=
		    (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);

		nbds_in_hdr = 1;

		if (offset == segs->ds_len) {
			/* Header exactly fills the first segment. */
			BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
			segs++;
			seg_idx++;

			second_bd = (struct eth_tx_2nd_bd *)
			    ecore_chain_produce(&txq->tx_pbl);
			memset(second_bd, 0, sizeof(*second_bd));
			nbd++;

			if (seg_idx < nsegs) {
				BD_SET_UNMAP_ADDR_LEN(second_bd, \
				    (segs->ds_addr), (segs->ds_len));
				segs++;
				seg_idx++;
			}

			third_bd = (struct eth_tx_3rd_bd *)
			    ecore_chain_produce(&txq->tx_pbl);
			memset(third_bd, 0, sizeof(*third_bd));
			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
			third_bd->data.bitfields |=
			    (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
			nbd++;

			if (seg_idx < nsegs) {
				BD_SET_UNMAP_ADDR_LEN(third_bd, \
				    (segs->ds_addr), (segs->ds_len));
				segs++;
				seg_idx++;
			}

			for (; seg_idx < nsegs; seg_idx++) {
				tx_data_bd = (struct eth_tx_bd *)
				    ecore_chain_produce(&txq->tx_pbl);
				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
				BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
				    segs->ds_addr,\
				    segs->ds_len);
				segs++;
				nbd++;
			}
		} else if (offset < segs->ds_len) {
			/* Header ends inside the first segment: split it. */
			BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);

			second_bd = (struct eth_tx_2nd_bd *)
			    ecore_chain_produce(&txq->tx_pbl);
			memset(second_bd, 0, sizeof(*second_bd));
			BD_SET_UNMAP_ADDR_LEN(second_bd, \
			    (segs->ds_addr + offset),\
			    (segs->ds_len - offset));
			nbd++;
			segs++;

			third_bd = (struct eth_tx_3rd_bd *)
			    ecore_chain_produce(&txq->tx_pbl);
			memset(third_bd, 0, sizeof(*third_bd));
			BD_SET_UNMAP_ADDR_LEN(third_bd, \
			    segs->ds_addr,\
			    segs->ds_len);
			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
			third_bd->data.bitfields |=
			    (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
			segs++;
			nbd++;

			for (seg_idx = 2; seg_idx < nsegs; seg_idx++) {
				tx_data_bd = (struct eth_tx_bd *)
				    ecore_chain_produce(&txq->tx_pbl);
				memset(tx_data_bd, 0, sizeof(*tx_data_bd));
				BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
				    segs->ds_addr,\
				    segs->ds_len);
				segs++;
				nbd++;
			}
		} else {
			/* Header spans multiple segments. */
			offset = offset - segs->ds_len;
			segs++;

			for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
				if (offset)
					nbds_in_hdr++;

				tx_data_bd = (struct eth_tx_bd *)
				    ecore_chain_produce(&txq->tx_pbl);
				memset(tx_data_bd, 0, sizeof(*tx_data_bd));

				if (second_bd == NULL) {
					second_bd = (struct eth_tx_2nd_bd *)
					    tx_data_bd;
				} else if (third_bd == NULL) {
					third_bd = (struct eth_tx_3rd_bd *)
					    tx_data_bd;
				}

				if (offset && (offset < segs->ds_len)) {
					/* Boundary inside this segment. */
					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
					    segs->ds_addr, offset);

					tx_data_bd = (struct eth_tx_bd *)
					    ecore_chain_produce(&txq->tx_pbl);

					memset(tx_data_bd, 0,
					    sizeof(*tx_data_bd));

					if (second_bd == NULL) {
						second_bd =
						    (struct eth_tx_2nd_bd *)tx_data_bd;
					} else if (third_bd == NULL) {
						third_bd =
						    (struct eth_tx_3rd_bd *)tx_data_bd;
					}
					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
					    (segs->ds_addr + offset), \
					    (segs->ds_len - offset));
					nbd++;
					offset = 0;
				} else {
					if (offset)
						offset = offset - segs->ds_len;
					BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
					    segs->ds_addr, segs->ds_len);
				}
				segs++;
				nbd++;
			}

			if (third_bd == NULL) {
				third_bd = (struct eth_tx_3rd_bd *)
				    ecore_chain_produce(&txq->tx_pbl);
				memset(third_bd, 0, sizeof(*third_bd));
			}

			third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
			third_bd->data.bitfields |=
			    (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
		}
		fp->tx_tso_pkts++;
	} else {
		/* Non-TSO: one BD per DMA segment past the first. */
		segs++;
		for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
			tx_data_bd = (struct eth_tx_bd *)
			    ecore_chain_produce(&txq->tx_pbl);
			memset(tx_data_bd, 0, sizeof(*tx_data_bd));
			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
			    segs->ds_len);
			segs++;
			nbd++;
		}
		first_bd->data.bitfields =
		    (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
		    << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
		first_bd->data.bitfields =
		    htole16(first_bd->data.bitfields);
		fp->tx_non_tso_pkts++;
	}

	first_bd->data.nbds = nbd;

	if (ha->dbg_trace_tso_pkt_len) {
		if (fp->tx_tso_max_nsegs < nsegs)
			fp->tx_tso_max_nsegs = nsegs;

		if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
			fp->tx_tso_min_nsegs = nsegs;
	}

	txq->sw_tx_ring[idx].nsegs = nsegs;
	txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);

	txq->tx_db.data.bd_prod =
	    htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));

	qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);

	QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id);
	return (0);
}
/*
 * Quiesce the interface: clear the driver running flags, let every TX
 * thread observe the new state, then unload the device.
 */
static void
qlnx_stop(qlnx_host_t *ha)
{
	if_t			ifp = ha->ifp;
	struct qlnx_fastpath	*fp;
	int			rss_id;

	if_setdrvflagbits(ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));

	/*
	 * We simply lock and unlock each fp->tx_mtx to
	 * propagate the if_drv_flags
	 * state to each tx thread
	 */
	QL_DPRINT1(ha, "QLNX STATE = %d\n", ha->state);

	if (ha->state == QLNX_STATE_OPEN) {
		for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
			fp = &ha->fp_array[rss_id];

			mtx_lock(&fp->tx_mtx);
			mtx_unlock(&fp->tx_mtx);

			/* Kick the task so it drains with RUNNING clear. */
			if (fp->fp_taskqueue != NULL)
				taskqueue_enqueue(fp->fp_taskqueue,
				    &fp->fp_task);
		}
	}
#ifdef QLNX_ENABLE_IWARP
	if (qlnx_vf_device(ha) != 0) {
		qlnx_rdma_dev_close(ha);
	}
#endif /* #ifdef QLNX_ENABLE_IWARP */

	qlnx_unload(ha);
}
/* Maximum send-queue depth advertised to the ifnet layer: one less than
 * the hardware TX ring size. */
static int
qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
{
	return(TX_RING_SIZE - 1);
}
/*
 * Return the device's current MAC address.  PF devices report it
 * straight from hw_info; a VF first checks the PF bulletin board for a
 * forced MAC and caches it in ha->primary_mac.
 */
uint8_t *
qlnx_get_mac_addr(qlnx_host_t *ha)
{
	struct ecore_hwfn	*p_hwfn = &ha->cdev.hwfns[0];
	unsigned char		mac[ETHER_ADDR_LEN];
	uint8_t			p_is_forced;

	if (qlnx_vf_device(ha) != 0)
		return (p_hwfn->hw_info.hw_mac_addr);

	/* VF path: refresh the bulletin and honor a PF-forced MAC. */
	ecore_vf_read_bulletin(p_hwfn, &p_is_forced);
	if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) ==
	    true) {
		device_printf(ha->pci_dev, "%s: p_is_forced = %d"
		    " mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__,
		    p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4],
		    mac[5]);
		memcpy(ha->primary_mac, mac, ETH_ALEN);
	}

	return (ha->primary_mac);
}
/*
 * Translate the firmware-reported media type and link speed (in Mb/s)
 * into an ifmedia subtype.  Unknown combinations map to IFM_UNKNOWN
 * (or 0 when only the speed is unrecognized, as before).
 */
static uint32_t
qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
{
	uint32_t ifm_type = 0;

	switch (if_link->media_type) {
	case MEDIA_MODULE_FIBER:
	case MEDIA_UNSPECIFIED:
		switch (if_link->speed) {
		case (100 * 1000):
			ifm_type = QLNX_IFM_100G_SR4;
			break;
		case (40 * 1000):
			ifm_type = IFM_40G_SR4;
			break;
		case (25 * 1000):
			ifm_type = QLNX_IFM_25G_SR;
			break;
		case (10 * 1000):
			ifm_type = (IFM_10G_LR | IFM_10G_SR);
			break;
		case (1 * 1000):
			ifm_type = (IFM_1000_SX | IFM_1000_LX);
			break;
		}
		break;

	case MEDIA_DA_TWINAX:
		switch (if_link->speed) {
		case (100 * 1000):
			ifm_type = QLNX_IFM_100G_CR4;
			break;
		case (40 * 1000):
			ifm_type = IFM_40G_CR4;
			break;
		case (25 * 1000):
			ifm_type = QLNX_IFM_25G_CR;
			break;
		case (10 * 1000):
			ifm_type = IFM_10G_TWINAX;
			break;
		}
		break;

	default:
		ifm_type = IFM_UNKNOWN;
		break;
	}

	return (ifm_type);
}
/*****************************************************************************
* Interrupt Service Functions
*****************************************************************************/
/*
 * Collect the continuation RX buffers of a jumbo frame (len bytes still
 * outstanding after the first buffer) and append them as an mbuf chain
 * to mp_head->m_next.  Each iteration consumes one RX BD and advances
 * sw_rx_cons; consumed buffers are replaced via qlnx_alloc_rx_buffer().
 *
 * Returns 0 on success, -1 on a missing mbuf or allocation failure (in
 * which case any partially-built chain is freed and the caller should
 * drop the frame).
 */
static int
qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct mbuf *mp_head, uint16_t len)
{
	struct mbuf		*mp, *mpf, *mpl;
	struct sw_rx_data	*sw_rx_data;
	struct qlnx_rx_queue	*rxq;
	uint16_t 		len_in_buffer;

	rxq = fp->rxq;
	mpf = mpl = mp = NULL;

	while (len) {
		/* Advance to the next software ring entry. */
		rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);

		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
		mp = sw_rx_data->data;

		if (mp == NULL) {
			QL_DPRINT1(ha, "mp = NULL\n");
			fp->err_rx_mp_null++;
			/* Skip the bad entry and abandon the chain. */
			rxq->sw_rx_cons =
				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);

			if (mpf != NULL)
				m_freem(mpf);

			return (-1);
		}

		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
			BUS_DMASYNC_POSTREAD);

		/* Post a replacement buffer before taking this one. */
		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
			QL_DPRINT1(ha, "New buffer allocation failed, dropping"
				" incoming packet and reusing its buffer\n");

			qlnx_reuse_rx_data(rxq);
			fp->err_rx_alloc_errors++;

			if (mpf != NULL)
				m_freem(mpf);

			return (-1);
		}
		ecore_chain_consume(&rxq->rx_bd_ring);

		/* This buffer holds at most rx_buf_size of the remainder. */
		if (len > rxq->rx_buf_size)
			len_in_buffer = rxq->rx_buf_size;
		else
			len_in_buffer = len;
		len = len - len_in_buffer;

		/* Continuation mbufs carry data only, no packet header. */
		mp->m_flags &= ~M_PKTHDR;
		mp->m_next = NULL;
		mp->m_len = len_in_buffer;

		if (mpf == NULL)
			mpf = mpl = mp;
		else {
			mpl->m_next = mp;
			mpl = mp;
		}
	}

	if (mpf != NULL)
		mp_head->m_next = mpf;

	return (0);
}
/*
 * Handle a TPA (LRO aggregation) START completion: claim the first RX
 * buffer and any extra buffers listed in ext_bd_len_list for aggregation
 * slot cqe->tpa_agg_index, and record the chain in rxq->tpa_info[].
 * On any error the slot is marked QLNX_AGG_STATE_ERROR so the matching
 * TPA_END can discard the aggregation.
 */
static void
qlnx_tpa_start(qlnx_host_t *ha,
	struct qlnx_fastpath *fp,
	struct qlnx_rx_queue *rxq,
	struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	uint32_t		agg_index;
	if_t ifp = ha->ifp;
	struct mbuf		*mp;
	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
	struct sw_rx_data	*sw_rx_data;
	dma_addr_t		addr;
	bus_dmamap_t		map;
	struct eth_rx_bd	*rx_bd;
	int			i;
	uint8_t			hash_type;

	agg_index = cqe->tpa_agg_index;

	QL_DPRINT7(ha, "[rss_id = %d]: enter\n \
		\t type = 0x%x\n \
		\t bitfields = 0x%x\n \
		\t seg_len = 0x%x\n \
		\t pars_flags = 0x%x\n \
		\t vlan_tag = 0x%x\n \
		\t rss_hash = 0x%x\n \
		\t len_on_first_bd = 0x%x\n \
		\t placement_offset = 0x%x\n \
		\t tpa_agg_index = 0x%x\n \
		\t header_len = 0x%x\n \
		\t ext_bd_len_list[0] = 0x%x\n \
		\t ext_bd_len_list[1] = 0x%x\n \
		\t ext_bd_len_list[2] = 0x%x\n \
		\t ext_bd_len_list[3] = 0x%x\n \
		\t ext_bd_len_list[4] = 0x%x\n",
		fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
		cqe->pars_flags.flags, cqe->vlan_tag,
		cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
		cqe->tpa_agg_index, cqe->header_len,
		cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
		cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
		cqe->ext_bd_len_list[4]);

	/* Bogus aggregation index from firmware; nothing we can do. */
	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
		fp->err_rx_tpa_invalid_agg_num++;
		return;
	}

	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
	bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
	mp = sw_rx_data->data;

	QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);

	if (mp == NULL) {
		QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
		fp->err_rx_mp_null++;
		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);

		return;
	}

	if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
		QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
			" flags = %x, dropping incoming packet\n", fp->rss_id,
			rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));

		fp->err_rx_hw_errors++;

		/* Recycle the buffer in place; frame is dropped. */
		qlnx_reuse_rx_data(rxq);

		QLNX_INC_IERRORS(ifp);

		return;
	}

	if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
		QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
			" dropping incoming packet and reusing its buffer\n",
			fp->rss_id);

		fp->err_rx_alloc_errors++;
		QLNX_INC_IQDROPS(ifp);

		/*
		 * Load the tpa mbuf into the rx ring and save the
		 * posted mbuf
		 */
		map = sw_rx_data->map;
		addr = sw_rx_data->dma_addr;

		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];

		/* Swap the slot's pre-allocated TPA buffer into the ring
		 * and stash the current buffer for the slot's future use. */
		sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
		sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
		sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;

		rxq->tpa_info[agg_index].rx_buf.data = mp;
		rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
		rxq->tpa_info[agg_index].rx_buf.map = map;

		rx_bd = (struct eth_rx_bd *)
				ecore_chain_produce(&rxq->rx_bd_ring);

		rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
		rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));

		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
			BUS_DMASYNC_PREREAD);

		rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);

		ecore_chain_consume(&rxq->rx_bd_ring);

		/* Now reuse any buffers posted in ext_bd_len_list */
		for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
			if (cqe->ext_bd_len_list[i] == 0)
				break;

			qlnx_reuse_rx_data(rxq);
		}

		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
		return;
	}

	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
		QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
			" dropping incoming packet and reusing its buffer\n",
			fp->rss_id);

		QLNX_INC_IQDROPS(ifp);

		/* if we already have mbuf head in aggregation free it */
		if (rxq->tpa_info[agg_index].mpf) {
			m_freem(rxq->tpa_info[agg_index].mpf);
			rxq->tpa_info[agg_index].mpl = NULL;
		}
		rxq->tpa_info[agg_index].mpf = mp;
		rxq->tpa_info[agg_index].mpl = NULL;

		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
		ecore_chain_consume(&rxq->rx_bd_ring);

		/* Now reuse any buffers posted in ext_bd_len_list */
		for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
			if (cqe->ext_bd_len_list[i] == 0)
				break;

			qlnx_reuse_rx_data(rxq);
		}

		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
		return;
	}

	/*
	 * first process the ext_bd_len_list
	 * if this fails then we simply drop the packet
	 */
	ecore_chain_consume(&rxq->rx_bd_ring);
	rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);

	for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
		QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);

		if (cqe->ext_bd_len_list[i] == 0)
			break;

		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
			BUS_DMASYNC_POSTREAD);

		mpc = sw_rx_data->data;

		if (mpc == NULL) {
			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
			fp->err_rx_mp_null++;
			if (mpf != NULL)
				m_freem(mpf);
			mpf = mpl = NULL;
			rxq->tpa_info[agg_index].agg_state =
						QLNX_AGG_STATE_ERROR;
			ecore_chain_consume(&rxq->rx_bd_ring);
			rxq->sw_rx_cons =
				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
			continue;
		}

		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
				" dropping incoming packet and reusing its"
				" buffer\n", fp->rss_id);

			qlnx_reuse_rx_data(rxq);

			if (mpf != NULL)
				m_freem(mpf);
			mpf = mpl = NULL;

			rxq->tpa_info[agg_index].agg_state =
						QLNX_AGG_STATE_ERROR;

			ecore_chain_consume(&rxq->rx_bd_ring);
			rxq->sw_rx_cons =
				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);

			continue;
		}

		/* Append this buffer to the aggregation chain. */
		mpc->m_flags &= ~M_PKTHDR;
		mpc->m_next = NULL;
		mpc->m_len = cqe->ext_bd_len_list[i];

		if (mpf == NULL) {
			mpf = mpl = mpc;
		} else {
			mpl->m_len = ha->rx_buf_size;
			mpl->m_next = mpc;
			mpl = mpc;
		}

		ecore_chain_consume(&rxq->rx_bd_ring);
		rxq->sw_rx_cons =
			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
	}

	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
		QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
			" incoming packet and reusing its buffer\n",
			fp->rss_id);

		QLNX_INC_IQDROPS(ifp);

		rxq->tpa_info[agg_index].mpf = mp;
		rxq->tpa_info[agg_index].mpl = NULL;

		return;
	}

	rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;

	if (mpf != NULL) {
		mp->m_len = ha->rx_buf_size;
		mp->m_next = mpf;
		rxq->tpa_info[agg_index].mpf = mp;
		rxq->tpa_info[agg_index].mpl = mpl;
	} else {
		mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
		rxq->tpa_info[agg_index].mpf = mp;
		rxq->tpa_info[agg_index].mpl = mp;
		mp->m_next = NULL;
	}

	mp->m_flags |= M_PKTHDR;

	/* assign packet to this interface */
	mp->m_pkthdr.rcvif = ifp;

	/* assume no hardware checksum has completed */
	mp->m_pkthdr.csum_flags = 0;

	//mp->m_pkthdr.flowid = fp->rss_id;
	mp->m_pkthdr.flowid = cqe->rss_hash;

	hash_type = cqe->bitfields &
			(ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
			ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);

	switch (hash_type) {
	case RSS_HASH_TYPE_IPV4:
		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
		break;

	case RSS_HASH_TYPE_TCP_IPV4:
		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
		break;

	case RSS_HASH_TYPE_IPV6:
		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
		break;

	case RSS_HASH_TYPE_TCP_IPV6:
		M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
		break;

	default:
		M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
		break;
	}

	/* TPA frames have validated IP/L4 checksums by definition. */
	mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
			CSUM_DATA_VALID | CSUM_PSEUDO_HDR);

	mp->m_pkthdr.csum_data = 0xFFFF;

	if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
		mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
		mp->m_flags |= M_VLANTAG;
	}

	rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;

	QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
		fp->rss_id, rxq->tpa_info[agg_index].agg_state,
                rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);

	return;
}
/*
 * Handle a TPA CONTINUE completion: consume the buffers listed in
 * cqe->len_list and append them to the aggregation chain started by
 * qlnx_tpa_start().  Buffers arriving for a slot not in the START state
 * are recycled; failures flip the slot to QLNX_AGG_STATE_ERROR so the
 * eventual TPA_END drops the whole aggregation.
 */
static void
qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_rx_queue *rxq,
	struct eth_fast_path_rx_tpa_cont_cqe *cqe)
{
	struct sw_rx_data	*sw_rx_data;
	int			i;
	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
	struct mbuf		*mp;
	uint32_t		agg_index;

	QL_DPRINT7(ha, "[%d]: enter\n \
                \t type = 0x%x\n \
                \t tpa_agg_index = 0x%x\n \
                \t len_list[0] = 0x%x\n \
                \t len_list[1] = 0x%x\n \
                \t len_list[2] = 0x%x\n \
                \t len_list[3] = 0x%x\n \
                \t len_list[4] = 0x%x\n \
                \t len_list[5] = 0x%x\n",
                fp->rss_id, cqe->type, cqe->tpa_agg_index,
                cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
                cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);

	agg_index = cqe->tpa_agg_index;

	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
		QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
		fp->err_rx_tpa_invalid_agg_num++;
		return;
	}

	for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
		QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);

		if (cqe->len_list[i] == 0)
			break;

		/* Slot already errored: just recycle the posted buffer. */
		if (rxq->tpa_info[agg_index].agg_state !=
			QLNX_AGG_STATE_START) {
			qlnx_reuse_rx_data(rxq);
			continue;
		}

		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
			BUS_DMASYNC_POSTREAD);

		mpc = sw_rx_data->data;

		if (mpc == NULL) {
			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);

			fp->err_rx_mp_null++;
			if (mpf != NULL)
				m_freem(mpf);
			mpf = mpl = NULL;
			rxq->tpa_info[agg_index].agg_state =
						QLNX_AGG_STATE_ERROR;
			ecore_chain_consume(&rxq->rx_bd_ring);
			rxq->sw_rx_cons =
				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
			continue;
		}

		/* Post a replacement buffer before taking this one. */
		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
				" dropping incoming packet and reusing its"
				" buffer\n", fp->rss_id);

			qlnx_reuse_rx_data(rxq);

			if (mpf != NULL)
				m_freem(mpf);
			mpf = mpl = NULL;

			rxq->tpa_info[agg_index].agg_state =
						QLNX_AGG_STATE_ERROR;

			ecore_chain_consume(&rxq->rx_bd_ring);
			rxq->sw_rx_cons =
				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);

			continue;
		}

		/* Link the buffer onto the local continuation chain. */
		mpc->m_flags &= ~M_PKTHDR;
		mpc->m_next = NULL;
		mpc->m_len = cqe->len_list[i];

		if (mpf == NULL) {
			mpf = mpl = mpc;
		} else {
			mpl->m_len = ha->rx_buf_size;
			mpl->m_next = mpc;
			mpl = mpc;
		}

		ecore_chain_consume(&rxq->rx_bd_ring);
		rxq->sw_rx_cons =
			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
	}

	QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
                  fp->rss_id, mpf, mpl);

	/* Splice the new buffers onto the aggregation's tail. */
	if (mpf != NULL) {
		mp = rxq->tpa_info[agg_index].mpl;
		mp->m_len = ha->rx_buf_size;
		mp->m_next = mpf;
		rxq->tpa_info[agg_index].mpl = mpl;
	}

	return;
}
/*
 * Handle a TPA END completion: append any final buffers from
 * cqe->len_list, fix up the chain's packet header and total length, and
 * hand the completed aggregation to the network stack.  If the slot is
 * in the ERROR state the whole aggregation is freed instead.
 *
 * Returns the number of wire segments the hardware coalesced (0 on
 * drop), which the caller adds to its rx packet budget accounting.
 */
static int
qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_rx_queue *rxq,
	struct eth_fast_path_rx_tpa_end_cqe *cqe)
{
	struct sw_rx_data	*sw_rx_data;
	int			i;
	struct mbuf		*mpf = NULL, *mpl = NULL, *mpc = NULL;
	struct mbuf		*mp;
	uint32_t		agg_index;
	uint32_t		len = 0;
	if_t ifp = ha->ifp;

	QL_DPRINT7(ha, "[%d]: enter\n \
                \t type = 0x%x\n \
                \t tpa_agg_index = 0x%x\n \
                \t total_packet_len = 0x%x\n \
                \t num_of_bds = 0x%x\n \
                \t end_reason = 0x%x\n \
                \t num_of_coalesced_segs = 0x%x\n \
                \t ts_delta = 0x%x\n \
                \t len_list[0] = 0x%x\n \
                \t len_list[1] = 0x%x\n \
                \t len_list[2] = 0x%x\n \
                \t len_list[3] = 0x%x\n",
                 fp->rss_id, cqe->type, cqe->tpa_agg_index,
                cqe->total_packet_len, cqe->num_of_bds,
                cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
                cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
                cqe->len_list[3]);

	agg_index = cqe->tpa_agg_index;

	if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
		QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);

		fp->err_rx_tpa_invalid_agg_num++;
		return (0);
	}

	for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
		QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);

		if (cqe->len_list[i] == 0)
			break;

		/* Slot already errored: just recycle the posted buffer. */
		if (rxq->tpa_info[agg_index].agg_state !=
			QLNX_AGG_STATE_START) {
			QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);

			qlnx_reuse_rx_data(rxq);
			continue;
		}

		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
			BUS_DMASYNC_POSTREAD);

		mpc = sw_rx_data->data;

		if (mpc == NULL) {
			QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);

			fp->err_rx_mp_null++;
			if (mpf != NULL)
				m_freem(mpf);
			mpf = mpl = NULL;
			rxq->tpa_info[agg_index].agg_state =
						QLNX_AGG_STATE_ERROR;
			ecore_chain_consume(&rxq->rx_bd_ring);
			rxq->sw_rx_cons =
				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
			continue;
		}

		/* Post a replacement buffer before taking this one. */
		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
			QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
				" dropping incoming packet and reusing its"
				" buffer\n", fp->rss_id);

			qlnx_reuse_rx_data(rxq);

			if (mpf != NULL)
				m_freem(mpf);
			mpf = mpl = NULL;

			rxq->tpa_info[agg_index].agg_state =
						QLNX_AGG_STATE_ERROR;

			ecore_chain_consume(&rxq->rx_bd_ring);
			rxq->sw_rx_cons =
				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);

			continue;
		}

		/* Link the buffer onto the local tail chain. */
		mpc->m_flags &= ~M_PKTHDR;
		mpc->m_next = NULL;
		mpc->m_len = cqe->len_list[i];

		if (mpf == NULL) {
			mpf = mpl = mpc;
		} else {
			mpl->m_len = ha->rx_buf_size;
			mpl->m_next = mpc;
			mpl = mpc;
		}

		ecore_chain_consume(&rxq->rx_bd_ring);
		rxq->sw_rx_cons =
			(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
	}

	QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);

	if (mpf != NULL) {
		QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);

		/* Splice the trailing buffers onto the aggregation. */
		mp = rxq->tpa_info[agg_index].mpl;
		mp->m_len = ha->rx_buf_size;
		mp->m_next = mpf;
	}

	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
		QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);

		/* Errored aggregation: free everything and reset the slot. */
		if (rxq->tpa_info[agg_index].mpf != NULL)
			m_freem(rxq->tpa_info[agg_index].mpf);
		rxq->tpa_info[agg_index].mpf = NULL;
		rxq->tpa_info[agg_index].mpl = NULL;
		rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
		return (0);
	}

	mp = rxq->tpa_info[agg_index].mpf;
	m_adj(mp, rxq->tpa_info[agg_index].placement_offset);

	mp->m_pkthdr.len = cqe->total_packet_len;

	if (mp->m_next == NULL)
		mp->m_len = mp->m_pkthdr.len;
	else {
		/* compute the total packet length */
		mpf = mp;
		while (mpf != NULL) {
			len += mpf->m_len;
			mpf = mpf->m_next;
		}

		/* Pad the last mbuf so chain length matches the CQE total. */
		if (cqe->total_packet_len > len) {
			mpl = rxq->tpa_info[agg_index].mpl;
			mpl->m_len += (cqe->total_packet_len - len);
		}
	}

	QLNX_INC_IPACKETS(ifp);
	QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));

	QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIu64 "\n \
		m_len = 0x%x m_pkthdr_len = 0x%x\n",
		fp->rss_id, mp->m_pkthdr.csum_data,
		(uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);

	if_input(ifp, mp);

	rxq->tpa_info[agg_index].mpf = NULL;
	rxq->tpa_info[agg_index].mpl = NULL;
	rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;

	return (cqe->num_of_coalesced_segs);
}
/*
 * qlnx_rx_int - service the fast-path Rx completion ring for one queue.
 *
 * Consumes CQEs until the software consumer catches up with the hardware
 * consumer (or 'budget' packets have been delivered).  Regular CQEs are
 * turned into mbufs and handed to the stack (or queued for soft LRO when
 * 'lro_enable' is set); TPA start/cont/end CQEs are dispatched to the
 * TPA helpers; slow-path CQEs are returned to ecore.
 *
 * Returns the number of packets delivered to the stack.
 */
static int
qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
	int lro_enable)
{
	uint16_t hw_comp_cons, sw_comp_cons;
	int rx_pkt = 0;
	struct qlnx_rx_queue *rxq = fp->rxq;
	if_t ifp = ha->ifp;
	struct ecore_dev *cdev = &ha->cdev;
	struct ecore_hwfn *p_hwfn;

#ifdef QLNX_SOFT_LRO
	struct lro_ctrl *lro;

	lro = &rxq->lro;
#endif /* #ifdef QLNX_SOFT_LRO */

	/* Snapshot hardware and software consumer indices. */
	hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
	sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);

	p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];

	/* Memory barrier to prevent the CPU from doing speculative reads of CQE
	 * / BD in the while-loop before reading hw_comp_cons. If the CQE is
	 * read before it is written by FW, then FW writes CQE and SB, and then
	 * the CPU reads the hw_comp_cons, it will use an old CQE.
	 */

	/* Loop to complete all indicated BDs */
	while (sw_comp_cons != hw_comp_cons) {
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_reg_cqe *fp_cqe;
		struct sw_rx_data *sw_rx_data;
		register struct mbuf *mp;
		enum eth_rx_cqe_type cqe_type;
		uint16_t len, pad, len_on_first_bd;
		uint8_t *data;
		uint8_t hash_type;

		/* Get the CQE from the completion ring */
		cqe = (union eth_rx_cqe *)
			ecore_chain_consume(&rxq->rx_comp_ring);
		cqe_type = cqe->fast_path_regular.type;

		/* Slow-path CQEs go back to ecore for processing. */
		if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
			QL_DPRINT3(ha, "Got a slowath CQE\n");

			ecore_eth_cqe_completion(p_hwfn,
					(struct eth_slow_path_rx_cqe *)cqe);
			goto next_cqe;
		}

		/* TPA (hardware aggregation) CQEs have dedicated handlers. */
		if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
			switch (cqe_type) {
			case ETH_RX_CQE_TYPE_TPA_START:
				qlnx_tpa_start(ha, fp, rxq,
					&cqe->fast_path_tpa_start);
				fp->tpa_start++;
				break;

			case ETH_RX_CQE_TYPE_TPA_CONT:
				qlnx_tpa_cont(ha, fp, rxq,
					&cqe->fast_path_tpa_cont);
				fp->tpa_cont++;
				break;

			case ETH_RX_CQE_TYPE_TPA_END:
				/* TPA end may deliver a complete packet. */
				rx_pkt += qlnx_tpa_end(ha, fp, rxq,
						&cqe->fast_path_tpa_end);
				fp->tpa_end++;
				break;

			default:
				break;
			}

			goto next_cqe;
		}

		/* Get the data from the SW ring */
		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
		mp = sw_rx_data->data;

		if (mp == NULL) {
			/* Ring slot has no mbuf; count it and skip the CQE. */
			QL_DPRINT1(ha, "mp = NULL\n");
			fp->err_rx_mp_null++;
			rxq->sw_rx_cons =
				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
			goto next_cqe;
		}
		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
			BUS_DMASYNC_POSTREAD);

		/* non GRO */

		fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
		len = le16toh(fp_cqe->pkt_len);
		pad = fp_cqe->placement_offset;
#if 0
		QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
			" len %u, parsing flags = %d pad = %d\n",
			cqe_type, fp_cqe->bitfields,
			le16toh(fp_cqe->vlan_tag),
			len, le16toh(fp_cqe->pars_flags.flags), pad);
#endif

		data = mtod(mp, uint8_t *);
		data = data + pad;

		if (0)
			qlnx_dump_buf8(ha, __func__, data, len);

		/* For every Rx BD consumed, we allocate a new BD so the BD ring
		 * is always with a fixed size. If allocation fails, we take the
		 * consumed BD and return it to the ring in the PROD position.
		 * The packet that was received on that BD will be dropped (and
		 * not passed to the upper stack).
		 */
		/* If this is an error packet then drop it */
		if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
			CQE_FLAGS_ERR) {
			QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
				" dropping incoming packet\n", sw_comp_cons,
			le16toh(cqe->fast_path_regular.pars_flags.flags));
			fp->err_rx_hw_errors++;

			qlnx_reuse_rx_data(rxq);

			QLNX_INC_IERRORS(ifp);

			goto next_cqe;
		}

		if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
			/* Replenishment failed: recycle the consumed buffer
			 * and drop this packet. */
			QL_DPRINT1(ha, "New buffer allocation failed, dropping"
				" incoming packet and reusing its buffer\n");
			qlnx_reuse_rx_data(rxq);

			fp->err_rx_alloc_errors++;
			QLNX_INC_IQDROPS(ifp);

			goto next_cqe;
		}

		ecore_chain_consume(&rxq->rx_bd_ring);

		len_on_first_bd = fp_cqe->len_on_first_bd;
		m_adj(mp, pad);
		mp->m_pkthdr.len = len;

		if ((len > 60 ) && (len > len_on_first_bd)) {
			/* Packet spans multiple BDs; chain the rest on. */
			mp->m_len = len_on_first_bd;

			if (qlnx_rx_jumbo_chain(ha, fp, mp,
				(len - len_on_first_bd)) != 0) {
				m_freem(mp);

				QLNX_INC_IQDROPS(ifp);

				goto next_cqe;
			}

		} else if (len_on_first_bd < len) {
			fp->err_rx_jumbo_chain_pkts++;
		} else {
			mp->m_len = len;
		}

		mp->m_flags |= M_PKTHDR;

		/* assign packet to this interface interface */
		mp->m_pkthdr.rcvif = ifp;

		/* assume no hardware checksum has completed */
		mp->m_pkthdr.csum_flags = 0;

		mp->m_pkthdr.flowid = fp_cqe->rss_hash;

		/* Translate the firmware RSS hash type to the mbuf one. */
		hash_type = fp_cqe->bitfields &
				(ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
				ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);

		switch (hash_type) {
		case RSS_HASH_TYPE_IPV4:
			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
			break;

		case RSS_HASH_TYPE_TCP_IPV4:
			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
			break;

		case RSS_HASH_TYPE_IPV6:
			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
			break;

		case RSS_HASH_TYPE_TCP_IPV6:
			M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
			break;

		default:
			M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
			break;
		}

		/* Propagate hardware checksum results into the mbuf. */
		if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
			mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		}

		if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		}

		if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
			mp->m_pkthdr.csum_data = 0xFFFF;
			mp->m_pkthdr.csum_flags |=
				(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
		}

		if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
			mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
			mp->m_flags |= M_VLANTAG;
		}

		QLNX_INC_IPACKETS(ifp);
		QLNX_INC_IBYTES(ifp, len);

#ifdef QLNX_SOFT_LRO
		if (lro_enable)
			tcp_lro_queue_mbuf(lro, mp);
		else
			if_input(ifp, mp);
#else

		if_input(ifp, mp);

#endif /* #ifdef QLNX_SOFT_LRO */

		rx_pkt++;

		rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);

next_cqe:	/* don't consume bd rx buffer */
		ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
		sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);

		/* CR TPA - revisit how to handle budget in TPA perhaps
		   increase on "end" */
		if (rx_pkt == budget)
			break;
	} /* repeat while sw_comp_cons != hw_comp_cons... */

	/* Update producers */
	qlnx_update_rx_prod(p_hwfn, rxq);

	return rx_pkt;
}
/*
 * fast path interrupt
 *
 * Per-queue (RSS) interrupt handler: disables the status-block interrupt,
 * drains tx completions (when the PBL is low on elements) and rx
 * completions until the rx ring is empty, flushes soft LRO if enabled,
 * then re-arms the interrupt.
 */
static void
qlnx_fp_isr(void *arg)
{
	qlnx_ivec_t		*ivec = arg;
	qlnx_host_t		*ha;
	struct qlnx_fastpath	*fp = NULL;
	int			idx;

	ha = ivec->ha;

	if (ha->state != QLNX_STATE_OPEN) {
		return;
	}

	/* Validate the rss index carried in the interrupt vector. */
	if ((idx = ivec->rss_idx) >= ha->num_rss) {
		QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
		ha->err_illegal_intr++;
		return;
	}
	fp = &ha->fp_array[idx];

	if (fp == NULL) {
		ha->err_fp_null++;
	} else {
		int			rx_int = 0;
#ifdef QLNX_SOFT_LRO
		int			total_rx_count = 0;
#endif
		int			lro_enable, tc;
		struct qlnx_tx_queue	*txq;
		uint16_t		elem_left;
#ifdef QLNX_TRACE_PERF_DATA
		/*
		 * Fix: tx_compl was referenced below without a declaration,
		 * breaking the build whenever QLNX_TRACE_PERF_DATA is defined.
		 */
		uint64_t		tx_compl;
#endif

		lro_enable = if_getcapenable(ha->ifp) & IFCAP_LRO;

		ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);

		do {
			/*
			 * Reap tx completions when the tx PBL is running low;
			 * skip quietly if another context holds the tx lock.
			 */
			for (tc = 0; tc < ha->num_tc; tc++) {
				txq = fp->txq[tc];

				if((int)(elem_left =
					ecore_chain_get_elem_left(&txq->tx_pbl)) <
					QLNX_TX_ELEM_THRESH)  {
					if (mtx_trylock(&fp->tx_mtx)) {
#ifdef QLNX_TRACE_PERF_DATA
						tx_compl = fp->tx_pkts_completed;
#endif

						qlnx_tx_int(ha, fp, fp->txq[tc]);
#ifdef QLNX_TRACE_PERF_DATA
						/* Histogram completions-per-interrupt. */
						fp->tx_pkts_compl_intr +=
							(fp->tx_pkts_completed - tx_compl);
						if ((fp->tx_pkts_completed - tx_compl) <= 32)
							fp->tx_comInt[0]++;
						else if (((fp->tx_pkts_completed - tx_compl) > 32) &&
							((fp->tx_pkts_completed - tx_compl) <= 64))
							fp->tx_comInt[1]++;
						else if(((fp->tx_pkts_completed - tx_compl) > 64) &&
							((fp->tx_pkts_completed - tx_compl) <= 128))
							fp->tx_comInt[2]++;
						else if(((fp->tx_pkts_completed - tx_compl) > 128))
							fp->tx_comInt[3]++;
#endif
						mtx_unlock(&fp->tx_mtx);
					}
				}
			}

			rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold,
					lro_enable);

			if (rx_int) {
				fp->rx_pkts += rx_int;
#ifdef QLNX_SOFT_LRO
				total_rx_count += rx_int;
#endif
			}

		} while (rx_int);

#ifdef QLNX_SOFT_LRO
		{
			struct lro_ctrl *lro;

			lro = &fp->rxq->lro;

			if (lro_enable && total_rx_count) {
#ifdef QLNX_TRACE_LRO_CNT
				/* Bucket the queued-mbuf count for statistics. */
				if (lro->lro_mbuf_count & ~1023)
					fp->lro_cnt_1024++;
				else if (lro->lro_mbuf_count & ~511)
					fp->lro_cnt_512++;
				else if (lro->lro_mbuf_count & ~255)
					fp->lro_cnt_256++;
				else if (lro->lro_mbuf_count & ~127)
					fp->lro_cnt_128++;
				else if (lro->lro_mbuf_count & ~63)
					fp->lro_cnt_64++;
#endif /* #ifdef QLNX_TRACE_LRO_CNT */

				tcp_lro_flush_all(lro);
			}
		}
#endif /* #ifdef QLNX_SOFT_LRO */

		ecore_sb_update_sb_idx(fp->sb_info);
		rmb();
		ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
	}

	return;
}
/*
 * slow path interrupt processing function
 * can be invoked in polled mode or in interrupt mode via taskqueue.
 */
void
qlnx_sp_isr(void *arg)
{
	struct ecore_hwfn	*p_hwfn = arg;
	qlnx_host_t		*ha = (qlnx_host_t *)p_hwfn->p_dev;

	ha->sp_interrupts++;

	QL_DPRINT2(ha, "enter\n");

	/* Hand slow-path events to the ecore DPC handler. */
	ecore_int_sp_dpc(p_hwfn);

	QL_DPRINT2(ha, "exit\n");
}
/*****************************************************************************
 * Support Functions for DMA'able Memory
 *****************************************************************************/

static void
qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr_p = arg;

	/* Publish 0 first so callers observe a null address on failure. */
	*addr_p = 0;

	if (error) {
		printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
		return;
	}

	*addr_p = segs[0].ds_addr;
}
/*
 * qlnx_alloc_dmabuf - create tag, allocate and map a single-segment
 * DMA buffer described by dma_buf (caller fills in size/alignment).
 *
 * On success dma_buf->dma_tag/dma_map/dma_b/dma_addr are valid.
 * Returns 0 on success, non-zero on failure (all partially acquired
 * resources are released).
 */
static int
qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
{
	int		ret = 0;
	bus_addr_t	b_addr;

	ret = bus_dma_tag_create(
			ha->parent_tag,/* parent */
			dma_buf->alignment,
			((bus_size_t)(1ULL << 32)),/* boundary */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			dma_buf->size,		/* maxsize */
			1,			/* nsegments */
			dma_buf->size,		/* maxsegsize */
			0,			/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&dma_buf->dma_tag);

	if (ret) {
		QL_DPRINT1(ha, "could not create dma tag\n");
		goto qlnx_alloc_dmabuf_exit;
	}
	ret = bus_dmamem_alloc(dma_buf->dma_tag,
			(void **)&dma_buf->dma_b,
			(BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
			&dma_buf->dma_map);

	if (ret) {
		/* Unwind the tag created above. */
		bus_dma_tag_destroy(dma_buf->dma_tag);
		QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
		goto qlnx_alloc_dmabuf_exit;
	}

	ret = bus_dmamap_load(dma_buf->dma_tag,
			dma_buf->dma_map,
			dma_buf->dma_b,
			dma_buf->size,
			qlnx_dmamap_callback,
			&b_addr, BUS_DMA_NOWAIT);

	/* The callback writes 0 to b_addr on mapping failure. */
	if (ret || !b_addr) {
		bus_dma_tag_destroy(dma_buf->dma_tag);
		bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
			dma_buf->dma_map);
		ret = -1;
		goto qlnx_alloc_dmabuf_exit;
	}

	dma_buf->dma_addr = b_addr;

qlnx_alloc_dmabuf_exit:

	return ret;
}
static void
qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
{
	/* Tear down in the reverse order of qlnx_alloc_dmabuf(). */
	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
	bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
	bus_dma_tag_destroy(dma_buf->dma_tag);
}
/*
 * qlnx_dma_alloc_coherent - OSAL hook: allocate DMA-coherent memory.
 *
 * Rounds 'size' up to a page, then allocates size + PAGE_SIZE and stores
 * a copy of the qlnx_dma_t bookkeeping (tag/map/addresses) in the extra
 * page immediately after the buffer, so qlnx_dma_free_coherent() can
 * recover it from just the virtual address.  Returns the buffer virtual
 * address (NULL on failure) and the bus address via *phys.
 */
void *
qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size)
{
	qlnx_dma_t	dma_buf;
	qlnx_dma_t	*dma_p;
	qlnx_host_t	*ha __unused;

	ha = (qlnx_host_t *)ecore_dev;

	/* Round up to a whole number of pages. */
	size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);

	memset(&dma_buf, 0, sizeof (qlnx_dma_t));

	/* Extra page holds the bookkeeping copy stored below. */
	dma_buf.size = size + PAGE_SIZE;
	dma_buf.alignment = 8;

	if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0)
		return (NULL);
	bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);

	*phys = dma_buf.dma_addr;

	/* Stash the metadata right after the rounded-up buffer. */
	dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);

	memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));

	QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
		(void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
		dma_buf.dma_b, (void *)dma_buf.dma_addr, size);

	return (dma_buf.dma_b);
}
/*
 * qlnx_dma_free_coherent - OSAL hook: free memory obtained from
 * qlnx_dma_alloc_coherent().
 *
 * Recovers the qlnx_dma_t bookkeeping stored just past the page-rounded
 * buffer (see qlnx_dma_alloc_coherent) and releases the DMA resources.
 */
void
qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
	uint32_t size)
{
	qlnx_dma_t dma_buf, *dma_p;
	qlnx_host_t *ha;

	ha = (qlnx_host_t *)ecore_dev;

	if (v_addr == NULL)
		return;

	/* Must match the rounding done at allocation time. */
	size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);

	dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);

	QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
		(void *)dma_p->dma_map, (void *)dma_p->dma_tag,
		dma_p->dma_b, (void *)dma_p->dma_addr, size);

	dma_buf = *dma_p;

	/*
	 * NOTE(review): when qlnxr_debug is set the memory is deliberately
	 * not freed — presumably to aid RDMA debugging; confirm intent.
	 */
	if (!ha->qlnxr_debug)
	qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
	return;
}
static int
qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
{
	device_t	dev = ha->pci_dev;
	int		rc;

	/*
	 * Parent DMA tag from which every other tag in the driver derives.
	 */
	rc = bus_dma_tag_create(
			bus_get_dma_tag(dev),	/* parent */
			1,			/* alignment */
			((bus_size_t)(1ULL << 32)), /* boundary */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
			0,			/* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			0,			/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&ha->parent_tag);

	if (rc != 0) {
		QL_DPRINT1(ha, "could not create parent dma tag\n");
		return (-1);
	}

	ha->flags.parent_tag = 1;

	return (0);
}
static void
qlnx_free_parent_dma_tag(qlnx_host_t *ha)
{
	/* Destroy the parent tag if it was ever created. */
	if (ha->parent_tag == NULL)
		return;

	bus_dma_tag_destroy(ha->parent_tag);
	ha->parent_tag = NULL;
}
static int
qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
{
	int rc;

	/* Tag used to map transmit mbuf chains, sized for TSO frames. */
	rc = bus_dma_tag_create(NULL,		/* parent */
			1, 0,			/* alignment, bounds */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			QLNX_MAX_TSO_FRAME_SIZE,/* maxsize */
			QLNX_MAX_SEGMENTS,	/* nsegments */
			QLNX_MAX_TX_MBUF_SIZE,	/* maxsegsize */
			0,			/* flags */
			NULL,			/* lockfunc */
			NULL,			/* lockfuncarg */
			&ha->tx_tag);

	if (rc != 0) {
		QL_DPRINT1(ha, "tx_tag alloc failed\n");
		return (-1);
	}

	return (0);
}
static void
qlnx_free_tx_dma_tag(qlnx_host_t *ha)
{
	/* Destroy the tx tag if it was ever created. */
	if (ha->tx_tag == NULL)
		return;

	bus_dma_tag_destroy(ha->tx_tag);
	ha->tx_tag = NULL;
}
static int
qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
{
	int rc;

	/* Tag used to map receive buffers; one 9K cluster per mapping. */
	rc = bus_dma_tag_create(NULL,		/* parent */
			1, 0,			/* alignment, bounds */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			MJUM9BYTES,		/* maxsize */
			1,			/* nsegments */
			MJUM9BYTES,		/* maxsegsize */
			0,			/* flags */
			NULL,			/* lockfunc */
			NULL,			/* lockfuncarg */
			&ha->rx_tag);

	if (rc != 0) {
		QL_DPRINT1(ha, " rx_tag alloc failed\n");
		return (-1);
	}

	return (0);
}
static void
qlnx_free_rx_dma_tag(qlnx_host_t *ha)
{
	/* Destroy the rx tag if it was ever created. */
	if (ha->rx_tag == NULL)
		return;

	bus_dma_tag_destroy(ha->rx_tag);
	ha->rx_tag = NULL;
}
/*********************************
 * Exported functions
 *********************************/

uint32_t
qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
{
	qlnx_host_t *ha = ecore_dev;

	/* Each ecore bar id maps to every other PCI BAR register. */
	return (bus_get_resource_count(ha->pci_dev, SYS_RES_MEMORY,
	    PCIR_BAR(bar_id * 2)));
}
uint32_t
qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
{
	qlnx_host_t *ha = ecore_dev;

	/* 1-byte PCI configuration-space read. */
	*reg_value = pci_read_config(ha->pci_dev, pci_reg, 1);

	return (0);
}
uint32_t
qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
	uint16_t *reg_value)
{
	qlnx_host_t *ha = ecore_dev;

	/* 2-byte PCI configuration-space read. */
	*reg_value = pci_read_config(ha->pci_dev, pci_reg, 2);

	return (0);
}
uint32_t
qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
	uint32_t *reg_value)
{
	qlnx_host_t *ha = ecore_dev;

	/* 4-byte PCI configuration-space read. */
	*reg_value = pci_read_config(ha->pci_dev, pci_reg, 4);

	return (0);
}
void
qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
{
	qlnx_host_t *ha = ecore_dev;

	/* 1-byte PCI configuration-space write. */
	pci_write_config(ha->pci_dev, pci_reg, reg_value, 1);
}
void
qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
	uint16_t reg_value)
{
	qlnx_host_t *ha = ecore_dev;

	/* 2-byte PCI configuration-space write. */
	pci_write_config(ha->pci_dev, pci_reg, reg_value, 2);
}
void
qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
	uint32_t reg_value)
{
	qlnx_host_t *ha = ecore_dev;

	/* 4-byte PCI configuration-space write. */
	pci_write_config(ha->pci_dev, pci_reg, reg_value, 4);
}
/*
 * qlnx_pci_find_capability - OSAL hook: locate a PCI capability and
 * return its config-space offset, or 0 when not found.
 */
int
qlnx_pci_find_capability(void *ecore_dev, int cap)
{
	int		reg;
	qlnx_host_t	*ha;

	ha = ecore_dev;

	/*
	 * Fix: the 'cap' argument was previously ignored and PCIY_EXPRESS
	 * was always searched; that only worked because ecore never asked
	 * for any other capability.  PCI capability ids are standard
	 * PCI-SIG values, so passing 'cap' through is correct and
	 * consistent with qlnx_pci_find_ext_capability() below.
	 */
	if (pci_find_cap(ha->pci_dev, cap, &reg) == 0)
		return reg;
	else {
		QL_DPRINT1(ha, "failed\n");
		return 0;
	}
}
/*
 * Locate a PCI extended capability; returns its offset, or 0 if absent.
 */
int
qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap)
{
	qlnx_host_t	*ha = ecore_dev;
	int		reg;

	if (pci_find_extcap(ha->pci_dev, ext_cap, &reg) == 0)
		return (reg);

	QL_DPRINT1(ha, "failed\n");
	return (0);
}
uint32_t
qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
{
	struct ecore_hwfn *p_hwfn = hwfn;

	/* Registers are addressed relative to this hwfn's BAR offset. */
	return (bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg,
	    (bus_size_t)(p_hwfn->reg_offset + reg_addr)));
}
void
qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
{
	struct ecore_hwfn	*p_hwfn = hwfn;
	qlnx_host_t		*ha = (qlnx_host_t *)p_hwfn->p_dev;

	/* 32-bit register write relative to this hwfn's BAR offset. */
	bus_write_4(ha->pci_reg,
	    (bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
}
void
qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
{
	struct ecore_hwfn	*p_hwfn = hwfn;
	qlnx_host_t		*ha = (qlnx_host_t *)p_hwfn->p_dev;

	/* 16-bit register write relative to this hwfn's BAR offset. */
	bus_write_2(ha->pci_reg,
	    (bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
}
void
qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value)
{
	struct ecore_hwfn	*p_hwfn = hwfn;
	struct ecore_dev	*cdev = p_hwfn->p_dev;
	uint32_t		offset;

	/* reg_addr points into the doorbell window; convert to an offset. */
	offset = (uint32_t)((uint8_t *)reg_addr -
	    (uint8_t *)(p_hwfn->doorbells));

	bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value);
}
void
qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
{
	struct ecore_hwfn	*p_hwfn = hwfn;
	qlnx_host_t		*ha = (qlnx_host_t *)p_hwfn->p_dev;

	/* Doorbell write relative to this hwfn's doorbell offset. */
	bus_write_4(ha->pci_dbells,
	    (bus_size_t)(p_hwfn->db_offset + reg_addr), value);
}
uint32_t
qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr)
{
	struct ecore_dev	*cdev;
	bus_size_t		offset;

	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;

	/* reg_addr points into the mapped register view. */
	offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));

	return (bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset));
}
void
qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
{
	struct ecore_dev	*cdev;
	bus_size_t		offset;

	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;

	/* reg_addr points into the mapped register view. */
	offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));

	bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
}
void
qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value)
{
	struct ecore_dev	*cdev;
	bus_size_t		offset;

	cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;

	/* reg_addr points into the mapped register view. */
	offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));

	bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value);
}
/*
 * qlnx_zalloc - OSAL hook: allocate zeroed memory.
 *
 * Returns NULL when memory is not immediately available (M_NOWAIT);
 * callers are expected to check the result.
 */
void *
qlnx_zalloc(uint32_t size)
{
	caddr_t	va;

	/*
	 * Fix: the previous malloc + bzero pair dereferenced NULL when the
	 * M_NOWAIT allocation failed.  M_ZERO lets malloc(9) zero the
	 * buffer itself and a failed allocation is simply returned as NULL.
	 */
	va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT | M_ZERO);
	return ((void *)va);
}
void
qlnx_barrier(void *p_dev)
{
	qlnx_host_t *ha = ((struct ecore_dev *)p_dev)->ha;

	/* Flush posted writes to the register BAR. */
	bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE);
}
/*
 * qlnx_link_update - ecore callback invoked on a link state change.
 *
 * Refreshes the cached link information, notifies the network stack if
 * the up/down state changed, and (PF with SR-IOV enabled) propagates the
 * new link state to the VFs.
 */
void
qlnx_link_update(void *p_hwfn)
{
	qlnx_host_t	*ha;
	int		prev_link_state;

	ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;

	qlnx_fill_link(ha, p_hwfn, &ha->if_link);

	prev_link_state = ha->link_up;
	ha->link_up = ha->if_link.link_up;

	/* Only report transitions to the stack. */
        if (prev_link_state !=  ha->link_up) {
                if (ha->link_up) {
                        if_link_state_change(ha->ifp, LINK_STATE_UP);
                } else {
                        if_link_state_change(ha->ifp, LINK_STATE_DOWN);
                }
        }
#ifndef QLNX_VF
#ifdef CONFIG_ECORE_SRIOV
	/* NOTE(review): qlnx_vf_device() != 0 appears to identify a PF. */
	if (qlnx_vf_device(ha) != 0) {
		if (ha->sriov_initialized)
			qlnx_inform_vf_link_state(p_hwfn, ha);
	}
#endif /* #ifdef CONFIG_ECORE_SRIOV */
#endif /* #ifdef QLNX_VF */

	return;
}
static void
__qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn,
	struct ecore_vf_acquire_sw_info *p_sw_info)
{
	/* Encode major.minor.build into one 32-bit driver version word. */
	p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) |
	    (QLNX_VERSION_MINOR << 16) | QLNX_VERSION_BUILD;
	p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD;
}
void
qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req,
	void *p_sw_info)
{
	/* p_resc_req is unused; only the sw-info portion is filled in. */
	__qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info);
}
/*
 * qlnx_fill_link - populate if_link with the current link parameters.
 *
 * Reads media type, link parameters and link state from the MFW (PF path,
 * via a PTT window) or from the VF bulletin (VF path), then translates
 * them into the driver's QLNX_LINK_CAP_* capability bits for our side
 * and for the link partner.
 */
void
qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn,
	struct qlnx_link_output *if_link)
{
	struct ecore_mcp_link_params    link_params;
	struct ecore_mcp_link_state     link_state;
	uint8_t				p_change;
	struct ecore_ptt *p_ptt = NULL;

	memset(if_link, 0, sizeof(*if_link));
	memset(&link_params, 0, sizeof(struct ecore_mcp_link_params));
	memset(&link_state, 0, sizeof(struct ecore_mcp_link_state));

	ha = (qlnx_host_t *)hwfn->p_dev;

	/* Prepare source inputs */
	/* we only deal with physical functions */
	/* NOTE(review): qlnx_vf_device() != 0 appears to mean "is a PF";
	 * the else-branch below is the VF path. */
	if (qlnx_vf_device(ha) != 0) {
        	p_ptt = ecore_ptt_acquire(hwfn);

	        if (p_ptt == NULL) {
			QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
        	        return;
		}

		ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type);
		ecore_ptt_release(hwfn, p_ptt);

		memcpy(&link_params, ecore_mcp_get_link_params(hwfn),
			sizeof(link_params));

		memcpy(&link_state, ecore_mcp_get_link_state(hwfn),
			sizeof(link_state));
	} else {
		/* VF: read link data from the PF-published bulletin. */
		ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type);
		ecore_vf_read_bulletin(hwfn, &p_change);
		ecore_vf_get_link_params(hwfn, &link_params);
		ecore_vf_get_link_state(hwfn, &link_state);
	}

	/* Set the link parameters to pass to protocol driver */
	if (link_state.link_up) {
		if_link->link_up = true;
		if_link->speed = link_state.speed;
	}

	if_link->supported_caps = QLNX_LINK_CAP_FIBRE;

	/* Autoneg / pause capabilities. */
	if (link_params.speed.autoneg)
		if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;

	if (link_params.pause.autoneg ||
		(link_params.pause.forced_rx && link_params.pause.forced_tx))
		if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;

	if (link_params.pause.autoneg || link_params.pause.forced_rx ||
		link_params.pause.forced_tx)
		if_link->supported_caps |= QLNX_LINK_CAP_Pause;

	/* Map NVM-advertised speeds to capability bits. */
	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
					   QLNX_LINK_CAP_1000baseT_Full;

	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;

	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;

	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_LINK_SPEED_40G)
		if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;

	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;

	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;

	if_link->advertised_caps = if_link->supported_caps;

	if_link->autoneg = link_params.speed.autoneg;
	if_link->duplex = QLNX_LINK_DUPLEX;

	/* Link partner capabilities */

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
		if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
		if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
		if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
		if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
		if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;

	if (link_state.an_complete)
		if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;

	if (link_state.partner_adv_pause)
		if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;

	if ((link_state.partner_adv_pause ==
		ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
		(link_state.partner_adv_pause ==
			ECORE_LINK_PARTNER_BOTH_PAUSE))
		if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;

	return;
}
void
qlnx_schedule_recovery(void *p_hwfn)
{
	qlnx_host_t *ha;

	ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;

	/* Defer recovery handling to the error taskqueue. */
	if (qlnx_vf_device(ha) != 0)
		taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
}
static int
qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
{
	struct ecore_hwfn	*p_hwfn;
	int			rc, i;

	/* Propagate the PF parameters to every hardware function. */
	for (i = 0; i < cdev->num_hwfns; i++) {
		p_hwfn = &cdev->hwfns[i];
		p_hwfn->pf_params = *func_params;

#ifdef QLNX_ENABLE_IWARP
		if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) {
			p_hwfn->using_ll2 = true;
		}
#endif /* #ifdef QLNX_ENABLE_IWARP */
	}

	rc = ecore_resc_alloc(cdev);
	if (rc == 0)
		ecore_resc_setup(cdev);

	return (rc);
}
static int
qlnx_nic_start(struct ecore_dev *cdev)
{
	struct ecore_hw_init_params	params;
	int				rc;

	/* Start the hardware with the interrupt mode chosen earlier. */
	memset(&params, 0, sizeof(struct ecore_hw_init_params));
	params.p_tunn = NULL;
	params.b_hw_start = true;
	params.int_mode = cdev->int_mode;
	params.allow_npar_tx_switch = true;
	params.bin_fw_data = NULL;

	rc = ecore_hw_init(cdev, &params);
	if (rc != 0) {
		/* HW init failed; release resources from qlnx_nic_setup(). */
		ecore_resc_free(cdev);
		return (rc);
	}

	return (0);
}
/*
 * qlnx_slowpath_start - bring up the slow path.
 *
 * Builds the PF parameters (connection counts; RDMA parameters when
 * iWARP/RoCE personalities are enabled), runs nic setup + start, and
 * caches the resulting coalescing settings.  Returns 0 on success.
 */
static int
qlnx_slowpath_start(qlnx_host_t *ha)
{
	struct ecore_dev	*cdev;
	struct ecore_pf_params	pf_params;
	int			rc;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	/* One connection per tx class plus one rx per rss queue. */
	pf_params.eth_pf_params.num_cons  =
		(ha->num_rss) * (ha->num_tc + 1);

#ifdef QLNX_ENABLE_IWARP
	if (qlnx_vf_device(ha) != 0) {
		if(ha->personality == ECORE_PCI_ETH_IWARP) {
			device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n");
			pf_params.rdma_pf_params.num_qps = 1024;
			pf_params.rdma_pf_params.num_srqs = 1024;
			pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
			pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP;
		} else if(ha->personality == ECORE_PCI_ETH_ROCE) {
			device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n");
			pf_params.rdma_pf_params.num_qps = 8192;
			pf_params.rdma_pf_params.num_srqs = 8192;
			//pf_params.rdma_pf_params.min_dpis = 0;
			pf_params.rdma_pf_params.min_dpis = 8;
			pf_params.rdma_pf_params.roce_edpm_mode = 0;
			pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
			pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE;
		}
	}
#endif /* #ifdef QLNX_ENABLE_IWARP */

	cdev = &ha->cdev;

	rc = qlnx_nic_setup(cdev, &pf_params);
	if (rc)
		goto qlnx_slowpath_start_exit;

	cdev->int_mode = ECORE_INT_MODE_MSIX;
	cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;

#ifdef QLNX_MAX_COALESCE
	/* Force maximum interrupt coalescing when configured. */
	cdev->rx_coalesce_usecs = 255;
	cdev->tx_coalesce_usecs = 255;
#endif

	rc = qlnx_nic_start(cdev);

	/* Cache the effective coalescing settings for sysctl reporting. */
	ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
	ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;

#ifdef QLNX_USER_LLDP
	(void)qlnx_set_lldp_tlvx(ha, NULL);
#endif /* #ifdef QLNX_USER_LLDP */

qlnx_slowpath_start_exit:

	return (rc);
}
/*
 * qlnx_slowpath_stop - stop the hardware, tear down the slow-path
 * interrupts for each hwfn, and release ecore resources.
 * Always returns 0.
 */
static int
qlnx_slowpath_stop(qlnx_host_t *ha)
{
	struct ecore_dev	*cdev;
	device_t		dev = ha->pci_dev;
	int			i;

	cdev = &ha->cdev;

	/* Hardware must be stopped before the interrupts are torn down. */
	ecore_hw_stop(cdev);

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sp_handle[i])
			(void)bus_teardown_intr(dev, ha->sp_irq[i],
				ha->sp_handle[i]);

		ha->sp_handle[i] = NULL;

		if (ha->sp_irq[i])
			(void) bus_release_resource(dev, SYS_RES_IRQ,
				ha->sp_irq_rid[i], ha->sp_irq[i]);
		ha->sp_irq[i] = NULL;
	}

        ecore_resc_free(cdev);

        return 0;
}
static void
qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
	char ver_str[VER_SIZE])
{
	int i;

	/* Record the adapter name and derive one "<name>-<n>" per hwfn. */
	memcpy(cdev->name, name, NAME_SIZE);

	for_each_hwfn(cdev, i) {
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
	}

	cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;

	/* ver_str is unused here; kept to satisfy the interface. */
}
/*
 * qlnx_get_protocol_stats - OSAL hook: report per-protocol statistics
 * to the management firmware.  Only LAN statistics are supported.
 *
 * NOTE(review): 'cdev' is assigned to a qlnx_host_t pointer and also cast
 * to struct ecore_dev for ecore_get_vport_stats(); this relies on the two
 * sharing the same base address — confirm against the qlnx_host_t layout.
 */
void
qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
{
	enum ecore_mcp_protocol_type	type;
	union ecore_mcp_protocol_stats	*stats;
	struct ecore_eth_stats		eth_stats;
	qlnx_host_t			*ha;

	ha = cdev;
	stats = proto_stats;
	type = proto_type;

        switch (type) {
        case ECORE_MCP_LAN_STATS:
                ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
                stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
                stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
		/* FCS error count is not tracked; report "unknown". */
                stats->lan_stats.fcs_err = -1;
                break;

	default:
		ha->err_get_proto_invalid_type++;

		QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
		break;
	}
	return;
}
static int
qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
{
	struct ecore_hwfn	*p_hwfn = &ha->cdev.hwfns[0];
	struct ecore_ptt	*p_ptt;

	/* The management FW version is read through a PTT window on hwfn 0. */
	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (p_ptt == NULL) {
		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
		return (-1);
	}

	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);

	ecore_ptt_release(p_hwfn, p_ptt);

	return (0);
}
static int
qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
{
	struct ecore_hwfn	*p_hwfn = &ha->cdev.hwfns[0];
	struct ecore_ptt	*p_ptt;

	/* Flash size is queried through a PTT window on hwfn 0. */
	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (p_ptt == NULL) {
		QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
		return (-1);
	}

	ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);

	ecore_ptt_release(p_hwfn, p_ptt);

	return (0);
}
static int
qlnx_alloc_mem_arrays(qlnx_host_t *ha)
{
	/* Clear the statically sized per-queue and status-block arrays. */
	memset(&ha->txq_array[0], 0,
	    sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS);
	memset(&ha->rxq_array[0], 0,
	    sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS);
	memset(&ha->sb_array[0], 0,
	    sizeof(struct ecore_sb_info) * QLNX_MAX_RSS);

	return (0);
}
static void
qlnx_init_fp(qlnx_host_t *ha)
{
	struct qlnx_fastpath	*fp;
	int			i, tc, txq_idx;

	for (i = 0; i < ha->num_rss; i++) {
		fp = &ha->fp_array[i];

		/* Wire the fastpath to its status block and rx queue. */
		fp->rss_id = i;
		fp->edev = ha;
		fp->sb_info = &ha->sb_array[i];
		fp->rxq = &ha->rxq_array[i];
		fp->rxq->rxq_id = i;

		/* One tx queue per traffic class, interleaved by rss id. */
		for (tc = 0; tc < ha->num_tc; tc++) {
			txq_idx = tc * ha->num_rss + i;
			fp->txq[tc] = &ha->txq_array[txq_idx];
			fp->txq[tc]->index = txq_idx;
		}

		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
		    i);

		fp->tx_ring_full = 0;

		/* reset all the statistics counters */
		fp->tx_pkts_processed = 0;
		fp->tx_pkts_freed = 0;
		fp->tx_pkts_transmitted = 0;
		fp->tx_pkts_completed = 0;

#ifdef QLNX_TRACE_PERF_DATA
		fp->tx_pkts_trans_ctx = 0;
		fp->tx_pkts_compl_ctx = 0;
		fp->tx_pkts_trans_fp = 0;
		fp->tx_pkts_compl_fp = 0;
		fp->tx_pkts_compl_intr = 0;
#endif
		fp->tx_lso_wnd_min_len = 0;
		fp->tx_defrag = 0;
		fp->tx_nsegs_gt_elem_left = 0;
		fp->tx_tso_max_nsegs = 0;
		fp->tx_tso_min_nsegs = 0;
		fp->err_tx_nsegs_gt_elem_left = 0;
		fp->err_tx_dmamap_create = 0;
		fp->err_tx_defrag_dmamap_load = 0;
		fp->err_tx_non_tso_max_seg = 0;
		fp->err_tx_dmamap_load = 0;
		fp->err_tx_defrag = 0;
		fp->err_tx_free_pkt_null = 0;
		fp->err_tx_cons_idx_conflict = 0;

		fp->rx_pkts = 0;
		fp->err_m_getcl = 0;
		fp->err_m_getjcl = 0;
	}
}
void
qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
{
	struct ecore_dev *cdev = &ha->cdev;

	/* Release the DMA-coherent status block, if one was allocated. */
	if (sb_info->sb_virt == NULL)
		return;

	OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
	    (sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
	sb_info->sb_virt = NULL;
}
/*
 * qlnx_sb_init - initialize one status block via ecore.
 *
 * Status blocks are distributed round-robin across the hardware
 * functions: hwfn_index selects the hwfn, rel_sb_id is the status-block
 * id relative to that hwfn.  Returns the ecore_int_sb_init() result.
 */
static int
qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
	void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id)
{
        struct ecore_hwfn	*p_hwfn;
        int			hwfn_index, rc;
        u16			rel_sb_id;

	hwfn_index = sb_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / cdev->num_hwfns;

	QL_DPRINT2(((qlnx_host_t *)cdev),
		"hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \
sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
		hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
		sb_virt_addr, (void *)sb_phy_addr);

	rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
				sb_virt_addr, sb_phy_addr, rel_sb_id);

	return rc;
}
/* This function allocates fast-path status block memory */
int
qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
{
	struct ecore_dev	*cdev = &ha->cdev;
	struct status_block_e4	*sb_virt;
	bus_addr_t		sb_phys;
	uint32_t		size = sizeof(*sb_virt);
	int			rc;

	/* DMA-coherent memory that the firmware updates. */
	sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, &sb_phys, size);
	if (sb_virt == NULL) {
		QL_DPRINT1(ha, "Status block allocation failed\n");
		return (-ENOMEM);
	}

	rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
	if (rc != 0) {
		/* Initialization failed; release the DMA memory. */
		OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
	}

	return (rc);
}
static void
qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
{
	struct sw_rx_data	*rx_buf;
	int			idx;

	/* Unmap and free every mbuf still held by the software rx ring. */
	for (idx = 0; idx < rxq->num_rx_buffers; idx++) {
		rx_buf = &rxq->sw_rx_ring[idx];

		if (rx_buf->data == NULL)
			continue;

		if (rx_buf->map != NULL) {
			bus_dmamap_unload(ha->rx_tag, rx_buf->map);
			bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
			rx_buf->map = NULL;
		}
		m_freem(rx_buf->data);
		rx_buf->data = NULL;
	}
}
/*
 * qlnx_free_mem_rxq - release everything attached to one rx queue:
 * software rx buffers, TPA aggregation buffers and partial chains, the
 * firmware BD and completion rings, and the soft-LRO control state.
 */
static void
qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
{
	struct ecore_dev	*cdev;
	int			i;

	cdev = &ha->cdev;

	qlnx_free_rx_buffers(ha, rxq);

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
		/* Free any partially assembled aggregation chain. */
		if (rxq->tpa_info[i].mpf != NULL)
			m_freem(rxq->tpa_info[i].mpf);
	}

	bzero((void *)&rxq->sw_rx_ring[0],
		(sizeof (struct sw_rx_data) * RX_RING_SIZE));

	/* Free the real RQ ring used by FW */
	if (rxq->rx_bd_ring.p_virt_addr) {
		ecore_chain_free(cdev, &rxq->rx_bd_ring);
		rxq->rx_bd_ring.p_virt_addr = NULL;
	}

	/* Free the real completion ring used by FW */
	if (rxq->rx_comp_ring.p_virt_addr &&
			rxq->rx_comp_ring.pbl_sp.p_virt_table) {
		ecore_chain_free(cdev, &rxq->rx_comp_ring);
		rxq->rx_comp_ring.p_virt_addr = NULL;
		rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
	}

#ifdef QLNX_SOFT_LRO
	{
		struct lro_ctrl *lro;

		lro = &rxq->lro;
		tcp_lro_free(lro);
	}
#endif /* #ifdef QLNX_SOFT_LRO */

	return;
}
/*
 * qlnx_alloc_rx_buffer - allocate one rx cluster mbuf, DMA-map it,
 * record it at the software producer index and publish a BD for it.
 * Returns 0 on success, -ENOMEM on allocation or mapping failure.
 */
static int
qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
{
	register struct mbuf	*mp;
	uint16_t		rx_buf_size;
	struct sw_rx_data	*sw_rx_data;
	struct eth_rx_bd	*rx_bd;
	dma_addr_t		dma_addr;
	bus_dmamap_t		map;
	bus_dma_segment_t       segs[1];
	int			nsegs;
	int			ret;

	rx_buf_size = rxq->rx_buf_size;

	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);

	if (mp == NULL) {
		QL_DPRINT1(ha, "Failed to allocate Rx data\n");
		return -ENOMEM;
	}

	mp->m_len = mp->m_pkthdr.len = rx_buf_size;

	/*
	 * NOTE(review): a null map is passed to bus_dmamap_load_mbuf_sg(),
	 * and segs[0] is read before 'ret' is checked — appears to rely on
	 * platform busdma behavior; confirm.
	 */
	map = (bus_dmamap_t)0;

	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
			BUS_DMA_NOWAIT);
	dma_addr = segs[0].ds_addr;

	if (ret || !dma_addr || (nsegs != 1)) {
		m_freem(mp);
		QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
			   ret, (long long unsigned int)dma_addr, nsegs);
		return -ENOMEM;
	}

	/* Record the buffer at the software producer slot. */
	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
	sw_rx_data->data = mp;
	sw_rx_data->dma_addr = dma_addr;
	sw_rx_data->map = map;

	/* Advance PROD and get BD pointer */
	rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
	rx_bd->addr.hi = htole32(U64_HI(dma_addr));
	rx_bd->addr.lo = htole32(U64_LO(dma_addr));
	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);

	rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);

	return 0;
}
/*
 * Allocate and DMA-map one cluster mbuf to serve as a TPA aggregation
 * buffer, storing it in tpa->rx_buf.
 *
 * Returns 0 on success, -ENOMEM on allocation or mapping failure.
 *
 * Fix: as in qlnx_alloc_rx_buffer(), segs[0].ds_addr was read before
 * the bus_dmamap_load_mbuf_sg() result was checked — an uninitialized
 * read on the failure path.
 */
static int
qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
	struct qlnx_agg_info *tpa)
{
	struct mbuf		*mp;
	dma_addr_t		dma_addr;
	bus_dmamap_t		map;
	bus_dma_segment_t	segs[1];
	int			nsegs;
	int			ret;
	struct sw_rx_data	*rx_buf;

	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
	if (mp == NULL) {
		QL_DPRINT1(ha, "Failed to allocate Rx data\n");
		return -ENOMEM;
	}

	mp->m_len = mp->m_pkthdr.len = rx_buf_size;

	map = (bus_dmamap_t)0;

	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	/* segs[] is only valid after a successful single-segment load. */
	dma_addr = ((ret == 0) && (nsegs == 1)) ? segs[0].ds_addr : 0;

	if (ret || !dma_addr || (nsegs != 1)) {
		m_freem(mp);
		QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
		    ret, (long long unsigned int)dma_addr, nsegs);
		return -ENOMEM;
	}

	rx_buf = &tpa->rx_buf;

	memset(rx_buf, 0, sizeof (struct sw_rx_data));

	rx_buf->data = mp;
	rx_buf->dma_addr = dma_addr;
	rx_buf->map = map;

	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);

	return (0);
}
/* Free the TPA aggregation buffer (mbuf and DMA map) held by 'tpa'. */
static void
qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
{
	struct sw_rx_data *rx_buf = &tpa->rx_buf;

	if (rx_buf->data == NULL)
		return;

	if (rx_buf->map != NULL) {
		bus_dmamap_unload(ha->rx_tag, rx_buf->map);
		bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
		rx_buf->map = NULL;
	}
	m_freem(rx_buf->data);
	rx_buf->data = NULL;
}
/*
 * This function allocates all memory needed per Rx queue: the FW BD
 * ring, the FW completion ring, the TPA aggregation buffers and the
 * Rx cluster mbufs. On failure everything is released via
 * qlnx_free_mem_rxq().
 *
 * Fix: the QLNX_SOFT_LRO branch referenced an undeclared 'ifp'; the
 * interface pointer is ha->ifp (as the assignment to lro->ifp below
 * already shows).
 */
static int
qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
{
	int			i, rc, num_allocated;
	struct ecore_dev	*cdev;

	cdev = &ha->cdev;

	rxq->num_rx_buffers = RX_RING_SIZE;
	rxq->rx_buf_size = ha->rx_buf_size;

	/* Allocate the parallel driver ring for Rx buffers */
	bzero((void *)&rxq->sw_rx_ring[0],
	    (sizeof (struct sw_rx_data) * RX_RING_SIZE));

	/* Allocate FW Rx ring */
	rc = ecore_chain_alloc(cdev,
		ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
		ECORE_CHAIN_MODE_NEXT_PTR,
		ECORE_CHAIN_CNT_TYPE_U16,
		RX_RING_SIZE,
		sizeof(struct eth_rx_bd),
		&rxq->rx_bd_ring, NULL);
	if (rc)
		goto err;

	/* Allocate FW completion ring */
	rc = ecore_chain_alloc(cdev,
		ECORE_CHAIN_USE_TO_CONSUME,
		ECORE_CHAIN_MODE_PBL,
		ECORE_CHAIN_CNT_TYPE_U16,
		RX_RING_SIZE,
		sizeof(union eth_rx_cqe),
		&rxq->rx_comp_ring, NULL);
	if (rc)
		goto err;

	/* Pre-allocate one buffer per TPA aggregation context.
	 * NOTE(review): a failure here only breaks this loop and then
	 * Rx buffer allocation continues — preserved from the original. */
	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
		    &rxq->tpa_info[i]);
		if (rc)
			break;
	}

	/* Allocate buffers for the Rx ring */
	for (i = 0; i < rxq->num_rx_buffers; i++) {
		rc = qlnx_alloc_rx_buffer(ha, rxq);
		if (rc)
			break;
	}
	num_allocated = i;
	if (!num_allocated) {
		QL_DPRINT1(ha, "Rx buffers allocation failed\n");
		goto err;
	} else if (num_allocated < rxq->num_rx_buffers) {
		QL_DPRINT1(ha, "Allocated less buffers than"
		    " desired (%d allocated)\n", num_allocated);
	}

#ifdef QLNX_SOFT_LRO
	{
		struct lro_ctrl *lro;

		lro = &rxq->lro;

		/* Use ha->ifp; no local 'ifp' exists in this function. */
		if (tcp_lro_init_args(lro, ha->ifp, 0, rxq->num_rx_buffers)) {
			QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
			    rxq->rxq_id);
			goto err;
		}
		lro->ifp = ha->ifp;
	}
#endif /* #ifdef QLNX_SOFT_LRO */
	return 0;

err:
	qlnx_free_mem_rxq(ha, rxq);
	return -ENOMEM;
}
/* Release the FW Tx PBL chain and clear the shadow Tx ring. */
static void
qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_tx_queue *txq)
{
	struct ecore_dev *cdev = &ha->cdev;

	memset(&txq->sw_tx_ring[0], 0,
	    sizeof(struct sw_tx_bd) * TX_RING_SIZE);

	/* Free the real Tx ring used by the FW, if it was allocated. */
	if (txq->tx_pbl.p_virt_addr != NULL) {
		ecore_chain_free(cdev, &txq->tx_pbl);
		txq->tx_pbl.p_virt_addr = NULL;
	}
}
/* Allocate all per-Tx-queue memory: the FW PBL chain backing the ring. */
static int
qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_tx_queue *txq)
{
	union eth_tx_bd_types	*p_virt;
	struct ecore_dev	*cdev = &ha->cdev;
	int			ret;

	memset(&txq->sw_tx_ring[0], 0,
	    sizeof(struct sw_tx_bd) * TX_RING_SIZE);

	/* Allocate the real Tx ring to be used by FW. */
	ret = ecore_chain_alloc(cdev,
		ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
		ECORE_CHAIN_MODE_PBL,
		ECORE_CHAIN_CNT_TYPE_U16,
		TX_RING_SIZE,
		sizeof(*p_virt),
		&txq->tx_pbl, NULL);

	if (ret != ECORE_SUCCESS) {
		qlnx_free_mem_txq(ha, fp, txq);
		return -ENOMEM;
	}

	txq->num_tx_buffers = TX_RING_SIZE;
	return 0;
}
/* Drain and destroy the fastpath's Tx buf_ring and its mutex. */
static void
qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
{
	if_t		ifp = ha->ifp;
	struct mbuf	*mp;

	if (!mtx_initialized(&fp->tx_mtx))
		return;

	if (fp->tx_br != NULL) {
		/* Drop any packets still queued on the drbr. */
		mtx_lock(&fp->tx_mtx);
		while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
			fp->tx_pkts_freed++;
			m_freem(mp);
		}
		mtx_unlock(&fp->tx_mtx);

		buf_ring_free(fp->tx_br, M_DEVBUF);
		fp->tx_br = NULL;
	}
	mtx_destroy(&fp->tx_mtx);
}
/* Free all resources owned by one fastpath: SB, Rx queue, per-TC Tx queues. */
static void
qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
{
	int tc;

	qlnx_free_mem_sb(ha, fp->sb_info);
	qlnx_free_mem_rxq(ha, fp->rxq);

	for (tc = 0; tc < ha->num_tc; tc++)
		qlnx_free_mem_txq(ha, fp, fp->txq[tc]);
}
/* Create the per-fastpath Tx mutex and its multiqueue buf_ring. */
static int
qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
{
	snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
	    "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);

	mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);

	fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF, M_NOWAIT,
	    &fp->tx_mtx);
	if (fp->tx_br != NULL)
		return 0;

	QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
	    ha->dev_unit, fp->rss_id);
	return -ENOMEM;
}
/*
 * Allocate all memory owned by one fastpath: the status block, the Rx
 * queue (rings + buffers) and one Tx queue per traffic class. On any
 * failure everything allocated so far is torn down via qlnx_free_mem_fp().
 */
static int
qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
{
	int rc, tc;

	rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
	if (rc)
		goto err;

	/*
	 * Choose the Rx cluster size: with rx_jumbo_buf_eq_mtu set, use
	 * the smallest mbuf cluster that holds a full frame; otherwise
	 * cap the buffer at MJUMPAGESIZE (frames then span buffers).
	 */
	if (ha->rx_jumbo_buf_eq_mtu) {
		if (ha->max_frame_size <= MCLBYTES)
			ha->rx_buf_size = MCLBYTES;
		else if (ha->max_frame_size <= MJUMPAGESIZE)
			ha->rx_buf_size = MJUMPAGESIZE;
		else if (ha->max_frame_size <= MJUM9BYTES)
			ha->rx_buf_size = MJUM9BYTES;
		else if (ha->max_frame_size <= MJUM16BYTES)
			ha->rx_buf_size = MJUM16BYTES;
	} else {
		if (ha->max_frame_size <= MCLBYTES)
			ha->rx_buf_size = MCLBYTES;
		else
			ha->rx_buf_size = MJUMPAGESIZE;
	}

	rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
	if (rc)
		goto err;

	for (tc = 0; tc < ha->num_tc; tc++) {
		rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
		if (rc)
			goto err;
	}

	return 0;

err:
	qlnx_free_mem_fp(ha, fp);
	return -ENOMEM;
}
/* Release the per-fastpath memory of every active RSS queue. */
static void
qlnx_free_mem_load(qlnx_host_t *ha)
{
	struct qlnx_fastpath	*fp;
	int			i;

	for (i = 0; i < ha->num_rss; i++) {
		fp = &ha->fp_array[i];
		qlnx_free_mem_fp(ha, fp);
	}
}
/* Allocate per-fastpath memory for every RSS queue; stop on first failure. */
static int
qlnx_alloc_mem_load(qlnx_host_t *ha)
{
	int rss_id;
	int rc = 0;

	for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
		rc = qlnx_alloc_mem_fp(ha, &ha->fp_array[rss_id]);
		if (rc != 0)
			break;
	}
	return (rc);
}
/*
 * Send the VPORT-start ramrod on every hw function and kick off its
 * fastpath. Returns 0 on success, -ENOMEM on any ramrod failure.
 *
 * NOTE(review): the tx_switching parameter is accepted but the field is
 * hard-coded to 0 below — confirm this is intentional.
 */
static int
qlnx_start_vport(struct ecore_dev *cdev,
		u8 vport_id,
		u16 mtu,
		u8 drop_ttl0_flg,
		u8 inner_vlan_removal_en_flg,
		u8 tx_switching,
		u8 hw_lro_enable)
{
	int rc, i;
	struct ecore_sp_vport_start_params vport_start_params = { 0 };
	qlnx_host_t *ha __unused;

	ha = (qlnx_host_t *)cdev;

	vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg;
	vport_start_params.tx_switching = 0;
	vport_start_params.handle_ptp_pkts = 0;
	vport_start_params.only_untagged = 0;
	vport_start_params.drop_ttl0 = drop_ttl0_flg;

	/* HW TPA (RSC) only when the caller asked for hardware LRO. */
	vport_start_params.tpa_mode =
		(hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
	vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;

	vport_start_params.vport_id = vport_id;
	vport_start_params.mtu = mtu;

	QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);

	for_each_hwfn(cdev, i) {
		struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];

		vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
		vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);
		if (rc) {
			QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d"
				" with MTU %d\n" , vport_id, mtu);
			return -ENOMEM;
		}

		/* Enable the fastpath doorbells/IGU for this hwfn. */
		ecore_hw_start_fastpath(p_hwfn);

		QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
			vport_id, mtu);
	}
	return 0;
}
/*
 * Send a VPORT-update ramrod on each hw function, translating the
 * driver-level parameters into ecore sp parameters. For CMT devices
 * (num_hwfns > 1) the RSS indirection table is re-spread per engine
 * before each ramrod. Returns 0 on success or the ecore error code.
 */
static int
qlnx_update_vport(struct ecore_dev *cdev,
	struct qlnx_update_vport_params *params)
{
	struct ecore_sp_vport_update_params	sp_params;
	int					rc, i, j, fp_index;
	struct ecore_hwfn			*p_hwfn;
	struct ecore_rss_params			*rss;
	qlnx_host_t				*ha = (qlnx_host_t *)cdev;
	struct qlnx_fastpath			*fp;

	memset(&sp_params, 0, sizeof(sp_params));

	/* Translate protocol params into sp params */
	sp_params.vport_id = params->vport_id;

	sp_params.update_vport_active_rx_flg =
		params->update_vport_active_rx_flg;
	sp_params.vport_active_rx_flg = params->vport_active_rx_flg;

	sp_params.update_vport_active_tx_flg =
		params->update_vport_active_tx_flg;
	sp_params.vport_active_tx_flg = params->vport_active_tx_flg;

	sp_params.update_inner_vlan_removal_flg =
		params->update_inner_vlan_removal_flg;
	sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;

	sp_params.sge_tpa_params = params->sge_tpa_params;

	/* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
	 * We need to re-fix the rss values per engine for CMT.
	 */
	if (params->rss_params->update_rss_config)
		sp_params.rss_params = params->rss_params;
	else
		sp_params.rss_params = NULL;

	for_each_hwfn(cdev, i) {
		p_hwfn = &cdev->hwfns[i];

		/* Rebuild the indirection table so each engine only
		 * references queues it actually owns. */
		if ((cdev->num_hwfns > 1) &&
			params->rss_params->update_rss_config &&
			params->rss_params->rss_enable) {
			rss = params->rss_params;

			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
				fp_index = ((cdev->num_hwfns * j) + i) %
						ha->num_rss;

				fp = &ha->fp_array[fp_index];
				rss->rss_ind_table[j] = fp->rxq->handle;
			}

			/* Dump the table, eight handles per line. */
			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
				QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n",
					rss->rss_ind_table[j],
					rss->rss_ind_table[j+1],
					rss->rss_ind_table[j+2],
					rss->rss_ind_table[j+3],
					rss->rss_ind_table[j+4],
					rss->rss_ind_table[j+5],
					rss->rss_ind_table[j+6],
					rss->rss_ind_table[j+7]);
				j += 8;
			}
		}

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);

		rc = ecore_sp_vport_update(p_hwfn, &sp_params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			QL_DPRINT1(ha, "Failed to update VPORT\n");
			return rc;
		}

		QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \
			rx_active_flag %d [tx_update %d], [rx_update %d]\n",
			params->vport_id, params->vport_active_tx_flg,
			params->vport_active_rx_flg,
			params->update_vport_active_tx_flg,
			params->update_vport_active_rx_flg);
	}

	return 0;
}
/*
 * Recycle the mbuf at the Rx consumer index: repost the same buffer and
 * a copy of its BD at the producer index, then advance both indices.
 */
static void
qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
{
	struct eth_rx_bd	*bd_cons;
	struct eth_rx_bd	*bd_prod;
	struct sw_rx_data	*sw_cons;
	struct sw_rx_data	*sw_prod;

	bd_cons = ecore_chain_consume(&rxq->rx_bd_ring);
	bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
	sw_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons];
	sw_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod];

	sw_prod->data = sw_cons->data;
	memcpy(bd_prod, bd_cons, sizeof(struct eth_rx_bd));

	rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
	rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
}
/*
 * Publish the current Rx BD and CQE producer indices to the device by
 * writing both (packed into one 32-bit word) to the queue's internal-RAM
 * producer address. Barrier placement is deliberate — do not reorder.
 */
static void
qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
{
	uint16_t	bd_prod;
	uint16_t	cqe_prod;
	union {
		struct eth_rx_prod_data rx_prod_data;
		uint32_t		data32;
	} rx_prods;

	bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
	cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);

	/* Update producers */
	rx_prods.rx_prod_data.bd_prod = htole16(bd_prod);
	rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod);

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 */
	wmb();

#ifdef ECORE_CONFIG_DIRECT_HWFN
	internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
		sizeof(rx_prods), &rx_prods.data32);
#else
	internal_ram_wr(rxq->hw_rxq_prod_addr,
		sizeof(rx_prods), &rx_prods.data32);
#endif

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the napi lock is released and another qlnx_poll is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	wmb();

	return;
}
/*
 * Default 40-byte RSS hash key, encoded as 10 dwords with the bytes
 * packed high-to-low. NOTE(review): this appears to be the widely used
 * default Toeplitz key (6d 5a 56 da 25 5b 0e c2 ...) — verify the byte
 * order ecore expects before changing the encoding.
 */
static uint32_t qlnx_hash_key[] = {
	((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda),
	((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2),
	((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d),
	((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0),
	((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb),
	((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4),
	((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3),
	((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c),
	((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b),
	((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)};
/*
 * Bring up the data path: start the vport, start every Rx queue and the
 * per-TC Tx queues on each fastpath, program RSS (when multiqueue), and
 * finally enable the vport — optionally with HW TPA/LRO parameters.
 * Returns 0 on success or the first ramrod error.
 */
static int
qlnx_start_queues(qlnx_host_t *ha)
{
	int				rc, tc, i, vport_id = 0,
					drop_ttl0_flg = 1, vlan_removal_en = 1,
					tx_switching = 0, hw_lro_enable = 0;
	struct ecore_dev		*cdev = &ha->cdev;
	struct ecore_rss_params		*rss_params = &ha->rss_params;
	struct qlnx_update_vport_params	vport_update_params;
	if_t				ifp;
	struct ecore_hwfn		*p_hwfn;
	struct ecore_sge_tpa_params	tpa_params;
	struct ecore_queue_start_common_params qparams;
	struct qlnx_fastpath		*fp;

	ifp = ha->ifp;

	QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);

	if (!ha->num_rss) {
		QL_DPRINT1(ha, "Cannot update V-VPORT as active as there"
			" are no Rx queues\n");
		return -EINVAL;
	}

#ifndef QLNX_SOFT_LRO
	/* Hardware LRO is only used when software LRO is compiled out. */
	hw_lro_enable = if_getcapenable(ifp) & IFCAP_LRO;
#endif /* #ifndef QLNX_SOFT_LRO */

	rc = qlnx_start_vport(cdev, vport_id, if_getmtu(ifp), drop_ttl0_flg,
			vlan_removal_en, tx_switching, hw_lro_enable);

	if (rc) {
		QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
		return rc;
	}

	QL_DPRINT2(ha, "Start vport ramrod passed, "
		"vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
		vport_id, (int)(if_getmtu(ifp) + 0xe), vlan_removal_en);

	for_each_rss(i) {
		struct ecore_rxq_start_ret_params rx_ret_params;
		struct ecore_txq_start_ret_params tx_ret_params;

		fp = &ha->fp_array[i];
		/* CMT: fastpaths are spread round-robin across hwfns. */
		p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];

		bzero(&qparams, sizeof(struct ecore_queue_start_common_params));
		bzero(&rx_ret_params,
			sizeof (struct ecore_rxq_start_ret_params));

		qparams.queue_id = i ;
		qparams.vport_id = vport_id;
		qparams.stats_id = vport_id;
		qparams.p_sb = fp->sb_info;
		qparams.sb_idx = RX_PI;

		rc = ecore_eth_rx_queue_start(p_hwfn,
			p_hwfn->hw_info.opaque_fid,
			&qparams,
			fp->rxq->rx_buf_size,	/* bd_max_bytes */
			/* bd_chain_phys_addr */
			fp->rxq->rx_bd_ring.p_phys_addr,
			/* cqe_pbl_addr */
			ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
			/* cqe_pbl_size */
			ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
			&rx_ret_params);

		if (rc) {
			QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
			return rc;
		}

		/* Remember the producer address / handle / consumer
		 * pointer the FW returned for this Rx queue. */
		fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod;
		fp->rxq->handle = rx_ret_params.p_handle;
		fp->rxq->hw_cons_ptr =
				&fp->sb_info->sb_virt->pi_array[RX_PI];

		/* Publish the initial Rx producers to the device. */
		qlnx_update_rx_prod(p_hwfn, fp->rxq);

		for (tc = 0; tc < ha->num_tc; tc++) {
			struct qlnx_tx_queue *txq = fp->txq[tc];

			bzero(&qparams,
				sizeof(struct ecore_queue_start_common_params));
			bzero(&tx_ret_params,
				sizeof (struct ecore_txq_start_ret_params));

			qparams.queue_id = txq->index / cdev->num_hwfns ;
			qparams.vport_id = vport_id;
			qparams.stats_id = vport_id;
			qparams.p_sb = fp->sb_info;
			qparams.sb_idx = TX_PI(tc);

			rc = ecore_eth_tx_queue_start(p_hwfn,
				p_hwfn->hw_info.opaque_fid,
				&qparams, tc,
				/* bd_chain_phys_addr */
				ecore_chain_get_pbl_phys(&txq->tx_pbl),
				ecore_chain_get_page_cnt(&txq->tx_pbl),
				&tx_ret_params);

			if (rc) {
				QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
					txq->index, rc);
				return rc;
			}

			txq->doorbell_addr = tx_ret_params.p_doorbell;
			txq->handle = tx_ret_params.p_handle;

			txq->hw_cons_ptr =
				&fp->sb_info->sb_virt->pi_array[TX_PI(tc)];

			/* Prepare the static part of the Tx doorbell. */
			SET_FIELD(txq->tx_db.data.params,
				  ETH_DB_DATA_DEST, DB_DEST_XCM);
			SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
				  DB_AGG_CMD_SET);
			SET_FIELD(txq->tx_db.data.params,
				  ETH_DB_DATA_AGG_VAL_SEL,
				  DQ_XCM_ETH_TX_BD_PROD_CMD);

			txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
		}
	}

	/* Fill struct with RSS params */
	if (ha->num_rss > 1) {
		rss_params->update_rss_config = 1;
		rss_params->rss_enable = 1;
		rss_params->update_rss_capabilities = 1;
		rss_params->update_rss_ind_table = 1;
		rss_params->update_rss_key = 1;
		rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
				       ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
		rss_params->rss_table_size_log = 7; /* 2^7 = 128 */

		/* Spread the Rx queue handles round-robin over the table. */
		for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
			fp = &ha->fp_array[(i % ha->num_rss)];
			rss_params->rss_ind_table[i] = fp->rxq->handle;
		}

		for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
			rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];

	} else {
		memset(rss_params, 0, sizeof(*rss_params));
	}

	/* Prepare and send the vport enable */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport_id;
	vport_update_params.update_vport_active_tx_flg = 1;
	vport_update_params.vport_active_tx_flg = 1;
	vport_update_params.update_vport_active_rx_flg = 1;
	vport_update_params.vport_active_rx_flg = 1;
	vport_update_params.rss_params = rss_params;
	vport_update_params.update_inner_vlan_removal_flg = 1;
	vport_update_params.inner_vlan_removal_flg = 1;

	if (hw_lro_enable) {
		/* Hardware TPA (RSC) aggregation parameters. */
		memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params));

		tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;

		tpa_params.update_tpa_en_flg = 1;
		tpa_params.tpa_ipv4_en_flg = 1;
		tpa_params.tpa_ipv6_en_flg = 1;

		tpa_params.update_tpa_param_flg = 1;
		tpa_params.tpa_pkt_split_flg = 0;
		tpa_params.tpa_hdr_data_split_flg = 0;
		tpa_params.tpa_gro_consistent_flg = 0;
		tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		tpa_params.tpa_max_size = (uint16_t)(-1);
		tpa_params.tpa_min_size_to_start = if_getmtu(ifp) / 2;
		tpa_params.tpa_min_size_to_cont = if_getmtu(ifp) / 2;

		vport_update_params.sge_tpa_params = &tpa_params;
	}

	rc = qlnx_update_vport(cdev, &vport_update_params);
	if (rc) {
		QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
		return rc;
	}

	return 0;
}
/*
 * Wait until the FW has consumed every pending BD on 'txq', reaping
 * completed descriptors via qlnx_tx_int() with 2ms sleeps in between.
 * NOTE(review): there is no timeout — this relies on the FW/HW making
 * forward progress; a wedged queue would spin here forever.
 */
static int
qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_tx_queue *txq)
{
	uint16_t hw_bd_cons;
	uint16_t ecore_cons_idx;

	QL_DPRINT2(ha, "enter\n");

	hw_bd_cons = le16toh(*txq->hw_cons_ptr);

	while (hw_bd_cons !=
		(ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
		mtx_lock(&fp->tx_mtx);

		(void)qlnx_tx_int(ha, fp, txq);

		mtx_unlock(&fp->tx_mtx);

		qlnx_mdelay(__func__, 2);

		hw_bd_cons = le16toh(*txq->hw_cons_ptr);
	}

	QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);

	return 0;
}
/*
 * Quiesce the data path: deactivate the vport, drain all Tx queues,
 * stop every Tx/Rx queue in reverse order, and finally send the
 * VPORT-stop ramrod on each hw function. Returns 0 or the first error.
 */
static int
qlnx_stop_queues(qlnx_host_t *ha)
{
	struct qlnx_update_vport_params	vport_update_params;
	struct ecore_dev		*cdev;
	struct qlnx_fastpath		*fp;
	int				rc, tc, i;

	cdev = &ha->cdev;

	/* Disable the vport */
	memset(&vport_update_params, 0, sizeof(vport_update_params));

	vport_update_params.vport_id = 0;
	vport_update_params.update_vport_active_tx_flg = 1;
	vport_update_params.vport_active_tx_flg = 0;
	vport_update_params.update_vport_active_rx_flg = 1;
	vport_update_params.vport_active_rx_flg = 0;
	vport_update_params.rss_params = &ha->rss_params;
	vport_update_params.rss_params->update_rss_config = 0;
	vport_update_params.rss_params->rss_enable = 0;
	vport_update_params.update_inner_vlan_removal_flg = 0;
	vport_update_params.inner_vlan_removal_flg = 0;

	QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id);

	rc = qlnx_update_vport(cdev, &vport_update_params);
	if (rc) {
		QL_DPRINT1(ha, "Failed to update vport\n");
		return rc;
	}

	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_rss(i) {
		fp = &ha->fp_array[i];

		for (tc = 0; tc < ha->num_tc; tc++) {
			struct qlnx_tx_queue *txq = fp->txq[tc];

			rc = qlnx_drain_txq(ha, fp, txq);
			if (rc)
				return rc;
		}
	}

	/* Stop all Queues in reverse order*/
	for (i = ha->num_rss - 1; i >= 0; i--) {
		/* CMT: same hwfn mapping as used when starting queues. */
		struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];

		fp = &ha->fp_array[i];

		/* Stop the Tx Queue(s)*/
		for (tc = 0; tc < ha->num_tc; tc++) {
			int tx_queue_id __unused;

			tx_queue_id = tc * ha->num_rss + i;
			rc = ecore_eth_tx_queue_stop(p_hwfn,
					fp->txq[tc]->handle);

			if (rc) {
				QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
					tx_queue_id);
				return rc;
			}
		}

		/* Stop the Rx Queue*/
		rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
				false);
		if (rc) {
			QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
			return rc;
		}
	}

	/* Stop the vport */
	for_each_hwfn(cdev, i) {
		struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);

		if (rc) {
			QL_DPRINT1(ha, "Failed to stop VPORT\n");
			return rc;
		}
	}

	return rc;
}
/* Issue a unicast MAC Rx filter command (add/remove/replace/flush) on vport 0. */
static int
qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
	enum ecore_filter_opcode opcode,
	unsigned char mac[ETH_ALEN])
{
	struct ecore_dev		*cdev = &ha->cdev;
	struct ecore_filter_ucast	ucast;

	memset(&ucast, 0, sizeof(struct ecore_filter_ucast));

	ucast.opcode = opcode;
	ucast.type = ECORE_FILTER_MAC;
	ucast.is_rx_filter = 1;
	ucast.vport_to_add_to = 0;
	memcpy(&ucast.mac[0], mac, ETH_ALEN);

	return (ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL));
}
/* Replace (clear) all unicast MAC Rx filters programmed in the device. */
static int
qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
{
	struct ecore_dev		*cdev = &ha->cdev;
	struct ecore_filter_ucast	ucast;

	memset(&ucast, 0, sizeof(struct ecore_filter_ucast));

	ucast.opcode = ECORE_FILTER_REPLACE;
	ucast.type = ECORE_FILTER_MAC;
	ucast.is_rx_filter = 1;

	return (ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL));
}
/*
 * Remove every multicast MAC currently tracked in ha->mcast[] from the
 * device filters, then clear the software multicast table.
 */
static int
qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
{
	struct ecore_filter_mcast	*mcast;
	struct ecore_dev		*cdev;
	int				rc, i;

	cdev = &ha->cdev;

	mcast = &ha->ecore_mcast;
	bzero(mcast, sizeof(struct ecore_filter_mcast));

	mcast->opcode = ECORE_FILTER_REMOVE;

	/* Copy every non-zero entry into the command.
	 * NOTE(review): entries are copied to mac[i] while only
	 * num_mc_addrs is incremented — a sparse ha->mcast[] would leave
	 * gaps in mac[]; confirm the table is always kept compact. */
	for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
		if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
			ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
			ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
			memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0], ETH_ALEN);
			mcast->num_mc_addrs++;
		}
	}
	mcast = &ha->ecore_mcast;

	rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);

	/* Forget all software-tracked multicast addresses. */
	bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
	ha->nmcast = 0;

	return (rc);
}
/* Remove all unicast and multicast MAC filters, then flush the primary MAC. */
static int
qlnx_clean_filters(qlnx_host_t *ha)
{
	int rc;

	/* Remove all unicast macs */
	rc = qlnx_remove_all_ucast_mac(ha);
	if (rc)
		return rc;

	/* Remove all multicast macs */
	rc = qlnx_remove_all_mcast_mac(ha);
	if (rc)
		return rc;

	return (qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH,
	    ha->primary_mac));
}
/* Program the Rx accept mode with the caller's filter; Tx mode is fixed. */
static int
qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
{
	struct ecore_dev		*cdev = &ha->cdev;
	struct ecore_filter_accept_flags accept;

	memset(&accept, 0, sizeof(struct ecore_filter_accept_flags));

	accept.update_rx_mode_config = 1;
	accept.rx_accept_filter = filter;
	accept.update_tx_mode_config = 1;
	accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
	    ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;

	return (ecore_filter_accept_cmd(cdev, 0, accept, false, false,
	    ECORE_SPQ_MODE_CB, NULL));
}
/*
 * (Re)program the Rx mode: install the primary unicast MAC, rebuild the
 * multicast filters, and set the accept flags (promiscuous/allmulti
 * variants as the interface flags dictate).
 *
 * Fix: use the if_getflags() accessor instead of dereferencing
 * ha->ifp->if_flags directly, consistent with the if_t accessors
 * (if_getmtu, if_getcapenable) used elsewhere in this file.
 */
static int
qlnx_set_rx_mode(qlnx_host_t *ha)
{
	int	rc = 0;
	int	if_flags;
	uint8_t	filter;

	rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
	if (rc)
		return rc;

	rc = qlnx_remove_all_mcast_mac(ha);
	if (rc)
		return rc;

	filter = ECORE_ACCEPT_UCAST_MATCHED |
			ECORE_ACCEPT_MCAST_MATCHED |
			ECORE_ACCEPT_BCAST;

	if_flags = if_getflags(ha->ifp);

	if (qlnx_vf_device(ha) == 0 || (if_flags & IFF_PROMISC)) {
		filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
		filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
	} else if (if_flags & IFF_ALLMULTI) {
		filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
	}
	ha->filter = filter;

	rc = qlnx_set_rx_accept_filter(ha, filter);

	return (rc);
}
/*
 * Request link up/down via the MCP on every hw function.
 * Skipped when qlnx_vf_device() reports 0 (presumably the VF case,
 * where link is controlled by the PF — see qlnx_vf_device()).
 */
static int
qlnx_set_link(qlnx_host_t *ha, bool link_up)
{
	struct ecore_dev	*cdev = &ha->cdev;
	struct ecore_hwfn	*hwfn;
	struct ecore_ptt	*ptt;
	int			i;
	int			rc = 0;

	if (qlnx_vf_device(ha) == 0)
		return (0);

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];

		ptt = ecore_ptt_acquire(hwfn);
		if (ptt == NULL)
			return -EBUSY;

		rc = ecore_mcp_set_link(hwfn, ptt, link_up);

		ecore_ptt_release(hwfn, ptt);

		if (rc != 0)
			return rc;
	}
	return (rc);
}
/*
 * if_get_counter handler: report interface statistics from the cached
 * hardware vport stats (refreshed periodically by qlnx_timer()).
 *
 * Fix: IFCOUNTER_IMCASTS/IFCOUNTER_OMCASTS are packet counters but were
 * returning rx_mcast_bytes/tx_mcast_bytes; use the *_pkts fields, as the
 * IPACKETS/OPACKETS cases already do.
 */
static uint64_t
qlnx_get_counter(if_t ifp, ift_counter cnt)
{
	qlnx_host_t *ha;
	uint64_t count;

	ha = (qlnx_host_t *)if_getsoftc(ifp);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		count = ha->hw_stats.common.rx_ucast_pkts +
			ha->hw_stats.common.rx_mcast_pkts +
			ha->hw_stats.common.rx_bcast_pkts;
		break;

	case IFCOUNTER_IERRORS:
		count = ha->hw_stats.common.rx_crc_errors +
			ha->hw_stats.common.rx_align_errors +
			ha->hw_stats.common.rx_oversize_packets +
			ha->hw_stats.common.rx_undersize_packets;
		break;

	case IFCOUNTER_OPACKETS:
		count = ha->hw_stats.common.tx_ucast_pkts +
			ha->hw_stats.common.tx_mcast_pkts +
			ha->hw_stats.common.tx_bcast_pkts;
		break;

	case IFCOUNTER_OERRORS:
		count = ha->hw_stats.common.tx_err_drop_pkts;
		break;

	case IFCOUNTER_COLLISIONS:
		return (0);

	case IFCOUNTER_IBYTES:
		count = ha->hw_stats.common.rx_ucast_bytes +
			ha->hw_stats.common.rx_mcast_bytes +
			ha->hw_stats.common.rx_bcast_bytes;
		break;

	case IFCOUNTER_OBYTES:
		count = ha->hw_stats.common.tx_ucast_bytes +
			ha->hw_stats.common.tx_mcast_bytes +
			ha->hw_stats.common.tx_bcast_bytes;
		break;

	case IFCOUNTER_IMCASTS:
		count = ha->hw_stats.common.rx_mcast_pkts;
		break;

	case IFCOUNTER_OMCASTS:
		count = ha->hw_stats.common.tx_mcast_pkts;
		break;

	case IFCOUNTER_IQDROPS:
	case IFCOUNTER_OQDROPS:
	case IFCOUNTER_NOPROTO:
	default:
		return (if_get_counter_default(ifp, cnt));
	}
	return (count);
}
/* 1 Hz housekeeping callout: stats refresh, storm sampling, error recovery. */
static void
qlnx_timer(void *arg)
{
	qlnx_host_t *ha = arg;

	/* Hand pending error recovery to the error taskqueue and stop
	 * rescheduling until recovery restarts the timer. */
	if (ha->error_recovery) {
		ha->error_recovery = 0;
		taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
		return;
	}

	ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);

	if (ha->storm_stats_gather)
		qlnx_sample_storm_stats(ha);

	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
}
/*
 * Bring the interface fully up: allocate fastpath arrays and memory,
 * hook up interrupts, start the vport/queues, program Rx filters, ask
 * for link, and arm the housekeeping timer. Unwinds via the exit labels
 * on failure. Returns 0 on success.
 */
static int
qlnx_load(qlnx_host_t *ha)
{
	int		i;
	int		rc = 0;
	device_t	dev;

	dev = ha->pci_dev;

	QL_DPRINT2(ha, "enter\n");

	rc = qlnx_alloc_mem_arrays(ha);
	if (rc)
		goto qlnx_load_exit0;

	qlnx_init_fp(ha);

	rc = qlnx_alloc_mem_load(ha);
	if (rc)
		goto qlnx_load_exit1;

	QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
		ha->num_rss, ha->num_tc);

	/* Set up one fastpath interrupt per RSS queue and pin it to a CPU. */
	for (i = 0; i < ha->num_rss; i++) {
		if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE),
			NULL, qlnx_fp_isr, &ha->irq_vec[i],
			&ha->irq_vec[i].handle))) {
			QL_DPRINT1(ha, "could not setup interrupt\n");
			goto qlnx_load_exit2;
		}

		QL_DPRINT2(ha, "rss_id = %d irq_rid %d \
			 irq %p handle %p\n", i,
			ha->irq_vec[i].irq_rid,
			ha->irq_vec[i].irq, ha->irq_vec[i].handle);

		bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
	}

	rc = qlnx_start_queues(ha);
	if (rc)
		goto qlnx_load_exit2;

	QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");

	/* Add primary mac and set Rx filters */
	rc = qlnx_set_rx_mode(ha);
	if (rc)
		goto qlnx_load_exit2;

	/* Ask for link-up using current configuration */
	qlnx_set_link(ha, true);

	if (qlnx_vf_device(ha) == 0)
		qlnx_link_update(&ha->cdev.hwfns[0]);

	ha->state = QLNX_STATE_OPEN;

	/* Stats start from a clean slate for this load cycle. */
	bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));

	if (ha->flags.callout_init)
		callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);

	goto qlnx_load_exit0;

qlnx_load_exit2:
	qlnx_free_mem_load(ha);

qlnx_load_exit1:
	ha->num_rss = 0;

qlnx_load_exit0:
	QL_DPRINT2(ha, "exit [%d]\n", rc);
	return rc;
}
/* Flush packets held by the software LRO engine on every Rx queue. */
static void
qlnx_drain_soft_lro(qlnx_host_t *ha)
{
#ifdef QLNX_SOFT_LRO
	if_t	ifp = ha->ifp;
	int	i;

	if ((if_getcapenable(ifp) & IFCAP_LRO) == 0)
		return;

	for (i = 0; i < ha->num_rss; i++) {
		struct qlnx_fastpath *fp = &ha->fp_array[i];

		tcp_lro_flush_all(&fp->rxq->lro);
	}
#endif /* #ifdef QLNX_SOFT_LRO */
}
/*
 * Tear the interface down: drop link, clear filters, stop all queues
 * and the fastpath, detach interrupt handlers, drain taskqueues and
 * soft LRO, free per-load memory, and stop the housekeeping callout.
 */
static void
qlnx_unload(qlnx_host_t *ha)
{
	struct ecore_dev	*cdev;
	device_t		dev;
	int			i;

	cdev = &ha->cdev;
	dev = ha->pci_dev;

	QL_DPRINT2(ha, "enter\n");
	QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state);

	if (ha->state == QLNX_STATE_OPEN) {
		qlnx_set_link(ha, false);
		qlnx_clean_filters(ha);
		qlnx_stop_queues(ha);
		ecore_hw_stop_fastpath(cdev);

		/* Tear down the per-RSS interrupt handlers. */
		for (i = 0; i < ha->num_rss; i++) {
			if (ha->irq_vec[i].handle) {
				(void)bus_teardown_intr(dev,
					ha->irq_vec[i].irq,
					ha->irq_vec[i].handle);
				ha->irq_vec[i].handle = NULL;
			}
		}

		qlnx_drain_fp_taskqueues(ha);
		qlnx_drain_soft_lro(ha);
		qlnx_free_mem_load(ha);
	}

	if (ha->flags.callout_init)
		callout_drain(&ha->qlnx_callout);

	/* Allow in-flight work to settle before declaring closed. */
	qlnx_mdelay(__func__, 1000);

	ha->state = QLNX_STATE_CLOSED;

	QL_DPRINT2(ha, "exit\n");
	return;
}
/* Query the GRC dump buffer size (in dwords) required for one hwfn. */
static int
qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
{
	struct ecore_hwfn	*p_hwfn;
	struct ecore_ptt	*p_ptt;
	int			rval;

	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (p_ptt == NULL) {
		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
		return (-1);
	}

	rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
	if (rval == DBG_STATUS_OK) {
		rval = 0;
	} else {
		QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
			"[0x%x]\n", rval);
	}

	ecore_ptt_release(p_hwfn, p_ptt);
	return (rval);
}
/* Query the idle-check dump buffer size (in dwords) required for one hwfn. */
static int
qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
{
	struct ecore_hwfn	*p_hwfn;
	struct ecore_ptt	*p_ptt;
	int			rval;

	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (p_ptt == NULL) {
		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
		return (-1);
	}

	rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
	if (rval == DBG_STATUS_OK) {
		rval = 0;
	} else {
		QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
			" [0x%x]\n", rval);
	}

	ecore_ptt_release(p_hwfn, p_ptt);
	return (rval);
}
static void
qlnx_sample_storm_stats(qlnx_host_t *ha)
{
int i, index;
struct ecore_dev *cdev;
qlnx_storm_stats_t *s_stats;
uint32_t reg;
struct ecore_ptt *p_ptt;
struct ecore_hwfn *hwfn;
if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
ha->storm_stats_gather = 0;
return;
}
cdev = &ha->cdev;
for_each_hwfn(cdev, i) {
hwfn = &cdev->hwfns[i];
p_ptt = ecore_ptt_acquire(hwfn);
if (!p_ptt)
return;
index = ha->storm_stats_index +
(i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);
s_stats = &ha->storm_stats[index];
/* XSTORM */
reg = XSEM_REG_FAST_MEMORY +
SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
reg = XSEM_REG_FAST_MEMORY +
SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
reg = XSEM_REG_FAST_MEMORY +
SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
reg = XSEM_REG_FAST_MEMORY +
SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
/* YSTORM */
reg = YSEM_REG_FAST_MEMORY +
SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
reg = YSEM_REG_FAST_MEMORY +
SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
reg = YSEM_REG_FAST_MEMORY +
SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
reg = YSEM_REG_FAST_MEMORY +
SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
/* PSTORM */
reg = PSEM_REG_FAST_MEMORY +
SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
reg = PSEM_REG_FAST_MEMORY +
SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
reg = PSEM_REG_FAST_MEMORY +
SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
reg = PSEM_REG_FAST_MEMORY +
SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
/* TSTORM */
reg = TSEM_REG_FAST_MEMORY +
SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
reg = TSEM_REG_FAST_MEMORY +
SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
reg = TSEM_REG_FAST_MEMORY +
SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
reg = TSEM_REG_FAST_MEMORY +
SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
/* MSTORM */
reg = MSEM_REG_FAST_MEMORY +
SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
reg = MSEM_REG_FAST_MEMORY +
SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
reg = MSEM_REG_FAST_MEMORY +
SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
reg = MSEM_REG_FAST_MEMORY +
SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
/* USTORM */
reg = USEM_REG_FAST_MEMORY +
SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
reg = USEM_REG_FAST_MEMORY +
SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
reg = USEM_REG_FAST_MEMORY +
SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
reg = USEM_REG_FAST_MEMORY +
SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
ecore_ptt_release(hwfn, p_ptt);
}
ha->storm_stats_index++;
return;
}
/*
* Name: qlnx_dump_buf8
* Function: dumps a buffer as bytes
*/
/*
 * Name: qlnx_dump_buf8
 * Function: dumps a buffer as bytes, 16 bytes per output line.
 *
 * ha   - adapter softc; only ha->pci_dev is used (for device_printf)
 * msg  - caller-supplied tag printed in the start/end banner lines
 * dbuf - buffer to dump
 * len  - number of bytes to dump
 *
 * The previous implementation open-coded a 15-way switch for the trailing
 * partial line; a small snprintf() loop produces byte-for-byte identical
 * output with far less duplication.
 */
static void
qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
{
	device_t	dev;
	uint32_t	i = 0;	/* byte offset printed at the start of each line */
	uint8_t		*buf;

	dev = ha->pci_dev;
	buf = dbuf;

	device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);

	/* Full 16-byte lines */
	while (len >= 16) {
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3],
			buf[4], buf[5], buf[6], buf[7],
			buf[8], buf[9], buf[10], buf[11],
			buf[12], buf[13], buf[14], buf[15]);
		i += 16;
		len -= 16;
		buf += 16;
	}

	/* Trailing partial line of 1..15 bytes, if any */
	if (len > 0) {
		char		line[64];	/* 15 * 3 chars + NUL fits */
		int		n = 0;
		uint32_t	j;

		for (j = 0; j < len; j++)
			n += snprintf(&line[n], sizeof(line) - n,
				" %02x", buf[j]);

		device_printf(dev, "0x%08x:%s\n", i, line);
	}

	device_printf(dev, "%s: %s dump end\n", __func__, msg);

	return;
}
#ifdef CONFIG_ECORE_SRIOV
/*
 * Clear any forced MAC/VLAN recorded in the public info of VF
 * `rel_vf_id`, if that VF has public info at all.
 */
static void
__qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id)
{
	struct ecore_public_vf_info *info;

	info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false);
	if (info == NULL)
		return;

	/* Drop the forced MAC and VLAN for this VF */
	memset(info->forced_mac, 0, ETH_ALEN);
	info->forced_vlan = 0;
}
/*
 * ecore OSAL hook: expose the per-VF cleanup through a void * hwfn
 * pointer, as required by the ecore layer.
 */
void
qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id)
{
	__qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id);
}
/*
 * Validate a unicast filter request made by a VF.
 *
 * Fails with ECORE_INVAL if the VF has no vport instance yet.  For MAC
 * (or MAC+VLAN) filter requests the VF's configured MAC from the public
 * VF info is copied into the request; no accept/reject decision is made.
 *
 * NOTE(review): the two failure paths use different conventions
 * (ECORE_INVAL vs. -EINVAL) — presumably equivalent to callers, but
 * worth confirming against the ecore status definitions.
 */
static int
__qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid,
	struct ecore_filter_ucast *params)
{
	struct ecore_public_vf_info *vf;

	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev),
			"VF[%d] vport not initialized\n", vfid);
		return ECORE_INVAL;
	}

	vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true);
	if (!vf)
		return -EINVAL;

	/* No real decision to make; Store the configured MAC */
	if (params->type == ECORE_FILTER_MAC ||
	    params->type == ECORE_FILTER_MAC_VLAN)
		memcpy(params->mac, vf->forced_mac, ETH_ALEN);

	return 0;
}
/*
 * ecore OSAL hook: forward a VF unicast-filter check through void *
 * parameters, as required by the ecore layer.
 */
int
qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params)
{
	int rc;

	rc = __qlnx_iov_chk_ucast(p_hwfn, vfid, params);
	return (rc);
}
/*
 * Validate a VF vport-update request.
 *
 * Returns ECORE_INVAL if the VF has not instantiated a vport yet;
 * otherwise the request is accepted unconditionally — note that both
 * the TLV check below and the fall-through return 0, so accept-param
 * requests are simply passed through without modification.
 */
static int
__qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid,
	struct ecore_sp_vport_update_params *params, uint16_t * tlvs)
{
	if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) {
		QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev),
			"VF[%d] vport not initialized\n", vfid);
		return ECORE_INVAL;
	}

	/* Untrusted VFs can't even be trusted to know that fact.
	 * Simply indicate everything is configured fine, and trace
	 * configuration 'behind their back'.
	 */
	if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM)))
		return 0;

	return 0;
}
/*
 * ecore OSAL hook: forward a VF vport-update validation through void *
 * parameters, as required by the ecore layer.
 */
int
qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs)
{
	int rc;

	rc = __qlnx_iov_update_vport(hwfn, vfid, params, tlvs);
	return (rc);
}
/*
 * Map a hwfn pointer back to its index within its device's hwfns[]
 * array.  Returns the index, or -1 if the pointer does not belong to
 * the device.
 */
static int
qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn)
{
	struct ecore_dev	*cdev = p_hwfn->p_dev;
	int			idx;

	for (idx = 0; idx < cdev->num_hwfns; idx++) {
		if (&cdev->hwfns[idx] == p_hwfn)
			return (idx);
	}

	return (-1);
}
/*
 * Schedule handling of a pending VF->PF mailbox message: set the
 * VF_PF_MSG flag on this hwfn's SR-IOV task and kick its taskqueue.
 * The actual mailbox processing happens in qlnx_pf_taskqueue().
 *
 * Returns ECORE_SUCCESS, or -1 if the hwfn cannot be mapped to an
 * index.  rel_vf_id is only logged; the taskqueue scans all VFs.
 */
static int
__qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id)
{
	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
	int i;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n",
		ha, p_hwfn->p_dev, p_hwfn, rel_vf_id);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return (-1);

	if (ha->sriov_task[i].pf_taskqueue != NULL) {
		atomic_testandset_32(&ha->sriov_task[i].flags,
			QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG);

		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
			&ha->sriov_task[i].pf_task);
	}

	return (ECORE_SUCCESS);
}
/*
 * ecore OSAL hook: forward a VF->PF mailbox notification through a
 * void * hwfn pointer, as required by the ecore layer.
 */
int
qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id)
{
	int rc;

	rc = __qlnx_pf_vf_msg(p_hwfn, relative_vf_id);
	return (rc);
}
/*
 * Schedule VF FLR (function-level reset) cleanup for this hwfn: set the
 * VF_FLR_UPDATE flag on its SR-IOV task and kick the taskqueue.  The
 * cleanup itself runs in qlnx_handle_vf_flr_update() via
 * qlnx_pf_taskqueue().  No-op until SR-IOV has been initialized.
 */
static void
__qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn)
{
	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
	int i;

	if (!ha->sriov_initialized)
		return;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
		ha, p_hwfn->p_dev, p_hwfn);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return;

	if (ha->sriov_task[i].pf_taskqueue != NULL) {
		atomic_testandset_32(&ha->sriov_task[i].flags,
			QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE);

		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
			&ha->sriov_task[i].pf_task);
	}

	return;
}
/*
 * ecore OSAL hook: forward a VF FLR notification through a void * hwfn
 * pointer, as required by the ecore layer.
 */
void
qlnx_vf_flr_update(void *p_hwfn)
{
	__qlnx_vf_flr_update(p_hwfn);
}
#ifndef QLNX_VF
/*
 * Schedule posting of updated bulletin boards to this hwfn's VFs: set
 * the BULLETIN_UPDATE flag on its SR-IOV task and kick the taskqueue
 * (serviced by qlnx_handle_bulletin_update()).
 *
 * NOTE(review): "bulleting" is a long-standing misspelling of
 * "bulletin"; the name is kept since it is used throughout the file.
 */
static void
qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn)
{
	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
	int i;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
		ha, p_hwfn->p_dev, p_hwfn);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p i = %d\n",
		ha, p_hwfn->p_dev, p_hwfn, i);

	if (ha->sriov_task[i].pf_taskqueue != NULL) {
		atomic_testandset_32(&ha->sriov_task[i].flags,
			QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE);

		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
			&ha->sriov_task[i].pf_task);
	}
}
/*
 * Register this PF with the pci_iov(4) framework: build the PF/VF
 * config schemas (VF MAC address, allow-set-mac, allow-promisc,
 * num-queues) and attach.  On success ha->sriov_initialized is set;
 * on failure it is cleared and SR-IOV stays unavailable.
 */
static void
qlnx_initialize_sriov(qlnx_host_t *ha)
{
	device_t	dev;
	nvlist_t	*pf_schema, *vf_schema;
	int		iov_error;

	dev = ha->pci_dev;

	/* PF schema is left empty; only VF parameters are defined */
	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();

	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
		IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
		IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_uint16(vf_schema, "num-queues",
		IOV_SCHEMA_HASDEFAULT, 1);

	iov_error = pci_iov_attach(dev, pf_schema, vf_schema);

	if (iov_error != 0) {
		ha->sriov_initialized = 0;
	} else {
		device_printf(dev, "SRIOV initialized\n");
		ha->sriov_initialized = 1;
	}

	return;
}
/*
 * Disable all VFs: mark them disabled, then for every hwfn clean the
 * WFQ DB and release the HW resources of each valid VF, waiting up to
 * ~1s (100 x 10ms) for started VFs to stop first.
 *
 * NOTE(review): the early return on a failed ecore_ptt_acquire() leaves
 * vfs_to_disable set to true (the final "false" call is skipped) —
 * presumably acceptable in this failure mode, but worth confirming.
 */
static void
qlnx_sriov_disable(qlnx_host_t *ha)
{
	struct ecore_dev *cdev;
	int i, j;

	cdev = &ha->cdev;

	ecore_iov_set_vfs_to_disable(cdev, true);

	for_each_hwfn(cdev, i) {
		struct ecore_hwfn *hwfn = &cdev->hwfns[i];
		struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);

		if (!ptt) {
			QL_DPRINT1(ha, "Failed to acquire ptt\n");
			return;
		}

		/* Clean WFQ db and configure equal weight for all vports */
		ecore_clean_wfq_db(hwfn, ptt);

		ecore_for_each_vf(hwfn, j) {
			int k = 0;

			if (!ecore_iov_is_valid_vfid(hwfn, j, true, false))
				continue;

			if (ecore_iov_is_vf_started(hwfn, j)) {
				/* Wait until VF is disabled before releasing */

				for (k = 0; k < 100; k++) {
					if (!ecore_iov_is_vf_stopped(hwfn, j)) {
						qlnx_mdelay(__func__, 10);
					} else
						break;
				}
			}

			/* k stays 0 for VFs that were never started, so
			 * those are released immediately */
			if (k < 100)
				ecore_iov_release_hw_for_vf(&cdev->hwfns[i],
					ptt, j);

			else {
				QL_DPRINT1(ha,
					"Timeout waiting for VF's FLR to end\n");
			}
		}
		ecore_ptt_release(hwfn, ptt);
	}

	ecore_iov_set_vfs_to_disable(cdev, false);

	return;
}
/*
 * Compute the per-VF queue/vport assignment used by
 * ecore_iov_init_hw_for_vf(): queue ids are handed out contiguously
 * after the PF's own L2 queues, and vport/RSS engine 0 is reserved for
 * the PF, so VF n gets vport/RSS id n + 1.
 */
static void
qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid,
	struct ecore_iov_vf_init_params *params)
{
	u16 first_qid, q;

	/* Since we have an equal resource distribution per-VF, and we assume
	 * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting
	 * sequentially from there.
	 */
	first_qid = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues;

	params->rel_vf_id = vfid;

	for (q = 0; q < params->num_queues; q++) {
		params->req_rx_queue[q] = first_qid + q;
		params->req_tx_queue[q] = first_qid + q;
	}

	/* PF uses indices 0 for itself; Set vport/RSS afterwards */
	params->vport_id = vfid + 1;
	params->rss_eng_id = vfid + 1;
}
/*
 * pci_iov(4) PF_INIT method: bring up num_vfs VFs.
 *
 * Creates the per-hwfn PF taskqueues, allocates the per-VF attribute
 * array, programs each hwfn's HW for every valid VF (queue/vport ids
 * via qlnx_sriov_enable_qid_config(), at most 16 queues per VF), and
 * finally pushes the current link state to all VF bulletins.
 *
 * Returns 0 on success, -1 on any failure; the error ladder unwinds in
 * reverse order (err1: disable SR-IOV HW, err0: destroy taskqueues).
 * NOTE(review): ha->vf_attr is not freed on the error path here — it is
 * released in qlnx_iov_uninit(); confirm that uninit always follows.
 */
static int
qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params)
{
	qlnx_host_t		*ha;
	struct ecore_dev	*cdev;
	struct ecore_iov_vf_init_params params;
	int			ret, j, i;
	uint32_t		max_vfs;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (-1);
	}

	if (qlnx_create_pf_taskqueues(ha) != 0)
		goto qlnx_iov_init_err0;

	cdev = &ha->cdev;

	/* One vport is consumed by the PF itself */
	max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT);

	QL_DPRINT2(ha," dev = %p enter num_vfs = %d max_vfs = %d\n",
		dev, num_vfs, max_vfs);

	if (num_vfs >= max_vfs) {
		QL_DPRINT1(ha, "Can start at most %d VFs\n",
			(RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1));
		goto qlnx_iov_init_err0;
	}

	ha->vf_attr =  malloc(((sizeof (qlnx_vf_attr_t) * num_vfs)), M_QLNXBUF,
				M_NOWAIT);

	if (ha->vf_attr == NULL)
		goto qlnx_iov_init_err0;

	memset(&params, 0, sizeof(params));

	/* Initialize HW for VF access */
	for_each_hwfn(cdev, j) {
		struct ecore_hwfn *hwfn = &cdev->hwfns[j];
		struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);

		/* Make sure not to use more than 16 queues per VF */
		params.num_queues = min_t(int,
					  (FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs),
					  16);

		if (!ptt) {
			QL_DPRINT1(ha, "Failed to acquire ptt\n");
			goto qlnx_iov_init_err1;
		}

		for (i = 0; i < num_vfs; i++) {
			if (!ecore_iov_is_valid_vfid(hwfn, i, false, true))
				continue;

			qlnx_sriov_enable_qid_config(hwfn, i, &params);

			ret = ecore_iov_init_hw_for_vf(hwfn, ptt, &params);

			if (ret) {
				QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i);
				ecore_ptt_release(hwfn, ptt);
				goto qlnx_iov_init_err1;
			}
		}

		ecore_ptt_release(hwfn, ptt);
	}

	ha->num_vfs = num_vfs;
	qlnx_inform_vf_link_state(&cdev->hwfns[0], ha);

	QL_DPRINT2(ha," dev = %p exit num_vfs = %d\n", dev, num_vfs);

	return (0);

qlnx_iov_init_err1:
	qlnx_sriov_disable(ha);

qlnx_iov_init_err0:
	qlnx_destroy_pf_taskqueues(ha);
	ha->num_vfs = 0;

	return (-1);
}
/*
 * pci_iov(4) PF_UNINIT method: tear down SR-IOV — disable all VFs,
 * destroy the PF taskqueues and release the per-VF attribute array.
 */
static void
qlnx_iov_uninit(device_t dev)
{
	qlnx_host_t *ha;

	ha = device_get_softc(dev);
	if (ha == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return;
	}

	QL_DPRINT2(ha," dev = %p enter\n", dev);

	qlnx_sriov_disable(ha);
	qlnx_destroy_pf_taskqueues(ha);

	free(ha->vf_attr, M_QLNXBUF);
	ha->vf_attr = NULL;
	ha->num_vfs = 0;

	QL_DPRINT2(ha," dev = %p exit\n", dev);
}
/*
 * pci_iov(4) PF_ADD_VF method: record per-VF parameters for VF `vfnum`.
 * Currently only an optional "mac-addr" nvlist entry is consumed; it is
 * stored in ha->vf_attr[] and pushed to the VF's bulletin board.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
	qlnx_host_t		*ha;
	qlnx_vf_attr_t		*vf_attr;
	unsigned const char	*mac;
	size_t			size;
	struct ecore_hwfn	*p_hwfn;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (-1);
	}

	QL_DPRINT2(ha," dev = %p enter vfnum = %d\n", dev, vfnum);

	/*
	 * Bug fix: the original code only logged an out-of-range vfnum and
	 * then indexed ha->vf_attr[vfnum] anyway — an out-of-bounds access.
	 * Also, `vfnum > (num_vfs - 1)` wraps around when num_vfs == 0;
	 * `vfnum >= num_vfs` is safe and equivalent otherwise.
	 */
	if (vfnum >= ha->num_vfs) {
		QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n",
			vfnum, (ha->num_vfs - 1));
		return (-1);
	}

	vf_attr = &ha->vf_attr[vfnum];

	if (nvlist_exists_binary(params, "mac-addr")) {
		mac = nvlist_get_binary(params, "mac-addr", &size);
		bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN);
		device_printf(dev,
			"%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
			__func__, vf_attr->mac_addr[0],
			vf_attr->mac_addr[1], vf_attr->mac_addr[2],
			vf_attr->mac_addr[3], vf_attr->mac_addr[4],
			vf_attr->mac_addr[5]);
		p_hwfn = &ha->cdev.hwfns[0];
		/* Publish the MAC via the VF's bulletin board */
		ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr,
			vfnum);
	}

	QL_DPRINT2(ha," dev = %p exit vfnum = %d\n", dev, vfnum);

	return (0);
}
/*
 * Service pending VF->PF mailbox messages on this hwfn: read the
 * pending-events bitmap and, for every VF with a set bit, copy the
 * VF's message into the PF request buffer and process it.  If no PTT
 * can be acquired the work is re-scheduled via __qlnx_pf_vf_msg().
 */
static void
qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	uint64_t events[ECORE_VF_ARRAY_LENGTH];
	struct ecore_ptt *ptt;
	int i;

	ptt = ecore_ptt_acquire(p_hwfn);

	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		__qlnx_pf_vf_msg(p_hwfn, 0);
		return;
	}

	ecore_iov_pf_get_pending_events(p_hwfn, events);

	/*
	 * Bug fix: format was "0x%"PRIu64"0x%"PRIu64" 0x%"PRIu64 — the
	 * masks printed in decimal after an "0x" prefix, and the first two
	 * values ran together.  Use PRIx64 with separating spaces.
	 */
	QL_DPRINT2(ha, "Event mask of VF events:"
		"0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
		events[0], events[1], events[2]);

	ecore_for_each_vf(p_hwfn, i) {
		/* Skip VFs with no pending messages */
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		QL_DPRINT2(ha,
			"Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
			i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);

		/* Copy VF's message to PF's request buffer for that VF */
		if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i))
			continue;

		ecore_iov_process_mbx_req(p_hwfn, ptt, i);
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}
/*
 * Run VF FLR cleanup for this hwfn.  If no PTT can be acquired the work
 * is re-scheduled via __qlnx_vf_flr_update(); an ecore cleanup failure
 * is only logged (despite the message, it is NOT re-scheduled here).
 */
static void
qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *ptt;
	int ret;

	ptt = ecore_ptt_acquire(p_hwfn);

	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		__qlnx_vf_flr_update(p_hwfn);
		return;
	}

	ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt);

	if (ret) {
		QL_DPRINT1(ha, "ecore_iov_vf_flr_cleanup failed; re-scheduling\n");
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}
/*
 * Post the bulletin board of every VF on this hwfn.  If no PTT can be
 * acquired the work is re-scheduled via qlnx_vf_bulleting_update().
 */
static void
qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *ptt;
	int i;

	ptt = ecore_ptt_acquire(p_hwfn);

	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		qlnx_vf_bulleting_update(p_hwfn);
		return;
	}

	ecore_for_each_vf(p_hwfn, i) {
		QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n",
			p_hwfn, i);
		ecore_iov_post_vf_bulletin(p_hwfn, i, ptt);
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}
/*
 * Per-hwfn SR-IOV taskqueue handler.  Atomically consumes each of the
 * three work flags (VF_PF_MSG, VF_FLR_UPDATE, BULLETIN_UPDATE) set by
 * the scheduling helpers above and dispatches to the matching handler.
 * `context` is the hwfn this task was TASK_INIT'ed with.
 */
static void
qlnx_pf_taskqueue(void *context, int pending)
{
	struct ecore_hwfn	*p_hwfn;
	qlnx_host_t		*ha;
	int			i;

	p_hwfn = context;

	if (p_hwfn == NULL)
		return;

	ha = (qlnx_host_t *)(p_hwfn->p_dev);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return;

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG))
		qlnx_handle_vf_msg(ha, p_hwfn);

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE))
		qlnx_handle_vf_flr_update(ha, p_hwfn);

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE))
		qlnx_handle_bulletin_update(ha, p_hwfn);

	return;
}
/*
 * Create one SR-IOV taskqueue (with a single PI_NET thread) per hwfn,
 * each running qlnx_pf_taskqueue() for that hwfn.  Returns 0 on
 * success, -1 if any taskqueue_create() fails (already-created queues
 * are left for qlnx_destroy_pf_taskqueues() to reclaim, which the
 * caller's error path invokes).
 */
static int
qlnx_create_pf_taskqueues(qlnx_host_t *ha)
{
	int	i;
	/* Fix: was uint8_t[]; snprintf()/taskqueue_create() take char *,
	 * and snprintf() already NUL-terminates, so the bzero() is gone. */
	char	tq_name[32];

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

		snprintf(tq_name, sizeof (tq_name), "ql_pf_tq_%d", i);

		TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue,
			p_hwfn);

		ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name,
			M_NOWAIT, taskqueue_thread_enqueue,
			&ha->sriov_task[i].pf_taskqueue);

		if (ha->sriov_task[i].pf_taskqueue == NULL)
			return (-1);

		taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1,
			PI_NET, "%s", tq_name);

		QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue);
	}

	return (0);
}
/*
 * Drain and free every per-hwfn SR-IOV taskqueue created by
 * qlnx_create_pf_taskqueues().  Safe to call with partially-created
 * queues (NULL entries are skipped).
 */
static void
qlnx_destroy_pf_taskqueues(qlnx_host_t *ha)
{
	int	i;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sriov_task[i].pf_taskqueue != NULL) {
			taskqueue_drain(ha->sriov_task[i].pf_taskqueue,
				&ha->sriov_task[i].pf_task);
			taskqueue_free(ha->sriov_task[i].pf_taskqueue);
			ha->sriov_task[i].pf_taskqueue = NULL;
		}
	}
	return;
}
/*
 * Push the PF's current link parameters/state/capabilities into the
 * bulletin of every possible VF on this hwfn, then schedule a bulletin
 * post.  Link-up is reported only while ha->link_up is set; on CMT
 * (multi-hwfn) devices the speed is forced to 100G.  No-op when the PF
 * has no IOV info.
 */
static void
qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha)
{
	struct ecore_mcp_link_capabilities caps;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;
	int i;

	if (!p_hwfn->pf_iov_info)
		return;

	memset(&params, 0, sizeof(struct ecore_mcp_link_params));
	memset(&link, 0, sizeof(struct ecore_mcp_link_state));
	memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities));

	memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));

	QL_DPRINT2(ha, "called\n");

	/* Update bulletin of all future possible VFs with link configuration */
	for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
		/* Modify link according to the VF's configured link state */

		link.link_up = false;

		if (ha->link_up) {
			link.link_up = true;
			/* Set speed according to maximum supported by HW.
			 * that is 40G for regular devices and 100G for CMT
			 * mode devices.
			 */
			link.speed = (p_hwfn->p_dev->num_hwfns > 1) ?
						100000 : link.speed;
		}

		QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up);

		ecore_iov_set_link(p_hwfn, i, &params, &link, &caps);
	}

	qlnx_vf_bulleting_update(p_hwfn);

	return;
}
#endif /* #ifndef QLNX_VF */
#endif /* #ifdef CONFIG_ECORE_SRIOV */
diff --git a/sys/dev/qlxgb/qla_os.c b/sys/dev/qlxgb/qla_os.c
index b9a0c1ec07a3..87e504a83c79 100644
--- a/sys/dev/qlxgb/qla_os.c
+++ b/sys/dev/qlxgb/qla_os.c
@@ -1,1456 +1,1453 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2011-2013 Qlogic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* File: qla_os.c
* Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
*/
#include <sys/cdefs.h>
#include "qla_os.h"
#include "qla_reg.h"
#include "qla_hw.h"
#include "qla_def.h"
#include "qla_inline.h"
#include "qla_ver.h"
#include "qla_glbl.h"
#include "qla_dbg.h"
/*
* Some PCI Configuration Space Related Defines
*/
#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC 0x1077
#endif
#ifndef PCI_PRODUCT_QLOGIC_ISP8020
#define PCI_PRODUCT_QLOGIC_ISP8020 0x8020
#endif
#define PCI_QLOGIC_ISP8020 \
((PCI_PRODUCT_QLOGIC_ISP8020 << 16) | PCI_VENDOR_QLOGIC)
/*
* static functions
*/
static int qla_alloc_parent_dma_tag(qla_host_t *ha);
static void qla_free_parent_dma_tag(qla_host_t *ha);
static int qla_alloc_xmt_bufs(qla_host_t *ha);
static void qla_free_xmt_bufs(qla_host_t *ha);
static int qla_alloc_rcv_bufs(qla_host_t *ha);
static void qla_free_rcv_bufs(qla_host_t *ha);
static void qla_init_ifnet(device_t dev, qla_host_t *ha);
static int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS);
static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
int error);
static void qla_stop(qla_host_t *ha);
static int qla_send(qla_host_t *ha, struct mbuf **m_headp);
static void qla_tx_done(void *context, int pending);
/*
* Hooks to the Operating Systems
*/
static int qla_pci_probe (device_t);
static int qla_pci_attach (device_t);
static int qla_pci_detach (device_t);
static void qla_init(void *arg);
static int qla_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int qla_media_change(if_t ifp);
static void qla_media_status(if_t ifp, struct ifmediareq *ifmr);
/* newbus device interface glue for the qla80xx driver */
static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qla_pci_probe),
	DEVMETHOD(device_attach, qla_pci_attach),
	DEVMETHOD(device_detach, qla_pci_detach),
	{ 0, 0 }
};

static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

DRIVER_MODULE(qla80xx, pci, qla_pci_driver, 0, 0);

MODULE_DEPEND(qla80xx, pci, 1, 1, 1);
MODULE_DEPEND(qla80xx, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA8XXXBUF, "qla80xxbuf", "Buffers for qla80xx driver");

/*
 * Driver tunables; each is also exported read-write as a sysctl by
 * qla_add_sysctls().
 */
uint32_t std_replenish = 8;	/* rx standard-frame replenish threshold */
uint32_t jumbo_replenish = 2;	/* rx jumbo-frame replenish threshold */
uint32_t rcv_pkt_thres = 128;	/* rcv pkts per indication (isr) */
uint32_t rcv_pkt_thres_d = 32;	/* rcv pkts per indication (deferred) */
uint32_t snd_pkt_thres = 16;	/* send packet threshold */
uint32_t free_pkt_thres = (NUM_TX_DESCRIPTORS / 2);	/* tx bufs freed per pass */

/* Device description filled in by qla_pci_probe() */
static char dev_str[64];
/*
* Name: qla_pci_probe
* Function: Validate the PCI device to be a QLA80XX device
*/
/*
 * Name: qla_pci_probe
 * Function: Validate the PCI device to be a QLA80XX device
 */
static int
qla_pci_probe(device_t dev)
{
	uint32_t chip_id;

	chip_id = (pci_get_device(dev) << 16) | (pci_get_vendor(dev));

	if (chip_id != PCI_QLOGIC_ISP8020)
		return (ENXIO);

	snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
		"Qlogic ISP 80xx PCI CNA Adapter-Ethernet Function",
		QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
		QLA_VERSION_BUILD);
	device_set_desc(dev, dev_str);

	if (bootverbose)
		printf("%s: %s\n ", __func__, dev_str);

	return (BUS_PROBE_DEFAULT);
}
/*
 * Register the driver's sysctl nodes under this device's tree: a
 * "stats" trigger, the firmware version string, the debug level, and
 * the global replenish/threshold tunables declared above.  Note the
 * tunables are globals, so the sysctls are shared across all instances.
 */
static void
qla_add_sysctls(qla_host_t *ha)
{
        device_t dev = ha->pci_dev;

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    (void *)ha, 0, qla_sysctl_get_stats, "I", "Statistics");

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "fw_version", CTLFLAG_RD,
		ha->fw_ver_str, 0, "firmware version");

	dbg_level = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "debug", CTLFLAG_RW,
                &dbg_level, dbg_level, "Debug Level");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "std_replenish", CTLFLAG_RW,
                &std_replenish, std_replenish,
                "Threshold for Replenishing Standard Frames");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "jumbo_replenish", CTLFLAG_RW,
                &jumbo_replenish, jumbo_replenish,
                "Threshold for Replenishing Jumbo Frames");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rcv_pkt_thres", CTLFLAG_RW,
                &rcv_pkt_thres, rcv_pkt_thres,
                "Threshold for # of rcv pkts to trigger indication isr");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rcv_pkt_thres_d", CTLFLAG_RW,
                &rcv_pkt_thres_d, rcv_pkt_thres_d,
                "Threshold for # of rcv pkts to trigger indication defered");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "snd_pkt_thres", CTLFLAG_RW,
                &snd_pkt_thres, snd_pkt_thres,
                "Threshold for # of snd packets");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "free_pkt_thres", CTLFLAG_RW,
                &free_pkt_thres, free_pkt_thres,
                "Threshold for # of packets to free at a time");

        return;
}
/*
 * Periodic watchdog callout (re-armed every QLA_WATCHDOG_CALLOUT_TICKS
 * unless qla_watchdog_exit is set).  While not paused it kicks the tx
 * task whenever the hardware's tx consumer index has advanced past what
 * the driver has completed, or when the send queue has pending work on
 * a running interface.
 */
static void
qla_watchdog(void *arg)
{
	qla_host_t *ha = arg;
	qla_hw_t *hw;
	if_t ifp;

	hw = &ha->hw;
	ifp = ha->ifp;

        if (ha->flags.qla_watchdog_exit)
		return;

	if (!ha->flags.qla_watchdog_pause) {
		if (qla_le32_to_host(*(hw->tx_cons)) != hw->txr_comp) {
			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
		} else if (!if_sendq_empty(ifp) && QL_RUNNING(ifp)) {
			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
		}
	}
	/* free-running tick counter, wraps at 1000 */
	ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);
}
/*
* Name: qla_pci_attach
* Function: attaches the device to the operating system
*/
/*
 * Name: qla_pci_attach
 * Function: attaches the device to the operating system
 *
 * Sequence: map BAR0, init locks, allocate MSI-X vectors, create the tx
 * taskqueue, set up one interrupt + rcv taskqueue per vector, init the
 * hardware, allocate DMA resources, create the ifnet and the ioctl
 * cdev, and finally arm the watchdog.  Any failure jumps to
 * qla_pci_attach_err, where qla_release() unwinds whatever was set up.
 */
static int
qla_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	uint32_t rsrc_len, i;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

        memset(ha, 0, sizeof (qla_host_t));

        if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8020) {
                device_printf(dev, "device is not ISP8020\n");
                return (ENXIO);
	}

        ha->pci_func = pci_get_function(dev);

        ha->pci_dev = dev;

	pci_enable_busmaster(dev);

	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

        if (ha->pci_reg == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qla_pci_attach_err;
        }

	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->rx_lock, "qla80xx_rx_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->rxj_lock, "qla80xx_rxj_lock", MTX_NETWORK_LOCK, MTX_DEF);
	ha->flags.lock_init = 1;

	/* The device must supply at least as many MSI-X vectors as we need */
	ha->msix_count = pci_msix_count(dev);

	if (ha->msix_count < qla_get_msix_count(ha)) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qla_pci_attach_err;
	}

	QL_DPRINT2((dev, "%s: ha %p irq %p pci_func 0x%x rsrc_count 0x%08x"
		" msix_count 0x%x pci_reg %p\n", __func__, ha,
		ha->irq, ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg));

	ha->msix_count = qla_get_msix_count(ha);

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qla_pci_attach_err;
	}

	TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha);
	ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->tx_tq);
	taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
		device_get_nameunit(ha->pci_dev));

	/* One interrupt handler + deferred rcv taskqueue per MSI-X vector */
        for (i = 0; i < ha->msix_count; i++) {
                ha->irq_vec[i].irq_rid = i+1;
                ha->irq_vec[i].ha = ha;

                ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                                        &ha->irq_vec[i].irq_rid,
                                        (RF_ACTIVE | RF_SHAREABLE));

                if (ha->irq_vec[i].irq == NULL) {
                        device_printf(dev, "could not allocate interrupt\n");
                        goto qla_pci_attach_err;
                }

                if (bus_setup_intr(dev, ha->irq_vec[i].irq,
                        (INTR_TYPE_NET | INTR_MPSAFE),
                        NULL, qla_isr, &ha->irq_vec[i],
                        &ha->irq_vec[i].handle)) {
                        device_printf(dev, "could not setup interrupt\n");
                        goto qla_pci_attach_err;
                }

		TASK_INIT(&ha->irq_vec[i].rcv_task, 0, qla_rcv,\
			&ha->irq_vec[i]);

		ha->irq_vec[i].rcv_tq = taskqueue_create_fast("qla_rcvq",
			M_NOWAIT, taskqueue_thread_enqueue,
			&ha->irq_vec[i].rcv_tq);

		taskqueue_start_threads(&ha->irq_vec[i].rcv_tq, 1, PI_NET,
			"%s rcvq",
			device_get_nameunit(ha->pci_dev));
        }

	qla_add_sysctls(ha);

	/* add hardware specific sysctls */
	qla_hw_add_sysctls(ha);

	/* initialize hardware */
	if (qla_init_hw(ha)) {
		device_printf(dev, "%s: qla_init_hw failed\n", __func__);
		goto qla_pci_attach_err;
	}

	device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
		ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
		ha->fw_ver_build);

	snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
			ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
			ha->fw_ver_build);

	//qla_get_hw_caps(ha);
	qla_read_mac_addr(ha);

	/* allocate parent dma tag */
	if (qla_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
			__func__);
		goto qla_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (qla_alloc_dma(ha)) {
		device_printf(dev, "%s: qla_alloc_dma failed\n", __func__);
		goto qla_pci_attach_err;
	}

	/* create the o.s ethernet interface */
	qla_init_ifnet(dev, ha);

	ha->flags.qla_watchdog_active = 1;
	/* watchdog stays paused until qla_init() brings the port up */
	ha->flags.qla_watchdog_pause = 1;

	callout_init(&ha->tx_callout, 1);

	/* create ioctl device interface */
	if (qla_make_cdev(ha)) {
		device_printf(dev, "%s: qla_make_cdev failed\n", __func__);
		goto qla_pci_attach_err;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);

	QL_DPRINT2((dev, "%s: exit 0\n", __func__));
        return (0);

qla_pci_attach_err:

	qla_release(ha);

	QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__));
        return (ENXIO);
}
/*
* Name: qla_pci_detach
* Function: Unhooks the device from the operating system
*/
/*
 * Name: qla_pci_detach
 * Function: Unhooks the device from the operating system
 *
 * Stops the interface under the driver lock, drains and frees the tx
 * and per-vector rcv taskqueues, then releases all remaining resources
 * via qla_release().
 */
static int
qla_pci_detach(device_t dev)
{
	qla_host_t *ha = NULL;
	int i;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

	QLA_LOCK(ha, __func__);
	qla_stop(ha);
	QLA_UNLOCK(ha, __func__);

	if (ha->tx_tq) {
		taskqueue_drain(ha->tx_tq, &ha->tx_task);
		taskqueue_free(ha->tx_tq);
	}

        for (i = 0; i < ha->msix_count; i++) {
		taskqueue_drain(ha->irq_vec[i].rcv_tq,
			&ha->irq_vec[i].rcv_task);
		taskqueue_free(ha->irq_vec[i].rcv_tq);
	}

	qla_release(ha);

	QL_DPRINT2((dev, "%s: exit\n", __func__));

        return (0);
}
/*
* SYSCTL Related Callbacks
*/
/*
 * Sysctl handler for dev.ql.N.stats: writing any value triggers a
 * (currently disabled) statistics fetch; reads return 0.
 */
static int
qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS)
{
	qla_host_t	*ha;
	int		ret = 0;
	int		err;

	err = sysctl_handle_int(oidp, &ret, 0, req);
	if (err != 0)
		return (err);

	ha = (qla_host_t *)arg1;
	//qla_get_stats(ha);
	QL_DPRINT2((ha->pci_dev, "%s: called ret %d\n", __func__, ret));
	return (err);
}
/*
* Name: qla_release
* Function: Releases the resources allocated for the device
*/
/*
 * Name: qla_release
 * Function: Releases the resources allocated for the device
 *
 * Unwinds qla_pci_attach() in reverse order; every step is guarded so
 * this is safe to call from any point of a failed attach.  The 100ms
 * delay lets an in-flight watchdog callout observe qla_watchdog_exit.
 */
static void
qla_release(qla_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

	qla_del_cdev(ha);

	if (ha->flags.qla_watchdog_active)
		ha->flags.qla_watchdog_exit = 1;

	callout_stop(&ha->tx_callout);
	qla_mdelay(__func__, 100);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	qla_free_dma(ha);
	qla_free_parent_dma_tag(ha);

	for (i = 0; i < ha->msix_count; i++) {
		if (ha->irq_vec[i].handle)
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
				ha->irq_vec[i].handle);
		if (ha->irq_vec[i].irq)
			(void) bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
	}
	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->tx_lock);
		mtx_destroy(&ha->rx_lock);
		mtx_destroy(&ha->rxj_lock);
		mtx_destroy(&ha->hw_lock);
	}

        if (ha->pci_reg)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);
}
/*
* DMA Related Functions
*/
/*
 * busdma load callback: stores the single mapped segment's bus address
 * into *arg, or leaves 0 there on error.  Exactly one segment is
 * expected (the tags are created with nsegments == 1).
 */
static void
qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *paddr = (bus_addr_t *)arg;

	*paddr = 0;

	if (error) {
		printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
		return;
	}

	QL_ASSERT((nsegs == 1), ("%s: %d segments returned!", __func__, nsegs));

	*paddr = segs[0].ds_addr;
}
/*
 * Allocate and map a coherent DMA buffer described by dma_buf (caller
 * sets dma_buf->size and dma_buf->alignment).  On success fills in
 * dma_tag, dma_map, dma_b (KVA) and dma_addr (bus address).
 *
 * Returns 0 on success, non-zero on failure (all partially-created
 * resources are torn down before returning).
 */
int
qla_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        int             ret = 0;
        device_t        dev;
        bus_addr_t      b_addr;

        dev = ha->pci_dev;

        QL_DPRINT2((dev, "%s: enter\n", __func__));

        ret = bus_dma_tag_create(
                        ha->parent_tag,/* parent */
                        dma_buf->alignment,
                        ((bus_size_t)(1ULL << 32)),/* boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        dma_buf->size,          /* maxsize */
                        1,                      /* nsegments */
                        dma_buf->size,          /* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &dma_buf->dma_tag);

        if (ret) {
                device_printf(dev, "%s: could not create dma tag\n", __func__);
                goto qla_alloc_dmabuf_exit;
        }
        ret = bus_dmamem_alloc(dma_buf->dma_tag,
                        (void **)&dma_buf->dma_b,
                        (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
                        &dma_buf->dma_map);
        if (ret) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
                goto qla_alloc_dmabuf_exit;
        }

        ret = bus_dmamap_load(dma_buf->dma_tag,
                        dma_buf->dma_map,
                        dma_buf->dma_b,
                        dma_buf->size,
                        qla_dmamap_callback,
                        &b_addr, BUS_DMA_NOWAIT);

	/* b_addr == 0 means the load callback reported an error */
        if (ret || !b_addr) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
                        dma_buf->dma_map);
                ret = -1;
                goto qla_alloc_dmabuf_exit;
        }

        dma_buf->dma_addr = b_addr;

qla_alloc_dmabuf_exit:
        QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
                __func__, ret, (void *)dma_buf->dma_tag,
                (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
		dma_buf->size));

	return ret;
}
/*
 * Unload, free and destroy a DMA buffer previously set up with
 * qla_alloc_dmabuf().
 */
void
qla_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
	bus_dma_tag_t tag = dma_buf->dma_tag;

	bus_dmamap_unload(tag, dma_buf->dma_map);
	bus_dmamem_free(tag, dma_buf->dma_b, dma_buf->dma_map);
	bus_dma_tag_destroy(tag);
}
/*
 * Name: qla_alloc_parent_dma_tag
 * Function: Creates the parent DMA tag from which all per-buffer tags in
 *	this driver are derived, and records that fact in ha->flags so
 *	qla_free_parent_dma_tag() knows whether to tear it down.
 * Returns: 0 on success, -1 on failure.
 */
static int
qla_alloc_parent_dma_tag(qla_host_t *ha)
{
	int ret;
	device_t dev;

	dev = ha->pci_dev;

	/*
	 * Allocate parent DMA Tag
	 */
	ret = bus_dma_tag_create(
			bus_get_dma_tag(dev),	/* parent */
			1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,	/* filter, filterarg */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
			0,	/* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			0,	/* flags */
			NULL, NULL,	/* lockfunc, lockarg */
			&ha->parent_tag);

	if (ret) {
		device_printf(dev, "%s: could not create parent dma tag\n",
			__func__);
		return (-1);
	}

	ha->flags.parent_tag = 1;

	return (0);
}
/*
 * Name: qla_free_parent_dma_tag
 * Function: Destroys the parent DMA tag, if one was created, and clears
 *	the bookkeeping flag so a second call is a no-op.
 */
static void
qla_free_parent_dma_tag(qla_host_t *ha)
{
	/* Nothing to do if the parent tag was never created. */
	if (!ha->flags.parent_tag)
		return;

	bus_dma_tag_destroy(ha->parent_tag);
	ha->flags.parent_tag = 0;
}
/*
 * Name: qla_init_ifnet
 * Function: Creates the Network Device Interface and Registers it with the O.S
 *	Sets MTU/baudrate, wires up the init/ioctl/start handlers, attaches
 *	the Ethernet layer, advertises capabilities, and initializes the
 *	(fixed) ifmedia state.
 */
static void
qla_init_ifnet(device_t dev, qla_host_t *ha)
{
	if_t ifp;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	/* if_alloc(9) cannot fail; no NULL check is needed. */
	ifp = ha->ifp = if_alloc(IFT_ETHER);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	if_setmtu(ifp, ETHERMTU);
	if_setbaudrate(ifp, IF_Gbps(10));
	if_setinitfn(ifp, qla_init);
	if_setsoftc(ifp, ha);
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, qla_ioctl);
	if_setstartfn(ifp, qla_start);

	if_setsendqlen(ifp, qla_get_ifq_snd_maxlen(ha));
	if_setsendqready(ifp);

	ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;

	ether_ifattach(ifp, qla_get_mac_addr(ha));

	if_setcapabilities(ifp, IFCAP_HWCSUM |
				IFCAP_TSO4 |
				IFCAP_JUMBO_MTU);

	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU, 0);
	if_setcapabilitiesbit(ifp, IFCAP_LINKSTATE, 0);

	if_setcapenable(ifp, if_getcapabilities(ifp));

	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	/* Media is fixed (no autoneg done by driver); report optics + FDX. */
	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);

	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
		NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2((dev, "%s: exit\n", __func__));

	return;
}
/*
 * Name: qla_init_locked
 * Function: (Re)initializes the interface.  Called with QLA_LOCK held.
 *	Stops the port first, then allocates Tx/Rx buffers, configures LRO,
 *	refreshes the MAC address from the ifnet, and brings up the hardware
 *	interface.  A failure at any allocation step leaves the interface
 *	down (IFF_DRV_RUNNING stays cleared by qla_stop()).
 */
static void
qla_init_locked(qla_host_t *ha)
{
	if_t ifp = ha->ifp;

	/* Quiesce everything before re-allocating rings/buffers. */
	qla_stop(ha);

	if (qla_alloc_xmt_bufs(ha) != 0)
		return;

	if (qla_alloc_rcv_bufs(ha) != 0)
		return;

	if (qla_config_lro(ha))
		return;

	/* The ifnet lladdr may have been changed administratively. */
	bcopy(if_getlladdr(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);

	if_sethwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_TSO);

	ha->flags.stop_rcv = 0;
	if (qla_init_hw_if(ha) == 0) {
		ifp = ha->ifp;
		if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		/* Let the watchdog resume monitoring the port. */
		ha->flags.qla_watchdog_pause = 0;
	}

	return;
}
/*
 * Name: qla_init
 * Function: ifnet init entry point; takes the driver lock and delegates
 *	to qla_init_locked().
 */
static void
qla_init(void *arg)
{
	qla_host_t *ha = (qla_host_t *)arg;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	QLA_LOCK(ha, __func__);
	qla_init_locked(ha);
	QLA_UNLOCK(ha, __func__);

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}
/*
 * Name: qla_copy_maddr
 * Function: if_foreach_llmaddr() callback; appends one link-level
 *	multicast address to the flat array in arg.  mcnt is the number of
 *	addresses collected so far.
 * Returns: 1 when the address was stored, 0 once the table is full.
 */
static u_int
qla_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
{
	uint8_t *table;

	table = arg;

	/* Firmware accepts at most Q8_MAX_NUM_MULTICAST_ADDRS entries. */
	if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
		return (0);

	bcopy(LLADDR(sdl), &table[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

	return (1);
}
/*
 * Name: qla_set_multi
 * Function: Collects the interface's multicast filter list and programs
 *	it into the hardware (add_multi non-zero adds, zero removes).
 */
static void
qla_set_multi(qla_host_t *ha, uint32_t add_multi)
{
	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	int mcnt;

	/* Flatten the ifnet's multicast MACs into mta[]. */
	mcnt = if_foreach_llmaddr(ha->ifp, qla_copy_maddr, mta);

	qla_hw_set_multi(ha, mta, mcnt, add_multi);
}
/*
 * Name: qla_ioctl
 * Function: ifnet ioctl handler.  Handles address/MTU/flags/multicast/
 *	media/capability requests; everything else is passed through to
 *	ether_ioctl().
 * Returns: 0 on success or an errno value.
 */
static int
qla_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	qla_host_t *ha;

	ha = (qla_host_t *)if_getsoftc(ifp);

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

#ifdef INET
		/*
		 * For IPv4: mark the interface up, initialize it if it is
		 * not running yet, and push the address to the hardware.
		 */
		if (ifa->ifa_addr->sa_family == AF_INET) {
			if_setflagbits(ifp, IFF_UP, 0);
			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
				QLA_LOCK(ha, __func__);
				qla_init_locked(ha);
				QLA_UNLOCK(ha, __func__);
			}
		QL_DPRINT4((ha->pci_dev,
			"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
			__func__, cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);

			if (ntohl(IA_SIN(ifa)->sin_addr.s_addr) != INADDR_ANY) {
				qla_config_ipv4_addr(ha,
					(IA_SIN(ifa)->sin_addr.s_addr));
			}
			break;
		}
#endif
		ether_ioctl(ifp, cmd, data);
		break;

	case SIOCSIFMTU:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		/* Reject MTUs the hardware frame size cannot accommodate. */
		if (ifr->ifr_mtu > QLA_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
			ret = EINVAL;
		} else {
			QLA_LOCK(ha, __func__);
			if_setmtu(ifp, ifr->ifr_mtu);
			ha->max_frame_size =
				if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
			/* Push the new size to firmware only when running. */
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
				ret = qla_set_max_mtu(ha, ha->max_frame_size,
					(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
			}
			QLA_UNLOCK(ha, __func__);

			if (ret)
				ret = EINVAL;
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		if (if_getflags(ifp) & IFF_UP) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
				/*
				 * Already running: only react to PROMISC /
				 * ALLMULTI transitions (XOR against the
				 * cached flags).
				 */
				if ((if_getflags(ifp) ^ ha->if_flags) &
					IFF_PROMISC) {
					qla_set_promisc(ha);
				} else if ((if_getflags(ifp) ^ ha->if_flags) &
					IFF_ALLMULTI) {
					qla_set_allmulti(ha);
				}
			} else {
				/* Not running: bring it up. */
				QLA_LOCK(ha, __func__);
				qla_init_locked(ha);
				ha->max_frame_size = if_getmtu(ifp) +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
				ret = qla_set_max_mtu(ha, ha->max_frame_size,
					(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
				QLA_UNLOCK(ha, __func__);
			}
		} else {
			/* IFF_UP cleared: stop the interface. */
			QLA_LOCK(ha, __func__);
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				qla_stop(ha);
			ha->if_flags = if_getflags(ifp);
			QLA_UNLOCK(ha, __func__);
		}
		break;

	case SIOCADDMULTI:
		QL_DPRINT4((ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			qla_set_multi(ha, 1);
		}
		break;

	case SIOCDELMULTI:
		QL_DPRINT4((ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			qla_set_multi(ha, 0);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4((ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));

		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:
	{
		/* Toggle only the capabilities the request changes. */
		int mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);

		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		if (mask & IFCAP_HWCSUM)
			if_togglecapenable(ifp, IFCAP_HWCSUM);
		if (mask & IFCAP_TSO4)
			if_togglecapenable(ifp, IFCAP_TSO4);
		if (mask & IFCAP_TSO6)
			if_togglecapenable(ifp, IFCAP_TSO6);
		if (mask & IFCAP_VLAN_HWTAGGING)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);

		if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
			qla_init(ha);

		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));

		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}
/*
 * Name: qla_media_change
 * Function: ifmedia change callback.  The media is fixed, so the only
 *	thing validated is that the request is for Ethernet media.
 * Returns: 0 when the media type is Ethernet, EINVAL otherwise.
 */
static int
qla_media_change(if_t ifp)
{
	qla_host_t *ha = (qla_host_t *)if_getsoftc(ifp);
	int ret;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	ret = (IFM_TYPE(ha->media.ifm_media) != IFM_ETHER) ? EINVAL : 0;

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));

	return (ret);
}
/*
 * Name: qla_media_status
 * Function: ifmedia status callback.  Refreshes the link state from the
 *	hardware and reports active media (optics + full duplex) when the
 *	link is up.
 */
static void
qla_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	qla_host_t *ha = (qla_host_t *)if_getsoftc(ifp);

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* Re-query the hardware so the report is current. */
	qla_update_link_state(ha);

	if (ha->hw.flags.link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
	}

	QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,
		(ha->hw.flags.link_up ? "link_up" : "link_down")));
}
/*
 * Name: qla_start
 * Function: ifnet start (transmit) routine.  Uses mtx_trylock on the Tx
 *	lock so concurrent callers simply back off instead of blocking;
 *	the lock holder drains the send queue.  Frames that cannot be sent
 *	are re-queued and OACTIVE is set until completions free resources.
 */
void
qla_start(if_t ifp)
{
	struct mbuf    *m_head;
	qla_host_t *ha = (qla_host_t *)if_getsoftc(ifp);

	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

	/* Non-blocking: another context is already transmitting. */
	if (!mtx_trylock(&ha->tx_lock)) {
		QL_DPRINT8((ha->pci_dev,
			"%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
		return;
	}

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) {
		QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	/* Refresh link state opportunistically (watchdog tick wrapped). */
	if (!ha->watchdog_ticks)
		qla_update_link_state(ha);

	if (!ha->hw.flags.link_up) {
		QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	while (!if_sendq_empty(ifp)) {
		m_head = if_dequeue(ifp);

		if (m_head == NULL) {
			QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n",
				__func__));
			break;
		}

		if (qla_send(ha, &m_head)) {
			/* NULL m_head means qla_send consumed/freed it. */
			if (m_head == NULL)
				break;
			QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__));
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			if_sendq_prepend(ifp, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}
	QLA_TX_UNLOCK(ha);
	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
	return;
}
/*
 * Name: qla_send
 * Function: DMA-maps one outbound mbuf chain (defragmenting once on EFBIG)
 *	and hands it to the hardware via qla_hw_send().  On success the
 *	mbuf and map ownership move to ha->tx_buf[tx_idx] and are released
 *	later by qla_clear_tx_buf().
 * Returns: 0 on success; otherwise an errno.  *m_headp is set to NULL
 *	whenever the mbuf was freed here (caller must not re-queue it).
 * NOTE(review): on qla_hw_send() failure the dmamap created above is not
 *	unloaded/destroyed in this function — presumably reclaimed
 *	elsewhere; verify against the completion path.
 */
static int
qla_send(qla_host_t *ha, struct mbuf **m_headp)
{
	bus_dma_segment_t segs[QLA_MAX_SEGMENTS];
	bus_dmamap_t map;
	int nsegs;
	int ret = -1;
	uint32_t tx_idx;
	struct mbuf *m_head = *m_headp;

	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

	/* A fresh map per packet; freed with the mbuf on completion. */
	if ((ret = bus_dmamap_create(ha->tx_tag, BUS_DMA_NOWAIT, &map))) {
		ha->err_tx_dmamap_create++;
		device_printf(ha->pci_dev,
			"%s: bus_dmamap_create failed[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);
		return (ret);
	}

	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
			BUS_DMA_NOWAIT);

	if (ret == EFBIG) {
		/* Too many segments: collapse the chain and retry once. */
		struct mbuf *m;

		QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
			m_head->m_pkthdr.len));

		m = m_defrag(m_head, M_NOWAIT);
		if (m == NULL) {
			ha->err_tx_defrag++;
			m_freem(m_head);
			*m_headp = NULL;
			device_printf(ha->pci_dev,
				"%s: m_defrag() = NULL [%d]\n",
				__func__, ret);
			return (ENOBUFS);
		}
		m_head = m;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
					segs, &nsegs, BUS_DMA_NOWAIT))) {
			ha->err_tx_dmamap_load++;

			device_printf(ha->pci_dev,
				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
				__func__, ret, m_head->m_pkthdr.len);

			bus_dmamap_destroy(ha->tx_tag, map);

			/* ENOMEM is transient: leave the mbuf for retry. */
			if (ret != ENOMEM) {
				m_freem(m_head);
				*m_headp = NULL;
			}
			return (ret);
		}
	} else if (ret) {
		ha->err_tx_dmamap_load++;

		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);

		bus_dmamap_destroy(ha->tx_tag, map);

		/* ENOMEM is transient: leave the mbuf for retry. */
		if (ret != ENOMEM) {
			m_freem(m_head);
			*m_headp = NULL;
		}
		return (ret);
	}

	QL_ASSERT((nsegs != 0), ("qla_send: empty packet"));

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

	if (!(ret = qla_hw_send(ha, segs, nsegs, &tx_idx, m_head))) {
		/* Ownership of mbuf + map transfers to the Tx ring slot. */
		ha->tx_buf[tx_idx].m_head = m_head;
		ha->tx_buf[tx_idx].map = map;
	} else {
		if (ret == EINVAL) {
			m_freem(m_head);
			*m_headp = NULL;
		}
	}

	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
	return (ret);
}
/*
 * Name: qla_stop
 * Function: Quiesces the interface: pauses the watchdog (waiting 100ms
 *	for it to observe the flag), stops receive, tears down the hardware
 *	interface, then frees LRO state and all Tx/Rx buffers.  Finally
 *	clears RUNNING/OACTIVE so the stack stops handing us packets.
 */
static void
qla_stop(qla_host_t *ha)
{
	if_t ifp = ha->ifp;

	ha->flags.qla_watchdog_pause = 1;
	/* Give the in-flight watchdog callout time to see the pause. */
	qla_mdelay(__func__, 100);

	ha->flags.stop_rcv = 1;
	qla_hw_stop_rcv(ha);

	qla_del_hw_if(ha);

	qla_free_lro(ha);

	qla_free_xmt_bufs(ha);
	qla_free_rcv_bufs(ha);

	if_setdrvflagbits(ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));

	return;
}
/*
 * Buffer Management Functions for Transmit and Receive Rings
 */
/*
 * Name: qla_alloc_xmt_bufs
 * Function: Creates the transmit DMA tag (multi-segment, TSO-sized) and
 *	zeroes the Tx buffer descriptor array.
 * Returns: 0 on success, ENOMEM on tag creation failure.
 * NOTE(review): the tag is created with a NULL parent rather than
 *	ha->parent_tag (unlike qla_alloc_dmabuf) — confirm intended.
 */
static int
qla_alloc_xmt_bufs(qla_host_t *ha)
{
	if (bus_dma_tag_create(NULL,    /* parent */
		1, 0,    /* alignment, bounds */
		BUS_SPACE_MAXADDR,       /* lowaddr */
		BUS_SPACE_MAXADDR,       /* highaddr */
		NULL, NULL,      /* filter, filterarg */
		QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
		QLA_MAX_SEGMENTS,        /* nsegments */
		PAGE_SIZE,        /* maxsegsize */
		BUS_DMA_ALLOCNOW,        /* flags */
		NULL,    /* lockfunc */
		NULL,    /* lockfuncarg */
		&ha->tx_tag)) {
		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
			__func__);
		return (ENOMEM);
	}
	bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));

	return 0;
}
/*
 * Release mbuf after it sent on the wire
 */
/*
 * Name: qla_clear_tx_buf
 * Function: Releases the mbuf and DMA map attached to one Tx descriptor
 *	slot; a slot with no mbuf (m_head == NULL) is a no-op.
 */
static void
qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	if (txb->m_head) {
		/* Unmap before destroying the map, then free the chain. */
		bus_dmamap_unload(ha->tx_tag, txb->map);
		bus_dmamap_destroy(ha->tx_tag, txb->map);
		m_freem(txb->m_head);
		txb->m_head = NULL;
	}

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}
/*
 * Name: qla_free_xmt_bufs
 * Function: Reclaims every Tx descriptor's mbuf/map, destroys the Tx DMA
 *	tag, and clears the descriptor array.
 */
static void
qla_free_xmt_bufs(qla_host_t *ha)
{
	int idx;

	/* Drop any mbufs/maps still parked on the Tx ring. */
	for (idx = 0; idx < NUM_TX_DESCRIPTORS; idx++)
		qla_clear_tx_buf(ha, &ha->tx_buf[idx]);

	if (ha->tx_tag != NULL) {
		bus_dma_tag_destroy(ha->tx_tag);
		ha->tx_tag = NULL;
	}

	bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
}
/*
 * Name: qla_alloc_rcv_bufs
 * Function: Creates the Rx DMA tag and populates both the standard and
 *	jumbo receive rings: for each descriptor a DMA map is created, an
 *	mbuf is allocated/mapped via qla_get_mbuf(), and its bus address is
 *	written into the hardware receive descriptor.  Any failure unwinds
 *	everything through qla_free_rcv_bufs().
 * Returns: 0 on success; ENOMEM or the failing bus_dma/mbuf error code.
 */
static int
qla_alloc_rcv_bufs(qla_host_t *ha)
{
	int i, j, ret = 0;
	qla_rx_buf_t *rxb;

	if (bus_dma_tag_create(NULL,    /* parent */
			1, 0,    /* alignment, bounds */
			BUS_SPACE_MAXADDR,       /* lowaddr */
			BUS_SPACE_MAXADDR,       /* highaddr */
			NULL, NULL,      /* filter, filterarg */
			MJUM9BYTES,     /* maxsize */
			1,        /* nsegments */
			MJUM9BYTES,        /* maxsegsize */
			BUS_DMA_ALLOCNOW,        /* flags */
			NULL,    /* lockfunc */
			NULL,    /* lockfuncarg */
			&ha->rx_tag)) {
		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
			__func__);

		return (ENOMEM);
	}

	bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
	bzero((void *)ha->rx_jbuf,
		(sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS));

	/* Reset per-ring free-list bookkeeping. */
	for (i = 0; i < MAX_SDS_RINGS; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
		ha->hw.sds[i].rxjb_free = NULL;
		ha->hw.sds[i].rxj_free = 0;
	}

	/* Stage 1 (standard ring): create all maps up front. */
	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
		rxb = &ha->rx_buf[i];

		ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);

		if (ret) {
			device_printf(ha->pci_dev,
				"%s: dmamap[%d] failed\n", __func__, i);

			/* Undo the maps created so far, then bail. */
			for (j = 0; j < i; j++) {
				bus_dmamap_destroy(ha->rx_tag,
					ha->rx_buf[j].map);
			}
			goto qla_alloc_rcv_bufs_failed;
		}
	}

	qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_NORMAL);

	/* Stage 2 (standard ring): attach an mbuf to every descriptor. */
	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
		rxb = &ha->rx_buf[i];
		rxb->handle = i;
		if (!(ret = qla_get_mbuf(ha, rxb, NULL, 0))) {
			/*
			 * set the physical address in the corresponding
			 * descriptor entry in the receive ring/queue for the
			 * hba
			 */
			qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_NORMAL, i,
				rxb->handle, rxb->paddr,
				(rxb->m_head)->m_pkthdr.len);
		} else {
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [standard(%d)] failed\n",
				__func__, i);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			goto qla_alloc_rcv_bufs_failed;
		}
	}

	/* Same two-stage fill for the jumbo ring. */
	for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
		rxb = &ha->rx_jbuf[i];

		ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);

		if (ret) {
			device_printf(ha->pci_dev,
				"%s: dmamap[%d] failed\n", __func__, i);

			for (j = 0; j < i; j++) {
				bus_dmamap_destroy(ha->rx_tag,
					ha->rx_jbuf[j].map);
			}
			goto qla_alloc_rcv_bufs_failed;
		}
	}

	qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_JUMBO);

	for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
		rxb = &ha->rx_jbuf[i];
		rxb->handle = i;
		if (!(ret = qla_get_mbuf(ha, rxb, NULL, 1))) {
			/*
			 * set the physical address in the corresponding
			 * descriptor entry in the receive ring/queue for the
			 * hba
			 */
			qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_JUMBO, i,
				rxb->handle, rxb->paddr,
				(rxb->m_head)->m_pkthdr.len);
		} else {
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [jumbo(%d)] failed\n",
				__func__, i);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			goto qla_alloc_rcv_bufs_failed;
		}
	}

	return (0);

qla_alloc_rcv_bufs_failed:
	qla_free_rcv_bufs(ha);
	return (ret);
}
/*
 * Name: qla_free_rcv_bufs
 * Function: Releases every standard and jumbo Rx descriptor's mbuf and
 *	DMA map, destroys the Rx tag, zeroes the descriptor arrays, and
 *	resets the per-SDS-ring free-list state.  Safe to call on a
 *	partially-populated state (used as the unwind path for
 *	qla_alloc_rcv_bufs()).
 */
static void
qla_free_rcv_bufs(qla_host_t *ha)
{
	int i;
	qla_rx_buf_t *rxb;

	/* Standard ring teardown. */
	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
		rxb = &ha->rx_buf[i];
		if (rxb->m_head != NULL) {
			bus_dmamap_unload(ha->rx_tag, rxb->map);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			m_freem(rxb->m_head);
			rxb->m_head = NULL;
		}
	}

	/* Jumbo ring teardown. */
	for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
		rxb = &ha->rx_jbuf[i];
		if (rxb->m_head != NULL) {
			bus_dmamap_unload(ha->rx_tag, rxb->map);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			m_freem(rxb->m_head);
			rxb->m_head = NULL;
		}
	}

	if (ha->rx_tag != NULL) {
		bus_dma_tag_destroy(ha->rx_tag);
		ha->rx_tag = NULL;
	}

	bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
	bzero((void *)ha->rx_jbuf,
		(sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS));

	/* Reset per-ring free-list bookkeeping. */
	for (i = 0; i < MAX_SDS_RINGS; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
		ha->hw.sds[i].rxjb_free = NULL;
		ha->hw.sds[i].rxj_free = 0;
	}
	return;
}
/*
 * Name: qla_get_mbuf
 * Function: Attaches a receive mbuf to rxb and DMA-maps it.  When nmp is
 *	NULL a fresh cluster is allocated (2KB standard or 9KB jumbo per
 *	the jumbo flag); otherwise the passed mbuf is reset for reuse.
 *	The data pointer is bumped to the next 8-byte boundary before
 *	mapping, as the hardware requires aligned buffers.
 * Returns: 0 on success; ENOBUFS when no cluster is available, -1 when
 *	the DMA load fails (mbuf freed in that case).
 */
int
qla_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp,
	uint32_t jumbo)
{
	struct mbuf *mp = nmp;
	int ret = 0;
	uint32_t offset;

	QL_DPRINT2((ha->pci_dev, "%s: jumbo(0x%x) enter\n", __func__, jumbo));

	if (mp == NULL) {
		if (!jumbo) {
			mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);

			if (mp == NULL) {
				ha->err_m_getcl++;
				ret = ENOBUFS;
				device_printf(ha->pci_dev,
					"%s: m_getcl failed\n", __func__);
				goto exit_qla_get_mbuf;
			}
			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		} else {
			mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
				MJUM9BYTES);
			if (mp == NULL) {
				ha->err_m_getjcl++;
				ret = ENOBUFS;
				device_printf(ha->pci_dev,
					"%s: m_getjcl failed\n", __func__);
				goto exit_qla_get_mbuf;
			}
			mp->m_len = mp->m_pkthdr.len = MJUM9BYTES;
		}
	} else {
		/* Recycle the caller's mbuf: restore full-cluster state. */
		if (!jumbo)
			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		else
			mp->m_len = mp->m_pkthdr.len = MJUM9BYTES;

		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	/* Align the data start to an 8-byte boundary for the hardware. */
	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
	if (offset) {
		offset = 8 - offset;
		m_adj(mp, offset);
	}

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	ret = bus_dmamap_load(ha->rx_tag, rxb->map,
				mtod(mp, void *), mp->m_len,
				qla_dmamap_callback, &rxb->paddr,
				BUS_DMA_NOWAIT);
	if (ret || !rxb->paddr) {
		m_free(mp);
		rxb->m_head = NULL;
		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load failed\n", __func__);
		ret = -1;
		goto exit_qla_get_mbuf;
	}
	rxb->m_head = mp;
	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);

exit_qla_get_mbuf:
	QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
	return (ret);
}
static void
qla_tx_done(void *context, int pending)
{
qla_host_t *ha = context;
qla_hw_tx_done(ha);
qla_start(ha->ifp);
}
diff --git a/sys/dev/qlxgbe/ql_os.c b/sys/dev/qlxgbe/ql_os.c
index 0b3b2ca7954f..a12f1efec082 100644
--- a/sys/dev/qlxgbe/ql_os.c
+++ b/sys/dev/qlxgbe/ql_os.c
@@ -1,2254 +1,2250 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2013-2016 Qlogic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* File: ql_os.c
* Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
*/
#include <sys/cdefs.h>
#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include <sys/smp.h>
/*
* Some PCI Configuration Space Related Defines
*/
#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC 0x1077
#endif
#ifndef PCI_PRODUCT_QLOGIC_ISP8030
#define PCI_PRODUCT_QLOGIC_ISP8030 0x8030
#endif
#define PCI_QLOGIC_ISP8030 \
((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC)
/*
* static functions
*/
static int qla_alloc_parent_dma_tag(qla_host_t *ha);
static void qla_free_parent_dma_tag(qla_host_t *ha);
static int qla_alloc_xmt_bufs(qla_host_t *ha);
static void qla_free_xmt_bufs(qla_host_t *ha);
static int qla_alloc_rcv_bufs(qla_host_t *ha);
static void qla_free_rcv_bufs(qla_host_t *ha);
static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb);
static void qla_init_ifnet(device_t dev, qla_host_t *ha);
static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS);
static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
int error);
static void qla_stop(qla_host_t *ha);
static void qla_get_peer(qla_host_t *ha);
static void qla_error_recovery(void *context, int pending);
static void qla_async_event(void *context, int pending);
static void qla_stats(void *context, int pending);
static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
uint32_t iscsi_pdu);
/*
* Hooks to the Operating Systems
*/
static int qla_pci_probe (device_t);
static int qla_pci_attach (device_t);
static int qla_pci_detach (device_t);
static void qla_init(void *arg);
static int qla_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int qla_media_change(if_t ifp);
static void qla_media_status(if_t ifp, struct ifmediareq *ifmr);
static int qla_transmit(if_t ifp, struct mbuf *mp);
static void qla_qflush(if_t ifp);
static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
static int qla_create_fp_taskqueues(qla_host_t *ha);
static void qla_destroy_fp_taskqueues(qla_host_t *ha);
static void qla_drain_fp_taskqueues(qla_host_t *ha);
/* newbus glue: probe/attach/detach entry points for the PCI framework. */
static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qla_pci_probe),
	DEVMETHOD(device_attach, qla_pci_attach),
	DEVMETHOD(device_detach, qla_pci_detach),
	{ 0, 0 }
};

static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

/* Register on the pci bus; depends on pci and ether being loaded. */
DRIVER_MODULE(qla83xx, pci, qla_pci_driver, 0, 0);

MODULE_DEPEND(qla83xx, pci, 1, 1, 1);
MODULE_DEPEND(qla83xx, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");

/* Rx replenish thresholds: standard ring refills immediately, jumbo at 32. */
#define QL_STD_REPLENISH_THRES		0
#define QL_JUMBO_REPLENISH_THRES	32

/* Filled in by qla_pci_probe(); exposed via the "version" sysctl. */
static char dev_str[64];
static char ver_str[64];
/*
 * Name: qla_pci_probe
 * Function: Validate the PCI device to be a QLA80XX device
 */
static int
qla_pci_probe(device_t dev)
{
	uint32_t devid;

	devid = (pci_get_device(dev) << 16) | (pci_get_vendor(dev));

	/* Only the ISP8030 Ethernet function is supported. */
	if (devid != PCI_QLOGIC_ISP8030)
		return (ENXIO);

	snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
	    "Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function",
	    QLA_VERSION_MAJOR, QLA_VERSION_MINOR, QLA_VERSION_BUILD);
	snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
	    QLA_VERSION_MAJOR, QLA_VERSION_MINOR, QLA_VERSION_BUILD);
	device_set_desc(dev, dev_str);

	if (bootverbose)
		printf("%s: %s\n ", __func__, dev_str);

	return (BUS_PROBE_DEFAULT);
}
/*
 * Name: qla_add_sysctls
 * Function: Registers the driver's per-device sysctl nodes (version
 *	strings, link-status trigger, debug level, recovery/replenish
 *	tunables, and LRO/TSO/VLAN counters) under the device's tree.
 */
static void
qla_add_sysctls(qla_host_t *ha)
{
	device_t dev = ha->pci_dev;

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "version", CTLFLAG_RD,
		ver_str, 0, "Driver Version");

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "fw_version", CTLFLAG_RD,
		ha->fw_ver_str, 0, "firmware version");

	/* Writing 1 here triggers an on-demand link status query. */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
		"link_status", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
		(void *)ha, 0, qla_sysctl_get_link_status, "I", "Link Status");

	ha->dbg_level = 0;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "debug", CTLFLAG_RW,
		&ha->dbg_level, ha->dbg_level, "Debug Level");

	ha->enable_minidump = 1;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "enable_minidump", CTLFLAG_RW,
		&ha->enable_minidump, ha->enable_minidump,
		"Minidump retrival prior to error recovery "
		"is enabled only when this is set");

	ha->enable_driverstate_dump = 1;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "enable_driverstate_dump", CTLFLAG_RW,
		&ha->enable_driverstate_dump, ha->enable_driverstate_dump,
		"Driver State retrival prior to error recovery "
		"is enabled only when this is set");

	ha->enable_error_recovery = 1;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "enable_error_recovery", CTLFLAG_RW,
		&ha->enable_error_recovery, ha->enable_error_recovery,
		"when set error recovery is enabled on fatal errors "
		"otherwise the port is turned offline");

	ha->ms_delay_after_init = 1000;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "ms_delay_after_init", CTLFLAG_RW,
		&ha->ms_delay_after_init, ha->ms_delay_after_init,
		"millisecond delay after hw_init");

	ha->std_replenish = QL_STD_REPLENISH_THRES;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "std_replenish", CTLFLAG_RW,
		&ha->std_replenish, ha->std_replenish,
		"Threshold for Replenishing Standard Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "ipv4_lro",
		CTLFLAG_RD, &ha->ipv4_lro,
		"number of ipv4 lro completions");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "ipv6_lro",
		CTLFLAG_RD, &ha->ipv6_lro,
		"number of ipv6 lro completions");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "tx_tso_frames",
		CTLFLAG_RD, &ha->tx_tso_frames,
		"number of Tx TSO Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "hw_vlan_tx_frames",
		CTLFLAG_RD, &ha->hw_vlan_tx_frames,
		"number of Tx VLAN Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "hw_lock_failed",
		CTLFLAG_RD, &ha->hw_lock_failed,
		"number of hw_lock failures");

	return;
}
/*
 * Name: qla_watchdog
 * Function: Periodic callout.  Checks hardware health and, on failure,
 *	either queues the error-recovery task (when enabled) or takes the
 *	port offline.  When healthy and the interface is up it queues the
 *	stats task once per 1000 ticks and the async-event task on demand,
 *	then re-arms itself.  Exits (setting qla_watchdog_exited) when
 *	qla_watchdog_exit is set by qla_release().
 */
static void
qla_watchdog(void *arg)
{
	qla_host_t *ha = arg;
	if_t ifp;

	ifp = ha->ifp;

	/* Teardown handshake with qla_release(). */
	if (ha->qla_watchdog_exit) {
		ha->qla_watchdog_exited = 1;
		return;
	}
	ha->qla_watchdog_exited = 0;

	if (!ha->qla_watchdog_pause) {
		/* Health check / recovery trigger path. */
		if (!ha->offline &&
			(ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
			(ha->msg_from_peer == QL_PEER_MSG_RESET))) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			ql_update_link_state(ha);

			if (ha->enable_error_recovery) {
				/* Pause ourselves until recovery completes. */
				ha->qla_watchdog_paused = 1;
				ha->qla_watchdog_pause = 1;
				ha->err_inject = 0;
				device_printf(ha->pci_dev,
					"%s: taskqueue_enqueue(err_task) \n",
					__func__);
				taskqueue_enqueue(ha->err_tq, &ha->err_task);
			} else {
				if (ifp != NULL)
					if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				ha->offline = 1;
			}
			return;

		} else {
			if (ha->qla_interface_up) {
				ha->watchdog_ticks++;

				if (ha->watchdog_ticks > 1000)
					ha->watchdog_ticks = 0;

				/* Stats roughly once per 1000 ticks. */
				if (!ha->watchdog_ticks && QL_RUNNING(ifp)) {
					taskqueue_enqueue(ha->stats_tq,
						&ha->stats_task);
				}

				if (ha->async_event) {
					taskqueue_enqueue(ha->async_event_tq,
						&ha->async_event_task);
				}
			}
			ha->qla_watchdog_paused = 0;
		}
	} else {
		ha->qla_watchdog_paused = 1;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);
}
/*
 * Name: qla_pci_attach
 * Function: attaches the device to the operating system
 *	Maps BARs, sizes the ring/MSI-X configuration, initializes the
 *	hardware, sets up interrupts and per-ring Tx resources, allocates
 *	DMA buffers, creates the ifnet/cdev, and starts the watchdog and
 *	worker taskqueues.  All failures unwind through qla_release().
 * Returns: 0 on success; ENOMEM/ENXIO on failure.
 */
static int
qla_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	uint32_t rsrc_len __unused;
	int i;
	uint32_t num_rcvq = 0;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	memset(ha, 0, sizeof (qla_host_t));

	if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) {
		device_printf(dev, "device is not ISP8030\n");
		return (ENXIO);
	}

	ha->pci_func = pci_get_function(dev) & 0x1;

	ha->pci_dev = dev;

	pci_enable_busmaster(dev);

	/* BAR 0: register space. */
	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

	if (ha->pci_reg == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto qla_pci_attach_err;
	}

	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->sp_log_lock, "qla83xx_sp_log_lock", MTX_NETWORK_LOCK, MTX_DEF);
	ha->flags.lock_init = 1;

	qla_add_sysctls(ha);

	/* Start from the maximum ring counts; trimmed below by MSI-X. */
	ha->hw.num_sds_rings = MAX_SDS_RINGS;
	ha->hw.num_rds_rings = MAX_RDS_RINGS;
	ha->hw.num_tx_rings = NUM_TX_RINGS;

	/* BAR 2: secondary register space. */
	ha->reg_rid1 = PCIR_BAR(2);
	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->reg_rid1, RF_ACTIVE);

	ha->msix_count = pci_msix_count(dev);

	if (ha->msix_count < 1 ) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qla_pci_attach_err;
	}

	/* One vector is reserved for the mailbox interrupt. */
	if (ha->msix_count < (ha->hw.num_sds_rings + 1)) {
		ha->hw.num_sds_rings = ha->msix_count - 1;
	}

	QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
		" msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha,
		ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg,
		ha->pci_reg1));

	/* initialize hardware */
	if (ql_init_hw(ha)) {
		device_printf(dev, "%s: ql_init_hw failed\n", __func__);
		goto qla_pci_attach_err;
	}

	device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
		ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
		ha->fw_ver_build);
	snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
			ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
			ha->fw_ver_build);

	if (qla_get_nic_partition(ha, NULL, &num_rcvq)) {
		device_printf(dev, "%s: qla_get_nic_partition failed\n",
			__func__);
		goto qla_pci_attach_err;
	}
	QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
		" msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n",
		__func__, ha, ha->pci_func, rsrc_len, ha->msix_count,
		ha->pci_reg, ha->pci_reg1, num_rcvq));

	/* Smaller NIC partitions cap the SDS ring count at 15. */
	if ((ha->msix_count  < 64) || (num_rcvq != 32)) {
		if (ha->hw.num_sds_rings > 15) {
			ha->hw.num_sds_rings = 15;
		}
	}

	ha->hw.num_rds_rings = ha->hw.num_sds_rings;
	ha->hw.num_tx_rings = ha->hw.num_sds_rings;

#ifdef QL_ENABLE_ISCSI_TLV
	ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2;
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

	ql_hw_add_sysctls(ha);

	ha->msix_count = ha->hw.num_sds_rings + 1;

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qla_pci_attach_err;
	}

	/* Vector 1: mailbox interrupt. */
	ha->mbx_irq_rid = 1;
	ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->mbx_irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));
	if (ha->mbx_irq == NULL) {
		device_printf(dev, "could not allocate mbx interrupt\n");
		goto qla_pci_attach_err;
	}
	if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE),
		NULL, ql_mbx_isr, ha, &ha->mbx_handle)) {
		device_printf(dev, "could not setup mbx interrupt\n");
		goto qla_pci_attach_err;
	}

	/* Vectors 2..n: one per SDS ring, plus per-ring Tx buf_ring. */
	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->irq_vec[i].sds_idx = i;
		ha->irq_vec[i].ha = ha;
		ha->irq_vec[i].irq_rid = 2 + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->irq_vec[i].irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev, "could not allocate interrupt\n");
			goto qla_pci_attach_err;
		}
		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE),
			NULL, ql_isr, &ha->irq_vec[i],
			&ha->irq_vec[i].handle)) {
			device_printf(dev, "could not setup interrupt\n");
			goto qla_pci_attach_err;
		}

		ha->tx_fp[i].ha = ha;
		ha->tx_fp[i].txr_idx = i;

		if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) {
			device_printf(dev, "%s: could not allocate tx_br[%d]\n",
				__func__, i);
			goto qla_pci_attach_err;
		}
	}

	if (qla_create_fp_taskqueues(ha) != 0)
		goto qla_pci_attach_err;

	printf("%s: mp__ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
		ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);

	ql_read_mac_addr(ha);

	/* allocate parent dma tag */
	if (qla_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
			__func__);
		goto qla_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (ql_alloc_dma(ha)) {
		device_printf(dev, "%s: ql_alloc_dma failed\n", __func__);
		goto qla_pci_attach_err;
	}
	qla_get_peer(ha);

	if (ql_minidump_init(ha) != 0) {
		device_printf(dev, "%s: ql_minidump_init failed\n", __func__);
		goto qla_pci_attach_err;
	}
	ql_alloc_drvr_state_buffer(ha);
	ql_alloc_sp_log_buffer(ha);
	/* create the o.s ethernet interface */
	qla_init_ifnet(dev, ha);

	ha->flags.qla_watchdog_active = 1;
	ha->qla_watchdog_pause = 0;

	callout_init(&ha->tx_callout, TRUE);
	ha->flags.qla_callout_init = 1;

	/* create ioctl device interface */
	if (ql_make_cdev(ha)) {
		device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
		goto qla_pci_attach_err;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);

	/* Worker taskqueues: error recovery, async events, statistics. */
	TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha);
	ha->err_tq = taskqueue_create("qla_errq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->err_tq);
	taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
		device_get_nameunit(ha->pci_dev));

	TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha);
	ha->async_event_tq = taskqueue_create("qla_asyncq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->async_event_tq);
	taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq",
		device_get_nameunit(ha->pci_dev));

	TASK_INIT(&ha->stats_task, 0, qla_stats, ha);
	ha->stats_tq = taskqueue_create("qla_statsq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->stats_tq);
	taskqueue_start_threads(&ha->stats_tq, 1, PI_NET, "%s taskq",
		device_get_nameunit(ha->pci_dev));

	QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__));
        return (0);

qla_pci_attach_err:

	qla_release(ha);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->hw_lock);
		mtx_destroy(&ha->sp_log_lock);
	}

	QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__));
        return (ENXIO);
}
/*
* Name: qla_pci_detach
* Function: Unhooks the device from the operating system
*/
static int
qla_pci_detach(device_t dev)
{
	qla_host_t *ha = NULL;
	if_t ifp;

	/* Softc is allocated by the newbus framework at attach time. */
	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ifp = ha->ifp;

	/* Mark the interface down before tearing anything apart. */
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);

	/* Sleepable acquire with no timeout (-1): wait for the lock. */
	QLA_LOCK(ha, __func__, -1, 0);

	ha->qla_detach_active = 1;
	qla_stop(ha);

	qla_release(ha);

	QLA_UNLOCK(ha, __func__);

	/* Locks are destroyed last, after qla_release() drained all users. */
	if (ha->flags.lock_init) {
		mtx_destroy(&ha->hw_lock);
		mtx_destroy(&ha->sp_log_lock);
	}

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

	return (0);
}
/*
* SYSCTL Related Callbacks
*/
static int
qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS)
{
int err, ret = 0;
qla_host_t *ha;
err = sysctl_handle_int(oidp, &ret, 0, req);
if (err || !req->newptr)
return (err);
if (ret == 1) {
ha = (qla_host_t *)arg1;
ql_hw_link_status(ha);
}
return (err);
}
/*
* Name: qla_release
* Function: Releases the resources allocated for the device
*/
static void
qla_release(qla_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

	/* Drain and free the taskqueues first so no task can run below. */
	if (ha->async_event_tq) {
		taskqueue_drain_all(ha->async_event_tq);
		taskqueue_free(ha->async_event_tq);
	}

	if (ha->err_tq) {
		taskqueue_drain_all(ha->err_tq);
		taskqueue_free(ha->err_tq);
	}

	if (ha->stats_tq) {
		taskqueue_drain_all(ha->stats_tq);
		taskqueue_free(ha->stats_tq);
	}

	ql_del_cdev(ha);

	/* Ask the watchdog to exit and busy-wait until it acknowledges. */
	if (ha->flags.qla_watchdog_active) {
		ha->qla_watchdog_exit = 1;

		while (ha->qla_watchdog_exited == 0)
			qla_mdelay(__func__, 1);
	}

	if (ha->flags.qla_callout_init)
		callout_stop(&ha->tx_callout);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	ql_free_drvr_state_buffer(ha);
	ql_free_sp_log_buffer(ha);
	ql_free_dma(ha);
	qla_free_parent_dma_tag(ha);

	/* Tear down interrupt handlers before releasing their resources. */
	if (ha->mbx_handle)
		(void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle);

	if (ha->mbx_irq)
		(void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid,
		    ha->mbx_irq);

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
			    ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
			    ha->irq_vec[i].irq_rid,
			    ha->irq_vec[i].irq);
		}

		qla_free_tx_br(ha, &ha->tx_fp[i]);
	}
	qla_destroy_fp_taskqueues(ha);

	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->pci_reg)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
		    ha->pci_reg);

	if (ha->pci_reg1)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
		    ha->pci_reg1);

	return;
}
/*
* DMA Related Functions
*/
/*
 * bus_dmamap_load() callback: store the single segment's bus address
 * into the caller-supplied bus_addr_t, or 0 if the load failed so the
 * caller can detect the error.
 */
static void
qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *paddr = arg;

	if (error != 0) {
		printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
		*paddr = 0;
		return;
	}

	*paddr = segs[0].ds_addr;
}
/*
 * Allocate a coherent, zeroed DMA buffer described by dma_buf
 * (caller fills in .alignment and .size). On success .dma_tag,
 * .dma_map, .dma_b (KVA) and .dma_addr (bus address) are valid.
 * Returns 0 on success, non-zero on failure (nothing left allocated).
 */
int
ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
	int ret = 0;
	device_t dev;
	bus_addr_t b_addr;

	dev = ha->pci_dev;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	/* Single-segment tag derived from the driver's parent tag. */
	ret = bus_dma_tag_create(
	    ha->parent_tag,/* parent */
	    dma_buf->alignment,
	    ((bus_size_t)(1ULL << 32)),/* boundary */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    dma_buf->size,	/* maxsize */
	    1,			/* nsegments */
	    dma_buf->size,	/* maxsegsize */
	    0,			/* flags */
	    NULL, NULL,		/* lockfunc, lockarg */
	    &dma_buf->dma_tag);

	if (ret) {
		device_printf(dev, "%s: could not create dma tag\n", __func__);
		goto ql_alloc_dmabuf_exit;
	}
	ret = bus_dmamem_alloc(dma_buf->dma_tag,
	    (void **)&dma_buf->dma_b,
	    (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
	    &dma_buf->dma_map);
	if (ret) {
		/* Unwind: destroy the tag created above. */
		bus_dma_tag_destroy(dma_buf->dma_tag);
		device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
		goto ql_alloc_dmabuf_exit;
	}

	/* qla_dmamap_callback() writes the bus address (0 on failure). */
	ret = bus_dmamap_load(dma_buf->dma_tag,
	    dma_buf->dma_map,
	    dma_buf->dma_b,
	    dma_buf->size,
	    qla_dmamap_callback,
	    &b_addr, BUS_DMA_NOWAIT);

	if (ret || !b_addr) {
		/* Unwind both the memory and the tag. */
		bus_dma_tag_destroy(dma_buf->dma_tag);
		bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
		    dma_buf->dma_map);
		ret = -1;
		goto ql_alloc_dmabuf_exit;
	}

	dma_buf->dma_addr = b_addr;

ql_alloc_dmabuf_exit:
	QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
	    __func__, ret, (void *)dma_buf->dma_tag,
	    (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
	    dma_buf->size));

	return ret;
}
/*
 * Release a buffer allocated by ql_alloc_dmabuf(). The teardown must
 * mirror the allocation order: unload the map, free the memory, then
 * destroy the tag.
 */
void
ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
	bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
	bus_dma_tag_destroy(dma_buf->dma_tag);
}
/*
 * Create the parent DMA tag all per-buffer tags derive from.
 * Sets ha->flags.parent_tag so qla_free_parent_dma_tag() knows
 * whether there is anything to destroy. Returns 0 or -1.
 */
static int
qla_alloc_parent_dma_tag(qla_host_t *ha)
{
	int ret;
	device_t dev;

	dev = ha->pci_dev;

	/*
	 * Allocate parent DMA Tag
	 */
	ret = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &ha->parent_tag);

	if (ret) {
		device_printf(dev, "%s: could not create parent dma tag\n",
		    __func__);
		return (-1);
	}

	ha->flags.parent_tag = 1;

	return (0);
}
/*
 * Destroy the parent DMA tag, if one was created. Safe to call
 * multiple times: the flag is cleared after the first destroy.
 */
static void
qla_free_parent_dma_tag(qla_host_t *ha)
{
	if (!ha->flags.parent_tag)
		return;

	bus_dma_tag_destroy(ha->parent_tag);
	ha->flags.parent_tag = 0;
}
/*
* Name: qla_init_ifnet
* Function: Creates the Network Device Interface and Registers it with the O.S
*/
/*
 * Create the network interface, register the driver entry points,
 * advertise hardware capabilities, and attach the ifmedia entries.
 * Note: leftover diff-marker lines (the obsolete if_alloc() NULL
 * check) have been dropped — if_alloc(9) cannot return NULL.
 */
static void
qla_init_ifnet(device_t dev, qla_host_t *ha)
{
	if_t ifp;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ifp = ha->ifp = if_alloc(IFT_ETHER);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	if_setbaudrate(ifp, IF_Gbps(10));
	if_setcapabilities(ifp, IFCAP_LINKSTATE);
	if_setmtu(ifp, ETHERMTU);

	if_setinitfn(ifp, qla_init);
	if_setsoftc(ifp, ha);
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, qla_ioctl);

	if_settransmitfn(ifp, qla_transmit);
	if_setqflushfn(ifp, qla_qflush);

	if_setsendqlen(ifp, qla_get_ifq_snd_maxlen(ha));
	if_setsendqready(ifp);

	/* Largest frame on the wire: MTU + Ethernet header + CRC. */
	ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;

	ether_ifattach(ifp, qla_get_mac_addr(ha));

	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM |
	    IFCAP_TSO4 |
	    IFCAP_TSO6 |
	    IFCAP_JUMBO_MTU |
	    IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_MTU |
	    IFCAP_VLAN_HWTSO |
	    IFCAP_LRO, 0);

	if_setcapenable(ifp, if_getcapabilities(ifp));

	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);

	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
	    NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

	return;
}
/*
 * (Re)initialize the interface; hw_lock must be held by the caller.
 * Stops the port, reallocates tx/rx buffers, and brings the hardware
 * interface back up. Silently returns on buffer allocation failure.
 */
static void
qla_init_locked(qla_host_t *ha)
{
	if_t ifp = ha->ifp;

	ql_sp_log(ha, 14, 0, 0, 0, 0, 0, 0);

	/* Quiesce the interface before reallocating its buffers. */
	qla_stop(ha);

	if (qla_alloc_xmt_bufs(ha) != 0)
		return;

	qla_confirm_9kb_enable(ha);

	if (qla_alloc_rcv_bufs(ha) != 0)
		return;

	/* Pick up a possibly-changed MAC address from the ifnet. */
	bcopy(if_getlladdr(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);

	if_sethwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_TSO);
	if_sethwassistbits(ifp, CSUM_TCP_IPV6 | CSUM_UDP_IPV6, 0);

	ha->stop_rcv = 0;
	if (ql_init_hw_if(ha) == 0) {
		ifp = ha->ifp;

		if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);

		ha->hw_vlan_tx_frames = 0;
		ha->tx_tso_frames = 0;
		ha->qla_interface_up = 1;

		ql_update_link_state(ha);
	} else {
		/* Optionally freeze the special-purpose log on failure. */
		if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_IF_START_FAILURE)
			ha->hw.sp_log_stop = -1;
	}

	ha->qla_watchdog_pause = 0;

	return;
}
/*
 * ifnet init entry point: acquire the driver lock (sleepable, no
 * timeout) and run the locked initialization.
 */
static void
qla_init(void *arg)
{
	qla_host_t *ha = arg;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (QLA_LOCK(ha, __func__, -1, 0) != 0)
		return;

	qla_init_locked(ha);

	QLA_UNLOCK(ha, __func__);

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}
/*
 * if_foreach_llmaddr() callback: copy one multicast MAC into the
 * flat table at arg. Returns 1 if copied, 0 once the table is full
 * (so the count stops growing past the firmware limit).
 */
static u_int
qla_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
{
	uint8_t *table = arg;

	if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
		return (0);

	memcpy(&table[mcnt * Q8_MAC_ADDR_LEN], LLADDR(sdl), Q8_MAC_ADDR_LEN);

	return (1);
}
/*
 * Program the multicast filter. add_multi selects add (non-zero) vs
 * delete (0); on delete the whole table is cleared first, then the
 * current membership is re-programmed. Returns 0, -1 on lock
 * timeout, or the hardware call's error.
 */
static int
qla_set_multi(qla_host_t *ha, uint32_t add_multi)
{
	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	int mcnt = 0;
	if_t ifp = ha->ifp;
	int ret = 0;

	/* Collect up to Q8_MAX_NUM_MULTICAST_ADDRS link-layer addresses. */
	mcnt = if_foreach_llmaddr(ifp, qla_copy_maddr, mta);

	if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
	    QLA_LOCK_NO_SLEEP) != 0)
		return (-1);

	ql_sp_log(ha, 12, 4, if_getdrvflags(ifp),
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING),
	    add_multi, (uint32_t)mcnt, 0);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
		if (!add_multi) {
			/* Delete path: wipe the table, then re-add below. */
			ret = qla_hw_del_all_mcast(ha);

			if (ret)
				device_printf(ha->pci_dev,
				    "%s: qla_hw_del_all_mcast() failed\n",
				    __func__);
		}

		if (!ret)
			ret = ql_hw_set_multi(ha, mta, mcnt, 1);
	}

	QLA_UNLOCK(ha, __func__);

	return (ret);
}
/*
 * ifnet ioctl entry point. All state-changing paths take the driver
 * lock with a bounded, non-sleeping acquire; ioctls are rejected
 * outright while the port is offline or error recovery is pending.
 */
static int
qla_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	qla_host_t *ha;

	ha = (qla_host_t *)if_getsoftc(ifp);
	if (ha->offline || ha->qla_initiate_recovery)
		return (ret);

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
		    __func__, cmd));

#ifdef INET
		/* IPv4: bring the interface up and seed ARP directly. */
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ret = QLA_LOCK(ha, __func__,
			    QLA_LOCK_DEFAULT_MS_TIMEOUT,
			    QLA_LOCK_NO_SLEEP);
			if (ret)
				break;

			if_setflagbits(ifp, IFF_UP, 0);

			ql_sp_log(ha, 8, 3, if_getdrvflags(ifp),
			    (if_getdrvflags(ifp) & IFF_DRV_RUNNING),
			    ntohl(IA_SIN(ifa)->sin_addr.s_addr), 0, 0);

			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
				qla_init_locked(ha);
			}

			QLA_UNLOCK(ha, __func__);
			QL_DPRINT4(ha, (ha->pci_dev,
			    "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
			    __func__, cmd,
			    ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
			break;
		}
#endif
		/* Non-IPv4 addresses go through the generic handler. */
		ether_ioctl(ifp, cmd, data);
		break;

	case SIOCSIFMTU:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
		    __func__, cmd));

		if (ifr->ifr_mtu > QLA_MAX_MTU) {
			ret = EINVAL;
		} else {
			ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
			    QLA_LOCK_NO_SLEEP);

			if (ret)
				break;

			if_setmtu(ifp, ifr->ifr_mtu);
			ha->max_frame_size =
			    if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;

			ql_sp_log(ha, 9, 4, if_getdrvflags(ifp),
			    (if_getdrvflags(ifp) & IFF_DRV_RUNNING),
			    ha->max_frame_size, if_getmtu(ifp), 0);

			/* An MTU change requires a full reinitialization. */
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				qla_init_locked(ha);
			}

			/* Jumbo MTU gets a higher rx-replenish threshold. */
			if (if_getmtu(ifp) > ETHERMTU)
				ha->std_replenish = QL_JUMBO_REPLENISH_THRES;
			else
				ha->std_replenish = QL_STD_REPLENISH_THRES;

			QLA_UNLOCK(ha, __func__);
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
		    __func__, cmd));

		ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
		    QLA_LOCK_NO_SLEEP);

		if (ret)
			break;

		ql_sp_log(ha, 10, 4, if_getdrvflags(ifp),
		    (if_getdrvflags(ifp) & IFF_DRV_RUNNING),
		    ha->if_flags, if_getflags(ifp), 0);

		if (if_getflags(ifp) & IFF_UP) {
			ha->max_frame_size = if_getmtu(ifp) +
			    ETHER_HDR_LEN + ETHER_CRC_LEN;
			qla_init_locked(ha);

			/* Only PROMISC/ALLMULTI toggles need hw updates. */
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				if ((if_getflags(ifp) ^ ha->if_flags) &
				    IFF_PROMISC) {
					ret = ql_set_promisc(ha);
				} else if ((if_getflags(ifp) ^ ha->if_flags) &
				    IFF_ALLMULTI) {
					ret = ql_set_allmulti(ha);
				}
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				qla_stop(ha);
			ha->if_flags = if_getflags(ifp);
		}

		QLA_UNLOCK(ha, __func__);
		break;

	case SIOCADDMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
		    "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (qla_set_multi(ha, 1))
			ret = EINVAL;
		break;

	case SIOCDELMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
		    "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (qla_set_multi(ha, 0))
			ret = EINVAL;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4(ha, (ha->pci_dev,
		    "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
		    __func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);

		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
		    __func__, cmd));

		if (mask & IFCAP_HWCSUM)
			if_togglecapenable(ifp, IFCAP_HWCSUM);
		if (mask & IFCAP_TSO4)
			if_togglecapenable(ifp, IFCAP_TSO4);
		if (mask & IFCAP_TSO6)
			if_togglecapenable(ifp, IFCAP_TSO6);
		if (mask & IFCAP_VLAN_HWTAGGING)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
		if (mask & IFCAP_VLAN_HWTSO)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
		if (mask & IFCAP_LRO)
			if_togglecapenable(ifp, IFCAP_LRO);

		/* Reinit so the hardware picks up the new capability set. */
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
			    QLA_LOCK_NO_SLEEP);

			if (ret)
				break;

			ql_sp_log(ha, 11, 4, if_getdrvflags(ifp),
			    (if_getdrvflags(ifp) & IFF_DRV_RUNNING),
			    mask, if_getcapenable(ifp), 0);

			qla_init_locked(ha);

			QLA_UNLOCK(ha, __func__);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
		    __func__, cmd));

		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}
/*
 * ifmedia change callback. Only Ethernet media is supported; any
 * other requested media type is rejected with EINVAL.
 */
static int
qla_media_change(if_t ifp)
{
	qla_host_t *ha = (qla_host_t *)if_getsoftc(ifp);
	int rc;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	rc = (IFM_TYPE(ha->media.ifm_media) == IFM_ETHER) ? 0 : EINVAL;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

	return (rc);
}
/*
 * ifmedia status callback: refresh the link state from hardware and
 * report active Ethernet media (full duplex + detected optics) when
 * the link is up.
 */
static void
qla_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	qla_host_t *ha = (qla_host_t *)if_getsoftc(ifp);

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	ql_update_link_state(ha);

	if (ha->hw.link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
	}

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,
	    (ha->hw.link_up ? "link_up" : "link_down")));
}
/*
 * DMA-map one outbound mbuf chain and hand it to the hardware tx
 * ring txr_idx. On success the mbuf is owned by the tx ring (freed
 * at completion); on most failures it is freed here and *m_headp is
 * set to NULL — except ENOMEM from the dma load, where the caller
 * keeps ownership and may retry. Returns 0 or an error.
 */
static int
qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
	uint32_t iscsi_pdu)
{
	bus_dma_segment_t segs[QLA_MAX_SEGMENTS];
	bus_dmamap_t map;
	int nsegs;
	int ret = -1;
	uint32_t tx_idx;
	struct mbuf *m_head = *m_headp;

	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));

	tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;

	/* A non-NULL slot means ring bookkeeping is corrupt: recover. */
	if ((NULL != ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head) ||
	    (QL_ERR_INJECT(ha, INJCT_TXBUF_MBUF_NON_NULL))){
		QL_ASSERT(ha, 0, ("%s [%d]: txr_idx = %d tx_idx = %d "\
		    "mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,\
		    ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head));
		QL_DPRINT2(ha, (ha->pci_dev, "%s [%d]: txr_idx = %d tx_idx = %d "
		    "mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,
		    ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head));

		if (m_head)
			m_freem(m_head);
		*m_headp = NULL;
		QL_INITIATE_RECOVERY(ha);
		return (ret);
	}

	map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;

	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
	    BUS_DMA_NOWAIT);

	if (ret == EFBIG) {
		/* Too many segments: defragment the chain and retry once. */
		struct mbuf *m;

		QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
		    m_head->m_pkthdr.len));

		m = m_defrag(m_head, M_NOWAIT);

		if (m == NULL) {
			ha->err_tx_defrag++;
			m_freem(m_head);
			*m_headp = NULL;
			device_printf(ha->pci_dev,
			    "%s: m_defrag() = NULL [%d]\n",
			    __func__, ret);
			return (ENOBUFS);
		}
		m_head = m;
		*m_headp = m_head;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
		    segs, &nsegs, BUS_DMA_NOWAIT))) {
			ha->err_tx_dmamap_load++;

			device_printf(ha->pci_dev,
			    "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
			    __func__, ret, m_head->m_pkthdr.len);

			/* ENOMEM is transient: caller keeps the mbuf. */
			if (ret != ENOMEM) {
				m_freem(m_head);
				*m_headp = NULL;
			}
			return (ret);
		}

	} else if (ret) {
		ha->err_tx_dmamap_load++;

		device_printf(ha->pci_dev,
		    "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
		    __func__, ret, m_head->m_pkthdr.len);

		if (ret != ENOMEM) {
			m_freem(m_head);
			*m_headp = NULL;
		}
		return (ret);
	}

	QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet"));

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

	if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx,
	    iscsi_pdu))) {
		ha->tx_ring[txr_idx].count++;
		if (iscsi_pdu)
			ha->tx_ring[txr_idx].iscsi_pkt_count++;
		/* Ring owns the mbuf until tx completion. */
		ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
	} else {
		bus_dmamap_unload(ha->tx_tag, map);
		if (ret == EINVAL) {
			if (m_head)
				m_freem(m_head);
			*m_headp = NULL;
		}
	}

	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));

	return (ret);
}
/*
 * Create the per-fastpath transmit buf_ring and its mutex. The
 * mutex is left initialized even on failure; qla_free_tx_br()
 * handles that asymmetry. Returns 0 or ENOMEM.
 */
static int
qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
{
	snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
	    "qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx);

	mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);

	fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF,
	    M_NOWAIT, &fp->tx_mtx);

	if (fp->tx_br == NULL) {
		QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for "
		    " fp[%d, %d]\n", ha->pci_func, fp->txr_idx));
		/*
		 * Return a positive errno per kernel convention
		 * (was -ENOMEM, a Linux-style negative errno).
		 */
		return (ENOMEM);
	}
	return 0;
}
/*
 * Destroy a fastpath's transmit buf_ring: drain and free any queued
 * mbufs under the tx mutex, free the ring, then destroy the mutex.
 * Safe to call when only the mutex (or nothing) was initialized.
 */
static void
qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
{
	struct mbuf *mp;
	if_t ifp = ha->ifp;

	if (mtx_initialized(&fp->tx_mtx)) {
		if (fp->tx_br != NULL) {
			mtx_lock(&fp->tx_mtx);

			while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
				m_freem(mp);
			}

			mtx_unlock(&fp->tx_mtx);

			buf_ring_free(fp->tx_br, M_DEVBUF);
			fp->tx_br = NULL;
		}
		mtx_destroy(&fp->tx_mtx);
	}
	return;
}
/*
 * Per-fastpath task: processes receive completions and drains the
 * transmit buf_ring while the interface is running and link is up.
 * Re-queues itself if work remains; otherwise re-enables the ring's
 * interrupt (interrupts are masked while this task runs).
 */
static void
qla_fp_taskqueue(void *context, int pending)
{
	qla_tx_fp_t *fp;
	qla_host_t *ha;
	if_t ifp;
	struct mbuf *mp = NULL;
	int ret = 0;
	uint32_t txr_idx;
	uint32_t iscsi_pdu = 0;
	/* Non-zero seed so the loop below is entered at least once. */
	uint32_t rx_pkts_left = -1;

	fp = context;

	if (fp == NULL)
		return;

	ha = (qla_host_t *)fp->ha;
	ifp = ha->ifp;

	txr_idx = fp->txr_idx;

	mtx_lock(&fp->tx_mtx);

	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) || (!ha->hw.link_up)) {
		mtx_unlock(&fp->tx_mtx);
		goto qla_fp_taskqueue_exit;
	}

	while (rx_pkts_left && !ha->stop_rcv &&
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) && ha->hw.link_up) {
		/* Process up to 64 rx completions per pass. */
		rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64);

#ifdef QL_ENABLE_ISCSI_TLV
		/* iSCSI mode pairs each ring with one in the upper half. */
		ql_hw_tx_done_locked(ha, fp->txr_idx);
		ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1)));
#else
		ql_hw_tx_done_locked(ha, fp->txr_idx);
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

		mp = drbr_peek(ifp, fp->tx_br);

		while (mp != NULL) {
			if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) {
#ifdef QL_ENABLE_ISCSI_TLV
				if (ql_iscsi_pdu(ha, mp) == 0) {
					txr_idx = txr_idx +
					    (ha->hw.num_tx_rings >> 1);
					iscsi_pdu = 1;
				} else {
					iscsi_pdu = 0;
					txr_idx = fp->txr_idx;
				}
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
			}

			ret = qla_send(ha, &mp, txr_idx, iscsi_pdu);

			if (ret) {
				/*
				 * ENOMEM leaves mp intact (retry later);
				 * other errors freed it, so advance.
				 */
				if (mp != NULL)
					drbr_putback(ifp, fp->tx_br, mp);
				else {
					drbr_advance(ifp, fp->tx_br);
				}

				mtx_unlock(&fp->tx_mtx);

				goto qla_fp_taskqueue_exit0;
			} else {
				drbr_advance(ifp, fp->tx_br);
			}

			/* Send a copy of the frame to the BPF listener */
			ETHER_BPF_MTAP(ifp, mp);

			if (((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) ||
			    (!ha->hw.link_up))
				break;

			mp = drbr_peek(ifp, fp->tx_br);
		}
	}
	mtx_unlock(&fp->tx_mtx);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		goto qla_fp_taskqueue_exit;

qla_fp_taskqueue_exit0:

	if (rx_pkts_left || ((mp != NULL) && ret)) {
		/* Work remains: run again rather than re-enabling intr. */
		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
	} else {
		if (!ha->stop_rcv) {
			QL_ENABLE_INTERRUPTS(ha, fp->txr_idx);
		}
	}

qla_fp_taskqueue_exit:
	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));

	return;
}
/*
 * Create one fast taskqueue (with a dedicated PI_NET thread) per
 * status-descriptor ring. Returns 0 on success, -1 on failure
 * (caller is expected to tear down via qla_destroy_fp_taskqueues()).
 */
static int
qla_create_fp_taskqueues(qla_host_t *ha)
{
	int i;
	/* char, not uint8_t: snprintf/taskqueue APIs take char *. */
	char tq_name[32];

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		qla_tx_fp_t *fp = &ha->tx_fp[i];

		/* snprintf() NUL-terminates, so no bzero() is needed. */
		snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);

		NET_TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);

		fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
		    taskqueue_thread_enqueue,
		    &fp->fp_taskqueue);

		if (fp->fp_taskqueue == NULL)
			return (-1);

		taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
		    tq_name);

		QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
		    fp->fp_taskqueue));
	}

	return (0);
}
/*
 * Drain and free every fastpath taskqueue created by
 * qla_create_fp_taskqueues(); NULL entries are skipped, so this is
 * safe after a partial creation.
 */
static void
qla_destroy_fp_taskqueues(qla_host_t *ha)
{
	int i;

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		qla_tx_fp_t *fp = &ha->tx_fp[i];

		if (fp->fp_taskqueue == NULL)
			continue;

		taskqueue_drain_all(fp->fp_taskqueue);
		taskqueue_free(fp->fp_taskqueue);
		fp->fp_taskqueue = NULL;
	}
}
/*
 * Wait for all queued fastpath tasks to complete without freeing
 * the taskqueues themselves.
 */
static void
qla_drain_fp_taskqueues(qla_host_t *ha)
{
	int i;

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		qla_tx_fp_t *fp = &ha->tx_fp[i];

		if (fp->fp_taskqueue == NULL)
			continue;

		taskqueue_drain_all(fp->fp_taskqueue);
	}
}
/*
 * if_transmit entry point: select a tx fastpath from the RSS hash,
 * enqueue the mbuf on its buf_ring, and kick the fastpath task.
 * Returns 0, EINVAL if the ring does not exist, or the enqueue error.
 */
static int
qla_transmit(if_t ifp, struct mbuf *mp)
{
	qla_host_t *ha = (qla_host_t *)if_getsoftc(ifp);
	qla_tx_fp_t *fp;
	int rss_id = 0;
	int ret = 0;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	/* Spread flows across fastpaths using the RSS flow id. */
	if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
		rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) %
			ha->hw.num_sds_rings;

	fp = &ha->tx_fp[rss_id];

	if (fp->tx_br == NULL) {
		ret = EINVAL;
		goto qla_transmit_exit;
	}

	if (mp != NULL) {
		/*
		 * On failure drbr_enqueue() frees the mbuf and returns
		 * ENOBUFS. Propagate that error so the stack can account
		 * the drop; previously it was clobbered with 0, silently
		 * reporting success for dropped frames.
		 */
		ret = drbr_enqueue(ifp, fp->tx_br, mp);
	}

	if (fp->fp_taskqueue != NULL)
		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);

qla_transmit_exit:

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
	return (ret);
}
/*
 * if_qflush entry point: discard every mbuf queued on each
 * fastpath's transmit buf_ring, holding the ring's mutex while
 * dequeueing.
 */
static void
qla_qflush(if_t ifp)
{
	qla_host_t *ha = (qla_host_t *)if_getsoftc(ifp);
	struct mbuf *mp;
	int i;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		qla_tx_fp_t *fp = &ha->tx_fp[i];

		if (fp == NULL || fp->tx_br == NULL)
			continue;

		mtx_lock(&fp->tx_mtx);
		while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL)
			m_freem(mp);
		mtx_unlock(&fp->tx_mtx);
	}

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}
/*
 * Quiesce the interface: mark it down, pause the watchdog, wait for
 * in-flight fastpath work to finish, tear down the hardware
 * interface, and release the tx/rx buffers.
 */
static void
qla_stop(qla_host_t *ha)
{
	if_t ifp = ha->ifp;
	int i = 0;

	ql_sp_log(ha, 13, 0, 0, 0, 0, 0, 0);

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	ha->qla_watchdog_pause = 1;

	/*
	 * Lock/unlock each tx mutex to act as a barrier: any fastpath
	 * task already inside its critical section completes first.
	 */
	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		qla_tx_fp_t *fp;

		fp = &ha->tx_fp[i];

		if (fp == NULL)
			continue;

		if (fp->tx_br != NULL) {
			mtx_lock(&fp->tx_mtx);
			mtx_unlock(&fp->tx_mtx);
		}
	}

	/* Wait for the watchdog to acknowledge the pause. */
	while (!ha->qla_watchdog_paused)
		qla_mdelay(__func__, 1);

	ha->qla_interface_up = 0;

	qla_drain_fp_taskqueues(ha);

	ql_del_hw_if(ha);

	qla_free_xmt_bufs(ha);
	qla_free_rcv_bufs(ha);

	return;
}
/*
* Buffer Management Functions for Transmit and Receive Rings
*/
/*
 * Create the transmit DMA tag and one DMA map per tx descriptor in
 * every tx ring. On any map-creation failure everything allocated so
 * far is released via qla_free_xmt_bufs(). Returns 0 or an errno.
 */
static int
qla_alloc_xmt_bufs(qla_host_t *ha)
{
	int ret = 0;
	uint32_t i, j;
	qla_tx_buf_t *txb;

	/* NOTE(review): parent tag is NULL here, not ha->parent_tag. */
	if (bus_dma_tag_create(NULL,    /* parent */
		1, 0,    /* alignment, bounds */
		BUS_SPACE_MAXADDR,       /* lowaddr */
		BUS_SPACE_MAXADDR,       /* highaddr */
		NULL, NULL,      /* filter, filterarg */
		QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
		QLA_MAX_SEGMENTS,        /* nsegments */
		PAGE_SIZE,        /* maxsegsize */
		BUS_DMA_ALLOCNOW,        /* flags */
		NULL,    /* lockfunc */
		NULL,    /* lockfuncarg */
		&ha->tx_tag)) {
		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
			__func__);
		return (ENOMEM);
	}

	/* Start from a clean slate so partial failures unwind cleanly. */
	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		bzero((void *)ha->tx_ring[i].tx_buf,
			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
	}

	for (j = 0; j < ha->hw.num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
			txb = &ha->tx_ring[j].tx_buf[i];

			if ((ret = bus_dmamap_create(ha->tx_tag,
				BUS_DMA_NOWAIT, &txb->map))) {
				ha->err_tx_dmamap_create++;
				device_printf(ha->pci_dev,
					"%s: bus_dmamap_create failed[%d]\n",
					__func__, ret);

				qla_free_xmt_bufs(ha);

				return (ret);
			}
		}
	}

	return 0;
}
/*
* Release mbuf after it sent on the wire
*/
/*
 * Release one tx descriptor's resources: sync/unload the DMA map,
 * free the mbuf, and destroy the map. The second branch handles a
 * slot that has a map but no mbuf attached.
 */
static void
qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (txb->m_head) {
		bus_dmamap_sync(ha->tx_tag, txb->map,
			BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(ha->tx_tag, txb->map);

		m_freem(txb->m_head);
		txb->m_head = NULL;

		bus_dmamap_destroy(ha->tx_tag, txb->map);
		txb->map = NULL;
	}

	/* Map without an mbuf (never loaded or already freed above). */
	if (txb->map) {
		bus_dmamap_unload(ha->tx_tag, txb->map);
		bus_dmamap_destroy(ha->tx_tag, txb->map);
		txb->map = NULL;
	}

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}
/*
 * Undo qla_alloc_xmt_bufs(): release every descriptor's mbuf/map,
 * destroy the tx DMA tag, and zero the bookkeeping arrays.
 */
static void
qla_free_xmt_bufs(qla_host_t *ha)
{
	int ring, desc;

	for (ring = 0; ring < ha->hw.num_tx_rings; ring++) {
		for (desc = 0; desc < NUM_TX_DESCRIPTORS; desc++)
			qla_clear_tx_buf(ha, &ha->tx_ring[ring].tx_buf[desc]);
	}

	if (ha->tx_tag != NULL) {
		bus_dma_tag_destroy(ha->tx_tag);
		ha->tx_tag = NULL;
	}

	for (ring = 0; ring < ha->hw.num_tx_rings; ring++) {
		memset(ha->tx_ring[ring].tx_buf, 0,
		    sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS);
	}
}
/*
 * Populate every receive ring: create a DMA map per descriptor,
 * then attach an mbuf cluster to each and program its bus address
 * into the hardware descriptor. On failure all maps created so far
 * are destroyed. Returns 0 or -1.
 */
static int
qla_alloc_rcv_std(qla_host_t *ha)
{
	int		i, j, k, r, ret = 0;
	qla_rx_buf_t	*rxb;
	qla_rx_ring_t	*rx_ring;

	for (r = 0; r < ha->hw.num_rds_rings; r++) {
		rx_ring = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
			rxb = &rx_ring->rx_buf[i];

			ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT,
				&rxb->map);

			if (ret) {
				device_printf(ha->pci_dev,
					"%s: dmamap[%d, %d] failed\n",
					__func__, r, i);

				/* Unwind: destroy maps of complete rings... */
				for (k = 0; k < r; k++) {
					for (j = 0; j < NUM_RX_DESCRIPTORS;
						j++) {
						rxb = &ha->rx_ring[k].rx_buf[j];
						bus_dmamap_destroy(ha->rx_tag,
							rxb->map);
					}
				}

				/* ...then the partial ring's maps. */
				for (j = 0; j < i; j++) {
					bus_dmamap_destroy(ha->rx_tag,
						rx_ring->rx_buf[j].map);
				}

				goto qla_alloc_rcv_std_err;
			}
		}
	}

	qla_init_hw_rcv_descriptors(ha);

	for (r = 0; r < ha->hw.num_rds_rings; r++) {
		rx_ring = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
			rxb = &rx_ring->rx_buf[i];
			rxb->handle = i;
			if (!(ret = ql_get_mbuf(ha, rxb, NULL))) {
				/*
			 	 * set the physical address in the
				 * corresponding descriptor entry in the
				 * receive ring/queue for the hba
				 */
				qla_set_hw_rcv_desc(ha, r, i, rxb->handle,
					rxb->paddr,
					(rxb->m_head)->m_pkthdr.len);
			} else {
				device_printf(ha->pci_dev,
					"%s: ql_get_mbuf [%d, %d] failed\n",
					__func__, r, i);
				/*
				 * NOTE(review): only this descriptor's map
				 * is destroyed here; earlier descriptors
				 * are left for the caller's teardown path.
				 */
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
				goto qla_alloc_rcv_std_err;
			}
		}
	}
	return 0;

qla_alloc_rcv_std_err:
	return (-1);
}
/*
 * Free every populated receive buffer: unload and destroy its DMA
 * map, then release the mbuf. Descriptors without an mbuf attached
 * are skipped.
 */
static void
qla_free_rcv_std(qla_host_t *ha)
{
	int ring, desc;
	qla_rx_buf_t *rxb;

	for (ring = 0; ring < ha->hw.num_rds_rings; ring++) {
		for (desc = 0; desc < NUM_RX_DESCRIPTORS; desc++) {
			rxb = &ha->rx_ring[ring].rx_buf[desc];

			if (rxb->m_head == NULL)
				continue;

			bus_dmamap_unload(ha->rx_tag, rxb->map);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			m_freem(rxb->m_head);
			rxb->m_head = NULL;
		}
	}
}
/*
 * Create the receive DMA tag (single segment, up to 9KB jumbo
 * clusters), reset per-ring state, and populate the rings via
 * qla_alloc_rcv_std(). Returns 0 or an errno / -1.
 */
static int
qla_alloc_rcv_bufs(qla_host_t *ha)
{
	int		i, ret = 0;

	/* NOTE(review): parent tag is NULL here, not ha->parent_tag. */
	if (bus_dma_tag_create(NULL,    /* parent */
			1, 0,    /* alignment, bounds */
			BUS_SPACE_MAXADDR,       /* lowaddr */
			BUS_SPACE_MAXADDR,       /* highaddr */
			NULL, NULL,      /* filter, filterarg */
			MJUM9BYTES,     /* maxsize */
			1,        /* nsegments */
			MJUM9BYTES,        /* maxsegsize */
			BUS_DMA_ALLOCNOW,        /* flags */
			NULL,    /* lockfunc */
			NULL,    /* lockfuncarg */
			&ha->rx_tag)) {
		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
			__func__);

		return (ENOMEM);
	}

	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));

	/* Reset per-SDS-ring receive bookkeeping. */
	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
	}

	ret = qla_alloc_rcv_std(ha);

	return (ret);
}
/*
 * Undo qla_alloc_rcv_bufs(): free all receive buffers, destroy the
 * rx DMA tag, and reset the ring/SDS bookkeeping to pristine state.
 */
static void
qla_free_rcv_bufs(qla_host_t *ha)
{
	int		i;

	qla_free_rcv_std(ha);

	if (ha->rx_tag != NULL) {
		bus_dma_tag_destroy(ha->rx_tag);
		ha->rx_tag = NULL;
	}

	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
	}

	return;
}
/*
 * Attach (or recycle) a receive mbuf cluster to rxb and DMA-map it.
 * When nmp is NULL a fresh cluster is allocated (9KB jumbo when
 * enabled, else a standard cluster); otherwise the caller's mbuf is
 * reset and reused. On success rxb->m_head/rxb->paddr are valid and
 * the map is synced for PREREAD. Returns 0, ENOBUFS, or -1.
 *
 * Fixes: segs[0].ds_addr was read before checking the load result,
 * consuming uninitialized stack memory on failure; nsegs was also
 * printed uninitialized on that path. (Also drops the deprecated
 * 'register' qualifier.)
 */
int
ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
{
	struct mbuf *mp = nmp;
	int ret = 0;
	uint32_t offset;
	bus_dma_segment_t segs[1];
	int nsegs = 0, mbuf_size;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (ha->hw.enable_9kb)
		mbuf_size = MJUM9BYTES;
	else
		mbuf_size = MCLBYTES;

	if (mp == NULL) {
		if (QL_ERR_INJECT(ha, INJCT_M_GETCL_M_GETJCL_FAILURE))
			return (-1);

		if (ha->hw.enable_9kb)
			mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size);
		else
			mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);

		if (mp == NULL) {
			ha->err_m_getcl++;
			ret = ENOBUFS;
			device_printf(ha->pci_dev,
			    "%s: m_getcl failed\n", __func__);
			goto exit_ql_get_mbuf;
		}
		mp->m_len = mp->m_pkthdr.len = mbuf_size;
	} else {
		/* Recycle the caller's cluster: reset length and data ptr. */
		mp->m_len = mp->m_pkthdr.len = mbuf_size;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	/* Align the buffer start to an 8-byte boundary. */
	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
	if (offset) {
		offset = 8 - offset;
		m_adj(mp, offset);
	}

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
	    mp, segs, &nsegs, BUS_DMA_NOWAIT);

	/* Only consume segs[] after a successful single-segment load. */
	rxb->paddr = (ret == 0 && nsegs == 1) ? segs[0].ds_addr : 0;

	if (ret || !rxb->paddr || (nsegs != 1)) {
		m_free(mp);
		rxb->m_head = NULL;
		device_printf(ha->pci_dev,
		    "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
		    __func__, ret, (long long unsigned int)rxb->paddr,
		    nsegs);
		ret = -1;
		goto exit_ql_get_mbuf;
	}
	rxb->m_head = mp;
	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);

exit_ql_get_mbuf:
	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
	return (ret);
}
/*
 * Locate the sibling PCI function (same slot, same device id) of a
 * dual-port adapter and record it in ha->peer_dev for the error
 * recovery handshake.
 */
static void
qla_get_peer(qla_host_t *ha)
{
	device_t *peers;
	int count, i, slot;
	int my_slot = pci_get_slot(ha->pci_dev);

	if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count))
		return;

	for (i = 0; i < count; i++) {
		slot = pci_get_slot(peers[i]);

		if ((slot >= 0) && (slot == my_slot) &&
		    (pci_get_device(peers[i]) ==
		    pci_get_device(ha->pci_dev))) {
			if (ha->pci_dev != peers[i])
				ha->peer_dev = peers[i];
		}
	}

	/*
	 * device_get_children(9) allocates the array with M_TEMP; the
	 * caller must free it — previously this leaked on every attach.
	 */
	free(peers, M_TEMP);
}
/*
 * Deliver an inter-port message by writing directly into the peer
 * function's softc; a no-op when no peer device was discovered.
 */
static void
qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer)
{
	qla_host_t *peer_sc;

	if (ha->peer_dev == NULL)
		return;

	peer_sc = device_get_softc(ha->peer_dev);
	if (peer_sc != NULL)
		peer_sc->msg_from_peer = msg_to_peer;
}
/*
 * Arm error recovery: mark the interface down and request the
 * recovery task. If recovery is disabled, or we are still in early
 * boot (cold), just take the port offline instead.
 */
void
qla_set_error_recovery(qla_host_t *ha)
{
	if_t ifp = ha->ifp;

	if (cold || !ha->enable_error_recovery) {
		ha->offline = 1;
		return;
	}

	if (ifp != NULL)
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	ha->qla_initiate_recovery = 1;
}
/*
 * Error recovery task. Coordinates a hardware reset with the peer
 * port: the even-numbered function drives the reset and the odd one
 * follows, handshaking via QL_PEER_MSG_RESET/QL_PEER_MSG_ACK with a
 * ~40 second (400 x 100ms) timeout. On success the interface is
 * re-created; on any failure the port is marked offline.
 */
static void
qla_error_recovery(void *context, int pending)
{
	qla_host_t *ha = context;
	/* 400 iterations of 100ms = handshake timeout budget. */
	uint32_t msecs_100 = 400;
	if_t ifp = ha->ifp;
	int i = 0;

	device_printf(ha->pci_dev, "%s: enter\n", __func__);
	ha->hw.imd_compl = 1;

	/* Make sure no stats/async task runs concurrently with recovery. */
	taskqueue_drain_all(ha->stats_tq);
	taskqueue_drain_all(ha->async_event_tq);

	if (QLA_LOCK(ha, __func__, -1, 0) != 0)
		return;

	device_printf(ha->pci_dev, "%s: ts_usecs = %ld start\n",
		__func__, qla_get_usec_timestamp());

	if (ha->qla_interface_up) {
		qla_mdelay(__func__, 300);

		/* Barrier: let in-flight fastpath work drain. */
		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			qla_tx_fp_t *fp;

			fp = &ha->tx_fp[i];

			if (fp == NULL)
				continue;

			if (fp->tx_br != NULL) {
				mtx_lock(&fp->tx_mtx);
				mtx_unlock(&fp->tx_mtx);
			}
		}
	}

	qla_drain_fp_taskqueues(ha);

	if ((ha->pci_func & 0x1) == 0) {
		/* Even function: master of the reset handshake. */
		if (!ha->msg_from_peer) {
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);

			while ((ha->msg_from_peer != QL_PEER_MSG_ACK) &&
				msecs_100--)
				qla_mdelay(__func__, 100);
		}

		ha->msg_from_peer = 0;

		if (ha->enable_minidump)
			ql_minidump(ha);

		if (ha->enable_driverstate_dump)
			ql_capture_drvr_state(ha);

		if (ql_init_hw(ha)) {
			device_printf(ha->pci_dev,
				"%s: ts_usecs = %ld exit: ql_init_hw failed\n",
				__func__, qla_get_usec_timestamp());
			ha->offline = 1;
			goto qla_error_recovery_exit;
		}

		if (ha->qla_interface_up) {
			qla_free_xmt_bufs(ha);
			qla_free_rcv_bufs(ha);
		}

		if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY))
			qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);

	} else {
		/* Odd function: acknowledge and wait for the master. */
		if (ha->msg_from_peer == QL_PEER_MSG_RESET) {
			ha->msg_from_peer = 0;

			if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY))
				qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
		} else {
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
		}

		while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100--)
			qla_mdelay(__func__, 100);

		ha->msg_from_peer = 0;

		if (ha->enable_driverstate_dump)
			ql_capture_drvr_state(ha);

		if (msecs_100 == 0) {
			device_printf(ha->pci_dev,
				"%s: ts_usecs = %ld exit: QL_PEER_MSG_ACK not received\n",
				__func__, qla_get_usec_timestamp());
			ha->offline = 1;
			goto qla_error_recovery_exit;
		}

		if (ql_init_hw(ha)) {
			device_printf(ha->pci_dev,
				"%s: ts_usecs = %ld exit: ql_init_hw failed\n",
				__func__, qla_get_usec_timestamp());
			ha->offline = 1;
			goto qla_error_recovery_exit;
		}

		if (ha->qla_interface_up) {
			qla_free_xmt_bufs(ha);
			qla_free_rcv_bufs(ha);
		}
	}

	qla_mdelay(__func__, ha->ms_delay_after_init);

	/* Clear all hardware flag bits in one store. */
	*((uint32_t *)&ha->hw.flags) = 0;
	ha->qla_initiate_recovery = 0;

	if (ha->qla_interface_up) {
		if (qla_alloc_xmt_bufs(ha) != 0) {
			ha->offline = 1;
			goto qla_error_recovery_exit;
		}

		qla_confirm_9kb_enable(ha);

		if (qla_alloc_rcv_bufs(ha) != 0) {
			ha->offline = 1;
			goto qla_error_recovery_exit;
		}

		ha->stop_rcv = 0;

		if (ql_init_hw_if(ha) == 0) {
			ifp = ha->ifp;
			if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
			ha->qla_watchdog_pause = 0;
			ql_update_link_state(ha);
		} else {
			ha->offline = 1;

			if (ha->hw.sp_log_stop_events &
				Q8_SP_LOG_STOP_IF_START_FAILURE)
				ha->hw.sp_log_stop = -1;
		}
	} else {
		ha->qla_watchdog_pause = 0;
	}

qla_error_recovery_exit:

	if (ha->offline ) {
		device_printf(ha->pci_dev, "%s: ts_usecs = %ld port offline\n",
			__func__, qla_get_usec_timestamp());

		if (ha->hw.sp_log_stop_events &
			Q8_SP_LOG_STOP_ERR_RECOVERY_FAILURE)
			ha->hw.sp_log_stop = -1;
	}

	QLA_UNLOCK(ha, __func__);

	/* Only restart the watchdog if the port survived recovery. */
	if (!ha->offline)
		callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
			qla_watchdog, ha);

	device_printf(ha->pci_dev,
		"%s: ts_usecs = %ld exit\n",
		__func__, qla_get_usec_timestamp());

	return;
}
static void
qla_async_event(void *context, int pending)
{
qla_host_t *ha = context;
if (QLA_LOCK(ha, __func__, -1, 0) != 0)
return;
if (ha->async_event) {
ha->async_event = 0;
qla_hw_async_event(ha);
}
QLA_UNLOCK(ha, __func__);
return;
}
static void
qla_stats(void *context, int pending)
{
qla_host_t *ha;
ha = context;
ql_get_stats(ha);
return;
}
diff --git a/sys/dev/qlxge/qls_os.c b/sys/dev/qlxge/qls_os.c
index a1ce1c637e1c..eca7006850e0 100644
--- a/sys/dev/qlxge/qls_os.c
+++ b/sys/dev/qlxge/qls_os.c
@@ -1,1498 +1,1494 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2013-2014 Qlogic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* File: qls_os.c
* Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
*/
#include <sys/cdefs.h>
#include "qls_os.h"
#include "qls_hw.h"
#include "qls_def.h"
#include "qls_inline.h"
#include "qls_ver.h"
#include "qls_glbl.h"
#include "qls_dbg.h"
#include <sys/smp.h>
/*
* Some PCI Configuration Space Related Defines
*/
#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC 0x1077
#endif
#ifndef PCI_DEVICE_QLOGIC_8000
#define PCI_DEVICE_QLOGIC_8000 0x8000
#endif
#define PCI_QLOGIC_DEV8000 \
((PCI_DEVICE_QLOGIC_8000 << 16) | PCI_VENDOR_QLOGIC)
/*
* static functions
*/
static int qls_alloc_parent_dma_tag(qla_host_t *ha);
static void qls_free_parent_dma_tag(qla_host_t *ha);
static void qls_flush_xmt_bufs(qla_host_t *ha);
static int qls_alloc_rcv_bufs(qla_host_t *ha);
static void qls_free_rcv_bufs(qla_host_t *ha);
static void qls_init_ifnet(device_t dev, qla_host_t *ha);
static void qls_release(qla_host_t *ha);
static void qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
int error);
static void qls_stop(qla_host_t *ha);
static int qls_send(qla_host_t *ha, struct mbuf **m_headp);
static void qls_tx_done(void *context, int pending);
static int qls_config_lro(qla_host_t *ha);
static void qls_free_lro(qla_host_t *ha);
static void qls_error_recovery(void *context, int pending);
/*
* Hooks to the Operating Systems
*/
static int qls_pci_probe (device_t);
static int qls_pci_attach (device_t);
static int qls_pci_detach (device_t);
static void qls_start(if_t ifp);
static void qls_init(void *arg);
static int qls_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int qls_media_change(if_t ifp);
static void qls_media_status(if_t ifp, struct ifmediareq *ifmr);
static device_method_t qla_pci_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, qls_pci_probe),
DEVMETHOD(device_attach, qls_pci_attach),
DEVMETHOD(device_detach, qls_pci_detach),
{ 0, 0 }
};
static driver_t qla_pci_driver = {
"ql", qla_pci_methods, sizeof (qla_host_t),
};
DRIVER_MODULE(qla8000, pci, qla_pci_driver, 0, 0);
MODULE_DEPEND(qla8000, pci, 1, 1, 1);
MODULE_DEPEND(qla8000, ether, 1, 1, 1);
MALLOC_DEFINE(M_QLA8000BUF, "qla8000buf", "Buffers for qla8000 driver");
static char dev_str[64];
static char ver_str[64];
/*
 * Name: qls_pci_probe
 * Function: Validate the PCI device to be a QLA80XX device
 */
static int
qls_pci_probe(device_t dev)
{
    int devid;

    devid = (pci_get_device(dev) << 16) | (pci_get_vendor(dev));

    /* Only the ISP 8000 ethernet function is supported. */
    if (devid != PCI_QLOGIC_DEV8000)
        return (ENXIO);

    snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
        "Qlogic ISP 8000 PCI CNA Adapter-Ethernet Function",
        QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
        QLA_VERSION_BUILD);
    snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
        QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
        QLA_VERSION_BUILD);
    device_set_desc(dev, dev_str);

    if (bootverbose)
        printf("%s: %s\n ", __func__, dev_str);

    return (BUS_PROBE_DEFAULT);
}
/*
 * Sysctl handler for the "drvr_stats" node: when the user writes 1,
 * dumps per-ring transmit/receive indices and the driver error
 * counters to the console.  Reads return the handled int unchanged.
 */
static int
qls_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
{
    int err = 0, ret;
    qla_host_t *ha;
    uint32_t i;

    err = sysctl_handle_int(oidp, &ret, 0, req);

    /* Read-only access or copyin failure: nothing further to do. */
    if (err || !req->newptr)
        return (err);

    if (ret == 1) {
        ha = (qla_host_t *)arg1;

        /* Per-transmit-ring counters and ring indices. */
        for (i = 0; i < ha->num_tx_rings; i++) {
            QL_DPRINT2((ha->pci_dev,
                "%s: tx_ring[%d].tx_frames= %p\n",
                __func__, i,
                (void *)ha->tx_ring[i].tx_frames));

            QL_DPRINT2((ha->pci_dev,
                "%s: tx_ring[%d].tx_tso_frames= %p\n",
                __func__, i,
                (void *)ha->tx_ring[i].tx_tso_frames));

            QL_DPRINT2((ha->pci_dev,
                "%s: tx_ring[%d].tx_vlan_frames= %p\n",
                __func__, i,
                (void *)ha->tx_ring[i].tx_vlan_frames));

            device_printf(ha->pci_dev,
                "%s: tx_ring[%d].txr_free= 0x%08x\n",
                __func__, i,
                ha->tx_ring[i].txr_free);

            device_printf(ha->pci_dev,
                "%s: tx_ring[%d].txr_next= 0x%08x\n",
                __func__, i,
                ha->tx_ring[i].txr_next);

            device_printf(ha->pci_dev,
                "%s: tx_ring[%d].txr_done= 0x%08x\n",
                __func__, i,
                ha->tx_ring[i].txr_done);

            /* Consumer index lives in DMA-shared memory. */
            device_printf(ha->pci_dev,
                "%s: tx_ring[%d].txr_cons_idx= 0x%08x\n",
                __func__, i,
                *(ha->tx_ring[i].txr_cons_vaddr));
        }

        /* Per-receive-ring counters and large/small buffer queues. */
        for (i = 0; i < ha->num_rx_rings; i++) {
            QL_DPRINT2((ha->pci_dev,
                "%s: rx_ring[%d].rx_int= %p\n",
                __func__, i,
                (void *)ha->rx_ring[i].rx_int));

            QL_DPRINT2((ha->pci_dev,
                "%s: rx_ring[%d].rss_int= %p\n",
                __func__, i,
                (void *)ha->rx_ring[i].rss_int));

            device_printf(ha->pci_dev,
                "%s: rx_ring[%d].lbq_next= 0x%08x\n",
                __func__, i,
                ha->rx_ring[i].lbq_next);

            device_printf(ha->pci_dev,
                "%s: rx_ring[%d].lbq_free= 0x%08x\n",
                __func__, i,
                ha->rx_ring[i].lbq_free);

            device_printf(ha->pci_dev,
                "%s: rx_ring[%d].lbq_in= 0x%08x\n",
                __func__, i,
                ha->rx_ring[i].lbq_in);

            device_printf(ha->pci_dev,
                "%s: rx_ring[%d].sbq_next= 0x%08x\n",
                __func__, i,
                ha->rx_ring[i].sbq_next);

            device_printf(ha->pci_dev,
                "%s: rx_ring[%d].sbq_free= 0x%08x\n",
                __func__, i,
                ha->rx_ring[i].sbq_free);

            device_printf(ha->pci_dev,
                "%s: rx_ring[%d].sbq_in= 0x%08x\n",
                __func__, i,
                ha->rx_ring[i].sbq_in);
        }

        /* Global driver error counters. */
        device_printf(ha->pci_dev, "%s: err_m_getcl = 0x%08x\n",
            __func__, ha->err_m_getcl);
        device_printf(ha->pci_dev, "%s: err_m_getjcl = 0x%08x\n",
            __func__, ha->err_m_getjcl);
        device_printf(ha->pci_dev,
            "%s: err_tx_dmamap_create = 0x%08x\n",
            __func__, ha->err_tx_dmamap_create);
        device_printf(ha->pci_dev,
            "%s: err_tx_dmamap_load = 0x%08x\n",
            __func__, ha->err_tx_dmamap_load);
        device_printf(ha->pci_dev,
            "%s: err_tx_defrag = 0x%08x\n",
            __func__, ha->err_tx_defrag);
    }
    return (err);
}
/*
 * Register the driver's sysctl nodes (version string, debug level and
 * the drvr_stats dump handler) under this device's sysctl tree.
 */
static void
qls_add_sysctls(qla_host_t *ha)
{
    device_t dev = ha->pci_dev;

    /* Read-only driver version string. */
    SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "version", CTLFLAG_RD,
        ver_str, 0, "Driver Version");

    /* Debug tracing starts disabled; tunable at runtime. */
    qls_dbg_level = 0;
    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "debug", CTLFLAG_RW,
        &qls_dbg_level, qls_dbg_level, "Debug Level");

    /* Writing 1 to this node dumps driver statistics to the console. */
    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "drvr_stats",
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
        qls_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");

    return;
}
/*
 * Periodic callout.  Kicks off error recovery when a reset was
 * requested, otherwise schedules the transmit task if packets are
 * queued, then re-arms itself.  The exit/exited flag pair is a
 * handshake with qls_release() on the detach path.
 */
static void
qls_watchdog(void *arg)
{
    qla_host_t *ha = arg;
    if_t ifp;

    ifp = ha->ifp;

    /* Detach requested the watchdog to stop: acknowledge and do not re-arm. */
    if (ha->flags.qla_watchdog_exit) {
        ha->qla_watchdog_exited = 1;
        return;
    }
    ha->qla_watchdog_exited = 0;

    if (!ha->flags.qla_watchdog_pause) {
        if (ha->qla_initiate_recovery) {
            /* Mark paused before handing off to the error task. */
            ha->qla_watchdog_paused = 1;
            ha->qla_initiate_recovery = 0;
            ha->err_inject = 0;
            taskqueue_enqueue(ha->err_tq, &ha->err_task);
        } else if (!if_sendq_empty(ifp) && QL_RUNNING(ifp)) {
            /* Pending packets: schedule the transmit task. */
            taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
        }

        ha->qla_watchdog_paused = 0;
    } else {
        ha->qla_watchdog_paused = 1;
    }

    /* Free-running tick counter, wraps at 1000. */
    ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
    callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
        qls_watchdog, ha);

    return;
}
/*
 * Name: qls_pci_attach
 * Function: attaches the device to the operating system
 *
 * Acquires, in order: PCI BARs 1 and 3, locks, MSI-X vectors + per-ring
 * interrupts, parent DMA tag, DMA buffers, ifnet, taskqueues, watchdog
 * callout and the ioctl cdev.  Any failure jumps to qls_pci_attach_err,
 * where qls_release() unwinds whatever was set up.
 */
static int
qls_pci_attach(device_t dev)
{
    qla_host_t *ha = NULL;
    int i;

    QL_DPRINT2((dev, "%s: enter\n", __func__));

    if ((ha = device_get_softc(dev)) == NULL) {
        device_printf(dev, "cannot get softc\n");
        return (ENOMEM);
    }

    memset(ha, 0, sizeof (qla_host_t));

    if (pci_get_device(dev) != PCI_DEVICE_QLOGIC_8000) {
        device_printf(dev, "device is not QLE8000\n");
        return (ENXIO);
    }

    ha->pci_func = pci_get_function(dev);

    ha->pci_dev = dev;

    pci_enable_busmaster(dev);

    /* BAR 1: primary register window. */
    ha->reg_rid = PCIR_BAR(1);
    ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
        RF_ACTIVE);

    if (ha->pci_reg == NULL) {
        device_printf(dev, "unable to map any ports\n");
        goto qls_pci_attach_err;
    }

    /* BAR 3: secondary register window. */
    ha->reg_rid1 = PCIR_BAR(3);
    ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &ha->reg_rid1, RF_ACTIVE);

    if (ha->pci_reg1 == NULL) {
        device_printf(dev, "unable to map any ports\n");
        goto qls_pci_attach_err;
    }

    mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
    mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);

    qls_add_sysctls(ha);
    qls_hw_add_sysctls(ha);

    ha->flags.lock_init = 1;

    /* Need at least as many vectors as the driver wants to use. */
    ha->msix_count = pci_msix_count(dev);

    if (ha->msix_count < qls_get_msix_count(ha)) {
        device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
            ha->msix_count);
        goto qls_pci_attach_err;
    }

    ha->msix_count = qls_get_msix_count(ha);

    QL_DPRINT2((dev, "\n%s: ha %p pci_func 0x%x msix_count 0x%x"
        " pci_reg %p pci_reg1 %p\n", __func__, ha,
        ha->pci_func, ha->msix_count, ha->pci_reg, ha->pci_reg1));

    if (pci_alloc_msix(dev, &ha->msix_count)) {
        device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__,
            ha->msix_count);
        ha->msix_count = 0;
        goto qls_pci_attach_err;
    }

    /*
     * One interrupt vector per completion (rx) ring.
     * NOTE(review): ha->num_rx_rings is read here but qls_rd_nic_params()
     * runs only after this loop; presumably qls_get_msix_count() or an
     * earlier helper sets it — confirm against the rest of the driver.
     */
    for (i = 0; i < ha->num_rx_rings; i++) {
        ha->irq_vec[i].cq_idx = i;
        ha->irq_vec[i].ha = ha;
        ha->irq_vec[i].irq_rid = 1 + i;

        ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &ha->irq_vec[i].irq_rid,
            (RF_ACTIVE | RF_SHAREABLE));

        if (ha->irq_vec[i].irq == NULL) {
            device_printf(dev, "could not allocate interrupt\n");
            goto qls_pci_attach_err;
        }

        if (bus_setup_intr(dev, ha->irq_vec[i].irq,
            (INTR_TYPE_NET | INTR_MPSAFE), NULL, qls_isr,
            &ha->irq_vec[i], &ha->irq_vec[i].handle)) {
            device_printf(dev,
                "could not setup interrupt\n");
            goto qls_pci_attach_err;
        }
    }

    qls_rd_nic_params(ha);

    /* allocate parent dma tag */
    if (qls_alloc_parent_dma_tag(ha)) {
        device_printf(dev, "%s: qls_alloc_parent_dma_tag failed\n",
            __func__);
        goto qls_pci_attach_err;
    }

    /* alloc all dma buffers */
    if (qls_alloc_dma(ha)) {
        device_printf(dev, "%s: qls_alloc_dma failed\n", __func__);
        goto qls_pci_attach_err;
    }

    /* create the o.s ethernet interface */
    qls_init_ifnet(dev, ha);

    /* Watchdog starts active but paused until the interface comes up. */
    ha->flags.qla_watchdog_active = 1;
    ha->flags.qla_watchdog_pause = 1;

    TASK_INIT(&ha->tx_task, 0, qls_tx_done, ha);
    ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
        taskqueue_thread_enqueue, &ha->tx_tq);
    taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
        device_get_nameunit(ha->pci_dev));

    callout_init(&ha->tx_callout, 1);
    ha->flags.qla_callout_init = 1;

    /* create ioctl device interface */
    if (qls_make_cdev(ha)) {
        device_printf(dev, "%s: qls_make_cdev failed\n", __func__);
        goto qls_pci_attach_err;
    }

    callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
        qls_watchdog, ha);

    TASK_INIT(&ha->err_task, 0, qls_error_recovery, ha);
    ha->err_tq = taskqueue_create_fast("qla_errq", M_NOWAIT,
        taskqueue_thread_enqueue, &ha->err_tq);
    taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
        device_get_nameunit(ha->pci_dev));

    QL_DPRINT2((dev, "%s: exit 0\n", __func__));
    return (0);

qls_pci_attach_err:

    qls_release(ha);

    QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__));
    return (ENXIO);
}
/*
 * Name: qls_pci_detach
 * Function: Unhooks the device from the operating system
 */
static int
qls_pci_detach(device_t dev)
{
    qla_host_t *ha;

    QL_DPRINT2((dev, "%s: enter\n", __func__));

    ha = device_get_softc(dev);
    if (ha == NULL) {
        device_printf(dev, "cannot get softc\n");
        return (ENOMEM);
    }

    /* Quiesce the interface under the driver lock, then tear down. */
    (void)QLA_LOCK(ha, __func__, 0);
    qls_stop(ha);
    QLA_UNLOCK(ha, __func__);

    qls_release(ha);

    QL_DPRINT2((dev, "%s: exit\n", __func__));

    return (0);
}
/*
 * Name: qls_release
 * Function: Releases the resources allocated for the device
 *
 * Safe to call with a partially-initialized softc (the attach error
 * path uses it); each teardown step checks whether its resource was
 * actually created.  Teardown order is the reverse of attach.
 */
static void
qls_release(qla_host_t *ha)
{
    device_t dev;
    int i;

    dev = ha->pci_dev;

    /* Drain and destroy the taskqueues before freeing anything they use. */
    if (ha->err_tq) {
        taskqueue_drain(ha->err_tq, &ha->err_task);
        taskqueue_free(ha->err_tq);
    }

    if (ha->tx_tq) {
        taskqueue_drain(ha->tx_tq, &ha->tx_task);
        taskqueue_free(ha->tx_tq);
    }

    qls_del_cdev(ha);

    /* Handshake with qls_watchdog(): ask it to exit and wait for the ack. */
    if (ha->flags.qla_watchdog_active) {
        ha->flags.qla_watchdog_exit = 1;

        while (ha->qla_watchdog_exited == 0)
            qls_mdelay(__func__, 1);
    }

    if (ha->flags.qla_callout_init)
        callout_stop(&ha->tx_callout);

    if (ha->ifp != NULL)
        ether_ifdetach(ha->ifp);

    qls_free_dma(ha);
    qls_free_parent_dma_tag(ha);

    /* Tear down per-ring interrupt handlers and IRQ resources. */
    for (i = 0; i < ha->num_rx_rings; i++) {
        if (ha->irq_vec[i].handle) {
            (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
                ha->irq_vec[i].handle);
        }

        if (ha->irq_vec[i].irq) {
            (void)bus_release_resource(dev, SYS_RES_IRQ,
                ha->irq_vec[i].irq_rid,
                ha->irq_vec[i].irq);
        }
    }

    if (ha->msix_count)
        pci_release_msi(dev);

    if (ha->flags.lock_init) {
        mtx_destroy(&ha->tx_lock);
        mtx_destroy(&ha->hw_lock);
    }

    /* Release the two PCI register windows (BAR 1 and BAR 3). */
    if (ha->pci_reg)
        (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
            ha->pci_reg);

    if (ha->pci_reg1)
        (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
            ha->pci_reg1);
}
/*
* DMA Related Functions
*/
/*
 * bus_dmamap_load() completion callback: store the single mapped
 * segment's bus address into the caller-supplied slot, or 0 on error
 * so the caller can detect a failed load.
 */
static void
qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
    bus_addr_t *paddr = (bus_addr_t *)arg;

    if (error) {
        *paddr = 0;
        printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
        return;
    }

    *paddr = segs[0].ds_addr;
}
/*
 * Allocate one coherent DMA buffer described by dma_buf (size and
 * alignment are inputs; tag, map, kva and bus address are outputs).
 * Returns 0 on success; on failure every partially-created resource
 * is released before returning non-zero.
 */
int
qls_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
    int ret = 0;
    device_t dev;
    bus_addr_t b_addr;

    dev = ha->pci_dev;

    QL_DPRINT2((dev, "%s: enter\n", __func__));

    /* Single-segment tag under the parent tag, 4GB boundary. */
    ret = bus_dma_tag_create(
        ha->parent_tag,/* parent */
        dma_buf->alignment,
        ((bus_size_t)(1ULL << 32)),/* boundary */
        BUS_SPACE_MAXADDR, /* lowaddr */
        BUS_SPACE_MAXADDR, /* highaddr */
        NULL, NULL, /* filter, filterarg */
        dma_buf->size, /* maxsize */
        1, /* nsegments */
        dma_buf->size, /* maxsegsize */
        0, /* flags */
        NULL, NULL, /* lockfunc, lockarg */
        &dma_buf->dma_tag);

    if (ret) {
        device_printf(dev, "%s: could not create dma tag\n", __func__);
        goto qls_alloc_dmabuf_exit;
    }
    ret = bus_dmamem_alloc(dma_buf->dma_tag,
        (void **)&dma_buf->dma_b,
        (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
        &dma_buf->dma_map);
    if (ret) {
        bus_dma_tag_destroy(dma_buf->dma_tag);
        device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
        goto qls_alloc_dmabuf_exit;
    }

    /* qls_dmamap_callback() writes the bus address (or 0) into b_addr. */
    ret = bus_dmamap_load(dma_buf->dma_tag,
        dma_buf->dma_map,
        dma_buf->dma_b,
        dma_buf->size,
        qls_dmamap_callback,
        &b_addr, BUS_DMA_NOWAIT);

    if (ret || !b_addr) {
        bus_dma_tag_destroy(dma_buf->dma_tag);
        bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
            dma_buf->dma_map);
        ret = -1;
        goto qls_alloc_dmabuf_exit;
    }

    dma_buf->dma_addr = b_addr;

qls_alloc_dmabuf_exit:
    QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
        __func__, ret, (void *)dma_buf->dma_tag,
        (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
        dma_buf->size));

    return ret;
}
/*
 * Release a DMA buffer allocated by qls_alloc_dmabuf().  The unload
 * must precede the free, and the tag is destroyed last.
 */
void
qls_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
    bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
    bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
    bus_dma_tag_destroy(dma_buf->dma_tag);
}
/*
 * Create the top-level DMA tag all other driver tags derive from.
 * Sets ha->flags.parent_tag on success so qls_free_parent_dma_tag()
 * knows whether there is anything to destroy.  Returns 0 or -1.
 */
static int
qls_alloc_parent_dma_tag(qla_host_t *ha)
{
    int ret;
    device_t dev;

    dev = ha->pci_dev;

    /*
     * Allocate parent DMA Tag
     */
    ret = bus_dma_tag_create(
        bus_get_dma_tag(dev), /* parent */
        1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
        BUS_SPACE_MAXADDR, /* lowaddr */
        BUS_SPACE_MAXADDR, /* highaddr */
        NULL, NULL, /* filter, filterarg */
        BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
        0, /* nsegments */
        BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
        0, /* flags */
        NULL, NULL, /* lockfunc, lockarg */
        &ha->parent_tag);

    if (ret) {
        device_printf(dev, "%s: could not create parent dma tag\n",
            __func__);
        return (-1);
    }

    ha->flags.parent_tag = 1;

    return (0);
}
/* Destroy the parent DMA tag if qls_alloc_parent_dma_tag() created one. */
static void
qls_free_parent_dma_tag(qla_host_t *ha)
{
    if (!ha->flags.parent_tag)
        return;

    bus_dma_tag_destroy(ha->parent_tag);
    ha->flags.parent_tag = 0;
}
/*
 * Name: qls_init_ifnet
 * Function: Creates the Network Device Interface and Registers it with the O.S
 */
static void
qls_init_ifnet(device_t dev, qla_host_t *ha)
{
    if_t ifp;

    QL_DPRINT2((dev, "%s: enter\n", __func__));

    ifp = ha->ifp = if_alloc(IFT_ETHER);
/*
 * NOTE(review): the four lines prefixed with '-' below are unified-diff
 * deletion residue from the page this file was extracted from; they are
 * not compilable C and should be removed when the file is restored.
 */
-
- if (ifp == NULL)
- panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
-
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));

    if_setbaudrate(ifp, IF_Gbps(10));

    /* Wire up the standard ifnet entry points. */
    if_setinitfn(ifp, qls_init);
    if_setsoftc(ifp, ha);
    if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
    if_setioctlfn(ifp, qls_ioctl);
    if_setstartfn(ifp, qls_start);

    if_setsendqlen(ifp, qls_get_ifq_snd_maxlen(ha));
    if_setsendqready(ifp);

    ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;

    /* Pick the smallest cluster size that fits a full frame. */
    if (ha->max_frame_size <= MCLBYTES) {
        ha->msize = MCLBYTES;
    } else if (ha->max_frame_size <= MJUMPAGESIZE) {
        ha->msize = MJUMPAGESIZE;
    } else
        ha->msize = MJUM9BYTES;

    ether_ifattach(ifp, qls_get_mac_addr(ha));

    /* Advertise checksum offload, TSO, VLAN and jumbo capabilities. */
    if_setcapabilities(ifp, IFCAP_JUMBO_MTU);
    if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
    if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
    if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0);

    if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0);
    if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTSO, 0);
    if_setcapabilitiesbit(ifp, IFCAP_LINKSTATE, 0);

    if_setcapenable(ifp, if_getcapabilities(ifp));

    if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

    ifmedia_init(&ha->media, IFM_IMASK, qls_media_change, qls_media_status);

    ifmedia_add(&ha->media, (IFM_ETHER | qls_get_optics(ha) | IFM_FDX), 0,
        NULL);
    ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

    ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

    QL_DPRINT2((dev, "%s: exit\n", __func__));

    return;
}
/*
 * (Re)initialize the interface with the driver lock held: stop the
 * hardware, rebuild buffers and LRO state, then bring the hw interface
 * up and mark the ifnet running.  Silently returns on buffer or LRO
 * allocation failure, leaving the interface down.
 */
static void
qls_init_locked(qla_host_t *ha)
{
    if_t ifp = ha->ifp;

    qls_stop(ha);

    qls_flush_xmt_bufs(ha);

    if (qls_alloc_rcv_bufs(ha) != 0)
        return;

    if (qls_config_lro(ha))
        return;

    /* Pick up a possibly user-changed MAC address. */
    bcopy(if_getlladdr(ha->ifp), ha->mac_addr, ETHER_ADDR_LEN);

    if_sethwassist(ifp, CSUM_IP);
    if_sethwassistbits(ifp, CSUM_TCP, 0);
    if_sethwassistbits(ifp, CSUM_UDP, 0);
    if_sethwassistbits(ifp, CSUM_TSO, 0);

    if (qls_init_hw_if(ha) == 0) {
        ifp = ha->ifp;
        if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
        /* Let the watchdog resume transmit/recovery scheduling. */
        ha->flags.qla_watchdog_pause = 0;
    }

    return;
}
/* ifnet init entry point: run qls_init_locked() under the driver lock. */
static void
qls_init(void *arg)
{
    qla_host_t *ha = (qla_host_t *)arg;

    QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

    (void)QLA_LOCK(ha, __func__, 0);
    qls_init_locked(ha);
    QLA_UNLOCK(ha, __func__);

    QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}
/*
 * if_foreach_llmaddr() callback: append one link-level multicast
 * address to the flat table in arg.  Returns 0 (stop counting) once
 * the hardware table limit is reached, 1 otherwise.
 */
static u_int
qls_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
{
    uint8_t *mta = (uint8_t *)arg;

    if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
        return (0);

    bcopy(LLADDR(sdl), mta + (mcnt * Q8_MAC_ADDR_LEN), Q8_MAC_ADDR_LEN);

    return (1);
}
/*
 * Collect the interface's multicast addresses and program them into
 * the hardware filter.  If the driver lock cannot be taken without
 * sleeping, the update is skipped silently (best effort).
 */
static void
qls_set_multi(qla_host_t *ha, uint32_t add_multi)
{
    uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
    int mcnt;

    mcnt = if_foreach_llmaddr(ha->ifp, qls_copy_maddr, mta);

    if (QLA_LOCK(ha, __func__, 1) == 0) {
        qls_hw_set_multi(ha, mta, mcnt, add_multi);
        QLA_UNLOCK(ha, __func__);
    }
}
/*
 * ifnet ioctl entry point.  Handles address/MTU/flags/multicast/media/
 * capability requests; everything else falls through to ether_ioctl().
 */
static int
qls_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
    int ret = 0;
    struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
    struct ifaddr *ifa = (struct ifaddr *)data;
#endif
    qla_host_t *ha;

    ha = (qla_host_t *)if_getsoftc(ifp);

    switch (cmd) {
    case SIOCSIFADDR:
        QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
            __func__, cmd));

#ifdef INET
        /* For IPv4, bring the interface up and let ARP initialize. */
        if (ifa->ifa_addr->sa_family == AF_INET) {
            if_setflagbits(ifp, IFF_UP, 0);
            if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
                (void)QLA_LOCK(ha, __func__, 0);
                qls_init_locked(ha);
                QLA_UNLOCK(ha, __func__);
            }
            QL_DPRINT4((ha->pci_dev,
                "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
                __func__, cmd,
                ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

            arp_ifinit(ifp, ifa);
            break;
        }
#endif
        ether_ioctl(ifp, cmd, data);
        break;

    case SIOCSIFMTU:
        QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
            __func__, cmd));

        if (ifr->ifr_mtu > QLA_MAX_MTU) {
            ret = EINVAL;
        } else {
            (void) QLA_LOCK(ha, __func__, 0);

            if_setmtu(ifp, ifr->ifr_mtu);
            ha->max_frame_size =
                if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;

            QLA_UNLOCK(ha, __func__);

            /* NOTE(review): ret is always 0 here; this check is dead code. */
            if (ret)
                ret = EINVAL;
        }

        break;

    case SIOCSIFFLAGS:
        QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
            __func__, cmd));

        (void)QLA_LOCK(ha, __func__, 0);

        if (if_getflags(ifp) & IFF_UP) {
            if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
                /* Already running: only promisc/allmulti changed. */
                if ((if_getflags(ifp) ^ ha->if_flags) &
                    IFF_PROMISC) {
                    ret = qls_set_promisc(ha);
                } else if ((if_getflags(ifp) ^ ha->if_flags) &
                    IFF_ALLMULTI) {
                    ret = qls_set_allmulti(ha);
                }
            } else {
                ha->max_frame_size = if_getmtu(ifp) +
                    ETHER_HDR_LEN + ETHER_CRC_LEN;
                qls_init_locked(ha);
            }
        } else {
            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
                qls_stop(ha);
            ha->if_flags = if_getflags(ifp);
        }

        QLA_UNLOCK(ha, __func__);

        break;

    case SIOCADDMULTI:
        QL_DPRINT4((ha->pci_dev,
            "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
            qls_set_multi(ha, 1);
        }
        break;

    case SIOCDELMULTI:
        QL_DPRINT4((ha->pci_dev,
            "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
            qls_set_multi(ha, 0);
        }
        break;

    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        QL_DPRINT4((ha->pci_dev,
            "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
            __func__, cmd));
        ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
        break;

    case SIOCSIFCAP:
    {
        int mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);

        QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
            __func__, cmd));

        /* Toggle only the capabilities that changed. */
        if (mask & IFCAP_HWCSUM)
            if_togglecapenable(ifp, IFCAP_HWCSUM);
        if (mask & IFCAP_TSO4)
            if_togglecapenable(ifp, IFCAP_TSO4);
        if (mask & IFCAP_VLAN_HWTAGGING)
            if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
        if (mask & IFCAP_VLAN_HWTSO)
            if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);

        if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
            qls_init(ha);

        VLAN_CAPABILITIES(ifp);
        break;
    }

    default:
        QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n",
            __func__, cmd));
        ret = ether_ioctl(ifp, cmd, data);
        break;
    }

    return (ret);
}
/*
 * ifmedia change callback: the adapter has no selectable media, so
 * only validate that the requested media type is ethernet.
 */
static int
qls_media_change(if_t ifp)
{
    qla_host_t *ha = (qla_host_t *)if_getsoftc(ifp);
    struct ifmedia *ifm = &ha->media;
    int ret;

    QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

    ret = (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) ? EINVAL : 0;

    QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));

    return (ret);
}
/*
 * ifmedia status callback: report link validity/activity; full duplex
 * and the optics type are reported only when the link is up.
 */
static void
qls_media_status(if_t ifp, struct ifmediareq *ifmr)
{
    qla_host_t *ha = (qla_host_t *)if_getsoftc(ifp);

    QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    /* Refresh link state from the hardware before reporting it. */
    qls_update_link_state(ha);

    if (ha->link_up) {
        ifmr->ifm_status |= IFM_ACTIVE;
        ifmr->ifm_active |= (IFM_FDX | qls_get_optics(ha));
    }

    QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,
        (ha->link_up ? "link_up" : "link_down")));
}
/*
 * ifnet transmit start routine.  Uses mtx_trylock on the tx lock so a
 * concurrent transmitter simply skips the pass; reaps completed
 * transmits, verifies link, then drains the send queue through
 * qls_send().  On a send failure the packet is re-queued and OACTIVE
 * is set so the watchdog retries later.
 */
static void
qls_start(if_t ifp)
{
    int i, ret = 0;
    struct mbuf *m_head;
    qla_host_t *ha = (qla_host_t *)if_getsoftc(ifp);

    QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

    /* Another thread is already transmitting; let it do the work. */
    if (!mtx_trylock(&ha->tx_lock)) {
        QL_DPRINT8((ha->pci_dev,
            "%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
        return;
    }

    /* Reap completions; clear OACTIVE only if every ring reaped clean. */
    if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) ==
        IFF_DRV_RUNNING) {
        for (i = 0; i < ha->num_tx_rings; i++) {
            ret |= qls_hw_tx_done(ha, i);
        }

        if (ret == 0)
            if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
    }

    if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING) {
        QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
        QLA_TX_UNLOCK(ha);
        return;
    }

    /* No transmitting without link; try one refresh before giving up. */
    if (!ha->link_up) {
        qls_update_link_state(ha);
        if (!ha->link_up) {
            QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__));
            QLA_TX_UNLOCK(ha);
            return;
        }
    }

    while (!if_sendq_empty(ifp)) {
        m_head = if_dequeue(ifp);

        if (m_head == NULL) {
            QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n",
                __func__));
            break;
        }

        if (qls_send(ha, &m_head)) {
            /* m_head == NULL means qls_send() consumed/freed it. */
            if (m_head == NULL)
                break;
            QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__));
            if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
            if_sendq_prepend(ifp, m_head);
            break;
        }
        /* Send a copy of the frame to the BPF listener */
        ETHER_BPF_MTAP(ifp, m_head);
    }

    QLA_TX_UNLOCK(ha);
    QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
    return;
}
/*
 * DMA-map one packet and hand it to the hardware transmit ring.
 * Ownership rules: on success the mbuf belongs to the ring (freed at
 * completion).  On failure the mbuf is freed and *m_headp zeroed,
 * EXCEPT for ENOMEM, where the caller keeps the mbuf so it can be
 * re-queued and retried later.
 */
static int
qls_send(qla_host_t *ha, struct mbuf **m_headp)
{
    bus_dma_segment_t segs[QLA_MAX_SEGMENTS];
    bus_dmamap_t map;
    int nsegs;
    int ret = -1;
    uint32_t tx_idx;
    struct mbuf *m_head = *m_headp;
    uint32_t txr_idx = 0;

    QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

    /* check if flowid is set */
    if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE)
        txr_idx = m_head->m_pkthdr.flowid & (ha->num_tx_rings - 1);

    tx_idx = ha->tx_ring[txr_idx].txr_next;

    map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;

    ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
        BUS_DMA_NOWAIT);

    if (ret == EFBIG) {
        /* Too many segments: defragment into fewer clusters and retry. */
        struct mbuf *m;

        QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
            m_head->m_pkthdr.len));

        m = m_defrag(m_head, M_NOWAIT);
        if (m == NULL) {
            ha->err_tx_defrag++;
            m_freem(m_head);
            *m_headp = NULL;
            device_printf(ha->pci_dev,
                "%s: m_defrag() = NULL [%d]\n",
                __func__, ret);
            return (ENOBUFS);
        }
        m_head = m;
        *m_headp = m_head;

        if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
            segs, &nsegs, BUS_DMA_NOWAIT))) {
            ha->err_tx_dmamap_load++;

            device_printf(ha->pci_dev,
                "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
                __func__, ret, m_head->m_pkthdr.len);

            /* ENOMEM is retryable: leave the mbuf with the caller. */
            if (ret != ENOMEM) {
                m_freem(m_head);
                *m_headp = NULL;
            }
            return (ret);
        }

    } else if (ret) {
        ha->err_tx_dmamap_load++;

        device_printf(ha->pci_dev,
            "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
            __func__, ret, m_head->m_pkthdr.len);

        if (ret != ENOMEM) {
            m_freem(m_head);
            *m_headp = NULL;
        }
        return (ret);
    }

    QL_ASSERT(ha, (nsegs != 0), ("qls_send: empty packet"));

    bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

    if (!(ret = qls_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx))) {
        /* Ring owns the mbuf now; remember it for completion time. */
        ha->tx_ring[txr_idx].count++;
        ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
        ha->tx_ring[txr_idx].tx_buf[tx_idx].map = map;
    } else {
        /* EINVAL means the packet can never be sent: drop it. */
        if (ret == EINVAL) {
            if (m_head)
                m_freem(m_head);
            *m_headp = NULL;
        }
    }

    QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
    return (ret);
}
/*
 * Bring the interface down: clear RUNNING/OACTIVE, wait for the
 * watchdog callout to acknowledge the pause, then tear down the hw
 * interface and release LRO, transmit and receive buffers.
 */
static void
qls_stop(qla_host_t *ha)
{
    if_t ifp = ha->ifp;

    if_setdrvflagbits(ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));

    /* Handshake: wait until qls_watchdog() observes the pause request. */
    ha->flags.qla_watchdog_pause = 1;

    while (!ha->qla_watchdog_paused)
        qls_mdelay(__func__, 1);

    qls_del_hw_if(ha);

    qls_free_lro(ha);

    qls_flush_xmt_bufs(ha);
    qls_free_rcv_bufs(ha);

    return;
}
/*
* Buffer Management Functions for Transmit and Receive Rings
*/
/*
* Release mbuf after it sent on the wire
*/
/*
 * Release mbuf after it sent on the wire: unmap and free any mbuf
 * still attached to the given transmit-buffer slot.
 */
static void
qls_flush_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
    struct mbuf *m;

    QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

    m = txb->m_head;
    if (m != NULL) {
        bus_dmamap_unload(ha->tx_tag, txb->map);
        txb->m_head = NULL;
        m_freem(m);
    }

    QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}
/* Release any mbufs still held by the transmit descriptors of all rings. */
static void
qls_flush_xmt_bufs(qla_host_t *ha)
{
    int r, s;

    for (r = 0; r < ha->num_tx_rings; r++)
        for (s = 0; s < NUM_TX_DESCRIPTORS; s++)
            qls_flush_tx_buf(ha, &ha->tx_ring[r].tx_buf[s]);
}
/*
 * For receive ring r: create a dmamap for every descriptor slot, then
 * allocate/map an mbuf per slot and post its bus address into the
 * small-buffer queue entries.  Returns 0 on success, -1 on failure.
 *
 * NOTE(review): on a qls_get_mbuf() failure only the current slot's
 * dmamap is destroyed; the maps created for the remaining slots appear
 * to be left behind (qls_free_rcv_bufs() only destroys maps for slots
 * with an mbuf attached) — confirm and clean up on the error path.
 */
static int
qls_alloc_rcv_mbufs(qla_host_t *ha, int r)
{
    int i, j, ret = 0;
    qla_rx_buf_t *rxb;
    qla_rx_ring_t *rx_ring;
    volatile q81_bq_addr_e_t *sbq_e;

    rx_ring = &ha->rx_ring[r];

    /* Pass 1: create a dmamap per descriptor, unwinding on failure. */
    for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
        rxb = &rx_ring->rx_buf[i];

        ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);

        if (ret) {
            device_printf(ha->pci_dev,
                "%s: dmamap[%d, %d] failed\n", __func__, r, i);

            for (j = 0; j < i; j++) {
                rxb = &rx_ring->rx_buf[j];
                bus_dmamap_destroy(ha->rx_tag, rxb->map);
            }
            goto qls_alloc_rcv_mbufs_err;
        }
    }

    rx_ring = &ha->rx_ring[r];

    sbq_e = rx_ring->sbq_vaddr;

    rxb = &rx_ring->rx_buf[0];

    /* Pass 2: attach an mbuf to each slot and post it to the hba. */
    for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
        if (!(ret = qls_get_mbuf(ha, rxb, NULL))) {
            /*
             * set the physical address in the
             * corresponding descriptor entry in the
             * receive ring/queue for the hba
             */
            sbq_e->addr_lo = rxb->paddr & 0xFFFFFFFF;
            sbq_e->addr_hi = (rxb->paddr >> 32) & 0xFFFFFFFF;
        } else {
            device_printf(ha->pci_dev,
                "%s: qls_get_mbuf [%d, %d] failed\n",
                __func__, r, i);
            bus_dmamap_destroy(ha->rx_tag, rxb->map);
            goto qls_alloc_rcv_mbufs_err;
        }

        rxb++;
        sbq_e++;
    }
    return 0;

qls_alloc_rcv_mbufs_err:
    return (-1);
}
/*
 * Release every posted receive buffer: unmap, destroy the dmamap and
 * free the mbuf for each slot that has one, then clear the ring's
 * bookkeeping.
 */
static void
qls_free_rcv_bufs(qla_host_t *ha)
{
    int r, i;
    qla_rx_ring_t *rxr;
    qla_rx_buf_t *rxb;

    for (r = 0; r < ha->num_rx_rings; r++) {
        rxr = &ha->rx_ring[r];

        for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
            rxb = &rxr->rx_buf[i];

            if (rxb->m_head == NULL)
                continue;

            bus_dmamap_unload(ha->rx_tag, rxb->map);
            bus_dmamap_destroy(ha->rx_tag, rxb->map);
            m_freem(rxb->m_head);
        }

        bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
    }
}
/*
 * Allocate and map the receive-ring mbufs for every rx ring.
 * Returns 0 on success; on the first failure all previously allocated
 * receive buffers are released and the error is returned.
 */
static int
qls_alloc_rcv_bufs(qla_host_t *ha)
{
    int r, ret = 0;
    qla_rx_ring_t *rxr;

    for (r = 0; r < ha->num_rx_rings; r++) {
        rxr = &ha->rx_ring[r];
        bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
    }

    for (r = 0; r < ha->num_rx_rings; r++) {
        ret = qls_alloc_rcv_mbufs(ha, r);

        if (ret) {
            /*
             * Bug fix: stop on the first failure.  The old loop
             * kept iterating after qls_free_rcv_bufs(), so a later
             * ring's success overwrote ret and the function could
             * return 0 even though earlier rings had been freed.
             */
            qls_free_rcv_bufs(ha);
            break;
        }
    }

    return (ret);
}
/*
 * Attach a receive mbuf to rxb: allocate a cluster (or recycle nmp),
 * align its data to 8 bytes, DMA-map it and record the bus address in
 * rxb->paddr.  Returns 0 on success; on failure the mbuf is freed and
 * rxb->m_head is cleared.
 */
int
qls_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
{
    struct mbuf *mp = nmp;
    int ret = 0;
    uint32_t offset;
    bus_dma_segment_t segs[1];
    int nsegs;

    QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

    if (mp == NULL) {
        mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ha->msize);

        if (mp == NULL) {
            /* Track cluster-pool exhaustion per pool size. */
            if (ha->msize == MCLBYTES)
                ha->err_m_getcl++;
            else
                ha->err_m_getjcl++;

            ret = ENOBUFS;
            device_printf(ha->pci_dev,
                "%s: m_getcl failed\n", __func__);
            goto exit_qls_get_mbuf;
        }
        mp->m_len = mp->m_pkthdr.len = ha->msize;
    } else {
        /* Recycled mbuf: reset length and data pointer. */
        mp->m_len = mp->m_pkthdr.len = ha->msize;
        mp->m_data = mp->m_ext.ext_buf;
        mp->m_next = NULL;
    }

    /* align the receive buffers to 8 byte boundary */
    offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
    if (offset) {
        offset = 8 - offset;
        m_adj(mp, offset);
    }

    /*
     * Using memory from the mbuf cluster pool, invoke the bus_dma
     * machinery to arrange the memory mapping.
     */
    ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
        mp, segs, &nsegs, BUS_DMA_NOWAIT);

    /*
     * Bug fix: only read segs[0] after a successful single-segment
     * load.  The old code read segs[0].ds_addr unconditionally, but
     * segs[] is uninitialized when bus_dmamap_load_mbuf_sg() fails.
     */
    if (ret == 0 && nsegs == 1)
        rxb->paddr = segs[0].ds_addr;
    else
        rxb->paddr = 0;

    if (ret || !rxb->paddr || (nsegs != 1)) {
        m_freem(mp);
        rxb->m_head = NULL;
        device_printf(ha->pci_dev,
            "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
            __func__, ret, (long long unsigned int)rxb->paddr,
            nsegs);
        ret = -1;
        goto exit_qls_get_mbuf;
    }
    rxb->m_head = mp;
    bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);

exit_qls_get_mbuf:
    QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
    return (ret);
}
static void
qls_tx_done(void *context, int pending)
{
qla_host_t *ha = context;
if_t ifp;
ifp = ha->ifp;
if (!ifp)
return;
if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
return;
}
qls_start(ha->ifp);
return;
}
/*
 * Initialize one LRO context per receive ring.  Returns 0 on success;
 * on failure every context initialized so far is freed and -1 is
 * returned.  Compiled out (always 0) without INET/INET6.
 */
static int
qls_config_lro(qla_host_t *ha)
{
#if defined(INET) || defined(INET6)
    int i;
    struct lro_ctrl *lro;

    for (i = 0; i < ha->num_rx_rings; i++) {
        lro = &ha->rx_ring[i].lro;

        if (tcp_lro_init(lro)) {
            device_printf(ha->pci_dev, "%s: tcp_lro_init failed\n",
                __func__);
            /*
             * Bug fix: free the LRO contexts initialized before
             * the failure.  The old code leaked them: lro_init
             * was never set, so qls_free_lro() would not release
             * them either.
             */
            while (i-- > 0)
                tcp_lro_free(&ha->rx_ring[i].lro);
            return (-1);
        }
        lro->ifp = ha->ifp;
    }
    ha->flags.lro_init = 1;

    QL_DPRINT2((ha->pci_dev, "%s: LRO initialized\n", __func__));
#endif
    return (0);
}
/*
 * Release the per-ring LRO contexts created by qls_config_lro().
 * No-op if LRO was never initialized or INET/INET6 are compiled out.
 */
static void
qls_free_lro(qla_host_t *ha)
{
#if defined(INET) || defined(INET6)
    int i;

    if (ha->flags.lro_init == 0)
        return;

    for (i = 0; i < ha->num_rx_rings; i++)
        tcp_lro_free(&ha->rx_ring[i].lro);

    ha->flags.lro_init = 0;
#endif
}
/*
 * Taskqueue handler: recover from an adapter error by reinitializing
 * the device.
 */
static void
qls_error_recovery(void *context, int pending)
{
	qla_host_t *ha = context;

	qls_init(ha);
}
diff --git a/sys/dev/re/if_re.c b/sys/dev/re/if_re.c
index 594ed9d60379..69593e68d36f 100644
--- a/sys/dev/re/if_re.c
+++ b/sys/dev/re/if_re.c
@@ -1,4162 +1,4157 @@
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) 1997, 1998-2003
* Bill Paul <wpaul@windriver.com>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
/*
* RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver
*
* Written by Bill Paul <wpaul@windriver.com>
* Senior Networking Software Engineer
* Wind River Systems
*/
/*
* This driver is designed to support RealTek's next generation of
* 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
* seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
* RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
*
* The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
* with the older 8139 family, however it also supports a special
* C+ mode of operation that provides several new performance enhancing
* features. These include:
*
* o Descriptor based DMA mechanism. Each descriptor represents
* a single packet fragment. Data buffers may be aligned on
* any byte boundary.
*
* o 64-bit DMA
*
* o TCP/IP checksum offload for both RX and TX
*
* o High and normal priority transmit DMA rings
*
* o VLAN tag insertion and extraction
*
* o TCP large send (segmentation offload)
*
* Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
* programming API is fairly straightforward. The RX filtering, EEPROM
* access and PHY access is the same as it is on the older 8139 series
* chips.
*
* The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
* same programming API and feature set as the 8139C+ with the following
* differences and additions:
*
* o 1000Mbps mode
*
* o Jumbo frames
*
* o GMII and TBI ports/registers for interfacing with copper
* or fiber PHYs
*
* o RX and TX DMA rings can have up to 1024 descriptors
* (the 8139C+ allows a maximum of 64)
*
* o Slight differences in register layout from the 8139C+
*
* The TX start and timer interrupt registers are at different locations
* on the 8169 than they are on the 8139C+. Also, the status word in the
* RX descriptor has a slightly different bit layout. The 8169 does not
* have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
* copper gigE PHY.
*
* The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
* (the 'S' stands for 'single-chip'). These devices have the same
* programming API as the older 8169, but also have some vendor-specific
* registers for the on-board PHY. The 8110S is a LAN-on-motherboard
* part designed to be pin-compatible with the RealTek 8100 10/100 chip.
*
* This driver takes advantage of the RX and TX checksum offload and
* VLAN tag insertion/extraction features. It also implements TX
* interrupt moderation using the timer interrupt registers, which
* significantly reduces TX interrupt load. There is also support
* for jumbo frames, however the 8169/8169S/8110S can not transmit
* jumbo frames larger than 7440, so the max MTU possible with this
* driver is 7422 bytes.
*/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/debugnet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/bpf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/rl/if_rlreg.h>
MODULE_DEPEND(re, pci, 1, 1, 1);
MODULE_DEPEND(re, ether, 1, 1, 1);
MODULE_DEPEND(re, miibus, 1, 1, 1);
/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
/* Tunables. */
static int intr_filter = 0;
TUNABLE_INT("hw.re.intr_filter", &intr_filter);
static int msi_disable = 0;
TUNABLE_INT("hw.re.msi_disable", &msi_disable);
static int msix_disable = 0;
TUNABLE_INT("hw.re.msix_disable", &msix_disable);
static int prefer_iomap = 0;
TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap);
#define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
/*
* Various supported device vendors/types and their names.
*/
/*
 * PCI vendor/device ID table consulted by re_probe().  Only the
 * rl_vid/rl_did pair is matched; rl_name becomes the probe
 * description.  The third field is 0 for every entry and is not
 * consulted by re_probe().
 */
static const struct rl_type re_devs[] = {
{ DLINK_VENDORID, DLINK_DEVICEID_528T, 0,
"D-Link DGE-528(T) Gigabit Ethernet Adapter" },
{ DLINK_VENDORID, DLINK_DEVICEID_530T_REVC, 0,
"D-Link DGE-530(T) Gigabit Ethernet Adapter" },
{ RT_VENDORID, RT_DEVICEID_2600, 0,
"RealTek Killer E2600 Gigabit Ethernet Controller" },
{ RT_VENDORID, RT_DEVICEID_8139, 0,
"RealTek 8139C+ 10/100BaseTX" },
{ RT_VENDORID, RT_DEVICEID_8101E, 0,
"RealTek 810xE PCIe 10/100baseTX" },
{ RT_VENDORID, RT_DEVICEID_8168, 0,
"RealTek 8168/8111 B/C/CP/D/DP/E/F/G PCIe Gigabit Ethernet" },
{ RT_VENDORID, RT_DEVICEID_8161, 0,
"RealTek 8168 Gigabit Ethernet" },
{ NCUBE_VENDORID, RT_DEVICEID_8168, 0,
"TP-Link TG-3468 v2 (RTL8168) Gigabit Ethernet" },
{ RT_VENDORID, RT_DEVICEID_8169, 0,
"RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" },
{ RT_VENDORID, RT_DEVICEID_8169SC, 0,
"RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
{ COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0,
"Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" },
{ LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0,
"Linksys EG1032 (RTL8169S) Gigabit Ethernet" },
{ USR_VENDORID, USR_DEVICEID_997902, 0,
"US Robotics 997902 (RTL8169S) Gigabit Ethernet" }
};
/*
 * Hardware-revision table: maps a chip revision code to its family
 * (RL_8139, RL_8139CPLUS or RL_8169), a human-readable name suffix,
 * and the maximum MTU that revision supports.  Terminated by an
 * all-zero sentinel entry.
 */
static const struct rl_hwrev re_hwrevs[] = {
{ RL_HWREV_8139, RL_8139, "", RL_MTU },
{ RL_HWREV_8139A, RL_8139, "A", RL_MTU },
{ RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU },
{ RL_HWREV_8139B, RL_8139, "B", RL_MTU },
{ RL_HWREV_8130, RL_8139, "8130", RL_MTU },
{ RL_HWREV_8139C, RL_8139, "C", RL_MTU },
{ RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C", RL_MTU },
{ RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+", RL_MTU },
{ RL_HWREV_8168B_SPIN1, RL_8169, "8168", RL_JUMBO_MTU },
{ RL_HWREV_8169, RL_8169, "8169", RL_JUMBO_MTU },
{ RL_HWREV_8169S, RL_8169, "8169S", RL_JUMBO_MTU },
{ RL_HWREV_8110S, RL_8169, "8110S", RL_JUMBO_MTU },
{ RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB", RL_JUMBO_MTU },
{ RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
{ RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL", RL_JUMBO_MTU },
{ RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
{ RL_HWREV_8100, RL_8139, "8100", RL_MTU },
{ RL_HWREV_8101, RL_8139, "8101", RL_MTU },
{ RL_HWREV_8100E, RL_8169, "8100E", RL_MTU },
{ RL_HWREV_8101E, RL_8169, "8101E", RL_MTU },
{ RL_HWREV_8102E, RL_8169, "8102E", RL_MTU },
{ RL_HWREV_8102EL, RL_8169, "8102EL", RL_MTU },
{ RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL", RL_MTU },
{ RL_HWREV_8103E, RL_8169, "8103E", RL_MTU },
{ RL_HWREV_8401E, RL_8169, "8401E", RL_MTU },
{ RL_HWREV_8402, RL_8169, "8402", RL_MTU },
{ RL_HWREV_8105E, RL_8169, "8105E", RL_MTU },
{ RL_HWREV_8105E_SPIN1, RL_8169, "8105E", RL_MTU },
{ RL_HWREV_8106E, RL_8169, "8106E", RL_MTU },
{ RL_HWREV_8168B_SPIN2, RL_8169, "8168", RL_JUMBO_MTU },
{ RL_HWREV_8168B_SPIN3, RL_8169, "8168", RL_JUMBO_MTU },
{ RL_HWREV_8168C, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
{ RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
{ RL_HWREV_8168CP, RL_8169, "8168CP/8111CP", RL_JUMBO_MTU_6K },
{ RL_HWREV_8168D, RL_8169, "8168D/8111D", RL_JUMBO_MTU_9K },
{ RL_HWREV_8168DP, RL_8169, "8168DP/8111DP", RL_JUMBO_MTU_9K },
{ RL_HWREV_8168E, RL_8169, "8168E/8111E", RL_JUMBO_MTU_9K},
{ RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K},
{ RL_HWREV_8168EP, RL_8169, "8168EP/8111EP", RL_JUMBO_MTU_9K},
{ RL_HWREV_8168F, RL_8169, "8168F/8111F", RL_JUMBO_MTU_9K},
{ RL_HWREV_8168FP, RL_8169, "8168FP/8111FP", RL_JUMBO_MTU_9K},
{ RL_HWREV_8168G, RL_8169, "8168G/8111G", RL_JUMBO_MTU_9K},
{ RL_HWREV_8168GU, RL_8169, "8168GU/8111GU", RL_JUMBO_MTU_9K},
{ RL_HWREV_8168H, RL_8169, "8168H/8111H", RL_JUMBO_MTU_9K},
{ RL_HWREV_8411, RL_8169, "8411", RL_JUMBO_MTU_9K},
{ RL_HWREV_8411B, RL_8169, "8411B", RL_JUMBO_MTU_9K},
{ 0, 0, NULL, 0 }
};
static int re_probe (device_t);
static int re_attach (device_t);
static int re_detach (device_t);
static int re_encap (struct rl_softc *, struct mbuf **);
static void re_dma_map_addr (void *, bus_dma_segment_t *, int, int);
static int re_allocmem (device_t, struct rl_softc *);
static __inline void re_discard_rxbuf
(struct rl_softc *, int);
static int re_newbuf (struct rl_softc *, int);
static int re_jumbo_newbuf (struct rl_softc *, int);
static int re_rx_list_init (struct rl_softc *);
static int re_jrx_list_init (struct rl_softc *);
static int re_tx_list_init (struct rl_softc *);
#ifdef RE_FIXUP_RX
static __inline void re_fixup_rx
(struct mbuf *);
#endif
static int re_rxeof (struct rl_softc *, int *);
static void re_txeof (struct rl_softc *);
#ifdef DEVICE_POLLING
static int re_poll (if_t, enum poll_cmd, int);
static int re_poll_locked (if_t, enum poll_cmd, int);
#endif
static int re_intr (void *);
static void re_intr_msi (void *);
static void re_tick (void *);
static void re_int_task (void *, int);
static void re_start (if_t);
static void re_start_locked (if_t);
static void re_start_tx (struct rl_softc *);
static int re_ioctl (if_t, u_long, caddr_t);
static void re_init (void *);
static void re_init_locked (struct rl_softc *);
static void re_stop (struct rl_softc *);
static void re_watchdog (struct rl_softc *);
static int re_suspend (device_t);
static int re_resume (device_t);
static int re_shutdown (device_t);
static int re_ifmedia_upd (if_t);
static void re_ifmedia_sts (if_t, struct ifmediareq *);
static void re_eeprom_putbyte (struct rl_softc *, int);
static void re_eeprom_getword (struct rl_softc *, int, u_int16_t *);
static void re_read_eeprom (struct rl_softc *, caddr_t, int, int);
static int re_gmii_readreg (device_t, int, int);
static int re_gmii_writereg (device_t, int, int, int);
static int re_miibus_readreg (device_t, int, int);
static int re_miibus_writereg (device_t, int, int, int);
static void re_miibus_statchg (device_t);
static void re_set_jumbo (struct rl_softc *, int);
static void re_set_rxmode (struct rl_softc *);
static void re_reset (struct rl_softc *);
static void re_setwol (struct rl_softc *);
static void re_clrwol (struct rl_softc *);
static void re_set_linkspeed (struct rl_softc *);
DEBUGNET_DEFINE(re);
#ifdef DEV_NETMAP /* see ixgbe.c for details */
#include <dev/netmap/if_re_netmap.h>
MODULE_DEPEND(re, netmap, 1, 1, 1);
#endif /* !DEV_NETMAP */
#ifdef RE_DIAG
static int re_diag (struct rl_softc *);
#endif
static void re_add_sysctls (struct rl_softc *);
static int re_sysctl_stats (SYSCTL_HANDLER_ARGS);
static int sysctl_int_range (SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_re_int_mod (SYSCTL_HANDLER_ARGS);
/*
 * newbus method table: standard device entry points plus the MII bus
 * accessors defined in this file.
 */
static device_method_t re_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, re_probe),
DEVMETHOD(device_attach, re_attach),
DEVMETHOD(device_detach, re_detach),
DEVMETHOD(device_suspend, re_suspend),
DEVMETHOD(device_resume, re_resume),
DEVMETHOD(device_shutdown, re_shutdown),
/* MII interface */
DEVMETHOD(miibus_readreg, re_miibus_readreg),
DEVMETHOD(miibus_writereg, re_miibus_writereg),
DEVMETHOD(miibus_statchg, re_miibus_statchg),
DEVMETHOD_END
};
/* Driver declaration: name, method table, per-instance softc size. */
static driver_t re_driver = {
"re",
re_methods,
sizeof(struct rl_softc)
};
/* Attach re(4) to the PCI bus and hang a miibus child off it. */
DRIVER_MODULE(re, pci, re_driver, 0, 0);
DRIVER_MODULE(miibus, re, miibus_driver, 0, 0);
#define EE_SET(x) \
CSR_WRITE_1(sc, RL_EECMD, \
CSR_READ_1(sc, RL_EECMD) | x)
#define EE_CLR(x) \
CSR_WRITE_1(sc, RL_EECMD, \
CSR_READ_1(sc, RL_EECMD) & ~x)
/*
* Send a read command and address to the EEPROM, check for ACK.
*/
/*
 * Clock the EEPROM read opcode and word address out to the serial
 * EEPROM, most-significant bit first.  The command width depends on
 * the detected EEPROM address width (sc->rl_eewidth).
 */
static void
re_eeprom_putbyte(struct rl_softc *sc, int addr)
{
	int cmd, mask;

	cmd = addr | (RL_9346_READ << sc->rl_eewidth);

	/* Shift each bit out on DATAIN, strobing the clock per bit. */
	for (mask = 1 << (sc->rl_eewidth + 3); mask != 0; mask >>= 1) {
		if ((cmd & mask) != 0)
			EE_SET(RL_EE_DATAIN);
		else
			EE_CLR(RL_EE_DATAIN);
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}
}
/*
* Read a word of data stored in the EEPROM at address 'addr.'
*/
/*
 * Read the 16-bit EEPROM word at 'addr' into *dest: send the read
 * command, then clock in 16 data bits, MSB first.
 */
static void
re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
{
	u_int16_t result;
	int bit;

	/* Send address of word we want to read. */
	re_eeprom_putbyte(sc, addr);

	/* Clock the data bits in from DATAOUT. */
	result = 0;
	for (bit = 0x8000; bit != 0; bit >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if ((CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) != 0)
			result |= bit;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	*dest = result;
}
/*
* Read a sequence of words from the EEPROM.
*/
/*
 * Read 'cnt' consecutive 16-bit words starting at EEPROM offset 'off'
 * into 'dest', entering and leaving EEPROM programming mode around
 * the transfer.
 */
static void
re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
{
	u_int16_t val;
	int idx;

	/* Enter EEPROM programming mode. */
	CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
	DELAY(100);

	for (idx = 0; idx < cnt; idx++) {
		CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
		val = 0;
		re_eeprom_getword(sc, off + idx, &val);
		CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
		*(u_int16_t *)(dest + (idx * 2)) = val;
	}

	/* Leave programming mode. */
	CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
}
/*
 * Read a PHY register through the GMII PHYAR mailbox.  Returns the
 * 16-bit register value, or 0 if the access times out.
 */
static int
re_gmii_readreg(device_t dev, int phy, int reg)
{
	struct rl_softc *sc = device_get_softc(dev);
	u_int32_t val;
	int tries;

	/* Let the rgephy driver read the GMEDIASTAT register. */
	if (reg == RL_GMEDIASTAT)
		return (CSR_READ_1(sc, RL_GMEDIASTAT));

	/* Post the read, then poll for the controller to set BUSY. */
	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);
	for (tries = 0; tries < RL_PHY_TIMEOUT; tries++) {
		val = CSR_READ_4(sc, RL_PHYAR);
		if ((val & RL_PHYAR_BUSY) != 0)
			break;
		DELAY(25);
	}

	if (tries == RL_PHY_TIMEOUT) {
		device_printf(sc->rl_dev, "PHY read failed\n");
		return (0);
	}

	/*
	 * Controller requires a 20us delay to process next MDIO request.
	 */
	DELAY(20);

	return (val & RL_PHYAR_PHYDATA);
}
/*
 * Write a PHY register through the GMII PHYAR mailbox.  Always
 * returns 0; a timeout is only reported on the console.
 */
static int
re_gmii_writereg(device_t dev, int phy, int reg, int data)
{
	struct rl_softc *sc = device_get_softc(dev);
	u_int32_t val;
	int tries;

	/* Post the write, then poll for the controller to clear BUSY. */
	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);
	for (tries = 0; tries < RL_PHY_TIMEOUT; tries++) {
		val = CSR_READ_4(sc, RL_PHYAR);
		if ((val & RL_PHYAR_BUSY) == 0)
			break;
		DELAY(25);
	}

	if (tries == RL_PHY_TIMEOUT) {
		device_printf(sc->rl_dev, "PHY write failed\n");
		return (0);
	}

	/*
	 * Controller requires a 20us delay to process next MDIO request.
	 */
	DELAY(20);

	return (0);
}
/*
 * miibus read method.  GMII-capable (RL_8169 family) chips go through
 * the PHYAR mailbox; on the 8139C+ the PHY registers are mapped into
 * the MAC register space and are read directly.
 */
static int
re_miibus_readreg(device_t dev, int phy, int reg)
{
	struct rl_softc *sc = device_get_softc(dev);
	u_int16_t mapped_reg;
	u_int16_t val;

	if (sc->rl_type == RL_8169)
		return ((u_int16_t)re_gmii_readreg(dev, phy, reg));

	switch (reg) {
	case MII_BMCR:
		mapped_reg = RL_BMCR;
		break;
	case MII_BMSR:
		mapped_reg = RL_BMSR;
		break;
	case MII_ANAR:
		mapped_reg = RL_ANAR;
		break;
	case MII_ANER:
		mapped_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		mapped_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		return (0);
	case RL_MEDIASTAT:
		/*
		 * Allow the rlphy driver to read the media status
		 * register: with a non-NWAY link partner this register
		 * reports the parallel-detection result.
		 */
		return (CSR_READ_1(sc, RL_MEDIASTAT));
	default:
		device_printf(sc->rl_dev, "bad phy register\n");
		return (0);
	}

	val = CSR_READ_2(sc, mapped_reg);
	if (sc->rl_type == RL_8139CPLUS && mapped_reg == RL_BMCR) {
		/* 8139C+ has different bit layout. */
		val &= ~(BMCR_LOOP | BMCR_ISO);
	}
	return (val);
}
/*
 * miibus write method.  GMII-capable (RL_8169 family) chips go
 * through the PHYAR mailbox via re_gmii_writereg(); on the 8139C+
 * the PHY registers are mapped into the MAC register space and are
 * written directly.  Always returns 0 (write errors are not
 * propagated to the MII layer).
 *
 * Fix: removed the unreachable "break" that followed "return (0)"
 * in the MII_PHYIDR1/MII_PHYIDR2 case (dead code).
 */
static int
re_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct rl_softc *sc;
	u_int16_t re8139_reg = 0;
	int rval = 0;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8169) {
		rval = re_gmii_writereg(dev, phy, reg, data);
		return (rval);
	}

	switch (reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		if (sc->rl_type == RL_8139CPLUS) {
			/* 8139C+ has different bit layout. */
			data &= ~(BMCR_LOOP | BMCR_ISO);
		}
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		/* PHY ID registers are not writable; silently ignore. */
		return (0);
	default:
		device_printf(sc->rl_dev, "bad phy register\n");
		return (0);
	}
	CSR_WRITE_2(sc, re8139_reg, data);
	return (0);
}
/*
 * miibus status-change callback: recompute RL_FLAG_LINK from the
 * resolved media status.  Does nothing unless the interface is up
 * and running.
 */
static void
re_miibus_statchg(device_t dev)
{
	struct rl_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->rl_miibus);
	if_t ifp = sc->rl_ifp;

	if (mii == NULL || ifp == NULL ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	sc->rl_flags &= ~RL_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		case IFM_1000_T:
			/* Fast-ethernet-only parts never link at 1000Mbps. */
			if ((sc->rl_flags & RL_FLAG_FASTETHER) == 0)
				sc->rl_flags |= RL_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	/*
	 * RealTek controllers do not provide any interface to the RX/TX
	 * MACs for resolved speed, duplex and flow-control parameters.
	 */
}
/*
 * if_foreach_llmaddr() callback: fold one multicast address into the
 * 64-bit hash filter.  The top 6 bits of the big-endian CRC select
 * one of the 64 filter bits spread across two 32-bit words.
 */
static u_int
re_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *hashes = arg;
	uint32_t bit;

	bit = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
	hashes[bit / 32] |= (1 << (bit % 32));
	return (1);
}
/*
* Set the RX configuration and 64-bit multicast hash filter.
*/
/*
 * Set the RX configuration and 64-bit multicast hash filter.
 * Promiscuous/allmulti mode opens the hash filter completely;
 * otherwise the filter is built from the interface's multicast
 * address list.
 */
static void
re_set_rxmode(struct rl_softc *sc)
{
	if_t ifp;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t rxfilt, swap;

	RL_LOCK_ASSERT(sc);

	ifp = sc->rl_ifp;

	rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;
	if ((sc->rl_flags & RL_FLAG_EARLYOFF) != 0)
		rxfilt |= RL_RXCFG_EARLYOFF;
	else if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0)
		rxfilt |= RL_RXCFG_EARLYOFFV2;

	if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		if ((if_getflags(ifp) & IFF_PROMISC) != 0)
			rxfilt |= RL_RXCFG_RX_ALLPHYS;
		/*
		 * Unlike other hardwares, we have to explicitly set
		 * RL_RXCFG_RX_MULTI to receive multicast frames in
		 * promiscuous mode.
		 */
		rxfilt |= RL_RXCFG_RX_MULTI;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		if_foreach_llmaddr(ifp, re_hash_maddr, hashes);
		if (hashes[0] != 0 || hashes[1] != 0) {
			/*
			 * For some unfathomable reason, RealTek decided
			 * to reverse the order of the multicast hash
			 * registers in the PCI Express parts, so the
			 * hash pattern must be written in reverse order
			 * for those devices.
			 */
			if ((sc->rl_flags & RL_FLAG_PCIE) != 0) {
				swap = bswap32(hashes[0]);
				hashes[0] = bswap32(hashes[1]);
				hashes[1] = swap;
			}
			rxfilt |= RL_RXCFG_RX_MULTI;
		}
		if (sc->rl_hwrev->rl_rev == RL_HWREV_8168F) {
			/* Disable multicast filtering due to silicon bug. */
			hashes[0] = 0xffffffff;
			hashes[1] = 0xffffffff;
		}
	}

	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
}
/*
 * Issue a software reset and wait for the chip to clear the reset
 * bit, then apply the revision-specific post-reset fixups.
 */
static void
re_reset(struct rl_softc *sc)
{
	int tries;

	RL_LOCK_ASSERT(sc);

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (tries = 0; tries < RL_TIMEOUT; tries++) {
		DELAY(10);
		if ((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET) == 0)
			break;
	}
	if (tries == RL_TIMEOUT)
		device_printf(sc->rl_dev, "reset never completed!\n");

	if ((sc->rl_flags & RL_FLAG_MACRESET) != 0)
		CSR_WRITE_1(sc, 0x82, 1);
	if (sc->rl_hwrev->rl_rev == RL_HWREV_8169S)
		re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0);
}
#ifdef RE_DIAG
/*
* The following routine is designed to test for a defect on some
* 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
* lines connected to the bus, however for a 32-bit only card, they
* should be pulled high. The result of this defect is that the
* NIC will not work right if you plug it into a 64-bit slot: DMA
* operations will be done with 64-bit transfers, which will fail
* because the 64-bit data lines aren't connected.
*
* There's no way to work around this (short of talking a soldering
* iron to the board), however we can detect it. The method we use
* here is to put the NIC into digital loopback mode, set the receiver
* to promiscuous mode, and then try to send a frame. We then compare
* the frame data we sent to what was received. If the data matches,
* then the NIC is working correctly, otherwise we know the user has
* a defective NIC which has been mistakenly plugged into a 64-bit PCI
* slot. In the latter case, there's no way the NIC can work correctly,
* so we print out a message on the console and abort the device attach.
*/
/*
 * Loopback diagnostic (see the block comment above): send one frame
 * with the chip in digital loopback and verify the received copy is
 * byte-identical.  Returns 0 on success, ENOBUFS/EIO on failure.
 * Must be called before ether_ifattach(); the interface is stopped
 * again before returning.
 */
static int
re_diag(struct rl_softc *sc)
{
if_t ifp = sc->rl_ifp;
struct mbuf *m0;
struct ether_header *eh;
struct rl_desc *cur_rx;
u_int16_t status;
u_int32_t rxstat;
int total_len, i, error = 0, phyaddr;
u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };
/* Allocate a single mbuf */
MGETHDR(m0, M_NOWAIT, MT_DATA);
if (m0 == NULL)
return (ENOBUFS);
RL_LOCK(sc);
/*
* Initialize the NIC in test mode. This sets the chip up
* so that it can send and receive frames, but performs the
* following special functions:
* - Puts receiver in promiscuous mode
* - Enables digital loopback mode
* - Leaves interrupts turned off
*/
if_setflagbit(ifp, IFF_PROMISC, 0);
sc->rl_testmode = 1;
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
re_init_locked(sc);
sc->rl_flags |= RL_FLAG_LINK;
/* 8169-class chips answer at PHY address 1; 8139C+ at 0. */
if (sc->rl_type == RL_8169)
phyaddr = 1;
else
phyaddr = 0;
/* Reset the PHY, then force loopback mode. */
re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET);
for (i = 0; i < RL_TIMEOUT; i++) {
status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR);
if (!(status & BMCR_RESET))
break;
}
re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP);
CSR_WRITE_2(sc, RL_ISR, RL_INTRS);
DELAY(100000);
/* Put some data in the mbuf */
eh = mtod(m0, struct ether_header *);
bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
eh->ether_type = htons(ETHERTYPE_IP);
m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;
/*
* Queue the packet, start transmission.
* Note: IF_HANDOFF() ultimately calls re_start() for us.
*/
CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
RL_UNLOCK(sc);
/* XXX: re_diag must not be called when in ALTQ mode */
/* if_handoff() consumes m0; clear our reference so the cleanup
path below does not free it a second time. */
if_handoff(ifp, m0, ifp);
RL_LOCK(sc);
m0 = NULL;
/* Wait for it to propagate through the chip */
DELAY(100000);
for (i = 0; i < RL_TIMEOUT; i++) {
status = CSR_READ_2(sc, RL_ISR);
CSR_WRITE_2(sc, RL_ISR, status);
if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
(RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
break;
DELAY(10);
}
if (i == RL_TIMEOUT) {
device_printf(sc->rl_dev,
"diagnostic failed, failed to receive packet in"
" loopback mode\n");
error = EIO;
goto done;
}
/*
* The packet should have been dumped into the first
* entry in the RX DMA ring. Grab it from there.
*/
bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
sc->rl_ldata.rl_rx_list_map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
sc->rl_ldata.rl_rx_desc[0].rx_dmamap,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
sc->rl_ldata.rl_rx_desc[0].rx_dmamap);
/* Take ownership of the received mbuf; freed via m0 in 'done'. */
m0 = sc->rl_ldata.rl_rx_desc[0].rx_m;
sc->rl_ldata.rl_rx_desc[0].rx_m = NULL;
eh = mtod(m0, struct ether_header *);
cur_rx = &sc->rl_ldata.rl_rx_list[0];
total_len = RL_RXBYTES(cur_rx);
rxstat = le32toh(cur_rx->rl_cmdstat);
if (total_len != ETHER_MIN_LEN) {
device_printf(sc->rl_dev,
"diagnostic failed, received short packet\n");
error = EIO;
goto done;
}
/* Test that the received packet data matches what we sent. */
if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
ntohs(eh->ether_type) != ETHERTYPE_IP) {
device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n");
device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n",
dst, ":", src, ":", ETHERTYPE_IP);
device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n",
eh->ether_dhost, ":", eh->ether_shost, ":",
ntohs(eh->ether_type));
device_printf(sc->rl_dev, "You may have a defective 32-bit "
"NIC plugged into a 64-bit PCI slot.\n");
device_printf(sc->rl_dev, "Please re-install the NIC in a "
"32-bit slot for proper operation.\n");
device_printf(sc->rl_dev, "Read the re(4) man page for more "
"details.\n");
error = EIO;
}
done:
/* Turn interface off, release resources */
sc->rl_testmode = 0;
sc->rl_flags &= ~RL_FLAG_LINK;
if_setflagbit(ifp, 0, IFF_PROMISC);
re_stop(sc);
if (m0 != NULL)
m_freem(m0);
RL_UNLOCK(sc);
return (error);
}
#endif
/*
* Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device
* IDs against our list and return a device name if we find a match.
*/
/*
 * Probe for a RealTek 8139C+/8169/8110 chip: reject the device
 * variants handled by other drivers, then match the vendor/device ID
 * pair against re_devs[].
 */
static int
re_probe(device_t dev)
{
	const struct rl_type *entry;
	uint16_t vendor_id, device_id, rev_id, subdev_id;
	int i;

	vendor_id = pci_get_vendor(dev);
	device_id = pci_get_device(dev);
	rev_id = pci_get_revid(dev);
	subdev_id = pci_get_subdevice(dev);

	if (vendor_id == LINKSYS_VENDORID &&
	    device_id == LINKSYS_DEVICEID_EG1032 &&
	    subdev_id != LINKSYS_SUBDEVICE_EG1032_REV3) {
		/*
		 * Only attach to rev. 3 of the Linksys EG1032 adapter.
		 * Rev. 2 is supported by sk(4).
		 */
		return (ENXIO);
	}

	if (vendor_id == RT_VENDORID && device_id == RT_DEVICEID_8139 &&
	    rev_id != 0x20) {
		/* 8139, let rl(4) take care of this device. */
		return (ENXIO);
	}

	for (i = 0, entry = re_devs; i < nitems(re_devs); i++, entry++) {
		if (vendor_id == entry->rl_vid && device_id == entry->rl_did) {
			device_set_desc(dev, entry->rl_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
/*
* Map a single buffer address.
*/
/*
 * bus_dmamap_load() callback for single-segment maps: store the
 * segment's bus address into the bus_addr_t pointed to by 'arg'.
 */
static void
re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr;

	if (error != 0)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	paddr = arg;
	*paddr = segs->ds_addr;
}
/*
 * Allocate every DMA resource the adapter needs: the parent tag,
 * TX/RX mbuf tags and maps (plus a jumbo RX tag and maps when
 * RL_FLAG_JUMBOV2 is set), both descriptor rings, and the hardware
 * statistics buffer.  Returns 0 on success or a bus_dma error /
 * ENOMEM on failure.
 *
 * NOTE(review): on failure, resources allocated so far are not
 * released here — presumably the caller's error path frees them;
 * verify against re_attach()/re_detach().
 */
static int
re_allocmem(device_t dev, struct rl_softc *sc)
{
bus_addr_t lowaddr;
bus_size_t rx_list_size, tx_list_size;
int error;
int i;
rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc);
tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc);
/*
* Allocate the parent bus DMA tag appropriate for PCI.
* In order to use DAC, RL_CPLUSCMD_PCI_DAC bit of RL_CPLUS_CMD
* register should be set. However some RealTek chips are known
* to be buggy on DAC handling, therefore disable DAC by limiting
* DMA address space to 32bit. PCIe variants of RealTek chips
* may not have the limitation.
*/
lowaddr = BUS_SPACE_MAXADDR;
if ((sc->rl_flags & RL_FLAG_PCIE) == 0)
lowaddr = BUS_SPACE_MAXADDR_32BIT;
error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
lowaddr, BUS_SPACE_MAXADDR, NULL, NULL,
BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
NULL, NULL, &sc->rl_parent_tag);
if (error) {
device_printf(dev, "could not allocate parent DMA tag\n");
return (error);
}
/*
* Allocate map for TX mbufs.
*/
error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0,
BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0,
NULL, NULL, &sc->rl_ldata.rl_tx_mtag);
if (error) {
device_printf(dev, "could not allocate TX DMA tag\n");
return (error);
}
/*
* Allocate map for RX mbufs.
*/
if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t),
0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL,
&sc->rl_ldata.rl_jrx_mtag);
if (error) {
device_printf(dev,
"could not allocate jumbo RX DMA tag\n");
return (error);
}
}
error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0,
BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag);
if (error) {
device_printf(dev, "could not allocate RX DMA tag\n");
return (error);
}
/*
* Allocate map for TX descriptor list.
*/
error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
NULL, tx_list_size, 1, tx_list_size, 0,
NULL, NULL, &sc->rl_ldata.rl_tx_list_tag);
if (error) {
device_printf(dev, "could not allocate TX DMA ring tag\n");
return (error);
}
/* Allocate DMA'able memory for the TX ring */
error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
(void **)&sc->rl_ldata.rl_tx_list,
BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
&sc->rl_ldata.rl_tx_list_map);
if (error) {
device_printf(dev, "could not allocate TX DMA ring\n");
return (error);
}
/* Load the map for the TX ring. */
sc->rl_ldata.rl_tx_list_addr = 0;
error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
tx_list_size, re_dma_map_addr,
&sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);
if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) {
device_printf(dev, "could not load TX DMA ring\n");
return (ENOMEM);
}
/* Create DMA maps for TX buffers */
for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0,
&sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
if (error) {
device_printf(dev, "could not create DMA map for TX\n");
return (error);
}
}
/*
* Allocate map for RX descriptor list.
*/
error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
NULL, rx_list_size, 1, rx_list_size, 0,
NULL, NULL, &sc->rl_ldata.rl_rx_list_tag);
if (error) {
device_printf(dev, "could not create RX DMA ring tag\n");
return (error);
}
/* Allocate DMA'able memory for the RX ring */
error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag,
(void **)&sc->rl_ldata.rl_rx_list,
BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
&sc->rl_ldata.rl_rx_list_map);
if (error) {
device_printf(dev, "could not allocate RX DMA ring\n");
return (error);
}
/* Load the map for the RX ring. */
sc->rl_ldata.rl_rx_list_addr = 0;
error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag,
sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
rx_list_size, re_dma_map_addr,
&sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT);
if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) {
device_printf(dev, "could not load RX DMA ring\n");
return (ENOMEM);
}
/* Create DMA maps for RX buffers */
if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
&sc->rl_ldata.rl_jrx_sparemap);
if (error) {
device_printf(dev,
"could not create spare DMA map for jumbo RX\n");
return (error);
}
for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
&sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
if (error) {
device_printf(dev,
"could not create DMA map for jumbo RX\n");
return (error);
}
}
}
error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
&sc->rl_ldata.rl_rx_sparemap);
if (error) {
device_printf(dev, "could not create spare DMA map for RX\n");
return (error);
}
for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
&sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
if (error) {
device_printf(dev, "could not create DMA map for RX\n");
return (error);
}
}
/* Create DMA map for statistics. */
error = bus_dma_tag_create(sc->rl_parent_tag, RL_DUMP_ALIGN, 0,
BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
sizeof(struct rl_stats), 1, sizeof(struct rl_stats), 0, NULL, NULL,
&sc->rl_ldata.rl_stag);
if (error) {
device_printf(dev, "could not create statistics DMA tag\n");
return (error);
}
/* Allocate DMA'able memory for statistics. */
error = bus_dmamem_alloc(sc->rl_ldata.rl_stag,
(void **)&sc->rl_ldata.rl_stats,
BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
&sc->rl_ldata.rl_smap);
if (error) {
device_printf(dev,
"could not allocate statistics DMA memory\n");
return (error);
}
/* Load the map for statistics. */
sc->rl_ldata.rl_stats_addr = 0;
error = bus_dmamap_load(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap,
sc->rl_ldata.rl_stats, sizeof(struct rl_stats), re_dma_map_addr,
&sc->rl_ldata.rl_stats_addr, BUS_DMA_NOWAIT);
if (error != 0 || sc->rl_ldata.rl_stats_addr == 0) {
device_printf(dev, "could not load statistics DMA memory\n");
return (ENOMEM);
}
return (0);
}
/*
 * Attach the interface.  Allocate softc structures, probe the chip
 * revision, set up DMA rings, do ifmedia setup and ethernet/BPF
 * attach.  Returns 0 or an errno; on any failure re_detach() is
 * called to release whatever was allocated so far.
 */
static int
re_attach(device_t dev)
{
	u_char eaddr[ETHER_ADDR_LEN];
	u_int16_t as[ETHER_ADDR_LEN / 2];
	struct rl_softc *sc;
	if_t ifp;
	const struct rl_hwrev *hw_rev;
	int capmask, error = 0, hwrev, i, msic, msixc,
	    phy, reg, rid;
	u_int32_t cap, ctl;
	u_int16_t devid, re_did = 0;
	uint8_t cfg;

	sc = device_get_softc(dev);
	sc->rl_dev = dev;

	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	devid = pci_get_device(dev);
	/*
	 * Prefer memory space register mapping over IO space.
	 * Because RTL8169SC does not seem to work when memory mapping
	 * is used always activate io mapping.
	 */
	if (devid == RT_DEVICEID_8169SC)
		prefer_iomap = 1;
	if (prefer_iomap == 0) {
		sc->rl_res_id = PCIR_BAR(1);
		sc->rl_res_type = SYS_RES_MEMORY;
		/* RTL8168/8101E seems to use different BARs. */
		if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E)
			sc->rl_res_id = PCIR_BAR(2);
	} else {
		sc->rl_res_id = PCIR_BAR(0);
		sc->rl_res_type = SYS_RES_IOPORT;
	}
	sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
	    &sc->rl_res_id, RF_ACTIVE);
	/* Fall back to I/O space if the preferred mapping failed. */
	if (sc->rl_res == NULL && prefer_iomap == 0) {
		sc->rl_res_id = PCIR_BAR(0);
		sc->rl_res_type = SYS_RES_IOPORT;
		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
		    &sc->rl_res_id, RF_ACTIVE);
	}
	if (sc->rl_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->rl_btag = rman_get_bustag(sc->rl_res);
	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);

	msic = pci_msi_count(dev);
	msixc = pci_msix_count(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
		sc->rl_flags |= RL_FLAG_PCIE;
		sc->rl_expcap = reg;
	}
	if (bootverbose) {
		device_printf(dev, "MSI count : %d\n", msic);
		device_printf(dev, "MSI-X count : %d\n", msixc);
	}
	if (msix_disable > 0)
		msixc = 0;
	if (msi_disable > 0)
		msic = 0;
	/* Prefer MSI-X to MSI. */
	if (msixc > 0) {
		msixc = RL_MSI_MESSAGES;
		rid = PCIR_BAR(4);
		sc->rl_res_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &rid, RF_ACTIVE);
		if (sc->rl_res_pba == NULL) {
			device_printf(sc->rl_dev,
			    "could not allocate MSI-X PBA resource\n");
		}
		if (sc->rl_res_pba != NULL &&
		    pci_alloc_msix(dev, &msixc) == 0) {
			if (msixc == RL_MSI_MESSAGES) {
				device_printf(dev, "Using %d MSI-X message\n",
				    msixc);
				sc->rl_flags |= RL_FLAG_MSIX;
			} else
				pci_release_msi(dev);
		}
		/* Roll back the PBA resource if MSI-X setup failed. */
		if ((sc->rl_flags & RL_FLAG_MSIX) == 0) {
			if (sc->rl_res_pba != NULL)
				bus_release_resource(dev, SYS_RES_MEMORY, rid,
				    sc->rl_res_pba);
			sc->rl_res_pba = NULL;
			msixc = 0;
		}
	}
	/* Prefer MSI to INTx. */
	if (msixc == 0 && msic > 0) {
		msic = RL_MSI_MESSAGES;
		if (pci_alloc_msi(dev, &msic) == 0) {
			if (msic == RL_MSI_MESSAGES) {
				device_printf(dev, "Using %d MSI message\n",
				    msic);
				sc->rl_flags |= RL_FLAG_MSI;
				/* Explicitly set MSI enable bit. */
				CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
				cfg = CSR_READ_1(sc, RL_CFG2);
				cfg |= RL_CFG2_MSI;
				CSR_WRITE_1(sc, RL_CFG2, cfg);
				CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
			} else
				pci_release_msi(dev);
		}
		if ((sc->rl_flags & RL_FLAG_MSI) == 0)
			msic = 0;
	}

	/* Allocate interrupt */
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) {
		rid = 0;
		sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (sc->rl_irq[0] == NULL) {
			device_printf(dev, "couldn't allocate IRQ resources\n");
			error = ENXIO;
			goto fail;
		}
	} else {
		/* MSI/MSI-X vectors use resource ids starting at 1. */
		for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) {
			sc->rl_irq[i] = bus_alloc_resource_any(dev,
			    SYS_RES_IRQ, &rid, RF_ACTIVE);
			if (sc->rl_irq[i] == NULL) {
				device_printf(dev,
				    "couldn't allocate IRQ resources for "
				    "message %d\n", rid);
				error = ENXIO;
				goto fail;
			}
		}
	}

	/* Clear a stale MSI enable bit when running in INTx mode. */
	if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
		CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
		cfg = CSR_READ_1(sc, RL_CFG2);
		if ((cfg & RL_CFG2_MSI) != 0) {
			device_printf(dev, "turning off MSI enable bit.\n");
			cfg &= ~RL_CFG2_MSI;
			CSR_WRITE_1(sc, RL_CFG2, cfg);
		}
		CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
	}

	/* Disable ASPM L0S/L1 and CLKREQ. */
	if (sc->rl_expcap != 0) {
		cap = pci_read_config(dev, sc->rl_expcap +
		    PCIER_LINK_CAP, 2);
		if ((cap & PCIEM_LINK_CAP_ASPM) != 0) {
			ctl = pci_read_config(dev, sc->rl_expcap +
			    PCIER_LINK_CTL, 2);
			if ((ctl & (PCIEM_LINK_CTL_ECPM |
			    PCIEM_LINK_CTL_ASPMC))!= 0) {
				ctl &= ~(PCIEM_LINK_CTL_ECPM |
				    PCIEM_LINK_CTL_ASPMC);
				pci_write_config(dev, sc->rl_expcap +
				    PCIER_LINK_CTL, ctl, 2);
				device_printf(dev, "ASPM disabled\n");
			}
		} else
			device_printf(dev, "no ASPM capability\n");
	}

	/*
	 * Identify the controller: the hardware revision is encoded in
	 * the TX configuration register.
	 */
	hw_rev = re_hwrevs;
	hwrev = CSR_READ_4(sc, RL_TXCFG);
	switch (hwrev & 0x70000000) {
	case 0x00000000:
	case 0x10000000:
		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000);
		hwrev &= (RL_TXCFG_HWREV | 0x80000000);
		break;
	default:
		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000);
		sc->rl_macrev = hwrev & 0x00700000;
		hwrev &= RL_TXCFG_HWREV;
		break;
	}
	device_printf(dev, "MAC rev. 0x%08x\n", sc->rl_macrev);
	/* Scan the supported-hardware table for this revision. */
	while (hw_rev->rl_desc != NULL) {
		if (hw_rev->rl_rev == hwrev) {
			sc->rl_type = hw_rev->rl_type;
			sc->rl_hwrev = hw_rev;
			break;
		}
		hw_rev++;
	}
	if (hw_rev->rl_desc == NULL) {
		device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev);
		error = ENXIO;
		goto fail;
	}

	/* Set per-revision feature and workaround flags. */
	switch (hw_rev->rl_rev) {
	case RL_HWREV_8139CPLUS:
		sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD;
		break;
	case RL_HWREV_8100E:
	case RL_HWREV_8101E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER;
		break;
	case RL_HWREV_8102E:
	case RL_HWREV_8102EL:
	case RL_HWREV_8102EL_SPIN1:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
		    RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD;
		break;
	case RL_HWREV_8103E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
		    RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_MACSLEEP;
		break;
	case RL_HWREV_8401E:
	case RL_HWREV_8105E:
	case RL_HWREV_8105E_SPIN1:
	case RL_HWREV_8106E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
		break;
	case RL_HWREV_8402:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD |
		    RL_FLAG_CMDSTOP_WAIT_TXQ;
		break;
	case RL_HWREV_8168B_SPIN1:
	case RL_HWREV_8168B_SPIN2:
		sc->rl_flags |= RL_FLAG_WOLRXENB;
		/* FALLTHROUGH */
	case RL_HWREV_8168B_SPIN3:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT;
		break;
	case RL_HWREV_8168C_SPIN2:
		sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8168C:
		if (sc->rl_macrev == 0x00200000)
			sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8168CP:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168D:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168DP:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD |
		    RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168E_VL:
	case RL_HWREV_8168F:
		sc->rl_flags |= RL_FLAG_EARLYOFF;
		/* FALLTHROUGH */
	case RL_HWREV_8411:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168EP:
	case RL_HWREV_8168FP:
	case RL_HWREV_8168G:
	case RL_HWREV_8411B:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK |
		    RL_FLAG_8168G_PLUS;
		break;
	case RL_HWREV_8168GU:
	case RL_HWREV_8168H:
		if (pci_get_device(dev) == RT_DEVICEID_8101E) {
			/* RTL8106E(US), RTL8107E */
			sc->rl_flags |= RL_FLAG_FASTETHER;
		} else
			sc->rl_flags |= RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;

		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ |
		    RL_FLAG_8168G_PLUS;
		break;
	case RL_HWREV_8169_8110SB:
	case RL_HWREV_8169_8110SBL:
	case RL_HWREV_8169_8110SC:
	case RL_HWREV_8169_8110SCE:
		sc->rl_flags |= RL_FLAG_PHYWAKE;
		/* FALLTHROUGH */
	case RL_HWREV_8169:
	case RL_HWREV_8169S:
	case RL_HWREV_8110S:
		sc->rl_flags |= RL_FLAG_MACRESET;
		break;
	default:
		break;
	}

	/* The 8139C+ uses a different configuration register layout. */
	if (sc->rl_hwrev->rl_rev == RL_HWREV_8139CPLUS) {
		sc->rl_cfg0 = RL_8139_CFG0;
		sc->rl_cfg1 = RL_8139_CFG1;
		sc->rl_cfg2 = 0;
		sc->rl_cfg3 = RL_8139_CFG3;
		sc->rl_cfg4 = RL_8139_CFG4;
		sc->rl_cfg5 = RL_8139_CFG5;
	} else {
		sc->rl_cfg0 = RL_CFG0;
		sc->rl_cfg1 = RL_CFG1;
		sc->rl_cfg2 = RL_CFG2;
		sc->rl_cfg3 = RL_CFG3;
		sc->rl_cfg4 = RL_CFG4;
		sc->rl_cfg5 = RL_CFG5;
	}

	/* Reset the adapter. */
	RL_LOCK(sc);
	re_reset(sc);
	RL_UNLOCK(sc);

	/* Enable PME. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
	cfg = CSR_READ_1(sc, sc->rl_cfg1);
	cfg |= RL_CFG1_PME;
	CSR_WRITE_1(sc, sc->rl_cfg1, cfg);
	cfg = CSR_READ_1(sc, sc->rl_cfg5);
	cfg &= RL_CFG5_PME_STS;
	CSR_WRITE_1(sc, sc->rl_cfg5, cfg);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	if ((sc->rl_flags & RL_FLAG_PAR) != 0) {
		/*
		 * XXX Should have a better way to extract station
		 * address from EEPROM.
		 */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
	} else {
		/* Probe EEPROM address width via the 93C56/93C46 id word. */
		sc->rl_eewidth = RL_9356_ADDR_LEN;
		re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
		if (re_did != 0x8129)
			sc->rl_eewidth = RL_9346_ADDR_LEN;

		/*
		 * Get station address from the EEPROM.
		 */
		re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
			as[i] = le16toh(as[i]);
		bcopy(as, eaddr, ETHER_ADDR_LEN);
	}

	if (sc->rl_type == RL_8169) {
		/* Set RX length mask and number of descriptors. */
		sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
		sc->rl_txstart = RL_GTXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
	} else {
		/* Set RX length mask and number of descriptors. */
		sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
		sc->rl_txstart = RL_TXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
	}

	error = re_allocmem(dev, sc);
	if (error)
		goto fail;
	re_add_sysctls(sc);

	ifp = sc->rl_ifp = if_alloc(IFT_ETHER);

	/* Take controller out of deep sleep mode. */
	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
			CSR_WRITE_1(sc, RL_GPIO,
			    CSR_READ_1(sc, RL_GPIO) | 0x01);
		else
			CSR_WRITE_1(sc, RL_GPIO,
			    CSR_READ_1(sc, RL_GPIO) & ~0x01);
	}

	/* Take PHY out of power down mode. */
	if ((sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) {
		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80);
		if (hw_rev->rl_rev == RL_HWREV_8401E)
			CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08);
	}
	if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) {
		re_gmii_writereg(dev, 1, 0x1f, 0);
		re_gmii_writereg(dev, 1, 0x0e, 0);
	}

	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, re_ioctl);
	if_setstartfn(ifp, re_start);
	/*
	 * RTL8168/8111C generates wrong IP checksummed frame if the
	 * packet has IP options so disable TX checksum offloading.
	 */
	if (sc->rl_hwrev->rl_rev == RL_HWREV_8168C ||
	    sc->rl_hwrev->rl_rev == RL_HWREV_8168C_SPIN2 ||
	    sc->rl_hwrev->rl_rev == RL_HWREV_8168CP) {
		if_sethwassist(ifp, 0);
		if_setcapabilities(ifp, IFCAP_RXCSUM | IFCAP_TSO4);
	} else {
		if_sethwassist(ifp, CSUM_IP | CSUM_TCP | CSUM_UDP);
		if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_TSO4);
	}
	if_sethwassistbits(ifp, CSUM_TSO, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));
	if_setinitfn(ifp, re_init);
	if_setsendqlen(ifp, RL_IFQ_MAXLEN);
	if_setsendqready(ifp);

	NET_TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);

#define RE_PHYAD_INTERNAL 0

	/* Do MII setup. */
	phy = RE_PHYAD_INTERNAL;
	if (sc->rl_type == RL_8169)
		phy = 1;
	capmask = BMSR_DEFCAPMASK;
	if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
		capmask &= ~BMSR_EXTSTAT;
	error = mii_attach(dev, &sc->rl_miibus, ifp, re_ifmedia_upd,
	    re_ifmedia_sts, capmask, phy, MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/* If address was not found, create one based on the hostid and name. */
	if (ETHER_IS_ZERO(eaddr)) {
		ether_gen_addr(ifp, (struct ether_addr *)eaddr);
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* VLAN capability setup */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING, 0);
	if (if_getcapabilities(ifp) & IFCAP_HWCSUM)
		if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
	/* Enable WOL if PM is supported. */
	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &reg) == 0)
		if_setcapabilitiesbit(ifp, IFCAP_WOL, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));
	if_setcapenablebit(ifp, 0, (IFCAP_WOL_UCAST | IFCAP_WOL_MCAST));
	/*
	 * Don't enable TSO by default. It is known to generate
	 * corrupted TCP segments(bad TCP options) under certain
	 * circumstances.
	 */
	if_sethwassistbits(ifp, 0, CSUM_TSO);
	if_setcapenablebit(ifp, 0, (IFCAP_TSO4 | IFCAP_VLAN_HWTSO));
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

#ifdef DEV_NETMAP
	re_netmap_attach(sc);
#endif /* DEV_NETMAP */

#ifdef RE_DIAG
	/*
	 * Perform hardware diagnostic on the original RTL8169.
	 * Some 32-bit cards were incorrectly wired and would
	 * malfunction if plugged into a 64-bit slot.
	 */
	if (hwrev == RL_HWREV_8169) {
		error = re_diag(sc);
		if (error) {
			device_printf(dev,
			    "attach aborted due to hardware diag failure\n");
			ether_ifdetach(ifp);
			goto fail;
		}
	}
#endif

#ifdef RE_TX_MODERATION
	intr_filter = 1;
#endif
	/* Hook interrupt last to avoid having to lock softc */
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
	    intr_filter == 0) {
		error = bus_setup_intr(dev, sc->rl_irq[0],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, re_intr_msi, sc,
		    &sc->rl_intrhand[0]);
	} else {
		error = bus_setup_intr(dev, sc->rl_irq[0],
		    INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc,
		    &sc->rl_intrhand[0]);
	}
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	DEBUGNET_SET(ifp, re);

fail:
	if (error)
		re_detach(dev);

	return (error);
}
/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
re_detach(device_t dev)
{
	struct rl_softc *sc;
	if_t ifp;
	int i, rid;

	sc = device_get_softc(dev);
	ifp = sc->rl_ifp;
	KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized"));

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
#ifdef DEVICE_POLLING
		if (if_getcapenable(ifp) & IFCAP_POLLING)
			ether_poll_deregister(ifp);
#endif
		RL_LOCK(sc);
#if 0
		sc->suspended = 1;
#endif
		re_stop(sc);
		RL_UNLOCK(sc);
		/* Wait out any in-flight tick callout and interrupt task. */
		callout_drain(&sc->rl_stat_callout);
		taskqueue_drain(taskqueue_fast, &sc->rl_inttask);
		/*
		 * Force off the IFF_UP flag here, in case someone
		 * still had a BPF descriptor attached to this
		 * interface. If they do, ether_ifdetach() will cause
		 * the BPF code to try and clear the promisc mode
		 * flag, which will bubble down to re_ioctl(),
		 * which will try to call re_init() again. This will
		 * turn the NIC back on and restart the MII ticker,
		 * which will panic the system when the kernel tries
		 * to invoke the re_tick() function that isn't there
		 * anymore.
		 */
		if_setflagbits(ifp, 0, IFF_UP);
		ether_ifdetach(ifp);
	}
	if (sc->rl_miibus)
		device_delete_child(dev, sc->rl_miibus);
	bus_generic_detach(dev);

	/*
	 * The rest is resource deallocation, so we should already be
	 * stopped here.
	 */

	if (sc->rl_intrhand[0] != NULL) {
		bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
		sc->rl_intrhand[0] = NULL;
	}
	if (ifp != NULL) {
#ifdef DEV_NETMAP
		netmap_detach(ifp);
#endif /* DEV_NETMAP */
		if_free(ifp);
	}
	/* INTx uses resource id 0; MSI/MSI-X vectors start at id 1. */
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
		rid = 0;
	else
		rid = 1;
	if (sc->rl_irq[0] != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, rid, sc->rl_irq[0]);
		sc->rl_irq[0] = NULL;
	}
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0)
		pci_release_msi(dev);
	if (sc->rl_res_pba) {
		rid = PCIR_BAR(4);
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba);
	}
	if (sc->rl_res)
		bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
		    sc->rl_res);

	/* Unload and free the RX DMA ring memory and map */

	if (sc->rl_ldata.rl_rx_list_tag) {
		if (sc->rl_ldata.rl_rx_list_addr)
			bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag,
			    sc->rl_ldata.rl_rx_list_map);
		if (sc->rl_ldata.rl_rx_list)
			bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag,
			    sc->rl_ldata.rl_rx_list,
			    sc->rl_ldata.rl_rx_list_map);
		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag);
	}

	/* Unload and free the TX DMA ring memory and map */

	if (sc->rl_ldata.rl_tx_list_tag) {
		if (sc->rl_ldata.rl_tx_list_addr)
			bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag,
			    sc->rl_ldata.rl_tx_list_map);
		if (sc->rl_ldata.rl_tx_list)
			bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag,
			    sc->rl_ldata.rl_tx_list,
			    sc->rl_ldata.rl_tx_list_map);
		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag);
	}

	/* Destroy all the RX and TX buffer maps */

	if (sc->rl_ldata.rl_tx_mtag) {
		for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
			if (sc->rl_ldata.rl_tx_desc[i].tx_dmamap)
				bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag,
				    sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
		}
		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag);
	}
	if (sc->rl_ldata.rl_rx_mtag) {
		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
			if (sc->rl_ldata.rl_rx_desc[i].rx_dmamap)
				bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
				    sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
		}
		if (sc->rl_ldata.rl_rx_sparemap)
			bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
			    sc->rl_ldata.rl_rx_sparemap);
		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag);
	}
	if (sc->rl_ldata.rl_jrx_mtag) {
		/* Jumbo RX shares rl_rx_desc_cnt with the standard ring. */
		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
			if (sc->rl_ldata.rl_jrx_desc[i].rx_dmamap)
				bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
				    sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
		}
		if (sc->rl_ldata.rl_jrx_sparemap)
			bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
			    sc->rl_ldata.rl_jrx_sparemap);
		bus_dma_tag_destroy(sc->rl_ldata.rl_jrx_mtag);
	}

	/* Unload and free the stats buffer and map */

	if (sc->rl_ldata.rl_stag) {
		if (sc->rl_ldata.rl_stats_addr)
			bus_dmamap_unload(sc->rl_ldata.rl_stag,
			    sc->rl_ldata.rl_smap);
		if (sc->rl_ldata.rl_stats)
			bus_dmamem_free(sc->rl_ldata.rl_stag,
			    sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap);
		bus_dma_tag_destroy(sc->rl_ldata.rl_stag);
	}

	if (sc->rl_parent_tag)
		bus_dma_tag_destroy(sc->rl_parent_tag);

	mtx_destroy(&sc->rl_mtx);

	return (0);
}
/*
 * Give RX descriptor 'idx' back to the hardware with its current
 * buffer still attached, discarding whatever was received in it.
 */
static __inline void
re_discard_rxbuf(struct rl_softc *sc, int idx)
{
	struct rl_rxdesc *slot;
	struct rl_desc *ring_entry;
	uint32_t cmd;

	/* Pick the jumbo bookkeeping array when jumbo RX is active. */
	if (if_getmtu(sc->rl_ifp) > RL_MTU &&
	    (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
		slot = &sc->rl_ldata.rl_jrx_desc[idx];
	else
		slot = &sc->rl_ldata.rl_rx_desc[idx];

	ring_entry = &sc->rl_ldata.rl_rx_list[idx];
	ring_entry->rl_vlanctl = 0;
	cmd = slot->rx_size | RL_RDESC_CMD_OWN;
	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
		cmd |= RL_RDESC_CMD_EOR;	/* wrap marker on last slot */
	ring_entry->rl_cmdstat = htole32(cmd);
}
/*
 * Attach a fresh 2K cluster mbuf to RX descriptor 'idx' and hand the
 * descriptor back to the chip.  Returns 0 on success or ENOBUFS when
 * no mbuf or DMA mapping could be obtained; on failure the slot is
 * left untouched.
 */
static int
re_newbuf(struct rl_softc *sc, int idx)
{
	struct rl_rxdesc *slot;
	struct rl_desc *ring_entry;
	struct mbuf *new_m;
	bus_dma_segment_t seg[1];
	bus_dmamap_t swap_map;
	uint32_t cmd;
	int nseg;

	new_m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (new_m == NULL)
		return (ENOBUFS);
	new_m->m_len = new_m->m_pkthdr.len = MCLBYTES;
#ifdef RE_FIXUP_RX
	/*
	 * This is part of an evil trick to deal with non-x86 platforms.
	 * The RealTek chip requires RX buffers to be aligned on 64-bit
	 * boundaries, but that will hose non-x86 machines. To get around
	 * this, we leave some empty space at the start of each buffer
	 * and for non-x86 hosts, we copy the buffer back six bytes
	 * to achieve word alignment. This is slightly more efficient
	 * than allocating a new buffer, copying the contents, and
	 * discarding the old buffer.
	 */
	m_adj(new_m, RE_ETHER_ALIGN);
#endif
	/* Map the new mbuf through the spare map first. */
	if (bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag,
	    sc->rl_ldata.rl_rx_sparemap, new_m, seg, &nseg,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(new_m);
		return (ENOBUFS);
	}
	KASSERT(nseg == 1, ("%s: %d segment returned!", __func__, nseg));

	slot = &sc->rl_ldata.rl_rx_desc[idx];
	if (slot->rx_m != NULL) {
		/* Tear down the mapping of the buffer being replaced. */
		bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, slot->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, slot->rx_dmamap);
	}

	/* Swap the freshly loaded spare map into the slot. */
	slot->rx_m = new_m;
	swap_map = slot->rx_dmamap;
	slot->rx_dmamap = sc->rl_ldata.rl_rx_sparemap;
	sc->rl_ldata.rl_rx_sparemap = swap_map;
	slot->rx_size = seg[0].ds_len;
	bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, slot->rx_dmamap,
	    BUS_DMASYNC_PREREAD);

	/* Publish the buffer to the hardware descriptor. */
	ring_entry = &sc->rl_ldata.rl_rx_list[idx];
	ring_entry->rl_vlanctl = 0;
	ring_entry->rl_bufaddr_lo = htole32(RL_ADDR_LO(seg[0].ds_addr));
	ring_entry->rl_bufaddr_hi = htole32(RL_ADDR_HI(seg[0].ds_addr));
	cmd = seg[0].ds_len | RL_RDESC_CMD_OWN;
	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
		cmd |= RL_RDESC_CMD_EOR;
	ring_entry->rl_cmdstat = htole32(cmd);

	return (0);
}
/*
 * Jumbo-frame variant of re_newbuf(): attach a fresh 9K cluster to
 * RX descriptor 'idx' using the jumbo DMA tag and bookkeeping array.
 * Returns 0 or ENOBUFS; on failure the slot is left untouched.
 */
static int
re_jumbo_newbuf(struct rl_softc *sc, int idx)
{
	struct rl_rxdesc *slot;
	struct rl_desc *ring_entry;
	struct mbuf *new_m;
	bus_dma_segment_t seg[1];
	bus_dmamap_t swap_map;
	uint32_t cmd;
	int nseg;

	new_m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (new_m == NULL)
		return (ENOBUFS);
	new_m->m_len = new_m->m_pkthdr.len = MJUM9BYTES;
#ifdef RE_FIXUP_RX
	/* See the alignment note in re_newbuf(). */
	m_adj(new_m, RE_ETHER_ALIGN);
#endif
	/* Map the new mbuf through the jumbo spare map first. */
	if (bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_jrx_mtag,
	    sc->rl_ldata.rl_jrx_sparemap, new_m, seg, &nseg,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(new_m);
		return (ENOBUFS);
	}
	KASSERT(nseg == 1, ("%s: %d segment returned!", __func__, nseg));

	slot = &sc->rl_ldata.rl_jrx_desc[idx];
	if (slot->rx_m != NULL) {
		/* Tear down the mapping of the buffer being replaced. */
		bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, slot->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, slot->rx_dmamap);
	}

	/* Swap the freshly loaded spare map into the slot. */
	slot->rx_m = new_m;
	swap_map = slot->rx_dmamap;
	slot->rx_dmamap = sc->rl_ldata.rl_jrx_sparemap;
	sc->rl_ldata.rl_jrx_sparemap = swap_map;
	slot->rx_size = seg[0].ds_len;
	bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, slot->rx_dmamap,
	    BUS_DMASYNC_PREREAD);

	/* Publish the buffer to the hardware descriptor. */
	ring_entry = &sc->rl_ldata.rl_rx_list[idx];
	ring_entry->rl_vlanctl = 0;
	ring_entry->rl_bufaddr_lo = htole32(RL_ADDR_LO(seg[0].ds_addr));
	ring_entry->rl_bufaddr_hi = htole32(RL_ADDR_HI(seg[0].ds_addr));
	cmd = seg[0].ds_len | RL_RDESC_CMD_OWN;
	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
		cmd |= RL_RDESC_CMD_EOR;
	ring_entry->rl_cmdstat = htole32(cmd);

	return (0);
}
#ifdef RE_FIXUP_RX
/*
 * Slide the received frame back by (RE_ETHER_ALIGN - ETHER_ALIGN)
 * bytes, 16 bits at a time, so the payload ends up with the usual
 * 2-byte IP header alignment.  See the comment in re_newbuf() for
 * why the buffer was over-aligned in the first place.
 */
static __inline void
re_fixup_rx(struct mbuf *m)
{
	uint16_t *from, *to;
	int words;

	from = mtod(m, uint16_t *);
	to = from - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof(*from);
	words = m->m_len / sizeof(uint16_t) + 1;
	while (words-- > 0)
		*to++ = *from++;

	m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN;
}
#endif
/*
 * Initialize the TX descriptor ring: clear all descriptors, drop any
 * stale mbuf pointers, mark the last descriptor end-of-ring and reset
 * the producer/consumer bookkeeping.  Always returns 0.
 */
static int
re_tx_list_init(struct rl_softc *sc)
{
	struct rl_desc *last;
	int slot;

	RL_LOCK_ASSERT(sc);

	bzero(sc->rl_ldata.rl_tx_list,
	    sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc));
	for (slot = 0; slot < sc->rl_ldata.rl_tx_desc_cnt; slot++)
		sc->rl_ldata.rl_tx_desc[slot].tx_m = NULL;
#ifdef DEV_NETMAP
	re_netmap_tx_init(sc);
#endif /* DEV_NETMAP */
	/* Set EOR. */
	last = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1];
	last->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR);
	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->rl_ldata.rl_tx_prodidx = 0;
	sc->rl_ldata.rl_tx_considx = 0;
	sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt;

	return (0);
}
/*
 * Initialize the RX descriptor ring with standard 2K cluster buffers.
 * Returns 0, or the error from re_newbuf() if a buffer could not be
 * allocated for some slot.
 */
static int
re_rx_list_init(struct rl_softc *sc)
{
	int rv, slot;

	bzero(sc->rl_ldata.rl_rx_list,
	    sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
	for (slot = 0; slot < sc->rl_ldata.rl_rx_desc_cnt; slot++) {
		sc->rl_ldata.rl_rx_desc[slot].rx_m = NULL;
		rv = re_newbuf(sc, slot);
		if (rv != 0)
			return (rv);
	}
#ifdef DEV_NETMAP
	re_netmap_rx_init(sc);
#endif /* DEV_NETMAP */

	/* Flush the RX descriptors */
	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	sc->rl_ldata.rl_rx_prodidx = 0;
	sc->rl_head = sc->rl_tail = NULL;
	sc->rl_int_rx_act = 0;

	return (0);
}
/*
 * Initialize the RX descriptor ring with 9K jumbo buffers.  The jumbo
 * ring reuses the standard descriptor list and count.  Returns 0, or
 * the error from re_jumbo_newbuf() if a buffer could not be set up.
 */
static int
re_jrx_list_init(struct rl_softc *sc)
{
	int rv, slot;

	bzero(sc->rl_ldata.rl_rx_list,
	    sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
	for (slot = 0; slot < sc->rl_ldata.rl_rx_desc_cnt; slot++) {
		sc->rl_ldata.rl_jrx_desc[slot].rx_m = NULL;
		rv = re_jumbo_newbuf(sc, slot);
		if (rv != 0)
			return (rv);
	}

	/* Flush the descriptors out to the hardware. */
	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	sc->rl_ldata.rl_rx_prodidx = 0;
	sc->rl_head = sc->rl_tail = NULL;
	sc->rl_int_rx_act = 0;

	return (0);
}
/*
 * RX handler for C+ and 8169. For the gigE chips, we support
 * the reception of jumbo frames that have been fragmented
 * across multiple 2K mbuf cluster buffers.
 *
 * Processes at most 16 received frames per call.  Returns EAGAIN when
 * the ring was drained before the budget ran out (no more work), or 0
 * when the budget was exhausted and more frames may still be pending.
 * If rx_npktsp is non-NULL, the number of frames delivered to the
 * stack is stored there.  Called with the softc lock held; the lock
 * is dropped around if_input().
 */
static int
re_rxeof(struct rl_softc *sc, int *rx_npktsp)
{
	struct mbuf *m;
	if_t ifp;
	int i, rxerr, total_len;
	struct rl_desc *cur_rx;
	u_int32_t rxstat, rxvlan;
	int jumbo, maxpkt = 16, rx_npkts = 0;

	RL_LOCK_ASSERT(sc);

	ifp = sc->rl_ifp;
#ifdef DEV_NETMAP
	if (netmap_rx_irq(ifp, 0, &rx_npkts))
		return 0;
#endif /* DEV_NETMAP */
	/* Jumbo mode is in effect when the MTU demands it and the chip supports it. */
	if (if_getmtu(ifp) > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
		jumbo = 1;
	else
		jumbo = 0;

	/* Invalidate the descriptor memory */
	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0;
	    i = RL_RX_DESC_NXT(sc, i)) {
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
			break;
		cur_rx = &sc->rl_ldata.rl_rx_list[i];
		rxstat = le32toh(cur_rx->rl_cmdstat);
		/* Descriptor still owned by the hardware: ring drained. */
		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
			break;
		total_len = rxstat & sc->rl_rxlenmask;
		rxvlan = le32toh(cur_rx->rl_vlanctl);
		if (jumbo != 0)
			m = sc->rl_ldata.rl_jrx_desc[i].rx_m;
		else
			m = sc->rl_ldata.rl_rx_desc[i].rx_m;
		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
		    (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) !=
		    (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) {
			/*
			 * RTL8168C or later controllers do not
			 * support multi-fragment packet.
			 */
			re_discard_rxbuf(sc, i);
			continue;
		} else if ((rxstat & RL_RDESC_STAT_EOF) == 0) {
			/* Middle fragment of a multi-buffer frame. */
			if (re_newbuf(sc, i) != 0) {
				/*
				 * If this is part of a multi-fragment packet,
				 * discard all the pieces.
				 */
				if (sc->rl_head != NULL) {
					m_freem(sc->rl_head);
					sc->rl_head = sc->rl_tail = NULL;
				}
				re_discard_rxbuf(sc, i);
				continue;
			}
			m->m_len = RE_RX_DESC_BUFLEN;
			/* Chain the fragment onto the frame under assembly. */
			if (sc->rl_head == NULL)
				sc->rl_head = sc->rl_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
				sc->rl_tail = m;
			}
			continue;
		}

		/*
		 * NOTE: for the 8139C+, the frame length field
		 * is always 12 bits in size, but for the gigE chips,
		 * it is 13 bits (since the max RX frame length is 16K).
		 * Unfortunately, all 32 bits in the status word
		 * were already used, so to make room for the extra
		 * length bit, RealTek took out the 'frame alignment
		 * error' bit and shifted the other status bits
		 * over one slot. The OWN, EOR, FS and LS bits are
		 * still in the same places. We have already extracted
		 * the frame length and checked the OWN bit, so rather
		 * than using an alternate bit mapping, we shift the
		 * status bits one space to the right so we can evaluate
		 * them using the 8169 status as though it was in the
		 * same format as that of the 8139C+.
		 */
		if (sc->rl_type == RL_8169)
			rxstat >>= 1;

		/*
		 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be
		 * set, but if CRC is clear, it will still be a valid frame.
		 */
		if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0) {
			rxerr = 1;
			if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0 &&
			    total_len > 8191 &&
			    (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)
				rxerr = 0;
			if (rxerr != 0) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				/*
				 * If this is part of a multi-fragment packet,
				 * discard all the pieces.
				 */
				if (sc->rl_head != NULL) {
					m_freem(sc->rl_head);
					sc->rl_head = sc->rl_tail = NULL;
				}
				re_discard_rxbuf(sc, i);
				continue;
			}
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */
		if (jumbo != 0)
			rxerr = re_jumbo_newbuf(sc, i);
		else
			rxerr = re_newbuf(sc, i);
		if (rxerr != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			if (sc->rl_head != NULL) {
				m_freem(sc->rl_head);
				sc->rl_head = sc->rl_tail = NULL;
			}
			re_discard_rxbuf(sc, i);
			continue;
		}

		if (sc->rl_head != NULL) {
			/* Last fragment: compute this buffer's share of the frame. */
			if (jumbo != 0)
				m->m_len = total_len;
			else {
				m->m_len = total_len % RE_RX_DESC_BUFLEN;
				if (m->m_len == 0)
					m->m_len = RE_RX_DESC_BUFLEN;
			}
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rl_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
			}
			m = sc->rl_head;
			sc->rl_head = sc->rl_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

#ifdef RE_FIXUP_RX
		re_fixup_rx(m);
#endif
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */

		if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
			if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
				/* Check IP header checksum */
				if (rxstat & RL_RDESC_STAT_PROTOID)
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_CHECKED;
				if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_VALID;

				/* Check TCP/UDP checksum */
				if ((RL_TCPPKT(rxstat) &&
				    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
				    (RL_UDPPKT(rxstat) &&
				     !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
					m->m_pkthdr.csum_flags |=
						CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			} else {
				/*
				 * RTL8168C/RTL816CP/RTL8111C/RTL8111CP
				 */
				if ((rxstat & RL_RDESC_STAT_PROTOID) &&
				    (rxvlan & RL_RDESC_IPV4))
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_CHECKED;
				if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) &&
				    (rxvlan & RL_RDESC_IPV4))
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_VALID;
				if (((rxstat & RL_RDESC_STAT_TCP) &&
				    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
				    ((rxstat & RL_RDESC_STAT_UDP) &&
				    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
					m->m_pkthdr.csum_flags |=
						CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}
		}
		maxpkt--;
		/* Attach the hardware-extracted VLAN tag, if any. */
		if (rxvlan & RL_RDESC_VLANCTL_TAG) {
			m->m_pkthdr.ether_vtag =
			    bswap16((rxvlan & RL_RDESC_VLANCTL_DATA));
			m->m_flags |= M_VLANTAG;
		}
		/* Drop the lock while handing the frame to the stack. */
		RL_UNLOCK(sc);
		if_input(ifp, m);
		RL_LOCK(sc);
		rx_npkts++;
	}

	/* Flush the RX DMA ring */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->rl_ldata.rl_rx_prodidx = i;

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;
	if (maxpkt)
		return (EAGAIN);

	return (0);
}
/*
 * Reap completed TX descriptors: free the transmitted mbufs, update
 * error/collision/packet counters and advance the consumer index.
 * Stops at the first descriptor still owned by the hardware.  Called
 * with the softc lock held.
 */
static void
re_txeof(struct rl_softc *sc)
{
	if_t ifp;
	struct rl_txdesc *txd;
	u_int32_t txstat;
	int cons;

	cons = sc->rl_ldata.rl_tx_considx;
	/* Nothing queued since the last reap. */
	if (cons == sc->rl_ldata.rl_tx_prodidx)
		return;

	ifp = sc->rl_ifp;
#ifdef DEV_NETMAP
	if (netmap_tx_irq(ifp, 0))
		return;
#endif /* DEV_NETMAP */
	/* Invalidate the TX descriptor list */
	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (; cons != sc->rl_ldata.rl_tx_prodidx;
	    cons = RL_TX_DESC_NXT(sc, cons)) {
		txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat);
		/* Hardware still owns this one: stop reaping. */
		if (txstat & RL_TDESC_STAT_OWN)
			break;
		/*
		 * We only stash mbufs in the last descriptor
		 * in a fragment chain, which also happens to
		 * be the only place where the TX status bits
		 * are valid.
		 */
		if (txstat & RL_TDESC_CMD_EOF) {
			txd = &sc->rl_ldata.rl_tx_desc[cons];
			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
			    txd->tx_dmamap);
			KASSERT(txd->tx_m != NULL,
			    ("%s: freeing NULL mbufs!", __func__));
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			if (txstat & (RL_TDESC_STAT_EXCESSCOL|
			    RL_TDESC_STAT_COLCNT))
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
			if (txstat & RL_TDESC_STAT_TXERRSUM)
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			else
				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}
		sc->rl_ldata.rl_tx_free++;
		/* Descriptors were freed: the queue may transmit again. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}
	sc->rl_ldata.rl_tx_considx = cons;

	/* No changes made to the TX ring, so no flush needed */

	if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) {
#ifdef RE_TX_MODERATION
		/*
		 * If not all descriptors have been reaped yet, reload
		 * the timer so that we will eventually get another
		 * interrupt that will cause us to re-enter this routine.
		 * This is done in case the transmitter has gone idle.
		 */
		CSR_WRITE_4(sc, RL_TIMERCNT, 1);
#endif
	} else
		sc->rl_watchdog_timer = 0;
}
/*
 * Once-a-second housekeeping callout: drive the PHY state machine,
 * re-check link state if the link has not come up, reclaim TX
 * descriptors, run the watchdog, then reschedule itself.  Runs with
 * the softc lock held (callout initialized with the softc mutex).
 */
static void
re_tick(void *xsc)
{
	struct rl_softc *sc;
	struct mii_data *mii;

	sc = xsc;
	RL_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->rl_miibus);

	mii_tick(mii);
	/* Re-evaluate link state while we have not seen the link up. */
	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
		re_miibus_statchg(sc->rl_dev);
	/*
	 * Reclaim transmitted frames here. Technically it is not
	 * necessary to do here but it ensures periodic reclamation
	 * regardless of Tx completion interrupt which seems to be
	 * lost on PCIe based controllers under certain situations.
	 */
	re_txeof(sc);
	re_watchdog(sc);
	callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
}
#ifdef DEVICE_POLLING
/*
 * DEVICE_POLLING entry point: take the softc lock and service the
 * interface via re_poll_locked() if it is running.  Returns the
 * number of RX packets processed.
 */
static int
re_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc = if_getsoftc(ifp);
	int rx_npkts = 0;

	RL_LOCK(sc);
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		rx_npkts = re_poll_locked(ifp, cmd, count);
	RL_UNLOCK(sc);
	return (rx_npkts);
}
/*
 * Locked body of the polling handler: process RX (bounded by the
 * poll budget via sc->rxcycles) and TX completions and restart
 * transmission if packets are queued.  On POLL_AND_CHECK_STATUS,
 * additionally read and acknowledge the interrupt status register,
 * kick the transmitter on PCIe parts, and recover from system
 * errors.  Returns the number of RX packets processed.
 */
static int
re_poll_locked(if_t ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc = if_getsoftc(ifp);
	int rx_npkts;

	RL_LOCK_ASSERT(sc);

	/* Bound the amount of RX work done in this poll pass. */
	sc->rxcycles = count;
	re_rxeof(sc, &rx_npkts);
	re_txeof(sc);

	if (!if_sendq_empty(ifp))
		re_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		u_int16_t       status;

		status = CSR_READ_2(sc, RL_ISR);
		/* 0xffff: reads all-ones, the hardware is gone. */
		if (status == 0xffff)
			return (rx_npkts);
		if (status)
			CSR_WRITE_2(sc, RL_ISR, status);
		if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
		    (sc->rl_flags & RL_FLAG_PCIE))
			CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);

		/*
		 * XXX check behaviour on receiver stalls.
		 */

		if (status & RL_ISR_SYSTEM_ERR) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			re_init_locked(sc);
		}
	}
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */
/*
 * Fast interrupt filter.  Checks whether this device raised the
 * interrupt; if so, masks further interrupts and defers the real
 * work to re_int_task() on the fast taskqueue.
 */
static int
re_intr(void *arg)
{
	struct rl_softc *sc;
	uint16_t status;

	sc = arg;
	status = CSR_READ_2(sc, RL_ISR);
	/* All-ones: hardware gone; no CPLUS bits: not our interrupt. */
	if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0)
		return (FILTER_STRAY);
	/* Mask interrupts until re_int_task() re-enables them. */
	CSR_WRITE_2(sc, RL_IMR, 0);
	taskqueue_enqueue(taskqueue_fast, &sc->rl_inttask);
	return (FILTER_HANDLED);
}
/*
 * Deferred interrupt handler, run from the fast taskqueue after
 * re_intr() masked interrupts.  Acknowledges pending status,
 * services RX/TX, restarts the transmitter on PCIe parts, recovers
 * from system errors, and finally unmasks interrupts unless more
 * work is already pending (in which case it re-queues itself).
 */
static void
re_int_task(void *arg, int npending)
{
	struct rl_softc *sc;
	if_t ifp;
	u_int16_t status;
	int rval = 0;

	sc = arg;
	ifp = sc->rl_ifp;

	RL_LOCK(sc);

	status = CSR_READ_2(sc, RL_ISR);
	/* Acknowledge everything we are about to handle. */
	CSR_WRITE_2(sc, RL_ISR, status);

	if (sc->suspended ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		RL_UNLOCK(sc);
		return;
	}

#ifdef DEVICE_POLLING
	/* Polling mode handles events from re_poll(); nothing to do. */
	if (if_getcapenable(ifp) & IFCAP_POLLING) {
		RL_UNLOCK(sc);
		return;
	}
#endif

	if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW))
		rval = re_rxeof(sc, NULL);

	/*
	 * Some chips will ignore a second TX request issued
	 * while an existing transmission is in progress. If
	 * the transmitter goes idle but there are still
	 * packets waiting to be sent, we need to restart the
	 * channel here to flush them out. This only seems to
	 * be required with the PCIe devices.
	 */
	if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
	    (sc->rl_flags & RL_FLAG_PCIE))
		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
	if (status & (
#ifdef RE_TX_MODERATION
	    RL_ISR_TIMEOUT_EXPIRED|
#else
	    RL_ISR_TX_OK|
#endif
	    RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL))
		re_txeof(sc);

	if (status & RL_ISR_SYSTEM_ERR) {
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		re_init_locked(sc);
	}

	if (!if_sendq_empty(ifp))
		re_start_locked(ifp);

	RL_UNLOCK(sc);

	/* More events arrived while we worked: process them first. */
	if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) {
		taskqueue_enqueue(taskqueue_fast, &sc->rl_inttask);
		return;
	}

	/* Re-enable interrupts masked by re_intr(). */
	CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
}
/*
 * MSI/MSI-X interrupt handler.  Unlike the legacy filter/taskqueue
 * pair, all work is done here with the softc locked.  Implements a
 * simple RX interrupt moderation scheme: after an RX burst the RX
 * interrupt sources are masked and the one-shot timer (RL_TIMERCNT)
 * is armed so a delayed timer interrupt drives the next RX pass.
 */
static void
re_intr_msi(void *xsc)
{
	struct rl_softc *sc;
	if_t ifp;
	uint16_t intrs, status;

	sc = xsc;
	RL_LOCK(sc);
	ifp = sc->rl_ifp;
#ifdef DEVICE_POLLING
	if (if_getcapenable(ifp) & IFCAP_POLLING) {
		RL_UNLOCK(sc);
		return;
	}
#endif
	/* Disable interrupts. */
	CSR_WRITE_2(sc, RL_IMR, 0);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		RL_UNLOCK(sc);
		return;
	}

	intrs = RL_INTRS_CPLUS;
	status = CSR_READ_2(sc, RL_ISR);
	CSR_WRITE_2(sc, RL_ISR, status);
	if (sc->rl_int_rx_act > 0) {
		/* Moderation active: ignore direct RX interrupt sources. */
		intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
		    RL_ISR_RX_OVERRUN);
		status &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
		    RL_ISR_RX_OVERRUN);
	}
	if (status & (RL_ISR_TIMEOUT_EXPIRED | RL_ISR_RX_OK | RL_ISR_RX_ERR |
	    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) {
		re_rxeof(sc, NULL);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			if (sc->rl_int_rx_mod != 0 &&
			    (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR |
			    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) != 0) {
				/* Rearm one-shot timer. */
				CSR_WRITE_4(sc, RL_TIMERCNT, 1);
				intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR |
				    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN);
				sc->rl_int_rx_act = 1;
			} else {
				/* Leave moderation: take RX interrupts again. */
				intrs |= RL_ISR_RX_OK | RL_ISR_RX_ERR |
				    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN;
				sc->rl_int_rx_act = 0;
			}
		}
	}

	/*
	 * Some chips will ignore a second TX request issued
	 * while an existing transmission is in progress. If
	 * the transmitter goes idle but there are still
	 * packets waiting to be sent, we need to restart the
	 * channel here to flush them out. This only seems to
	 * be required with the PCIe devices.
	 */
	if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
	    (sc->rl_flags & RL_FLAG_PCIE))
		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
	if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR | RL_ISR_TX_DESC_UNAVAIL))
		re_txeof(sc);
	if (status & RL_ISR_SYSTEM_ERR) {
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		re_init_locked(sc);
	}
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		if (!if_sendq_empty(ifp))
			re_start_locked(ifp);
		/* Re-enable the (possibly moderated) interrupt sources. */
		CSR_WRITE_2(sc, RL_IMR, intrs);
	}
	RL_UNLOCK(sc);
}
/*
 * Encapsulate a frame for transmission: map the mbuf chain into at
 * most RL_NTXSEGS DMA segments, fill one TX descriptor per segment,
 * and finally hand ownership of the chain's first descriptor to the
 * chip.
 *
 * *m_head may be replaced by a padded/defragmented/collapsed copy.
 * On most failures the chain is freed and *m_head set to NULL; when
 * the ring is full (ENOBUFS after a successful mapping) or a
 * transient mapping error occurs, the caller keeps ownership of the
 * untouched mbuf.  Returns 0 or an errno.  Softc lock must be held.
 */
static int
re_encap(struct rl_softc *sc, struct mbuf **m_head)
{
	struct rl_txdesc *txd, *txd_last;
	bus_dma_segment_t segs[RL_NTXSEGS];
	bus_dmamap_t map;
	struct mbuf *m_new;
	struct rl_desc *desc;
	int nsegs, prod;
	int i, error, ei, si;
	int padlen;
	uint32_t cmdstat, csum_flags, vlanctl;

	RL_LOCK_ASSERT(sc);
	M_ASSERTPKTHDR((*m_head));

	/*
	 * With some of the RealTek chips, using the checksum offload
	 * support in conjunction with the autopadding feature results
	 * in the transmission of corrupt frames. For example, if we
	 * need to send a really small IP fragment that's less than 60
	 * bytes in size, and IP header checksumming is enabled, the
	 * resulting ethernet frame that appears on the wire will
	 * have garbled payload. To work around this, if TX IP checksum
	 * offload is enabled, we always manually pad short frames out
	 * to the minimum ethernet frame size.
	 */
	if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 &&
	    (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN &&
	    ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) {
		padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len;
		if (M_WRITABLE(*m_head) == 0) {
			/* Get a writable copy. */
			m_new = m_dup(*m_head, M_NOWAIT);
			m_freem(*m_head);
			if (m_new == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m_new;
		}
		/* Need a single mbuf with room at the end for the pad. */
		if ((*m_head)->m_next != NULL ||
		    M_TRAILINGSPACE(*m_head) < padlen) {
			m_new = m_defrag(*m_head, M_NOWAIT);
			if (m_new == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (ENOBUFS);
			}
		} else
			m_new = *m_head;

		/*
		 * Manually pad short frames, and zero the pad space
		 * to avoid leaking data.
		 */
		bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen);
		m_new->m_pkthdr.len += padlen;
		m_new->m_len = m_new->m_pkthdr.len;
		*m_head = m_new;
	}

	prod = sc->rl_ldata.rl_tx_prodidx;
	txd = &sc->rl_ldata.rl_tx_desc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
	    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments: collapse the chain and retry once. */
		m_new = m_collapse(*m_head, M_NOWAIT, RL_NTXSEGS);
		if (m_new == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m_new;
		error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag,
		    txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check for number of available descriptors. */
	if (sc->rl_ldata.rl_tx_free - nsegs <= 1) {
		bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Set up checksum offload. Note: checksum offload bits must
	 * appear in all descriptors of a multi-descriptor transmit
	 * attempt. This is according to testing done with an 8169
	 * chip. This is a requirement.
	 */
	vlanctl = 0;
	csum_flags = 0;
	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		/* DESCV2 parts carry the MSS in the vlanctl word. */
		if ((sc->rl_flags & RL_FLAG_DESCV2) != 0) {
			csum_flags |= RL_TDESC_CMD_LGSEND;
			vlanctl |= ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
			    RL_TDESC_CMD_MSSVALV2_SHIFT);
		} else {
			csum_flags |= RL_TDESC_CMD_LGSEND |
			    ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
			    RL_TDESC_CMD_MSSVAL_SHIFT);
		}
	} else {
		/*
		 * Unconditionally enable IP checksum if TCP or UDP
		 * checksum is required. Otherwise, TCP/UDP checksum
		 * doesn't make effects.
		 */
		if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) {
			if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
				csum_flags |= RL_TDESC_CMD_IPCSUM;
				if (((*m_head)->m_pkthdr.csum_flags &
				    CSUM_TCP) != 0)
					csum_flags |= RL_TDESC_CMD_TCPCSUM;
				if (((*m_head)->m_pkthdr.csum_flags &
				    CSUM_UDP) != 0)
					csum_flags |= RL_TDESC_CMD_UDPCSUM;
			} else {
				vlanctl |= RL_TDESC_CMD_IPCSUMV2;
				if (((*m_head)->m_pkthdr.csum_flags &
				    CSUM_TCP) != 0)
					vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
				if (((*m_head)->m_pkthdr.csum_flags &
				    CSUM_UDP) != 0)
					vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
			}
		}
	}

	/*
	 * Set up hardware VLAN tagging. Note: vlan tag info must
	 * appear in all descriptors of a multi-descriptor
	 * transmission attempt.
	 */
	if ((*m_head)->m_flags & M_VLANTAG)
		vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) |
		    RL_TDESC_VLANCTL_TAG;

	si = prod;
	for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) {
		desc = &sc->rl_ldata.rl_tx_list[prod];
		desc->rl_vlanctl = htole32(vlanctl);
		desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr));
		desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr));
		cmdstat = segs[i].ds_len;
		/*
		 * Defer setting OWN on the first descriptor until the
		 * whole chain is built (done below), so the chip never
		 * sees a partially constructed frame.
		 */
		if (i != 0)
			cmdstat |= RL_TDESC_CMD_OWN;
		if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1)
			cmdstat |= RL_TDESC_CMD_EOR;
		desc->rl_cmdstat = htole32(cmdstat | csum_flags);
		sc->rl_ldata.rl_tx_free--;
	}
	/* Update producer index. */
	sc->rl_ldata.rl_tx_prodidx = prod;

	/* Set EOF on the last descriptor. */
	ei = RL_TX_DESC_PRV(sc, prod);
	desc = &sc->rl_ldata.rl_tx_list[ei];
	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);

	desc = &sc->rl_ldata.rl_tx_list[si];
	/* Set SOF and transfer ownership of packet to the chip. */
	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF);

	/*
	 * Insure that the map for this transmission
	 * is placed at the array index of the last descriptor
	 * in this chain. (Swap last and first dmamaps.)
	 */
	txd_last = &sc->rl_ldata.rl_tx_desc[ei];
	map = txd->tx_dmamap;
	txd->tx_dmamap = txd_last->tx_dmamap;
	txd_last->tx_dmamap = map;
	txd_last->tx_m = *m_head;

	return (0);
}
/*
 * Locked wrapper around re_start_locked(): acquire the softc mutex,
 * kick the transmit path, then release the mutex.
 */
static void
re_start(if_t ifp)
{
	struct rl_softc *sc = if_getsoftc(ifp);

	RL_LOCK(sc);
	re_start_locked(ifp);
	RL_UNLOCK(sc);
}
/*
 * Main transmit routine for C+ and gigE NICs.
 *
 * Dequeues frames from the interface send queue, encapsulates them
 * onto the TX ring via re_encap(), taps BPF, and finally kicks the
 * hardware with re_start_tx().  Softc lock must be held.
 */
static void
re_start_locked(if_t ifp)
{
	struct rl_softc *sc;
	struct mbuf *m_head;
	int queued;

	sc = if_getsoftc(ifp);

#ifdef DEV_NETMAP
	/* XXX is this necessary ? */
	if (if_getcapenable(ifp) & IFCAP_NETMAP) {
		struct netmap_kring *kring = NA(ifp)->tx_rings[0];
		if (sc->rl_ldata.rl_tx_prodidx != kring->nr_hwcur) {
			/* kick the tx unit */
			CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
#ifdef RE_TX_MODERATION
			CSR_WRITE_4(sc, RL_TIMERCNT, 1);
#endif
			sc->rl_watchdog_timer = 5;
		}
		return;
	}
#endif /* DEV_NETMAP */

	/* Bail if not running, TX already backed off, or no link. */
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
		return;

	for (queued = 0; !if_sendq_empty(ifp) &&
	    sc->rl_ldata.rl_tx_free > 1;) {
		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;

		if (re_encap(sc, &m_head) != 0) {
			/* m_head == NULL: the frame was consumed/freed. */
			if (m_head == NULL)
				break;
			/* Ring full: requeue the frame and back off. */
			if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);

		queued++;
	}

	if (queued == 0) {
#ifdef RE_TX_MODERATION
		/* Frames still in flight: rearm the moderation timer. */
		if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt)
			CSR_WRITE_4(sc, RL_TIMERCNT, 1);
#endif
		return;
	}

	re_start_tx(sc);
}
/*
 * Push newly queued descriptors to the hardware: flush the TX ring
 * to memory, issue the TX-start command, optionally arm the TX
 * moderation timer, and start the transmit watchdog.
 */
static void
re_start_tx(struct rl_softc *sc)
{

	/* Flush the TX descriptors */
	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);

#ifdef RE_TX_MODERATION
	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the TIMERINT register, and then trigger an
	 * interrupt. Each time we write to the TIMERCNT register,
	 * the timer count is reset to 0.
	 */
	CSR_WRITE_4(sc, RL_TIMERCNT, 1);
#endif

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	sc->rl_watchdog_timer = 5;
}
/*
 * Program the chip's jumbo-frame configuration bits and the PCIe
 * maximum read request size for the requested mode (jumbo != 0
 * enables jumbo frames).  The register recipe varies per hardware
 * revision.
 */
static void
re_set_jumbo(struct rl_softc *sc, int jumbo)
{

	/* 8168E-VL only needs the read-request change, no config bits. */
	if (sc->rl_hwrev->rl_rev == RL_HWREV_8168E_VL) {
		pci_set_max_read_req(sc->rl_dev, 4096);
		return;
	}

	/* Unlock config registers for writing. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	if (jumbo != 0) {
		CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) |
		    RL_CFG3_JUMBO_EN0);
		switch (sc->rl_hwrev->rl_rev) {
		case RL_HWREV_8168DP:
			break;
		case RL_HWREV_8168E:
			/* 0x01 in CFG4 is this revision's second enable bit. */
			CSR_WRITE_1(sc, sc->rl_cfg4,
			    CSR_READ_1(sc, sc->rl_cfg4) | 0x01);
			break;
		default:
			CSR_WRITE_1(sc, sc->rl_cfg4,
			    CSR_READ_1(sc, sc->rl_cfg4) | RL_CFG4_JUMBO_EN1);
		}
	} else {
		CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) &
		    ~RL_CFG3_JUMBO_EN0);
		switch (sc->rl_hwrev->rl_rev) {
		case RL_HWREV_8168DP:
			break;
		case RL_HWREV_8168E:
			CSR_WRITE_1(sc, sc->rl_cfg4,
			    CSR_READ_1(sc, sc->rl_cfg4) & ~0x01);
			break;
		default:
			CSR_WRITE_1(sc, sc->rl_cfg4,
			    CSR_READ_1(sc, sc->rl_cfg4) & ~RL_CFG4_JUMBO_EN1);
		}
	}
	/* Relock config registers. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
	switch (sc->rl_hwrev->rl_rev) {
	case RL_HWREV_8168DP:
		pci_set_max_read_req(sc->rl_dev, 4096);
		break;
	default:
		if (jumbo != 0)
			pci_set_max_read_req(sc->rl_dev, 512);
		else
			pci_set_max_read_req(sc->rl_dev, 4096);
	}
}
/*
 * Unlocked entry point for (re)initialization; grabs the softc
 * mutex and defers to re_init_locked().
 */
static void
re_init(void *xsc)
{
	struct rl_softc *sc;

	sc = xsc;
	RL_LOCK(sc);
	re_init_locked(sc);
	RL_UNLOCK(sc);
}
/*
 * (Re)initialize the hardware with the softc lock held.  Stops and
 * resets the chip, sets up the RX/TX rings, programs C+ command
 * mode, the MAC address and ring base addresses, RX/TX
 * configuration, interrupt moderation and interrupt masks, then
 * marks the interface running and starts the PHY and the periodic
 * tick callout.  Returns early (interface left down) if RX buffer
 * allocation fails or if the interface is already running.
 */
static void
re_init_locked(struct rl_softc *sc)
{
	if_t ifp = sc->rl_ifp;
	struct mii_data *mii;
	uint32_t reg;
	uint16_t cfg;
	uint32_t idr[2];

	RL_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->rl_miibus);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	re_stop(sc);

	/* Put controller into known state. */
	re_reset(sc);

	/*
	 * For C+ mode, initialize the RX descriptors and mbufs.
	 */
	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
		if (if_getmtu(ifp) > RL_MTU) {
			if (re_jrx_list_init(sc) != 0) {
				device_printf(sc->rl_dev,
				    "no memory for jumbo RX buffers\n");
				re_stop(sc);
				return;
			}
			/* Disable checksum offloading for jumbo frames. */
			if_setcapenablebit(ifp, 0, (IFCAP_HWCSUM | IFCAP_TSO4));
			if_sethwassistbits(ifp, 0, (RE_CSUM_FEATURES | CSUM_TSO));
		} else {
			if (re_rx_list_init(sc) != 0) {
				device_printf(sc->rl_dev,
				    "no memory for RX buffers\n");
				re_stop(sc);
				return;
			}
		}
		re_set_jumbo(sc, if_getmtu(ifp) > RL_MTU);
	} else {
		if (re_rx_list_init(sc) != 0) {
			device_printf(sc->rl_dev, "no memory for RX buffers\n");
			re_stop(sc);
			return;
		}
		if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
		    pci_get_device(sc->rl_dev) != RT_DEVICEID_8101E) {
			if (if_getmtu(ifp) > RL_MTU)
				pci_set_max_read_req(sc->rl_dev, 512);
			else
				pci_set_max_read_req(sc->rl_dev, 4096);
		}
	}
	re_tx_list_init(sc);

	/*
	 * Enable C+ RX and TX mode, as well as VLAN stripping and
	 * RX checksum offload. We must configure the C+ register
	 * before all others.
	 */
	cfg = RL_CPLUSCMD_PCI_MRW;
	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
		cfg |= RL_CPLUSCMD_RXCSUM_ENB;
	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
		cfg |= RL_CPLUSCMD_VLANSTRIP;
	if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) {
		cfg |= RL_CPLUSCMD_MACSTAT_DIS;
		/* XXX magic. */
		cfg |= 0x0001;
	} else
		cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB;
	CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);
	if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SC ||
	    sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) {
		/* Undocumented tuning writes for 8110SC/SCE revisions. */
		reg = 0x000fff00;
		if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_PCI66MHZ) != 0)
			reg |= 0x000000ff;
		if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE)
			reg |= 0x00f00000;
		CSR_WRITE_4(sc, 0x7c, reg);
		/* Disable interrupt mitigation. */
		CSR_WRITE_2(sc, 0xe2, 0);
	}
	/*
	 * Disable TSO if interface MTU size is greater than MSS
	 * allowed in controller.
	 */
	if (if_getmtu(ifp) > RL_TSO_MTU && (if_getcapenable(ifp) & IFCAP_TSO4) != 0) {
		if_setcapenablebit(ifp, 0, IFCAP_TSO4);
		if_sethwassistbits(ifp, 0, CSUM_TSO);
	}

	/*
	 * Init our MAC address. Even though the chipset
	 * documentation doesn't mention it, we need to enter "Config
	 * register write enable" mode to modify the ID registers.
	 */
	/* Copy MAC address on stack to align. */
	bzero(idr, sizeof(idr));
	bcopy(if_getlladdr(ifp), idr, ETHER_ADDR_LEN);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	CSR_WRITE_4(sc, RL_IDR0, htole32(idr[0]));
	CSR_WRITE_4(sc, RL_IDR4, htole32(idr[1]));
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	/*
	 * Load the addresses of the RX and TX lists into the chip.
	 */
	CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
	    RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr));
	CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
	    RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr));
	CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
	    RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr));
	CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
	    RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr));

	if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) {
		/* Disable RXDV gate. */
		CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) &
		    ~0x00080000);
	}

	/*
	 * Enable transmit and receive for pre-RTL8168G controllers.
	 * RX/TX MACs should be enabled before RX/TX configuration.
	 */
	if ((sc->rl_flags & RL_FLAG_8168G_PLUS) == 0)
		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB);

	/*
	 * Set the initial TX configuration.
	 */
	if (sc->rl_testmode) {
		/* Test mode uses internal loopback. */
		if (sc->rl_type == RL_8169)
			CSR_WRITE_4(sc, RL_TXCFG,
			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON);
		else
			CSR_WRITE_4(sc, RL_TXCFG,
			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS);
	} else
		CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);

	CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);

	/*
	 * Set the initial RX configuration.
	 */
	re_set_rxmode(sc);

	/* Configure interrupt moderation. */
	if (sc->rl_type == RL_8169) {
		/* Magic from vendor. */
		CSR_WRITE_2(sc, RL_INTRMOD, 0x5100);
	}

	/*
	 * Enable transmit and receive for RTL8168G and later controllers.
	 * RX/TX MACs should be enabled after RX/TX configuration.
	 */
	if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0)
		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB);

#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		CSR_WRITE_2(sc, RL_IMR, 0);
	else	/* otherwise ... */
#endif
	/*
	 * Enable interrupts.
	 */
	if (sc->rl_testmode)
		CSR_WRITE_2(sc, RL_IMR, 0);
	else
		CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
	CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS);

	/* Set initial TX threshold */
	sc->rl_txthresh = RL_TX_THRESH_INIT;

	/* Start RX/TX process. */
	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);

	/*
	 * Initialize the timer interrupt register so that
	 * a timer interrupt will be generated once the timer
	 * reaches a certain number of ticks. The timer is
	 * reloaded on each transmit.
	 */
#ifdef RE_TX_MODERATION
	/*
	 * Use timer interrupt register to moderate TX interrupt
	 * moderation, which dramatically improves TX frame rate.
	 */
	if (sc->rl_type == RL_8169)
		CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800);
	else
		CSR_WRITE_4(sc, RL_TIMERINT, 0x400);
#else
	/*
	 * Use timer interrupt register to moderate RX interrupt
	 * moderation.
	 */
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
	    intr_filter == 0) {
		if (sc->rl_type == RL_8169)
			CSR_WRITE_4(sc, RL_TIMERINT_8169,
			    RL_USECS(sc->rl_int_rx_mod));
	} else {
		if (sc->rl_type == RL_8169)
			CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(0));
	}
#endif
	/*
	 * For 8169 gigE NICs, set the max allowed RX packet
	 * size so we can receive jumbo frames.
	 */
	if (sc->rl_type == RL_8169) {
		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
			/*
			 * For controllers that use new jumbo frame scheme,
			 * set maximum size of jumbo frame depending on
			 * controller revisions.
			 */
			if (if_getmtu(ifp) > RL_MTU)
				CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
				    sc->rl_hwrev->rl_max_mtu +
				    ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN +
				    ETHER_CRC_LEN);
			else
				CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
				    RE_RX_DESC_BUFLEN);
		} else if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
		    sc->rl_hwrev->rl_max_mtu == RL_MTU) {
			/* RTL810x has no jumbo frame support. */
			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN);
		} else
			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);
	}

	/* In test mode, leave interface down and do not start I/O. */
	if (sc->rl_testmode)
		return;

	CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) |
	    RL_CFG1_DRVLOAD);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	/* Force a fresh link-state evaluation via the PHY. */
	sc->rl_flags &= ~RL_FLAG_LINK;
	mii_mediachg(mii);

	sc->rl_watchdog_timer = 0;
	callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);

#ifdef DEV_NETMAP
	netmap_enable_all_rings(ifp);
#endif /* DEV_NETMAP */
}
/*
* Set media options.
*/
/*
 * Set media options: hand the media change request to the PHY layer
 * under the softc lock and propagate its result.
 */
static int
re_ifmedia_upd(if_t ifp)
{
	struct rl_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii = device_get_softc(sc->rl_miibus);
	int error;

	RL_LOCK(sc);
	error = mii_mediachg(mii);
	RL_UNLOCK(sc);

	return (error);
}
/*
* Report current media status.
*/
/*
 * Report current media status: poll the PHY under the softc lock and
 * copy the active media and status words into the request.
 */
static void
re_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct rl_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii = device_get_softc(sc->rl_miibus);

	RL_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	RL_UNLOCK(sc);
}
/*
 * Interface ioctl handler: MTU changes, interface flag changes,
 * multicast list updates, media selection and capability toggles.
 * Everything else is passed through to ether_ioctl().
 */
static int
re_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct rl_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0;

	switch (command) {
	case SIOCSIFMTU:
		/* Reject MTUs outside what this hardware revision allows. */
		if (ifr->ifr_mtu < ETHERMIN ||
		    ifr->ifr_mtu > sc->rl_hwrev->rl_max_mtu ||
		    ((sc->rl_flags & RL_FLAG_FASTETHER) != 0 &&
		    ifr->ifr_mtu > RL_MTU)) {
			error = EINVAL;
			break;
		}
		RL_LOCK(sc);
		if (if_getmtu(ifp) != ifr->ifr_mtu) {
			if_setmtu(ifp, ifr->ifr_mtu);
			/*
			 * JUMBOV2 chips need a reinit to switch between
			 * standard and jumbo RX buffers.
			 */
			if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
			    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				re_init_locked(sc);
			}
			/* TSO cannot be used beyond the controller's MSS. */
			if (if_getmtu(ifp) > RL_TSO_MTU &&
			    (if_getcapenable(ifp) & IFCAP_TSO4) != 0) {
				if_setcapenablebit(ifp, 0,
				    IFCAP_TSO4 | IFCAP_VLAN_HWTSO);
				if_sethwassistbits(ifp, 0, CSUM_TSO);
			}
			VLAN_CAPABILITIES(ifp);
		}
		RL_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		RL_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				/* Only reprogram the RX filter if needed. */
				if (((if_getflags(ifp) ^ sc->rl_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					re_set_rxmode(sc);
			} else
				re_init_locked(sc);
		} else {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
				re_stop(sc);
		}
		sc->rl_if_flags = if_getflags(ifp);
		RL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		RL_LOCK(sc);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			re_set_rxmode(sc);
		RL_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->rl_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		reinit = 0;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(re_poll, ifp);
				if (error)
					return (error);
				RL_LOCK(sc);
				/* Disable interrupts */
				CSR_WRITE_2(sc, RL_IMR, 0x0000);
				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
				RL_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				RL_LOCK(sc);
				CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
				RL_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		RL_LOCK(sc);
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
			if_togglecapenable(ifp, IFCAP_TXCSUM);
			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
				if_sethwassistbits(ifp, RE_CSUM_FEATURES, 0);
			else
				if_sethwassistbits(ifp, 0, RE_CSUM_FEATURES);
			reinit = 1;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) {
			if_togglecapenable(ifp, IFCAP_RXCSUM);
			reinit = 1;
		}
		if ((mask & IFCAP_TSO4) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
			if_togglecapenable(ifp, IFCAP_TSO4);
			if ((IFCAP_TSO4 & if_getcapenable(ifp)) != 0)
				if_sethwassistbits(ifp, CSUM_TSO, 0);
			else
				if_sethwassistbits(ifp, 0, CSUM_TSO);
			/* Immediately revoke TSO if the MTU forbids it. */
			if (if_getmtu(ifp) > RL_TSO_MTU &&
			    (if_getcapenable(ifp) & IFCAP_TSO4) != 0) {
				if_setcapenablebit(ifp, 0, IFCAP_TSO4);
				if_sethwassistbits(ifp, 0, CSUM_TSO);
			}
		}
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
			/* TSO over VLAN requires VLAN hardware tagging. */
			if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
				if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO);
			reinit = 1;
		}
		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
		    (mask & (IFCAP_HWCSUM | IFCAP_TSO4 |
		    IFCAP_VLAN_HWTSO)) != 0)
			reinit = 1;
		if ((mask & IFCAP_WOL) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL) != 0) {
			if ((mask & IFCAP_WOL_UCAST) != 0)
				if_togglecapenable(ifp, IFCAP_WOL_UCAST);
			if ((mask & IFCAP_WOL_MCAST) != 0)
				if_togglecapenable(ifp, IFCAP_WOL_MCAST);
			if ((mask & IFCAP_WOL_MAGIC) != 0)
				if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
		}
		if (reinit && if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			re_init_locked(sc);
		}
		RL_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
	    }
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*
 * Transmit watchdog, called from re_tick() once a second.  Fires
 * when the timer armed at TX start counts down to zero with frames
 * still outstanding.  First tries to reap the TX ring in case only
 * the completion interrupt was lost; otherwise resets the interface.
 */
static void
re_watchdog(struct rl_softc *sc)
{
	if_t ifp;

	RL_LOCK_ASSERT(sc);

	/* Timer not armed, or armed but not yet expired. */
	if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0)
		return;

	ifp = sc->rl_ifp;
	re_txeof(sc);
	/* Everything reaped: the interrupt was merely missed. */
	if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n");
		if (!if_sendq_empty(ifp))
			re_start_locked(ifp);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

	/* Drain RX, then reset and restart the interface. */
	re_rxeof(sc, NULL);
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	re_init_locked(sc);
	if (!if_sendq_empty(ifp))
		re_start_locked(ifp);
}
/*
* Stop the adapter and free any mbufs allocated to the
* RX and TX lists.
*/
static void
re_stop(struct rl_softc *sc)
{
	int i;
	if_t ifp;
	struct rl_txdesc *txd;
	struct rl_rxdesc *rxd;

	RL_LOCK_ASSERT(sc);

	ifp = sc->rl_ifp;

	/* Kill the watchdog and the periodic tick. */
	sc->rl_watchdog_timer = 0;
	callout_stop(&sc->rl_stat_callout);
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));

#ifdef DEV_NETMAP
	netmap_disable_all_rings(ifp);
#endif /* DEV_NETMAP */

	/*
	 * Disable accepting frames to put RX MAC into idle state.
	 * Otherwise it's possible to get frames while stop command
	 * execution is in progress and controller can DMA the frame
	 * to already freed RX buffer during that period.
	 */
	CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) &
	    ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI |
	    RL_RXCFG_RX_BROAD));

	if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) {
		/* Enable RXDV gate. */
		CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) |
		    0x00080000);
	}

	if ((sc->rl_flags & RL_FLAG_WAIT_TXPOLL) != 0) {
		/* Wait for any in-flight TX poll to finish before stopping. */
		for (i = RL_TIMEOUT; i > 0; i--) {
			if ((CSR_READ_1(sc, sc->rl_txstart) &
			    RL_TXSTART_START) == 0)
				break;
			DELAY(20);
		}
		if (i == 0)
			device_printf(sc->rl_dev,
			    "stopping TX poll timed out!\n");
		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	} else if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0) {
		/* These chips want an explicit stop request instead. */
		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB |
		    RL_CMD_RX_ENB);
		if ((sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) != 0) {
			for (i = RL_TIMEOUT; i > 0; i--) {
				if ((CSR_READ_4(sc, RL_TXCFG) &
				    RL_TXCFG_QUEUE_EMPTY) != 0)
					break;
				DELAY(100);
			}
			if (i == 0)
				device_printf(sc->rl_dev,
				    "stopping TXQ timed out!\n");
		}
	} else
		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	DELAY(1000);
	/* Mask and acknowledge all interrupts. */
	CSR_WRITE_2(sc, RL_IMR, 0x0000);
	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);

	/* Release any partially reassembled RX frame. */
	if (sc->rl_head != NULL) {
		m_freem(sc->rl_head);
		sc->rl_head = sc->rl_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		txd = &sc->rl_ldata.rl_tx_desc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		rxd = &sc->rl_ldata.rl_rx_desc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}

	/* Ditto for the jumbo RX ring, if this chip has one. */
	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
			rxd = &sc->rl_ldata.rl_jrx_desc[i];
			if (rxd->rx_m != NULL) {
				bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag,
				    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag,
				    rxd->rx_dmamap);
				m_freem(rxd->rx_m);
				rxd->rx_m = NULL;
			}
		}
	}
}
/*
* Device suspend routine. Stop the interface and save some PCI
* settings in case the BIOS doesn't restore them properly on
* resume.
*/
static int
re_suspend(device_t dev)
{
	struct rl_softc *sc = device_get_softc(dev);

	RL_LOCK(sc);
	/* Quiesce the chip, then arm wake-on-LAN before powering down. */
	re_stop(sc);
	re_setwol(sc);
	sc->suspended = 1;
	RL_UNLOCK(sc);

	return (0);
}
/*
* Device resume routine. Restore some PCI settings in case the BIOS
* doesn't, re-enable busmastering, and restart the interface if
* appropriate.
*/
static int
re_resume(device_t dev)
{
	struct rl_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);

	RL_LOCK(sc);

	ifp = sc->rl_ifp;
	/* Take controller out of sleep mode. */
	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
			CSR_WRITE_1(sc, RL_GPIO,
			    CSR_READ_1(sc, RL_GPIO) | 0x01);
	}

	/*
	 * Clear WOL matching such that normal Rx filtering
	 * wouldn't interfere with WOL patterns.
	 */
	re_clrwol(sc);

	/* reinitialize interface if necessary */
	if (if_getflags(ifp) & IFF_UP)
		re_init_locked(sc);

	sc->suspended = 0;
	RL_UNLOCK(sc);

	return (0);
}
/*
* Stop all chip I/O so that the kernel's probe routines don't
* get confused by errant DMAs when rebooting.
*/
static int
re_shutdown(device_t dev)
{
	struct rl_softc *sc;

	sc = device_get_softc(dev);

	RL_LOCK(sc);
	/* Quiesce the hardware before the system reboots. */
	re_stop(sc);
	/*
	 * Mark interface as down since otherwise we will panic if
	 * interrupt comes in later on, which can happen in some
	 * cases.
	 */
	if_setflagbits(sc->rl_ifp, 0, IFF_UP);
	/* Arm wake-on-LAN so the box can still be woken after reboot. */
	re_setwol(sc);
	RL_UNLOCK(sc);

	return (0);
}
static void
re_set_linkspeed(struct rl_softc *sc)
{
struct mii_softc *miisc;
struct mii_data *mii;
int aneg, i, phyno;
RL_LOCK_ASSERT(sc);
mii = device_get_softc(sc->rl_miibus);
mii_pollstat(mii);
aneg = 0;
if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
(IFM_ACTIVE | IFM_AVALID)) {
switch IFM_SUBTYPE(mii->mii_media_active) {
case IFM_10_T:
case IFM_100_TX:
return;
case IFM_1000_T:
aneg++;
break;
default:
break;
}
}
miisc = LIST_FIRST(&mii->mii_phys);
phyno = miisc->mii_phy;
LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
PHY_RESET(miisc);
re_miibus_writereg(sc->rl_dev, phyno, MII_100T2CR, 0);
re_miibus_writereg(sc->rl_dev, phyno,
MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
re_miibus_writereg(sc->rl_dev, phyno,
MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);
DELAY(1000);
if (aneg != 0) {
/*
* Poll link state until re(4) get a 10/100Mbps link.
*/
for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
mii_pollstat(mii);
if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
== (IFM_ACTIVE | IFM_AVALID)) {
switch (IFM_SUBTYPE(mii->mii_media_active)) {
case IFM_10_T:
case IFM_100_TX:
return;
default:
break;
}
}
RL_UNLOCK(sc);
pause("relnk", hz);
RL_LOCK(sc);
}
if (i == MII_ANEGTICKS_GIGE)
device_printf(sc->rl_dev,
"establishing a link failed, WOL may not work!");
}
/*
* No link, force MAC to have 100Mbps, full-duplex link.
* MAC does not require reprogramming on resolved speed/duplex,
* so this is just for completeness.
*/
mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
}
/*
 * Arm (or disarm) Wake On Lan according to the interface's enabled
 * WOL capabilities and put the controller into its low-power state.
 * Called with the softc lock held, just before powering down.
 */
static void
re_setwol(struct rl_softc *sc)
{
	if_t ifp;
	int pmc;
	uint16_t pmstat;
	uint8_t v;

	RL_LOCK_ASSERT(sc);

	/* Nothing to do without PCI power management support. */
	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
		return;

	ifp = sc->rl_ifp;
	/* Put controller into sleep mode. */
	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
			CSR_WRITE_1(sc, RL_GPIO,
			    CSR_READ_1(sc, RL_GPIO) & ~0x01);
	}
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) {
		if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) {
			/* Disable RXDV gate. */
			CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) &
			    ~0x00080000);
		}
		/* Reprogram the Rx filter so wakeup frames are accepted. */
		re_set_rxmode(sc);
		if ((sc->rl_flags & RL_FLAG_WOL_MANLINK) != 0)
			re_set_linkspeed(sc);
		if ((sc->rl_flags & RL_FLAG_WOLRXENB) != 0)
			CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB);
	}
	/* Enable config register write. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);

	/* Enable PME. */
	v = CSR_READ_1(sc, sc->rl_cfg1);
	v &= ~RL_CFG1_PME;
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
		v |= RL_CFG1_PME;
	CSR_WRITE_1(sc, sc->rl_cfg1, v);

	/* Wake on magic packet if that capability is enabled. */
	v = CSR_READ_1(sc, sc->rl_cfg3);
	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
		v |= RL_CFG3_WOL_MAGIC;
	CSR_WRITE_1(sc, sc->rl_cfg3, v);

	/* Wake on unicast/multicast/broadcast per enabled capabilities. */
	v = CSR_READ_1(sc, sc->rl_cfg5);
	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST |
	    RL_CFG5_WOL_LANWAKE);
	if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) != 0)
		v |= RL_CFG5_WOL_UCAST;
	if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) != 0)
		v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
		v |= RL_CFG5_WOL_LANWAKE;
	CSR_WRITE_1(sc, sc->rl_cfg5, v);

	/* Config register write done. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	/* Power down the PHY when WOL is not wanted, if supported. */
	if ((if_getcapenable(ifp) & IFCAP_WOL) == 0 &&
	    (sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0)
		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) & ~0x80);
	/*
	 * It seems that hardware resets its link speed to 100Mbps in
	 * power down mode so switching to 100Mbps in driver is not
	 * needed.
	 */

	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}
/*
 * Clear any Wake On Lan configuration left over from a previous boot
 * (firmware or OS) so the controller does not wake unexpectedly.
 * Called with the softc lock held.
 */
static void
re_clrwol(struct rl_softc *sc)
{
	int pmc;
	uint8_t v;

	RL_LOCK_ASSERT(sc);

	/* Nothing to clear without PCI power management support. */
	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
		return;

	/* Enable config register write. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);

	v = CSR_READ_1(sc, sc->rl_cfg3);
	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
	CSR_WRITE_1(sc, sc->rl_cfg3, v);

	/* Config register write done. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	/*
	 * NOTE(review): CFG5 is written after leaving config-write mode;
	 * this matches the order used here historically and suggests CFG5
	 * does not require the unlock — confirm against the datasheet.
	 */
	v = CSR_READ_1(sc, sc->rl_cfg5);
	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
	v &= ~RL_CFG5_WOL_LANWAKE;
	CSR_WRITE_1(sc, sc->rl_cfg5, v);
}
/*
 * Register the driver's sysctl nodes and pull in the per-device
 * int_rx_mod tunable, clamping it back to the default when the
 * configured value falls outside the supported timer range.
 */
static void
re_add_sysctls(struct rl_softc *sc)
{
	struct sysctl_ctx_list *sctx;
	struct sysctl_oid_list *kids;
	int rv;

	sctx = device_get_sysctl_ctx(sc->rl_dev);
	kids = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));
	SYSCTL_ADD_PROC(sctx, kids, OID_AUTO, "stats",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
	    re_sysctl_stats, "I", "Statistics Information");

	/* Rx interrupt moderation is only meaningful with MSI/MSI-X. */
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
		return;

	SYSCTL_ADD_PROC(sctx, kids, OID_AUTO, "int_rx_mod",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    &sc->rl_int_rx_mod, 0, sysctl_hw_re_int_mod, "I",
	    "re RX interrupt moderation");
	/* Pull in device tunables. */
	sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
	rv = resource_int_value(device_get_name(sc->rl_dev),
	    device_get_unit(sc->rl_dev), "int_rx_mod", &sc->rl_int_rx_mod);
	if (rv == 0 && (sc->rl_int_rx_mod < RL_TIMER_MIN ||
	    sc->rl_int_rx_mod > RL_TIMER_MAX)) {
		device_printf(sc->rl_dev, "int_rx_mod value out of "
		    "range; using default: %d\n", RL_TIMER_DEFAULT);
		sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
	}
}
/*
 * Sysctl handler: writing 1 to dev.re.N.stats asks the chip to DMA its
 * hardware statistics block into host memory and prints the counters
 * to the console.
 */
static int
re_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	struct rl_softc *sc;
	struct rl_stats *stats;
	int error, i, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || req->newptr == NULL)
		return (error);

	if (result == 1) {
		sc = (struct rl_softc *)arg1;
		RL_LOCK(sc);
		if ((if_getdrvflags(sc->rl_ifp) & IFF_DRV_RUNNING) == 0) {
			RL_UNLOCK(sc);
			/* Interface is down: print the last dumped values. */
			goto done;
		}
		bus_dmamap_sync(sc->rl_ldata.rl_stag,
		    sc->rl_ldata.rl_smap, BUS_DMASYNC_PREREAD);
		/* Point the chip at the stats buffer and start the dump. */
		CSR_WRITE_4(sc, RL_DUMPSTATS_HI,
		    RL_ADDR_HI(sc->rl_ldata.rl_stats_addr));
		CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
		    RL_ADDR_LO(sc->rl_ldata.rl_stats_addr));
		CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
		    RL_ADDR_LO(sc->rl_ldata.rl_stats_addr |
		    RL_DUMPSTATS_START));
		/* The chip clears the START bit when the DMA completes. */
		for (i = RL_TIMEOUT; i > 0; i--) {
			if ((CSR_READ_4(sc, RL_DUMPSTATS_LO) &
			    RL_DUMPSTATS_START) == 0)
				break;
			DELAY(1000);
		}
		bus_dmamap_sync(sc->rl_ldata.rl_stag,
		    sc->rl_ldata.rl_smap, BUS_DMASYNC_POSTREAD);
		RL_UNLOCK(sc);
		if (i == 0) {
			device_printf(sc->rl_dev,
			    "DUMP statistics request timed out\n");
			return (ETIMEDOUT);
		}
done:
		/* Counters are little-endian in the DMA'ed stats block. */
		stats = sc->rl_ldata.rl_stats;
		printf("%s statistics:\n", device_get_nameunit(sc->rl_dev));
		printf("Tx frames : %ju\n",
		    (uintmax_t)le64toh(stats->rl_tx_pkts));
		printf("Rx frames : %ju\n",
		    (uintmax_t)le64toh(stats->rl_rx_pkts));
		printf("Tx errors : %ju\n",
		    (uintmax_t)le64toh(stats->rl_tx_errs));
		printf("Rx errors : %u\n",
		    le32toh(stats->rl_rx_errs));
		printf("Rx missed frames : %u\n",
		    (uint32_t)le16toh(stats->rl_missed_pkts));
		printf("Rx frame alignment errs : %u\n",
		    (uint32_t)le16toh(stats->rl_rx_framealign_errs));
		printf("Tx single collisions : %u\n",
		    le32toh(stats->rl_tx_onecoll));
		printf("Tx multiple collisions : %u\n",
		    le32toh(stats->rl_tx_multicolls));
		printf("Rx unicast frames : %ju\n",
		    (uintmax_t)le64toh(stats->rl_rx_ucasts));
		printf("Rx broadcast frames : %ju\n",
		    (uintmax_t)le64toh(stats->rl_rx_bcasts));
		printf("Rx multicast frames : %u\n",
		    le32toh(stats->rl_rx_mcasts));
		printf("Tx aborts : %u\n",
		    (uint32_t)le16toh(stats->rl_tx_aborts));
		/*
		 * NOTE(review): the field is named rl_rx_underruns but is
		 * reported as Tx underruns; presumably this mirrors the
		 * hardware stats layout — confirm against if_rlreg.h.
		 */
		printf("Tx underruns : %u\n",
		    (uint32_t)le16toh(stats->rl_rx_underruns));
	}

	return (error);
}
/*
 * Generic handler for an integer sysctl constrained to [low, high].
 * Writes outside the allowed range are rejected with EINVAL and the
 * stored value is left untouched.
 */
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int rv, val;

	if (arg1 == NULL)
		return (EINVAL);

	val = *(int *)arg1;
	rv = sysctl_handle_int(oidp, &val, 0, req);
	if (rv != 0 || req->newptr == NULL)
		return (rv);

	if (val >= low && val <= high) {
		*(int *)arg1 = val;
		return (0);
	}
	return (EINVAL);
}
/*
 * Sysctl handler for dev.re.N.int_rx_mod: an integer clamped to the
 * hardware timer's valid range.
 */
static int
sysctl_hw_re_int_mod(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, RL_TIMER_MIN,
	    RL_TIMER_MAX));
}
#ifdef DEBUGNET
/*
 * Debugnet: report the Rx ring size, the in-flight packet budget and
 * the receive cluster size currently in use.
 */
static void
re_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
{
	struct rl_softc *sc;
	int clen;

	sc = if_getsoftc(ifp);
	RL_LOCK(sc);
	*nrxr = sc->rl_ldata.rl_rx_desc_cnt;
	*ncl = DEBUGNET_MAX_IN_FLIGHT;
	/* Jumbo-capable chips with a large MTU use 9k clusters. */
	if (if_getmtu(ifp) > RL_MTU &&
	    (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
		clen = MJUM9BYTES;
	else
		clen = MCLBYTES;
	*clsize = clen;
	RL_UNLOCK(sc);
}
/*
 * Debugnet event hook.  re(4) has no state to adjust on debugnet
 * start/finish, so this is a required no-op.
 */
static void
re_debugnet_event(if_t ifp __unused, enum debugnet_ev event __unused)
{
}
/*
 * Debugnet polled transmit: encapsulate one mbuf chain and start the
 * transmitter.  Fails with EBUSY unless the interface is running,
 * not stalled, and has link.
 */
static int
re_debugnet_transmit(if_t ifp, struct mbuf *m)
{
	struct rl_softc *sc;
	int rv;

	sc = if_getsoftc(ifp);
	if ((sc->rl_flags & RL_FLAG_LINK) == 0 ||
	    (if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	rv = re_encap(sc, &m);
	if (rv == 0)
		re_start_tx(sc);
	return (rv);
}
/*
 * Debugnet poll: reap completed transmissions and harvest received
 * frames.  EAGAIN from the receive path (nothing pending) is not
 * treated as an error.
 */
static int
re_debugnet_poll(if_t ifp, int count)
{
	struct rl_softc *sc;
	int rv;

	sc = if_getsoftc(ifp);
	if ((sc->rl_flags & RL_FLAG_LINK) == 0 ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return (EBUSY);

	re_txeof(sc);
	rv = re_rxeof(sc, NULL);
	if (rv == 0 || rv == EAGAIN)
		return (0);
	return (rv);
}
#endif /* DEBUGNET */
diff --git a/sys/dev/rl/if_rl.c b/sys/dev/rl/if_rl.c
index b8c21c3265de..cd60561ad2d8 100644
--- a/sys/dev/rl/if_rl.c
+++ b/sys/dev/rl/if_rl.c
@@ -1,2121 +1,2116 @@
/*-
* Copyright (c) 1997, 1998
* Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
/*
* RealTek 8129/8139 PCI NIC driver
*
* Supports several extremely cheap PCI 10/100 adapters based on
* the RealTek chipset. Datasheets can be obtained from
* www.realtek.com.tw.
*
* Written by Bill Paul <wpaul@ctr.columbia.edu>
* Electrical Engineering Department
* Columbia University, New York City
*/
/*
* The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
* probably the worst PCI ethernet controller ever made, with the possible
* exception of the FEAST chip made by SMC. The 8139 supports bus-master
* DMA, but it has a terrible interface that nullifies any performance
* gains that bus-master DMA usually offers.
*
* For transmission, the chip offers a series of four TX descriptor
* registers. Each transmit frame must be in a contiguous buffer, aligned
* on a longword (32-bit) boundary. This means we almost always have to
* do mbuf copies in order to transmit a frame, except in the unlikely
* case where a) the packet fits into a single mbuf, and b) the packet
* is 32-bit aligned within the mbuf's data area. The presence of only
* four descriptor registers means that we can never have more than four
* packets queued for transmission at any one time.
*
* Reception is not much better. The driver has to allocate a single large
* buffer area (up to 64K in size) into which the chip will DMA received
* frames. Because we don't know where within this region received packets
* will begin or end, we have no choice but to copy data from the buffer
* area into mbufs in order to pass the packets up to the higher protocol
* levels.
*
* It's impossible given this rotten design to really achieve decent
* performance at 100Mbps, unless you happen to have a 400Mhz PII or
* some equally overmuscled CPU to drive it.
*
* On the bright side, the 8139 does have a built-in PHY, although
* rather than using an MDIO serial interface like most other NICs, the
* PHY registers are directly accessible through the 8139's register
* space. The 8139 supports autonegotiation, as well as a 64-bit multicast
* filter.
*
* The 8129 chip is an older version of the 8139 that uses an external PHY
* chip. The 8129 has a serial MDIO interface for accessing the MII where
* the 8139 lets you directly access the on-board PHY registers. We need
* to select which interface to use depending on the chip type.
*/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
MODULE_DEPEND(rl, pci, 1, 1, 1);
MODULE_DEPEND(rl, ether, 1, 1, 1);
MODULE_DEPEND(rl, miibus, 1, 1, 1);
/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
#include <dev/rl/if_rlreg.h>
/*
* Various supported device vendors/types and their names.
*/
/*
 * NB: this table has NO all-zero sentinel entry.  Any walk over it
 * must be bounded with nitems(rl_devs); scanning for a NULL rl_name
 * would read past the end of the array.
 */
static const struct rl_type rl_devs[] = {
	{ RT_VENDORID, RT_DEVICEID_8129, RL_8129,
	    "RealTek 8129 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8139, RL_8139,
	    "RealTek 8139 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8139D, RL_8139,
	    "RealTek 8139 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8138, RL_8139,
	    "RealTek 8139 10/100BaseTX CardBus" },
	{ RT_VENDORID, RT_DEVICEID_8100, RL_8139,
	    "RealTek 8100 10/100BaseTX" },
	{ ACCTON_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
	    "Accton MPX 5030/5038 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_8139, RL_8139,
	    "Delta Electronics 8139 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, RL_8139,
	    "Addtron Technology 8139 10/100BaseTX" },
	{ DLINK_VENDORID, DLINK_DEVICEID_520TX_REVC1, RL_8139,
	    "D-Link DFE-520TX (rev. C1) 10/100BaseTX" },
	{ DLINK_VENDORID, DLINK_DEVICEID_530TXPLUS, RL_8139,
	    "D-Link DFE-530TX+ 10/100BaseTX" },
	{ DLINK_VENDORID, DLINK_DEVICEID_690TXD, RL_8139,
	    "D-Link DFE-690TXD 10/100BaseTX" },
	{ NORTEL_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
	    "Nortel Networks 10/100BaseTX" },
	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERCBTXD, RL_8139,
	    "Corega FEther CB-TXD" },
	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERIICBTXD, RL_8139,
	    "Corega FEtherII CB-TXD" },
	{ PEPPERCON_VENDORID, PEPPERCON_DEVICEID_ROLF, RL_8139,
	    "Peppercon AG ROL-F" },
	{ PLANEX_VENDORID, PLANEX_DEVICEID_FNW3603TX, RL_8139,
	    "Planex FNW-3603-TX" },
	{ PLANEX_VENDORID, PLANEX_DEVICEID_FNW3800TX, RL_8139,
	    "Planex FNW-3800-TX" },
	{ CP_VENDORID, RT_DEVICEID_8139, RL_8139,
	    "Compaq HNE-300" },
	{ LEVEL1_VENDORID, LEVEL1_DEVICEID_FPC0106TX, RL_8139,
	    "LevelOne FPC-0106TX" },
	{ EDIMAX_VENDORID, EDIMAX_DEVICEID_EP4103DL, RL_8139,
	    "Edimax EP-4103DL CardBus" }
};
static int rl_attach(device_t);
static int rl_detach(device_t);
static void rl_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int rl_dma_alloc(struct rl_softc *);
static void rl_dma_free(struct rl_softc *);
static void rl_eeprom_putbyte(struct rl_softc *, int);
static void rl_eeprom_getword(struct rl_softc *, int, uint16_t *);
static int rl_encap(struct rl_softc *, struct mbuf **);
static int rl_list_tx_init(struct rl_softc *);
static int rl_list_rx_init(struct rl_softc *);
static int rl_ifmedia_upd(if_t);
static void rl_ifmedia_sts(if_t, struct ifmediareq *);
static int rl_ioctl(if_t, u_long, caddr_t);
static void rl_intr(void *);
static void rl_init(void *);
static void rl_init_locked(struct rl_softc *sc);
static int rl_miibus_readreg(device_t, int, int);
static void rl_miibus_statchg(device_t);
static int rl_miibus_writereg(device_t, int, int, int);
#ifdef DEVICE_POLLING
static int rl_poll(if_t ifp, enum poll_cmd cmd, int count);
static int rl_poll_locked(if_t ifp, enum poll_cmd cmd, int count);
#endif
static int rl_probe(device_t);
static void rl_read_eeprom(struct rl_softc *, uint8_t *, int, int, int);
static void rl_reset(struct rl_softc *);
static int rl_resume(device_t);
static int rl_rxeof(struct rl_softc *);
static void rl_rxfilter(struct rl_softc *);
static int rl_shutdown(device_t);
static void rl_start(if_t);
static void rl_start_locked(if_t);
static void rl_stop(struct rl_softc *);
static int rl_suspend(device_t);
static void rl_tick(void *);
static void rl_txeof(struct rl_softc *);
static void rl_watchdog(struct rl_softc *);
static void rl_setwol(struct rl_softc *);
static void rl_clrwol(struct rl_softc *);
/*
* MII bit-bang glue
*/
static uint32_t rl_mii_bitbang_read(device_t);
static void rl_mii_bitbang_write(device_t, uint32_t);
static const struct mii_bitbang_ops rl_mii_bitbang_ops = {
rl_mii_bitbang_read,
rl_mii_bitbang_write,
{
RL_MII_DATAOUT, /* MII_BIT_MDO */
RL_MII_DATAIN, /* MII_BIT_MDI */
RL_MII_CLK, /* MII_BIT_MDC */
RL_MII_DIR, /* MII_BIT_DIR_HOST_PHY */
0, /* MII_BIT_DIR_PHY_HOST */
}
};
static device_method_t rl_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, rl_probe),
DEVMETHOD(device_attach, rl_attach),
DEVMETHOD(device_detach, rl_detach),
DEVMETHOD(device_suspend, rl_suspend),
DEVMETHOD(device_resume, rl_resume),
DEVMETHOD(device_shutdown, rl_shutdown),
/* MII interface */
DEVMETHOD(miibus_readreg, rl_miibus_readreg),
DEVMETHOD(miibus_writereg, rl_miibus_writereg),
DEVMETHOD(miibus_statchg, rl_miibus_statchg),
DEVMETHOD_END
};
static driver_t rl_driver = {
"rl",
rl_methods,
sizeof(struct rl_softc)
};
DRIVER_MODULE(rl, pci, rl_driver, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device", pci, rl, rl_devs,
nitems(rl_devs) - 1);
DRIVER_MODULE(rl, cardbus, rl_driver, 0, 0);
DRIVER_MODULE(miibus, rl, miibus_driver, 0, 0);
#define EE_SET(x) \
CSR_WRITE_1(sc, RL_EECMD, \
CSR_READ_1(sc, RL_EECMD) | x)
#define EE_CLR(x) \
CSR_WRITE_1(sc, RL_EECMD, \
CSR_READ_1(sc, RL_EECMD) & ~x)
/*
* Send a read command and address to the EEPROM, check for ACK.
*/
static void
rl_eeprom_putbyte(struct rl_softc *sc, int addr)
{
	int d, i;

	/* Combine the word address with the 6- or 8-bit read opcode. */
	d = addr | sc->rl_eecmd_read;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			EE_SET(RL_EE_DATAIN);
		} else {
			EE_CLR(RL_EE_DATAIN);
		}
		/* The DELAYs pace the bit-bang to the EEPROM's timing. */
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}
}
/*
* Read a word of data stored in the EEPROM at address 'addr.'
*/
static void
rl_eeprom_getword(struct rl_softc *sc, int addr, uint16_t *dest)
{
	int i;
	uint16_t word = 0;

	/* Enter EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Send address of word we want to read.
	 */
	rl_eeprom_putbyte(sc, addr);

	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		/* Sample one data bit per clock, MSB first. */
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	/* Turn off EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	*dest = word;
}
/*
* Read a sequence of words from the EEPROM.
*/
/*
 * Read 'cnt' consecutive 16-bit words from the EEPROM starting at
 * word offset 'off' into 'dest', optionally converting each word
 * from network byte order.
 */
static void
rl_read_eeprom(struct rl_softc *sc, uint8_t *dest, int off, int cnt, int swap)
{
	uint16_t *out;
	uint16_t w;
	int idx;

	for (idx = 0; idx < cnt; idx++) {
		w = 0;
		rl_eeprom_getword(sc, off + idx, &w);
		out = (uint16_t *)(dest + (idx * 2));
		*out = swap ? ntohs(w) : w;
	}
}
/*
* Read the MII serial port for the MII bit-bang module.
*/
static uint32_t
rl_mii_bitbang_read(device_t dev)
{
	struct rl_softc *sc;
	uint32_t val;

	sc = device_get_softc(dev);

	val = CSR_READ_1(sc, RL_MII);
	/* Order this access against surrounding MII register I/O. */
	CSR_BARRIER(sc, RL_MII, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	return (val);
}
/*
* Write the MII serial port for the MII bit-bang module.
*/
static void
rl_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct rl_softc *sc;

	sc = device_get_softc(dev);

	CSR_WRITE_1(sc, RL_MII, val);
	/* Flush the write before the next bit-bang transition. */
	CSR_BARRIER(sc, RL_MII, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
/*
 * Read a PHY register.  On the 8139 the internal PHY's registers are
 * mapped into MAC register space, so translate the MII register
 * number and read directly; on the 8129 (external PHY) fall back to
 * the bit-banged MDIO interface.
 */
static int
rl_miibus_readreg(device_t dev, int phy, int reg)
{
	struct rl_softc *sc;
	uint16_t rl8139_reg;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8139) {
		switch (reg) {
		case MII_BMCR:
			rl8139_reg = RL_BMCR;
			break;
		case MII_BMSR:
			rl8139_reg = RL_BMSR;
			break;
		case MII_ANAR:
			rl8139_reg = RL_ANAR;
			break;
		case MII_ANER:
			rl8139_reg = RL_ANER;
			break;
		case MII_ANLPAR:
			rl8139_reg = RL_LPAR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			/* The internal PHY exposes no ID registers. */
			return (0);
		/*
		 * Allow the rlphy driver to read the media status
		 * register. If we have a link partner which does not
		 * support NWAY, this is the register which will tell
		 * us the results of parallel detection.
		 */
		case RL_MEDIASTAT:
			return (CSR_READ_1(sc, RL_MEDIASTAT));
		default:
			device_printf(sc->rl_dev, "bad phy register\n");
			return (0);
		}
		return (CSR_READ_2(sc, rl8139_reg));
	}

	return (mii_bitbang_readreg(dev, &rl_mii_bitbang_ops, phy, reg));
}
/*
 * Write a PHY register.  On the 8139 the internal PHY is exposed as
 * MAC registers, so translate the MII register number and write it
 * directly; the read-only PHY ID registers are silently ignored.
 * On the 8129 (external PHY) use the bit-banged MDIO interface.
 */
static int
rl_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct rl_softc *sc;
	uint16_t rl8139_reg;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8139) {
		switch (reg) {
		case MII_BMCR:
			rl8139_reg = RL_BMCR;
			break;
		case MII_BMSR:
			rl8139_reg = RL_BMSR;
			break;
		case MII_ANAR:
			rl8139_reg = RL_ANAR;
			break;
		case MII_ANER:
			rl8139_reg = RL_ANER;
			break;
		case MII_ANLPAR:
			rl8139_reg = RL_LPAR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			/* Read-only ID registers; nothing to write. */
			return (0);
		default:
			device_printf(sc->rl_dev, "bad phy register\n");
			return (0);
		}
		CSR_WRITE_2(sc, rl8139_reg, data);
		return (0);
	}

	mii_bitbang_writereg(dev, &rl_mii_bitbang_ops, phy, reg, data);

	return (0);
}
/*
 * MII status-change callback: track link state in the softc flags.
 * Only a resolved 10baseT or 100baseTX link counts as "link up".
 */
static void
rl_miibus_statchg(device_t dev)
{
	struct rl_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	int subtype;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->rl_miibus);
	ifp = sc->rl_ifp;
	if (mii == NULL || ifp == NULL ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	sc->rl_flags &= ~RL_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		subtype = IFM_SUBTYPE(mii->mii_media_active);
		if (subtype == IFM_10_T || subtype == IFM_100_TX)
			sc->rl_flags |= RL_FLAG_LINK;
	}
	/*
	 * RealTek controllers do not provide any interface to
	 * Tx/Rx MACs for resolved speed, duplex and flow-control
	 * parameters.
	 */
}
/*
 * if_foreach_llmaddr() callback: fold one link-level multicast
 * address into the 64-bit hash filter accumulated in 'arg'.
 */
static u_int
rl_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *filt = arg;
	int bit;

	/* The filter is indexed by the top 6 bits of the CRC. */
	bit = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
	filt[bit / 32] |= (1 << (bit % 32));

	return (1);
}
/*
* Program the 64-bit multicast hash filter.
*/
static void
rl_rxfilter(struct rl_softc *sc)
{
	if_t ifp = sc->rl_ifp;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t rxfilt;

	RL_LOCK_ASSERT(sc);

	rxfilt = CSR_READ_4(sc, RL_RXCFG);
	rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD |
	    RL_RXCFG_RX_MULTI);
	/* Always accept frames destined for this host. */
	rxfilt |= RL_RXCFG_RX_INDIV;
	/* Set capture broadcast bit to capture broadcast frames. */
	if (if_getflags(ifp) & IFF_BROADCAST)
		rxfilt |= RL_RXCFG_RX_BROAD;
	if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) {
		/* Accept all multicast; promiscuous also takes all unicast. */
		rxfilt |= RL_RXCFG_RX_MULTI;
		if (if_getflags(ifp) & IFF_PROMISC)
			rxfilt |= RL_RXCFG_RX_ALLPHYS;
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		/* Now program new ones. */
		if_foreach_llmaddr(ifp, rl_hash_maddr, hashes);
		if (hashes[0] != 0 || hashes[1] != 0)
			rxfilt |= RL_RXCFG_RX_MULTI;
	}

	/* Load the hash filter halves, then the updated Rx config. */
	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
}
/*
 * Issue a software reset and poll until the chip reports completion.
 * Called with the softc lock held.
 */
static void
rl_reset(struct rl_softc *sc)
{
	int i;

	RL_LOCK_ASSERT(sc);

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		/* The chip clears the reset bit when it is done. */
		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
			break;
	}
	if (i == RL_TIMEOUT)
		device_printf(sc->rl_dev, "reset never completed!\n");
}
/*
* Probe for a RealTek 8129/8139 chip. Check the PCI vendor and device
* IDs against our list and return a device name if we find a match.
*/
/*
 * Match the PCI vendor/device ID pair against the supported-device
 * table and set the probe description on success.  The 8139C+
 * (revision 0x20) is deliberately rejected so re(4) can claim it.
 */
static int
rl_probe(device_t dev)
{
	const struct rl_type *entry;
	uint16_t devid, revid, vendor;
	int i;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	revid = pci_get_revid(dev);

	/* 8139C+, let re(4) take care of this device. */
	if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139 &&
	    revid == 0x20)
		return (ENXIO);

	for (i = 0; i < nitems(rl_devs); i++) {
		entry = &rl_devs[i];
		if (vendor == entry->rl_vid && devid == entry->rl_did) {
			device_set_desc(dev, entry->rl_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
/* Context passed to rl_dmamap_cb() to return the loaded bus address. */
struct rl_dmamap_arg {
	bus_addr_t	rl_busaddr;
};
/*
 * bus_dmamap_load() callback: record the single segment's bus address
 * in the caller-provided rl_dmamap_arg.  On load error the context is
 * left untouched.
 */
static void
rl_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct rl_dmamap_arg *ctx = arg;

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	ctx->rl_busaddr = segs[0].ds_addr;
}
/*
* Attach the interface. Allocate softc structures, do ifmedia
* setup and ethernet/BPF attach.
*/
static int
rl_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint16_t as[3];
	if_t ifp;
	struct rl_softc *sc;
	const struct rl_type *t;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	int error = 0, hwrev, i, phy, pmc, rid;
	int prefer_iomap, unit;
	uint16_t rl_did = 0;
	char tn[32];

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->rl_dev = dev;

	/* Optional twister tuning, controlled by a per-device tunable. */
	sc->rl_twister_enable = 0;
	snprintf(tn, sizeof(tn), "dev.rl.%d.twister_enable", unit);
	TUNABLE_INT_FETCH(tn, &sc->rl_twister_enable);
	ctx = device_get_sysctl_ctx(sc->rl_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "twister_enable", CTLFLAG_RD,
	   &sc->rl_twister_enable, 0, "");

	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);

	pci_enable_busmaster(dev);

	/*
	 * Map control/status registers.
	 * Default to using PIO access for this driver. On SMP systems,
	 * there appear to be problems with memory mapped mode: it looks
	 * like doing too many memory mapped access back to back in rapid
	 * succession can hang the bus. I'm inclined to blame this on
	 * crummy design/construction on the part of RealTek. Memory
	 * mapped mode does appear to work on uniprocessor systems though.
	 */
	prefer_iomap = 1;
	snprintf(tn, sizeof(tn), "dev.rl.%d.prefer_iomap", unit);
	TUNABLE_INT_FETCH(tn, &prefer_iomap);
	if (prefer_iomap) {
		sc->rl_res_id = PCIR_BAR(0);
		sc->rl_res_type = SYS_RES_IOPORT;
		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
		    &sc->rl_res_id, RF_ACTIVE);
	}
	if (prefer_iomap == 0 || sc->rl_res == NULL) {
		/* Fall back to (or prefer) memory-mapped access. */
		sc->rl_res_id = PCIR_BAR(1);
		sc->rl_res_type = SYS_RES_MEMORY;
		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
		    &sc->rl_res_id, RF_ACTIVE);
	}
	if (sc->rl_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

#ifdef notdef
	/*
	 * Detect the Realtek 8139B. For some reason, this chip is very
	 * unstable when left to autoselect the media
	 * The best workaround is to set the device to the required
	 * media type or to set it to the 10 Meg speed.
	 */
	if ((rman_get_end(sc->rl_res) - rman_get_start(sc->rl_res)) == 0xFF)
		device_printf(dev,
"Realtek 8139B detected. Warning, this may be unstable in autoselect mode\n");
#endif

	sc->rl_btag = rman_get_bustag(sc->rl_res);
	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);

	/* Allocate interrupt */
	rid = 0;
	sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->rl_irq[0] == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	sc->rl_cfg0 = RL_8139_CFG0;
	sc->rl_cfg1 = RL_8139_CFG1;
	sc->rl_cfg2 = 0;
	sc->rl_cfg3 = RL_8139_CFG3;
	sc->rl_cfg4 = RL_8139_CFG4;
	sc->rl_cfg5 = RL_8139_CFG5;

	/*
	 * Reset the adapter. Only take the lock here as it's needed in
	 * order to call rl_reset().
	 */
	RL_LOCK(sc);
	rl_reset(sc);
	RL_UNLOCK(sc);

	/* The 8129 uses a 6-bit EEPROM read opcode, the 8139 an 8-bit one. */
	sc->rl_eecmd_read = RL_EECMD_READ_6BIT;
	rl_read_eeprom(sc, (uint8_t *)&rl_did, 0, 1, 0);
	if (rl_did != 0x8129)
		sc->rl_eecmd_read = RL_EECMD_READ_8BIT;

	/*
	 * Get station address from the EEPROM.
	 */
	rl_read_eeprom(sc, (uint8_t *)as, RL_EE_EADDR, 3, 0);
	for (i = 0; i < 3; i++) {
		eaddr[(i * 2) + 0] = as[i] & 0xff;
		eaddr[(i * 2) + 1] = as[i] >> 8;
	}

	/*
	 * Now read the exact device type from the EEPROM to find
	 * out if it's an 8129 or 8139.
	 */
	rl_read_eeprom(sc, (uint8_t *)&rl_did, RL_EE_PCI_DID, 1, 0);

	/*
	 * Bound the walk with nitems(): rl_devs has no sentinel entry,
	 * so scanning for a NULL rl_name would run off the end of the
	 * table when the ID matches nothing (same idiom as rl_probe()).
	 */
	sc->rl_type = 0;
	for (i = 0; i < nitems(rl_devs); i++) {
		t = &rl_devs[i];
		if (rl_did == t->rl_did) {
			sc->rl_type = t->rl_basetype;
			break;
		}
	}

	if (sc->rl_type == 0) {
		device_printf(dev, "unknown device ID: %x assuming 8139\n",
		    rl_did);
		sc->rl_type = RL_8139;
		/*
		 * Read RL_IDR register to get ethernet address as accessing
		 * EEPROM may not extract correct address.
		 */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
	}

	if ((error = rl_dma_alloc(sc)) != 0)
		goto fail;

	/* if_alloc(9) cannot fail. */
	ifp = sc->rl_ifp = if_alloc(IFT_ETHER);

#define RL_PHYAD_INTERNAL	0

	/* Do MII setup */
	phy = MII_PHY_ANY;
	if (sc->rl_type == RL_8139)
		phy = RL_PHYAD_INTERNAL;
	error = mii_attach(dev, &sc->rl_miibus, ifp, rl_ifmedia_upd,
	    rl_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setmtu(ifp, ETHERMTU);
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, rl_ioctl);
	if_setstartfn(ifp, rl_start);
	if_setinitfn(ifp, rl_init);
	if_setcapabilities(ifp, IFCAP_VLAN_MTU);
	/* Check WOL for RTL8139B or newer controllers. */
	if (sc->rl_type == RL_8139 &&
	    pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
		hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
		switch (hwrev) {
		case RL_HWREV_8139B:
		case RL_HWREV_8130:
		case RL_HWREV_8139C:
		case RL_HWREV_8139D:
		case RL_HWREV_8101:
		case RL_HWREV_8100:
			if_setcapabilitiesbit(ifp, IFCAP_WOL, 0);
			/* Disable WOL. */
			rl_clrwol(sc);
			break;
		default:
			break;
		}
	}
	if_setcapenable(ifp, if_getcapabilities(ifp));
	if_setcapenablebit(ifp, 0, (IFCAP_WOL_UCAST | IFCAP_WOL_MCAST));
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif
	if_setsendqlen(ifp, ifqmaxlen);
	if_setsendqready(ifp);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, rl_intr, sc, &sc->rl_intrhand[0]);
	if (error) {
		device_printf(sc->rl_dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
	}

fail:
	if (error)
		rl_detach(dev);

	return (error);
}
/*
* Shutdown hardware and free up resources. This can be called any
* time after the mutex has been initialized. It is called in both
* the error case in attach and the normal detach case so it needs
* to be careful about only freeing resources that have actually been
* allocated.
*/
static int
rl_detach(device_t dev)
{
	struct rl_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	ifp = sc->rl_ifp;

	KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized"));

#ifdef DEVICE_POLLING
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		RL_LOCK(sc);
		rl_stop(sc);
		RL_UNLOCK(sc);
		/* Drain after rl_stop() so the tick callout cannot rearm. */
		callout_drain(&sc->rl_stat_callout);
		ether_ifdetach(ifp);
	}
#if 0
	sc->suspended = 1;
#endif
	if (sc->rl_miibus)
		device_delete_child(dev, sc->rl_miibus);
	bus_generic_detach(dev);

	/* Tear down the interrupt before releasing its resource. */
	if (sc->rl_intrhand[0])
		bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
	if (sc->rl_irq[0])
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq[0]);
	if (sc->rl_res)
		bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
		    sc->rl_res);

	if (ifp)
		if_free(ifp);

	rl_dma_free(sc);

	mtx_destroy(&sc->rl_mtx);

	return (0);
}
/*
 * Create the DMA tags and allocate the Rx memory block and per-slot Tx
 * maps used by the 8139-style ring.  On any failure the partially
 * created state is left for rl_dma_free() (called via rl_detach()) to
 * tear down.  Returns 0 on success or a busdma error code.
 */
static int
rl_dma_alloc(struct rl_softc *sc)
{
	struct rl_dmamap_arg ctx;
	int error, i;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->rl_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rl_parent_tag);
	if (error) {
		device_printf(sc->rl_dev,
		    "failed to create parent DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for Rx memory block. */
	error = bus_dma_tag_create(sc->rl_parent_tag,	/* parent */
	    RL_RX_8139_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, 1, /* maxsize,nsegments */
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, /* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rl_cdata.rl_rx_tag);
	if (error) {
		device_printf(sc->rl_dev,
		    "failed to create Rx memory block DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for Tx buffer. */
	error = bus_dma_tag_create(sc->rl_parent_tag,	/* parent */
	    RL_TX_8139_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rl_cdata.rl_tx_tag);
	if (error) {
		device_printf(sc->rl_dev, "failed to create Tx DMA tag.\n");
		goto fail;
	}

	/*
	 * Allocate DMA'able memory and load DMA map for Rx memory block.
	 */
	error = bus_dmamem_alloc(sc->rl_cdata.rl_rx_tag,
	    (void **)&sc->rl_cdata.rl_rx_buf, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_cdata.rl_rx_dmamap);
	if (error != 0) {
		device_printf(sc->rl_dev,
		    "failed to allocate Rx DMA memory block.\n");
		goto fail;
	}
	/* rl_dmamap_cb() stores the single segment's bus address in ctx. */
	ctx.rl_busaddr = 0;
	error = bus_dmamap_load(sc->rl_cdata.rl_rx_tag,
	    sc->rl_cdata.rl_rx_dmamap, sc->rl_cdata.rl_rx_buf,
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, rl_dmamap_cb, &ctx,
	    BUS_DMA_NOWAIT);
	if (error != 0 || ctx.rl_busaddr == 0) {
		device_printf(sc->rl_dev,
		    "could not load Rx DMA memory block.\n");
		goto fail;
	}
	sc->rl_cdata.rl_rx_buf_paddr = ctx.rl_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		sc->rl_cdata.rl_tx_chain[i] = NULL;
		sc->rl_cdata.rl_tx_dmamap[i] = NULL;
		error = bus_dmamap_create(sc->rl_cdata.rl_tx_tag, 0,
		    &sc->rl_cdata.rl_tx_dmamap[i]);
		if (error != 0) {
			device_printf(sc->rl_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}

	/* Leave a few bytes before the start of the RX ring buffer. */
	sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf;
	sc->rl_cdata.rl_rx_buf += RL_RX_8139_BUF_RESERVE;

fail:
	return (error);
}
/*
 * Release everything rl_dma_alloc() created: the Rx memory block and
 * its map/tag, the per-slot Tx maps and tag, and the parent tag.  Safe
 * to call with a partially initialized softc.
 */
static void
rl_dma_free(struct rl_softc *sc)
{
	int i;

	/* Rx memory block. */
	if (sc->rl_cdata.rl_rx_tag != NULL) {
		if (sc->rl_cdata.rl_rx_buf_paddr != 0)
			bus_dmamap_unload(sc->rl_cdata.rl_rx_tag,
			    sc->rl_cdata.rl_rx_dmamap);
		if (sc->rl_cdata.rl_rx_buf_ptr != NULL)
			bus_dmamem_free(sc->rl_cdata.rl_rx_tag,
			    sc->rl_cdata.rl_rx_buf_ptr,
			    sc->rl_cdata.rl_rx_dmamap);
		sc->rl_cdata.rl_rx_buf_ptr = NULL;
		sc->rl_cdata.rl_rx_buf = NULL;
		sc->rl_cdata.rl_rx_buf_paddr = 0;
		bus_dma_tag_destroy(sc->rl_cdata.rl_rx_tag);
		/*
		 * Fix: clear the Rx tag we just destroyed.  The previous
		 * code cleared rl_tx_tag here, which left rl_rx_tag
		 * dangling and skipped the Tx cleanup below entirely,
		 * leaking the Tx dmamaps and the Tx tag.
		 */
		sc->rl_cdata.rl_rx_tag = NULL;
	}

	/* Tx buffers. */
	if (sc->rl_cdata.rl_tx_tag != NULL) {
		for (i = 0; i < RL_TX_LIST_CNT; i++) {
			if (sc->rl_cdata.rl_tx_dmamap[i] != NULL) {
				bus_dmamap_destroy(
				    sc->rl_cdata.rl_tx_tag,
				    sc->rl_cdata.rl_tx_dmamap[i]);
				sc->rl_cdata.rl_tx_dmamap[i] = NULL;
			}
		}
		bus_dma_tag_destroy(sc->rl_cdata.rl_tx_tag);
		sc->rl_cdata.rl_tx_tag = NULL;
	}

	if (sc->rl_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->rl_parent_tag);
		sc->rl_parent_tag = NULL;
	}
}
/*
* Initialize the transmit descriptors.
*/
/*
 * Reset the software Tx bookkeeping and the chip's four Tx start
 * address registers so transmission begins at slot zero.
 */
static int
rl_list_tx_init(struct rl_softc *sc)
{
	struct rl_chain_data *cd;
	int slot;

	RL_LOCK_ASSERT(sc);

	cd = &sc->rl_cdata;
	/* Drop any queued mbuf pointers and zero each TXADDR register. */
	for (slot = 0; slot < RL_TX_LIST_CNT; slot++) {
		cd->rl_tx_chain[slot] = NULL;
		CSR_WRITE_4(sc,
		    RL_TXADDR0 + (slot * sizeof(uint32_t)), 0x0000000);
	}

	/* Producer and consumer both restart at the first slot. */
	cd->cur_tx = 0;
	cd->last_tx = 0;

	return (0);
}
/*
 * Clear the Rx memory block (including the trailing guard area) and
 * sync it so the chip starts DMA into a clean buffer.
 */
static int
rl_list_rx_init(struct rl_softc *sc)
{

	RL_LOCK_ASSERT(sc);

	bzero(sc->rl_cdata.rl_rx_buf_ptr,
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ);
	/*
	 * Fix: sync with the Rx tag that rl_rx_dmamap was created from.
	 * The previous code passed rl_tx_tag here, mismatching tag and
	 * map (undefined behavior in busdma; rl_rxeof syncs with
	 * rl_rx_tag as expected).
	 */
	bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
/*
* A frame has been uploaded: pass the resulting mbuf chain up to
* the higher level protocols.
*
* You know there's something wrong with a PCI bus-master chip design
* when you have to use m_devget().
*
* The receive operation is badly documented in the datasheet, so I'll
* attempt to document it here. The driver provides a buffer area and
* places its base address in the RX buffer start address register.
* The chip then begins copying frames into the RX buffer. Each frame
* is preceded by a 32-bit RX status word which specifies the length
* of the frame and certain other status bits. Each frame (starting with
* the status word) is also 32-bit aligned. The frame length is in the
* first 16 bits of the status word; the lower 15 bits correspond with
* the 'rx status register' mentioned in the datasheet.
*
* Note: to make the Alpha happy, the frame payload needs to be aligned
* on a 32-bit boundary. To achieve this, we pass RL_ETHER_ALIGN (2 bytes)
* as the offset argument to m_devget().
*/
static int
rl_rxeof(struct rl_softc *sc)
{
	struct mbuf *m;
	if_t ifp = sc->rl_ifp;
	uint8_t *rxbufpos;
	int total_len = 0;
	int wrap = 0;
	int rx_npkts = 0;
	uint32_t rxstat;
	uint16_t cur_rx;
	uint16_t limit;
	uint16_t max_bytes, rx_bytes = 0;

	RL_LOCK_ASSERT(sc);

	/* Make the chip's writes into the Rx block visible to the CPU. */
	bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* The read pointer register lags the real position by 16 bytes. */
	cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN;

	/* Do not try to read past this point. */
	limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN;

	/* Bytes between our read pointer and the chip's write pointer. */
	if (limit < cur_rx)
		max_bytes = (RL_RXBUFLEN - cur_rx) + limit;
	else
		max_bytes = limit - cur_rx;

	while((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) {
#ifdef DEVICE_POLLING
		/* In polling mode, honor the per-call packet budget. */
		if (if_getcapenable(ifp) & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		/* Each frame is preceded by a 32-bit status/length word. */
		rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx;
		rxstat = le32toh(*(uint32_t *)rxbufpos);

		/*
		 * Here's a totally undocumented fact for you. When the
		 * RealTek chip is in the process of copying a packet into
		 * RAM for you, the length will be 0xfff0. If you spot a
		 * packet header with this value, you need to stop. The
		 * datasheet makes absolutely no mention of this and
		 * RealTek should be shot for this.
		 */
		total_len = rxstat >> 16;
		if (total_len == RL_RXSTAT_UNFINISHED)
			break;

		/* Bad status or implausible length: reinit the whole ring. */
		if (!(rxstat & RL_RXSTAT_RXOK) ||
		    total_len < ETHER_MIN_LEN ||
		    total_len > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			rl_init_locked(sc);
			return (rx_npkts);
		}

		/* No errors; receive the packet. */
		rx_bytes += total_len + 4;

		/*
		 * XXX The RealTek chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		/*
		 * Avoid trying to read more bytes than we know
		 * the chip has prepared for us.
		 */
		if (rx_bytes > max_bytes)
			break;

		/* Payload starts just past the status word; handle wrap. */
		rxbufpos = sc->rl_cdata.rl_rx_buf +
		    ((cur_rx + sizeof(uint32_t)) % RL_RXBUFLEN);
		if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN))
			rxbufpos = sc->rl_cdata.rl_rx_buf;

		/* Bytes remaining before the end of the ring buffer. */
		wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos;
		if (total_len > wrap) {
			/* Frame wraps: copy the two pieces separately. */
			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
			    NULL);
			if (m != NULL)
				m_copyback(m, wrap, total_len - wrap,
				    sc->rl_cdata.rl_rx_buf);
			cur_rx = (total_len - wrap + ETHER_CRC_LEN);
		} else {
			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
			    NULL);
			cur_rx += total_len + 4 + ETHER_CRC_LEN;
		}

		/* Round up to 32-bit boundary. */
		cur_rx = (cur_rx + 3) & ~3;
		/* Write back the (16-byte lagged) read pointer. */
		CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16);

		if (m == NULL) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			continue;
		}

		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		/* Drop the lock across the network-stack input call. */
		RL_UNLOCK(sc);
		if_input(ifp, m);
		RL_LOCK(sc);
		rx_npkts++;
	}

	/* No need to sync Rx memory block as we didn't modify it. */
	return (rx_npkts);
}
/*
* A frame was downloaded to the chip. It's safe for us to clean up
* the list buffers.
*/
static void
rl_txeof(struct rl_softc *sc)
{
	if_t ifp = sc->rl_ifp;
	uint32_t txstat;

	RL_LOCK_ASSERT(sc);

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded.
	 */
	do {
		/* No mbuf queued at the oldest slot: nothing to reap. */
		if (RL_LAST_TXMBUF(sc) == NULL)
			break;
		txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc));
		/* Slot not yet completed (neither OK nor error bits set). */
		if (!(txstat & (RL_TXSTAT_TX_OK|
		    RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT)))
			break;

		/* Collision count lives in bits 24-27 of the status word. */
		if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & RL_TXSTAT_COLLCNT) >> 24);

		bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc),
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc));
		m_freem(RL_LAST_TXMBUF(sc));
		RL_LAST_TXMBUF(sc) = NULL;
		/*
		 * If there was a transmit underrun, bump the TX threshold.
		 * Make sure not to overflow the 63 * 32byte we can address
		 * with the 6 available bit.
		 */
		if ((txstat & RL_TXSTAT_TX_UNDERRUN) &&
		    (sc->rl_txthresh < 2016))
			sc->rl_txthresh += 32;
		if (txstat & RL_TXSTAT_TX_OK)
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		else {
			int oldthresh;
			/* Abort/out-of-window: reset Tx config and reinit. */
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			if ((txstat & RL_TXSTAT_TXABRT) ||
			    (txstat & RL_TXSTAT_OUTOFWIN))
				CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
			oldthresh = sc->rl_txthresh;
			/* error recovery */
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			rl_init_locked(sc);
			/* restore original threshold */
			sc->rl_txthresh = oldthresh;
			return;
		}
		RL_INC(sc->rl_cdata.last_tx);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	} while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx);

	/* All slots drained: cancel the transmit watchdog. */
	if (RL_LAST_TXMBUF(sc) == NULL)
		sc->rl_watchdog_timer = 0;
}
/*
 * One step of the RTL8139 "twister" tuning state machine, driven from
 * rl_tick().  Each call advances sc->rl_twister until it reaches DONE.
 */
static void
rl_twister_update(struct rl_softc *sc)
{
	uint16_t linktest;
	/*
	 * Table provided by RealTek (Kinston <shangh@realtek.com.tw>) for
	 * Linux driver.  Values undocumented otherwise.
	 */
	static const uint32_t param[4][4] = {
		{0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
		{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
		{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
		{0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
	};

	/*
	 * Tune the so-called twister registers of the RTL8139.  These
	 * are used to compensate for impedance mismatches.  The
	 * method for tuning these registers is undocumented and the
	 * following procedure is collected from public sources.
	 */
	switch (sc->rl_twister)
	{
	case CHK_LINK:
		/*
		 * If we have a sufficient link, then we can proceed in
		 * the state machine to the next stage.  If not, then
		 * disable further tuning after writing sane defaults.
		 */
		if (CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_LINK_OK) {
			CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_OFF_CMD);
			sc->rl_twister = FIND_ROW;
		} else {
			CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_CMD);
			CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
			CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
			CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
			sc->rl_twister = DONE;
		}
		break;
	case FIND_ROW:
		/*
		 * Read how long it took to see the echo to find the tuning
		 * row to use.
		 */
		linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
		if (linktest == RL_CSCFG_ROW3)
			sc->rl_twist_row = 3;
		else if (linktest == RL_CSCFG_ROW2)
			sc->rl_twist_row = 2;
		else if (linktest == RL_CSCFG_ROW1)
			sc->rl_twist_row = 1;
		else
			sc->rl_twist_row = 0;
		sc->rl_twist_col = 0;
		sc->rl_twister = SET_PARAM;
		break;
	case SET_PARAM:
		/* Write one table entry per tick, four entries per row. */
		if (sc->rl_twist_col == 0)
			CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
		CSR_WRITE_4(sc, RL_PARA7C,
		    param[sc->rl_twist_row][sc->rl_twist_col]);
		if (++sc->rl_twist_col == 4) {
			/* Row 3 (longest cable) needs a confirmation pass. */
			if (sc->rl_twist_row == 3)
				sc->rl_twister = RECHK_LONG;
			else
				sc->rl_twister = DONE;
		}
		break;
	case RECHK_LONG:
		/*
		 * For long cables, we have to double check to make sure we
		 * don't mistune.
		 */
		linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
		if (linktest == RL_CSCFG_ROW3)
			sc->rl_twister = DONE;
		else {
			CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_RETUNE);
			sc->rl_twister = RETUNE;
		}
		break;
	case RETUNE:
		/* Retune for a shorter cable (try column 2) */
		CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
		CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
		CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
		CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
		sc->rl_twist_row--;
		sc->rl_twist_col = 0;
		sc->rl_twister = SET_PARAM;
		break;
	case DONE:
		break;
	}
}
/*
 * Periodic housekeeping, run from rl_stat_callout with the softc lock
 * held: polls the MII, drives the twister state machine, and runs the
 * transmit watchdog.  Reschedules itself every hz ticks (hz/10 while
 * twister calibration is in progress).
 */
static void
rl_tick(void *xsc)
{
	struct rl_softc *sc = xsc;
	struct mii_data *mii;
	int ticks;

	RL_LOCK_ASSERT(sc);
	/*
	 * If we're doing the twister cable calibration, then we need to defer
	 * watchdog timeouts.  This is a no-op in normal operations, but
	 * can falsely trigger when the cable calibration takes a while and
	 * there was traffic ready to go when rl was started.
	 *
	 * We don't defer mii_tick since that updates the mii status, which
	 * helps the twister process, at least according to similar patches
	 * for the Linux driver I found online while doing the fixes.  Worst
	 * case is a few extra mii reads during calibration.
	 */
	mii = device_get_softc(sc->rl_miibus);
	mii_tick(mii);
	/* Re-run the link-state handler until a link is established. */
	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
		rl_miibus_statchg(sc->rl_dev);
	if (sc->rl_twister_enable) {
		if (sc->rl_twister == DONE)
			rl_watchdog(sc);
		else
			rl_twister_update(sc);
		if (sc->rl_twister == DONE)
			ticks = hz;
		else
			ticks = hz / 10;
	} else {
		rl_watchdog(sc);
		ticks = hz;
	}

	callout_reset(&sc->rl_stat_callout, ticks, rl_tick, sc);
}
#ifdef DEVICE_POLLING
/*
 * DEVICE_POLLING entry point: take the softc lock and, if the
 * interface is up and running, hand off to rl_poll_locked().
 */
static int
rl_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc;
	int npkts;

	sc = if_getsoftc(ifp);
	npkts = 0;

	RL_LOCK(sc);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		npkts = rl_poll_locked(ifp, cmd, count);
	RL_UNLOCK(sc);

	return (npkts);
}
/*
 * Locked body of the polling handler: receive up to `count` frames,
 * reap completed transmits, restart output, and on a status-check
 * cycle service any latched interrupt conditions.  Returns the number
 * of packets received.
 */
static int
rl_poll_locked(if_t ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc = if_getsoftc(ifp);
	int rx_npkts;

	RL_LOCK_ASSERT(sc);

	/* rl_rxeof() consumes rxcycles as its packet budget. */
	sc->rxcycles = count;
	rx_npkts = rl_rxeof(sc);
	rl_txeof(sc);

	if (!if_sendq_empty(ifp))
		rl_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		uint16_t	status;

		/* We should also check the status register. */
		status = CSR_READ_2(sc, RL_ISR);
		/* 0xffff means the card has been removed. */
		if (status == 0xffff)
			return (rx_npkts);
		if (status != 0)
			CSR_WRITE_2(sc, RL_ISR, status);

		/* XXX We should check behaviour on receiver stalls. */

		if (status & RL_ISR_SYSTEM_ERR) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			rl_init_locked(sc);
		}
	}
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */
/*
 * Interrupt handler.  Acknowledges and services Rx/Tx/error events in
 * a bounded loop (at most 16 passes) with chip interrupts masked, then
 * re-enables them before returning.
 */
static void
rl_intr(void *arg)
{
	struct rl_softc *sc = arg;
	if_t ifp = sc->rl_ifp;
	uint16_t status;
	int count;

	RL_LOCK(sc);

	/* Ignore interrupts while suspended. */
	if (sc->suspended)
		goto done_locked;

#ifdef DEVICE_POLLING
	/* Polling mode services the chip from rl_poll() instead. */
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		goto done_locked;
#endif

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		goto done_locked2;
	status = CSR_READ_2(sc, RL_ISR);
	/* 0xffff: card gone; no RL_INTRS bits: not our interrupt. */
	if (status == 0xffff || (status & RL_INTRS) == 0)
		goto done_locked;
	/*
	 * Ours, disable further interrupts.
	 */
	CSR_WRITE_2(sc, RL_IMR, 0);
	for (count = 16; count > 0; count--) {
		/* Ack the events we are about to service. */
		CSR_WRITE_2(sc, RL_ISR, status);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			if (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR))
				rl_rxeof(sc);
			if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR))
				rl_txeof(sc);
			if (status & RL_ISR_SYSTEM_ERR) {
				/* Fatal: full reinit, drop out early. */
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				rl_init_locked(sc);
				RL_UNLOCK(sc);
				return;
			}
		}
		status = CSR_READ_2(sc, RL_ISR);
		/* If the card has gone away, the read returns 0xffff. */
		if (status == 0xffff || (status & RL_INTRS) == 0)
			break;
	}

	if (!if_sendq_empty(ifp))
		rl_start_locked(ifp);

done_locked2:
	/* Re-enable chip interrupts only if we are still running. */
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
done_locked:
	RL_UNLOCK(sc);
}
/*
* Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
* pointers to the fragment pointers.
*/
static int
rl_encap(struct rl_softc *sc, struct mbuf **m_head)
{
	struct mbuf		*m;
	bus_dma_segment_t	txsegs[1];
	int			error, nsegs, padlen;

	RL_LOCK_ASSERT(sc);

	m = *m_head;
	padlen = 0;
	/*
	 * Hardware doesn't auto-pad, so we have to make sure
	 * pad short frames out to the minimum frame length.
	 */
	if (m->m_pkthdr.len < RL_MIN_FRAMELEN)
		padlen = RL_MIN_FRAMELEN - m->m_pkthdr.len;
	/*
	 * The RealTek is brain damaged and wants longword-aligned
	 * TX buffers, plus we can only have one fragment buffer
	 * per packet.  We have to copy pretty much all the time.
	 */
	if (m->m_next != NULL || (mtod(m, uintptr_t) & 3) != 0 ||
	    (padlen > 0 && M_TRAILINGSPACE(m) < padlen)) {
		/* Collapse the chain into one aligned contiguous mbuf. */
		m = m_defrag(*m_head, M_NOWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
	}
	*m_head = m;

	if (padlen > 0) {
		/*
		 * Make security-conscious people happy: zero out the
		 * bytes in the pad area, since we don't know what
		 * this mbuf cluster buffer's previous user might
		 * have left in it.
		 */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
	}

	/* Map the (single-segment) frame for DMA at the current Tx slot. */
	error = bus_dmamap_load_mbuf_sg(sc->rl_cdata.rl_tx_tag,
	    RL_CUR_DMAMAP(sc), m, txsegs, &nsegs, 0);
	if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	RL_CUR_TXMBUF(sc) = m;
	bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_CUR_DMAMAP(sc),
	    BUS_DMASYNC_PREWRITE);
	/* Point the current Tx slot's address register at the buffer. */
	CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), RL_ADDR_LO(txsegs[0].ds_addr));

	return (0);
}
/*
* Main transmit routine.
*/
/*
 * ifnet transmit-start hook: acquire the softc lock and run the
 * locked transmit path.
 */
static void
rl_start(if_t ifp)
{
	struct rl_softc *sc;

	sc = if_getsoftc(ifp);
	RL_LOCK(sc);
	rl_start_locked(ifp);
	RL_UNLOCK(sc);
}
/*
 * Locked transmit path: dequeue frames and hand them to the chip
 * until the four Tx slots are full or the queue is empty.  Requires a
 * running, non-busy interface with link.
 */
static void
rl_start_locked(if_t ifp)
{
	struct rl_softc *sc = if_getsoftc(ifp);
	struct mbuf *m_head = NULL;

	RL_LOCK_ASSERT(sc);

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
		return;

	/* Loop while the current Tx slot is free. */
	while (RL_CUR_TXMBUF(sc) == NULL) {
		m_head = if_dequeue(ifp);

		if (m_head == NULL)
			break;

		if (rl_encap(sc, &m_head)) {
			/* Encap freed the mbuf: nothing to requeue. */
			if (m_head == NULL)
				break;
			/* Otherwise put it back and mark us busy. */
			if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		/* Pass a copy of this mbuf chain to the bpf subsystem. */
		BPF_MTAP(ifp, RL_CUR_TXMBUF(sc));

		/* Transmit the frame. */
		CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc),
		    RL_TXTHRESH(sc->rl_txthresh) |
		    RL_CUR_TXMBUF(sc)->m_pkthdr.len);

		RL_INC(sc->rl_cdata.cur_tx);

		/* Set a timeout in case the chip goes out to lunch. */
		sc->rl_watchdog_timer = 5;
	}

	/*
	 * We broke out of the loop because all our TX slots are
	 * full.  Mark the NIC as busy until it drains some of the
	 * packets from the queue.
	 */
	if (RL_CUR_TXMBUF(sc) != NULL)
		if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
}
/*
 * Unlocked init entry point used as the ifnet init callback: just
 * wraps rl_init_locked() with the softc lock.
 */
static void
rl_init(void *xsc)
{
	struct rl_softc *sc;

	sc = xsc;
	RL_LOCK(sc);
	rl_init_locked(sc);
	RL_UNLOCK(sc);
}
/*
 * Bring the chip from reset to a fully operational state: program the
 * station address, Rx block and Tx slots, Rx/Tx configuration, filters
 * and interrupt masks, then start the periodic tick.  No-op if the
 * interface is already running.
 */
static void
rl_init_locked(struct rl_softc *sc)
{
	if_t ifp = sc->rl_ifp;
	struct mii_data *mii;
	uint32_t eaddr[2];

	RL_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->rl_miibus);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	rl_stop(sc);

	rl_reset(sc);
	if (sc->rl_twister_enable) {
		/*
		 * Reset twister register tuning state.  The twister
		 * registers and their tuning are undocumented, but
		 * are necessary to cope with bad links.  rl_twister =
		 * DONE here will disable this entirely.
		 */
		sc->rl_twister = CHK_LINK;
	}

	/*
	 * Init our MAC address.  Even though the chipset
	 * documentation doesn't mention it, we need to enter "Config
	 * register write enable" mode to modify the ID registers.
	 */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	bzero(eaddr, sizeof(eaddr));
	bcopy(if_getlladdr(sc->rl_ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_STREAM_4(sc, RL_IDR0, eaddr[0]);
	CSR_WRITE_STREAM_4(sc, RL_IDR4, eaddr[1]);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	/* Init the RX memory block pointer register. */
	CSR_WRITE_4(sc, RL_RXADDR, sc->rl_cdata.rl_rx_buf_paddr +
	    RL_RX_8139_BUF_RESERVE);
	/* Init TX descriptors. */
	rl_list_tx_init(sc);
	/* Init Rx memory block. */
	rl_list_rx_init(sc);

	/*
	 * Enable transmit and receive.
	 */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);

	/*
	 * Set the initial TX and RX configuration.
	 */
	CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
	CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);

	/* Set RX filter. */
	rl_rxfilter(sc);

#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		CSR_WRITE_2(sc, RL_IMR, 0);
	else
#endif
	/* Enable interrupts. */
	CSR_WRITE_2(sc, RL_IMR, RL_INTRS);

	/* Set initial TX threshold */
	sc->rl_txthresh = RL_TX_THRESH_INIT;

	/* Start RX/TX process. */
	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);

	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);

	/* Link state is re-learned; kick the PHY. */
	sc->rl_flags &= ~RL_FLAG_LINK;
	mii_mediachg(mii);

	CSR_WRITE_1(sc, sc->rl_cfg1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	callout_reset(&sc->rl_stat_callout, hz, rl_tick, sc);
}
/*
* Set media options.
*/
/*
 * Set media options: push the newly selected media setting down to
 * the PHY via the MII layer.  Always succeeds.
 */
static int
rl_ifmedia_upd(if_t ifp)
{
	struct rl_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->rl_miibus);

	RL_LOCK(sc);
	mii_mediachg(mii);
	RL_UNLOCK(sc);

	return (0);
}
/*
* Report current media status.
*/
/*
 * Report current media status: refresh PHY state through the MII
 * layer and copy the result into the caller's ifmediareq.
 */
static void
rl_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct rl_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->rl_miibus);

	RL_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	RL_UNLOCK(sc);
}
/*
 * ifnet ioctl handler: interface flags, multicast filter updates,
 * media selection, and capability (polling / WOL) changes.  Anything
 * else is passed to ether_ioctl().
 */
static int
rl_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	struct rl_softc *sc = if_getsoftc(ifp);
	int error = 0, mask;

	switch (command) {
	case SIOCSIFFLAGS:
		RL_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			/*
			 * Only reprogram the Rx filter if just PROMISC or
			 * ALLMULTI toggled on a running interface;
			 * otherwise do a full (re)init.
			 */
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
			    ((if_getflags(ifp) ^ sc->rl_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)))
				rl_rxfilter(sc);
			else
				rl_init_locked(sc);
		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			rl_stop(sc);
		sc->rl_if_flags = if_getflags(ifp);
		RL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		RL_LOCK(sc);
		rl_rxfilter(sc);
		RL_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->rl_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		/* Bits the caller is asking to change. */
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		/* Entering polling mode: register and mask chip interrupts. */
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(if_getcapenable(ifp) & IFCAP_POLLING)) {
			error = ether_poll_register(rl_poll, ifp);
			if (error)
				return(error);
			RL_LOCK(sc);
			/* Disable interrupts */
			CSR_WRITE_2(sc, RL_IMR, 0x0000);
			if_setcapenablebit(ifp, IFCAP_POLLING, 0);
			RL_UNLOCK(sc);
			return (error);
		}
		/* Leaving polling mode: deregister, unmask interrupts. */
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    if_getcapenable(ifp) & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			RL_LOCK(sc);
			CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
			if_setcapenablebit(ifp, 0, IFCAP_POLLING);
			RL_UNLOCK(sc);
			return (error);
		}
#endif /* DEVICE_POLLING */
		/* Toggle individual WOL capabilities if WOL is supported. */
		if ((mask & IFCAP_WOL) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL) != 0) {
			if ((mask & IFCAP_WOL_UCAST) != 0)
				if_togglecapenable(ifp, IFCAP_WOL_UCAST);
			if ((mask & IFCAP_WOL_MCAST) != 0)
				if_togglecapenable(ifp, IFCAP_WOL_MCAST);
			if ((mask & IFCAP_WOL_MAGIC) != 0)
				if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*
 * Transmit watchdog, run from rl_tick() with the lock held.  The
 * timer is armed (set to 5) by rl_start_locked() and cleared by
 * rl_txeof(); if it counts down to zero the chip is assumed hung and
 * the interface is reinitialized.
 */
static void
rl_watchdog(struct rl_softc *sc)
{

	RL_LOCK_ASSERT(sc);

	/* Not armed, or armed but not yet expired (note the decrement). */
	if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer >0)
		return;

	device_printf(sc->rl_dev, "watchdog timeout\n");
	if_inc_counter(sc->rl_ifp, IFCOUNTER_OERRORS, 1);

	/* Reap whatever completed, then force a full reinit. */
	rl_txeof(sc);
	rl_rxeof(sc);
	if_setdrvflagbits(sc->rl_ifp, 0, IFF_DRV_RUNNING);
	rl_init_locked(sc);
}
/*
* Stop the adapter and free any mbufs allocated to the
* RX and TX lists.
*/
static void
rl_stop(struct rl_softc *sc)
{
	int i;
	if_t ifp = sc->rl_ifp;

	RL_LOCK_ASSERT(sc);

	sc->rl_watchdog_timer = 0;
	callout_stop(&sc->rl_stat_callout);
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->rl_flags &= ~RL_FLAG_LINK;

	/* Halt the MAC and mask all interrupts. */
	CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	CSR_WRITE_2(sc, RL_IMR, 0x0000);
	/* Wait for the Rx/Tx enable bits to clear. */
	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if ((CSR_READ_1(sc, RL_COMMAND) &
		    (RL_CMD_RX_ENB | RL_CMD_TX_ENB)) == 0)
			break;
	}
	if (i == RL_TIMEOUT)
		device_printf(sc->rl_dev, "Unable to stop Tx/Rx MAC\n");

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		if (sc->rl_cdata.rl_tx_chain[i] != NULL) {
			bus_dmamap_sync(sc->rl_cdata.rl_tx_tag,
			    sc->rl_cdata.rl_tx_dmamap[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->rl_cdata.rl_tx_tag,
			    sc->rl_cdata.rl_tx_dmamap[i]);
			m_freem(sc->rl_cdata.rl_tx_chain[i]);
			sc->rl_cdata.rl_tx_chain[i] = NULL;
			/* Clear this slot's Tx start address register. */
			CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)),
			    0x0000000);
		}
	}
}
/*
* Device suspend routine. Stop the interface and save some PCI
* settings in case the BIOS doesn't restore them properly on
* resume.
*/
/*
 * Device suspend method: halt the chip, arm wake-on-LAN if enabled,
 * and mark the softc suspended so stray interrupts are ignored.
 */
static int
rl_suspend(device_t dev)
{
	struct rl_softc *sc = device_get_softc(dev);

	RL_LOCK(sc);
	rl_stop(sc);
	rl_setwol(sc);
	sc->suspended = 1;
	RL_UNLOCK(sc);

	return (0);
}
/*
* Device resume routine. Restore some PCI settings in case the BIOS
* doesn't, re-enable busmastering, and restart the interface if
* appropriate.
*/
static int
rl_resume(device_t dev)
{
	struct rl_softc		*sc;
	if_t ifp;
	int			pmc;
	uint16_t		pmstat;

	sc = device_get_softc(dev);
	ifp = sc->rl_ifp;

	RL_LOCK(sc);

	/* Undo the WOL arming done in rl_setwol() at suspend time. */
	if ((if_getcapabilities(ifp) & IFCAP_WOL) != 0 &&
	    pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->rl_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->rl_dev,
			    pmc + PCIR_POWER_STATUS, pmstat, 2);
		}
		/*
		 * Clear WOL matching such that normal Rx filtering
		 * wouldn't interfere with WOL patterns.
		 */
		rl_clrwol(sc);
	}

	/* reinitialize interface if necessary */
	if (if_getflags(ifp) & IFF_UP)
		rl_init_locked(sc);

	sc->suspended = 0;

	RL_UNLOCK(sc);

	return (0);
}
/*
* Stop all chip I/O so that the kernel's probe routines don't
* get confused by errant DMAs when rebooting.
*/
/*
 * Device shutdown method: quiesce DMA so the rebooting kernel's probe
 * routines are not confused, then arm wake-on-LAN if configured.
 */
static int
rl_shutdown(device_t dev)
{
	struct rl_softc *sc = device_get_softc(dev);

	RL_LOCK(sc);
	rl_stop(sc);
	/*
	 * Mark interface as down since otherwise we will panic if
	 * interrupt comes in later on, which can happen in some
	 * cases.
	 */
	if_setflagbits(sc->rl_ifp, 0, IFF_UP);
	rl_setwol(sc);
	RL_UNLOCK(sc);

	return (0);
}
/*
 * Program the chip's config registers and the PCI power-management
 * capability to reflect the currently enabled IFCAP_WOL_* bits.
 * Called at suspend/shutdown with the softc lock held.
 */
static void
rl_setwol(struct rl_softc *sc)
{
	if_t ifp;
	int			pmc;
	uint16_t		pmstat;
	uint8_t			v;

	RL_LOCK_ASSERT(sc);

	ifp = sc->rl_ifp;
	/* Nothing to do if WOL is unsupported or PM capability absent. */
	if ((if_getcapabilities(ifp) & IFCAP_WOL) == 0)
		return;
	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
		return;

	/* Enable config register write. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);

	/* Enable PME. */
	v = CSR_READ_1(sc, sc->rl_cfg1);
	v &= ~RL_CFG1_PME;
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
		v |= RL_CFG1_PME;
	CSR_WRITE_1(sc, sc->rl_cfg1, v);

	/* Magic-packet wakeup lives in CONFIG3. */
	v = CSR_READ_1(sc, sc->rl_cfg3);
	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
		v |= RL_CFG3_WOL_MAGIC;
	CSR_WRITE_1(sc, sc->rl_cfg3, v);

	/* Unicast/multicast/broadcast wakeup and LANWAKE in CONFIG5. */
	v = CSR_READ_1(sc, sc->rl_cfg5);
	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
	v &= ~RL_CFG5_WOL_LANWAKE;
	if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) != 0)
		v |= RL_CFG5_WOL_UCAST;
	if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) != 0)
		v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
		v |= RL_CFG5_WOL_LANWAKE;
	CSR_WRITE_1(sc, sc->rl_cfg5, v);

	/* Config register write done. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}
/*
 * Clear all wake-on-LAN matching in CONFIG3/CONFIG5 so normal Rx
 * filtering is not affected by leftover WOL patterns (called on
 * resume and for chips where WOL is disabled at attach).
 */
static void
rl_clrwol(struct rl_softc *sc)
{
	if_t ifp;
	uint8_t			v;

	ifp = sc->rl_ifp;
	if ((if_getcapabilities(ifp) & IFCAP_WOL) == 0)
		return;

	/* Enable config register write. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);

	v = CSR_READ_1(sc, sc->rl_cfg3);
	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
	CSR_WRITE_1(sc, sc->rl_cfg3, v);

	/* Config register write done. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	/*
	 * NOTE(review): CONFIG5 is modified after write-enable mode is
	 * turned off; presumably CONFIG5 is writable without it —
	 * confirm against the chip documentation.
	 */
	v = CSR_READ_1(sc, sc->rl_cfg5);
	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
	v &= ~RL_CFG5_WOL_LANWAKE;
	CSR_WRITE_1(sc, sc->rl_cfg5, v);
}
diff --git a/sys/dev/sbni/if_sbni.c b/sys/dev/sbni/if_sbni.c
index 4dbfae7777ef..ffbce54a56f2 100644
--- a/sys/dev/sbni/if_sbni.c
+++ b/sys/dev/sbni/if_sbni.c
@@ -1,1249 +1,1246 @@
/*-
* Copyright (c) 1997-2001 Granch, Ltd. All rights reserved.
* Author: Denis I.Timofeev <timofeev@granch.ru>
*
* Redistributon and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
/*
* Device driver for Granch SBNI12 leased line adapters
*
* Revision 2.0.0 1997/08/06
* Initial revision by Alexey Zverev
*
* Revision 2.0.1 1997/08/11
* Additional internal statistics support (tx statistics)
*
* Revision 2.0.2 1997/11/05
* if_bpf bug has been fixed
*
* Revision 2.0.3 1998/12/20
* Memory leakage has been eliminated in
* the sbni_st and sbni_timeout routines.
*
* Revision 3.0 2000/08/10 by Yaroslav Polyakov
* Support for PCI cards. 4.1 modification.
*
* Revision 3.1 2000/09/12
* Removed extra #defines around bpf functions
*
* Revision 4.0 2000/11/23 by Denis Timofeev
* Completely redesigned the buffer management
*
* Revision 4.1 2001/01/21
* Support for PCI Dual cards and new SBNI12D-10, -11 Dual/ISA cards
*
* Written with reference to NE2000 driver developed by David Greenman.
*/
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/callout.h>
#include <sys/syslog.h>
#include <sys/random.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if_types.h>
#include <dev/sbni/if_sbnireg.h>
#include <dev/sbni/if_sbnivar.h>
static void sbni_init(void *);
static void sbni_init_locked(struct sbni_softc *);
static void sbni_start(if_t);
static void sbni_start_locked(if_t);
static int sbni_ioctl(if_t, u_long, caddr_t);
static void sbni_stop(struct sbni_softc *);
static void handle_channel(struct sbni_softc *);
static void card_start(struct sbni_softc *);
static int recv_frame(struct sbni_softc *);
static void send_frame(struct sbni_softc *);
static int upload_data(struct sbni_softc *, u_int, u_int, u_int, u_int32_t);
static int skip_tail(struct sbni_softc *, u_int, u_int32_t);
static void interpret_ack(struct sbni_softc *, u_int);
static void download_data(struct sbni_softc *, u_int32_t *);
static void prepare_to_send(struct sbni_softc *);
static void drop_xmit_queue(struct sbni_softc *);
static int get_rx_buf(struct sbni_softc *);
static void indicate_pkt(struct sbni_softc *);
static void change_level(struct sbni_softc *);
static int check_fhdr(struct sbni_softc *, u_int *, u_int *,
u_int *, u_int *, u_int32_t *);
static int append_frame_to_pkt(struct sbni_softc *, u_int, u_int32_t);
static void timeout_change_level(struct sbni_softc *);
static void send_frame_header(struct sbni_softc *, u_int32_t *);
static void set_initial_values(struct sbni_softc *, struct sbni_flags);
static u_int32_t calc_crc32(u_int32_t, caddr_t, u_int);
static callout_func_t sbni_timeout;
static __inline u_char sbni_inb(struct sbni_softc *, enum sbni_reg);
static __inline void sbni_outb(struct sbni_softc *, enum sbni_reg, u_char);
static __inline void sbni_insb(struct sbni_softc *, u_char *, u_int);
static __inline void sbni_outsb(struct sbni_softc *, u_char *, u_int);
static u_int32_t crc32tab[];
#ifdef SBNI_DUAL_COMPOUND
static struct mtx headlist_lock;
MTX_SYSINIT(headlist_lock, &headlist_lock, "sbni headlist", MTX_DEF);
static struct sbni_softc *sbni_headlist;
#endif
/* -------------------------------------------------------------------------- */
/* Read one byte from card register 'reg', offset by this channel's base. */
static __inline u_char
sbni_inb(struct sbni_softc *sc, enum sbni_reg reg)
{
return bus_space_read_1(
rman_get_bustag(sc->io_res),
rman_get_bushandle(sc->io_res),
sc->io_off + reg);
}
/* Write one byte to card register 'reg', offset by this channel's base. */
static __inline void
sbni_outb(struct sbni_softc *sc, enum sbni_reg reg, u_char value)
{
bus_space_write_1(
rman_get_bustag(sc->io_res),
rman_get_bushandle(sc->io_res),
sc->io_off + reg, value);
}
/* Burst-read 'len' bytes from the DAT register into buffer 'to'. */
static __inline void
sbni_insb(struct sbni_softc *sc, u_char *to, u_int len)
{
bus_space_read_multi_1(
rman_get_bustag(sc->io_res),
rman_get_bushandle(sc->io_res),
sc->io_off + DAT, to, len);
}
/* Burst-write 'len' bytes from buffer 'from' to the DAT register. */
static __inline void
sbni_outsb(struct sbni_softc *sc, u_char *from, u_int len)
{
bus_space_write_multi_1(
rman_get_bustag(sc->io_res),
rman_get_bushandle(sc->io_res),
sc->io_off + DAT, from, len);
}
/*
Valid combinations in CSR0 (for probing):
VALID_DECODER 0000,0011,1011,1010
; 0 ; -
TR_REQ ; 1 ; +
TR_RDY ; 2 ; -
TR_RDY TR_REQ ; 3 ; +
BU_EMP ; 4 ; +
BU_EMP TR_REQ ; 5 ; +
BU_EMP TR_RDY ; 6 ; -
BU_EMP TR_RDY TR_REQ ; 7 ; +
RC_RDY ; 8 ; +
RC_RDY TR_REQ ; 9 ; +
RC_RDY TR_RDY ; 10 ; -
RC_RDY TR_RDY TR_REQ ; 11 ; -
RC_RDY BU_EMP ; 12 ; -
RC_RDY BU_EMP TR_REQ ; 13 ; -
RC_RDY BU_EMP TR_RDY ; 14 ; -
RC_RDY BU_EMP TR_RDY TR_REQ ; 15 ; -
*/
#define VALID_DECODER (2 + 8 + 0x10 + 0x20 + 0x80 + 0x100 + 0x200)
/*
* Probe for adapter presence: read CSR0 and check its status-bit
* combination against the VALID_DECODER table above.
* Returns 0 if a card appears to be present, ENXIO otherwise.
*/
int
sbni_probe(struct sbni_softc *sc)
{
u_char csr0;
csr0 = sbni_inb(sc, CSR0);
/* 0x00/0xff mean a floating bus - no hardware there */
if (csr0 != 0xff && csr0 != 0x00) {
csr0 &= ~EN_INT;
if (csr0 & BU_EMP)
csr0 |= EN_INT;
/* high nibble indexes the valid-combination bitmask */
if (VALID_DECODER & (1 << (csr0 >> 4)))
return (0);
}
return (ENXIO);
}
/*
* Install interface into kernel networking data structures
*/
-int
+void
sbni_attach(struct sbni_softc *sc, int unit, struct sbni_flags flags)
{
if_t ifp;
u_char csr0;
uint64_t baudrate;
ifp = sc->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL)
- return (ENOMEM);
sbni_outb(sc, CSR0, 0);
set_initial_values(sc, flags);
/* Initialize ifnet structure */
if_setsoftc(ifp, sc);
if_initname(ifp, "sbni", unit);
if_setinitfn(ifp, sbni_init);
if_setstartfn(ifp, sbni_start);
if_setioctlfn(ifp, sbni_ioctl);
if_setsendqlen(ifp, ifqmaxlen);
/* report real baud rate */
csr0 = sbni_inb(sc, CSR0);
baudrate = (csr0 & 0x01 ? 500000 : 2000000) / (1 << flags.rate);
if_setbaudrate(ifp, baudrate);
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
mtx_init(&sc->lock, if_name(ifp), MTX_NETWORK_LOCK, MTX_DEF);
callout_init_mtx(&sc->wch, &sc->lock, 0);
ether_ifattach(ifp, sc->enaddr);
/* device attach does transition from UNCONFIGURED to IDLE state */
if_printf(ifp, "speed %ju, rxl ", (uintmax_t)baudrate);
if (sc->delta_rxl)
printf("auto\n");
else
printf("%d (fixed)\n", sc->cur_rxl_index);
- return (0);
}
/*
* Detach the interface: stop the channel, drain the watchdog callout,
* tear down networking state and the interrupt handler, then release
* the lock and ifnet. Bus resources are freed separately by
* sbni_release_resources().
*/
void
sbni_detach(struct sbni_softc *sc)
{
SBNI_LOCK(sc);
sbni_stop(sc);
SBNI_UNLOCK(sc);
/* must drain after unlock - the callout takes sc->lock itself */
callout_drain(&sc->wch);
ether_ifdetach(sc->ifp);
if (sc->irq_handle)
bus_teardown_intr(sc->dev, sc->irq_res, sc->irq_handle);
mtx_destroy(&sc->lock);
if_free(sc->ifp);
}
/*
* Release bus resources. The io_off == 0 test keeps a slave channel
* (which shares the master's io_res at offset 4) from freeing the
* I/O range twice.
*/
void
sbni_release_resources(struct sbni_softc *sc)
{
if (sc->irq_res)
bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
sc->irq_res);
if (sc->io_res && sc->io_off == 0)
bus_release_resource(sc->dev, SYS_RES_IOPORT, sc->io_rid,
sc->io_res);
}
/* -------------------------------------------------------------------------- */
/* ifnet init entry point: take the softc lock and do the real work. */
static void
sbni_init(void *xsc)
{
struct sbni_softc *sc;
sc = (struct sbni_softc *)xsc;
SBNI_LOCK(sc);
sbni_init_locked(sc);
SBNI_UNLOCK(sc);
}
/*
* Bring the interface up: start the card, arm the watchdog timer and
* mark the interface running. Called with the softc lock held.
*/
static void
sbni_init_locked(struct sbni_softc *sc)
{
if_t ifp;
ifp = sc->ifp;
/*
* kludge to avoid multiple initialization when more than one
* protocol is configured
*/
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
return;
card_start(sc);
callout_reset(&sc->wch, hz/SBNI_HZ, sbni_timeout, sc);
if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
/* attempt to start output */
sbni_start_locked(ifp);
}
/* ifnet start entry point: locked wrapper around sbni_start_locked(). */
static void
sbni_start(if_t ifp)
{
struct sbni_softc *sc = if_getsoftc(ifp);
SBNI_LOCK(sc);
sbni_start_locked(ifp);
SBNI_UNLOCK(sc);
}
/*
* Kick off transmission if idle. tx_frameno == 0 means no packet is
* currently being sent; otherwise the in-progress packet completes
* first and prepare_to_send() is re-invoked from interpret_ack().
*/
static void
sbni_start_locked(if_t ifp)
{
struct sbni_softc *sc = if_getsoftc(ifp);
if (sc->tx_frameno == 0)
prepare_to_send(sc);
}
/*
* Stop the channel: quiesce the card, flush the transmit queue, free
* any partially assembled receive buffer, cancel the watchdog and
* clear the running flags. Called with the softc lock held.
*/
static void
sbni_stop(struct sbni_softc *sc)
{
sbni_outb(sc, CSR0, 0);
drop_xmit_queue(sc);
if (sc->rx_buf_p) {
m_freem(sc->rx_buf_p);
sc->rx_buf_p = NULL;
}
callout_stop(&sc->wch);
if_setdrvflagbits(sc->ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
}
/* -------------------------------------------------------------------------- */
/* interrupt handler */
/*
* SBNI12D-10, -11/ISA boards in "common interrupt" mode cannot be
* treated as two independent single-channel devices. Every channel
* appears as an Ethernet interface, but the interrupt handler must be
* shared. Only the first channel ("master") driver registers the
* handler; its softc holds a pointer to the "slave" channel's softc
* and services that channel's interrupts too.
* The softc of every successfully attached ISA SBNI board is linked
* into a list. When the next board driver initializes, it scans this
* list; if it finds a softc with the same irq and an ioaddr differing
* by 4, it assumes that board to be the "master".
*/
void
sbni_intr(void *arg)
{
struct sbni_softc *sc;
int repeat;
sc = (struct sbni_softc *)arg;
/* loop until neither channel has work pending */
do {
repeat = 0;
SBNI_LOCK(sc);
if (sbni_inb(sc, CSR0) & (RC_RDY | TR_RDY)) {
handle_channel(sc);
repeat = 1;
}
SBNI_UNLOCK(sc);
if (sc->slave_sc) {
/* second channel present */
SBNI_LOCK(sc->slave_sc);
if (sbni_inb(sc->slave_sc, CSR0) & (RC_RDY | TR_RDY)) {
handle_channel(sc->slave_sc);
repeat = 1;
}
SBNI_UNLOCK(sc->slave_sc);
}
} while (repeat);
}
/*
* Service one channel: with interrupts masked (EN_INT cleared) and the
* transmit request raised, alternately receive and send frames until
* the card reports neither RC_RDY nor TR_RDY, then re-enable
* interrupts. Called with the channel's softc lock held.
*/
static void
handle_channel(struct sbni_softc *sc)
{
int req_ans;
u_char csr0;
sbni_outb(sc, CSR0, (sbni_inb(sc, CSR0) & ~EN_INT) | TR_REQ);
sc->timer_ticks = CHANGE_LEVEL_START_TICKS;
for (;;) {
csr0 = sbni_inb(sc, CSR0);
if ((csr0 & (RC_RDY | TR_RDY)) == 0)
break;
/* ask peer to resend unless the previous frame was OK */
req_ans = !(sc->state & FL_PREV_OK);
if (csr0 & RC_RDY)
req_ans = recv_frame(sc);
/*
* TR_RDY always equals 1 here because we have owned the marker,
* and we set TR_REQ when disabled interrupts
*/
csr0 = sbni_inb(sc, CSR0);
if ((csr0 & TR_RDY) == 0 || (csr0 & RC_RDY) != 0)
if_printf(sc->ifp, "internal error!\n");
/* if state & FL_NEED_RESEND != 0 then tx_frameno != 0 */
if (req_ans || sc->tx_frameno != 0)
send_frame(sc);
else {
/* send the marker without any data */
sbni_outb(sc, CSR0, sbni_inb(sc, CSR0) & ~TR_REQ);
}
}
sbni_outb(sc, CSR0, sbni_inb(sc, CSR0) | EN_INT);
}
/*
* Receive one frame and update receive-level statistics.
* Routine returns 1 if the received frame needs to be acknowledged.
* An empty frame received without errors won't be acknowledged.
*/
static int
recv_frame(struct sbni_softc *sc)
{
u_int32_t crc;
u_int framelen, frameno, ack;
u_int is_first, frame_ok;
crc = CRC32_INITIAL;
if (check_fhdr(sc, &framelen, &frameno, &ack, &is_first, &crc)) {
/* framelen <= 4 means only the trailing CRC - no payload */
frame_ok = framelen > 4 ?
upload_data(sc, framelen, frameno, is_first, crc) :
skip_tail(sc, framelen, crc);
if (frame_ok)
interpret_ack(sc, ack);
} else {
framelen = 0;
frame_ok = 0;
}
/* toggle frame counter bit to acknowledge reception to the card */
sbni_outb(sc, CSR0, sbni_inb(sc, CSR0) ^ CT_ZER);
if (frame_ok) {
sc->state |= FL_PREV_OK;
if (framelen > 4)
sc->in_stats.all_rx_number++;
} else {
sc->state &= ~FL_PREV_OK;
change_level(sc);
sc->in_stats.all_rx_number++;
sc->in_stats.bad_rx_number++;
}
return (!frame_ok || framelen > 4);
}
/*
* Transmit the current frame (header, payload, CRC), handling the
* resend counter: after TR_ERROR_COUNT unacknowledged attempts the
* whole transmit queue is dropped.
*/
static void
send_frame(struct sbni_softc *sc)
{
u_int32_t crc;
u_char csr0;
crc = CRC32_INITIAL;
if (sc->state & FL_NEED_RESEND) {
/* if frame was sent but not ACK'ed - resend it */
if (sc->trans_errors) {
sc->trans_errors--;
if (sc->framelen != 0)
sc->in_stats.resend_tx_number++;
} else {
/* cannot xmit after many attempts */
drop_xmit_queue(sc);
goto do_send;
}
} else
sc->trans_errors = TR_ERROR_COUNT;
send_frame_header(sc, &crc);
sc->state |= FL_NEED_RESEND;
/*
* FL_NEED_RESEND will be cleared after ACK, but if an empty
* frame was sent then prepare_to_send sets up the next frame
*/
if (sc->framelen) {
download_data(sc, &crc);
sc->in_stats.all_tx_number++;
sc->state |= FL_WAIT_ACK;
}
/* trailing CRC closes the frame */
sbni_outsb(sc, (u_char *)&crc, sizeof crc);
do_send:
csr0 = sbni_inb(sc, CSR0);
sbni_outb(sc, CSR0, csr0 & ~TR_REQ);
if (sc->tx_frameno) {
/* next frame exists - request to send */
sbni_outb(sc, CSR0, csr0 | TR_REQ);
}
}
/*
* Copy sc->framelen payload bytes of the outgoing packet - starting at
* sc->outpos within the mbuf chain sc->tx_buf_p - into the card's data
* FIFO, folding them into *crc_p. Frames shorter than framelen are
* zero-padded.
*/
static void
download_data(struct sbni_softc *sc, u_int32_t *crc_p)
{
struct mbuf *m;
caddr_t data_p;
u_int data_len, pos, slice;
data_p = NULL; /* initialized to avoid warn */
pos = 0;
/* locate the mbuf containing byte sc->outpos of the packet */
for (m = sc->tx_buf_p; m != NULL && pos < sc->pktlen; m = m->m_next) {
if (pos + m->m_len > sc->outpos) {
data_len = m->m_len - (sc->outpos - pos);
data_p = mtod(m, caddr_t) + (sc->outpos - pos);
goto do_copy;
} else
pos += m->m_len;
}
data_len = 0;
do_copy:
pos = 0;
do {
if (data_len) {
slice = min(data_len, sc->framelen - pos);
sbni_outsb(sc, data_p, slice);
*crc_p = calc_crc32(*crc_p, data_p, slice);
pos += slice;
if (data_len -= slice)
data_p += slice;
else {
/* advance to next non-empty mbuf */
do {
m = m->m_next;
} while (m != NULL && m->m_len == 0);
if (m) {
data_len = m->m_len;
data_p = mtod(m, caddr_t);
}
}
} else {
/* frame too short - zero padding */
pos = sc->framelen - pos;
while (pos--) {
sbni_outb(sc, DAT, 0);
*crc_p = CRC32(0, *crc_p);
}
return;
}
} while (pos < sc->framelen);
}
/*
* Accept one received payload frame: append it to the packet being
* reassembled when its frame number matches the expected one,
* otherwise just consume and CRC-check the bytes. Returns non-zero if
* the frame arrived intact.
*/
static int
upload_data(struct sbni_softc *sc, u_int framelen, u_int frameno,
u_int is_first, u_int32_t crc)
{
int frame_ok;
if (is_first) {
/* first fragment restarts packet reassembly */
sc->wait_frameno = frameno;
sc->inppos = 0;
}
if (sc->wait_frameno == frameno) {
if (sc->inppos + framelen <= ETHER_MAX_LEN) {
frame_ok = append_frame_to_pkt(sc, framelen, crc);
/*
* if CRC is right but framelen incorrect then a transmitter
* error occurred... drop entire packet
*/
} else if ((frame_ok = skip_tail(sc, framelen, crc)) != 0) {
sc->wait_frameno = 0;
sc->inppos = 0;
if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
/* now skip all frames until is_first != 0 */
}
} else
frame_ok = skip_tail(sc, framelen, crc);
if (is_first && !frame_ok) {
/*
* Frame has been violated, but we have stored
* is_first already... Drop entire packet.
*/
sc->wait_frameno = 0;
if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
}
return (frame_ok);
}
static __inline void send_complete(struct sbni_softc *);
/* Finish transmission of the current packet: free it and count it. */
static __inline void
send_complete(struct sbni_softc *sc)
{
m_freem(sc->tx_buf_p);
sc->tx_buf_p = NULL;
if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
}
/*
* Process the peer's acknowledgement carried in a received frame
* header: on FRAME_SENT_OK advance to the next outgoing frame, and
* start the next queued packet when the last frame was acknowledged.
*/
static void
interpret_ack(struct sbni_softc *sc, u_int ack)
{
if (ack == FRAME_SENT_OK) {
sc->state &= ~FL_NEED_RESEND;
if (sc->state & FL_WAIT_ACK) {
sc->outpos += sc->framelen;
if (--sc->tx_frameno) {
/* more frames remain - size the next slice */
sc->framelen = min(
sc->maxframe, sc->pktlen - sc->outpos);
} else {
send_complete(sc);
prepare_to_send(sc);
}
}
}
sc->state &= ~FL_WAIT_ACK;
}
/*
* Glue a received frame onto the previous fragments of the packet.
* The packet is handed up when its last frame has been accepted.
* Returns 0 on overflow, allocation failure or CRC mismatch.
*/
static int
append_frame_to_pkt(struct sbni_softc *sc, u_int framelen, u_int32_t crc)
{
caddr_t p;
if (sc->inppos + framelen > ETHER_MAX_LEN)
return (0);
if (!sc->rx_buf_p && !get_rx_buf(sc))
return (0);
p = sc->rx_buf_p->m_data + sc->inppos;
sbni_insb(sc, p, framelen);
if (calc_crc32(crc, p, framelen) != CRC32_REMAINDER)
return (0);
/* the 4 trailing CRC bytes are not payload */
sc->inppos += framelen - 4;
if (--sc->wait_frameno == 0) { /* last frame received */
indicate_pkt(sc);
if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
}
return (1);
}
/*
* Prepare to start output on the adapter: dequeue the next non-empty
* packet, compute frame count/sizes and raise TR_REQ. The transmitter
* is actually activated when the marker has been accepted.
* NOTE(review): the original comment required splimp protection; in
* this version the softc lock must be held - confirm at call sites.
*/
static void
prepare_to_send(struct sbni_softc *sc)
{
struct mbuf *m;
u_int len;
/* sc->tx_buf_p == NULL here! */
if (sc->tx_buf_p)
printf("sbni: memory leak!\n");
sc->outpos = 0;
sc->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
for (;;) {
sc->tx_buf_p = if_dequeue(sc->ifp);
if (!sc->tx_buf_p) {
/* nothing to transmit... */
sc->pktlen = 0;
sc->tx_frameno = 0;
sc->framelen = 0;
if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
return;
}
/* discard zero-length packets */
for (len = 0, m = sc->tx_buf_p; m; m = m->m_next)
len += m->m_len;
if (len != 0)
break;
m_freem(sc->tx_buf_p);
}
/* pad undersized packets up to the protocol minimum */
if (len < SBNI_MIN_LEN)
len = SBNI_MIN_LEN;
sc->pktlen = len;
sc->tx_frameno = howmany(len, sc->maxframe);
sc->framelen = min(len, sc->maxframe);
sbni_outb(sc, CSR0, sbni_inb(sc, CSR0) | TR_REQ);
if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
BPF_MTAP(sc->ifp, sc->tx_buf_p);
}
/*
* Abort the current transmission and flush every queued packet,
* counting each as an output error, then reset transmit state.
*/
static void
drop_xmit_queue(struct sbni_softc *sc)
{
struct mbuf *m;
if (sc->tx_buf_p) {
m_freem(sc->tx_buf_p);
sc->tx_buf_p = NULL;
if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
}
for (;;) {
m = if_dequeue(sc->ifp);
if (m == NULL)
break;
m_freem(m);
if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
}
sc->tx_frameno = 0;
sc->framelen = 0;
sc->outpos = 0;
sc->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
}
/*
* Write the 5-byte frame header (signature, 16-bit length+flags field,
* frame number, reserved byte) into the data FIFO, folding every byte
* after the signature into *crc_p.
*/
static void
send_frame_header(struct sbni_softc *sc, u_int32_t *crc_p)
{
u_int32_t crc;
u_int len_field;
u_char value;
crc = *crc_p;
len_field = sc->framelen + 6; /* CRC + frameno + reserved */
if (sc->state & FL_NEED_RESEND)
len_field |= FRAME_RETRY; /* non-first attempt... */
if (sc->outpos == 0)
len_field |= FRAME_FIRST;
/* piggyback the ack for the peer's previous frame */
len_field |= (sc->state & FL_PREV_OK) ? FRAME_SENT_OK : FRAME_SENT_BAD;
sbni_outb(sc, DAT, SBNI_SIG);
value = (u_char)len_field;
sbni_outb(sc, DAT, value);
crc = CRC32(value, crc);
value = (u_char)(len_field >> 8);
sbni_outb(sc, DAT, value);
crc = CRC32(value, crc);
sbni_outb(sc, DAT, sc->tx_frameno);
crc = CRC32(sc->tx_frameno, crc);
sbni_outb(sc, DAT, 0);
crc = CRC32(0, crc);
*crc_p = crc;
}
/*
* If the frame tail is not needed (incorrect number or received twice),
* it is not stored, but the CRC is still computed over it. Returns
* non-zero when the frame's CRC checks out.
*/
static int
skip_tail(struct sbni_softc *sc, u_int tail_len, u_int32_t crc)
{
while (tail_len--)
crc = CRC32(sbni_inb(sc, DAT), crc);
return (crc == CRC32_REMAINDER);
}
/*
* Read and validate a frame header from the data FIFO. On success the
* payload length (header fields excluded), frame number, piggybacked
* ack and first-frame flag are returned through the pointers and the
* running CRC is updated. Returns 0 on bad signature or length.
*/
static int
check_fhdr(struct sbni_softc *sc, u_int *framelen, u_int *frameno,
u_int *ack, u_int *is_first, u_int32_t *crc_p)
{
u_int32_t crc;
u_char value;
crc = *crc_p;
if (sbni_inb(sc, DAT) != SBNI_SIG)
return (0);
value = sbni_inb(sc, DAT);
*framelen = (u_int)value;
crc = CRC32(value, crc);
value = sbni_inb(sc, DAT);
*framelen |= ((u_int)value) << 8;
crc = CRC32(value, crc);
*ack = *framelen & FRAME_ACK_MASK;
*is_first = (*framelen & FRAME_FIRST) != 0;
if ((*framelen &= FRAME_LEN_MASK) < 6 || *framelen > SBNI_MAX_FRAME - 3)
return (0);
value = sbni_inb(sc, DAT);
*frameno = (u_int)value;
crc = CRC32(value, crc);
crc = CRC32(sbni_inb(sc, DAT), crc); /* reserved byte */
/* frameno + reserved already consumed from the length */
*framelen -= 2;
*crc_p = crc;
return (1);
}
/*
* Allocate the single mbuf (with cluster if needed) used to reassemble
* an incoming packet into sc->rx_buf_p. Returns 1 on success, 0 on
* allocation failure.
*/
static int
get_rx_buf(struct sbni_softc *sc)
{
struct mbuf *m;
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL) {
if_printf(sc->ifp, "cannot allocate header mbuf\n");
return (0);
}
/*
* We always put the received packet in a single buffer -
* either with just an mbuf header or in a cluster attached
* to the header. The +2 is to compensate for the alignment
* fixup below.
*/
if (ETHER_MAX_LEN + 2 > MHLEN) {
/* Attach an mbuf cluster */
if (!(MCLGET(m, M_NOWAIT))) {
m_freem(m);
return (0);
}
}
m->m_pkthdr.len = m->m_len = ETHER_MAX_LEN + 2;
/*
* The +2 is to longword align the start of the real packet.
* (sizeof ether_header == 14)
* This is important for NFS.
*/
m_adj(m, 2);
sc->rx_buf_p = m;
return (1);
}
/*
* Hand the fully reassembled packet up the stack. The softc lock is
* dropped around if_input() because the network stack may re-enter
* the driver.
*/
static void
indicate_pkt(struct sbni_softc *sc)
{
if_t ifp = sc->ifp;
struct mbuf *m;
m = sc->rx_buf_p;
m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = m->m_len = sc->inppos;
sc->rx_buf_p = NULL;
SBNI_UNLOCK(sc);
if_input(ifp, m);
SBNI_LOCK(sc);
}
/* -------------------------------------------------------------------------- */
/*
* Watchdog: runs SBNI_HZ times a second, checks wire activity and
* regenerates the marker (PR_RES) if the link was inactive for too
* long, stepping the receive level while auto-detection is enabled.
* Runs with the softc lock held (callout_init_mtx).
*/
static void
sbni_timeout(void *xsc)
{
struct sbni_softc *sc;
u_char csr0;
sc = (struct sbni_softc *)xsc;
SBNI_ASSERT_LOCKED(sc);
csr0 = sbni_inb(sc, CSR0);
if (csr0 & RC_CHK) {
if (sc->timer_ticks) {
if (csr0 & (RC_RDY | BU_EMP))
/* receiving not active */
sc->timer_ticks--;
} else {
/* link dead - try another receive level and restart */
sc->in_stats.timeout_number++;
if (sc->delta_rxl)
timeout_change_level(sc);
sbni_outb(sc, CSR1, *(u_char *)&sc->csr1 | PR_RES);
csr0 = sbni_inb(sc, CSR0);
}
}
sbni_outb(sc, CSR0, csr0 | RC_CHK);
callout_reset(&sc->wch, hz/SBNI_HZ, sbni_timeout, sc);
}
/* -------------------------------------------------------------------------- */
/*
* Reset the channel to a clean state: clear transmit/receive progress,
* pulse PR_RES through CSR1 and enable interrupts.
*/
static void
card_start(struct sbni_softc *sc)
{
sc->timer_ticks = CHANGE_LEVEL_START_TICKS;
sc->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
sc->state |= FL_PREV_OK;
sc->inppos = 0;
sc->wait_frameno = 0;
sbni_outb(sc, CSR1, *(u_char *)&sc->csr1 | PR_RES);
sbni_outb(sc, CSR0, EN_INT);
}
/* -------------------------------------------------------------------------- */
/* Map logical receive-level index (0..15) to the CSR1 rxl field value. */
static u_char rxl_tab[] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08,
0x0a, 0x0c, 0x0f, 0x16, 0x18, 0x1a, 0x1c, 0x1f
};
#define SIZE_OF_TIMEOUT_RXL_TAB 4
/* Receive-level indices cycled through on link timeouts. */
static u_char timeout_rxl_tab[] = {
0x03, 0x05, 0x08, 0x0b
};
/*
* Apply device flags to the softc: receive level (fixed or
* auto-detected), line rate, max frame size, and the station's
* Ethernet address (taken from flags or randomly generated).
*/
static void
set_initial_values(struct sbni_softc *sc, struct sbni_flags flags)
{
if (flags.fixed_rxl) {
sc->delta_rxl = 0; /* disable receive level autodetection */
sc->cur_rxl_index = flags.rxl;
} else {
sc->delta_rxl = DEF_RXL_DELTA;
sc->cur_rxl_index = DEF_RXL;
}
sc->csr1.rate = flags.fixed_rate ? flags.rate : DEFAULT_RATE;
sc->csr1.rxl = rxl_tab[sc->cur_rxl_index];
sc->maxframe = DEFAULT_FRAME_LEN;
/*
* generate Ethernet address (0x00ff01xxxxxx)
*/
*(u_int16_t *) sc->enaddr = htons(0x00ff);
if (flags.mac_addr) {
*(u_int32_t *) (sc->enaddr + 2) =
htonl(flags.mac_addr | 0x01000000);
} else {
*(u_char *) (sc->enaddr + 2) = 0x01;
read_random(sc->enaddr + 3, 3);
}
}
#ifdef SBNI_DUAL_COMPOUND
/* Link a freshly attached ISA board into the global master-candidate list. */
void
sbni_add(struct sbni_softc *sc)
{
mtx_lock(&headlist_lock);
sc->link = sbni_headlist;
sbni_headlist = sc;
mtx_unlock(&headlist_lock);
}
/*
* Find the "master" board for an irq-less second channel: scan the
* list for a softc whose I/O base differs by exactly 4, register
* ourselves as its slave, unlink it from the list and return it.
* Returns NULL when no matching master exists.
*/
struct sbni_softc *
connect_to_master(struct sbni_softc *sc)
{
struct sbni_softc *p, *p_prev;
mtx_lock(&headlist_lock);
for (p = sbni_headlist, p_prev = NULL; p; p_prev = p, p = p->link) {
if (rman_get_start(p->io_res) == rman_get_start(sc->io_res) + 4 ||
rman_get_start(p->io_res) == rman_get_start(sc->io_res) - 4) {
p->slave_sc = sc;
if (p_prev)
p_prev->link = p->link;
else
sbni_headlist = p->link;
mtx_unlock(&headlist_lock);
return p;
}
}
mtx_unlock(&headlist_lock);
return (NULL);
}
#endif /* SBNI_DUAL_COMPOUND */
/*
* Receive level auto-selection: after a bad frame, step the receive
* level in the current direction, reversing it at the table ends or
* when the error rate got worse than during the previous interval.
*/
static void
change_level(struct sbni_softc *sc)
{
if (sc->delta_rxl == 0) /* do not auto-negotiate RxL */
return;
if (sc->cur_rxl_index == 0)
sc->delta_rxl = 1;
else if (sc->cur_rxl_index == 15)
sc->delta_rxl = -1;
else if (sc->cur_rxl_rcvd < sc->prev_rxl_rcvd)
sc->delta_rxl = -sc->delta_rxl;
sc->csr1.rxl = rxl_tab[sc->cur_rxl_index += sc->delta_rxl];
sbni_inb(sc, CSR0); /* it needed for PCI cards */
sbni_outb(sc, CSR1, *(u_char *)&sc->csr1);
sc->prev_rxl_rcvd = sc->cur_rxl_rcvd;
sc->cur_rxl_rcvd = 0;
}
/*
* On a link timeout, jump to the next entry of the dedicated
* timeout_rxl_tab cycle instead of stepping by delta_rxl.
*/
static void
timeout_change_level(struct sbni_softc *sc)
{
sc->cur_rxl_index = timeout_rxl_tab[sc->timeout_rxl];
if (++sc->timeout_rxl >= 4)
sc->timeout_rxl = 0;
sc->csr1.rxl = rxl_tab[sc->cur_rxl_index];
sbni_inb(sc, CSR0);
sbni_outb(sc, CSR1, *(u_char *)&sc->csr1);
sc->prev_rxl_rcvd = sc->cur_rxl_rcvd;
sc->cur_rxl_rcvd = 0;
}
/* -------------------------------------------------------------------------- */
/*
* Interface ioctl handler. Besides the standard SIOCSIFFLAGS and
* multicast requests (no hardware filter - nothing to program), it
* implements the driver-private SIOCG/SHWFLAGS (get/set receive level,
* rate and MAC bytes) and SIOCG/RINSTATS (get/reset driver statistics)
* requests; the "set" and "reset" variants are root-only.
*/
static int
sbni_ioctl(if_t ifp, u_long command, caddr_t data)
{
struct sbni_softc *sc;
struct ifreq *ifr;
struct thread *td;
struct sbni_in_stats *in_stats;
struct sbni_flags flags;
int error;
sc = if_getsoftc(ifp);
ifr = (struct ifreq *)data;
td = curthread;
error = 0;
switch (command) {
case SIOCSIFFLAGS:
/*
* If the interface is marked up and stopped, then start it.
* If it is marked down and running, then stop it.
*/
SBNI_LOCK(sc);
if (if_getflags(ifp) & IFF_UP) {
if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
sbni_init_locked(sc);
} else {
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
sbni_stop(sc);
}
}
SBNI_UNLOCK(sc);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
/*
* Multicast list has changed; set the hardware filter
* accordingly.
*/
error = 0;
/* if (ifr == NULL)
error = EAFNOSUPPORT; */
break;
/*
* SBNI specific ioctl
*/
case SIOCGHWFLAGS: /* get flags */
SBNI_LOCK(sc);
/* low 3 MAC bytes double as the mac_addr flag field */
bcopy((caddr_t)if_getlladdr(sc->ifp)+3, (caddr_t) &flags, 3);
flags.rxl = sc->cur_rxl_index;
flags.rate = sc->csr1.rate;
flags.fixed_rxl = (sc->delta_rxl == 0);
flags.fixed_rate = 1;
SBNI_UNLOCK(sc);
bcopy(&flags, &ifr->ifr_ifru, sizeof(flags));
break;
case SIOCGINSTATS:
/* snapshot under the lock, copyout after dropping it */
in_stats = malloc(sizeof(struct sbni_in_stats), M_DEVBUF,
M_WAITOK);
SBNI_LOCK(sc);
bcopy(&sc->in_stats, in_stats, sizeof(struct sbni_in_stats));
SBNI_UNLOCK(sc);
error = copyout(in_stats, ifr_data_get_ptr(ifr),
sizeof(struct sbni_in_stats));
free(in_stats, M_DEVBUF);
break;
case SIOCSHWFLAGS: /* set flags */
/* root only */
error = priv_check(td, PRIV_DRIVER);
if (error)
break;
bcopy(&ifr->ifr_ifru, &flags, sizeof(flags));
SBNI_LOCK(sc);
if (flags.fixed_rxl) {
sc->delta_rxl = 0;
sc->cur_rxl_index = flags.rxl;
} else {
sc->delta_rxl = DEF_RXL_DELTA;
sc->cur_rxl_index = DEF_RXL;
}
sc->csr1.rxl = rxl_tab[sc->cur_rxl_index];
sc->csr1.rate = flags.fixed_rate ? flags.rate : DEFAULT_RATE;
if (flags.mac_addr)
bcopy((caddr_t) &flags,
(caddr_t) if_getlladdr(sc->ifp)+3, 3);
/* Don't be afraid... */
sbni_outb(sc, CSR1, *(char*)(&sc->csr1) | PR_RES);
SBNI_UNLOCK(sc);
break;
case SIOCRINSTATS:
SBNI_LOCK(sc);
if (!(error = priv_check(td, PRIV_DRIVER))) /* root only */
bzero(&sc->in_stats, sizeof(struct sbni_in_stats));
SBNI_UNLOCK(sc);
break;
default:
error = ether_ioctl(ifp, command, data);
break;
}
return (error);
}
/* -------------------------------------------------------------------------- */
/* Fold 'len' bytes at 'p' into the running CRC via the CRC32 macro. */
static u_int32_t
calc_crc32(u_int32_t crc, caddr_t p, u_int len)
{
while (len--)
crc = CRC32(*p++, crc);
return (crc);
}
/* Byte-wise CRC-32 lookup table used by the CRC32() macro. */
static u_int32_t crc32tab[] __aligned(8) = {
0xD202EF8D, 0xA505DF1B, 0x3C0C8EA1, 0x4B0BBE37,
0xD56F2B94, 0xA2681B02, 0x3B614AB8, 0x4C667A2E,
0xDCD967BF, 0xABDE5729, 0x32D70693, 0x45D03605,
0xDBB4A3A6, 0xACB39330, 0x35BAC28A, 0x42BDF21C,
0xCFB5FFE9, 0xB8B2CF7F, 0x21BB9EC5, 0x56BCAE53,
0xC8D83BF0, 0xBFDF0B66, 0x26D65ADC, 0x51D16A4A,
0xC16E77DB, 0xB669474D, 0x2F6016F7, 0x58672661,
0xC603B3C2, 0xB1048354, 0x280DD2EE, 0x5F0AE278,
0xE96CCF45, 0x9E6BFFD3, 0x0762AE69, 0x70659EFF,
0xEE010B5C, 0x99063BCA, 0x000F6A70, 0x77085AE6,
0xE7B74777, 0x90B077E1, 0x09B9265B, 0x7EBE16CD,
0xE0DA836E, 0x97DDB3F8, 0x0ED4E242, 0x79D3D2D4,
0xF4DBDF21, 0x83DCEFB7, 0x1AD5BE0D, 0x6DD28E9B,
0xF3B61B38, 0x84B12BAE, 0x1DB87A14, 0x6ABF4A82,
0xFA005713, 0x8D076785, 0x140E363F, 0x630906A9,
0xFD6D930A, 0x8A6AA39C, 0x1363F226, 0x6464C2B0,
0xA4DEAE1D, 0xD3D99E8B, 0x4AD0CF31, 0x3DD7FFA7,
0xA3B36A04, 0xD4B45A92, 0x4DBD0B28, 0x3ABA3BBE,
0xAA05262F, 0xDD0216B9, 0x440B4703, 0x330C7795,
0xAD68E236, 0xDA6FD2A0, 0x4366831A, 0x3461B38C,
0xB969BE79, 0xCE6E8EEF, 0x5767DF55, 0x2060EFC3,
0xBE047A60, 0xC9034AF6, 0x500A1B4C, 0x270D2BDA,
0xB7B2364B, 0xC0B506DD, 0x59BC5767, 0x2EBB67F1,
0xB0DFF252, 0xC7D8C2C4, 0x5ED1937E, 0x29D6A3E8,
0x9FB08ED5, 0xE8B7BE43, 0x71BEEFF9, 0x06B9DF6F,
0x98DD4ACC, 0xEFDA7A5A, 0x76D32BE0, 0x01D41B76,
0x916B06E7, 0xE66C3671, 0x7F6567CB, 0x0862575D,
0x9606C2FE, 0xE101F268, 0x7808A3D2, 0x0F0F9344,
0x82079EB1, 0xF500AE27, 0x6C09FF9D, 0x1B0ECF0B,
0x856A5AA8, 0xF26D6A3E, 0x6B643B84, 0x1C630B12,
0x8CDC1683, 0xFBDB2615, 0x62D277AF, 0x15D54739,
0x8BB1D29A, 0xFCB6E20C, 0x65BFB3B6, 0x12B88320,
0x3FBA6CAD, 0x48BD5C3B, 0xD1B40D81, 0xA6B33D17,
0x38D7A8B4, 0x4FD09822, 0xD6D9C998, 0xA1DEF90E,
0x3161E49F, 0x4666D409, 0xDF6F85B3, 0xA868B525,
0x360C2086, 0x410B1010, 0xD80241AA, 0xAF05713C,
0x220D7CC9, 0x550A4C5F, 0xCC031DE5, 0xBB042D73,
0x2560B8D0, 0x52678846, 0xCB6ED9FC, 0xBC69E96A,
0x2CD6F4FB, 0x5BD1C46D, 0xC2D895D7, 0xB5DFA541,
0x2BBB30E2, 0x5CBC0074, 0xC5B551CE, 0xB2B26158,
0x04D44C65, 0x73D37CF3, 0xEADA2D49, 0x9DDD1DDF,
0x03B9887C, 0x74BEB8EA, 0xEDB7E950, 0x9AB0D9C6,
0x0A0FC457, 0x7D08F4C1, 0xE401A57B, 0x930695ED,
0x0D62004E, 0x7A6530D8, 0xE36C6162, 0x946B51F4,
0x19635C01, 0x6E646C97, 0xF76D3D2D, 0x806A0DBB,
0x1E0E9818, 0x6909A88E, 0xF000F934, 0x8707C9A2,
0x17B8D433, 0x60BFE4A5, 0xF9B6B51F, 0x8EB18589,
0x10D5102A, 0x67D220BC, 0xFEDB7106, 0x89DC4190,
0x49662D3D, 0x3E611DAB, 0xA7684C11, 0xD06F7C87,
0x4E0BE924, 0x390CD9B2, 0xA0058808, 0xD702B89E,
0x47BDA50F, 0x30BA9599, 0xA9B3C423, 0xDEB4F4B5,
0x40D06116, 0x37D75180, 0xAEDE003A, 0xD9D930AC,
0x54D13D59, 0x23D60DCF, 0xBADF5C75, 0xCDD86CE3,
0x53BCF940, 0x24BBC9D6, 0xBDB2986C, 0xCAB5A8FA,
0x5A0AB56B, 0x2D0D85FD, 0xB404D447, 0xC303E4D1,
0x5D677172, 0x2A6041E4, 0xB369105E, 0xC46E20C8,
0x72080DF5, 0x050F3D63, 0x9C066CD9, 0xEB015C4F,
0x7565C9EC, 0x0262F97A, 0x9B6BA8C0, 0xEC6C9856,
0x7CD385C7, 0x0BD4B551, 0x92DDE4EB, 0xE5DAD47D,
0x7BBE41DE, 0x0CB97148, 0x95B020F2, 0xE2B71064,
0x6FBF1D91, 0x18B82D07, 0x81B17CBD, 0xF6B64C2B,
0x68D2D988, 0x1FD5E91E, 0x86DCB8A4, 0xF1DB8832,
0x616495A3, 0x1663A535, 0x8F6AF48F, 0xF86DC419,
0x660951BA, 0x110E612C, 0x88073096, 0xFF000000
};
diff --git a/sys/dev/sbni/if_sbni_isa.c b/sys/dev/sbni/if_sbni_isa.c
index 6f8c91a0b3bc..113ff3d954e4 100644
--- a/sys/dev/sbni/if_sbni_isa.c
+++ b/sys/dev/sbni/if_sbni_isa.c
@@ -1,163 +1,158 @@
/*-
* Copyright (c) 1997-2001 Granch, Ltd. All rights reserved.
* Author: Denis I.Timofeev <timofeev@granch.ru>
*
* Redistributon and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <isa/isavar.h>
#include <dev/sbni/if_sbnireg.h>
#include <dev/sbni/if_sbnivar.h>
static int sbni_probe_isa(device_t);
static int sbni_attach_isa(device_t);
/* newbus glue for the ISA attachment */
static device_method_t sbni_isa_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, sbni_probe_isa),
DEVMETHOD(device_attach, sbni_attach_isa),
{ 0, 0 }
};
static driver_t sbni_isa_driver = {
"sbni",
sbni_isa_methods,
sizeof(struct sbni_softc)
};
static struct isa_pnp_id sbni_ids[] = {
{ 0, NULL } /* we have no pnp sbni cards atm. */
};
/*
* ISA probe: allocate the I/O port range and check for the adapter
* with sbni_probe(). The I/O resource is kept on success for attach.
*/
static int
sbni_probe_isa(device_t dev)
{
struct sbni_softc *sc;
int error;
error = ISA_PNP_PROBE(device_get_parent(dev), dev, sbni_ids);
if (error && error != ENOENT)
return (error);
sc = device_get_softc(dev);
sc->io_res = bus_alloc_resource_anywhere(dev, SYS_RES_IOPORT,
&sc->io_rid, SBNI_PORTS,
RF_ACTIVE);
if (!sc->io_res) {
printf("sbni: cannot allocate io ports!\n");
return (ENOENT);
}
if (sbni_probe(sc) != 0) {
sbni_release_resources(sc);
return (ENXIO);
}
device_set_desc(dev, "Granch SBNI12/ISA adapter");
return (0);
}
static int
sbni_attach_isa(device_t dev)
{
struct sbni_softc *sc;
struct sbni_flags flags;
int error;
sc = device_get_softc(dev);
sc->dev = dev;
sc->irq_res = bus_alloc_resource_any(
dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE);
#ifndef SBNI_DUAL_COMPOUND
if (sc->irq_res == NULL) {
device_printf(dev, "irq conflict!\n");
sbni_release_resources(sc);
return (ENOENT);
}
#else /* SBNI_DUAL_COMPOUND */
if (sc->irq_res) {
sbni_add(sc);
} else {
struct sbni_softc *master;
if ((master = connect_to_master(sc)) == NULL) {
device_printf(dev, "failed to alloc irq\n");
sbni_release_resources(sc);
return (ENXIO);
} else {
device_printf(dev, "shared irq with %s\n",
if_name(master->ifp));
}
}
#endif /* SBNI_DUAL_COMPOUND */
*(u_int32_t*)&flags = device_get_flags(dev);
- error = sbni_attach(sc, device_get_unit(dev) * 2, flags);
- if (error) {
- device_printf(dev, "cannot initialize driver\n");
- sbni_release_resources(sc);
- return (error);
- }
+ sbni_attach(sc, device_get_unit(dev) * 2, flags);
if (sc->irq_res) {
error = bus_setup_intr(
dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
NULL, sbni_intr, sc, &sc->irq_handle);
if (error) {
device_printf(dev, "bus_setup_intr\n");
sbni_detach(sc);
sbni_release_resources(sc);
return (error);
}
}
return (0);
}
DRIVER_MODULE(sbni, isa, sbni_isa_driver, 0, 0);
MODULE_DEPEND(sbni, isa, 1, 1, 1);
ISA_PNP_INFO(sbni_ids);
diff --git a/sys/dev/sbni/if_sbni_pci.c b/sys/dev/sbni/if_sbni_pci.c
index 60c70ec492f9..b1b0614613eb 100644
--- a/sys/dev/sbni/if_sbni_pci.c
+++ b/sys/dev/sbni/if_sbni_pci.c
@@ -1,183 +1,172 @@
/*-
* Copyright (c) 1997-2001 Granch, Ltd. All rights reserved.
* Author: Denis I.Timofeev <timofeev@granch.ru>
*
* Redistributon and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_arp.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/sbni/if_sbnireg.h>
#include <dev/sbni/if_sbnivar.h>
static int sbni_pci_probe(device_t);
static int sbni_pci_attach(device_t);
static int sbni_pci_detach(device_t);
/* newbus glue for the PCI attachment */
static device_method_t sbni_pci_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, sbni_pci_probe),
DEVMETHOD(device_attach, sbni_pci_attach),
DEVMETHOD(device_detach, sbni_pci_detach),
{ 0, 0 }
};
static driver_t sbni_pci_driver = {
"sbni",
sbni_pci_methods,
sizeof(struct sbni_softc)
};
DRIVER_MODULE(sbni, pci, sbni_pci_driver, 0, 0);
MODULE_DEPEND(sbni, pci, 1, 1, 1);
static int
sbni_pci_probe(device_t dev)
{
struct sbni_softc *sc;
if (pci_get_vendor(dev) != SBNI_PCI_VENDOR ||
pci_get_device(dev) != SBNI_PCI_DEVICE)
return (ENXIO);
sc = device_get_softc(dev);
if (pci_get_subdevice(dev) == 2) {
sc->slave_sc = malloc(sizeof(struct sbni_softc),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (!sc->slave_sc)
return (ENOMEM);
device_set_desc(dev, "Granch SBNI12/PCI Dual adapter");
} else
device_set_desc(dev, "Granch SBNI12/PCI adapter");
sc->io_rid = PCIR_BAR(0);
sc->io_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
&sc->io_rid, RF_ACTIVE);
if (!sc->io_res) {
device_printf(dev, "cannot allocate io ports!\n");
if (sc->slave_sc)
free(sc->slave_sc, M_DEVBUF);
return (ENOENT);
}
if (sc->slave_sc) {
sc->slave_sc->io_res = sc->io_res;
sc->slave_sc->io_off = 4;
}
if (sbni_probe(sc) != 0) {
sbni_release_resources(sc);
if (sc->slave_sc)
free(sc->slave_sc, M_DEVBUF);
return (ENXIO);
}
return (0);
}
static int
sbni_pci_attach(device_t dev)
{
struct sbni_softc *sc;
struct sbni_flags flags;
int error;
sc = device_get_softc(dev);
sc->dev = dev;
sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
RF_SHAREABLE);
if (sc->irq_res == NULL) {
device_printf(dev, "cannot claim irq!\n");
error = ENOENT;
goto attach_failed;
}
memset(&flags, 0, sizeof(flags));
- error = sbni_attach(sc, device_get_unit(dev) * 2, flags);
- if (error) {
- device_printf(dev, "cannot initialize driver\n");
- goto attach_failed;
- }
- if (sc->slave_sc) {
- error = sbni_attach(sc->slave_sc, device_get_unit(dev) * 2 + 1,
- flags);
- if (error) {
- device_printf(dev, "cannot initialize slave\n");
- sbni_detach(sc);
- goto attach_failed;
- }
- }
+ sbni_attach(sc, device_get_unit(dev) * 2, flags);
+ if (sc->slave_sc)
+ sbni_attach(sc->slave_sc, device_get_unit(dev) * 2 + 1, flags);
if (sc->irq_res) {
error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET |
INTR_MPSAFE, NULL, sbni_intr, sc, &sc->irq_handle);
if (error) {
device_printf(dev, "bus_setup_intr\n");
sbni_detach(sc);
if (sc->slave_sc)
sbni_detach(sc);
goto attach_failed;
}
}
return (0);
attach_failed:
sbni_release_resources(sc);
if (sc->slave_sc)
free(sc->slave_sc, M_DEVBUF);
return (error);
}
static int
sbni_pci_detach(device_t dev)
{
struct sbni_softc *sc;
sc = device_get_softc(dev);
sbni_detach(sc);
if (sc->slave_sc)
sbni_detach(sc);
sbni_release_resources(sc);
if (sc->slave_sc)
free(sc->slave_sc, M_DEVBUF);
return (0);
}
diff --git a/sys/dev/sbni/if_sbnivar.h b/sys/dev/sbni/if_sbnivar.h
index 92678899a5d9..211d2bc56b0e 100644
--- a/sys/dev/sbni/if_sbnivar.h
+++ b/sys/dev/sbni/if_sbnivar.h
@@ -1,156 +1,156 @@
/*-
* Copyright (c) 1997-2001 Granch, Ltd. All rights reserved.
* Author: Denis I.Timofeev <timofeev@granch.ru>
*
* Redistributon and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* SBNI12 definitions
*/
/*
* CONFIGURATION PARAMETER:
*
* Uncomment this if you want to use model SBNI12D-11/ISA with same IRQ
* for both first and second channels.
*/
#define SBNI_DUAL_COMPOUND 1
#define SBNI_DEBUG 0
#if SBNI_DEBUG
#define DP(A) A
#else
#define DP(A)
#endif
struct sbni_in_stats {
u_int32_t all_rx_number;
u_int32_t bad_rx_number;
u_int32_t timeout_number;
u_int32_t all_tx_number;
u_int32_t resend_tx_number;
};
struct sbni_flags {
u_int mac_addr : 24;
u_int rxl : 4;
u_int rate : 2;
u_int fixed_rxl : 1;
u_int fixed_rate : 1;
};
#ifdef _KERNEL /* to avoid compile this decls with sbniconfig */
struct sbni_softc {
struct ifnet *ifp;
device_t dev;
u_char enaddr[6];
int io_rid;
struct resource *io_res;
int io_off;
int irq_rid;
struct resource *irq_res;
void *irq_handle;
struct mbuf *rx_buf_p; /* receive buffer ptr */
struct mbuf *tx_buf_p; /* transmit buffer ptr */
u_int pktlen; /* length of transmitting pkt */
u_int framelen; /* current frame length */
u_int maxframe; /* maximum valid frame length */
u_int state;
u_int inppos; /* positions in rx/tx buffers */
u_int outpos; /* positions in rx/tx buffers */
/* transmitting frame number - from frames qty to 1 */
u_int tx_frameno;
/* expected number of next receiving frame */
u_int wait_frameno;
/* count of failed attempts to frame send - 32 attempts do before
error - while receiver tunes on opposite side of wire */
u_int trans_errors;
/* idle time; send pong when limit exceeded */
u_int timer_ticks;
/* fields used for receive level autoselection */
int delta_rxl;
u_int cur_rxl_index;
u_int timeout_rxl;
u_int32_t cur_rxl_rcvd;
u_int32_t prev_rxl_rcvd;
struct sbni_csr1 csr1; /* current value of CSR1 */
struct sbni_in_stats in_stats; /* internal statistics */
struct callout wch;
struct mtx lock;
struct sbni_softc *slave_sc;
#ifdef SBNI_DUAL_COMPOUND
struct sbni_softc *link;
#endif
};
#define SBNI_LOCK(sc) mtx_lock(&(sc)->lock)
#define SBNI_UNLOCK(sc) mtx_unlock(&(sc)->lock)
#define SBNI_ASSERT_LOCKED(sc) mtx_assert(&(sc)->lock, MA_OWNED)
void sbni_intr(void *);
int sbni_probe(struct sbni_softc *);
-int sbni_attach(struct sbni_softc *, int, struct sbni_flags);
+void sbni_attach(struct sbni_softc *, int, struct sbni_flags);
void sbni_detach(struct sbni_softc *);
void sbni_release_resources(struct sbni_softc *);
extern u_int32_t next_sbni_unit;
#ifdef SBNI_DUAL_COMPOUND
void sbni_add(struct sbni_softc *);
struct sbni_softc *connect_to_master(struct sbni_softc *);
#endif
#endif /* _KERNEL */
/*
* SBNI socket ioctl params
*/
#define SIOCGHWFLAGS _IOWR('i', 62, struct ifreq) /* get flags */
#define SIOCSHWFLAGS _IOWR('i', 61, struct ifreq) /* set flags */
#define SIOCGINSTATS _IOWR('i', 60, struct ifreq) /* get internal stats */
#define SIOCRINSTATS _IOWR('i', 63, struct ifreq) /* reset internal stats */
/*
* CRC-32 stuff
*/
#define CRC32(c,crc) (crc32tab[((size_t)(crc) ^ (c)) & 0xff] ^ (((crc) >> 8) & 0x00ffffff))
/* CRC generator EDB88320 */
/* CRC remainder 2144DF1C */
/* CRC initial value 0 */
#define CRC32_REMAINDER 0x2144df1c
#define CRC32_INITIAL 0x00000000
diff --git a/sys/dev/sfxge/sfxge.c b/sys/dev/sfxge/sfxge.c
index 23294f90f517..5fb3a3e74c2e 100644
--- a/sys/dev/sfxge/sfxge.c
+++ b/sys/dev/sfxge/sfxge.c
@@ -1,1206 +1,1199 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2010-2016 Solarflare Communications Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
* Solarflare Communications, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are
* those of the authors and should not be interpreted as representing official
* policies, either expressed or implied, of the FreeBSD Project.
*/
#include <sys/cdefs.h>
#include "opt_rss.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/taskqueue.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/priv.h>
#include <sys/syslog.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/if_types.h>
#ifdef RSS
#include <net/rss_config.h>
#endif
#include "common/efx.h"
#include "sfxge.h"
#include "sfxge_rx.h"
#include "sfxge_ioc.h"
#include "sfxge_version.h"
#define SFXGE_CAP (IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM | \
IFCAP_RXCSUM | IFCAP_TXCSUM | \
IFCAP_RXCSUM_IPV6 | IFCAP_TXCSUM_IPV6 | \
IFCAP_TSO4 | IFCAP_TSO6 | \
IFCAP_JUMBO_MTU | \
IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWSTATS)
#define SFXGE_CAP_ENABLE SFXGE_CAP
#define SFXGE_CAP_FIXED (IFCAP_VLAN_MTU | \
IFCAP_JUMBO_MTU | IFCAP_LINKSTATE | IFCAP_HWSTATS)
MALLOC_DEFINE(M_SFXGE, "sfxge", "Solarflare 10GigE driver");
SYSCTL_NODE(_hw, OID_AUTO, sfxge, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"SFXGE driver parameters");
#define SFXGE_PARAM_RX_RING SFXGE_PARAM(rx_ring)
static int sfxge_rx_ring_entries = SFXGE_NDESCS;
TUNABLE_INT(SFXGE_PARAM_RX_RING, &sfxge_rx_ring_entries);
SYSCTL_INT(_hw_sfxge, OID_AUTO, rx_ring, CTLFLAG_RDTUN,
&sfxge_rx_ring_entries, 0,
"Maximum number of descriptors in a receive ring");
#define SFXGE_PARAM_TX_RING SFXGE_PARAM(tx_ring)
static int sfxge_tx_ring_entries = SFXGE_NDESCS;
TUNABLE_INT(SFXGE_PARAM_TX_RING, &sfxge_tx_ring_entries);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_ring, CTLFLAG_RDTUN,
&sfxge_tx_ring_entries, 0,
"Maximum number of descriptors in a transmit ring");
#define SFXGE_PARAM_RESTART_ATTEMPTS SFXGE_PARAM(restart_attempts)
static int sfxge_restart_attempts = 3;
TUNABLE_INT(SFXGE_PARAM_RESTART_ATTEMPTS, &sfxge_restart_attempts);
SYSCTL_INT(_hw_sfxge, OID_AUTO, restart_attempts, CTLFLAG_RDTUN,
&sfxge_restart_attempts, 0,
"Maximum number of attempts to bring interface up after reset");
#if EFSYS_OPT_MCDI_LOGGING
#define SFXGE_PARAM_MCDI_LOGGING SFXGE_PARAM(mcdi_logging)
static int sfxge_mcdi_logging = 0;
TUNABLE_INT(SFXGE_PARAM_MCDI_LOGGING, &sfxge_mcdi_logging);
#endif
static void
sfxge_reset(void *arg, int npending);
static int
sfxge_estimate_rsrc_limits(struct sfxge_softc *sc)
{
efx_drv_limits_t limits;
int rc;
unsigned int evq_max;
uint32_t evq_allocated;
uint32_t rxq_allocated;
uint32_t txq_allocated;
/*
* Limit the number of event queues to:
* - number of CPUs
* - hardwire maximum RSS channels
* - administratively specified maximum RSS channels
*/
#ifdef RSS
/*
* Avoid extra limitations so that the number of queues
* may be configured at administrator's will
*/
evq_max = MIN(MAX(rss_getnumbuckets(), 1), EFX_MAXRSS);
#else
evq_max = MIN(mp_ncpus, EFX_MAXRSS);
#endif
if (sc->max_rss_channels > 0)
evq_max = MIN(evq_max, sc->max_rss_channels);
memset(&limits, 0, sizeof(limits));
limits.edl_min_evq_count = 1;
limits.edl_max_evq_count = evq_max;
limits.edl_min_txq_count = SFXGE_EVQ0_N_TXQ(sc);
limits.edl_max_txq_count = evq_max + SFXGE_EVQ0_N_TXQ(sc) - 1;
limits.edl_min_rxq_count = 1;
limits.edl_max_rxq_count = evq_max;
efx_nic_set_drv_limits(sc->enp, &limits);
if ((rc = efx_nic_init(sc->enp)) != 0)
return (rc);
rc = efx_nic_get_vi_pool(sc->enp, &evq_allocated, &rxq_allocated,
&txq_allocated);
if (rc != 0) {
efx_nic_fini(sc->enp);
return (rc);
}
KASSERT(txq_allocated >= SFXGE_EVQ0_N_TXQ(sc),
("txq_allocated < %u", SFXGE_EVQ0_N_TXQ(sc)));
sc->evq_max = MIN(evq_allocated, evq_max);
sc->evq_max = MIN(rxq_allocated, sc->evq_max);
sc->evq_max = MIN(txq_allocated - (SFXGE_EVQ0_N_TXQ(sc) - 1),
sc->evq_max);
KASSERT(sc->evq_max <= evq_max,
("allocated more than maximum requested"));
#ifdef RSS
if (sc->evq_max < rss_getnumbuckets())
device_printf(sc->dev, "The number of allocated queues (%u) "
"is less than the number of RSS buckets (%u); "
"performance degradation might be observed",
sc->evq_max, rss_getnumbuckets());
#endif
/*
* NIC is kept initialized in the case of success to be able to
* initialize port to find out media types.
*/
return (0);
}
static int
sfxge_set_drv_limits(struct sfxge_softc *sc)
{
efx_drv_limits_t limits;
memset(&limits, 0, sizeof(limits));
/* Limits are strict since take into account initial estimation */
limits.edl_min_evq_count = limits.edl_max_evq_count =
sc->intr.n_alloc;
limits.edl_min_txq_count = limits.edl_max_txq_count =
sc->intr.n_alloc + SFXGE_EVQ0_N_TXQ(sc) - 1;
limits.edl_min_rxq_count = limits.edl_max_rxq_count =
sc->intr.n_alloc;
return (efx_nic_set_drv_limits(sc->enp, &limits));
}
static int
sfxge_start(struct sfxge_softc *sc)
{
int rc;
SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc);
if (sc->init_state == SFXGE_STARTED)
return (0);
if (sc->init_state != SFXGE_REGISTERED) {
rc = EINVAL;
goto fail;
}
/* Set required resource limits */
if ((rc = sfxge_set_drv_limits(sc)) != 0)
goto fail;
if ((rc = efx_nic_init(sc->enp)) != 0)
goto fail;
/* Start processing interrupts. */
if ((rc = sfxge_intr_start(sc)) != 0)
goto fail2;
/* Start processing events. */
if ((rc = sfxge_ev_start(sc)) != 0)
goto fail3;
/* Fire up the port. */
if ((rc = sfxge_port_start(sc)) != 0)
goto fail4;
/* Start the receiver side. */
if ((rc = sfxge_rx_start(sc)) != 0)
goto fail5;
/* Start the transmitter side. */
if ((rc = sfxge_tx_start(sc)) != 0)
goto fail6;
sc->init_state = SFXGE_STARTED;
/* Tell the stack we're running. */
if_setdrvflagbits(sc->ifnet, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
return (0);
fail6:
sfxge_rx_stop(sc);
fail5:
sfxge_port_stop(sc);
fail4:
sfxge_ev_stop(sc);
fail3:
sfxge_intr_stop(sc);
fail2:
efx_nic_fini(sc->enp);
fail:
device_printf(sc->dev, "sfxge_start: %d\n", rc);
return (rc);
}
static void
sfxge_if_init(void *arg)
{
struct sfxge_softc *sc;
sc = (struct sfxge_softc *)arg;
SFXGE_ADAPTER_LOCK(sc);
(void)sfxge_start(sc);
SFXGE_ADAPTER_UNLOCK(sc);
}
static void
sfxge_stop(struct sfxge_softc *sc)
{
SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc);
if (sc->init_state != SFXGE_STARTED)
return;
sc->init_state = SFXGE_REGISTERED;
/* Stop the transmitter. */
sfxge_tx_stop(sc);
/* Stop the receiver. */
sfxge_rx_stop(sc);
/* Stop the port. */
sfxge_port_stop(sc);
/* Stop processing events. */
sfxge_ev_stop(sc);
/* Stop processing interrupts. */
sfxge_intr_stop(sc);
efx_nic_fini(sc->enp);
if_setdrvflagbits(sc->ifnet, 0, IFF_DRV_RUNNING);
}
static int
sfxge_vpd_ioctl(struct sfxge_softc *sc, sfxge_ioc_t *ioc)
{
efx_vpd_value_t value;
int rc = 0;
switch (ioc->u.vpd.op) {
case SFXGE_VPD_OP_GET_KEYWORD:
value.evv_tag = ioc->u.vpd.tag;
value.evv_keyword = ioc->u.vpd.keyword;
rc = efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value);
if (rc != 0)
break;
ioc->u.vpd.len = MIN(ioc->u.vpd.len, value.evv_length);
if (ioc->u.vpd.payload != 0) {
rc = copyout(value.evv_value, ioc->u.vpd.payload,
ioc->u.vpd.len);
}
break;
case SFXGE_VPD_OP_SET_KEYWORD:
if (ioc->u.vpd.len > sizeof(value.evv_value))
return (EINVAL);
value.evv_tag = ioc->u.vpd.tag;
value.evv_keyword = ioc->u.vpd.keyword;
value.evv_length = ioc->u.vpd.len;
rc = copyin(ioc->u.vpd.payload, value.evv_value, value.evv_length);
if (rc != 0)
break;
rc = efx_vpd_set(sc->enp, sc->vpd_data, sc->vpd_size, &value);
if (rc != 0)
break;
rc = efx_vpd_verify(sc->enp, sc->vpd_data, sc->vpd_size);
if (rc != 0)
break;
rc = efx_vpd_write(sc->enp, sc->vpd_data, sc->vpd_size);
break;
default:
rc = EOPNOTSUPP;
break;
}
return (rc);
}
static int
sfxge_private_ioctl(struct sfxge_softc *sc, sfxge_ioc_t *ioc)
{
switch (ioc->op) {
case SFXGE_MCDI_IOC:
return (sfxge_mcdi_ioctl(sc, ioc));
case SFXGE_NVRAM_IOC:
return (sfxge_nvram_ioctl(sc, ioc));
case SFXGE_VPD_IOC:
return (sfxge_vpd_ioctl(sc, ioc));
default:
return (EOPNOTSUPP);
}
}
static int
sfxge_if_ioctl(if_t ifp, unsigned long command, caddr_t data)
{
struct sfxge_softc *sc;
struct ifreq *ifr;
sfxge_ioc_t ioc;
int error;
ifr = (struct ifreq *)data;
sc = if_getsoftc(ifp);
error = 0;
switch (command) {
case SIOCSIFFLAGS:
SFXGE_ADAPTER_LOCK(sc);
if (if_getflags(ifp) & IFF_UP) {
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
if ((if_getflags(ifp) ^ sc->if_flags) &
(IFF_PROMISC | IFF_ALLMULTI)) {
sfxge_mac_filter_set(sc);
}
} else
sfxge_start(sc);
} else
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
sfxge_stop(sc);
sc->if_flags = if_getflags(ifp);
SFXGE_ADAPTER_UNLOCK(sc);
break;
case SIOCSIFMTU:
if (ifr->ifr_mtu == if_getmtu(ifp)) {
/* Nothing to do */
error = 0;
} else if (ifr->ifr_mtu > SFXGE_MAX_MTU) {
error = EINVAL;
} else if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
if_setmtu(ifp, ifr->ifr_mtu);
error = 0;
} else {
/* Restart required */
SFXGE_ADAPTER_LOCK(sc);
sfxge_stop(sc);
if_setmtu(ifp, ifr->ifr_mtu);
error = sfxge_start(sc);
SFXGE_ADAPTER_UNLOCK(sc);
if (error != 0) {
if_setflagbits(ifp, 0, IFF_UP);
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
if_down(ifp);
}
}
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
sfxge_mac_filter_set(sc);
break;
case SIOCSIFCAP:
{
int reqcap = ifr->ifr_reqcap;
int capchg_mask;
SFXGE_ADAPTER_LOCK(sc);
/* Capabilities to be changed in accordance with request */
capchg_mask = if_getcapenable(ifp) ^ reqcap;
/*
* The networking core already rejects attempts to
* enable capabilities we don't have. We still have
* to reject attempts to disable capabilities that we
* can't (yet) disable.
*/
KASSERT((reqcap & ~if_getcapabilities(ifp)) == 0,
("Unsupported capabilities 0x%x requested 0x%x vs "
"supported 0x%x",
reqcap & ~if_getcapabilities(ifp),
reqcap , if_getcapabilities(ifp)));
if (capchg_mask & SFXGE_CAP_FIXED) {
error = EINVAL;
SFXGE_ADAPTER_UNLOCK(sc);
break;
}
/* Check request before any changes */
if ((capchg_mask & IFCAP_TSO4) &&
(reqcap & (IFCAP_TSO4 | IFCAP_TXCSUM)) == IFCAP_TSO4) {
error = EAGAIN;
SFXGE_ADAPTER_UNLOCK(sc);
if_printf(ifp, "enable txcsum before tso4\n");
break;
}
if ((capchg_mask & IFCAP_TSO6) &&
(reqcap & (IFCAP_TSO6 | IFCAP_TXCSUM_IPV6)) == IFCAP_TSO6) {
error = EAGAIN;
SFXGE_ADAPTER_UNLOCK(sc);
if_printf(ifp, "enable txcsum6 before tso6\n");
break;
}
if (reqcap & IFCAP_TXCSUM) {
if_sethwassistbits(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP), 0);
} else {
if_sethwassistbits(ifp, 0, (CSUM_IP | CSUM_TCP | CSUM_UDP));
if (reqcap & IFCAP_TSO4) {
reqcap &= ~IFCAP_TSO4;
if_printf(ifp,
"tso4 disabled due to -txcsum\n");
}
}
if (reqcap & IFCAP_TXCSUM_IPV6) {
if_sethwassistbits(ifp, (CSUM_TCP_IPV6 | CSUM_UDP_IPV6), 0);
} else {
if_sethwassistbits(ifp, 0, (CSUM_TCP_IPV6 | CSUM_UDP_IPV6));
if (reqcap & IFCAP_TSO6) {
reqcap &= ~IFCAP_TSO6;
if_printf(ifp,
"tso6 disabled due to -txcsum6\n");
}
}
/*
* The kernel takes both IFCAP_TSOx and CSUM_TSO into
* account before using TSO. So, we do not touch
* checksum flags when IFCAP_TSOx is modified.
* Note that CSUM_TSO is (CSUM_IP_TSO|CSUM_IP6_TSO),
* but both bits are set in IPv4 and IPv6 mbufs.
*/
if_setcapenable(ifp, reqcap);
SFXGE_ADAPTER_UNLOCK(sc);
break;
}
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
break;
#ifdef SIOCGI2C
case SIOCGI2C:
{
struct ifi2creq i2c;
error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
if (error != 0)
break;
if (i2c.len > sizeof(i2c.data)) {
error = EINVAL;
break;
}
SFXGE_ADAPTER_LOCK(sc);
error = efx_phy_module_get_info(sc->enp, i2c.dev_addr,
i2c.offset, i2c.len,
&i2c.data[0]);
SFXGE_ADAPTER_UNLOCK(sc);
if (error == 0)
error = copyout(&i2c, ifr_data_get_ptr(ifr),
sizeof(i2c));
break;
}
#endif
case SIOCGPRIVATE_0:
error = priv_check(curthread, PRIV_DRIVER);
if (error != 0)
break;
error = copyin(ifr_data_get_ptr(ifr), &ioc, sizeof(ioc));
if (error != 0)
return (error);
error = sfxge_private_ioctl(sc, &ioc);
if (error == 0) {
error = copyout(&ioc, ifr_data_get_ptr(ifr),
sizeof(ioc));
}
break;
default:
error = ether_ioctl(ifp, command, data);
}
return (error);
}
static void
sfxge_ifnet_fini(if_t ifp)
{
struct sfxge_softc *sc = if_getsoftc(ifp);
SFXGE_ADAPTER_LOCK(sc);
sfxge_stop(sc);
SFXGE_ADAPTER_UNLOCK(sc);
ifmedia_removeall(&sc->media);
ether_ifdetach(ifp);
if_free(ifp);
}
static int
sfxge_ifnet_init(if_t ifp, struct sfxge_softc *sc)
{
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp);
device_t dev;
int rc;
dev = sc->dev;
sc->ifnet = ifp;
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setinitfn(ifp, sfxge_if_init);
if_setsoftc(ifp, sc);
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setioctlfn(ifp, sfxge_if_ioctl);
if_setcapabilities(ifp, SFXGE_CAP);
if_setcapenable(ifp, SFXGE_CAP_ENABLE);
if_sethwtsomax(ifp, SFXGE_TSO_MAX_SIZE);
if_sethwtsomaxsegcount(ifp, SFXGE_TX_MAPPING_MAX_SEG);
if_sethwtsomaxsegsize(ifp, PAGE_SIZE);
#ifdef SFXGE_LRO
if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
if_setcapenablebit(ifp, IFCAP_LRO, 0);
#endif
if (encp->enc_hw_tx_insert_vlan_enabled) {
if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0);
if_setcapenablebit(ifp, IFCAP_VLAN_HWTAGGING, 0);
}
if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
CSUM_TCP_IPV6 | CSUM_UDP_IPV6, 0);
ether_ifattach(ifp, encp->enc_mac_addr);
if_settransmitfn(ifp, sfxge_if_transmit);
if_setqflushfn(ifp, sfxge_if_qflush);
if_setgetcounterfn(ifp, sfxge_get_counter);
DBGPRINT(sc->dev, "ifmedia_init");
if ((rc = sfxge_port_ifmedia_init(sc)) != 0)
goto fail;
return (0);
fail:
ether_ifdetach(sc->ifnet);
return (rc);
}
void
sfxge_sram_buf_tbl_alloc(struct sfxge_softc *sc, size_t n, uint32_t *idp)
{
KASSERT(sc->buffer_table_next + n <=
efx_nic_cfg_get(sc->enp)->enc_buftbl_limit,
("buffer table full"));
*idp = sc->buffer_table_next;
sc->buffer_table_next += n;
}
static int
sfxge_bar_init(struct sfxge_softc *sc)
{
efsys_bar_t *esbp = &sc->bar;
esbp->esb_rid = PCIR_BAR(sc->mem_bar);
if ((esbp->esb_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
&esbp->esb_rid, RF_ACTIVE)) == NULL) {
device_printf(sc->dev, "Cannot allocate BAR region %d\n",
sc->mem_bar);
return (ENXIO);
}
esbp->esb_tag = rman_get_bustag(esbp->esb_res);
esbp->esb_handle = rman_get_bushandle(esbp->esb_res);
SFXGE_BAR_LOCK_INIT(esbp, device_get_nameunit(sc->dev));
return (0);
}
static void
sfxge_bar_fini(struct sfxge_softc *sc)
{
efsys_bar_t *esbp = &sc->bar;
bus_release_resource(sc->dev, SYS_RES_MEMORY, esbp->esb_rid,
esbp->esb_res);
SFXGE_BAR_LOCK_DESTROY(esbp);
}
static int
sfxge_create(struct sfxge_softc *sc)
{
device_t dev;
efx_nic_t *enp;
int error;
char rss_param_name[sizeof(SFXGE_PARAM(%d.max_rss_channels))];
#if EFSYS_OPT_MCDI_LOGGING
char mcdi_log_param_name[sizeof(SFXGE_PARAM(%d.mcdi_logging))];
#endif
dev = sc->dev;
SFXGE_ADAPTER_LOCK_INIT(sc, device_get_nameunit(sc->dev));
sc->max_rss_channels = 0;
snprintf(rss_param_name, sizeof(rss_param_name),
SFXGE_PARAM(%d.max_rss_channels),
(int)device_get_unit(dev));
TUNABLE_INT_FETCH(rss_param_name, &sc->max_rss_channels);
#if EFSYS_OPT_MCDI_LOGGING
sc->mcdi_logging = sfxge_mcdi_logging;
snprintf(mcdi_log_param_name, sizeof(mcdi_log_param_name),
SFXGE_PARAM(%d.mcdi_logging),
(int)device_get_unit(dev));
TUNABLE_INT_FETCH(mcdi_log_param_name, &sc->mcdi_logging);
#endif
sc->stats_node = SYSCTL_ADD_NODE(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "stats",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Statistics");
if (sc->stats_node == NULL) {
error = ENOMEM;
goto fail;
}
TASK_INIT(&sc->task_reset, 0, sfxge_reset, sc);
(void) pci_enable_busmaster(dev);
/* Initialize DMA mappings. */
DBGPRINT(sc->dev, "dma_init...");
if ((error = sfxge_dma_init(sc)) != 0)
goto fail;
error = efx_family(pci_get_vendor(dev), pci_get_device(dev),
&sc->family, &sc->mem_bar);
KASSERT(error == 0, ("Family should be filtered by sfxge_probe()"));
/* Map the device registers. */
DBGPRINT(sc->dev, "bar_init...");
if ((error = sfxge_bar_init(sc)) != 0)
goto fail;
DBGPRINT(sc->dev, "nic_create...");
/* Create the common code nic object. */
SFXGE_EFSYS_LOCK_INIT(&sc->enp_lock,
device_get_nameunit(sc->dev), "nic");
if ((error = efx_nic_create(sc->family, (efsys_identifier_t *)sc,
&sc->bar, &sc->enp_lock, &enp)) != 0)
goto fail3;
sc->enp = enp;
/* Initialize MCDI to talk to the microcontroller. */
DBGPRINT(sc->dev, "mcdi_init...");
if ((error = sfxge_mcdi_init(sc)) != 0)
goto fail4;
/* Probe the NIC and build the configuration data area. */
DBGPRINT(sc->dev, "nic_probe...");
if ((error = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE)) != 0)
goto fail5;
if (!ISP2(sfxge_rx_ring_entries) ||
(sfxge_rx_ring_entries < EFX_RXQ_MINNDESCS) ||
(sfxge_rx_ring_entries > EFX_RXQ_MAXNDESCS)) {
log(LOG_ERR, "%s=%d must be power of 2 from %u to %u",
SFXGE_PARAM_RX_RING, sfxge_rx_ring_entries,
EFX_RXQ_MINNDESCS, EFX_RXQ_MAXNDESCS);
error = EINVAL;
goto fail_rx_ring_entries;
}
sc->rxq_entries = sfxge_rx_ring_entries;
if (efx_nic_cfg_get(enp)->enc_features & EFX_FEATURE_TXQ_CKSUM_OP_DESC)
sc->txq_dynamic_cksum_toggle_supported = B_TRUE;
else
sc->txq_dynamic_cksum_toggle_supported = B_FALSE;
if (!ISP2(sfxge_tx_ring_entries) ||
(sfxge_tx_ring_entries < EFX_TXQ_MINNDESCS) ||
(sfxge_tx_ring_entries > efx_nic_cfg_get(enp)->enc_txq_max_ndescs)) {
log(LOG_ERR, "%s=%d must be power of 2 from %u to %u",
SFXGE_PARAM_TX_RING, sfxge_tx_ring_entries,
EFX_TXQ_MINNDESCS, efx_nic_cfg_get(enp)->enc_txq_max_ndescs);
error = EINVAL;
goto fail_tx_ring_entries;
}
sc->txq_entries = sfxge_tx_ring_entries;
SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "version", CTLFLAG_RD,
SFXGE_VERSION_STRING, 0,
"Driver version");
SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "phy_type", CTLFLAG_RD,
NULL, efx_nic_cfg_get(enp)->enc_phy_type,
"PHY type");
/* Initialize the NVRAM. */
DBGPRINT(sc->dev, "nvram_init...");
if ((error = efx_nvram_init(enp)) != 0)
goto fail6;
/* Initialize the VPD. */
DBGPRINT(sc->dev, "vpd_init...");
if ((error = efx_vpd_init(enp)) != 0)
goto fail7;
efx_mcdi_new_epoch(enp);
/* Reset the NIC. */
DBGPRINT(sc->dev, "nic_reset...");
if ((error = efx_nic_reset(enp)) != 0)
goto fail8;
/* Initialize buffer table allocation. */
sc->buffer_table_next = 0;
/*
* Guarantee minimum and estimate maximum number of event queues
* to take it into account when MSI-X interrupts are allocated.
* It initializes NIC and keeps it initialized on success.
*/
if ((error = sfxge_estimate_rsrc_limits(sc)) != 0)
goto fail8;
/* Set up interrupts. */
DBGPRINT(sc->dev, "intr_init...");
if ((error = sfxge_intr_init(sc)) != 0)
goto fail9;
/* Initialize event processing state. */
DBGPRINT(sc->dev, "ev_init...");
if ((error = sfxge_ev_init(sc)) != 0)
goto fail11;
/* Initialize port state. */
DBGPRINT(sc->dev, "port_init...");
if ((error = sfxge_port_init(sc)) != 0)
goto fail12;
/* Initialize receive state. */
DBGPRINT(sc->dev, "rx_init...");
if ((error = sfxge_rx_init(sc)) != 0)
goto fail13;
/* Initialize transmit state. */
DBGPRINT(sc->dev, "tx_init...");
if ((error = sfxge_tx_init(sc)) != 0)
goto fail14;
sc->init_state = SFXGE_INITIALIZED;
DBGPRINT(sc->dev, "success");
return (0);
fail14:
sfxge_rx_fini(sc);
fail13:
sfxge_port_fini(sc);
fail12:
sfxge_ev_fini(sc);
fail11:
sfxge_intr_fini(sc);
fail9:
efx_nic_fini(sc->enp);
fail8:
efx_vpd_fini(enp);
fail7:
efx_nvram_fini(enp);
fail6:
fail_tx_ring_entries:
fail_rx_ring_entries:
efx_nic_unprobe(enp);
fail5:
sfxge_mcdi_fini(sc);
fail4:
sc->enp = NULL;
efx_nic_destroy(enp);
SFXGE_EFSYS_LOCK_DESTROY(&sc->enp_lock);
fail3:
sfxge_bar_fini(sc);
(void) pci_disable_busmaster(sc->dev);
fail:
DBGPRINT(sc->dev, "failed %d", error);
sc->dev = NULL;
SFXGE_ADAPTER_LOCK_DESTROY(sc);
return (error);
}
static void
sfxge_destroy(struct sfxge_softc *sc)
{
efx_nic_t *enp;
/* Clean up transmit state. */
sfxge_tx_fini(sc);
/* Clean up receive state. */
sfxge_rx_fini(sc);
/* Clean up port state. */
sfxge_port_fini(sc);
/* Clean up event processing state. */
sfxge_ev_fini(sc);
/* Clean up interrupts. */
sfxge_intr_fini(sc);
/* Tear down common code subsystems. */
efx_nic_reset(sc->enp);
efx_vpd_fini(sc->enp);
efx_nvram_fini(sc->enp);
efx_nic_unprobe(sc->enp);
/* Tear down MCDI. */
sfxge_mcdi_fini(sc);
/* Destroy common code context. */
enp = sc->enp;
sc->enp = NULL;
efx_nic_destroy(enp);
/* Free DMA memory. */
sfxge_dma_fini(sc);
/* Free mapped BARs. */
sfxge_bar_fini(sc);
(void) pci_disable_busmaster(sc->dev);
taskqueue_drain(taskqueue_thread, &sc->task_reset);
/* Destroy the softc lock. */
SFXGE_ADAPTER_LOCK_DESTROY(sc);
}
static int
sfxge_vpd_handler(SYSCTL_HANDLER_ARGS)
{
struct sfxge_softc *sc = arg1;
efx_vpd_value_t value;
int rc;
value.evv_tag = arg2 >> 16;
value.evv_keyword = arg2 & 0xffff;
if ((rc = efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value))
!= 0)
return (rc);
return (SYSCTL_OUT(req, value.evv_value, value.evv_length));
}
static void
sfxge_vpd_try_add(struct sfxge_softc *sc, struct sysctl_oid_list *list,
efx_vpd_tag_t tag, const char *keyword)
{
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
efx_vpd_value_t value;
/* Check whether VPD tag/keyword is present */
value.evv_tag = tag;
value.evv_keyword = EFX_VPD_KEYWORD(keyword[0], keyword[1]);
if (efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value) != 0)
return;
SYSCTL_ADD_PROC(ctx, list, OID_AUTO, keyword,
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
sc, tag << 16 | EFX_VPD_KEYWORD(keyword[0], keyword[1]),
sfxge_vpd_handler, "A", "");
}
static int
sfxge_vpd_init(struct sfxge_softc *sc)
{
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
struct sysctl_oid *vpd_node;
struct sysctl_oid_list *vpd_list;
char keyword[3];
efx_vpd_value_t value;
int rc;
if ((rc = efx_vpd_size(sc->enp, &sc->vpd_size)) != 0) {
/*
* Unprivileged functions deny VPD access.
* Simply skip VPD in this case.
*/
if (rc == EACCES)
goto done;
goto fail;
}
sc->vpd_data = malloc(sc->vpd_size, M_SFXGE, M_WAITOK);
if ((rc = efx_vpd_read(sc->enp, sc->vpd_data, sc->vpd_size)) != 0)
goto fail2;
/* Copy ID (product name) into device description, and log it. */
value.evv_tag = EFX_VPD_ID;
if (efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value) == 0) {
value.evv_value[value.evv_length] = 0;
device_set_desc_copy(sc->dev, value.evv_value);
device_printf(sc->dev, "%s\n", value.evv_value);
}
vpd_node = SYSCTL_ADD_NODE(ctx,
SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "vpd",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Vital Product Data");
vpd_list = SYSCTL_CHILDREN(vpd_node);
/* Add sysctls for all expected and any vendor-defined keywords. */
sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, "PN");
sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, "EC");
sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, "SN");
keyword[0] = 'V';
keyword[2] = 0;
for (keyword[1] = '0'; keyword[1] <= '9'; keyword[1]++)
sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, keyword);
for (keyword[1] = 'A'; keyword[1] <= 'Z'; keyword[1]++)
sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, keyword);
done:
return (0);
fail2:
free(sc->vpd_data, M_SFXGE);
fail:
return (rc);
}
static void
sfxge_vpd_fini(struct sfxge_softc *sc)
{
free(sc->vpd_data, M_SFXGE);
}
static void
sfxge_reset(void *arg, int npending)
{
struct sfxge_softc *sc;
int rc;
unsigned attempt;
(void)npending;
sc = (struct sfxge_softc *)arg;
SFXGE_ADAPTER_LOCK(sc);
if (sc->init_state != SFXGE_STARTED)
goto done;
sfxge_stop(sc);
efx_nic_reset(sc->enp);
for (attempt = 0; attempt < sfxge_restart_attempts; ++attempt) {
if ((rc = sfxge_start(sc)) == 0)
goto done;
device_printf(sc->dev, "start on reset failed (%d)\n", rc);
DELAY(100000);
}
device_printf(sc->dev, "reset failed; interface is now stopped\n");
done:
SFXGE_ADAPTER_UNLOCK(sc);
}
void
sfxge_schedule_reset(struct sfxge_softc *sc)
{
taskqueue_enqueue(taskqueue_thread, &sc->task_reset);
}
static int
sfxge_attach(device_t dev)
{
struct sfxge_softc *sc;
if_t ifp;
int error;
sc = device_get_softc(dev);
sc->dev = dev;
/* Allocate ifnet. */
ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "Couldn't allocate ifnet\n");
- error = ENOMEM;
- goto fail;
- }
sc->ifnet = ifp;
/* Initialize hardware. */
DBGPRINT(sc->dev, "create nic");
if ((error = sfxge_create(sc)) != 0)
goto fail2;
/* Create the ifnet for the port. */
DBGPRINT(sc->dev, "init ifnet");
if ((error = sfxge_ifnet_init(ifp, sc)) != 0)
goto fail3;
DBGPRINT(sc->dev, "init vpd");
if ((error = sfxge_vpd_init(sc)) != 0)
goto fail4;
/*
* NIC is initialized inside sfxge_create() and kept inialized
* to be able to initialize port to discover media types in
* sfxge_ifnet_init().
*/
efx_nic_fini(sc->enp);
sc->init_state = SFXGE_REGISTERED;
DBGPRINT(sc->dev, "success");
return (0);
fail4:
sfxge_ifnet_fini(ifp);
fail3:
efx_nic_fini(sc->enp);
sfxge_destroy(sc);
fail2:
if_free(sc->ifnet);
-
-fail:
DBGPRINT(sc->dev, "failed %d", error);
return (error);
}
static int
sfxge_detach(device_t dev)
{
struct sfxge_softc *sc;
sc = device_get_softc(dev);
sfxge_vpd_fini(sc);
/* Destroy the ifnet. */
sfxge_ifnet_fini(sc->ifnet);
/* Tear down hardware. */
sfxge_destroy(sc);
return (0);
}
static int
sfxge_probe(device_t dev)
{
uint16_t pci_vendor_id;
uint16_t pci_device_id;
efx_family_t family;
unsigned int mem_bar;
int rc;
pci_vendor_id = pci_get_vendor(dev);
pci_device_id = pci_get_device(dev);
DBGPRINT(dev, "PCI ID %04x:%04x", pci_vendor_id, pci_device_id);
rc = efx_family(pci_vendor_id, pci_device_id, &family, &mem_bar);
if (rc != 0) {
DBGPRINT(dev, "efx_family fail %d", rc);
return (ENXIO);
}
if (family == EFX_FAMILY_SIENA) {
device_set_desc(dev, "Solarflare SFC9000 family");
return (0);
}
if (family == EFX_FAMILY_HUNTINGTON) {
device_set_desc(dev, "Solarflare SFC9100 family");
return (0);
}
if (family == EFX_FAMILY_MEDFORD) {
device_set_desc(dev, "Solarflare SFC9200 family");
return (0);
}
if (family == EFX_FAMILY_MEDFORD2) {
device_set_desc(dev, "Solarflare SFC9250 family");
return (0);
}
DBGPRINT(dev, "impossible controller family %d", family);
return (ENXIO);
}
static device_method_t sfxge_methods[] = {
DEVMETHOD(device_probe, sfxge_probe),
DEVMETHOD(device_attach, sfxge_attach),
DEVMETHOD(device_detach, sfxge_detach),
DEVMETHOD_END
};
static driver_t sfxge_driver = {
"sfxge",
sfxge_methods,
sizeof(struct sfxge_softc)
};
DRIVER_MODULE(sfxge, pci, sfxge_driver, 0, 0);
diff --git a/sys/dev/sge/if_sge.c b/sys/dev/sge/if_sge.c
index 8146e7cf2e18..c6e8f4790190 100644
--- a/sys/dev/sge/if_sge.c
+++ b/sys/dev/sge/if_sge.c
@@ -1,1902 +1,1897 @@
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) 2008-2010 Nikolay Denev <ndenev@gmail.com>
* Copyright (c) 2007-2008 Alexander Pohoyda <alexander.pohoyda@gmx.net>
* Copyright (c) 1997, 1998, 1999
* Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AUTHORS OR
* THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
/*
* SiS 190/191 PCI Ethernet NIC driver.
*
* Adapted to SiS 190 NIC by Alexander Pohoyda based on the original
* SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by
* Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu
* <kmliu@sis.com>. Thanks to Pyun YongHyeon <pyunyh@gmail.com> for
* review and very useful comments.
*
* Adapted to SiS 191 NIC by Nikolay Denev with further ideas from the
* Linux and Solaris drivers.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/sge/if_sgereg.h>
MODULE_DEPEND(sge, pci, 1, 1, 1);
MODULE_DEPEND(sge, ether, 1, 1, 1);
MODULE_DEPEND(sge, miibus, 1, 1, 1);
/* "device miibus0" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
/*
* Various supported device vendors/types and their names.
*/
static struct sge_type sge_devs[] = {
{ SIS_VENDORID, SIS_DEVICEID_190, "SiS190 Fast Ethernet" },
{ SIS_VENDORID, SIS_DEVICEID_191, "SiS191 Fast/Gigabit Ethernet" },
{ 0, 0, NULL }
};
static int sge_probe(device_t);
static int sge_attach(device_t);
static int sge_detach(device_t);
static int sge_shutdown(device_t);
static int sge_suspend(device_t);
static int sge_resume(device_t);
static int sge_miibus_readreg(device_t, int, int);
static int sge_miibus_writereg(device_t, int, int, int);
static void sge_miibus_statchg(device_t);
static int sge_newbuf(struct sge_softc *, int);
static int sge_encap(struct sge_softc *, struct mbuf **);
static __inline void
sge_discard_rxbuf(struct sge_softc *, int);
static void sge_rxeof(struct sge_softc *);
static void sge_txeof(struct sge_softc *);
static void sge_intr(void *);
static void sge_tick(void *);
static void sge_start(if_t);
static void sge_start_locked(if_t);
static int sge_ioctl(if_t, u_long, caddr_t);
static void sge_init(void *);
static void sge_init_locked(struct sge_softc *);
static void sge_stop(struct sge_softc *);
static void sge_watchdog(struct sge_softc *);
static int sge_ifmedia_upd(if_t);
static void sge_ifmedia_sts(if_t, struct ifmediareq *);
static int sge_get_mac_addr_apc(struct sge_softc *, uint8_t *);
static int sge_get_mac_addr_eeprom(struct sge_softc *, uint8_t *);
static uint16_t sge_read_eeprom(struct sge_softc *, int);
static void sge_rxfilter(struct sge_softc *);
static void sge_setvlan(struct sge_softc *);
static void sge_reset(struct sge_softc *);
static int sge_list_rx_init(struct sge_softc *);
static int sge_list_rx_free(struct sge_softc *);
static int sge_list_tx_init(struct sge_softc *);
static int sge_list_tx_free(struct sge_softc *);
static int sge_dma_alloc(struct sge_softc *);
static void sge_dma_free(struct sge_softc *);
static void sge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static device_method_t sge_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, sge_probe),
DEVMETHOD(device_attach, sge_attach),
DEVMETHOD(device_detach, sge_detach),
DEVMETHOD(device_suspend, sge_suspend),
DEVMETHOD(device_resume, sge_resume),
DEVMETHOD(device_shutdown, sge_shutdown),
/* MII interface */
DEVMETHOD(miibus_readreg, sge_miibus_readreg),
DEVMETHOD(miibus_writereg, sge_miibus_writereg),
DEVMETHOD(miibus_statchg, sge_miibus_statchg),
DEVMETHOD_END
};
static driver_t sge_driver = {
"sge", sge_methods, sizeof(struct sge_softc)
};
DRIVER_MODULE(sge, pci, sge_driver, 0, 0);
DRIVER_MODULE(miibus, sge, miibus_driver, 0, 0);
/*
* Register space access macros.
*/
#define CSR_WRITE_4(sc, reg, val) bus_write_4(sc->sge_res, reg, val)
#define CSR_WRITE_2(sc, reg, val) bus_write_2(sc->sge_res, reg, val)
#define CSR_WRITE_1(cs, reg, val) bus_write_1(sc->sge_res, reg, val)
#define CSR_READ_4(sc, reg) bus_read_4(sc->sge_res, reg)
#define CSR_READ_2(sc, reg) bus_read_2(sc->sge_res, reg)
#define CSR_READ_1(sc, reg) bus_read_1(sc->sge_res, reg)
/* Define to show Tx/Rx error status. */
#undef SGE_SHOW_ERRORS
#define SGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
static void
sge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
bus_addr_t *p;
if (error != 0)
return;
KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
p = arg;
*p = segs->ds_addr;
}
/*
* Read a sequence of words from the EEPROM.
*/
static uint16_t
sge_read_eeprom(struct sge_softc *sc, int offset)
{
uint32_t val;
int i;
KASSERT(offset <= EI_OFFSET, ("EEPROM offset too big"));
CSR_WRITE_4(sc, ROMInterface,
EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT));
DELAY(500);
for (i = 0; i < SGE_TIMEOUT; i++) {
val = CSR_READ_4(sc, ROMInterface);
if ((val & EI_REQ) == 0)
break;
DELAY(100);
}
if (i == SGE_TIMEOUT) {
device_printf(sc->sge_dev,
"EEPROM read timeout : 0x%08x\n", val);
return (0xffff);
}
return ((val & EI_DATA) >> EI_DATA_SHIFT);
}
static int
sge_get_mac_addr_eeprom(struct sge_softc *sc, uint8_t *dest)
{
uint16_t val;
int i;
val = sge_read_eeprom(sc, EEPROMSignature);
if (val == 0xffff || val == 0) {
device_printf(sc->sge_dev,
"invalid EEPROM signature : 0x%04x\n", val);
return (EINVAL);
}
for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
val = sge_read_eeprom(sc, EEPROMMACAddr + i / 2);
dest[i + 0] = (uint8_t)val;
dest[i + 1] = (uint8_t)(val >> 8);
}
if ((sge_read_eeprom(sc, EEPROMInfo) & 0x80) != 0)
sc->sge_flags |= SGE_FLAG_RGMII;
return (0);
}
/*
* For SiS96x, APC CMOS RAM is used to store ethernet address.
* APC CMOS RAM is accessed through ISA bridge.
*/
static int
sge_get_mac_addr_apc(struct sge_softc *sc, uint8_t *dest)
{
#if defined(__amd64__) || defined(__i386__)
devclass_t pci;
device_t bus, dev = NULL;
device_t *kids;
struct apc_tbl {
uint16_t vid;
uint16_t did;
} *tp, apc_tbls[] = {
{ SIS_VENDORID, 0x0965 },
{ SIS_VENDORID, 0x0966 },
{ SIS_VENDORID, 0x0968 }
};
uint8_t reg;
int busnum, i, j, numkids;
pci = devclass_find("pci");
for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
bus = devclass_get_device(pci, busnum);
if (!bus)
continue;
if (device_get_children(bus, &kids, &numkids) != 0)
continue;
for (i = 0; i < numkids; i++) {
dev = kids[i];
if (pci_get_class(dev) == PCIC_BRIDGE &&
pci_get_subclass(dev) == PCIS_BRIDGE_ISA) {
tp = apc_tbls;
for (j = 0; j < nitems(apc_tbls); j++) {
if (pci_get_vendor(dev) == tp->vid &&
pci_get_device(dev) == tp->did) {
free(kids, M_TEMP);
goto apc_found;
}
tp++;
}
}
}
free(kids, M_TEMP);
}
device_printf(sc->sge_dev, "couldn't find PCI-ISA bridge\n");
return (EINVAL);
apc_found:
/* Enable port 0x78 and 0x79 to access APC registers. */
reg = pci_read_config(dev, 0x48, 1);
pci_write_config(dev, 0x48, reg & ~0x02, 1);
DELAY(50);
pci_read_config(dev, 0x48, 1);
/* Read stored ethernet address. */
for (i = 0; i < ETHER_ADDR_LEN; i++) {
outb(0x78, 0x09 + i);
dest[i] = inb(0x79);
}
outb(0x78, 0x12);
if ((inb(0x79) & 0x80) != 0)
sc->sge_flags |= SGE_FLAG_RGMII;
/* Restore access to APC registers. */
pci_write_config(dev, 0x48, reg, 1);
return (0);
#else
return (EINVAL);
#endif
}
static int
sge_miibus_readreg(device_t dev, int phy, int reg)
{
struct sge_softc *sc;
uint32_t val;
int i;
sc = device_get_softc(dev);
CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) |
(reg << GMI_REG_SHIFT) | GMI_OP_RD | GMI_REQ);
DELAY(10);
for (i = 0; i < SGE_TIMEOUT; i++) {
val = CSR_READ_4(sc, GMIIControl);
if ((val & GMI_REQ) == 0)
break;
DELAY(10);
}
if (i == SGE_TIMEOUT) {
device_printf(sc->sge_dev, "PHY read timeout : %d\n", reg);
return (0);
}
return ((val & GMI_DATA) >> GMI_DATA_SHIFT);
}
static int
sge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
struct sge_softc *sc;
uint32_t val;
int i;
sc = device_get_softc(dev);
CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) |
(reg << GMI_REG_SHIFT) | (data << GMI_DATA_SHIFT) |
GMI_OP_WR | GMI_REQ);
DELAY(10);
for (i = 0; i < SGE_TIMEOUT; i++) {
val = CSR_READ_4(sc, GMIIControl);
if ((val & GMI_REQ) == 0)
break;
DELAY(10);
}
if (i == SGE_TIMEOUT)
device_printf(sc->sge_dev, "PHY write timeout : %d\n", reg);
return (0);
}
static void
sge_miibus_statchg(device_t dev)
{
struct sge_softc *sc;
struct mii_data *mii;
if_t ifp;
uint32_t ctl, speed;
sc = device_get_softc(dev);
mii = device_get_softc(sc->sge_miibus);
ifp = sc->sge_ifp;
if (mii == NULL || ifp == NULL ||
(if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
return;
speed = 0;
sc->sge_flags &= ~SGE_FLAG_LINK;
if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
(IFM_ACTIVE | IFM_AVALID)) {
switch (IFM_SUBTYPE(mii->mii_media_active)) {
case IFM_10_T:
sc->sge_flags |= SGE_FLAG_LINK;
speed = SC_SPEED_10;
break;
case IFM_100_TX:
sc->sge_flags |= SGE_FLAG_LINK;
speed = SC_SPEED_100;
break;
case IFM_1000_T:
if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0) {
sc->sge_flags |= SGE_FLAG_LINK;
speed = SC_SPEED_1000;
}
break;
default:
break;
}
}
if ((sc->sge_flags & SGE_FLAG_LINK) == 0)
return;
/* Reprogram MAC to resolved speed/duplex/flow-control parameters. */
ctl = CSR_READ_4(sc, StationControl);
ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK);
if (speed == SC_SPEED_1000) {
ctl |= 0x07000000;
sc->sge_flags |= SGE_FLAG_SPEED_1000;
} else {
ctl |= 0x04000000;
sc->sge_flags &= ~SGE_FLAG_SPEED_1000;
}
#ifdef notyet
if ((sc->sge_flags & SGE_FLAG_GMII) != 0)
ctl |= 0x03000000;
#endif
ctl |= speed;
if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
ctl |= SC_FDX;
sc->sge_flags |= SGE_FLAG_FDX;
} else
sc->sge_flags &= ~SGE_FLAG_FDX;
CSR_WRITE_4(sc, StationControl, ctl);
if ((sc->sge_flags & SGE_FLAG_RGMII) != 0) {
CSR_WRITE_4(sc, RGMIIDelay, 0x0441);
CSR_WRITE_4(sc, RGMIIDelay, 0x0440);
}
}
static u_int
sge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int count)
{
uint32_t crc, *hashes = arg;
crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
return (1);
}
static void
sge_rxfilter(struct sge_softc *sc)
{
if_t ifp;
uint32_t hashes[2];
uint16_t rxfilt;
SGE_LOCK_ASSERT(sc);
ifp = sc->sge_ifp;
rxfilt = CSR_READ_2(sc, RxMacControl);
rxfilt &= ~(AcceptBroadcast | AcceptAllPhys | AcceptMulticast);
rxfilt |= AcceptMyPhys;
if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
rxfilt |= AcceptBroadcast;
if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
if ((if_getflags(ifp) & IFF_PROMISC) != 0)
rxfilt |= AcceptAllPhys;
rxfilt |= AcceptMulticast;
hashes[0] = 0xFFFFFFFF;
hashes[1] = 0xFFFFFFFF;
} else {
rxfilt |= AcceptMulticast;
hashes[0] = hashes[1] = 0;
/* Now program new ones. */
if_foreach_llmaddr(ifp, sge_hash_maddr, hashes);
}
CSR_WRITE_2(sc, RxMacControl, rxfilt);
CSR_WRITE_4(sc, RxHashTable, hashes[0]);
CSR_WRITE_4(sc, RxHashTable2, hashes[1]);
}
static void
sge_setvlan(struct sge_softc *sc)
{
if_t ifp;
uint16_t rxfilt;
SGE_LOCK_ASSERT(sc);
ifp = sc->sge_ifp;
if ((if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
return;
rxfilt = CSR_READ_2(sc, RxMacControl);
if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
rxfilt |= RXMAC_STRIP_VLAN;
else
rxfilt &= ~RXMAC_STRIP_VLAN;
CSR_WRITE_2(sc, RxMacControl, rxfilt);
}
static void
sge_reset(struct sge_softc *sc)
{
CSR_WRITE_4(sc, IntrMask, 0);
CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
/* Soft reset. */
CSR_WRITE_4(sc, IntrControl, 0x8000);
CSR_READ_4(sc, IntrControl);
DELAY(100);
CSR_WRITE_4(sc, IntrControl, 0);
/* Stop MAC. */
CSR_WRITE_4(sc, TX_CTL, 0x1a00);
CSR_WRITE_4(sc, RX_CTL, 0x1a00);
CSR_WRITE_4(sc, IntrMask, 0);
CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
CSR_WRITE_4(sc, GMIIControl, 0);
}
/*
* Probe for an SiS chip. Check the PCI vendor and device
* IDs against our list and return a device name if we find a match.
*/
static int
sge_probe(device_t dev)
{
struct sge_type *t;
t = sge_devs;
while (t->sge_name != NULL) {
if ((pci_get_vendor(dev) == t->sge_vid) &&
(pci_get_device(dev) == t->sge_did)) {
device_set_desc(dev, t->sge_name);
return (BUS_PROBE_DEFAULT);
}
t++;
}
return (ENXIO);
}
/*
* Attach the interface. Allocate softc structures, do ifmedia
* setup and ethernet/BPF attach.
*/
static int
sge_attach(device_t dev)
{
struct sge_softc *sc;
if_t ifp;
uint8_t eaddr[ETHER_ADDR_LEN];
int error = 0, rid;
sc = device_get_softc(dev);
sc->sge_dev = dev;
mtx_init(&sc->sge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF);
callout_init_mtx(&sc->sge_stat_ch, &sc->sge_mtx, 0);
/*
* Map control/status registers.
*/
pci_enable_busmaster(dev);
/* Allocate resources. */
sc->sge_res_id = PCIR_BAR(0);
sc->sge_res_type = SYS_RES_MEMORY;
sc->sge_res = bus_alloc_resource_any(dev, sc->sge_res_type,
&sc->sge_res_id, RF_ACTIVE);
if (sc->sge_res == NULL) {
device_printf(dev, "couldn't allocate resource\n");
error = ENXIO;
goto fail;
}
rid = 0;
sc->sge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_SHAREABLE | RF_ACTIVE);
if (sc->sge_irq == NULL) {
device_printf(dev, "couldn't allocate IRQ resources\n");
error = ENXIO;
goto fail;
}
sc->sge_rev = pci_get_revid(dev);
if (pci_get_device(dev) == SIS_DEVICEID_190)
sc->sge_flags |= SGE_FLAG_FASTETHER | SGE_FLAG_SIS190;
/* Reset the adapter. */
sge_reset(sc);
/* Get MAC address from the EEPROM. */
if ((pci_read_config(dev, 0x73, 1) & 0x01) != 0)
sge_get_mac_addr_apc(sc, eaddr);
else
sge_get_mac_addr_eeprom(sc, eaddr);
if ((error = sge_dma_alloc(sc)) != 0)
goto fail;
ifp = sc->sge_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "cannot allocate ifnet structure.\n");
- error = ENOSPC;
- goto fail;
- }
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setioctlfn(ifp, sge_ioctl);
if_setstartfn(ifp, sge_start);
if_setinitfn(ifp, sge_init);
if_setsendqlen(ifp, SGE_TX_RING_CNT - 1);
if_setsendqready(ifp);
if_setcapabilities(ifp, IFCAP_TXCSUM | IFCAP_RXCSUM | IFCAP_TSO4);
if_sethwassist(ifp, SGE_CSUM_FEATURES | CSUM_TSO);
if_setcapenable(ifp, if_getcapabilities(ifp));
/*
* Do MII setup.
*/
error = mii_attach(dev, &sc->sge_miibus, ifp, sge_ifmedia_upd,
sge_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
if (error != 0) {
device_printf(dev, "attaching PHYs failed\n");
goto fail;
}
/*
* Call MI attach routine.
*/
ether_ifattach(ifp, eaddr);
/* VLAN setup. */
if_setcapabilities(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM |
IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU);
if_setcapenable(ifp, if_getcapabilities(ifp));
/* Tell the upper layer(s) we support long frames. */
if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
/* Hook interrupt last to avoid having to lock softc */
error = bus_setup_intr(dev, sc->sge_irq, INTR_TYPE_NET | INTR_MPSAFE,
NULL, sge_intr, sc, &sc->sge_intrhand);
if (error) {
device_printf(dev, "couldn't set up irq\n");
ether_ifdetach(ifp);
goto fail;
}
fail:
if (error)
sge_detach(dev);
return (error);
}
/*
* Shutdown hardware and free up resources. This can be called any
* time after the mutex has been initialized. It is called in both
* the error case in attach and the normal detach case so it needs
* to be careful about only freeing resources that have actually been
* allocated.
*/
static int
sge_detach(device_t dev)
{
struct sge_softc *sc;
if_t ifp;
sc = device_get_softc(dev);
ifp = sc->sge_ifp;
/* These should only be active if attach succeeded. */
if (device_is_attached(dev)) {
ether_ifdetach(ifp);
SGE_LOCK(sc);
sge_stop(sc);
SGE_UNLOCK(sc);
callout_drain(&sc->sge_stat_ch);
}
if (sc->sge_miibus)
device_delete_child(dev, sc->sge_miibus);
bus_generic_detach(dev);
if (sc->sge_intrhand)
bus_teardown_intr(dev, sc->sge_irq, sc->sge_intrhand);
if (sc->sge_irq)
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sge_irq);
if (sc->sge_res)
bus_release_resource(dev, sc->sge_res_type, sc->sge_res_id,
sc->sge_res);
if (ifp)
if_free(ifp);
sge_dma_free(sc);
mtx_destroy(&sc->sge_mtx);
return (0);
}
/*
* Stop all chip I/O so that the kernel's probe routines don't
* get confused by errant DMAs when rebooting.
*/
static int
sge_shutdown(device_t dev)
{
struct sge_softc *sc;
sc = device_get_softc(dev);
SGE_LOCK(sc);
sge_stop(sc);
SGE_UNLOCK(sc);
return (0);
}
static int
sge_suspend(device_t dev)
{
struct sge_softc *sc;
if_t ifp;
sc = device_get_softc(dev);
SGE_LOCK(sc);
ifp = sc->sge_ifp;
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
sge_stop(sc);
SGE_UNLOCK(sc);
return (0);
}
static int
sge_resume(device_t dev)
{
struct sge_softc *sc;
if_t ifp;
sc = device_get_softc(dev);
SGE_LOCK(sc);
ifp = sc->sge_ifp;
if ((if_getflags(ifp) & IFF_UP) != 0)
sge_init_locked(sc);
SGE_UNLOCK(sc);
return (0);
}
static int
sge_dma_alloc(struct sge_softc *sc)
{
struct sge_chain_data *cd;
struct sge_list_data *ld;
struct sge_rxdesc *rxd;
struct sge_txdesc *txd;
int error, i;
cd = &sc->sge_cdata;
ld = &sc->sge_ldata;
error = bus_dma_tag_create(bus_get_dma_tag(sc->sge_dev),
1, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1, /* nsegments */
BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockarg */
&cd->sge_tag);
if (error != 0) {
device_printf(sc->sge_dev,
"could not create parent DMA tag.\n");
goto fail;
}
/* RX descriptor ring */
error = bus_dma_tag_create(cd->sge_tag,
SGE_DESC_ALIGN, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
SGE_RX_RING_SZ, 1, /* maxsize,nsegments */
SGE_RX_RING_SZ, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockarg */
&cd->sge_rx_tag);
if (error != 0) {
device_printf(sc->sge_dev,
"could not create Rx ring DMA tag.\n");
goto fail;
}
/* Allocate DMA'able memory and load DMA map for RX ring. */
error = bus_dmamem_alloc(cd->sge_rx_tag, (void **)&ld->sge_rx_ring,
BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
&cd->sge_rx_dmamap);
if (error != 0) {
device_printf(sc->sge_dev,
"could not allocate DMA'able memory for Rx ring.\n");
goto fail;
}
error = bus_dmamap_load(cd->sge_rx_tag, cd->sge_rx_dmamap,
ld->sge_rx_ring, SGE_RX_RING_SZ, sge_dma_map_addr,
&ld->sge_rx_paddr, BUS_DMA_NOWAIT);
if (error != 0) {
device_printf(sc->sge_dev,
"could not load DMA'able memory for Rx ring.\n");
}
/* TX descriptor ring */
error = bus_dma_tag_create(cd->sge_tag,
SGE_DESC_ALIGN, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
SGE_TX_RING_SZ, 1, /* maxsize,nsegments */
SGE_TX_RING_SZ, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockarg */
&cd->sge_tx_tag);
if (error != 0) {
device_printf(sc->sge_dev,
"could not create Rx ring DMA tag.\n");
goto fail;
}
/* Allocate DMA'able memory and load DMA map for TX ring. */
error = bus_dmamem_alloc(cd->sge_tx_tag, (void **)&ld->sge_tx_ring,
BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
&cd->sge_tx_dmamap);
if (error != 0) {
device_printf(sc->sge_dev,
"could not allocate DMA'able memory for Tx ring.\n");
goto fail;
}
error = bus_dmamap_load(cd->sge_tx_tag, cd->sge_tx_dmamap,
ld->sge_tx_ring, SGE_TX_RING_SZ, sge_dma_map_addr,
&ld->sge_tx_paddr, BUS_DMA_NOWAIT);
if (error != 0) {
device_printf(sc->sge_dev,
"could not load DMA'able memory for Rx ring.\n");
goto fail;
}
/* Create DMA tag for Tx buffers. */
error = bus_dma_tag_create(cd->sge_tag, 1, 0, BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR, NULL, NULL, SGE_TSO_MAXSIZE, SGE_MAXTXSEGS,
SGE_TSO_MAXSEGSIZE, 0, NULL, NULL, &cd->sge_txmbuf_tag);
if (error != 0) {
device_printf(sc->sge_dev,
"could not create Tx mbuf DMA tag.\n");
goto fail;
}
/* Create DMA tag for Rx buffers. */
error = bus_dma_tag_create(cd->sge_tag, SGE_RX_BUF_ALIGN, 0,
BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
MCLBYTES, 0, NULL, NULL, &cd->sge_rxmbuf_tag);
if (error != 0) {
device_printf(sc->sge_dev,
"could not create Rx mbuf DMA tag.\n");
goto fail;
}
/* Create DMA maps for Tx buffers. */
for (i = 0; i < SGE_TX_RING_CNT; i++) {
txd = &cd->sge_txdesc[i];
txd->tx_m = NULL;
txd->tx_dmamap = NULL;
txd->tx_ndesc = 0;
error = bus_dmamap_create(cd->sge_txmbuf_tag, 0,
&txd->tx_dmamap);
if (error != 0) {
device_printf(sc->sge_dev,
"could not create Tx DMA map.\n");
goto fail;
}
}
/* Create spare DMA map for Rx buffer. */
error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0, &cd->sge_rx_spare_map);
if (error != 0) {
device_printf(sc->sge_dev,
"could not create spare Rx DMA map.\n");
goto fail;
}
/* Create DMA maps for Rx buffers. */
for (i = 0; i < SGE_RX_RING_CNT; i++) {
rxd = &cd->sge_rxdesc[i];
rxd->rx_m = NULL;
rxd->rx_dmamap = NULL;
error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0,
&rxd->rx_dmamap);
if (error) {
device_printf(sc->sge_dev,
"could not create Rx DMA map.\n");
goto fail;
}
}
fail:
return (error);
}
static void
sge_dma_free(struct sge_softc *sc)
{
struct sge_chain_data *cd;
struct sge_list_data *ld;
struct sge_rxdesc *rxd;
struct sge_txdesc *txd;
int i;
cd = &sc->sge_cdata;
ld = &sc->sge_ldata;
/* Rx ring. */
if (cd->sge_rx_tag != NULL) {
if (ld->sge_rx_paddr != 0)
bus_dmamap_unload(cd->sge_rx_tag, cd->sge_rx_dmamap);
if (ld->sge_rx_ring != NULL)
bus_dmamem_free(cd->sge_rx_tag, ld->sge_rx_ring,
cd->sge_rx_dmamap);
ld->sge_rx_ring = NULL;
ld->sge_rx_paddr = 0;
bus_dma_tag_destroy(cd->sge_rx_tag);
cd->sge_rx_tag = NULL;
}
/* Tx ring. */
if (cd->sge_tx_tag != NULL) {
if (ld->sge_tx_paddr != 0)
bus_dmamap_unload(cd->sge_tx_tag, cd->sge_tx_dmamap);
if (ld->sge_tx_ring != NULL)
bus_dmamem_free(cd->sge_tx_tag, ld->sge_tx_ring,
cd->sge_tx_dmamap);
ld->sge_tx_ring = NULL;
ld->sge_tx_paddr = 0;
bus_dma_tag_destroy(cd->sge_tx_tag);
cd->sge_tx_tag = NULL;
}
/* Rx buffers. */
if (cd->sge_rxmbuf_tag != NULL) {
for (i = 0; i < SGE_RX_RING_CNT; i++) {
rxd = &cd->sge_rxdesc[i];
if (rxd->rx_dmamap != NULL) {
bus_dmamap_destroy(cd->sge_rxmbuf_tag,
rxd->rx_dmamap);
rxd->rx_dmamap = NULL;
}
}
if (cd->sge_rx_spare_map != NULL) {
bus_dmamap_destroy(cd->sge_rxmbuf_tag,
cd->sge_rx_spare_map);
cd->sge_rx_spare_map = NULL;
}
bus_dma_tag_destroy(cd->sge_rxmbuf_tag);
cd->sge_rxmbuf_tag = NULL;
}
/* Tx buffers. */
if (cd->sge_txmbuf_tag != NULL) {
for (i = 0; i < SGE_TX_RING_CNT; i++) {
txd = &cd->sge_txdesc[i];
if (txd->tx_dmamap != NULL) {
bus_dmamap_destroy(cd->sge_txmbuf_tag,
txd->tx_dmamap);
txd->tx_dmamap = NULL;
}
}
bus_dma_tag_destroy(cd->sge_txmbuf_tag);
cd->sge_txmbuf_tag = NULL;
}
if (cd->sge_tag != NULL)
bus_dma_tag_destroy(cd->sge_tag);
cd->sge_tag = NULL;
}
/*
* Initialize the TX descriptors.
*/
static int
sge_list_tx_init(struct sge_softc *sc)
{
struct sge_list_data *ld;
struct sge_chain_data *cd;
SGE_LOCK_ASSERT(sc);
ld = &sc->sge_ldata;
cd = &sc->sge_cdata;
bzero(ld->sge_tx_ring, SGE_TX_RING_SZ);
ld->sge_tx_ring[SGE_TX_RING_CNT - 1].sge_flags = htole32(RING_END);
bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
cd->sge_tx_prod = 0;
cd->sge_tx_cons = 0;
cd->sge_tx_cnt = 0;
return (0);
}
static int
sge_list_tx_free(struct sge_softc *sc)
{
struct sge_chain_data *cd;
struct sge_txdesc *txd;
int i;
SGE_LOCK_ASSERT(sc);
cd = &sc->sge_cdata;
for (i = 0; i < SGE_TX_RING_CNT; i++) {
txd = &cd->sge_txdesc[i];
if (txd->tx_m != NULL) {
bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap);
m_freem(txd->tx_m);
txd->tx_m = NULL;
txd->tx_ndesc = 0;
}
}
return (0);
}
/*
* Initialize the RX descriptors and allocate mbufs for them. Note that
* we arrange the descriptors in a closed ring, so that the last descriptor
* has RING_END flag set.
*/
static int
sge_list_rx_init(struct sge_softc *sc)
{
struct sge_chain_data *cd;
int i;
SGE_LOCK_ASSERT(sc);
cd = &sc->sge_cdata;
cd->sge_rx_cons = 0;
bzero(sc->sge_ldata.sge_rx_ring, SGE_RX_RING_SZ);
for (i = 0; i < SGE_RX_RING_CNT; i++) {
if (sge_newbuf(sc, i) != 0)
return (ENOBUFS);
}
bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
return (0);
}
static int
sge_list_rx_free(struct sge_softc *sc)
{
struct sge_chain_data *cd;
struct sge_rxdesc *rxd;
int i;
SGE_LOCK_ASSERT(sc);
cd = &sc->sge_cdata;
for (i = 0; i < SGE_RX_RING_CNT; i++) {
rxd = &cd->sge_rxdesc[i];
if (rxd->rx_m != NULL) {
bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(cd->sge_rxmbuf_tag,
rxd->rx_dmamap);
m_freem(rxd->rx_m);
rxd->rx_m = NULL;
}
}
return (0);
}
/*
* Initialize an RX descriptor and attach an MBUF cluster.
*/
static int
sge_newbuf(struct sge_softc *sc, int prod)
{
struct mbuf *m;
struct sge_desc *desc;
struct sge_chain_data *cd;
struct sge_rxdesc *rxd;
bus_dma_segment_t segs[1];
bus_dmamap_t map;
int error, nsegs;
SGE_LOCK_ASSERT(sc);
cd = &sc->sge_cdata;
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MCLBYTES;
m_adj(m, SGE_RX_BUF_ALIGN);
error = bus_dmamap_load_mbuf_sg(cd->sge_rxmbuf_tag,
cd->sge_rx_spare_map, m, segs, &nsegs, 0);
if (error != 0) {
m_freem(m);
return (error);
}
KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
rxd = &cd->sge_rxdesc[prod];
if (rxd->rx_m != NULL) {
bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(cd->sge_rxmbuf_tag, rxd->rx_dmamap);
}
map = rxd->rx_dmamap;
rxd->rx_dmamap = cd->sge_rx_spare_map;
cd->sge_rx_spare_map = map;
bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
BUS_DMASYNC_PREREAD);
rxd->rx_m = m;
desc = &sc->sge_ldata.sge_rx_ring[prod];
desc->sge_sts_size = 0;
desc->sge_ptr = htole32(SGE_ADDR_LO(segs[0].ds_addr));
desc->sge_flags = htole32(segs[0].ds_len);
if (prod == SGE_RX_RING_CNT - 1)
desc->sge_flags |= htole32(RING_END);
desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR);
return (0);
}
static __inline void
sge_discard_rxbuf(struct sge_softc *sc, int index)
{
struct sge_desc *desc;
desc = &sc->sge_ldata.sge_rx_ring[index];
desc->sge_sts_size = 0;
desc->sge_flags = htole32(MCLBYTES - SGE_RX_BUF_ALIGN);
if (index == SGE_RX_RING_CNT - 1)
desc->sge_flags |= htole32(RING_END);
desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR);
}
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * Walks the Rx ring from sge_rx_cons until a descriptor still owned
 * by the hardware is found, replenishing buffers as frames are taken.
 * The lock is dropped around if_input() since the stack may re-enter
 * the driver.
 */
static void
sge_rxeof(struct sge_softc *sc)
{
if_t ifp;
struct mbuf *m;
struct sge_chain_data *cd;
struct sge_desc *cur_rx;
uint32_t rxinfo, rxstat;
int cons, prog;
SGE_LOCK_ASSERT(sc);
ifp = sc->sge_ifp;
cd = &sc->sge_cdata;
/* Pick up descriptor updates written by the controller. */
bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
cons = cd->sge_rx_cons;
for (prog = 0; prog < SGE_RX_RING_CNT; prog++,
SGE_INC(cons, SGE_RX_RING_CNT)) {
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
break;
cur_rx = &sc->sge_ldata.sge_rx_ring[cons];
rxinfo = le32toh(cur_rx->sge_cmdsts);
/* Stop at the first descriptor the hardware still owns. */
if ((rxinfo & RDC_OWN) != 0)
break;
rxstat = le32toh(cur_rx->sge_sts_size);
/* Drop frames with bad CRC, Rx errors, or spanning >1 segment. */
if ((rxstat & RDS_CRCOK) == 0 || SGE_RX_ERROR(rxstat) != 0 ||
SGE_RX_NSEGS(rxstat) != 1) {
/* XXX We don't support multi-segment frames yet. */
#ifdef SGE_SHOW_ERRORS
device_printf(sc->sge_dev, "Rx error : 0x%b\n", rxstat,
RX_ERR_BITS);
#endif
sge_discard_rxbuf(sc, cons);
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
continue;
}
m = cd->sge_rxdesc[cons].rx_m;
/* Recycle the old buffer if a fresh one can't be attached. */
if (sge_newbuf(sc, cons) != 0) {
sge_discard_rxbuf(sc, cons);
if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
continue;
}
/* Translate hardware checksum results into mbuf flags. */
if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
if ((rxinfo & RDC_IP_CSUM) != 0 &&
(rxinfo & RDC_IP_CSUM_OK) != 0)
m->m_pkthdr.csum_flags |=
CSUM_IP_CHECKED | CSUM_IP_VALID;
if (((rxinfo & RDC_TCP_CSUM) != 0 &&
(rxinfo & RDC_TCP_CSUM_OK) != 0) ||
((rxinfo & RDC_UDP_CSUM) != 0 &&
(rxinfo & RDC_UDP_CSUM_OK) != 0)) {
m->m_pkthdr.csum_flags |=
CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
m->m_pkthdr.csum_data = 0xffff;
}
}
/* Check for VLAN tagged frame. */
if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0 &&
(rxstat & RDS_VLAN) != 0) {
m->m_pkthdr.ether_vtag = rxinfo & RDC_VLAN_MASK;
m->m_flags |= M_VLANTAG;
}
/*
 * Account for 10bytes auto padding which is used
 * to align IP header on 32bit boundary. Also note,
 * CRC bytes is automatically removed by the
 * hardware.
 */
m->m_data += SGE_RX_PAD_BYTES;
m->m_pkthdr.len = m->m_len = SGE_RX_BYTES(rxstat) -
SGE_RX_PAD_BYTES;
m->m_pkthdr.rcvif = ifp;
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
/* Drop the lock while handing the frame to the stack. */
SGE_UNLOCK(sc);
if_input(ifp, m);
SGE_LOCK(sc);
}
if (prog > 0) {
/* Flush our descriptor updates back to the controller. */
bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
cd->sge_rx_cons = cons;
}
}
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 *
 * Reclaims completed Tx descriptors between sge_tx_cons and
 * sge_tx_prod, freeing the associated mbufs and clearing sge_timer
 * when the ring drains completely.
 */
static void
sge_txeof(struct sge_softc *sc)
{
if_t ifp;
struct sge_list_data *ld;
struct sge_chain_data *cd;
struct sge_txdesc *txd;
uint32_t txstat;
int cons, nsegs, prod;
SGE_LOCK_ASSERT(sc);
ifp = sc->sge_ifp;
ld = &sc->sge_ldata;
cd = &sc->sge_cdata;
/* Nothing queued, nothing to reclaim. */
if (cd->sge_tx_cnt == 0)
return;
bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
cons = cd->sge_tx_cons;
prod = cd->sge_tx_prod;
for (; cons != prod;) {
txstat = le32toh(ld->sge_tx_ring[cons].sge_cmdsts);
if ((txstat & TDC_OWN) != 0)
break;
/*
 * Only the first descriptor of multi-descriptor transmission
 * is updated by controller. Driver should skip entire
 * chained buffers for the transmitted frame. In other words
 * TDC_OWN bit is valid only at the first descriptor of a
 * multi-descriptor transmission.
 */
if (SGE_TX_ERROR(txstat) != 0) {
#ifdef SGE_SHOW_ERRORS
device_printf(sc->sge_dev, "Tx error : 0x%b\n",
txstat, TX_ERR_BITS);
#endif
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
} else {
#ifdef notyet
if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & 0xFFFF) - 1);
#endif
if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
}
txd = &cd->sge_txdesc[cons];
/* Clear every descriptor used by this (possibly chained) frame. */
for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
ld->sge_tx_ring[cons].sge_cmdsts = 0;
SGE_INC(cons, SGE_TX_RING_CNT);
}
/* Reclaim transmitted mbuf. */
KASSERT(txd->tx_m != NULL,
("%s: freeing NULL mbuf\n", __func__));
bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap);
m_freem(txd->tx_m);
txd->tx_m = NULL;
cd->sge_tx_cnt -= txd->tx_ndesc;
KASSERT(cd->sge_tx_cnt >= 0,
("%s: Active Tx desc counter was garbled\n", __func__));
txd->tx_ndesc = 0;
/* At least one slot freed; allow new transmissions. */
if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
}
cd->sge_tx_cons = cons;
/* Ring fully drained; disarm the watchdog. */
if (cd->sge_tx_cnt == 0)
sc->sge_timer = 0;
}
/*
 * Once-a-second callout: polls the MII for link changes, restarts the
 * send queue when the link comes up, reclaims finished Tx descriptors
 * and runs the Tx watchdog, then re-arms itself.
 */
static void
sge_tick(void *arg)
{
struct sge_softc *sc;
struct mii_data *mii;
if_t ifp;
sc = arg;
SGE_LOCK_ASSERT(sc);
ifp = sc->sge_ifp;
mii = device_get_softc(sc->sge_miibus);
mii_tick(mii);
if ((sc->sge_flags & SGE_FLAG_LINK) == 0) {
/* Re-evaluate link state; statchg may set SGE_FLAG_LINK. */
sge_miibus_statchg(sc->sge_dev);
if ((sc->sge_flags & SGE_FLAG_LINK) != 0 &&
!if_sendq_empty(ifp))
sge_start_locked(ifp);
}
/*
 * Reclaim transmitted frames here as we do not request
 * Tx completion interrupt for every queued frames to
 * reduce excessive interrupts.
 */
sge_txeof(sc);
sge_watchdog(sc);
callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc);
}
/*
 * Interrupt handler.  Acknowledges and masks interrupts, services
 * Rx/Tx completions in a loop while further events are pending (see
 * the moderation comment below), then re-enables the interrupt mask
 * if the interface is still running.
 */
static void
sge_intr(void *arg)
{
struct sge_softc *sc;
if_t ifp;
uint32_t status;
sc = arg;
SGE_LOCK(sc);
ifp = sc->sge_ifp;
status = CSR_READ_4(sc, IntrStatus);
/* 0xFFFFFFFF likely means the hardware is gone -- treat as not ours. */
if (status == 0xFFFFFFFF || (status & SGE_INTRS) == 0) {
/* Not ours. */
SGE_UNLOCK(sc);
return;
}
/* Acknowledge interrupts. */
CSR_WRITE_4(sc, IntrStatus, status);
/* Disable further interrupts. */
CSR_WRITE_4(sc, IntrMask, 0);
/*
 * It seems the controller supports some kind of interrupt
 * moderation mechanism but we still don't know how to
 * enable that. To reduce number of generated interrupts
 * under load we check pending interrupts in a loop. This
 * will increase number of register access and is not correct
 * way to handle interrupt moderation but there seems to be
 * no other way at this time.
 */
for (;;) {
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
break;
if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) {
sge_rxeof(sc);
/* Wakeup Rx MAC. */
if ((status & INTR_RX_IDLE) != 0)
CSR_WRITE_4(sc, RX_CTL,
0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);
}
if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0)
sge_txeof(sc);
status = CSR_READ_4(sc, IntrStatus);
if ((status & SGE_INTRS) == 0)
break;
/* Acknowledge interrupts. */
CSR_WRITE_4(sc, IntrStatus, status);
}
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
/* Re-enable interrupts */
CSR_WRITE_4(sc, IntrMask, SGE_INTRS);
/* Kick the transmitter if frames queued up meanwhile. */
if (!if_sendq_empty(ifp))
sge_start_locked(ifp);
}
SGE_UNLOCK(sc);
}
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 *
 * On success the frame occupies 'nsegs' consecutive Tx descriptors
 * starting at the old producer index; the first descriptor is handed
 * to the controller last (TDC_OWN) so the chip never sees a partially
 * built chain.  May replace *m_head (pullups/collapse); sets it to
 * NULL when the mbuf was consumed on failure.
 */
static int
sge_encap(struct sge_softc *sc, struct mbuf **m_head)
{
struct mbuf *m;
struct sge_desc *desc;
struct sge_txdesc *txd;
bus_dma_segment_t txsegs[SGE_MAXTXSEGS];
uint32_t cflags, mss;
int error, i, nsegs, prod, si;
SGE_LOCK_ASSERT(sc);
si = prod = sc->sge_cdata.sge_tx_prod;
txd = &sc->sge_cdata.sge_txdesc[prod];
/* TSO: make headers contiguous and writable, then fix up checksums. */
if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
struct ether_header *eh;
struct ip *ip;
struct tcphdr *tcp;
uint32_t ip_off, poff;
if (M_WRITABLE(*m_head) == 0) {
/* Get a writable copy. */
m = m_dup(*m_head, M_NOWAIT);
m_freem(*m_head);
if (m == NULL) {
*m_head = NULL;
return (ENOBUFS);
}
*m_head = m;
}
ip_off = sizeof(struct ether_header);
m = m_pullup(*m_head, ip_off);
if (m == NULL) {
*m_head = NULL;
return (ENOBUFS);
}
eh = mtod(m, struct ether_header *);
/* Check the existence of VLAN tag. */
if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
ip_off = sizeof(struct ether_vlan_header);
m = m_pullup(m, ip_off);
if (m == NULL) {
*m_head = NULL;
return (ENOBUFS);
}
}
m = m_pullup(m, ip_off + sizeof(struct ip));
if (m == NULL) {
*m_head = NULL;
return (ENOBUFS);
}
ip = (struct ip *)(mtod(m, char *) + ip_off);
poff = ip_off + (ip->ip_hl << 2);
m = m_pullup(m, poff + sizeof(struct tcphdr));
if (m == NULL) {
*m_head = NULL;
return (ENOBUFS);
}
tcp = (struct tcphdr *)(mtod(m, char *) + poff);
/* Pull up the full TCP header including options. */
m = m_pullup(m, poff + (tcp->th_off << 2));
if (m == NULL) {
*m_head = NULL;
return (ENOBUFS);
}
/*
 * Reset IP checksum and recompute TCP pseudo
 * checksum that NDIS specification requires.
 */
ip = (struct ip *)(mtod(m, char *) + ip_off);
ip->ip_sum = 0;
tcp = (struct tcphdr *)(mtod(m, char *) + poff);
tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
htons(IPPROTO_TCP));
*m_head = m;
}
error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag,
txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
if (error == EFBIG) {
/* Too many segments; try squeezing the chain down. */
m = m_collapse(*m_head, M_NOWAIT, SGE_MAXTXSEGS);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;
return (ENOBUFS);
}
*m_head = m;
error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag,
txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
if (error != 0) {
m_freem(*m_head);
*m_head = NULL;
return (error);
}
} else if (error != 0)
return (error);
KASSERT(nsegs != 0, ("zero segment returned"));
/* Check descriptor overrun. */
if (sc->sge_cdata.sge_tx_cnt + nsegs >= SGE_TX_RING_CNT) {
bus_dmamap_unload(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap);
return (ENOBUFS);
}
bus_dmamap_sync(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap,
BUS_DMASYNC_PREWRITE);
m = *m_head;
cflags = 0;
mss = 0;
if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
cflags |= TDC_LS;
/* MSS lives in the upper 16 bits of sge_sts_size. */
mss = (uint32_t)m->m_pkthdr.tso_segsz;
mss <<= 16;
} else {
if (m->m_pkthdr.csum_flags & CSUM_IP)
cflags |= TDC_IP_CSUM;
if (m->m_pkthdr.csum_flags & CSUM_TCP)
cflags |= TDC_TCP_CSUM;
if (m->m_pkthdr.csum_flags & CSUM_UDP)
cflags |= TDC_UDP_CSUM;
}
/* Fill one descriptor per DMA segment; first one kept un-owned. */
for (i = 0; i < nsegs; i++) {
desc = &sc->sge_ldata.sge_tx_ring[prod];
if (i == 0) {
desc->sge_sts_size = htole32(m->m_pkthdr.len | mss);
desc->sge_cmdsts = 0;
} else {
desc->sge_sts_size = 0;
desc->sge_cmdsts = htole32(TDC_OWN);
}
desc->sge_ptr = htole32(SGE_ADDR_LO(txsegs[i].ds_addr));
desc->sge_flags = htole32(txsegs[i].ds_len);
if (prod == SGE_TX_RING_CNT - 1)
desc->sge_flags |= htole32(RING_END);
sc->sge_cdata.sge_tx_cnt++;
SGE_INC(prod, SGE_TX_RING_CNT);
}
/* Update producer index. */
sc->sge_cdata.sge_tx_prod = prod;
desc = &sc->sge_ldata.sge_tx_ring[si];
/* Configure VLAN. */
if((m->m_flags & M_VLANTAG) != 0) {
cflags |= m->m_pkthdr.ether_vtag;
desc->sge_sts_size |= htole32(TDS_INS_VLAN);
}
desc->sge_cmdsts |= htole32(TDC_DEF | TDC_CRC | TDC_PAD | cflags);
#if 1
if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0)
desc->sge_cmdsts |= htole32(TDC_BST);
#else
if ((sc->sge_flags & SGE_FLAG_FDX) == 0) {
desc->sge_cmdsts |= htole32(TDC_COL | TDC_CRS | TDC_BKF);
if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0)
desc->sge_cmdsts |= htole32(TDC_EXT | TDC_BST);
}
#endif
/* Request interrupt and give ownership to controller. */
desc->sge_cmdsts |= htole32(TDC_OWN | TDC_INTR);
txd->tx_m = m;
txd->tx_ndesc = nsegs;
return (0);
}
/*
 * Locked wrapper around sge_start_locked(): the if_start entry point
 * registered with the network stack.
 */
static void
sge_start(if_t ifp)
{
	struct sge_softc *sc = if_getsoftc(ifp);

	SGE_LOCK(sc);
	sge_start_locked(ifp);
	SGE_UNLOCK(sc);
}
/*
 * Dequeue frames from the interface send queue and hand them to the
 * Tx ring via sge_encap(), then kick the transmitter if anything was
 * queued.  Caller must hold the driver lock; returns silently when
 * the link is down or the interface is not running.
 */
static void
sge_start_locked(if_t ifp)
{
struct sge_softc *sc;
struct mbuf *m_head;
int queued = 0;
sc = if_getsoftc(ifp);
SGE_LOCK_ASSERT(sc);
if ((sc->sge_flags & SGE_FLAG_LINK) == 0 ||
(if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
IFF_DRV_RUNNING)
return;
for (queued = 0; !if_sendq_empty(ifp); ) {
/* Leave headroom for a maximally fragmented frame. */
if (sc->sge_cdata.sge_tx_cnt > (SGE_TX_RING_CNT -
SGE_MAXTXSEGS)) {
if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
break;
}
m_head = if_dequeue(ifp);
if (m_head == NULL)
break;
if (sge_encap(sc, &m_head)) {
/* Encap consumed the mbuf; nothing to put back. */
if (m_head == NULL)
break;
/* Requeue and stall until descriptors free up. */
if_sendq_prepend(ifp, m_head);
if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
break;
}
queued++;
/*
 * If there's a BPF listener, bounce a copy of this frame
 * to him.
 */
BPF_MTAP(ifp, m_head);
}
if (queued > 0) {
/* Flush descriptors and poll the Tx MAC; arm the watchdog. */
bus_dmamap_sync(sc->sge_cdata.sge_tx_tag,
sc->sge_cdata.sge_tx_dmamap,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL);
sc->sge_timer = 5;
}
}
/*
 * Locked wrapper around sge_init_locked(): the if_init entry point
 * registered with the network stack.
 */
static void
sge_init(void *arg)
{
	struct sge_softc *sc = arg;

	SGE_LOCK(sc);
	sge_init_locked(sc);
	SGE_UNLOCK(sc);
}
/*
 * (Re)initialize the controller: stop and reset the chip, rebuild the
 * Rx/Tx rings, program the DMA list addresses, station address and Rx
 * filter, then enable interrupts and the Rx/Tx MACs.  Caller must
 * hold the driver lock.  No-op if the interface is already running.
 */
static void
sge_init_locked(struct sge_softc *sc)
{
if_t ifp;
struct mii_data *mii;
uint16_t rxfilt;
int i;
SGE_LOCK_ASSERT(sc);
ifp = sc->sge_ifp;
mii = device_get_softc(sc->sge_miibus);
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
return;
/*
 * Cancel pending I/O and free all RX/TX buffers.
 */
sge_stop(sc);
sge_reset(sc);
/* Init circular RX list. */
if (sge_list_rx_init(sc) == ENOBUFS) {
device_printf(sc->sge_dev, "no memory for Rx buffers\n");
sge_stop(sc);
return;
}
/* Init TX descriptors. */
sge_list_tx_init(sc);
/*
 * Load the address of the RX and TX lists.
 */
CSR_WRITE_4(sc, TX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_tx_paddr));
CSR_WRITE_4(sc, RX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_rx_paddr));
CSR_WRITE_4(sc, TxMacControl, 0x60);
CSR_WRITE_4(sc, RxWakeOnLan, 0);
CSR_WRITE_4(sc, RxWakeOnLanData, 0);
/* Allow receiving VLAN frames. */
CSR_WRITE_2(sc, RxMPSControl, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN +
SGE_RX_PAD_BYTES);
/* Program the station address one byte at a time. */
for (i = 0; i < ETHER_ADDR_LEN; i++)
CSR_WRITE_1(sc, RxMacAddr + i, if_getlladdr(ifp)[i]);
/* Configure RX MAC. */
rxfilt = RXMAC_STRIP_FCS | RXMAC_PAD_ENB | RXMAC_CSUM_ENB;
CSR_WRITE_2(sc, RxMacControl, rxfilt);
sge_rxfilter(sc);
sge_setvlan(sc);
/* Initialize default speed/duplex information. */
if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0)
sc->sge_flags |= SGE_FLAG_SPEED_1000;
sc->sge_flags |= SGE_FLAG_FDX;
if ((sc->sge_flags & SGE_FLAG_RGMII) != 0)
CSR_WRITE_4(sc, StationControl, 0x04008001);
else
CSR_WRITE_4(sc, StationControl, 0x04000001);
/*
 * XXX Try to mitigate interrupts.
 */
CSR_WRITE_4(sc, IntrControl, 0x08880000);
#ifdef notyet
if (sc->sge_intrcontrol != 0)
CSR_WRITE_4(sc, IntrControl, sc->sge_intrcontrol);
if (sc->sge_intrtimer != 0)
CSR_WRITE_4(sc, IntrTimer, sc->sge_intrtimer);
#endif
/*
 * Clear and enable interrupts.
 */
CSR_WRITE_4(sc, IntrStatus, 0xFFFFFFFF);
CSR_WRITE_4(sc, IntrMask, SGE_INTRS);
/* Enable receiver and transmitter. */
CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB);
CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);
if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
/* Force a fresh link negotiation and start the periodic tick. */
sc->sge_flags &= ~SGE_FLAG_LINK;
mii_mediachg(mii);
callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc);
}
/*
 * Set media options.
 *
 * Resets every PHY on the MII bus before renegotiating, then returns
 * the result of mii_mediachg().
 */
static int
sge_ifmedia_upd(if_t ifp)
{
struct sge_softc *sc;
struct mii_data *mii;
struct mii_softc *miisc;
int error;
sc = if_getsoftc(ifp);
SGE_LOCK(sc);
mii = device_get_softc(sc->sge_miibus);
LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
PHY_RESET(miisc);
error = mii_mediachg(mii);
SGE_UNLOCK(sc);
return (error);
}
/*
 * Report current media status.
 *
 * Polls the MII and copies active media and link status into 'ifmr'.
 * Skipped entirely while the interface is administratively down.
 */
static void
sge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
struct sge_softc *sc;
struct mii_data *mii;
sc = if_getsoftc(ifp);
SGE_LOCK(sc);
mii = device_get_softc(sc->sge_miibus);
if ((if_getflags(ifp) & IFF_UP) == 0) {
SGE_UNLOCK(sc);
return;
}
mii_pollstat(mii);
ifmr->ifm_active = mii->mii_media_active;
ifmr->ifm_status = mii->mii_media_status;
SGE_UNLOCK(sc);
}
/*
 * Handle socket ioctls: interface flags, capability toggles,
 * multicast list changes and media requests.  Anything unrecognized
 * is delegated to ether_ioctl().
 */
static int
sge_ioctl(if_t ifp, u_long command, caddr_t data)
{
struct sge_softc *sc;
struct ifreq *ifr;
struct mii_data *mii;
int error = 0, mask, reinit;
sc = if_getsoftc(ifp);
ifr = (struct ifreq *)data;
switch(command) {
case SIOCSIFFLAGS:
SGE_LOCK(sc);
if ((if_getflags(ifp) & IFF_UP) != 0) {
/*
 * If only PROMISC/ALLMULTI changed while running, just
 * reprogram the Rx filter instead of a full re-init.
 */
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
((if_getflags(ifp) ^ sc->sge_if_flags) &
(IFF_PROMISC | IFF_ALLMULTI)) != 0)
sge_rxfilter(sc);
else
sge_init_locked(sc);
} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
sge_stop(sc);
sc->sge_if_flags = if_getflags(ifp);
SGE_UNLOCK(sc);
break;
case SIOCSIFCAP:
SGE_LOCK(sc);
reinit = 0;
/* 'mask' holds the capability bits being toggled. */
mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
if ((mask & IFCAP_TXCSUM) != 0 &&
(if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
if_togglecapenable(ifp, IFCAP_TXCSUM);
if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
if_sethwassistbits(ifp, SGE_CSUM_FEATURES, 0);
else
if_sethwassistbits(ifp, 0, SGE_CSUM_FEATURES);
}
if ((mask & IFCAP_RXCSUM) != 0 &&
(if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0)
if_togglecapenable(ifp, IFCAP_RXCSUM);
if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
(if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM) != 0)
if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
if ((mask & IFCAP_TSO4) != 0 &&
(if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
if_togglecapenable(ifp, IFCAP_TSO4);
if ((if_getcapenable(ifp) & IFCAP_TSO4) != 0)
if_sethwassistbits(ifp, CSUM_TSO, 0);
else
if_sethwassistbits(ifp, 0, CSUM_TSO);
}
if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
(if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
(if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
/*
 * Due to unknown reason, toggling VLAN hardware
 * tagging require interface reinitialization.
 */
if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
/* VLAN Tx offloads depend on hardware tagging. */
if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
if_setcapenablebit(ifp, 0,
IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
reinit = 1;
}
if (reinit > 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
sge_init_locked(sc);
}
SGE_UNLOCK(sc);
VLAN_CAPABILITIES(ifp);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
SGE_LOCK(sc);
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
sge_rxfilter(sc);
SGE_UNLOCK(sc);
break;
case SIOCGIFMEDIA:
case SIOCSIFMEDIA:
mii = device_get_softc(sc->sge_miibus);
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
break;
default:
error = ether_ioctl(ifp, command, data);
break;
}
return (error);
}
static void
sge_watchdog(struct sge_softc *sc)
{
if_t ifp;
SGE_LOCK_ASSERT(sc);
if (sc->sge_timer == 0 || --sc->sge_timer > 0)
return;
ifp = sc->sge_ifp;
if ((sc->sge_flags & SGE_FLAG_LINK) == 0) {
if (1 || bootverbose)
device_printf(sc->sge_dev,
"watchdog timeout (lost link)\n");
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
sge_init_locked(sc);
return;
}
device_printf(sc->sge_dev, "watchdog timeout\n");
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
sge_init_locked(sc);
if (!if_sendq_empty(sc->sge_ifp))
sge_start_locked(ifp);
}
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 *
 * Disarms the watchdog and tick callout, masks interrupts, halts the
 * Tx/Rx MACs, waits for DMA to settle, then releases all ring
 * buffers.  Caller must hold the driver lock.
 */
static void
sge_stop(struct sge_softc *sc)
{
if_t ifp;
ifp = sc->sge_ifp;
SGE_LOCK_ASSERT(sc);
sc->sge_timer = 0;
callout_stop(&sc->sge_stat_ch);
if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
CSR_WRITE_4(sc, IntrMask, 0);
CSR_READ_4(sc, IntrMask);
CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
/* Stop TX/RX MAC. */
CSR_WRITE_4(sc, TX_CTL, 0x1a00);
CSR_WRITE_4(sc, RX_CTL, 0x1a00);
/* XXX Can we assume active DMA cycles gone? */
DELAY(2000);
CSR_WRITE_4(sc, IntrMask, 0);
CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
sc->sge_flags &= ~SGE_FLAG_LINK;
sge_list_rx_free(sc);
sge_list_tx_free(sc);
}
diff --git a/sys/dev/sis/if_sis.c b/sys/dev/sis/if_sis.c
index bf96383e9a4a..64a43b2bf51e 100644
--- a/sys/dev/sis/if_sis.c
+++ b/sys/dev/sis/if_sis.c
@@ -1,2411 +1,2406 @@
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) 2005 Poul-Henning Kamp <phk@FreeBSD.org>
* Copyright (c) 1997, 1998, 1999
* Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
/*
* SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are
* available from http://www.sis.com.tw.
*
* This driver also supports the NatSemi DP83815. Datasheets are
* available from http://www.national.com.
*
* Written by Bill Paul <wpaul@ee.columbia.edu>
* Electrical Engineering Department
* Columbia University, New York City
*/
/*
* The SiS 900 is a fairly simple chip. It uses bus master DMA with
* simple TX and RX descriptors of 3 longwords in size. The receiver
* has a single perfect filter entry for the station address and a
* 128-bit multicast hash table. The SiS 900 has a built-in MII-based
* transceiver while the 7016 requires an external transceiver chip.
* Both chips offer the standard bit-bang MII interface as well as
* an enhanced PHY interface which simplifies accessing MII registers.
*
* The only downside to this chipset is that RX descriptors must be
* longword aligned.
*/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/bpf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#define SIS_USEIOSPACE
#include <dev/sis/if_sisreg.h>
MODULE_DEPEND(sis, pci, 1, 1, 1);
MODULE_DEPEND(sis, ether, 1, 1, 1);
MODULE_DEPEND(sis, miibus, 1, 1, 1);
/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
#define SIS_LOCK(_sc) mtx_lock(&(_sc)->sis_mtx)
#define SIS_UNLOCK(_sc) mtx_unlock(&(_sc)->sis_mtx)
#define SIS_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sis_mtx, MA_OWNED)
/*
* register space access macros
*/
#define CSR_WRITE_4(sc, reg, val) bus_write_4(sc->sis_res[0], reg, val)
#define CSR_READ_4(sc, reg) bus_read_4(sc->sis_res[0], reg)
#define CSR_READ_2(sc, reg) bus_read_2(sc->sis_res[0], reg)
#define CSR_BARRIER(sc, reg, length, flags) \
bus_barrier(sc->sis_res[0], reg, length, flags)
/*
* Various supported device vendors/types and their names.
*/
static const struct sis_type sis_devs[] = {
{ SIS_VENDORID, SIS_DEVICEID_900, "SiS 900 10/100BaseTX" },
{ SIS_VENDORID, SIS_DEVICEID_7016, "SiS 7016 10/100BaseTX" },
{ NS_VENDORID, NS_DEVICEID_DP83815, "NatSemi DP8381[56] 10/100BaseTX" },
{ 0, 0, NULL }
};
static int sis_detach(device_t);
static __inline void sis_discard_rxbuf(struct sis_rxdesc *);
static int sis_dma_alloc(struct sis_softc *);
static void sis_dma_free(struct sis_softc *);
static int sis_dma_ring_alloc(struct sis_softc *, bus_size_t, bus_size_t,
bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
static void sis_dmamap_cb(void *, bus_dma_segment_t *, int, int);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void sis_fixup_rx(struct mbuf *);
#endif
static void sis_ifmedia_sts(if_t, struct ifmediareq *);
static int sis_ifmedia_upd(if_t);
static void sis_init(void *);
static void sis_initl(struct sis_softc *);
static void sis_intr(void *);
static int sis_ioctl(if_t, u_long, caddr_t);
static uint32_t sis_mii_bitbang_read(device_t);
static void sis_mii_bitbang_write(device_t, uint32_t);
static int sis_newbuf(struct sis_softc *, struct sis_rxdesc *);
static int sis_resume(device_t);
static int sis_rxeof(struct sis_softc *);
static void sis_rxfilter(struct sis_softc *);
static void sis_rxfilter_ns(struct sis_softc *);
static void sis_rxfilter_sis(struct sis_softc *);
static void sis_start(if_t);
static void sis_startl(if_t);
static void sis_stop(struct sis_softc *);
static int sis_suspend(device_t);
static void sis_add_sysctls(struct sis_softc *);
static void sis_watchdog(struct sis_softc *);
static void sis_wol(struct sis_softc *);
/*
* MII bit-bang glue
*/
static const struct mii_bitbang_ops sis_mii_bitbang_ops = {
sis_mii_bitbang_read,
sis_mii_bitbang_write,
{
SIS_MII_DATA, /* MII_BIT_MDO */
SIS_MII_DATA, /* MII_BIT_MDI */
SIS_MII_CLK, /* MII_BIT_MDC */
SIS_MII_DIR, /* MII_BIT_DIR_HOST_PHY */
0, /* MII_BIT_DIR_PHY_HOST */
}
};
static struct resource_spec sis_res_spec[] = {
#ifdef SIS_USEIOSPACE
{ SYS_RES_IOPORT, SIS_PCI_LOIO, RF_ACTIVE},
#else
{ SYS_RES_MEMORY, SIS_PCI_LOMEM, RF_ACTIVE},
#endif
{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE},
{ -1, 0 }
};
#define SIS_SETBIT(sc, reg, x) \
CSR_WRITE_4(sc, reg, \
CSR_READ_4(sc, reg) | (x))
#define SIS_CLRBIT(sc, reg, x) \
CSR_WRITE_4(sc, reg, \
CSR_READ_4(sc, reg) & ~(x))
#define SIO_SET(x) \
CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x)
#define SIO_CLR(x) \
CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x)
/*
 * Reverse the bit order of a 16-bit word (bit 0 swaps with bit 15,
 * bit 1 with bit 14, and so on).
 */
static uint16_t
sis_reverse(uint16_t n)
{
	uint16_t out;
	int bit;

	out = 0;
	for (bit = 0; bit < 16; bit++) {
		out = (out << 1) | (n & 1);
		n >>= 1;
	}
	return (out);
}
/*
 * Short delay implemented as dummy reads of SIS_CSR; used to pace the
 * bit-bang EEPROM accesses.  The (300 / 33) + 1 iteration count
 * presumably targets ~300ns of bus reads -- TODO confirm.
 */
static void
sis_delay(struct sis_softc *sc)
{
int idx;
for (idx = (300 / 33) + 1; idx > 0; idx--)
CSR_READ_4(sc, SIS_CSR);
}
/*
 * Drive the EEPROM interface to its idle state: select the chip,
 * clock it 25 times, then deselect and clear the control register.
 */
static void
sis_eeprom_idle(struct sis_softc *sc)
{
int i;
SIO_SET(SIS_EECTL_CSEL);
sis_delay(sc);
SIO_SET(SIS_EECTL_CLK);
sis_delay(sc);
/* Issue idle clock cycles with chip select asserted. */
for (i = 0; i < 25; i++) {
SIO_CLR(SIS_EECTL_CLK);
sis_delay(sc);
SIO_SET(SIS_EECTL_CLK);
sis_delay(sc);
}
SIO_CLR(SIS_EECTL_CLK);
sis_delay(sc);
SIO_CLR(SIS_EECTL_CSEL);
sis_delay(sc);
CSR_WRITE_4(sc, SIS_EECTL, 0x00000000);
}
/*
 * Send a read command and address to the EEPROM, check for ACK.
 *
 * Shifts the READ opcode plus 'addr' out MSB-first on the DIN line,
 * clocking each bit manually.
 */
static void
sis_eeprom_putbyte(struct sis_softc *sc, int addr)
{
int d, i;
d = addr | SIS_EECMD_READ;
/*
 * Feed in each bit and strobe the clock.
 */
for (i = 0x400; i; i >>= 1) {
if (d & i) {
SIO_SET(SIS_EECTL_DIN);
} else {
SIO_CLR(SIS_EECTL_DIN);
}
sis_delay(sc);
SIO_SET(SIS_EECTL_CLK);
sis_delay(sc);
SIO_CLR(SIS_EECTL_CLK);
sis_delay(sc);
}
}
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 *
 * Idles the EEPROM, enters access mode, shifts out the read command,
 * then clocks in 16 data bits MSB-first and stores them in *dest.
 */
static void
sis_eeprom_getword(struct sis_softc *sc, int addr, uint16_t *dest)
{
int i;
uint16_t word = 0;
/* Force EEPROM to idle state. */
sis_eeprom_idle(sc);
/* Enter EEPROM access mode. */
sis_delay(sc);
SIO_CLR(SIS_EECTL_CLK);
sis_delay(sc);
SIO_SET(SIS_EECTL_CSEL);
sis_delay(sc);
/*
 * Send address of word we want to read.
 */
sis_eeprom_putbyte(sc, addr);
/*
 * Start reading bits from EEPROM.
 */
for (i = 0x8000; i; i >>= 1) {
SIO_SET(SIS_EECTL_CLK);
sis_delay(sc);
if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT)
word |= i;
sis_delay(sc);
SIO_CLR(SIS_EECTL_CLK);
sis_delay(sc);
}
/* Turn off EEPROM access mode. */
sis_eeprom_idle(sc);
*dest = word;
}
/*
 * Read 'cnt' consecutive 16-bit words from the EEPROM starting at
 * word offset 'off' into 'dest', optionally byte-swapping each word
 * from network to host order when 'swap' is non-zero.
 */
static void
sis_read_eeprom(struct sis_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	uint16_t *out;
	uint16_t val;
	int idx;

	out = (uint16_t *)dest;
	for (idx = 0; idx < cnt; idx++) {
		val = 0;
		sis_eeprom_getword(sc, off + idx, &val);
		out[idx] = swap ? ntohs(val) : val;
	}
}
#if defined(__i386__) || defined(__amd64__)
/*
 * Scan all PCI buses for the SiS ISA bridge (vendor SIS_VENDORID,
 * device 0x0008) and return its device_t, or NULL if not found.
 * Used by sis_read_cmos() to unlock CMOS RAM access.
 */
static device_t
sis_find_bridge(device_t dev)
{
devclass_t pci_devclass;
device_t *pci_devices;
int pci_count = 0;
device_t *pci_children;
int pci_childcount = 0;
device_t *busp, *childp;
device_t child = NULL;
int i, j;
if ((pci_devclass = devclass_find("pci")) == NULL)
return (NULL);
devclass_get_devices(pci_devclass, &pci_devices, &pci_count);
for (i = 0, busp = pci_devices; i < pci_count; i++, busp++) {
if (device_get_children(*busp, &pci_children, &pci_childcount))
continue;
for (j = 0, childp = pci_children;
j < pci_childcount; j++, childp++) {
if (pci_get_vendor(*childp) == SIS_VENDORID &&
pci_get_device(*childp) == 0x0008) {
child = *childp;
/* Free the child list before breaking out. */
free(pci_children, M_TEMP);
goto done;
}
}
free(pci_children, M_TEMP);
}
done:
free(pci_devices, M_TEMP);
return (child);
}
/*
 * Read 'cnt' bytes starting at CMOS offset 'off' into 'dest' via the
 * RTC index/data ports (0x70/0x71).  Access is enabled by setting bit
 * 0x40 in config register 0x48 of the SiS ISA bridge, and restored
 * afterwards.  Used to recover the station address on some boards.
 */
static void
sis_read_cmos(struct sis_softc *sc, device_t dev, caddr_t dest, int off, int cnt)
{
device_t bridge;
uint8_t reg;
int i;
bus_space_tag_t btag;
bridge = sis_find_bridge(dev);
if (bridge == NULL)
return;
reg = pci_read_config(bridge, 0x48, 1);
/* Enable CMOS RAM access through the bridge. */
pci_write_config(bridge, 0x48, reg|0x40, 1);
/* XXX */
#if defined(__amd64__) || defined(__i386__)
btag = X86_BUS_SPACE_IO;
#endif
for (i = 0; i < cnt; i++) {
bus_space_write_1(btag, 0x0, 0x70, i + off);
*(dest + i) = bus_space_read_1(btag, 0x0, 0x71);
}
/* Restore the original bridge configuration. */
pci_write_config(bridge, 0x48, reg & ~0x40, 1);
}
/*
 * Recover the station address by reloading the Rx filter (SIS_CSR_RELOAD)
 * and then reading the three 16-bit perfect-filter address registers
 * (PAR0..PAR2) into 'dest'.  The filter control and CSR registers are
 * saved and restored around the operation.
 */
static void
sis_read_mac(struct sis_softc *sc, device_t dev, caddr_t dest)
{
uint32_t filtsave, csrsave;
filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL);
csrsave = CSR_READ_4(sc, SIS_CSR);
/* Trigger a reload of the filter registers from the EEPROM. */
CSR_WRITE_4(sc, SIS_CSR, SIS_CSR_RELOAD | filtsave);
CSR_WRITE_4(sc, SIS_CSR, 0);
/* Disable the filter while reading the address back out. */
CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave & ~SIS_RXFILTCTL_ENABLE);
CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
((uint16_t *)dest)[0] = CSR_READ_2(sc, SIS_RXFILT_DATA);
CSR_WRITE_4(sc, SIS_RXFILT_CTL,SIS_FILTADDR_PAR1);
((uint16_t *)dest)[1] = CSR_READ_2(sc, SIS_RXFILT_DATA);
CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
((uint16_t *)dest)[2] = CSR_READ_2(sc, SIS_RXFILT_DATA);
CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave);
CSR_WRITE_4(sc, SIS_CSR, csrsave);
}
#endif
/*
 * Read the MII serial port for the MII bit-bang module.
 *
 * Returns the current SIS_EECTL register value; the barrier keeps
 * register accesses ordered around the bit-bang sequence.
 */
static uint32_t
sis_mii_bitbang_read(device_t dev)
{
struct sis_softc *sc;
uint32_t val;
sc = device_get_softc(dev);
val = CSR_READ_4(sc, SIS_EECTL);
CSR_BARRIER(sc, SIS_EECTL, 4,
BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
return (val);
}
/*
 * Write the MII serial port for the MII bit-bang module.
 *
 * Stores 'val' into SIS_EECTL; the barrier keeps register accesses
 * ordered around the bit-bang sequence.
 */
static void
sis_mii_bitbang_write(device_t dev, uint32_t val)
{
struct sis_softc *sc;
sc = device_get_softc(dev);
CSR_WRITE_4(sc, SIS_EECTL, val);
CSR_BARRIER(sc, SIS_EECTL, 4,
BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
/*
 * MII register read.  Three paths: memory-mapped PHY registers on the
 * NatSemi DP83815, the enhanced PHY access register on pre-635 SiS 900
 * revisions, and generic bit-bang MII for everything else.
 */
static int
sis_miibus_readreg(device_t dev, int phy, int reg)
{
struct sis_softc *sc;
sc = device_get_softc(dev);
if (sc->sis_type == SIS_TYPE_83815) {
if (phy != 0)
return (0);
/*
 * The NatSemi chip can take a while after
 * a reset to come ready, during which the BMSR
 * returns a value of 0. This is *never* supposed
 * to happen: some of the BMSR bits are meant to
 * be hardwired in the on position, and this can
 * confuse the miibus code a bit during the probe
 * and attach phase. So we make an effort to check
 * for this condition and wait for it to clear.
 */
if (!CSR_READ_4(sc, NS_BMSR))
DELAY(1000);
/* PHY registers are mapped at 4-byte strides from NS_BMCR. */
return CSR_READ_4(sc, NS_BMCR + (reg * 4));
}
/*
 * Chipsets < SIS_635 seem not to be able to read/write
 * through mdio. Use the enhanced PHY access register
 * again for them.
 */
if (sc->sis_type == SIS_TYPE_900 &&
sc->sis_rev < SIS_REV_635) {
int i, val = 0;
if (phy != 0)
return (0);
CSR_WRITE_4(sc, SIS_PHYCTL,
(phy << 11) | (reg << 6) | SIS_PHYOP_READ);
SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
/* Poll until the chip clears the ACCESS bit. */
for (i = 0; i < SIS_TIMEOUT; i++) {
if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
break;
}
if (i == SIS_TIMEOUT) {
device_printf(sc->sis_dev,
"PHY failed to come ready\n");
return (0);
}
val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF;
/* All-ones reads back as "no PHY". */
if (val == 0xFFFF)
return (0);
return (val);
} else
return (mii_bitbang_readreg(dev, &sis_mii_bitbang_ops, phy,
reg));
}
/*
 * MII register write.  Mirrors sis_miibus_readreg(): memory-mapped
 * registers on the DP83815, the enhanced PHY access register on
 * pre-635 SiS 900 revisions, bit-bang MII otherwise.  Always returns 0.
 */
static int
sis_miibus_writereg(device_t dev, int phy, int reg, int data)
{
struct sis_softc *sc;
sc = device_get_softc(dev);
if (sc->sis_type == SIS_TYPE_83815) {
if (phy != 0)
return (0);
CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data);
return (0);
}
/*
 * Chipsets < SIS_635 seem not to be able to read/write
 * through mdio. Use the enhanced PHY access register
 * again for them.
 */
if (sc->sis_type == SIS_TYPE_900 &&
sc->sis_rev < SIS_REV_635) {
int i;
if (phy != 0)
return (0);
CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) |
(reg << 6) | SIS_PHYOP_WRITE);
SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
/* Poll until the chip clears the ACCESS bit. */
for (i = 0; i < SIS_TIMEOUT; i++) {
if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
break;
}
if (i == SIS_TIMEOUT)
device_printf(sc->sis_dev,
"PHY failed to come ready\n");
} else
mii_bitbang_writereg(dev, &sis_mii_bitbang_ops, phy, reg,
data);
return (0);
}
/*
 * miibus link-state change callback.  Reprograms the MAC's TX/RX
 * configuration to match the negotiated speed and duplex, applies the
 * NatSemi errata workarounds, and (re)enables the TX/RX MACs.  Called
 * with the softc lock held; does nothing until the interface is
 * running.
 */
static void
sis_miibus_statchg(device_t dev)
{
	struct sis_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	uint32_t reg;

	sc = device_get_softc(dev);
	SIS_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->sis_miibus);
	ifp = sc->sis_ifp;
	if (mii == NULL || ifp == NULL ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;
	/* Assume no link until a valid active media type proves it. */
	sc->sis_flags &= ~SIS_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
			CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10);
			sc->sis_flags |= SIS_FLAG_LINK;
			break;
		case IFM_100_TX:
			CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
			sc->sis_flags |= SIS_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	if ((sc->sis_flags & SIS_FLAG_LINK) == 0) {
		/*
		 * Stopping MACs seem to reset SIS_TX_LISTPTR and
		 * SIS_RX_LISTPTR which in turn requires resetting
		 * TX/RX buffers.  So just don't do anything for
		 * lost link.
		 */
		return;
	}
	/* Set full/half duplex mode. */
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		SIS_SETBIT(sc, SIS_TX_CFG,
		    (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
		SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
	} else {
		SIS_CLRBIT(sc, SIS_TX_CFG,
		    (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
		SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
	}
	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) {
		/*
		 * MPII03.D: Half Duplex Excessive Collisions.
		 * Also page 49 in 83816 manual
		 */
		SIS_SETBIT(sc, SIS_TX_CFG, SIS_TXCFG_MPII03D);
	}
	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr < NS_SRR_16A &&
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
		/*
		 * Short Cable Receive Errors (MP21.E)
		 */
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
		reg = CSR_READ_4(sc, NS_PHY_DSPCFG) & 0xfff;
		CSR_WRITE_4(sc, NS_PHY_DSPCFG, reg | 0x1000);
		DELAY(100);
		reg = CSR_READ_4(sc, NS_PHY_TDATA) & 0xff;
		if ((reg & 0x0080) == 0 || (reg > 0xd8 && reg <= 0xff)) {
			device_printf(sc->sis_dev,
			    "Applying short cable fix (reg=%x)\n", reg);
			CSR_WRITE_4(sc, NS_PHY_TDATA, 0x00e8);
			SIS_SETBIT(sc, NS_PHY_DSPCFG, 0x20);
		}
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
	}
	/* Enable TX/RX MACs. */
	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE | SIS_CSR_RX_ENABLE);
}
/*
 * Map an ethernet address to its bit position in the receive filter
 * hash table.  All chips hash with a big-endian CRC32; they differ
 * only in table width: the NatSemi DP83815 has a 512-bit table (9-bit
 * index), SiS rev >= 635 and 900B parts have 256 bits (8-bit index),
 * and older SiS parts have 128 bits (7-bit index).
 */
static uint32_t
sis_mchash(struct sis_softc *sc, const uint8_t *addr)
{
	uint32_t crc;
	int shift;

	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
	/* Keep only as many high CRC bits as the filter can index. */
	if (sc->sis_type == SIS_TYPE_83815)
		shift = 23;
	else if (sc->sis_rev >= SIS_REV_635 || sc->sis_rev == SIS_REV_900B)
		shift = 24;
	else
		shift = 25;
	return (crc >> shift);
}
/*
 * Reprogram the receive filter, dispatching to the chip-specific
 * routine.  Caller must hold the softc lock.
 */
static void
sis_rxfilter(struct sis_softc *sc)
{
	SIS_LOCK_ASSERT(sc);
	if (sc->sis_type != SIS_TYPE_83815)
		sis_rxfilter_sis(sc);
	else
		sis_rxfilter_ns(sc);
}
/*
 * if_foreach_llmaddr() callback for the NatSemi DP83815: set the hash
 * filter bit for one multicast address directly in on-chip filter
 * memory.  The 512-bit table is accessed as 16-bit words through the
 * RXFILT_CTL/RXFILT_DATA register pair: 'index' selects the word and
 * the folded 'bit' selects the bit within that word.
 */
static u_int
sis_write_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct sis_softc *sc = arg;
	uint32_t h;
	int bit, index;

	h = sis_mchash(sc, LLADDR(sdl));
	index = h >> 3;
	bit = h & 0x1F;
	/* Point the filter-control register at the target word. */
	CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index);
	/* Fold the 5-bit position into the 16-bit data word. */
	if (bit > 0xF)
		bit -= 0x10;
	SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit));
	return (1);
}
/*
 * Program the NatSemi DP83815 receive filter: broadcast, ARP and
 * perfect-match acceptance plus either promiscuous/allmulti mode or
 * the 512-bit multicast hash table.  The filter must be disabled
 * while its configuration bits and hash memory are rewritten, and is
 * re-enabled at the end.
 */
static void
sis_rxfilter_ns(struct sis_softc *sc)
{
	if_t ifp;
	uint32_t i, filter;

	ifp = sc->sis_ifp;
	filter = CSR_READ_4(sc, SIS_RXFILT_CTL);
	if (filter & SIS_RXFILTCTL_ENABLE) {
		/*
		 * Filter should be disabled to program other bits.
		 */
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, filter & ~SIS_RXFILTCTL_ENABLE);
		/* Read back to flush the disable before reprogramming. */
		CSR_READ_4(sc, SIS_RXFILT_CTL);
	}
	filter &= ~(NS_RXFILTCTL_ARP | NS_RXFILTCTL_PERFECT |
	    NS_RXFILTCTL_MCHASH | SIS_RXFILTCTL_ALLPHYS | SIS_RXFILTCTL_BROAD |
	    SIS_RXFILTCTL_ALLMULTI);
	if (if_getflags(ifp) & IFF_BROADCAST)
		filter |= SIS_RXFILTCTL_BROAD;
	/*
	 * For the NatSemi chip, we have to explicitly enable the
	 * reception of ARP frames, as well as turn on the 'perfect
	 * match' filter where we store the station address, otherwise
	 * we won't receive unicasts meant for this host.
	 */
	filter |= NS_RXFILTCTL_ARP | NS_RXFILTCTL_PERFECT;
	if (if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) {
		filter |= SIS_RXFILTCTL_ALLMULTI;
		if (if_getflags(ifp) & IFF_PROMISC)
			filter |= SIS_RXFILTCTL_ALLPHYS;
	} else {
		/*
		 * We have to explicitly enable the multicast hash table
		 * on the NatSemi chip if we want to use it, which we do.
		 */
		filter |= NS_RXFILTCTL_MCHASH;
		/* first, zot all the existing hash bits */
		for (i = 0; i < 32; i++) {
			CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO +
			    (i * 2));
			CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0);
		}
		/* Then set a bit per subscribed multicast group. */
		if_foreach_llmaddr(ifp, sis_write_maddr, sc);
	}
	/* Turn the receive filter on */
	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filter | SIS_RXFILTCTL_ENABLE);
	CSR_READ_4(sc, SIS_RXFILT_CTL);
}
/*
 * Accumulator passed to if_foreach_llmaddr() when building the SiS
 * multicast hash table: up to 16 16-bit words (256 filter bits).
 */
struct sis_hash_maddr_ctx {
	struct sis_softc *sc;		/* back pointer for sis_mchash() */
	uint16_t hashes[16];		/* hash table image to program */
};
/*
 * if_foreach_llmaddr() callback for SiS parts: accumulate one
 * multicast address into the in-memory hash table image.
 */
static u_int
sis_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct sis_hash_maddr_ctx *ctx = arg;
	uint32_t bitpos;

	bitpos = sis_mchash(ctx->sc, LLADDR(sdl));
	/* 16 filter bits per table word. */
	ctx->hashes[bitpos / 16] |= 1 << (bitpos % 16);
	return (1);
}
/*
 * Program the SiS 900/7016 receive filter.  The multicast hash table
 * is built in memory first (all-ones for promiscuous/allmulti or hash
 * overflow) and then copied into the chip word by word through the
 * RXFILT_CTL/RXFILT_DATA register pair.  The filter is disabled while
 * being reprogrammed and re-enabled at the end.
 */
static void
sis_rxfilter_sis(struct sis_softc *sc)
{
	if_t ifp;
	struct sis_hash_maddr_ctx ctx;
	uint32_t filter, i, n;

	ifp = sc->sis_ifp;
	/* hash table size */
	if (sc->sis_rev >= SIS_REV_635 || sc->sis_rev == SIS_REV_900B)
		n = 16;
	else
		n = 8;
	filter = CSR_READ_4(sc, SIS_RXFILT_CTL);
	if (filter & SIS_RXFILTCTL_ENABLE) {
		/* Filter must be off while its contents are rewritten. */
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, filter & ~SIS_RXFILTCTL_ENABLE);
		CSR_READ_4(sc, SIS_RXFILT_CTL);
	}
	filter &= ~(SIS_RXFILTCTL_ALLPHYS | SIS_RXFILTCTL_BROAD |
	    SIS_RXFILTCTL_ALLMULTI);
	if (if_getflags(ifp) & IFF_BROADCAST)
		filter |= SIS_RXFILTCTL_BROAD;
	if (if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) {
		filter |= SIS_RXFILTCTL_ALLMULTI;
		if (if_getflags(ifp) & IFF_PROMISC)
			filter |= SIS_RXFILTCTL_ALLPHYS;
		/* Accept everything: set every hash bit. */
		for (i = 0; i < n; i++)
			ctx.hashes[i] = ~0;
	} else {
		for (i = 0; i < n; i++)
			ctx.hashes[i] = 0;
		ctx.sc = sc;
		/* Too many groups for the table: fall back to allmulti. */
		if (if_foreach_llmaddr(ifp, sis_hash_maddr, &ctx) > n) {
			filter |= SIS_RXFILTCTL_ALLMULTI;
			for (i = 0; i < n; i++)
				ctx.hashes[i] = ~0;
		}
	}
	/* Copy the table image into the chip, one word per iteration. */
	for (i = 0; i < n; i++) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA, ctx.hashes[i]);
	}
	/* Turn the receive filter on */
	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filter | SIS_RXFILTCTL_ENABLE);
	CSR_READ_4(sc, SIS_RXFILT_CTL);
}
/*
 * Issue a soft reset and wait for the chip to clear the RESET bit,
 * then disable any power-management/wake-on-LAN state that could keep
 * the chip from receiving normally.
 */
static void
sis_reset(struct sis_softc *sc)
{
	int i;

	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET);
	/* Poll until the chip deasserts RESET or we give up. */
	for (i = 0; i < SIS_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET))
			break;
	}
	if (i == SIS_TIMEOUT)
		device_printf(sc->sis_dev, "reset never completed\n");
	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
	/*
	 * If this is a NetSemi chip, make sure to clear
	 * PME mode.
	 */
	if (sc->sis_type == SIS_TYPE_83815) {
		CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS);
		CSR_WRITE_4(sc, NS_CLKRUN, 0);
	} else {
		/* Disable WOL functions. */
		CSR_WRITE_4(sc, SIS_PWRMAN_CTL, 0);
	}
}
/*
* Probe for an SiS chip. Check the PCI vendor and device
* IDs against our list and return a device name if we find a match.
*/
/*
 * Scan the supported-device table for a vendor/device ID match; set
 * the device description and claim the device if one is found.
 */
static int
sis_probe(device_t dev)
{
	const struct sis_type *t;

	for (t = sis_devs; t->sis_name != NULL; t++) {
		if (pci_get_vendor(dev) == t->sis_vid &&
		    pci_get_device(dev) == t->sis_did) {
			device_set_desc(dev, t->sis_name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}
/*
* Attach the interface. Allocate softc structures, do ifmedia
* setup and ethernet/BPF attach.
*/
/*
 * Attach: identify the chip, map registers, reset it, recover the
 * station address (the method varies wildly by vendor and revision),
 * allocate DMA resources, create/configure the ifnet, attach the
 * PHY(s) and finally hook the interrupt.  On failure we fall through
 * to sis_detach(), which only releases what was actually allocated.
 */
static int
sis_attach(device_t dev)
{
	u_char eaddr[ETHER_ADDR_LEN];
	struct sis_softc *sc;
	if_t ifp;
	int error = 0, pmc;

	sc = device_get_softc(dev);
	sc->sis_dev = dev;
	mtx_init(&sc->sis_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sis_stat_ch, &sc->sis_mtx, 0);
	/* Classify the controller; NatSemi parts need special handling. */
	if (pci_get_device(dev) == SIS_DEVICEID_900)
		sc->sis_type = SIS_TYPE_900;
	if (pci_get_device(dev) == SIS_DEVICEID_7016)
		sc->sis_type = SIS_TYPE_7016;
	if (pci_get_vendor(dev) == NS_VENDORID)
		sc->sis_type = SIS_TYPE_83815;
	sc->sis_rev = pci_read_config(dev, PCIR_REVID, 1);
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);
	error = bus_alloc_resources(dev, sis_res_spec, sc->sis_res);
	if (error) {
		device_printf(dev, "couldn't allocate resources\n");
		goto fail;
	}
	/* Reset the adapter. */
	sis_reset(sc);
	if (sc->sis_type == SIS_TYPE_900 &&
	    (sc->sis_rev == SIS_REV_635 ||
	    sc->sis_rev == SIS_REV_900B)) {
		SIO_SET(SIS_CFG_RND_CNT);
		SIO_SET(SIS_CFG_PERR_DETECT);
	}
	/*
	 * Get station address from the EEPROM.
	 */
	switch (pci_get_vendor(dev)) {
	case NS_VENDORID:
		sc->sis_srr = CSR_READ_4(sc, NS_SRR);
		/* We can't update the device description, so spew */
		if (sc->sis_srr == NS_SRR_15C)
			device_printf(dev, "Silicon Revision: DP83815C\n");
		else if (sc->sis_srr == NS_SRR_15D)
			device_printf(dev, "Silicon Revision: DP83815D\n");
		else if (sc->sis_srr == NS_SRR_16A)
			device_printf(dev, "Silicon Revision: DP83816A\n");
		else
			device_printf(dev, "Silicon Revision %x\n", sc->sis_srr);
		/*
		 * Reading the MAC address out of the EEPROM on
		 * the NatSemi chip takes a bit more work than
		 * you'd expect. The address spans 4 16-bit words,
		 * with the first word containing only a single bit.
		 * You have to shift everything over one bit to
		 * get it aligned properly. Also, the bits are
		 * stored backwards (the LSB is really the MSB,
		 * and so on) so you have to reverse them in order
		 * to get the MAC address into the form we want.
		 * Why? Who the hell knows.
		 */
		{
			uint16_t		tmp[4];

			sis_read_eeprom(sc, (caddr_t)&tmp,
			    NS_EE_NODEADDR, 4, 0);
			/* Shift everything over one bit. */
			tmp[3] = tmp[3] >> 1;
			tmp[3] |= tmp[2] << 15;
			tmp[2] = tmp[2] >> 1;
			tmp[2] |= tmp[1] << 15;
			tmp[1] = tmp[1] >> 1;
			tmp[1] |= tmp[0] << 15;
			/* Now reverse all the bits. */
			tmp[3] = sis_reverse(tmp[3]);
			tmp[2] = sis_reverse(tmp[2]);
			tmp[1] = sis_reverse(tmp[1]);
			eaddr[0] = (tmp[1] >> 0) & 0xFF;
			eaddr[1] = (tmp[1] >> 8) & 0xFF;
			eaddr[2] = (tmp[2] >> 0) & 0xFF;
			eaddr[3] = (tmp[2] >> 8) & 0xFF;
			eaddr[4] = (tmp[3] >> 0) & 0xFF;
			eaddr[5] = (tmp[3] >> 8) & 0xFF;
		}
		break;
	case SIS_VENDORID:
	default:
#if defined(__i386__) || defined(__amd64__)
		/*
		 * If this is a SiS 630E chipset with an embedded
		 * SiS 900 controller, we have to read the MAC address
		 * from the APC CMOS RAM. Our method for doing this
		 * is very ugly since we have to reach out and grab
		 * ahold of hardware for which we cannot properly
		 * allocate resources. This code is only compiled on
		 * the i386 architecture since the SiS 630E chipset
		 * is for x86 motherboards only. Note that there are
		 * a lot of magic numbers in this hack. These are
		 * taken from SiS's Linux driver. I'd like to replace
		 * them with proper symbolic definitions, but that
		 * requires some datasheets that I don't have access
		 * to at the moment.
		 */
		if (sc->sis_rev == SIS_REV_630S ||
		    sc->sis_rev == SIS_REV_630E ||
		    sc->sis_rev == SIS_REV_630EA1)
			sis_read_cmos(sc, dev, (caddr_t)&eaddr, 0x9, 6);
		else if (sc->sis_rev == SIS_REV_635 ||
		    sc->sis_rev == SIS_REV_630ET)
			sis_read_mac(sc, dev, (caddr_t)&eaddr);
		else if (sc->sis_rev == SIS_REV_96x) {
			/* Allow to read EEPROM from LAN. It is shared
			 * between a 1394 controller and the NIC and each
			 * time we access it, we need to set SIS_EECMD_REQ.
			 */
			SIO_SET(SIS_EECMD_REQ);
			for (int waittime = 0; waittime < SIS_TIMEOUT;
			    waittime++) {
				/* Force EEPROM to idle state. */
				sis_eeprom_idle(sc);
				if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECMD_GNT) {
					sis_read_eeprom(sc, (caddr_t)&eaddr,
					    SIS_EE_NODEADDR, 3, 0);
					break;
				}
				DELAY(1);
			}
			/*
			 * Set SIS_EECTL_CLK to high, so a other master
			 * can operate on the i2c bus.
			 */
			SIO_SET(SIS_EECTL_CLK);
			/* Refuse EEPROM access by LAN */
			SIO_SET(SIS_EECMD_DONE);
		} else
#endif
			sis_read_eeprom(sc, (caddr_t)&eaddr,
			    SIS_EE_NODEADDR, 3, 0);
		break;
	}
	sis_add_sysctls(sc);
	/* Allocate DMA'able memory. */
	if ((error = sis_dma_alloc(sc)) != 0)
		goto fail;
	/* if_alloc(9) cannot fail, so no NULL check is needed. */
	ifp = sc->sis_ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, sis_ioctl);
	if_setstartfn(ifp, sis_start);
	if_setinitfn(ifp, sis_init);
	if_setsendqlen(ifp, SIS_TX_LIST_CNT - 1);
	if_setsendqready(ifp);
	/* Advertise WOL if the chip has a power-management capability. */
	if (pci_find_cap(sc->sis_dev, PCIY_PMG, &pmc) == 0) {
		if (sc->sis_type == SIS_TYPE_83815)
			if_setcapabilitiesbit(ifp, IFCAP_WOL, 0);
		else
			if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
		if_setcapenable(ifp, if_getcapabilities(ifp));
	}
	/*
	 * Do MII setup.
	 */
	error = mii_attach(dev, &sc->sis_miibus, ifp, sis_ifmedia_upd,
	    sis_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}
	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);
	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif
	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->sis_res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, sis_intr, sc, &sc->sis_intrhand);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}
fail:
	if (error)
		sis_detach(dev);
	return (error);
}
/*
* Shutdown hardware and free up resources. This can be called any
* time after the mutex has been initialized. It is called in both
* the error case in attach and the normal detach case so it needs
* to be careful about only freeing resources that have actually been
* allocated.
*/
static int
sis_detach(device_t dev)
{
	struct sis_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->sis_mtx), ("sis mutex not initialized"));
	ifp = sc->sis_ifp;
#ifdef DEVICE_POLLING
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	/* These should only be active if attach succeeded. */
	if (device_is_attached(dev)) {
		SIS_LOCK(sc);
		sis_stop(sc);
		SIS_UNLOCK(sc);
		/* Drain outside the lock so a running tick can finish. */
		callout_drain(&sc->sis_stat_ch);
		ether_ifdetach(ifp);
	}
	if (sc->sis_miibus)
		device_delete_child(dev, sc->sis_miibus);
	bus_generic_detach(dev);
	/* Interrupt must be torn down before the resources go away. */
	if (sc->sis_intrhand)
		bus_teardown_intr(dev, sc->sis_res[1], sc->sis_intrhand);
	bus_release_resources(dev, sis_res_spec, sc->sis_res);
	if (ifp)
		if_free(ifp);
	sis_dma_free(sc);
	mtx_destroy(&sc->sis_mtx);
	return (0);
}
/* Argument block for sis_dmamap_cb(): receives the loaded bus address. */
struct sis_dmamap_arg {
	bus_addr_t		sis_busaddr;
};
/*
 * bus_dmamap_load() callback: record the single segment's bus address
 * in the caller-supplied context.  Does nothing on load error.
 */
static void
sis_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct sis_dmamap_arg *ctx = arg;

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	ctx->sis_busaddr = segs[0].ds_addr;
}
/*
 * Helper to create one descriptor ring: build a child DMA tag with the
 * requested alignment, allocate zeroed coherent memory for it, and
 * load it to obtain its bus address.  'msg' names the ring in error
 * messages.  Returns 0 or ENOMEM; partially created resources are
 * released later by sis_dma_free().
 */
static int
sis_dma_ring_alloc(struct sis_softc *sc, bus_size_t alignment,
    bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
    bus_addr_t *paddr, const char *msg)
{
	struct sis_dmamap_arg	ctx;
	int			error;

	error = bus_dma_tag_create(sc->sis_parent_tag, alignment, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1,
	    maxsize, 0, NULL, NULL, tag);
	if (error != 0) {
		device_printf(sc->sis_dev,
		    "could not create %s dma tag\n", msg);
		return (ENOMEM);
	}
	/* Allocate DMA'able memory for ring. */
	error = bus_dmamem_alloc(*tag, (void **)ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
	if (error != 0) {
		device_printf(sc->sis_dev,
		    "could not allocate DMA'able memory for %s\n", msg);
		return (ENOMEM);
	}
	/* Load the address of the ring. */
	ctx.sis_busaddr = 0;
	error = bus_dmamap_load(*tag, *map, *ring, maxsize, sis_dmamap_cb,
	    &ctx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sis_dev,
		    "could not load DMA'able memory for %s\n", msg);
		return (ENOMEM);
	}
	*paddr = ctx.sis_busaddr;
	return (0);
}
/*
 * Allocate all DMA resources: the 32-bit parent tag, the TX and RX
 * descriptor rings, the RX/TX mbuf tags, a spare RX map used for
 * buffer swapping in sis_newbuf(), and per-descriptor maps.  On error
 * the caller (sis_attach) unwinds through sis_detach -> sis_dma_free.
 */
static int
sis_dma_alloc(struct sis_softc *sc)
{
	struct sis_rxdesc	*rxd;
	struct sis_txdesc	*txd;
	int			error, i;

	/* Allocate the parent bus DMA tag appropriate for PCI. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sis_dev),
	    1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
	    0, NULL, NULL, &sc->sis_parent_tag);
	if (error != 0) {
		device_printf(sc->sis_dev,
		    "could not allocate parent dma tag\n");
		return (ENOMEM);
	}
	/* Create RX ring. */
	error = sis_dma_ring_alloc(sc, SIS_DESC_ALIGN, SIS_RX_LIST_SZ,
	    &sc->sis_rx_list_tag, (uint8_t **)&sc->sis_rx_list,
	    &sc->sis_rx_list_map, &sc->sis_rx_paddr, "RX ring");
	if (error)
		return (error);
	/* Create TX ring. */
	error = sis_dma_ring_alloc(sc, SIS_DESC_ALIGN, SIS_TX_LIST_SZ,
	    &sc->sis_tx_list_tag, (uint8_t **)&sc->sis_tx_list,
	    &sc->sis_tx_list_map, &sc->sis_tx_paddr, "TX ring");
	if (error)
		return (error);
	/* Create tag for RX mbufs: one cluster-sized segment each. */
	error = bus_dma_tag_create(sc->sis_parent_tag, SIS_RX_BUF_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
	    MCLBYTES, 0, NULL, NULL, &sc->sis_rx_tag);
	if (error) {
		device_printf(sc->sis_dev, "could not allocate RX dma tag\n");
		return (error);
	}
	/* Create tag for TX mbufs: chains of up to SIS_MAXTXSEGS. */
	error = bus_dma_tag_create(sc->sis_parent_tag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * SIS_MAXTXSEGS, SIS_MAXTXSEGS, MCLBYTES, 0, NULL, NULL,
	    &sc->sis_tx_tag);
	if (error) {
		device_printf(sc->sis_dev, "could not allocate TX dma tag\n");
		return (error);
	}
	/* Create DMA maps for RX buffers. */
	error = bus_dmamap_create(sc->sis_rx_tag, 0, &sc->sis_rx_sparemap);
	if (error) {
		device_printf(sc->sis_dev,
		    "can't create spare DMA map for RX\n");
		return (error);
	}
	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
		rxd = &sc->sis_rxdesc[i];
		rxd->rx_m = NULL;
		error = bus_dmamap_create(sc->sis_rx_tag, 0, &rxd->rx_dmamap);
		if (error) {
			device_printf(sc->sis_dev,
			    "can't create DMA map for RX\n");
			return (error);
		}
	}
	/* Create DMA maps for TX buffers. */
	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
		txd = &sc->sis_txdesc[i];
		txd->tx_m = NULL;
		error = bus_dmamap_create(sc->sis_tx_tag, 0, &txd->tx_dmamap);
		if (error) {
			device_printf(sc->sis_dev,
			    "can't create DMA map for TX\n");
			return (error);
		}
	}
	return (0);
}
/*
 * Release everything sis_dma_alloc() created, in reverse order.  Each
 * resource is guarded by a NULL/zero check so this is safe to call
 * from the attach-failure path with a partially constructed softc.
 */
static void
sis_dma_free(struct sis_softc *sc)
{
	struct sis_rxdesc	*rxd;
	struct sis_txdesc	*txd;
	int			i;

	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
		rxd = &sc->sis_rxdesc[i];
		if (rxd->rx_dmamap)
			bus_dmamap_destroy(sc->sis_rx_tag, rxd->rx_dmamap);
	}
	if (sc->sis_rx_sparemap)
		bus_dmamap_destroy(sc->sis_rx_tag, sc->sis_rx_sparemap);
	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
		txd = &sc->sis_txdesc[i];
		if (txd->tx_dmamap)
			bus_dmamap_destroy(sc->sis_tx_tag, txd->tx_dmamap);
	}
	if (sc->sis_rx_tag)
		bus_dma_tag_destroy(sc->sis_rx_tag);
	if (sc->sis_tx_tag)
		bus_dma_tag_destroy(sc->sis_tx_tag);
	/* Destroy RX ring: unload, free memory, then drop the tag. */
	if (sc->sis_rx_paddr)
		bus_dmamap_unload(sc->sis_rx_list_tag, sc->sis_rx_list_map);
	if (sc->sis_rx_list)
		bus_dmamem_free(sc->sis_rx_list_tag, sc->sis_rx_list,
		    sc->sis_rx_list_map);
	if (sc->sis_rx_list_tag)
		bus_dma_tag_destroy(sc->sis_rx_list_tag);
	/* Destroy TX ring. */
	if (sc->sis_tx_paddr)
		bus_dmamap_unload(sc->sis_tx_list_tag, sc->sis_tx_list_map);
	if (sc->sis_tx_list)
		bus_dmamem_free(sc->sis_tx_list_tag, sc->sis_tx_list,
		    sc->sis_tx_list_map);
	if (sc->sis_tx_list_tag)
		bus_dma_tag_destroy(sc->sis_tx_list_tag);
	/* Destroy the parent tag. */
	if (sc->sis_parent_tag)
		bus_dma_tag_destroy(sc->sis_parent_tag);
}
/*
* Initialize the TX and RX descriptors and allocate mbufs for them. Note that
* we arrange the descriptors in a closed ring, so that the last descriptor
* points back to the first.
*/
static int
sis_ring_init(struct sis_softc *sc)
{
	struct sis_rxdesc	*rxd;
	struct sis_txdesc	*txd;
	bus_addr_t		next;
	int			error, i;

	/* Chain TX descriptors into a ring; last entry points to first. */
	bzero(&sc->sis_tx_list[0], SIS_TX_LIST_SZ);
	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
		txd = &sc->sis_txdesc[i];
		txd->tx_m = NULL;
		if (i == SIS_TX_LIST_CNT - 1)
			next = SIS_TX_RING_ADDR(sc, 0);
		else
			next = SIS_TX_RING_ADDR(sc, i + 1);
		sc->sis_tx_list[i].sis_next = htole32(SIS_ADDR_LO(next));
	}
	sc->sis_tx_prod = sc->sis_tx_cons = sc->sis_tx_cnt = 0;
	bus_dmamap_sync(sc->sis_tx_list_tag, sc->sis_tx_list_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Same for RX, but each slot also gets a fresh mbuf cluster. */
	sc->sis_rx_cons = 0;
	bzero(&sc->sis_rx_list[0], SIS_RX_LIST_SZ);
	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
		rxd = &sc->sis_rxdesc[i];
		rxd->rx_desc = &sc->sis_rx_list[i];
		if (i == SIS_RX_LIST_CNT - 1)
			next = SIS_RX_RING_ADDR(sc, 0);
		else
			next = SIS_RX_RING_ADDR(sc, i + 1);
		rxd->rx_desc->sis_next = htole32(SIS_ADDR_LO(next));
		error = sis_newbuf(sc, rxd);
		if (error)
			return (error);
	}
	bus_dmamap_sync(sc->sis_rx_list_tag, sc->sis_rx_list_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}
/*
* Initialize an RX descriptor and attach an MBUF cluster.
*/
static int
sis_newbuf(struct sis_softc *sc, struct sis_rxdesc *rxd)
{
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = SIS_RXLEN;
#ifndef __NO_STRICT_ALIGNMENT
	/* Offset the buffer; sis_fixup_rx() slides the data back later. */
	m_adj(m, SIS_RX_BUF_ALIGN);
#endif
	/* Load the new mbuf with the spare map before touching rxd. */
	if (bus_dmamap_load_mbuf_sg(sc->sis_rx_tag, sc->sis_rx_sparemap, m,
	    segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sis_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sis_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the spare map with the descriptor's map. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->sis_rx_sparemap;
	sc->sis_rx_sparemap = map;
	bus_dmamap_sync(sc->sis_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	/* Hand the buffer back to the chip. */
	rxd->rx_desc->sis_ptr = htole32(SIS_ADDR_LO(segs[0].ds_addr));
	rxd->rx_desc->sis_cmdsts = htole32(SIS_RXLEN);
	return (0);
}
/*
 * Re-arm an RX descriptor in place after an error or mbuf-allocation
 * failure: rewriting cmdsts with the buffer length returns ownership
 * to the chip, and the existing mbuf is simply reused.
 */
static __inline void
sis_discard_rxbuf(struct sis_rxdesc *rxd)
{

	rxd->rx_desc->sis_cmdsts = htole32(SIS_RXLEN);
}
#ifndef __NO_STRICT_ALIGNMENT
/*
 * The chip wants RX buffers aligned to SIS_RX_BUF_ALIGN, which leaves
 * the payload misaligned on strict-alignment machines.  Slide the
 * received frame back by (SIS_RX_BUF_ALIGN - ETHER_ALIGN) bytes, 16
 * bits at a time, so the IP header ends up properly aligned.
 */
static __inline void
sis_fixup_rx(struct mbuf *m)
{
	uint16_t		*src, *dst;
	int			i;

	src = mtod(m, uint16_t *);
	dst = src - (SIS_RX_BUF_ALIGN - ETHER_ALIGN) / sizeof(*src);
	/* Copy forward; the regions overlap but dst < src, so it's safe. */
	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;
	m->m_data -= SIS_RX_BUF_ALIGN - ETHER_ALIGN;
}
#endif
/*
* A frame has been uploaded: pass the resulting mbuf chain up to
* the higher level protocols.
*/
static int
sis_rxeof(struct sis_softc *sc)
{
	struct mbuf		*m;
	if_t			ifp;
	struct sis_rxdesc	*rxd;
	struct sis_desc		*cur_rx;
	int			prog, rx_cons, rx_npkts = 0, total_len;
	uint32_t		rxstat;

	SIS_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->sis_rx_list_tag, sc->sis_rx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	rx_cons = sc->sis_rx_cons;
	ifp = sc->sis_ifp;

	/* Walk the ring until we find a descriptor the chip still owns. */
	for (prog = 0; (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0;
	    SIS_INC(rx_cons, SIS_RX_LIST_CNT), prog++) {
#ifdef DEVICE_POLLING
		if (if_getcapenable(ifp) & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		cur_rx = &sc->sis_rx_list[rx_cons];
		rxstat = le32toh(cur_rx->sis_cmdsts);
		if ((rxstat & SIS_CMDSTS_OWN) == 0)
			break;
		rxd = &sc->sis_rxdesc[rx_cons];

		total_len = (rxstat & SIS_CMDSTS_BUFLEN) - ETHER_CRC_LEN;
		/* VLAN-sized frames are not "giant" when VLAN_MTU is on. */
		if ((if_getcapenable(ifp) & IFCAP_VLAN_MTU) != 0 &&
		    total_len <= (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN -
		    ETHER_CRC_LEN))
			rxstat &= ~SIS_RXSTAT_GIANT;
		if (SIS_RXSTAT_ERROR(rxstat) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			if (rxstat & SIS_RXSTAT_COLL)
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
			/* Give the buffer back to the chip unchanged. */
			sis_discard_rxbuf(rxd);
			continue;
		}

		/* Add a new receive buffer to the ring. */
		m = rxd->rx_m;
		if (sis_newbuf(sc, rxd) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			sis_discard_rxbuf(rxd);
			continue;
		}

		/* No errors; receive the packet. */
		m->m_pkthdr.len = m->m_len = total_len;
#ifndef __NO_STRICT_ALIGNMENT
		/*
		 * On architectures without alignment problems we try to
		 * allocate a new buffer for the receive ring, and pass up
		 * the one where the packet is already, saving the expensive
		 * copy operation.
		 */
		sis_fixup_rx(m);
#endif
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		m->m_pkthdr.rcvif = ifp;

		/* Drop the lock across the stack input call. */
		SIS_UNLOCK(sc);
		if_input(ifp, m);
		SIS_LOCK(sc);
		rx_npkts++;
	}

	if (prog > 0) {
		sc->sis_rx_cons = rx_cons;
		bus_dmamap_sync(sc->sis_rx_list_tag, sc->sis_rx_list_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	return (rx_npkts);
}
/*
* A frame was downloaded to the chip. It's safe for us to clean up
* the list buffers.
*/
static void
sis_txeof(struct sis_softc *sc)
{
	if_t			ifp;
	struct sis_desc		*cur_tx;
	struct sis_txdesc	*txd;
	uint32_t		cons, txstat;

	SIS_LOCK_ASSERT(sc);

	cons = sc->sis_tx_cons;
	/* Nothing outstanding. */
	if (cons == sc->sis_tx_prod)
		return;

	ifp = sc->sis_ifp;
	bus_dmamap_sync(sc->sis_tx_list_tag, sc->sis_tx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != sc->sis_tx_prod; SIS_INC(cons, SIS_TX_LIST_CNT)) {
		cur_tx = &sc->sis_tx_list[cons];
		txstat = le32toh(cur_tx->sis_cmdsts);
		/* Chip still owns this descriptor: stop here. */
		if ((txstat & SIS_CMDSTS_OWN) != 0)
			break;
		txd = &sc->sis_txdesc[cons];
		/* Only the last descriptor of a frame carries the mbuf. */
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->sis_tx_tag, txd->tx_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sis_tx_tag, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			if ((txstat & SIS_CMDSTS_PKT_OK) != 0) {
				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
				    (txstat & SIS_TXSTAT_COLLCNT) >> 16);
			} else {
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
				if (txstat & SIS_TXSTAT_EXCESSCOLLS)
					if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
				if (txstat & SIS_TXSTAT_OUTOFWINCOLL)
					if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
			}
		}
		sc->sis_tx_cnt--;
		/* A slot freed up, so transmission may resume. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}
	sc->sis_tx_cons = cons;
	/* All frames done: cancel the watchdog. */
	if (sc->sis_tx_cnt == 0)
		sc->sis_watchdog_timer = 0;
}
/*
 * Once-a-second callout (runs with the softc lock held via the
 * callout mutex): drive autonegotiation, kick the watchdog, re-poll
 * link state while it is down, and reschedule.
 */
static void
sis_tick(void *xsc)
{
	struct sis_softc	*sc;
	struct mii_data		*mii;

	sc = xsc;
	SIS_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->sis_miibus);
	mii_tick(mii);
	sis_watchdog(sc);
	/* While link is down, keep checking whether it came back. */
	if ((sc->sis_flags & SIS_FLAG_LINK) == 0)
		sis_miibus_statchg(sc->sis_dev);
	callout_reset(&sc->sis_stat_ch, hz, sis_tick, sc);
}
#ifdef DEVICE_POLLING
static poll_handler_t sis_poll;

/*
 * DEVICE_POLLING handler: process up to 'count' RX packets, reap TX
 * completions, and restart transmission.  On POLL_AND_CHECK_STATUS
 * (or when the RX budget was not exhausted) also read and act on the
 * ISR, since interrupts are disabled in polling mode.  Returns the
 * number of packets received.
 */
static int
sis_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct sis_softc *sc = if_getsoftc(ifp);
	int rx_npkts = 0;

	SIS_LOCK(sc);
	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
		SIS_UNLOCK(sc);
		return (rx_npkts);
	}

	/*
	 * On the sis, reading the status register also clears it.
	 * So before returning to intr mode we must make sure that all
	 * possible pending sources of interrupts have been served.
	 * In practice this means run to completion the *eof routines,
	 * and then call the interrupt routine
	 */
	sc->rxcycles = count;
	rx_npkts = sis_rxeof(sc);
	sis_txeof(sc);
	if (!if_sendq_empty(ifp))
		sis_startl(ifp);

	if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
		uint32_t	status;

		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, SIS_ISR);

		if (status & (SIS_ISR_RX_ERR|SIS_ISR_RX_OFLOW))
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);

		/* RX engine idled (e.g. out of descriptors): restart it. */
		if (status & (SIS_ISR_RX_IDLE))
			SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);

		/* Fatal bus error: reinitialize the whole chip. */
		if (status & SIS_ISR_SYSERR) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			sis_initl(sc);
		}
	}

	SIS_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */
/*
 * Interrupt handler.  Reading the ISR clears all pending interrupt
 * bits, so the handler loops re-reading it until no interesting bits
 * remain, servicing TX/RX completions, restarting an idle RX engine,
 * and fully reinitializing on a system (bus) error.  Interrupts are
 * masked for the duration and re-enabled on the way out.
 */
static void
sis_intr(void *arg)
{
	struct sis_softc	*sc;
	if_t			ifp;
	uint32_t		status;

	sc = arg;
	ifp = sc->sis_ifp;

	SIS_LOCK(sc);
#ifdef DEVICE_POLLING
	/* Polling mode owns the chip; a stray interrupt is ignored. */
	if (if_getcapenable(ifp) & IFCAP_POLLING) {
		SIS_UNLOCK(sc);
		return;
	}
#endif

	/* Reading the ISR register clears all interrupts. */
	status = CSR_READ_4(sc, SIS_ISR);
	if ((status & SIS_INTRS) == 0) {
		/* Not ours. */
		SIS_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, SIS_IER, 0);

	for (;(status & SIS_INTRS) != 0;) {
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
			break;
		if (status &
		    (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR |
		    SIS_ISR_TX_OK | SIS_ISR_TX_IDLE) )
			sis_txeof(sc);

		if (status & (SIS_ISR_RX_DESC_OK | SIS_ISR_RX_OK |
		    SIS_ISR_RX_ERR | SIS_ISR_RX_IDLE))
			sis_rxeof(sc);

		if (status & SIS_ISR_RX_OFLOW)
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);

		/* RX engine idled: kick it back into gear. */
		if (status & (SIS_ISR_RX_IDLE))
			SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);

		if (status & SIS_ISR_SYSERR) {
			/* Fatal: reset and reinitialize the chip. */
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			sis_initl(sc);
			SIS_UNLOCK(sc);
			return;
		}
		status = CSR_READ_4(sc, SIS_ISR);
	}

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
		/* Re-enable interrupts. */
		CSR_WRITE_4(sc, SIS_IER, 1);

		if (!if_sendq_empty(ifp))
			sis_startl(ifp);
	}

	SIS_UNLOCK(sc);
}
/*
* Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
* pointers to the fragment pointers.
*/
static int
sis_encap(struct sis_softc *sc, struct mbuf **m_head)
{
	struct mbuf		*m;
	struct sis_txdesc	*txd;
	struct sis_desc		*f;
	bus_dma_segment_t	segs[SIS_MAXTXSEGS];
	bus_dmamap_t		map;
	int			error, i, frag, nsegs, prod;
	int			padlen;

	prod = sc->sis_tx_prod;
	txd = &sc->sis_txdesc[prod];
	/*
	 * Some chip revisions do not auto-pad runt frames; pad them by
	 * hand with zeroed bytes (avoids leaking kernel memory).
	 */
	if ((sc->sis_flags & SIS_FLAG_MANUAL_PAD) != 0 &&
	    (*m_head)->m_pkthdr.len < SIS_MIN_FRAMELEN) {
		m = *m_head;
		padlen = SIS_MIN_FRAMELEN - m->m_pkthdr.len;
		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_NOWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		/* Padding needs contiguous trailing space in one mbuf. */
		if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) {
			m = m_defrag(m, M_NOWAIT);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		/*
		 * Manually pad short frames, and zero the pad space
		 * to avoid leaking data.
		 */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
		*m_head = m;
	}
	error = bus_dmamap_load_mbuf_sg(sc->sis_tx_tag, txd->tx_dmamap,
	    *m_head, segs, &nsegs, 0);
	if (error == EFBIG) {
		/* Too many fragments: collapse the chain and retry once. */
		m = m_collapse(*m_head, M_NOWAIT, SIS_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sis_tx_tag, txd->tx_dmamap,
		    *m_head, segs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);

	/* Check for descriptor overruns. */
	if (sc->sis_tx_cnt + nsegs > SIS_TX_LIST_CNT - 1) {
		bus_dmamap_unload(sc->sis_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sis_tx_tag, txd->tx_dmamap, BUS_DMASYNC_PREWRITE);

	/*
	 * Fill one descriptor per DMA segment.  Every descriptor except
	 * the first is marked OWN immediately; the first gets OWN only
	 * at the very end, so the chip cannot start on a half-built
	 * frame.
	 */
	frag = prod;
	for (i = 0; i < nsegs; i++) {
		f = &sc->sis_tx_list[prod];
		if (i == 0)
			f->sis_cmdsts = htole32(segs[i].ds_len |
			    SIS_CMDSTS_MORE);
		else
			f->sis_cmdsts = htole32(segs[i].ds_len |
			    SIS_CMDSTS_OWN | SIS_CMDSTS_MORE);
		f->sis_ptr = htole32(SIS_ADDR_LO(segs[i].ds_addr));
		SIS_INC(prod, SIS_TX_LIST_CNT);
		sc->sis_tx_cnt++;
	}

	/* Update producer index. */
	sc->sis_tx_prod = prod;

	/* Remove MORE flag on the last descriptor. */
	prod = (prod - 1) & (SIS_TX_LIST_CNT - 1);
	f = &sc->sis_tx_list[prod];
	f->sis_cmdsts &= ~htole32(SIS_CMDSTS_MORE);

	/* Lastly transfer ownership of packet to the controller. */
	f = &sc->sis_tx_list[frag];
	f->sis_cmdsts |= htole32(SIS_CMDSTS_OWN);

	/* Swap the last and the first dmamaps. */
	map = txd->tx_dmamap;
	txd->tx_dmamap = sc->sis_txdesc[prod].tx_dmamap;
	sc->sis_txdesc[prod].tx_dmamap = map;
	sc->sis_txdesc[prod].tx_m = *m_head;

	return (0);
}
/*
 * if_start entry point: take the softc lock and hand off to the
 * locked transmit path.
 */
static void
sis_start(if_t ifp)
{
	struct sis_softc *sc = if_getsoftc(ifp);

	SIS_LOCK(sc);
	sis_startl(ifp);
	SIS_UNLOCK(sc);
}
/*
 * Locked transmit path: dequeue packets, encapsulate them onto the TX
 * ring (leaving a few descriptors of slack), tap BPF, and kick the TX
 * engine once if anything was queued.  Requires the softc lock; does
 * nothing unless the interface is running, not overactive, and has
 * link.
 */
static void
sis_startl(if_t ifp)
{
	struct sis_softc	*sc;
	struct mbuf		*m_head;
	int			queued;

	sc = if_getsoftc(ifp);

	SIS_LOCK_ASSERT(sc);

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->sis_flags & SIS_FLAG_LINK) == 0)
		return;

	/* Keep 4 descriptors of headroom on the ring. */
	for (queued = 0; !if_sendq_empty(ifp) &&
	    sc->sis_tx_cnt < SIS_TX_LIST_CNT - 4;) {
		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;

		if (sis_encap(sc, &m_head) != 0) {
			/* Encap freed the mbuf: nothing to requeue. */
			if (m_head == NULL)
				break;
			/* Ring full: put it back and mark us busy. */
			if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		queued++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (queued) {
		/* Transmit */
		bus_dmamap_sync(sc->sis_tx_list_tag, sc->sis_tx_list_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->sis_watchdog_timer = 5;
	}
}
/*
 * if_init entry point: serialize and perform the real work in
 * sis_initl() under the driver lock.
 */
static void
sis_init(void *xsc)
{
	struct sis_softc *sc;

	sc = xsc;
	SIS_LOCK(sc);
	sis_initl(sc);
	SIS_UNLOCK(sc);
}
/*
 * Locked (re)initialization: reset the chip, program the station
 * address and RX filter, set up the descriptor rings, and bring the
 * MAC online.  Must be called with the driver lock held.
 */
static void
sis_initl(struct sis_softc *sc)
{
	if_t ifp = sc->sis_ifp;
	struct mii_data *mii;
	uint8_t *eaddr;

	SIS_LOCK_ASSERT(sc);

	/* Already running: nothing to do. */
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	sis_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	sis_reset(sc);

#ifdef notyet
	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) {
		/*
		 * Configure 400usec of interrupt holdoff.  This is based
		 * on empirical tests on a Soekris 4801.
		 */
		CSR_WRITE_4(sc, NS_IHR, 0x100 | 4);
	}
#endif

	mii = device_get_softc(sc->sis_miibus);

	/*
	 * Set MAC address: the filter RAM is loaded 16 bits at a time
	 * through the RXFILT_CTL/RXFILT_DATA register pair; the NatSemi
	 * and SiS parts use different filter-address encodings.
	 */
	eaddr = if_getlladdr(sc->sis_ifp);
	if (sc->sis_type == SIS_TYPE_83815) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA, eaddr[0] | eaddr[1] << 8);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA, eaddr[2] | eaddr[3] << 8);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA, eaddr[4] | eaddr[5] << 8);
	} else {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA, eaddr[0] | eaddr[1] << 8);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA, eaddr[2] | eaddr[3] << 8);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA, eaddr[4] | eaddr[5] << 8);
	}

	/* Init circular TX/RX lists. */
	if (sis_ring_init(sc) != 0) {
		device_printf(sc->sis_dev,
		    "initialization failed: no memory for rx buffers\n");
		sis_stop(sc);
		return;
	}

	/* Honor the manual-pad tunable (NatSemi parts only; see sysctl). */
	if (sc->sis_type == SIS_TYPE_83815) {
		if (sc->sis_manual_pad != 0)
			sc->sis_flags |= SIS_FLAG_MANUAL_PAD;
		else
			sc->sis_flags &= ~SIS_FLAG_MANUAL_PAD;
	}

	/*
	 * Short Cable Receive Errors (MP21.E)
	 * also: Page 78 of the DP83815 data sheet (september 2002 version)
	 * recommends the following register settings "for optimum
	 * performance." for rev 15C.  Set this also for 15D parts as
	 * they require it in practice.
	 */
	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr <= NS_SRR_15D) {
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
		CSR_WRITE_4(sc, NS_PHY_CR, 0x189C);
		/* set val for c2 */
		CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000);
		/* load/kill c2 */
		CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040);
		/* raise SD off, from 4 to c */
		CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C);
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
	}

	/* Program promiscuous/multicast RX filtering. */
	sis_rxfilter(sc);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, SIS_RX_LISTPTR, SIS_ADDR_LO(sc->sis_rx_paddr));
	CSR_WRITE_4(sc, SIS_TX_LISTPTR, SIS_ADDR_LO(sc->sis_tx_paddr));

	/* SIS_CFG_EDB_MASTER_EN indicates the EDB bus is used instead of
	 * the PCI bus. When this bit is set, the Max DMA Burst Size
	 * for TX/RX DMA should be no larger than 16 double words.
	 */
	if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN) {
		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64);
	} else {
		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256);
	}

	/* Accept Long Packets for VLAN support */
	SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER);

	/*
	 * Assume 100Mbps link, actual MAC configuration is done
	 * after getting a valid link.
	 */
	CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS);
#ifdef DEVICE_POLLING
	/*
	 * ... only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		CSR_WRITE_4(sc, SIS_IER, 0);
	else
#endif
	CSR_WRITE_4(sc, SIS_IER, 1);

	/* Clear MAC disable. */
	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);

	/* Kick autonegotiation; the link flag is set from the tick/statchg. */
	sc->sis_flags &= ~SIS_FLAG_LINK;
	mii_mediachg(mii);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	/* Start the one-second stats/link tick. */
	callout_reset(&sc->sis_stat_ch, hz, sis_tick, sc);
}
/*
* Set media options.
*/
/*
 * Set media options: reset each attached PHY, then renegotiate.
 */
static int
sis_ifmedia_upd(if_t ifp)
{
	struct sis_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii;
	struct mii_softc *phy;
	int rv;

	SIS_LOCK(sc);
	mii = device_get_softc(sc->sis_miibus);
	LIST_FOREACH(phy, &mii->mii_phys, mii_list)
		PHY_RESET(phy);
	rv = mii_mediachg(mii);
	SIS_UNLOCK(sc);

	return (rv);
}
/*
* Report current media status.
*/
/*
 * Report current media status from the MII layer.
 */
static void
sis_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct sis_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii;

	SIS_LOCK(sc);
	mii = device_get_softc(sc->sis_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	SIS_UNLOCK(sc);
}
/*
 * ioctl handler.  Flag changes, multicast updates, and capability
 * toggles are processed under the driver lock; media ioctls are
 * forwarded to the MII layer and everything else to ether_ioctl().
 */
static int
sis_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct sis_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (command) {
	case SIOCSIFFLAGS:
		SIS_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			/*
			 * If only PROMISC/ALLMULTI changed while running,
			 * just reprogram the RX filter; otherwise do a
			 * full reinit.
			 */
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
			    ((if_getflags(ifp) ^ sc->sis_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				sis_rxfilter(sc);
			else
				sis_initl(sc);
		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			sis_stop(sc);
		sc->sis_if_flags = if_getflags(ifp);
		SIS_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Multicast list changed: reprogram the hardware filter. */
		SIS_LOCK(sc);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			sis_rxfilter(sc);
		SIS_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->sis_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		SIS_LOCK(sc);
		/* mask holds the capability bits that are being toggled. */
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0 &&
		    (IFCAP_POLLING & if_getcapabilities(ifp)) != 0) {
			if_togglecapenable(ifp, IFCAP_POLLING);
			if ((IFCAP_POLLING & if_getcapenable(ifp)) != 0) {
				error = ether_poll_register(sis_poll, ifp);
				if (error != 0) {
					SIS_UNLOCK(sc);
					break;
				}
				/* Disable interrupts. */
				CSR_WRITE_4(sc, SIS_IER, 0);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				CSR_WRITE_4(sc, SIS_IER, 1);
			}
		}
#endif /* DEVICE_POLLING */
		/* Wake-on-LAN toggles take effect at next suspend/shutdown. */
		if ((mask & IFCAP_WOL) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL) != 0) {
			if ((mask & IFCAP_WOL_UCAST) != 0)
				if_togglecapenable(ifp, IFCAP_WOL_UCAST);
			if ((mask & IFCAP_WOL_MCAST) != 0)
				if_togglecapenable(ifp, IFCAP_WOL_MCAST);
			if ((mask & IFCAP_WOL_MAGIC) != 0)
				if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
		}
		SIS_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*
 * Per-tick watchdog check.  The timer is armed (to 5) whenever frames
 * are queued to the chip; if it counts down to zero before the TX
 * completion interrupt clears it, assume the chip wedged and reinit.
 * Called with the driver lock held.
 */
static void
sis_watchdog(struct sis_softc *sc)
{

	SIS_LOCK_ASSERT(sc);

	/* No timeout armed. */
	if (sc->sis_watchdog_timer == 0)
		return;
	/* Armed but not yet expired. */
	if (--sc->sis_watchdog_timer > 0)
		return;

	device_printf(sc->sis_dev, "watchdog timeout\n");
	if_inc_counter(sc->sis_ifp, IFCOUNTER_OERRORS, 1);

	/* Force a full reinitialization, then restart transmission. */
	if_setdrvflagbits(sc->sis_ifp, 0, IFF_DRV_RUNNING);
	sis_initl(sc);

	if (!if_sendq_empty(sc->sis_ifp))
		sis_startl(sc->sis_ifp);
}
/*
* Stop the adapter and free any mbufs allocated to the
* RX and TX lists.
*/
static void
sis_stop(struct sis_softc *sc)
{
	if_t ifp;
	struct sis_rxdesc *rxd;
	struct sis_txdesc *txd;
	int i;

	SIS_LOCK_ASSERT(sc);

	ifp = sc->sis_ifp;
	/* Disarm the TX watchdog and the stats tick. */
	sc->sis_watchdog_timer = 0;

	callout_stop(&sc->sis_stat_ch);

	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	/* Mask and acknowledge all interrupts. */
	CSR_WRITE_4(sc, SIS_IER, 0);
	CSR_WRITE_4(sc, SIS_IMR, 0);
	CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */
	/* Halt the TX/RX DMA engines, then clear the list pointers. */
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE);
	DELAY(1000);
	CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0);
	CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);

	sc->sis_flags &= ~SIS_FLAG_LINK;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
		rxd = &sc->sis_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->sis_rx_tag, rxd->rx_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sis_rx_tag, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
		txd = &sc->sis_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->sis_tx_tag, txd->tx_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sis_tx_tag, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}
/*
* Stop all chip I/O so that the kernel's probe routines don't
* get confused by errant DMAs when rebooting.
*/
static int
sis_shutdown(device_t dev)
{

	/* Shutdown is handled like suspend: quiesce the chip and arm WOL. */
	return (sis_suspend(dev));
}
/*
 * Suspend: stop all chip activity and, if enabled, program
 * wake-on-LAN before the system powers down.
 */
static int
sis_suspend(device_t dev)
{
	struct sis_softc *sc;

	sc = device_get_softc(dev);
	SIS_LOCK(sc);
	sis_stop(sc);
	sis_wol(sc);
	SIS_UNLOCK(sc);
	return (0);
}
/*
 * Resume: if the interface was administratively up, force a full
 * reinitialization (the chip lost all state across suspend).
 */
static int
sis_resume(device_t dev)
{
	struct sis_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	SIS_LOCK(sc);
	ifp = sc->sis_ifp;
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		/* Clear RUNNING so sis_initl() does not short-circuit. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		sis_initl(sc);
	}
	SIS_UNLOCK(sc);
	return (0);
}
/*
 * Program wake-on-LAN for suspend/shutdown.  NatSemi DP83815 parts use
 * the NS_WCSR/NS_CLKRUN registers; SiS parts use SIS_PWRMAN_CTL plus
 * the standard PCI power-management capability.  No-op unless WOL is
 * enabled on the interface.
 */
static void
sis_wol(struct sis_softc *sc)
{
	if_t ifp;
	uint32_t val;
	uint16_t pmstat;
	int pmc;

	ifp = sc->sis_ifp;
	if ((if_getcapenable(ifp) & IFCAP_WOL) == 0)
		return;

	if (sc->sis_type == SIS_TYPE_83815) {
		/* Reset RXDP. */
		CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);

		/* Configure WOL events. */
		CSR_READ_4(sc, NS_WCSR);
		val = 0;
		if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) != 0)
			val |= NS_WCSR_WAKE_UCAST;
		if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) != 0)
			val |= NS_WCSR_WAKE_MCAST;
		if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
			val |= NS_WCSR_WAKE_MAGIC;
		CSR_WRITE_4(sc, NS_WCSR, val);
		/* Enable PME and clear PMESTS. */
		val = CSR_READ_4(sc, NS_CLKRUN);
		val |= NS_CLKRUN_PMEENB | NS_CLKRUN_PMESTS;
		CSR_WRITE_4(sc, NS_CLKRUN, val);
		/* Enable silent RX mode. */
		SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
	} else {
		/* SiS parts: need the PCI power-management capability. */
		if (pci_find_cap(sc->sis_dev, PCIY_PMG, &pmc) != 0)
			return;
		val = 0;
		if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
			val |= SIS_PWRMAN_WOL_MAGIC;
		CSR_WRITE_4(sc, SIS_PWRMAN_CTL, val);
		/* Request PME. */
		pmstat = pci_read_config(sc->sis_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
		if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
			pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->sis_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
}
/*
 * Register the per-device sysctl/tunable knobs.
 */
static void
sis_add_sysctls(struct sis_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(sc->sis_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sis_dev));

	/*
	 * Unlike most other controllers, NS DP83815/DP83816 controllers
	 * seem to pad with 0xFF when they encounter short frames.  According
	 * to RFC 1042 the pad bytes should be 0x00.  Turning this tunable
	 * on has the driver pad manually, but it is disabled by default
	 * because it consumes extra CPU cycles for short frames.
	 */
	sc->sis_manual_pad = 0;
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "manual_pad",
	    CTLFLAG_RWTUN, &sc->sis_manual_pad, 0, "Manually pad short frames");
}
/* Newbus method table tying sis(4) into the device framework. */
static device_method_t sis_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, sis_probe),
	DEVMETHOD(device_attach, sis_attach),
	DEVMETHOD(device_detach, sis_detach),
	DEVMETHOD(device_shutdown, sis_shutdown),
	DEVMETHOD(device_suspend, sis_suspend),
	DEVMETHOD(device_resume, sis_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg, sis_miibus_readreg),
	DEVMETHOD(miibus_writereg, sis_miibus_writereg),
	DEVMETHOD(miibus_statchg, sis_miibus_statchg),

	DEVMETHOD_END
};

static driver_t sis_driver = {
	"sis",
	sis_methods,
	sizeof(struct sis_softc)
};

/* Attach sis(4) under pci(4), and miibus(4) under each sis instance. */
DRIVER_MODULE(sis, pci, sis_driver, 0, 0);
DRIVER_MODULE(miibus, sis, miibus_driver, 0, 0);
diff --git a/sys/dev/sk/if_sk.c b/sys/dev/sk/if_sk.c
index 7505ef58cfe4..db5588047385 100644
--- a/sys/dev/sk/if_sk.c
+++ b/sys/dev/sk/if_sk.c
@@ -1,3736 +1,3731 @@
/* $OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $ */
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) 1997, 1998, 1999, 2000
* Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
/*-
* Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
/*
* SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
* the SK-984x series adapters, both single port and dual port.
* References:
* The XaQti XMAC II datasheet,
* https://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
* The SysKonnect GEnesis manual, http://www.syskonnect.com
*
* Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
* XMAC II datasheet online. I have put my copy at people.freebsd.org as a
* convenience to others until Vitesse corrects this problem:
*
* https://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
*
* Written by Bill Paul <wpaul@ee.columbia.edu>
* Department of Electrical Engineering
* Columbia University, New York City
*/
/*
* The SysKonnect gigabit ethernet adapters consist of two main
* components: the SysKonnect GEnesis controller chip and the XaQti Corp.
* XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
* components and a PHY while the GEnesis controller provides a PCI
* interface with DMA support. Each card may have between 512K and
* 2MB of SRAM on board depending on the configuration.
*
* The SysKonnect GEnesis controller can have either one or two XMAC
* chips connected to it, allowing single or dual port NIC configurations.
* SysKonnect has the distinction of being the only vendor on the market
* with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
* dual DMA queues, packet/MAC/transmit arbiters and direct access to the
* XMAC registers. This driver takes advantage of these features to allow
* both XMACs to operate as independent interfaces.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/brgphyreg.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#if 0
#define SK_USEIOSPACE
#endif
#include <dev/sk/if_skreg.h>
#include <dev/sk/xmaciireg.h>
#include <dev/sk/yukonreg.h>
MODULE_DEPEND(sk, pci, 1, 1, 1);
MODULE_DEPEND(sk, ether, 1, 1, 1);
MODULE_DEPEND(sk, miibus, 1, 1, 1);
/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
/*
 * PCI vendor/device IDs supported by skc(4).  The table is scanned by
 * skc_probe() and terminated by an all-zero sentinel entry.
 */
static const struct sk_type sk_devs[] = {
	{
		VENDORID_SK,
		DEVICEID_SK_V1,
		"SysKonnect Gigabit Ethernet (V1.0)"
	},
	{
		VENDORID_SK,
		DEVICEID_SK_V2,
		"SysKonnect Gigabit Ethernet (V2.0)"
	},
	{
		VENDORID_MARVELL,
		DEVICEID_SK_V2,
		"Marvell Gigabit Ethernet"
	},
	{
		VENDORID_MARVELL,
		DEVICEID_BELKIN_5005,
		"Belkin F5D5005 Gigabit Ethernet"
	},
	{
		VENDORID_3COM,
		DEVICEID_3COM_3C940,
		"3Com 3C940 Gigabit Ethernet"
	},
	{
		VENDORID_LINKSYS,
		DEVICEID_LINKSYS_EG1032,
		"Linksys EG1032 Gigabit Ethernet"
	},
	{
		VENDORID_DLINK,
		DEVICEID_DLINK_DGE530T_A1,
		"D-Link DGE-530T Gigabit Ethernet"
	},
	{
		VENDORID_DLINK,
		DEVICEID_DLINK_DGE530T_B1,
		"D-Link DGE-530T Gigabit Ethernet"
	},
	{ 0, 0, NULL }
};
static int skc_probe(device_t);
static int skc_attach(device_t);
static int skc_detach(device_t);
static int skc_shutdown(device_t);
static int skc_suspend(device_t);
static int skc_resume(device_t);
static bus_dma_tag_t skc_get_dma_tag(device_t, device_t);
static int sk_detach(device_t);
static int sk_probe(device_t);
static int sk_attach(device_t);
static void sk_tick(void *);
static void sk_yukon_tick(void *);
static void sk_intr(void *);
static void sk_intr_xmac(struct sk_if_softc *);
static void sk_intr_bcom(struct sk_if_softc *);
static void sk_intr_yukon(struct sk_if_softc *);
static __inline void sk_rxcksum(if_t, struct mbuf *, u_int32_t);
static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
static void sk_rxeof(struct sk_if_softc *);
static void sk_jumbo_rxeof(struct sk_if_softc *);
static void sk_txeof(struct sk_if_softc *);
static void sk_txcksum(if_t, struct mbuf *, struct sk_tx_desc *);
static int sk_encap(struct sk_if_softc *, struct mbuf **);
static void sk_start(if_t);
static void sk_start_locked(if_t);
static int sk_ioctl(if_t, u_long, caddr_t);
static void sk_init(void *);
static void sk_init_locked(struct sk_if_softc *);
static void sk_init_xmac(struct sk_if_softc *);
static void sk_init_yukon(struct sk_if_softc *);
static void sk_stop(struct sk_if_softc *);
static void sk_watchdog(void *);
static int sk_ifmedia_upd(if_t);
static void sk_ifmedia_sts(if_t, struct ifmediareq *);
static void sk_reset(struct sk_softc *);
static __inline void sk_discard_rxbuf(struct sk_if_softc *, int);
static __inline void sk_discard_jumbo_rxbuf(struct sk_if_softc *, int);
static int sk_newbuf(struct sk_if_softc *, int);
static int sk_jumbo_newbuf(struct sk_if_softc *, int);
static void sk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int sk_dma_alloc(struct sk_if_softc *);
static int sk_dma_jumbo_alloc(struct sk_if_softc *);
static void sk_dma_free(struct sk_if_softc *);
static void sk_dma_jumbo_free(struct sk_if_softc *);
static int sk_init_rx_ring(struct sk_if_softc *);
static int sk_init_jumbo_rx_ring(struct sk_if_softc *);
static void sk_init_tx_ring(struct sk_if_softc *);
static u_int32_t sk_win_read_4(struct sk_softc *, int);
static u_int16_t sk_win_read_2(struct sk_softc *, int);
static u_int8_t sk_win_read_1(struct sk_softc *, int);
static void sk_win_write_4(struct sk_softc *, int, u_int32_t);
static void sk_win_write_2(struct sk_softc *, int, u_int32_t);
static void sk_win_write_1(struct sk_softc *, int, u_int32_t);
static int sk_miibus_readreg(device_t, int, int);
static int sk_miibus_writereg(device_t, int, int, int);
static void sk_miibus_statchg(device_t);
static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int,
int);
static void sk_xmac_miibus_statchg(struct sk_if_softc *);
static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int,
int);
static void sk_marv_miibus_statchg(struct sk_if_softc *);
static uint32_t sk_xmchash(const uint8_t *);
static void sk_setfilt(struct sk_if_softc *, u_int16_t *, int);
static void sk_rxfilter(struct sk_if_softc *);
static void sk_rxfilter_genesis(struct sk_if_softc *);
static void sk_rxfilter_yukon(struct sk_if_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high);
static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);
/* Tunables. */
static int jumbo_disable = 0;
TUNABLE_INT("hw.skc.jumbo_disable", &jumbo_disable);
/*
 * It seems that SK-NET GENESIS supports very simple checksum offload
 * capability for Tx and I believe it can generate 0 checksum value for
 * UDP packets in Tx as the hardware can't differentiate UDP packets from
 * TCP packets. A 0 checksum value for a UDP packet is an invalid one as it
 * means the sender didn't perform checksum computation. For safety I
 * disabled UDP checksum offload capability at the moment.
 */
#define SK_CSUM_FEATURES (CSUM_TCP)
/*
* Note that we have newbus methods for both the GEnesis controller
* itself and the XMAC(s). The XMACs are children of the GEnesis, and
* the miibus code is a child of the XMACs. We need to do it this way
* so that the miibus drivers can access the PHY registers on the
* right PHY. It's not quite what I had in mind, but it's the only
* design that achieves the desired effect.
*/
/* Newbus method table for skc(4), the GEnesis/Yukon controller itself. */
static device_method_t skc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, skc_probe),
	DEVMETHOD(device_attach, skc_attach),
	DEVMETHOD(device_detach, skc_detach),
	DEVMETHOD(device_suspend, skc_suspend),
	DEVMETHOD(device_resume, skc_resume),
	DEVMETHOD(device_shutdown, skc_shutdown),

	/* Children (sk instances) inherit the controller's DMA tag. */
	DEVMETHOD(bus_get_dma_tag, skc_get_dma_tag),

	DEVMETHOD_END
};

static driver_t skc_driver = {
	"skc",
	skc_methods,
	sizeof(struct sk_softc)
};
/* Newbus method table for sk(4), the per-port XMAC/GMAC child device. */
static device_method_t sk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, sk_probe),
	DEVMETHOD(device_attach, sk_attach),
	DEVMETHOD(device_detach, sk_detach),
	DEVMETHOD(device_shutdown, bus_generic_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg, sk_miibus_readreg),
	DEVMETHOD(miibus_writereg, sk_miibus_writereg),
	DEVMETHOD(miibus_statchg, sk_miibus_statchg),

	DEVMETHOD_END
};

static driver_t sk_driver = {
	"sk",
	sk_methods,
	sizeof(struct sk_if_softc)
};

/* Hierarchy: skc(4) on pci(4), sk(4) on skc(4), miibus(4) on sk(4). */
DRIVER_MODULE(skc, pci, skc_driver, NULL, NULL);
DRIVER_MODULE(sk, skc, sk_driver, NULL, NULL);
DRIVER_MODULE(miibus, sk, miibus_driver, NULL, NULL);
/* Bus resources: BAR1 I/O ports (SK_USEIOSPACE) plus a shareable IRQ. */
static struct resource_spec sk_res_spec_io[] = {
	{ SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0, 0 }
};

/* Bus resources: BAR0 memory space (the default) plus a shareable IRQ. */
static struct resource_spec sk_res_spec_mem[] = {
	{ SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0, 0 }
};
/* Read-modify-write helpers for direct CSR access. */
#define SK_SETBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)

#define SK_CLRBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)

/* Same, but through the windowed register accessors (sk_win_*). */
#define SK_WIN_SETBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)

#define SK_WIN_CLRBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)

#define SK_WIN_CLRBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
/*
 * Windowed 32-bit register read.  In I/O-space mode the register file
 * is accessed indirectly through the RAP window-select register; in
 * memory-space mode every register is directly addressable.
 */
static u_int32_t
sk_win_read_4(struct sk_softc *sc, int reg)
{
#ifdef SK_USEIOSPACE
	/* Select the window containing 'reg', then read within it. */
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
#else
	return(CSR_READ_4(sc, reg));
#endif
}
/* Windowed 16-bit register read; see sk_win_read_4() for the scheme. */
static u_int16_t
sk_win_read_2(struct sk_softc *sc, int reg)
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
#else
	return(CSR_READ_2(sc, reg));
#endif
}
/* Windowed 8-bit register read; see sk_win_read_4() for the scheme. */
static u_int8_t
sk_win_read_1(struct sk_softc *sc, int reg)
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
#else
	return(CSR_READ_1(sc, reg));
#endif
}
/*
 * Windowed 32-bit register write.  In I/O-space mode the register file
 * is accessed indirectly through the RAP window-select register; in
 * memory-space mode every register is directly addressable.
 */
static void
sk_win_write_4(struct sk_softc *sc, int reg, u_int32_t val)
{
#ifdef SK_USEIOSPACE
	/* Select the window containing 'reg', then write within it. */
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
#else
	CSR_WRITE_4(sc, reg, val);
#endif
}
/* Windowed 16-bit register write; see sk_win_write_4() for the scheme. */
static void
sk_win_write_2(struct sk_softc *sc, int reg, u_int32_t val)
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
#else
	CSR_WRITE_2(sc, reg, val);
#endif
}
/* Windowed 8-bit register write; see sk_win_write_4() for the scheme. */
static void
sk_win_write_1(struct sk_softc *sc, int reg, u_int32_t val)
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
#else
	CSR_WRITE_1(sc, reg, val);
#endif
}
/*
 * miibus read method: dispatch to the XMAC (GEnesis) or Marvell
 * (Yukon family) PHY access routine under the MII lock.
 */
static int
sk_miibus_readreg(device_t dev, int phy, int reg)
{
	struct sk_if_softc *sc_if;
	int v;

	sc_if = device_get_softc(dev);

	SK_IF_MII_LOCK(sc_if);
	switch(sc_if->sk_softc->sk_type) {
	case SK_GENESIS:
		v = sk_xmac_miibus_readreg(sc_if, phy, reg);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		v = sk_marv_miibus_readreg(sc_if, phy, reg);
		break;
	default:
		v = 0;
		break;
	}
	SK_IF_MII_UNLOCK(sc_if);

	return (v);
}
/*
 * miibus write method: dispatch to the XMAC (GEnesis) or Marvell
 * (Yukon family) PHY access routine under the MII lock.
 */
static int
sk_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct sk_if_softc *sc_if;
	int v;

	sc_if = device_get_softc(dev);

	SK_IF_MII_LOCK(sc_if);
	switch(sc_if->sk_softc->sk_type) {
	case SK_GENESIS:
		v = sk_xmac_miibus_writereg(sc_if, phy, reg, val);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		v = sk_marv_miibus_writereg(sc_if, phy, reg, val);
		break;
	default:
		v = 0;
		break;
	}
	SK_IF_MII_UNLOCK(sc_if);

	return (v);
}
/*
 * miibus status-change method: dispatch to the MAC-specific handler
 * under the MII lock.  Unknown controller types are silently ignored.
 */
static void
sk_miibus_statchg(device_t dev)
{
	struct sk_if_softc *sc_if;

	sc_if = device_get_softc(dev);

	SK_IF_MII_LOCK(sc_if);
	switch(sc_if->sk_softc->sk_type) {
	case SK_GENESIS:
		sk_xmac_miibus_statchg(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_marv_miibus_statchg(sc_if);
		break;
	}
	SK_IF_MII_UNLOCK(sc_if);
}
/*
 * Read a PHY register through the XMAC MII interface.  For external
 * (GMII) PHYs we must poll for PHYDATARDY before the data register is
 * valid; the internal XMAC PHY returns data immediately.  Returns 0 on
 * timeout (indistinguishable from a register reading 0).
 */
static int
sk_xmac_miibus_readreg(struct sk_if_softc *sc_if, int phy, int reg)
{
	int i;

	/* Latch phy/reg address, then issue a dummy read to start the op. */
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	SK_XM_READ_2(sc_if, XM_PHY_DATA);
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}
		if (i == SK_TIMEOUT) {
			if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
			return(0);
		}
	}
	DELAY(1);
	i = SK_XM_READ_2(sc_if, XM_PHY_DATA);

	return(i);
}
/*
 * Write a PHY register through the XMAC MII interface: wait for the
 * interface to go idle, latch the address, write the data, then wait
 * for completion.  Returns ETIMEDOUT only when the interface never
 * becomes ready; a timed-out completion wait is merely logged.
 */
static int
sk_xmac_miibus_writereg(struct sk_if_softc *sc_if, int phy, int reg, int val)
{
	int i;

	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	for (i = 0; i < SK_TIMEOUT; i++) {
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT) {
		if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
		return (ETIMEDOUT);
	}

	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}
	if (i == SK_TIMEOUT)
		if_printf(sc_if->sk_ifp, "phy write timed out\n");

	return(0);
}
/*
 * PHY status change on a GEnesis port.  The internal XMAC PHY tracks
 * duplex itself; for an external GMII PHY, mirror the resolved duplex
 * setting into the XMAC's MMU command register.
 */
static void
sk_xmac_miibus_statchg(struct sk_if_softc *sc_if)
{
	struct mii_data *mii = device_get_softc(sc_if->sk_miibus);

	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		return;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
	else
		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
}
/*
 * Read a PHY register through the Yukon (Marvell GMAC) SMI interface.
 * Only the integrated Marvell copper/fiber PHYs are supported; other
 * PHY types return 0.  Returns 0 on poll timeout as well.
 */
static int
sk_marv_miibus_readreg(struct sk_if_softc *sc_if, int phy, int reg)
{
	u_int16_t val;
	int i;

	if (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
	    sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER) {
		return(0);
	}

	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	/* Poll until the SMI read completes. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
		return(0);
	}

	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);

	return(val);
}
/*
 * Write a PHY register through the Yukon (Marvell GMAC) SMI interface:
 * load the data register, start the write, and poll for completion.
 * A timeout is logged but not returned as an error.
 */
static int
sk_marv_miibus_writereg(struct sk_if_softc *sc_if, int phy, int reg, int val)
{
	int i;

	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if ((SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) == 0)
			break;
	}
	if (i == SK_TIMEOUT)
		if_printf(sc_if->sk_ifp, "phy write timeout\n");

	return(0);
}
/*
 * PHY status change on a Yukon port: the GMAC needs no additional
 * programming, so this is intentionally a no-op.
 */
static void
sk_marv_miibus_statchg(struct sk_if_softc *sc_if)
{
}
#define HASH_BITS 6

/*
 * XMAC multicast hash: the low HASH_BITS bits of the bit-inverted
 * little-endian CRC32 of the Ethernet address.
 */
static u_int32_t
sk_xmchash(const uint8_t *addr)
{
	return (~ether_crc32_le(addr, ETHER_ADDR_LEN) &
	    ((1 << HASH_BITS) - 1));
}
/*
 * Program one XMAC perfect-filter entry: each slot holds one MAC
 * address as three consecutive 16-bit register words.
 */
static void
sk_setfilt(struct sk_if_softc *sc_if, u_int16_t *addr, int slot)
{
	int base;

	base = XM_RXFILT_ENTRY(slot);
	SK_XM_WRITE_2(sc_if, base, addr[0]);
	SK_XM_WRITE_2(sc_if, base + 2, addr[1]);
	SK_XM_WRITE_2(sc_if, base + 4, addr[2]);
}
/*
 * Reprogram the RX filter, dispatching on the controller generation.
 * Must be called with the per-interface lock held.
 */
static void
sk_rxfilter(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc;

	SK_IF_LOCK_ASSERT(sc_if);

	sc = sc_if->sk_softc;
	if (sc->sk_type == SK_GENESIS)
		sk_rxfilter_genesis(sc_if);
	else
		sk_rxfilter_yukon(sc_if);
}
/*
 * Accumulator passed to sk_add_maddr_genesis() while walking the
 * interface's multicast address list.
 */
struct sk_add_maddr_genesis_ctx {
	struct sk_if_softc *sc_if;	/* port being programmed */
	uint32_t hashes[2];		/* 64-bit multicast hash table */
	uint32_t mode;			/* XM_MODE register value to commit */
};
/*
 * if_foreach_llmaddr() callback for GEnesis: place the first addresses
 * into the XMAC's perfect filter and hash the overflow into the 64-bit
 * multicast hash table.  'cnt' is the running address index; always
 * returns 1 to count the address as processed.
 */
static u_int
sk_add_maddr_genesis(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct sk_add_maddr_genesis_ctx *ctx = arg;
	int h;

	/*
	 * Program the first XM_RXFILT_MAX multicast groups
	 * into the perfect filter.
	 */
	if (cnt + 1 < XM_RXFILT_MAX) {
		/* Slot 0 holds the station address; start at slot cnt + 1. */
		sk_setfilt(ctx->sc_if, (uint16_t *)LLADDR(sdl), cnt + 1);
		ctx->mode |= XM_MODE_RX_USE_PERFECT;
		return (1);
	}
	/* Perfect filter exhausted: fall back to the hash table. */
	h = sk_xmchash((const uint8_t *)LLADDR(sdl));
	if (h < 32)
		ctx->hashes[0] |= (1 << h);
	else
		ctx->hashes[1] |= (1 << (h - 32));
	ctx->mode |= XM_MODE_RX_USE_HASH;

	return (1);
}
/*
 * Program the GEnesis (XMAC) RX filter: clear the perfect-filter
 * entries, then either open the filter wide (PROMISC/ALLMULTI) or walk
 * the multicast list via sk_add_maddr_genesis().
 */
static void
sk_rxfilter_genesis(struct sk_if_softc *sc_if)
{
	if_t ifp = sc_if->sk_ifp;
	/* mode is left uninitialized here; it is loaded from XM_MODE below. */
	struct sk_add_maddr_genesis_ctx ctx = { sc_if, { 0, 0 } };
	int i;
	u_int16_t dummy[] = { 0, 0, 0 };

	SK_IF_LOCK_ASSERT(sc_if);

	ctx.mode = SK_XM_READ_4(sc_if, XM_MODE);
	ctx.mode &= ~(XM_MODE_RX_PROMISC | XM_MODE_RX_USE_HASH |
	    XM_MODE_RX_USE_PERFECT);
	/* First, zot all the existing perfect filters. */
	for (i = 1; i < XM_RXFILT_MAX; i++)
		sk_setfilt(sc_if, dummy, i);

	/* Now program new ones. */
	if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) {
		if (if_getflags(ifp) & IFF_ALLMULTI)
			ctx.mode |= XM_MODE_RX_USE_HASH;
		if (if_getflags(ifp) & IFF_PROMISC)
			ctx.mode |= XM_MODE_RX_PROMISC;
		ctx.hashes[0] = 0xFFFFFFFF;
		ctx.hashes[1] = 0xFFFFFFFF;
	} else
		/* XXX want to maintain reverse semantics */
		if_foreach_llmaddr(ifp, sk_add_maddr_genesis, &ctx);

	/* Commit the accumulated mode bits and hash table. */
	SK_XM_WRITE_4(sc_if, XM_MODE, ctx.mode);
	SK_XM_WRITE_4(sc_if, XM_MAR0, ctx.hashes[0]);
	SK_XM_WRITE_4(sc_if, XM_MAR2, ctx.hashes[1]);
}
/*
 * if_foreach_llmaddr() callback for Yukon: fold each multicast address
 * into the 64-bit GMAC hash table (arg points at uint32_t hashes[2]).
 */
static u_int
sk_hash_maddr_yukon(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t crc, *hashes = arg;

	crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
	/* Just want the 6 least significant bits. */
	crc &= 0x3f;
	/* Set the corresponding bit in the hash table. */
	hashes[crc >> 5] |= 1 << (crc & 0x1f);

	return (1);
}
/*
 * Program the Yukon (GMAC) RX filter: promiscuous mode disables all
 * filtering, ALLMULTI opens the hash table, otherwise the multicast
 * list is hashed via sk_hash_maddr_yukon().
 */
static void
sk_rxfilter_yukon(struct sk_if_softc *sc_if)
{
	if_t ifp;
	uint32_t hashes[2] = { 0, 0 }, mode;

	SK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->sk_ifp;
	mode = SK_YU_READ_2(sc_if, YUKON_RCR);
	if (if_getflags(ifp) & IFF_PROMISC)
		mode &= ~(YU_RCR_UFLEN | YU_RCR_MUFLEN);
	else if (if_getflags(ifp) & IFF_ALLMULTI) {
		mode |= YU_RCR_UFLEN | YU_RCR_MUFLEN;
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		mode |= YU_RCR_UFLEN;
		if_foreach_llmaddr(ifp, sk_hash_maddr_yukon, hashes);
		/* Only enable multicast filtering if any bit was set. */
		if (hashes[0] != 0 || hashes[1] != 0)
			mode |= YU_RCR_MUFLEN;
	}

	/* The 64-bit hash table is split across four 16-bit registers. */
	SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_RCR, mode);
}
/*
 * Populate the standard Rx ring: allocate an mbuf cluster for every
 * descriptor, chain the descriptors into a ring through their sk_next
 * fields, and preset the checksum start offsets.
 *
 * Returns 0 on success or ENOBUFS if an mbuf could not be allocated.
 */
static int
sk_init_rx_ring(struct sk_if_softc *sc_if)
{
	struct sk_ring_data *rd;
	bus_addr_t addr;
	u_int32_t csum_start;
	int i;

	sc_if->sk_cdata.sk_rx_cons = 0;

	/*
	 * TCP/UDP checksum start offset in the upper 16 bits, IP
	 * checksum start offset in the lower 16 bits; parenthesized
	 * explicitly to match sk_init_jumbo_rx_ring().
	 */
	csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) |
	    ETHER_HDR_LEN;
	rd = &sc_if->sk_rdata;
	bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sk_newbuf(sc_if, i) != 0)
			return (ENOBUFS);
		/* Link each descriptor to the next; the last wraps to 0. */
		if (i == (SK_RX_RING_CNT - 1))
			addr = SK_RX_RING_ADDR(sc_if, 0);
		else
			addr = SK_RX_RING_ADDR(sc_if, i + 1);
		rd->sk_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
		rd->sk_rx_ring[i].sk_csum_start = htole32(csum_start);
	}
	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
	    sc_if->sk_cdata.sk_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
/*
 * Populate the jumbo Rx ring: allocate a 9K cluster for every
 * descriptor, chain the descriptors into a ring and preset the
 * checksum start offsets.
 *
 * Returns 0 on success or ENOBUFS if a cluster could not be allocated.
 */
static int
sk_init_jumbo_rx_ring(struct sk_if_softc *sc_if)
{
	struct sk_ring_data *rd;
	bus_addr_t addr;
	u_int32_t csum_start;
	int i;

	sc_if->sk_cdata.sk_jumbo_rx_cons = 0;

	/* TCP/UDP csum start (high 16 bits), IP csum start (low 16). */
	csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) |
	    ETHER_HDR_LEN;
	rd = &sc_if->sk_rdata;
	bzero(rd->sk_jumbo_rx_ring,
	    sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT);
	for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
		if (sk_jumbo_newbuf(sc_if, i) != 0)
			return (ENOBUFS);
		/* Link each descriptor to the next; the last wraps to 0. */
		if (i == (SK_JUMBO_RX_RING_CNT - 1))
			addr = SK_JUMBO_RX_RING_ADDR(sc_if, 0);
		else
			addr = SK_JUMBO_RX_RING_ADDR(sc_if, i + 1);
		rd->sk_jumbo_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
		rd->sk_jumbo_rx_ring[i].sk_csum_start = htole32(csum_start);
	}
	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
	    sc_if->sk_cdata.sk_jumbo_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
/*
 * Initialize the Tx ring: reset producer/consumer state, chain the
 * descriptors into a ring through their sk_next fields, and place
 * every software Tx descriptor on the free queue.  No mbufs are
 * allocated here; they are attached at transmit time.
 */
static void
sk_init_tx_ring(struct sk_if_softc *sc_if)
{
	struct sk_ring_data *rd;
	struct sk_txdesc *txd;
	bus_addr_t addr;
	int i;

	STAILQ_INIT(&sc_if->sk_cdata.sk_txfreeq);
	STAILQ_INIT(&sc_if->sk_cdata.sk_txbusyq);

	sc_if->sk_cdata.sk_tx_prod = 0;
	sc_if->sk_cdata.sk_tx_cons = 0;
	sc_if->sk_cdata.sk_tx_cnt = 0;

	rd = &sc_if->sk_rdata;
	bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
	for (i = 0; i < SK_TX_RING_CNT; i++) {
		/* Link each descriptor to the next; the last wraps to 0. */
		if (i == (SK_TX_RING_CNT - 1))
			addr = SK_TX_RING_ADDR(sc_if, 0);
		else
			addr = SK_TX_RING_ADDR(sc_if, i + 1);
		rd->sk_tx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
		txd = &sc_if->sk_cdata.sk_txdesc[i];
		STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
	}

	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
	    sc_if->sk_cdata.sk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * Recycle an Rx buffer in place: hand the descriptor's existing mbuf
 * back to the hardware without reallocating (used when sk_newbuf()
 * fails or the received frame is dropped).
 */
static __inline void
sk_discard_rxbuf(struct sk_if_softc *sc_if, int idx)
{
	struct sk_rxdesc *rxd;
	struct sk_rx_desc *desc;

	rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
	desc = &sc_if->sk_rdata.sk_rx_ring[idx];
	desc->sk_ctl = htole32(rxd->rx_m->m_len | SK_RXSTAT |
	    SK_OPCODE_CSUM);
}
/*
 * Jumbo-ring counterpart of sk_discard_rxbuf(): re-arm the descriptor
 * with its current mbuf instead of allocating a replacement.
 */
static __inline void
sk_discard_jumbo_rxbuf(struct sk_if_softc *sc_if, int idx)
{
	struct sk_rxdesc *jrxd;
	struct sk_rx_desc *desc;

	jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
	desc = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
	desc->sk_ctl = htole32(jrxd->rx_m->m_len | SK_RXSTAT |
	    SK_OPCODE_CSUM);
}
/*
 * Allocate a fresh 2K mbuf cluster for Rx descriptor 'idx', DMA-load
 * it through the spare map, then swap the spare map with the
 * descriptor's map.  On failure the descriptor keeps its old mbuf.
 *
 * Returns 0 on success, ENOBUFS on allocation or DMA-load failure.
 */
static int
sk_newbuf(struct sk_if_softc *sc_if, int idx)
{
	struct sk_rx_desc *r;
	struct sk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/* Align the IP header on a longword boundary. */
	m_adj(m, ETHER_ALIGN);

	/* Load into the spare map so failure leaves the ring intact. */
	if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_rx_tag,
	    sc_if->sk_cdata.sk_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		/* Tear down the DMA state of the mbuf being replaced. */
		bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the freshly-loaded spare map into the descriptor slot. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->sk_cdata.sk_rx_sparemap;
	sc_if->sk_cdata.sk_rx_sparemap = map;
	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	/* Program the hardware descriptor with the new buffer. */
	r = &sc_if->sk_rdata.sk_rx_ring[idx];
	r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
	r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
	r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);

	return (0);
}
/*
 * Jumbo counterpart of sk_newbuf(): allocate a 9K (MJUM9BYTES)
 * cluster for jumbo Rx descriptor 'idx', DMA-load it through the
 * jumbo spare map, then swap maps.  On failure the descriptor keeps
 * its old mbuf.
 *
 * Returns 0 on success, ENOBUFS on allocation or DMA-load failure.
 */
static int
sk_jumbo_newbuf(struct sk_if_softc *sc_if, int idx)
{
	struct sk_rx_desc *r;
	struct sk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m == NULL)
		return (ENOBUFS);
	m->m_pkthdr.len = m->m_len = MJUM9BYTES;
	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m, ETHER_ALIGN);

	/* Load into the spare map so failure leaves the ring intact. */
	if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_jumbo_rx_tag,
	    sc_if->sk_cdata.sk_jumbo_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		/* Tear down the DMA state of the mbuf being replaced. */
		bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
		    rxd->rx_dmamap);
	}
	/* Swap the freshly-loaded spare map into the descriptor slot. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->sk_cdata.sk_jumbo_rx_sparemap;
	sc_if->sk_cdata.sk_jumbo_rx_sparemap = map;
	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	/* Program the hardware descriptor with the new buffer. */
	r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
	r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
	r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
	r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);

	return (0);
}
/*
 * Set media options: reinitialize the interface and kick the PHY
 * into the newly selected media.
 */
static int
sk_ifmedia_upd(if_t ifp)
{
	struct sk_if_softc *sc_if;
	struct mii_data *mii;

	sc_if = if_getsoftc(ifp);
	mii = device_get_softc(sc_if->sk_miibus);
	sk_init(sc_if);
	mii_mediachg(mii);
	return (0);
}
/*
 * Report current media status by polling the PHY.
 */
static void
sk_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct mii_data *mii;
	struct sk_if_softc *sc;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->sk_miibus);
	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
/*
 * Interface ioctl handler.  Serializes hardware access with the
 * per-interface lock where required; everything not handled here is
 * passed to ether_ioctl().
 */
static int
sk_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct sk_if_softc *sc_if = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *) data;
	int error, mask;
	struct mii_data *mii;

	error = 0;
	switch(command) {
	case SIOCSIFMTU:
		/* Reject MTUs outside [ETHERMIN, SK_JUMBO_MTU]. */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > SK_JUMBO_MTU)
			error = EINVAL;
		else if (if_getmtu(ifp) != ifr->ifr_mtu) {
			/* Jumbo MTUs require the jumbo buffer pool. */
			if (sc_if->sk_jumbo_disable != 0 &&
			    ifr->ifr_mtu > SK_MAX_FRAMELEN)
				error = EINVAL;
			else {
				SK_IF_LOCK(sc_if);
				if_setmtu(ifp, ifr->ifr_mtu);
				/* Reinitialize with the new MTU if running. */
				if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
					if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
					sk_init_locked(sc_if);
				}
				SK_IF_UNLOCK(sc_if);
			}
		}
		break;
	case SIOCSIFFLAGS:
		SK_IF_LOCK(sc_if);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/*
				 * Only reprogram the Rx filter when the
				 * PROMISC/ALLMULTI bits actually changed.
				 */
				if ((if_getflags(ifp) ^ sc_if->sk_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI))
					sk_rxfilter(sc_if);
			} else
				sk_init_locked(sc_if);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				sk_stop(sc_if);
		}
		/* Remember flags for the next delta computation. */
		sc_if->sk_if_flags = if_getflags(ifp);
		SK_IF_UNLOCK(sc_if);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		SK_IF_LOCK(sc_if);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			sk_rxfilter(sc_if);
		SK_IF_UNLOCK(sc_if);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->sk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		SK_IF_LOCK(sc_if);
		/* GEnesis advertises no checksum capabilities. */
		if (sc_if->sk_softc->sk_type == SK_GENESIS) {
			SK_IF_UNLOCK(sc_if);
			break;
		}
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & if_getcapabilities(ifp)) != 0) {
			if_togglecapenable(ifp, IFCAP_TXCSUM);
			/* Keep hwassist bits in step with the capability. */
			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
				if_sethwassistbits(ifp, SK_CSUM_FEATURES, 0);
			else
				if_sethwassistbits(ifp, 0, SK_CSUM_FEATURES);
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & if_getcapabilities(ifp)) != 0)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		SK_IF_UNLOCK(sc_if);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*
 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
skc_probe(device_t dev)
{
	const struct sk_type *t;

	for (t = sk_devs; t->sk_name != NULL; t++) {
		if (pci_get_vendor(dev) != t->sk_vid ||
		    pci_get_device(dev) != t->sk_did)
			continue;
		/*
		 * Only attach to rev. 2 of the Linksys EG1032 adapter.
		 * Rev. 3 is supported by re(4).
		 */
		if (t->sk_vid == VENDORID_LINKSYS &&
		    t->sk_did == DEVICEID_LINKSYS_EG1032 &&
		    pci_get_subdevice(dev) !=
		    SUBDEVICEID_LINKSYS_EG1032_REV2)
			continue;
		device_set_desc(dev, t->sk_name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}
/*
 * Force the GEnesis into reset, then bring it out of reset.
 * Also re-arms the packet arbiter (GEnesis only), the RAM interface
 * and interrupt moderation, which the reset clears.
 */
static void
sk_reset(struct sk_softc *sc)
{

	/* Assert software and master reset; Yukon also needs link reset. */
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);

	DELAY(1000);
	/* Release the resets. */
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure packet arbiter */
		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
	}

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents one tick, so to specify a timeout in
	 * microseconds, we have to multiply by the correct number of
	 * ticks-per-microsecond.
	 */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sc->sk_int_ticks = SK_IMTIMER_TICKS_GENESIS;
		break;
	default:
		sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON;
		break;
	}
	if (bootverbose)
		device_printf(sc->sk_dev, "interrupt moderation is %d us\n",
		    sc->sk_int_mod);
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
	    sc->sk_int_ticks));
	/* Moderate the four end-of-frame interrupt sources. */
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);

	return;
}
/*
 * Probe routine for the per-port child device.  Not much to do here:
 * we always know there will be at least one XMAC present, and if
 * there are two, skc_attach() will create a second device instance
 * for us.  We only pick a human-readable description from the
 * controller type recorded by skc_attach().
 */
static int
sk_probe(device_t dev)
{
	struct sk_softc *sc;

	sc = device_get_softc(device_get_parent(dev));
	switch (sc->sk_type) {
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
		break;
	case SK_GENESIS:
		device_set_desc(dev, "XaQti Corp. XMAC II");
		break;
	}

	return (BUS_PROBE_DEFAULT);
}
/*
 * Each XMAC chip is attached as a separate logical IP interface.
 * Single port cards will have only one logical interface of course.
 */
static int
sk_attach(device_t dev)
{
	struct sk_softc *sc;
	struct sk_if_softc *sc_if;
	if_t ifp;
	u_int32_t r;
	int error, i, phy, port;
	u_char eaddr[6];
	u_char inv_mac[] = {0, 0, 0, 0, 0, 0};

	if (dev == NULL)
		return(EINVAL);

	error = 0;
	sc_if = device_get_softc(dev);
	sc = device_get_softc(device_get_parent(dev));
	/* Port index (SK_PORT_A/B) was stashed in the ivars by skc_attach(). */
	port = *(int *)device_get_ivars(dev);

	sc_if->sk_if_dev = dev;
	sc_if->sk_port = port;
	sc_if->sk_softc = sc;
	sc->sk_if[port] = sc_if;
	/* Each port drives its own synchronous Tx BMU CSR. */
	if (port == SK_PORT_A)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
	if (port == SK_PORT_B)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;

	callout_init_mtx(&sc_if->sk_tick_ch, &sc_if->sk_softc->sk_mtx, 0);
	callout_init_mtx(&sc_if->sk_watchdog_ch, &sc_if->sk_softc->sk_mtx, 0);

	if (sk_dma_alloc(sc_if) != 0) {
		error = ENOMEM;
		goto fail;
	}
	/* Jumbo DMA setup failure is non-fatal; it only disables jumbo. */
	sk_dma_jumbo_alloc(sc_if);

	ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
	/*
	 * NOTE(review): the following '-'-prefixed lines are leftover
	 * unified-diff markers (the upstream change removed the
	 * if_alloc() NULL check); they are preserved verbatim here.
	 */
-	if (ifp == NULL) {
-		device_printf(sc_if->sk_if_dev, "can not if_alloc()\n");
-		error = ENOSPC;
-		goto fail;
-	}
	if_setsoftc(ifp, sc_if);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	/*
	 * SK_GENESIS has a bug in checksum offload - From linux.
	 */
	if (sc_if->sk_softc->sk_type != SK_GENESIS) {
		if_setcapabilities(ifp, IFCAP_TXCSUM | IFCAP_RXCSUM);
		if_sethwassist(ifp, 0);
	} else {
		if_setcapabilities(ifp, 0);
		if_sethwassist(ifp, 0);
	}
	if_setcapenable(ifp, if_getcapabilities(ifp));
	/*
	 * Some revision of Yukon controller generates corrupted
	 * frame when TX checksum offloading is enabled. The
	 * frame has a valid checksum value so payload might be
	 * modified during TX checksum calculation. Disable TX
	 * checksum offloading but give users chance to enable it
	 * when they know their controller works without problems
	 * with TX checksum offloading.
	 */
	if_setcapenablebit(ifp, 0, IFCAP_TXCSUM);
	if_setioctlfn(ifp, sk_ioctl);
	if_setstartfn(ifp, sk_start);
	if_setinitfn(ifp, sk_init);
	if_setsendqlen(ifp, SK_TX_RING_CNT - 1);
	if_setsendqready(ifp);

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	SK_IF_LOCK(sc_if);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] =
		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);

	/* Verify whether the station address is invalid or not. */
	if (bcmp(eaddr, inv_mac, sizeof(inv_mac)) == 0) {
		device_printf(sc_if->sk_if_dev,
		    "Generating random ethernet address\n");
		r = arc4random();
		/*
		 * Set OUI to convenient locally assigned address. 'b'
		 * is 0x62, which has the locally assigned bit set, and
		 * the broadcast/multicast bit clear.
		 */
		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (r >> 16) & 0xff;
		eaddr[4] = (r >> 8) & 0xff;
		eaddr[5] = (r >> 0) & 0xff;
	}
	/*
	 * Set up RAM buffer addresses. The NIC will have a certain
	 * amount of SRAM on it, somewhere between 512K and 2MB. We
	 * need to divide this up a) between the transmitter and
	 * receiver and b) between the two XMACs, if this is a
	 * dual port NIC. Our algotithm is to divide up the memory
	 * evenly so that everyone gets a fair share.
	 *
	 * Just to be contrary, Yukon2 appears to have separate memory
	 * for each MAC.
	 */
	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
		u_int32_t		chunk, val;

		/* Single MAC: split the whole SRAM between Rx and Tx. */
		chunk = sc->sk_ramsize / 2;
		val = sc->sk_rboff / sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	} else {
		u_int32_t		chunk, val;

		/* Dual MAC: each port gets half, split again Rx/Tx. */
		chunk = sc->sk_ramsize / 4;
		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
		    sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	}

	/* Read and save PHY type and set PHY address */
	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
	if (!SK_YUKON_FAMILY(sc->sk_type)) {
		switch(sc_if->sk_phytype) {
		case SK_PHYTYPE_XMAC:
			sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
			break;
		case SK_PHYTYPE_BCOM:
			sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
			break;
		default:
			device_printf(sc->sk_dev, "unsupported PHY type: %d\n",
			    sc_if->sk_phytype);
			error = ENODEV;
			SK_IF_UNLOCK(sc_if);
			goto fail;
		}
	} else {
		if (sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
		    sc->sk_pmd != 'S') {
			/* not initialized, punt */
			sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;
			sc->sk_coppertype = 1;
		}

		sc_if->sk_phyaddr = SK_PHYADDR_MARV;

		if (!(sc->sk_coppertype))
			sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
	}

	/*
	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
	 */
	SK_IF_UNLOCK(sc_if);
	ether_ifattach(ifp, eaddr);
	SK_IF_LOCK(sc_if);

	/*
	 * The hardware should be ready for VLAN_MTU by default:
	 * XMAC II has 0x8100 in VLAN Tag Level 1 register initially;
	 * YU_SMR_MFL_VLAN is set by this driver in Yukon.
	 *
	 */
        if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
        if_setcapenablebit(ifp, IFCAP_VLAN_MTU, 0);
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
        if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	/*
	 * Do miibus setup.
	 */
	phy = MII_PHY_ANY;
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
			phy = 0;
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		phy = 0;
		break;
	}

	SK_IF_UNLOCK(sc_if);
	error = mii_attach(dev, &sc_if->sk_miibus, ifp, sk_ifmedia_upd,
	    sk_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev, "attaching PHYs failed\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error) {
		/* Access should be ok even though lock has been dropped */
		sc->sk_if[port] = NULL;
		sk_detach(dev);
	}

	return(error);
}
/*
 * Attach the controller (skc): map registers, identify the chip,
 * reset it, read SRAM/PMD configuration, and create one "sk" child
 * device per port.  The per-port ethernet/ifmedia setup happens in
 * sk_attach().
 */
static int
skc_attach(device_t dev)
{
	struct sk_softc *sc;
	int error = 0, *port;
	uint8_t skrs;
	const char *pname = NULL;
	char *revstr;

	sc = device_get_softc(dev);
	sc->sk_dev = dev;
	mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	mtx_init(&sc->sk_mii_mtx, "sk_mii_mutex", NULL, MTX_DEF);
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/* Allocate resources */
#ifdef SK_USEIOSPACE
	sc->sk_res_spec = sk_res_spec_io;
#else
	sc->sk_res_spec = sk_res_spec_mem;
#endif
	error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
	if (error) {
		/* Fall back to the other resource type before giving up. */
		if (sc->sk_res_spec == sk_res_spec_mem)
			sc->sk_res_spec = sk_res_spec_io;
		else
			sc->sk_res_spec = sk_res_spec_mem;
		error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
		if (error) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->sk_res_spec == sk_res_spec_mem ? "memory" :
			    "I/O");
			goto fail;
		}
	}

	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf;

	/* Bail out if chip is not recognized. */
	if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
		device_printf(dev, "unknown device: chipver=%02x, rev=%x\n",
		    sc->sk_type, sc->sk_rev);
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "int_mod",
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
		&sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I",
		"SK interrupt moderation");

	/* Pull in device tunables. */
	sc->sk_int_mod = SK_IM_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "int_mod", &sc->sk_int_mod);
	if (error == 0) {
		/* Clamp out-of-range tunable values back to the default. */
		if (sc->sk_int_mod < SK_IM_MIN ||
		    sc->sk_int_mod > SK_IM_MAX) {
			device_printf(dev, "int_mod value out of range; "
			    "using default: %d\n", SK_IM_DEFAULT);
			sc->sk_int_mod = SK_IM_DEFAULT;
		}
	}

	/* Reset the adapter. */
	sk_reset(sc);

	skrs = sk_win_read_1(sc, SK_EPROM0);
	if (sc->sk_type == SK_GENESIS) {
		/* Read and save RAM size and RAMbuffer offset */
		switch(skrs) {
		case SK_RAMSIZE_512K_64:
			sc->sk_ramsize = 0x80000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_1024K_64:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_80000;
			break;
		case SK_RAMSIZE_1024K_128:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_2048K_128:
			sc->sk_ramsize = 0x200000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		default:
			device_printf(dev, "unknown ram size: %d\n", skrs);
			error = ENXIO;
			goto fail;
		}
	} else { /* SK_YUKON_FAMILY */
		/* Yukon encodes SRAM size in 4K units (0 means 128K). */
		if (skrs == 0x00)
			sc->sk_ramsize = 0x20000;
		else
			sc->sk_ramsize = skrs * (1<<12);
		sc->sk_rboff = SK_RBOFF_0;
	}

	/* Read and save physical media type */
	sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);
	if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
		sc->sk_coppertype = 1;
	else
		sc->sk_coppertype = 0;

	/* Determine whether to name it with VPD PN or just make it up.
	 * Marvell Yukon VPD PN seems to freqently be bogus. */
	switch (pci_get_device(dev)) {
	case DEVICEID_SK_V1:
	case DEVICEID_BELKIN_5005:
	case DEVICEID_3COM_3C940:
	case DEVICEID_LINKSYS_EG1032:
	case DEVICEID_DLINK_DGE530T_A1:
	case DEVICEID_DLINK_DGE530T_B1:
		/* Stay with VPD PN. */
		(void) pci_get_vpd_ident(dev, &pname);
		break;
	case DEVICEID_SK_V2:
		/* YUKON VPD PN might bear no resemblance to reality. */
		switch (sc->sk_type) {
		case SK_GENESIS:
			/* Stay with VPD PN. */
			(void) pci_get_vpd_ident(dev, &pname);
			break;
		case SK_YUKON:
			pname = "Marvell Yukon Gigabit Ethernet";
			break;
		case SK_YUKON_LITE:
			pname = "Marvell Yukon Lite Gigabit Ethernet";
			break;
		case SK_YUKON_LP:
			pname = "Marvell Yukon LP Gigabit Ethernet";
			break;
		default:
			pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
			break;
		}

		/* Yukon Lite Rev. A0 needs special test. */
		if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
			u_int32_t far;
			u_int8_t testbyte;

			/* Save flash address register before testing. */
			far = sk_win_read_4(sc, SK_EP_ADDR);

			sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
			testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);

			if (testbyte != 0x00) {
				/* Yukon Lite Rev. A0 detected. */
				sc->sk_type = SK_YUKON_LITE;
				sc->sk_rev = SK_YUKON_LITE_REV_A0;
				/* Restore flash address register. */
				sk_win_write_4(sc, SK_EP_ADDR, far);
			}
		}
		break;
	default:
		device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
			"chipver=%02x, rev=%x\n",
			pci_get_vendor(dev), pci_get_device(dev),
			sc->sk_type, sc->sk_rev);
		error = ENXIO;
		goto fail;
	}

	if (sc->sk_type == SK_YUKON_LITE) {
		switch (sc->sk_rev) {
		case SK_YUKON_LITE_REV_A0:
			revstr = "A0";
			break;
		case SK_YUKON_LITE_REV_A1:
			revstr = "A1";
			break;
		case SK_YUKON_LITE_REV_A3:
			revstr = "A3";
			break;
		default:
			revstr = "";
			break;
		}
	} else {
		revstr = "";
	}

	/* Announce the product name and more VPD data if there. */
	if (pname != NULL)
		device_printf(dev, "%s rev. %s(0x%x)\n",
			pname, revstr, sc->sk_rev);

	if (bootverbose) {
		device_printf(dev, "chip ver  = 0x%02x\n", sc->sk_type);
		device_printf(dev, "chip rev  = 0x%02x\n", sc->sk_rev);
		device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs);
		device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize);
	}

	/* Create port A; its ivars carry the port index for sk_attach(). */
	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
	if (sc->sk_devs[SK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
	if (port == NULL) {
		device_printf(dev, "failed to allocate memory for "
		    "ivars of PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	*port = SK_PORT_A;
	device_set_ivars(sc->sk_devs[SK_PORT_A], port);

	/* Dual-MAC boards get a second child for port B. */
	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
		if (sc->sk_devs[SK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
		if (port == NULL) {
			device_printf(dev, "failed to allocate memory for "
			    "ivars of PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		*port = SK_PORT_B;
		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
	}

	/* Turn on the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);

	error = bus_generic_attach(dev);
	if (error) {
		device_printf(dev, "failed to attach port(s)\n");
		goto fail;
	}

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->sk_res[1], INTR_TYPE_NET|INTR_MPSAFE,
	    NULL, sk_intr, sc, &sc->sk_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

fail:
	if (error)
		skc_detach(dev);

	return(error);
}
/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
sk_detach(device_t dev)
{
	struct sk_if_softc *sc_if;
	if_t ifp;

	sc_if = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
	    ("sk mutex not initialized in sk_detach"));
	SK_IF_LOCK(sc_if);

	ifp = sc_if->sk_ifp;
	/* These should only be active if attach_xmac succeeded */
	if (device_is_attached(dev)) {
		sk_stop(sc_if);
		/* Can't hold locks while calling detach */
		SK_IF_UNLOCK(sc_if);
		callout_drain(&sc_if->sk_tick_ch);
		callout_drain(&sc_if->sk_watchdog_ch);
		ether_ifdetach(ifp);
		SK_IF_LOCK(sc_if);
	}
	/*
	 * We're generally called from skc_detach() which is using
	 * device_delete_child() to get to here. It's already trashed
	 * miibus for us, so don't do it here or we'll panic.
	 */
	/*
	if (sc_if->sk_miibus != NULL)
		device_delete_child(dev, sc_if->sk_miibus);
	*/
	bus_generic_detach(dev);
	/* Release DMA resources allocated in sk_attach(). */
	sk_dma_jumbo_free(sc_if);
	sk_dma_free(sc_if);
	SK_IF_UNLOCK(sc_if);
	if (ifp)
		if_free(ifp);

	return(0);
}
/*
 * Detach the controller: delete the per-port child devices (freeing
 * the port-index ivars allocated in skc_attach()), tear down the
 * interrupt handler, release bus resources and destroy the mutexes.
 * Also used as the error-unwind path of skc_attach().
 */
static int
skc_detach(device_t dev)
{
	struct sk_softc *sc;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));

	if (device_is_alive(dev)) {
		if (sc->sk_devs[SK_PORT_A] != NULL) {
			/* Ivars were malloc'ed in skc_attach(). */
			free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
			device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
		}
		if (sc->sk_devs[SK_PORT_B] != NULL) {
			free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
			device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
		}
		bus_generic_detach(dev);
	}

	if (sc->sk_intrhand)
		bus_teardown_intr(dev, sc->sk_res[1], sc->sk_intrhand);
	bus_release_resources(dev, sc->sk_res_spec, sc->sk_res);

	mtx_destroy(&sc->sk_mii_mtx);
	mtx_destroy(&sc->sk_mtx);

	return(0);
}
/* Hand child sk(4) devices the parent bus DMA tag. */
static bus_dma_tag_t
skc_get_dma_tag(device_t bus, device_t child __unused)
{
	bus_dma_tag_t tag;

	tag = bus_get_dma_tag(bus);
	return (tag);
}
/* Context passed to sk_dmamap_cb() to return the loaded bus address. */
struct sk_dmamap_arg {
	bus_addr_t	sk_busaddr;	/* bus address of the first segment */
};
/*
 * bus_dmamap_load() callback: record the (single) segment's bus
 * address into the caller-supplied sk_dmamap_arg.  Leaves the
 * context untouched on load error.
 */
static void
sk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct sk_dmamap_arg *dmap;

	if (error != 0)
		return;
	dmap = arg;
	dmap->sk_busaddr = segs[0].ds_addr;
}
/*
 * DMA resource allocation.  sk_dma_alloc() below sets up the tags, ring
 * memory and maps for the standard Tx/Rx paths.  The SysKonnect adapters
 * additionally support "jumbograms" (9K frames), although SysKonnect
 * doesn't currently use them in their drivers.  Standard mbuf clusters
 * are only 2048 bytes in size, so sk_dma_jumbo_alloc() sets up a
 * separate pool of large (MJUM9BYTES) receive buffers for jumbo frame
 * support.  Fortunately, this does not require an excessive amount of
 * additional code.
 */
/*
 * Allocate all DMA resources for the standard Tx/Rx paths: the parent
 * tag, per-ring tags and ring memory (loaded to obtain bus addresses),
 * and per-buffer DMA maps plus an Rx spare map.  Returns 0 or a
 * bus_dma error; partially allocated resources are left for the
 * caller's error path (sk_detach() -> sk_dma_free()) to release.
 */
static int
sk_dma_alloc(struct sk_if_softc *sc_if)
{
	struct sk_dmamap_arg ctx;
	struct sk_txdesc *txd;
	struct sk_rxdesc *rxd;
	int error, i;

	/* create parent tag */
	/*
	 * XXX
	 * This driver should use BUS_SPACE_MAXADDR for lowaddr argument
	 * in bus_dma_tag_create(9) as the NIC would support DAC mode.
	 * However bz@ reported that it does not work on amd64 with > 4GB
	 * RAM. Until we have more clues of the breakage, disable DAC mode
	 * by limiting DMA address to be in 32bit address space.
	 */
	error = bus_dma_tag_create(
		    bus_get_dma_tag(sc_if->sk_if_dev),/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
		    0,				/* nsegments */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->sk_cdata.sk_parent_tag);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to create parent DMA tag\n");
		goto fail;
	}

	/* create tag for Tx ring */
	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
		    SK_RING_ALIGN, 0,		/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    SK_TX_RING_SZ,		/* maxsize */
		    1,				/* nsegments */
		    SK_TX_RING_SZ,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->sk_cdata.sk_tx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate Tx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Rx ring */
	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
		    SK_RING_ALIGN, 0,		/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    SK_RX_RING_SZ,		/* maxsize */
		    1,				/* nsegments */
		    SK_RX_RING_SZ,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->sk_cdata.sk_rx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate Rx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Tx buffers */
	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MCLBYTES * SK_MAXTXSEGS,	/* maxsize */
		    SK_MAXTXSEGS,		/* nsegments */
		    MCLBYTES,			/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->sk_cdata.sk_tx_tag);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate Tx DMA tag\n");
		goto fail;
	}

	/* create tag for Rx buffers */
	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MCLBYTES,			/* maxsize */
		    1,				/* nsegments */
		    MCLBYTES,			/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->sk_cdata.sk_rx_tag);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate Rx DMA tag\n");
		goto fail;
	}

	/* allocate DMA'able memory and load the DMA map for Tx ring */
	error = bus_dmamem_alloc(sc_if->sk_cdata.sk_tx_ring_tag,
	    (void **)&sc_if->sk_rdata.sk_tx_ring, BUS_DMA_NOWAIT |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->sk_cdata.sk_tx_ring_map);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.sk_busaddr = 0;
	error = bus_dmamap_load(sc_if->sk_cdata.sk_tx_ring_tag,
	    sc_if->sk_cdata.sk_tx_ring_map, sc_if->sk_rdata.sk_tx_ring,
	    SK_TX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	/* Physical base of the Tx ring, reported by sk_dmamap_cb(). */
	sc_if->sk_rdata.sk_tx_ring_paddr = ctx.sk_busaddr;

	/* allocate DMA'able memory and load the DMA map for Rx ring */
	error = bus_dmamem_alloc(sc_if->sk_cdata.sk_rx_ring_tag,
	    (void **)&sc_if->sk_rdata.sk_rx_ring, BUS_DMA_NOWAIT |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->sk_cdata.sk_rx_ring_map);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.sk_busaddr = 0;
	error = bus_dmamap_load(sc_if->sk_cdata.sk_rx_ring_tag,
	    sc_if->sk_cdata.sk_rx_ring_map, sc_if->sk_rdata.sk_rx_ring,
	    SK_RX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	/* Physical base of the Rx ring, reported by sk_dmamap_cb(). */
	sc_if->sk_rdata.sk_rx_ring_paddr = ctx.sk_busaddr;

	/* create DMA maps for Tx buffers */
	for (i = 0; i < SK_TX_RING_CNT; i++) {
		txd = &sc_if->sk_cdata.sk_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->sk_cdata.sk_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc_if->sk_if_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}

	/* create DMA maps for Rx buffers */
	/* The spare map lets sk_newbuf() load before swapping maps. */
	if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
	    &sc_if->sk_cdata.sk_rx_sparemap)) != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		rxd = &sc_if->sk_cdata.sk_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc_if->sk_if_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}
static int
sk_dma_jumbo_alloc(struct sk_if_softc *sc_if)
{
	struct sk_dmamap_arg ctx;
	struct sk_rxdesc *jrxd;
	int error, i;

	/*
	 * Allocate every DMA resource needed for jumbo-frame reception:
	 * a tag and ring memory for the jumbo Rx descriptor ring, a tag
	 * for the 9K jumbo Rx buffers, and a DMA map per descriptor plus
	 * one spare.  Any failure is non-fatal: the partial allocations
	 * are torn down and jumbo support is disabled for the interface.
	 */
	if (jumbo_disable != 0) {
		/* Administratively disabled via the jumbo_disable knob. */
		device_printf(sc_if->sk_if_dev, "disabling jumbo frame support\n");
		sc_if->sk_jumbo_disable = 1;
		return (0);
	}
	/* create tag for jumbo Rx ring */
	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
	    SK_RING_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SK_JUMBO_RX_RING_SZ,	/* maxsize */
	    1,				/* nsegments */
	    SK_JUMBO_RX_RING_SZ,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate jumbo Rx ring DMA tag\n");
		goto jumbo_fail;
	}
	/* create tag for jumbo Rx buffers */
	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MJUM9BYTES,			/* maxsize */
	    1,				/* nsegments */
	    MJUM9BYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->sk_cdata.sk_jumbo_rx_tag);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate jumbo Rx DMA tag\n");
		goto jumbo_fail;
	}
	/* allocate DMA'able memory and load the DMA map for jumbo Rx ring */
	error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
	    (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring, BUS_DMA_NOWAIT |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc_if->sk_cdata.sk_jumbo_rx_ring_map);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate DMA'able memory for jumbo Rx ring\n");
		goto jumbo_fail;
	}
	/* Callback stores the ring's bus address into ctx. */
	ctx.sk_busaddr = 0;
	error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
	    sc_if->sk_cdata.sk_jumbo_rx_ring_map,
	    sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb,
	    &ctx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to load DMA'able memory for jumbo Rx ring\n");
		goto jumbo_fail;
	}
	sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr;
	/* create DMA maps for jumbo Rx buffers */
	if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
	    &sc_if->sk_cdata.sk_jumbo_rx_sparemap)) != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to create spare jumbo Rx dmamap\n");
		goto jumbo_fail;
	}
	for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
		jrxd->rx_m = NULL;
		jrxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
		    &jrxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc_if->sk_if_dev,
			    "failed to create jumbo Rx dmamap\n");
			goto jumbo_fail;
		}
	}
	return (0);

jumbo_fail:
	/* Undo partial allocations and fall back to standard frames. */
	sk_dma_jumbo_free(sc_if);
	device_printf(sc_if->sk_if_dev, "disabling jumbo frame support due to "
	    "resource shortage\n");
	sc_if->sk_jumbo_disable = 1;
	return (0);
}
static void
sk_dma_free(struct sk_if_softc *sc_if)
{
	struct sk_txdesc *txd;
	struct sk_rxdesc *rxd;
	int i;

	/*
	 * Release all standard (non-jumbo) DMA resources in reverse
	 * order of allocation.  Every step is guarded by a NULL/zero
	 * check, so this is safe to call on a partially initialized
	 * softc (e.g. from an attach failure path).  A non-zero ring
	 * paddr serves as the "map is loaded" marker.
	 */
	/* Tx ring */
	if (sc_if->sk_cdata.sk_tx_ring_tag) {
		if (sc_if->sk_rdata.sk_tx_ring_paddr)
			bus_dmamap_unload(sc_if->sk_cdata.sk_tx_ring_tag,
			    sc_if->sk_cdata.sk_tx_ring_map);
		if (sc_if->sk_rdata.sk_tx_ring)
			bus_dmamem_free(sc_if->sk_cdata.sk_tx_ring_tag,
			    sc_if->sk_rdata.sk_tx_ring,
			    sc_if->sk_cdata.sk_tx_ring_map);
		sc_if->sk_rdata.sk_tx_ring = NULL;
		sc_if->sk_rdata.sk_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_ring_tag);
		sc_if->sk_cdata.sk_tx_ring_tag = NULL;
	}
	/* Rx ring */
	if (sc_if->sk_cdata.sk_rx_ring_tag) {
		if (sc_if->sk_rdata.sk_rx_ring_paddr)
			bus_dmamap_unload(sc_if->sk_cdata.sk_rx_ring_tag,
			    sc_if->sk_cdata.sk_rx_ring_map);
		if (sc_if->sk_rdata.sk_rx_ring)
			bus_dmamem_free(sc_if->sk_cdata.sk_rx_ring_tag,
			    sc_if->sk_rdata.sk_rx_ring,
			    sc_if->sk_cdata.sk_rx_ring_map);
		sc_if->sk_rdata.sk_rx_ring = NULL;
		sc_if->sk_rdata.sk_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_ring_tag);
		sc_if->sk_cdata.sk_rx_ring_tag = NULL;
	}
	/* Tx buffers: destroy each per-descriptor map, then the tag. */
	if (sc_if->sk_cdata.sk_tx_tag) {
		for (i = 0; i < SK_TX_RING_CNT; i++) {
			txd = &sc_if->sk_cdata.sk_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc_if->sk_cdata.sk_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_tag);
		sc_if->sk_cdata.sk_tx_tag = NULL;
	}
	/* Rx buffers: per-descriptor maps, spare map, then the tag. */
	if (sc_if->sk_cdata.sk_rx_tag) {
		for (i = 0; i < SK_RX_RING_CNT; i++) {
			rxd = &sc_if->sk_cdata.sk_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc_if->sk_cdata.sk_rx_sparemap) {
			bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
			    sc_if->sk_cdata.sk_rx_sparemap);
			sc_if->sk_cdata.sk_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_tag);
		sc_if->sk_cdata.sk_rx_tag = NULL;
	}
	/* Finally the parent tag all of the above were derived from. */
	if (sc_if->sk_cdata.sk_parent_tag) {
		bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag);
		sc_if->sk_cdata.sk_parent_tag = NULL;
	}
}
static void
sk_dma_jumbo_free(struct sk_if_softc *sc_if)
{
	struct sk_rxdesc *jrxd;
	int i;

	/*
	 * Release all jumbo-frame DMA resources; the mirror image of
	 * sk_dma_jumbo_alloc() and also its error-path cleanup, so every
	 * step tolerates resources that were never allocated.
	 */
	/* jumbo Rx ring */
	if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) {
		if (sc_if->sk_rdata.sk_jumbo_rx_ring_paddr)
			bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
			    sc_if->sk_cdata.sk_jumbo_rx_ring_map);
		if (sc_if->sk_rdata.sk_jumbo_rx_ring)
			bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
			    sc_if->sk_rdata.sk_jumbo_rx_ring,
			    sc_if->sk_cdata.sk_jumbo_rx_ring_map);
		sc_if->sk_rdata.sk_jumbo_rx_ring = NULL;
		sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
		sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL;
	}
	/* jumbo Rx buffers: per-descriptor maps, spare map, then the tag. */
	if (sc_if->sk_cdata.sk_jumbo_rx_tag) {
		for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
			jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
			if (jrxd->rx_dmamap) {
				bus_dmamap_destroy(
				    sc_if->sk_cdata.sk_jumbo_rx_tag,
				    jrxd->rx_dmamap);
				jrxd->rx_dmamap = NULL;
			}
		}
		if (sc_if->sk_cdata.sk_jumbo_rx_sparemap) {
			bus_dmamap_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag,
			    sc_if->sk_cdata.sk_jumbo_rx_sparemap);
			sc_if->sk_cdata.sk_jumbo_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag);
		sc_if->sk_cdata.sk_jumbo_rx_tag = NULL;
	}
}
static void
sk_txcksum(if_t ifp, struct mbuf *m, struct sk_tx_desc *f)
{
	struct ip *ip;
	u_int16_t offset;
	u_int8_t *p;

	/*
	 * Program the hardware Tx checksum offsets in the descriptor.
	 * The checksum region begins after the IP header, so the IP
	 * header length has to be dug out of the mbuf chain.  If the
	 * chain layout defeats that, fall back to the default offset
	 * (fixed-size IP header, no options) and accept that the
	 * checksum may end up being computed from the wrong position.
	 */
	offset = sizeof(struct ip) + ETHER_HDR_LEN;
	/* Skip any leading zero-length mbufs. */
	for(; m && m->m_len == 0; m = m->m_next)
		;
	if (m == NULL || m->m_len < ETHER_HDR_LEN) {
		if_printf(ifp, "%s: m_len < ETHER_HDR_LEN\n", __func__);
		/* checksum may be corrupted */
		goto sendit;
	}
	if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) {
		if (m->m_len != ETHER_HDR_LEN) {
			if_printf(ifp, "%s: m_len != ETHER_HDR_LEN\n",
			    __func__);
			/* checksum may be corrupted */
			goto sendit;
		}
		/*
		 * The Ethernet header exactly fills this mbuf, so the IP
		 * header starts in the next non-empty one.
		 */
		for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
			;
		if (m == NULL) {
			offset = sizeof(struct ip) + ETHER_HDR_LEN;
			/* checksum may be corrupted */
			goto sendit;
		}
		ip = mtod(m, struct ip *);
	} else {
		p = mtod(m, u_int8_t *);
		p += ETHER_HDR_LEN;
		ip = (struct ip *)p;
	}
	/* Real start offset: Ethernet header plus actual IP header length. */
	offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;

sendit:
	f->sk_csum_startval = 0;
	/*
	 * Pack the two offsets into one 32-bit field: low 16 bits are
	 * offset + csum_data (presumably the position the computed
	 * checksum is stored at — confirm against the SK descriptor
	 * spec), high 16 bits are where checksumming starts.
	 */
	f->sk_csum_start = htole32(((offset + m->m_pkthdr.csum_data) & 0xffff) |
	    (offset << 16));
}
static int
sk_encap(struct sk_if_softc *sc_if, struct mbuf **m_head)
{
	struct sk_txdesc *txd;
	struct sk_tx_desc *f = NULL;
	struct mbuf *m;
	bus_dma_segment_t txsegs[SK_MAXTXSEGS];
	u_int32_t cflags, frag, si, sk_ctl;
	int error, i, nseg;

	/*
	 * Map *m_head for DMA and fill one Tx descriptor per segment.
	 * On EFBIG the chain is defragmented once and retried.  On any
	 * terminal failure the mbuf is freed and *m_head is cleared;
	 * ENOBUFS returns with the caller's mbuf untouched so it can be
	 * requeued.  Returns 0 when the frame is queued to the ring.
	 */
	SK_IF_LOCK_ASSERT(sc_if);

	if ((txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txfreeq)) == NULL)
		return (ENOBUFS);
	error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
	if (error == EFBIG) {
		/* Too many segments: compact the chain and retry once. */
		m = m_defrag(*m_head, M_NOWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nseg == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}
	/* Leave one descriptor unused so the ring never fills completely. */
	if (sc_if->sk_cdata.sk_tx_cnt + nseg >= SK_TX_RING_CNT) {
		bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	m = *m_head;
	if ((m->m_pkthdr.csum_flags & if_gethwassist(sc_if->sk_ifp)) != 0)
		cflags = SK_OPCODE_CSUM;
	else
		cflags = SK_OPCODE_DEFAULT;
	si = frag = sc_if->sk_cdata.sk_tx_prod;
	for (i = 0; i < nseg; i++) {
		f = &sc_if->sk_rdata.sk_tx_ring[frag];
		f->sk_data_lo = htole32(SK_ADDR_LO(txsegs[i].ds_addr));
		f->sk_data_hi = htole32(SK_ADDR_HI(txsegs[i].ds_addr));
		sk_ctl = txsegs[i].ds_len | cflags;
		if (i == 0) {
			if (cflags == SK_OPCODE_CSUM)
				sk_txcksum(sc_if->sk_ifp, m, f);
			sk_ctl |= SK_TXCTL_FIRSTFRAG;
		} else
			sk_ctl |= SK_TXCTL_OWN;
		f->sk_ctl = htole32(sk_ctl);
		sc_if->sk_cdata.sk_tx_cnt++;
		SK_INC(frag, SK_TX_RING_CNT);
	}
	sc_if->sk_cdata.sk_tx_prod = frag;
	/* set EOF on the last descriptor */
	frag = (frag + SK_TX_RING_CNT - 1) % SK_TX_RING_CNT;
	f = &sc_if->sk_rdata.sk_tx_ring[frag];
	f->sk_ctl |= htole32(SK_TXCTL_LASTFRAG | SK_TXCTL_EOF_INTR);
	/*
	 * Turn the first descriptor ownership over to the NIC last, so
	 * the hardware cannot start on a partially built chain.
	 */
	f = &sc_if->sk_rdata.sk_tx_ring[si];
	f->sk_ctl |= htole32(SK_TXCTL_OWN);

	/* Move the descriptor from the free list to the busy list. */
	STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txfreeq, tx_q);
	STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txbusyq, txd, tx_q);
	txd->tx_m = m;

	/* sync descriptors */
	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
	    sc_if->sk_cdata.sk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
/*
 * ifnet transmit entry point: take the per-interface lock and defer
 * to the locked variant.
 */
static void
sk_start(if_t ifp)
{
	struct sk_if_softc *sc;

	sc = if_getsoftc(ifp);

	SK_IF_LOCK(sc);
	sk_start_locked(ifp);
	SK_IF_UNLOCK(sc);
}
static void
sk_start_locked(if_t ifp)
{
	struct sk_softc *sc;
	struct sk_if_softc *sc_if;
	struct mbuf *m_head;
	int enq;

	sc_if = if_getsoftc(ifp);
	sc = sc_if->sk_softc;

	SK_IF_LOCK_ASSERT(sc_if);

	/*
	 * Drain the interface send queue into the Tx ring until either
	 * runs out; one descriptor is always left unused (ring considered
	 * full at SK_TX_RING_CNT - 1).
	 */
	for (enq = 0; !if_sendq_empty(ifp) &&
	    sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 1; ) {
		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sk_encap(sc_if, &m_head)) {
			/* NULL means sk_encap() already freed the mbuf. */
			if (m_head == NULL)
				break;
			if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Transmit */
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

		/* Set a timeout in case the chip goes out to lunch. */
		sc_if->sk_watchdog_timer = 5;
	}
}
/*
 * Per-second Tx watchdog, run from a callout with the interface lock
 * held.  The timer is armed by the transmit path; when it counts down
 * to zero with frames still outstanding, the chip is assumed wedged
 * and the interface is reinitialized.
 */
static void
sk_watchdog(void *arg)
{
	if_t ifp = arg;
	struct sk_if_softc *sc_if = if_getsoftc(ifp);

	SK_IF_LOCK_ASSERT(sc_if);

	/* Only act when an armed timer expires on this tick. */
	if (sc_if->sk_watchdog_timer != 0 &&
	    --sc_if->sk_watchdog_timer == 0) {
		/*
		 * Reclaim first as there is a possibility of losing Tx
		 * completion interrupts.
		 */
		sk_txeof(sc_if);
		if (sc_if->sk_cdata.sk_tx_cnt != 0) {
			if_printf(sc_if->sk_ifp, "watchdog timeout\n");
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			sk_init_locked(sc_if);
		}
	}

	callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);
}
/*
 * Device shutdown method: quiesce the controller so it is inert
 * across a reboot.
 */
static int
skc_shutdown(device_t dev)
{
	struct sk_softc *sc = device_get_softc(dev);

	SK_LOCK(sc);

	/* Turn off the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);

	/*
	 * Reset the GEnesis controller; doing so should also assert
	 * the resets on the attached XMAC(s).
	 */
	sk_reset(sc);

	SK_UNLOCK(sc);
	return (0);
}
/*
 * Device suspend method: stop whichever of the two ports has an
 * attached interface and mark the controller suspended so the
 * interrupt handler ignores it.
 */
static int
skc_suspend(device_t dev)
{
	struct sk_softc *sc;
	struct sk_if_softc *port_a, *port_b;

	sc = device_get_softc(dev);

	SK_LOCK(sc);

	port_a = sc->sk_if[SK_PORT_A];
	port_b = sc->sk_if[SK_PORT_B];
	if (port_a != NULL && port_a->sk_ifp != NULL)
		sk_stop(port_a);
	if (port_b != NULL && port_b->sk_ifp != NULL)
		sk_stop(port_b);
	sc->sk_suspended = 1;

	SK_UNLOCK(sc);
	return (0);
}
/*
 * Device resume method: reinitialize any port whose interface was
 * administratively up, then clear the suspended flag.
 */
static int
skc_resume(device_t dev)
{
	struct sk_softc *sc;
	struct sk_if_softc *port_a, *port_b;

	sc = device_get_softc(dev);

	SK_LOCK(sc);

	port_a = sc->sk_if[SK_PORT_A];
	port_b = sc->sk_if[SK_PORT_B];
	if (port_a != NULL && port_a->sk_ifp != NULL &&
	    (if_getflags(port_a->sk_ifp) & IFF_UP))
		sk_init_locked(port_a);
	if (port_b != NULL && port_b->sk_ifp != NULL &&
	    (if_getflags(port_b->sk_ifp) & IFF_UP))
		sk_init_locked(port_b);
	sc->sk_suspended = 0;

	SK_UNLOCK(sc);
	return (0);
}
/*
* According to the data sheet from SK-NET GENESIS the hardware can compute
* two Rx checksums at the same time(Each checksum start position is
* programmed in Rx descriptors). However it seems that TCP/UDP checksum
* does not work at least on my Yukon hardware. I tried every possible ways
* to get correct checksum value but couldn't get correct one. So TCP/UDP
* checksum offload was disabled at the moment and only IP checksum offload
* was enabled.
* As normal IP header size is 20 bytes I can't expect it would give an
* increase in throughput. However it seems it doesn't hurt performance in
* my testing. If there is a more detailed information for checksum secret
* of the hardware in question please contact yongari@FreeBSD.org to add
* TCP/UDP checksum offload support.
*/
static __inline void
sk_rxcksum(if_t ifp, struct mbuf *m, u_int32_t csum)
{
	struct ether_header *eh;
	struct ip *ip;
	int32_t hlen, len, pktlen;
	u_int16_t csum1, csum2, ipcsum;

	/*
	 * Derive the IP header checksum from the two hardware checksum
	 * values and mark the mbuf accordingly (IP checksum offload
	 * only; see the comment above this function).  Bail out early
	 * for anything that is not a plain, length-consistent IPv4
	 * frame.
	 */
	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;
	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	/* Hardware supplies two 16-bit partial sums in one 32-bit word. */
	csum1 = htons(csum & 0xffff);
	csum2 = htons((csum >> 16) & 0xffff);
	ipcsum = in_addword(csum1, ~csum2 & 0xffff);
	/* checksum fixup for IP options */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		/*
		 * If the second checksum value is correct we can compute IP
		 * checksum with simple math. Unfortunately the second checksum
		 * value is wrong so we can't verify the checksum from the
		 * value(It seems there is some magic here to get correct
		 * value). If the second checksum value is correct it also
		 * means we can get TCP/UDP checksum here. However, it still
		 * needs pseudo header checksum calculation due to hardware
		 * limitations.
		 */
		return;
	}
	m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
	if (ipcsum == 0xffff)
		m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
}
/*
 * Validate a received frame's hardware status word against its length.
 * Returns non-zero when the frame is good.  GEnesis (XMAC) and Yukon
 * (GMAC) encode their Rx status differently, hence the two branches.
 */
static __inline int
sk_rxvalid(struct sk_softc *sc, u_int32_t stat, u_int32_t len)
{

	if (sc->sk_type == SK_GENESIS)
		return ((stat & XM_RXSTAT_ERRFRAME) != XM_RXSTAT_ERRFRAME &&
		    XM_RXSTAT_BYTES(stat) == len);

	return ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
	    YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
	    YU_RXSTAT_JABBER)) == 0 &&
	    (stat & YU_RXSTAT_RXOK) == YU_RXSTAT_RXOK &&
	    YU_RXSTAT_BYTES(stat) == len);
}
static void
sk_rxeof(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc;
	struct mbuf *m;
	if_t ifp;
	struct sk_rx_desc *cur_rx;
	struct sk_rxdesc *rxd;
	int cons, prog;
	u_int32_t csum, rxstat, sk_ctl;

	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	SK_IF_LOCK_ASSERT(sc_if);

	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
	    sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_POSTREAD);

	/*
	 * Walk the standard Rx ring from the consumer index, handing
	 * completed frames to the stack.  Stops at the first descriptor
	 * still owned by the NIC, or after one full ring's worth.
	 */
	prog = 0;
	for (cons = sc_if->sk_cdata.sk_rx_cons; prog < SK_RX_RING_CNT;
	    prog++, SK_INC(cons, SK_RX_RING_CNT)) {
		cur_rx = &sc_if->sk_rdata.sk_rx_ring[cons];
		sk_ctl = le32toh(cur_rx->sk_ctl);
		if ((sk_ctl & SK_RXCTL_OWN) != 0)
			break;
		rxd = &sc_if->sk_cdata.sk_rxdesc[cons];
		rxstat = le32toh(cur_rx->sk_xmac_rxstat);
		/*
		 * Require a complete single-descriptor frame with valid
		 * status and a sane length; otherwise count an input
		 * error and recycle the buffer in place.
		 */
		if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
		    SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
		    SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
		    SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
		    SK_RXBYTES(sk_ctl) > SK_MAX_FRAMELEN ||
		    sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			sk_discard_rxbuf(sc_if, cons);
			continue;
		}
		m = rxd->rx_m;
		csum = le32toh(cur_rx->sk_csum);
		/* Refill the slot first; on failure keep the old buffer. */
		if (sk_newbuf(sc_if, cons) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			/* reuse old buffer */
			sk_discard_rxbuf(sc_if, cons);
			continue;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
			sk_rxcksum(ifp, m, csum);
		/* Drop the lock while the stack processes the packet. */
		SK_IF_UNLOCK(sc_if);
		if_input(ifp, m);
		SK_IF_LOCK(sc_if);
	}

	if (prog > 0) {
		sc_if->sk_cdata.sk_rx_cons = cons;
		bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
		    sc_if->sk_cdata.sk_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
static void
sk_jumbo_rxeof(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc;
	struct mbuf *m;
	if_t ifp;
	struct sk_rx_desc *cur_rx;
	struct sk_rxdesc *jrxd;
	int cons, prog;
	u_int32_t csum, rxstat, sk_ctl;

	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	SK_IF_LOCK_ASSERT(sc_if);

	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
	    sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_POSTREAD);

	/*
	 * Jumbo-frame twin of sk_rxeof(): walk the jumbo Rx ring,
	 * passing completed frames up the stack.  Differs only in the
	 * ring/descriptor set used and the SK_JUMBO_FRAMELEN length
	 * bound.
	 */
	prog = 0;
	for (cons = sc_if->sk_cdata.sk_jumbo_rx_cons;
	    prog < SK_JUMBO_RX_RING_CNT;
	    prog++, SK_INC(cons, SK_JUMBO_RX_RING_CNT)) {
		cur_rx = &sc_if->sk_rdata.sk_jumbo_rx_ring[cons];
		sk_ctl = le32toh(cur_rx->sk_ctl);
		if ((sk_ctl & SK_RXCTL_OWN) != 0)
			break;
		jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[cons];
		rxstat = le32toh(cur_rx->sk_xmac_rxstat);
		/* Reject incomplete frames or bad status/length. */
		if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
		    SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
		    SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
		    SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
		    SK_RXBYTES(sk_ctl) > SK_JUMBO_FRAMELEN ||
		    sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			sk_discard_jumbo_rxbuf(sc_if, cons);
			continue;
		}
		m = jrxd->rx_m;
		csum = le32toh(cur_rx->sk_csum);
		/* Refill the slot first; on failure keep the old buffer. */
		if (sk_jumbo_newbuf(sc_if, cons) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			/* reuse old buffer */
			sk_discard_jumbo_rxbuf(sc_if, cons);
			continue;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
			sk_rxcksum(ifp, m, csum);
		/* Drop the lock while the stack processes the packet. */
		SK_IF_UNLOCK(sc_if);
		if_input(ifp, m);
		SK_IF_LOCK(sc_if);
	}

	if (prog > 0) {
		sc_if->sk_cdata.sk_jumbo_rx_cons = cons;
		bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
		    sc_if->sk_cdata.sk_jumbo_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
static void
sk_txeof(struct sk_if_softc *sc_if)
{
	struct sk_txdesc *txd;
	struct sk_tx_desc *cur_tx;
	if_t ifp;
	u_int32_t idx, sk_ctl;

	ifp = sc_if->sk_ifp;

	txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
	if (txd == NULL)
		return;
	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
	    sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_POSTREAD);
	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	for (idx = sc_if->sk_cdata.sk_tx_cons;; SK_INC(idx, SK_TX_RING_CNT)) {
		if (sc_if->sk_cdata.sk_tx_cnt <= 0)
			break;
		cur_tx = &sc_if->sk_rdata.sk_tx_ring[idx];
		sk_ctl = le32toh(cur_tx->sk_ctl);
		/* Descriptor still owned by the NIC: stop reclaiming. */
		if (sk_ctl & SK_TXCTL_OWN)
			break;
		sc_if->sk_cdata.sk_tx_cnt--;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		/* Only the LASTFRAG descriptor carries the frame's mbuf. */
		if ((sk_ctl & SK_TXCTL_LASTFRAG) == 0)
			continue;
		bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		/* Return the descriptor bookkeeping entry to the free list. */
		STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txbusyq, tx_q);
		STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
		txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
	}
	sc_if->sk_cdata.sk_tx_cons = idx;
	/* Keep the watchdog armed only while frames remain outstanding. */
	sc_if->sk_watchdog_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;

	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
	    sc_if->sk_cdata.sk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
static void
sk_tick(void *xsc_if)
{
	struct sk_if_softc *sc_if;
	struct mii_data *mii;
	if_t ifp;
	int i;

	sc_if = xsc_if;
	ifp = sc_if->sk_ifp;
	mii = device_get_softc(sc_if->sk_miibus);

	/*
	 * Periodic link-recovery poll for GEnesis (XMAC) interfaces.
	 * Reschedules itself while the link is down and stops once the
	 * link is confirmed back up.
	 */
	if (!(if_getflags(ifp) & IFF_UP))
		return;

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		/* Broadcom PHYs have their own link handling. */
		sk_intr_bcom(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link_sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	if (i != 3) {
		/* Link still down: poll again in one second. */
		callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
		return;
	}

	/* Turn the GP0 interrupt back on. */
	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
	SK_XM_READ_2(sc_if, XM_ISR);
	mii_tick(mii);
	callout_stop(&sc_if->sk_tick_ch);
}
/*
 * One-second periodic tick for Yukon interfaces: drive the MII state
 * machine and reschedule.
 */
static void
sk_yukon_tick(void *xsc_if)
{
	struct sk_if_softc *sc_if = xsc_if;

	mii_tick(device_get_softc(sc_if->sk_miibus));
	callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
}
static void
sk_intr_bcom(struct sk_if_softc *sc_if)
{
	struct mii_data *mii;
	if_t ifp;
	int status;

	mii = device_get_softc(sc_if->sk_miibus);
	ifp = sc_if->sk_ifp;

	/*
	 * Service a Broadcom PHY interrupt: track link transitions and
	 * drive the link LED.  The MAC's Tx/Rx paths are paused while
	 * the PHY is poked and re-enabled on the way out.
	 */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	/*
	 * Read the PHY interrupt register to make sure
	 * we clear any pending interrupts.
	 */
	status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
		/* Interface is down: just reinitialize the MAC. */
		sk_init_xmac(sc_if);
		return;
	}

	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
		int lstat;

		lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_AUXSTS);

		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
			/* Link lost: renegotiate and mark it down. */
			mii_mediachg(mii);
			/* Turn off the link LED. */
			SK_IF_WRITE_1(sc_if, 0,
			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
			sc_if->sk_link = 0;
		} else if (status & BRGPHY_ISR_LNK_CHG) {
			/* Link came up: mask PHY interrupts, mark it up. */
			sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
			    BRGPHY_MII_IMR, 0xFF00);
			mii_tick(mii);
			sc_if->sk_link = 1;
			/* Turn on the link LED. */
			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
			    SK_LINKLED_BLINK_OFF);
		} else {
			/* No definitive change: keep polling via sk_tick. */
			mii_tick(mii);
			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
		}
	}

	/* Re-enable the MAC's transmitter and receiver. */
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	return;
}
static void
sk_intr_xmac(struct sk_if_softc *sc_if)
{
	u_int16_t status;

	/*
	 * Service an XMAC (GEnesis) MAC interrupt: start link-resync
	 * polling on GP0/autoneg events and flush the FIFOs on
	 * underrun/overrun.
	 */
	status = SK_XM_READ_2(sc_if, XM_ISR);

	/*
	 * Link has gone down. Start MII tick timeout to
	 * watch for link resync.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
		if (status & XM_ISR_GP0_SET) {
			/* Mask GP0 until sk_tick() confirms link is back. */
			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
		}

		if (status & XM_ISR_AUTONEG_DONE) {
			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
		}
	}

	if (status & XM_IMR_TX_UNDERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);

	if (status & XM_IMR_RX_OVERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);

	/* Re-read ISR (presumably to clear latched conditions — verify). */
	status = SK_XM_READ_2(sc_if, XM_ISR);

	return;
}
static void
sk_intr_yukon(struct sk_if_softc *sc_if)
{
u_int8_t status;
status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
/* RX overrun */
if ((status & SK_GMAC_INT_RX_OVER) != 0) {
SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
SK_RFCTL_RX_FIFO_OVER);
}
/* TX underrun */
if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
SK_TFCTL_TX_FIFO_UNDER);
}
}
/*
 * Shared interrupt handler for the controller.  Loops on the interrupt
 * source register, servicing Rx/Tx/MAC events for both ports, until no
 * enabled sources remain, then re-arms the interrupt mask and kicks any
 * pending transmits.
 */
static void
sk_intr(void *xsc)
{
	struct sk_softc *sc = xsc;
	struct sk_if_softc *sc_if0, *sc_if1;
	if_t ifp0 = NULL, ifp1 = NULL;
	u_int32_t status;

	SK_LOCK(sc);

	status = CSR_READ_4(sc, SK_ISSR);
	/* 0xffffffff indicates a detached/powered-down device. */
	if (status == 0 || status == 0xffffffff || sc->sk_suspended)
		goto done_locked;

	sc_if0 = sc->sk_if[SK_PORT_A];
	sc_if1 = sc->sk_if[SK_PORT_B];

	if (sc_if0 != NULL)
		ifp0 = sc_if0->sk_ifp;
	if (sc_if1 != NULL)
		ifp1 = sc_if1->sk_ifp;

	for (; (status &= sc->sk_intrmask) != 0;) {
		/* Handle receive interrupts first. */
		if (status & SK_ISR_RX1_EOF) {
			if (if_getmtu(ifp0) > SK_MAX_FRAMELEN)
				sk_jumbo_rxeof(sc_if0);
			else
				sk_rxeof(sc_if0);
			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
		}
		if (status & SK_ISR_RX2_EOF) {
			/*
			 * Fix: select the Rx handler by MTU, matching the
			 * port A branch above.  The previous code tested
			 * if_getflags(ifp1) > SK_MAX_FRAMELEN, comparing
			 * the interface flag bits against a frame length,
			 * so the wrong Rx ring could be serviced.
			 */
			if (if_getmtu(ifp1) > SK_MAX_FRAMELEN)
				sk_jumbo_rxeof(sc_if1);
			else
				sk_rxeof(sc_if1);
			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
		}

		/* Then transmit interrupts. */
		if (status & SK_ISR_TX1_S_EOF) {
			sk_txeof(sc_if0);
			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF);
		}
		if (status & SK_ISR_TX2_S_EOF) {
			sk_txeof(sc_if1);
			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF);
		}

		/* Then MAC interrupts. */
		if (status & SK_ISR_MAC1 &&
		    if_getdrvflags(ifp0) & IFF_DRV_RUNNING) {
			if (sc->sk_type == SK_GENESIS)
				sk_intr_xmac(sc_if0);
			else
				sk_intr_yukon(sc_if0);
		}

		if (status & SK_ISR_MAC2 &&
		    if_getdrvflags(ifp1) & IFF_DRV_RUNNING) {
			if (sc->sk_type == SK_GENESIS)
				sk_intr_xmac(sc_if1);
			else
				sk_intr_yukon(sc_if1);
		}

		if (status & SK_ISR_EXTERNAL_REG) {
			if (ifp0 != NULL &&
			    sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
				sk_intr_bcom(sc_if0);

			if (ifp1 != NULL &&
			    sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
				sk_intr_bcom(sc_if1);
		}
		status = CSR_READ_4(sc, SK_ISSR);
	}

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Resume any transmits that stalled while interrupts were pending. */
	if (ifp0 != NULL && !if_sendq_empty(ifp0))
		sk_start_locked(ifp0);
	if (ifp1 != NULL && !if_sendq_empty(ifp1))
		sk_start_locked(ifp1);

done_locked:
	SK_UNLOCK(sc);
}
static void
sk_init_xmac(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc;
	if_t ifp;
	u_int16_t eaddr[(ETHER_ADDR_LEN+1)/2];
	static const struct sk_bcom_hack bhack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	{ 0, 0 } };

	SK_IF_LOCK_ASSERT(sc_if);

	/*
	 * Bring the XMAC (GEnesis MAC) out of reset and program it:
	 * station address, frame filtering, FIFO thresholds, interrupt
	 * mask, and the chip-revision-specific MAC arbiter timers.
	 */
	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Reset the XMAC's internal state. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

	/* Save the XMAC II revision */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		int			i = 0;
		u_int32_t		val;

		/* Take PHY out of reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A)
			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
		else
			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
		sk_win_write_4(sc, SK_GPIO, val);

		/* Enable GMII mode on the XMAC. */
		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
		DELAY(10000);
		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_IMR, 0xFFF0);

		/*
		 * Early versions of the BCM5400 apparently have
		 * a bug that requires them to have their reserved
		 * registers initialized to some magic values. I don't
		 * know what the numbers do, I'm just the messenger.
		 */
		if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
		    == 0x6041) {
			while(bhack[i].reg) {
				sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
				    bhack[i].reg, bhack[i].val);
				i++;
			}
		}
	}

	/* Set station address */
	bcopy(if_getlladdr(sc_if->sk_ifp), eaddr, ETHER_ADDR_LEN);
	SK_XM_WRITE_2(sc_if, XM_PAR0, eaddr[0]);
	SK_XM_WRITE_2(sc_if, XM_PAR1, eaddr[1]);
	SK_XM_WRITE_2(sc_if, XM_PAR2, eaddr[2]);
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	if (if_getflags(ifp) & IFF_BROADCAST) {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	} else {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	}

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	if (if_getmtu(ifp) > SK_MAX_FRAMELEN) {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
		    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
		    XM_MODE_RX_INRANGELEN);
		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
	} else
		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set Rx filter */
	sk_rxfilter_genesis(sc_if);

	/* Clear and enable interrupts */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter with revision-specific timing values. */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	sc_if->sk_link = 1;

	return;
}
static void
sk_init_yukon(struct sk_if_softc *sc_if)
{
	u_int32_t		phy, v;
	u_int16_t		reg;
	struct sk_softc		*sc;
	if_t			ifp;
	u_int8_t		*eaddr;
	int			i;

	SK_IF_LOCK_ASSERT(sc_if);

	/*
	 * Reset and program the Yukon GMAC/GPHY: PHY configuration,
	 * MIB counter clearing, station/flow-control/virtual addresses,
	 * Rx filter, and the MAC FIFO flush setup.
	 */
	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	if (sc->sk_type == SK_YUKON_LITE &&
	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
		/*
		 * Workaround code for COMA mode, set PHY reset.
		 * Otherwise it will not correctly take chip out of
		 * powerdown (coma)
		 */
		v = sk_win_read_4(sc, SK_GPIO);
		v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
		sk_win_write_4(sc, SK_GPIO, v);
	}

	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);

	if (sc->sk_type == SK_YUKON_LITE &&
	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
		/*
		 * Workaround code for COMA mode, clear PHY reset
		 */
		v = sk_win_read_4(sc, SK_GPIO);
		v |= SK_GPIO_DIR9;
		v &= ~SK_GPIO_DAT9;
		sk_win_write_4(sc, SK_GPIO, v);
	}

	/* Build PHY config for copper or fiber media as appropriate. */
	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;

	if (sc->sk_coppertype)
		phy |= SK_GPHY_COPPER;
	else
		phy |= SK_GPHY_FIBER;

	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

	/* unused read of the interrupt source register */
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
	/* MIB Counter Clear Mode set */
	reg |= YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

	/* transmit parameter register */
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );

	/* serial mode register */
	reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
	if (if_getmtu(ifp) > SK_MAX_FRAMELEN)
		reg |= YU_SMR_MFL_JUMBO;
	SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);

	/* Setup Yukon's station address */
	eaddr = if_getlladdr(sc_if->sk_ifp);
	for (i = 0; i < 3; i++)
		SK_YU_WRITE_2(sc_if, SK_MAC0_0 + i * 4,
		    eaddr[i * 2] | eaddr[i * 2 + 1] << 8);
	/* Set GMAC source address of flow control. */
	for (i = 0; i < 3; i++)
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
		    eaddr[i * 2] | eaddr[i * 2 + 1] << 8);
	/* Set GMAC virtual address. */
	for (i = 0; i < 3; i++)
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4,
		    eaddr[i * 2] | eaddr[i * 2 + 1] << 8);

	/* Set Rx filter */
	sk_rxfilter_yukon(sc_if);

	/* enable interrupt mask for counter overflows */
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO Flush Mask */
	v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
	    YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
	    YU_RXSTAT_JABBER;
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);

	/* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
	if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
		v = SK_TFCTL_OPERATION_ON;
	else
		v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);

	/* Increase flush threshold to 64 bytes */
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
	    SK_RFCTL_FIFO_THRESHOLD + 1);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
}
/*
* Note that to properly initialize any part of the GEnesis chip,
* you first have to take it out of reset mode.
*/
/*
 * Locked wrapper around sk_init_locked(); this is the if_init entry
 * point registered with the ifnet layer.
 */
static void
sk_init(void *xsc)
{
	struct sk_if_softc *sc_if;

	sc_if = (struct sk_if_softc *)xsc;
	SK_IF_LOCK(sc_if);
	sk_init_locked(sc_if);
	SK_IF_UNLOCK(sc_if);
}
/*
 * Bring the interface up: reset the chip, program LEDs, descriptor poll
 * timer, MAC(s), FIFOs, RAM buffers and BMUs, initialize the descriptor
 * rings, then enable interrupts and start the transmitter.
 *
 * Must be called with the per-interface lock held.  Returns silently if
 * the interface is already running or if Rx ring setup fails.
 */
static void
sk_init_locked(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc;
	if_t ifp;
	struct mii_data *mii;
	u_int16_t reg;
	u_int32_t imr;
	int error;

	SK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->sk_ifp;
	sc = sc_if->sk_softc;
	mii = device_get_softc(sc_if->sk_miibus);

	/* Already up -- nothing to do. */
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	/* Cancel pending I/O and free all RX/TX buffers. */
	sk_stop(sc_if);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure LINK_SYNC LED */
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			SK_LINKLED_LINKSYNC_ON);

		/* Configure RX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
			SK_RXLEDCTL_COUNTER_START);

		/* Configure TX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
			SK_TXLEDCTL_COUNTER_START);
	}

	/*
	 * Configure descriptor poll timer
	 *
	 * SK-NET GENESIS data sheet says that possibility of losing Start
	 * transmit command due to CPU/cache related interim storage problems
	 * under certain conditions. The document recommends a polling
	 * mechanism to send a Start transmit command to initiate transfer
	 * of ready descriptors regulary. To cope with this issue sk(4) now
	 * enables descriptor poll timer to initiate descriptor processing
	 * periodically as defined by SK_DPT_TIMER_MAX. However sk(4) still
	 * issue SK_TXBMU_TX_START to Tx BMU to get fast execution of Tx
	 * command instead of waiting for next descriptor polling time.
	 * The same rule may apply to Rx side too but it seems that is not
	 * needed at the moment.
	 * Since sk(4) uses descriptor polling as a last resort there is no
	 * need to set smaller polling time than maximum allowable one.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		break;
	}
	mii_mediachg(mii);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure MAC FIFOs */
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
	}

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

	/* Configure RAMbuffers: take them out of reset and program the
	 * per-port RAM window that was carved out at attach time. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs: point the Rx queue at the standard or jumbo
	 * ring depending on the configured MTU. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	if (if_getmtu(ifp) > SK_MAX_FRAMELEN) {
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
		    SK_ADDR_LO(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
		    SK_ADDR_HI(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
	} else {
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
		    SK_ADDR_LO(SK_RX_RING_ADDR(sc_if, 0)));
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
		    SK_ADDR_HI(SK_RX_RING_ADDR(sc_if, 0)));
	}

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
	    SK_ADDR_LO(SK_TX_RING_ADDR(sc_if, 0)));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI,
	    SK_ADDR_HI(SK_TX_RING_ADDR(sc_if, 0)));

	/* Init descriptors */
	if (if_getmtu(ifp) > SK_MAX_FRAMELEN)
		error = sk_init_jumbo_rx_ring(sc_if);
	else
		error = sk_init_rx_ring(sc_if);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "initialization failed: no memory for rx buffers\n");
		sk_stop(sc_if);
		return;
	}
	sk_init_tx_ring(sc_if);

	/* Set interrupt moderation if changed via sysctl. */
	imr = sk_win_read_4(sc, SK_IMTIMERINIT);
	if (imr != SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)) {
		sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
		    sc->sk_int_ticks));
		if (bootverbose)
			device_printf(sc_if->sk_if_dev,
			    "interrupt moderation is %d us.\n",
			    sc->sk_int_mod);
	}

	/* Configure interrupt handling: the read of SK_ISSR clears any
	 * stale interrupt status before unmasking this port. */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	switch(sc->sk_type) {
	case SK_GENESIS:
		/* Enable XMACs TX and RX state machines */
		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
		SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
#if 0
		/* XXX disable 100Mbps and full duplex mode? */
		reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS);
#endif
		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
	}

	/* Activate descriptor polling timer */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
	/* start transfer of Tx descriptors */
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	/* Yukon parts need the periodic statistics/PHY tick. */
	switch (sc->sk_type) {
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
		break;
	}

	callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);

	return;
}
/*
 * Quiesce the interface: stop timers, halt the Tx/Rx BMUs, reset the
 * MAC/FIFO blocks, mask this port's interrupts and free every mbuf
 * still attached to the Rx, jumbo-Rx and Tx rings.
 *
 * Must be called with the per-interface lock held.
 */
static void
sk_stop(struct sk_if_softc *sc_if)
{
	int i;
	struct sk_softc *sc;
	struct sk_txdesc *txd;
	struct sk_rxdesc *rxd;
	struct sk_rxdesc *jrxd;
	if_t ifp;
	u_int32_t val;

	SK_IF_LOCK_ASSERT(sc_if);
	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	callout_stop(&sc_if->sk_tick_ch);
	callout_stop(&sc_if->sk_watchdog_ch);

	/* stop Tx descriptor polling timer */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
	/* stop transfer of Tx descriptors; poll (busy-wait, 1 us steps)
	 * until the BMU acknowledges by clearing the STOP bit. */
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
	for (i = 0; i < SK_TIMEOUT; i++) {
		val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
		if ((val & SK_TXBMU_TX_STOP) == 0)
			break;
		DELAY(1);
	}
	if (i == SK_TIMEOUT)
		device_printf(sc_if->sk_if_dev,
		    "can not stop transfer of Tx descriptor\n");
	/* stop transfer of Rx descriptors, same handshake as above */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
	for (i = 0; i < SK_TIMEOUT; i++) {
		val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
		if ((val & SK_RXBMU_RX_STOP) == 0)
			break;
		DELAY(1);
	}
	if (i == SK_TIMEOUT)
		device_printf(sc_if->sk_if_dev,
		    "can not stop transfer of Rx descriptor\n");

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		/* Put PHY back into reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A) {
			val |= SK_GPIO_DIR0;
			val &= ~SK_GPIO_DAT0;
		} else {
			val |= SK_GPIO_DIR2;
			val &= ~SK_GPIO_DAT2;
		}
		sk_win_write_4(sc, SK_GPIO, val);
	}

	/* Turn off various components of this interface. */
	/* NOTE(review): this XMAC reset is issued for all chip types,
	 * including Yukon -- presumably harmless there; confirm against
	 * the hardware documentation before changing. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	switch (sc->sk_type) {
	case SK_GENESIS:
		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
		SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
		break;
	}
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/* Disable interrupts for this port only; the other port's mask
	 * bits in the shared IMR are preserved. */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_INTRS1;
	else
		sc->sk_intrmask &= ~SK_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		rxd = &sc_if->sk_cdata.sk_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
		if (jrxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag,
			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
			    jrxd->rx_dmamap);
			m_freem(jrxd->rx_m);
			jrxd->rx_m = NULL;
		}
	}
	for (i = 0; i < SK_TX_RING_CNT; i++) {
		txd = &sc_if->sk_cdata.sk_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING|IFF_DRV_OACTIVE));

	return;
}
/*
 * Generic sysctl handler for an integer tunable constrained to the
 * inclusive range [low, high].  arg1 points at the backing int; new
 * values outside the range are rejected with EINVAL.
 */
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;
	return (0);
}
/* Sysctl handler for the interrupt-moderation tunable, bounded to
 * [SK_IM_MIN, SK_IM_MAX] microseconds. */
static int
sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX));
}
diff --git a/sys/dev/smc/if_smc.c b/sys/dev/smc/if_smc.c
index cc7a7b1bb12b..12e903d1af9b 100644
--- a/sys/dev/smc/if_smc.c
+++ b/sys/dev/smc/if_smc.c
@@ -1,1347 +1,1343 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2008 Benno Rice. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
/*
* Driver for SMSC LAN91C111, may work for older variants.
*/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_mib.h>
#include <net/if_media.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif
#include <net/bpf.h>
#include <net/bpfdesc.h>
#include <dev/smc/if_smcreg.h>
#include <dev/smc/if_smcvar.h>
#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>
#include "miibus_if.h"
#define SMC_LOCK(sc) mtx_lock(&(sc)->smc_mtx)
#define SMC_UNLOCK(sc) mtx_unlock(&(sc)->smc_mtx)
#define SMC_ASSERT_LOCKED(sc) mtx_assert(&(sc)->smc_mtx, MA_OWNED)
#define SMC_INTR_PRIORITY 0
#define SMC_RX_PRIORITY 5
#define SMC_TX_PRIORITY 10
static const char *smc_chip_ids[16] = {
NULL, NULL, NULL,
/* 3 */ "SMSC LAN91C90 or LAN91C92",
/* 4 */ "SMSC LAN91C94",
/* 5 */ "SMSC LAN91C95",
/* 6 */ "SMSC LAN91C96",
/* 7 */ "SMSC LAN91C100",
/* 8 */ "SMSC LAN91C100FD",
/* 9 */ "SMSC LAN91C110FD or LAN91C111FD",
NULL, NULL, NULL,
NULL, NULL, NULL
};
static void smc_init(void *);
static void smc_start(if_t);
static void smc_stop(struct smc_softc *);
static int smc_ioctl(if_t, u_long, caddr_t);
static void smc_init_locked(struct smc_softc *);
static void smc_start_locked(if_t);
static void smc_reset(struct smc_softc *);
static int smc_mii_ifmedia_upd(if_t);
static void smc_mii_ifmedia_sts(if_t, struct ifmediareq *);
static void smc_mii_tick(void *);
static void smc_mii_mediachg(struct smc_softc *);
static int smc_mii_mediaioctl(struct smc_softc *, struct ifreq *, u_long);
static void smc_task_intr(void *, int);
static void smc_task_rx(void *, int);
static void smc_task_tx(void *, int);
static driver_filter_t smc_intr;
static callout_func_t smc_watchdog;
#ifdef DEVICE_POLLING
static poll_handler_t smc_poll;
#endif
/*
* MII bit-bang glue
*/
static uint32_t smc_mii_bitbang_read(device_t);
static void smc_mii_bitbang_write(device_t, uint32_t);
static const struct mii_bitbang_ops smc_mii_bitbang_ops = {
smc_mii_bitbang_read,
smc_mii_bitbang_write,
{
MGMT_MDO, /* MII_BIT_MDO */
MGMT_MDI, /* MII_BIT_MDI */
MGMT_MCLK, /* MII_BIT_MDC */
MGMT_MDOE, /* MII_BIT_DIR_HOST_PHY */
0, /* MII_BIT_DIR_PHY_HOST */
}
};
/*
 * Switch the chip's register window to the given bank by writing the
 * Bank Select Register.  Barriers on both sides ensure the bank switch
 * is ordered with respect to surrounding register accesses.
 */
static __inline void
smc_select_bank(struct smc_softc *sc, uint16_t bank)
{

	bus_barrier(sc->smc_reg, BSR, 2,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	bus_write_2(sc->smc_reg, BSR, bank & BSR_BANK_MASK);
	bus_barrier(sc->smc_reg, BSR, 2,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
/* Never call this when not in bank 2. */
/*
 * Spin (unbounded) until the MMU command register reports the previous
 * MMU command has completed.  MMUCR lives in bank 2, hence the KASSERT.
 */
static __inline void
smc_mmu_wait(struct smc_softc *sc)
{

	KASSERT((bus_read_2(sc->smc_reg, BSR) &
	    BSR_BANK_MASK) == 2, ("%s: smc_mmu_wait called when not in bank 2",
	    device_get_nameunit(sc->smc_dev)));
	while (bus_read_2(sc->smc_reg, MMUCR) & MMUCR_BUSY)
		;
}
/* 8-bit register read at the given offset in the current bank. */
static __inline uint8_t
smc_read_1(struct smc_softc *sc, bus_size_t offset)
{

	return (bus_read_1(sc->smc_reg, offset));
}
/* 8-bit register write at the given offset in the current bank. */
static __inline void
smc_write_1(struct smc_softc *sc, bus_size_t offset, uint8_t val)
{

	bus_write_1(sc->smc_reg, offset, val);
}
/* 16-bit register read at the given offset in the current bank. */
static __inline uint16_t
smc_read_2(struct smc_softc *sc, bus_size_t offset)
{

	return (bus_read_2(sc->smc_reg, offset));
}
/* 16-bit register write at the given offset in the current bank. */
static __inline void
smc_write_2(struct smc_softc *sc, bus_size_t offset, uint16_t val)
{

	bus_write_2(sc->smc_reg, offset, val);
}
/* Burst-read 'count' 16-bit words from a single register (FIFO-style). */
static __inline void
smc_read_multi_2(struct smc_softc *sc, bus_size_t offset, uint16_t *datap,
    bus_size_t count)
{

	bus_read_multi_2(sc->smc_reg, offset, datap, count);
}
/* Burst-write 'count' 16-bit words to a single register (FIFO-style). */
static __inline void
smc_write_multi_2(struct smc_softc *sc, bus_size_t offset, uint16_t *datap,
    bus_size_t count)
{

	bus_write_multi_2(sc->smc_reg, offset, datap, count);
}
/* Issue a bus-space barrier over [offset, offset+length) with 'flags'. */
static __inline void
smc_barrier(struct smc_softc *sc, bus_size_t offset, bus_size_t length,
    int flags)
{

	bus_barrier(sc->smc_reg, offset, length, flags);
}
/*
 * Probe for an SMSC 91Cxx chip: temporarily map 16 bytes of I/O (or
 * memory) space, verify the BSR identification pattern (twice, across a
 * bank switch, to weed out floating buses), then match the chip
 * revision against the known-ID table.  The resource is always released
 * before returning; ENXIO means "not our chip".
 */
int
smc_probe(device_t dev)
{
	int	rid, type, error;
	uint16_t	val;
	struct	smc_softc *sc;
	struct resource *reg;

	sc = device_get_softc(dev);
	rid = 0;
	type = SYS_RES_IOPORT;
	error = 0;

	if (sc->smc_usemem)
		type = SYS_RES_MEMORY;

	reg = bus_alloc_resource_anywhere(dev, type, &rid, 16, RF_ACTIVE);
	if (reg == NULL) {
		if (bootverbose)
			device_printf(dev,
			    "could not allocate I/O resource for probe\n");
		return (ENXIO);
	}

	/* Check for the identification value in the BSR. */
	val = bus_read_2(reg, BSR);
	if ((val & BSR_IDENTIFY_MASK) != BSR_IDENTIFY) {
		if (bootverbose)
			device_printf(dev, "identification value not in BSR\n");
		error = ENXIO;
		goto done;
	}

	/*
	 * Try switching banks and make sure we still get the identification
	 * value.
	 */
	bus_write_2(reg, BSR, 0);
	val = bus_read_2(reg, BSR);
	if ((val & BSR_IDENTIFY_MASK) != BSR_IDENTIFY) {
		if (bootverbose)
			device_printf(dev,
			    "identification value not in BSR after write\n");
		error = ENXIO;
		goto done;
	}

#if 0
	/* Check the BAR. */
	bus_write_2(reg, BSR, 1);
	val = bus_read_2(reg, BAR);
	val = BAR_ADDRESS(val);
	if (rman_get_start(reg) != val) {
		if (bootverbose)
			device_printf(dev, "BAR address %x does not match "
			    "I/O resource address %lx\n", val,
			    rman_get_start(reg));
		error = ENXIO;
		goto done;
	}
#endif

	/* Compare REV against known chip revisions. */
	bus_write_2(reg, BSR, 3);
	val = bus_read_2(reg, REV);
	val = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT;
	if (smc_chip_ids[val] == NULL) {
		if (bootverbose)
			device_printf(dev, "Unknown chip revision: %d\n", val);
		error = ENXIO;
		goto done;
	}

	device_set_desc(dev, smc_chip_ids[val]);

done:
	bus_release_resource(dev, type, rid, reg);
	return (error);
}
/*
 * Attach the device: allocate the ifnet and bus resources, reset the
 * chip, read the chip/revision IDs and the station address from EEPROM
 * shadow registers, attach the MII bus on MII-capable parts, register
 * with the network stack and wire up the interrupt.  On any failure the
 * partially-initialized state is torn down via smc_detach().
 */
int
smc_attach(device_t dev)
{
	int	type, error;
	uint16_t	val;
	u_char	eaddr[ETHER_ADDR_LEN];
	struct smc_softc	*sc;
	if_t ifp;

	sc = device_get_softc(dev);
	error = 0;

	sc->smc_dev = dev;

	/* if_alloc(9) cannot fail, so no NULL check is needed. */
	ifp = sc->smc_ifp = if_alloc(IFT_ETHER);

	mtx_init(&sc->smc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	/* Set up watchdog callout. */
	callout_init_mtx(&sc->smc_watchdog, &sc->smc_mtx, 0);

	type = SYS_RES_IOPORT;
	if (sc->smc_usemem)
		type = SYS_RES_MEMORY;

	sc->smc_reg_rid = 0;
	sc->smc_reg = bus_alloc_resource_anywhere(dev, type, &sc->smc_reg_rid,
	    16, RF_ACTIVE);
	if (sc->smc_reg == NULL) {
		error = ENXIO;
		goto done;
	}

	sc->smc_irq = bus_alloc_resource_anywhere(dev, SYS_RES_IRQ,
	    &sc->smc_irq_rid, 1, RF_ACTIVE | RF_SHAREABLE);
	if (sc->smc_irq == NULL) {
		error = ENXIO;
		goto done;
	}

	SMC_LOCK(sc);
	smc_reset(sc);
	SMC_UNLOCK(sc);

	/* Chip and revision IDs live in the bank-3 REV register. */
	smc_select_bank(sc, 3);
	val = smc_read_2(sc, REV);
	sc->smc_chip = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT;
	/*
	 * Bug fix: the revision field must be extracted with a bitwise
	 * AND; the previous code multiplied ('val * REV_REV_MASK'),
	 * yielding a garbage revision number.
	 */
	sc->smc_rev = (val & REV_REV_MASK) >> REV_REV_SHIFT;
	if (bootverbose)
		device_printf(dev, "revision %x\n", sc->smc_rev);

	callout_init_mtx(&sc->smc_mii_tick_ch, &sc->smc_mtx,
	    CALLOUT_RETURNUNLOCKED);
	/* Only the 91C110FD/91C111FD and later have an MII-attached PHY. */
	if (sc->smc_chip >= REV_CHIP_91110FD) {
		(void)mii_attach(dev, &sc->smc_miibus, ifp,
		    smc_mii_ifmedia_upd, smc_mii_ifmedia_sts, BMSR_DEFCAPMASK,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);
		if (sc->smc_miibus != NULL) {
			sc->smc_mii_tick = smc_mii_tick;
			sc->smc_mii_mediachg = smc_mii_mediachg;
			sc->smc_mii_mediaioctl = smc_mii_mediaioctl;
		}
	}

	/* Station address is stored byte-wise in the bank-1 IAR registers. */
	smc_select_bank(sc, 1);
	eaddr[0] = smc_read_1(sc, IAR0);
	eaddr[1] = smc_read_1(sc, IAR1);
	eaddr[2] = smc_read_1(sc, IAR2);
	eaddr[3] = smc_read_1(sc, IAR3);
	eaddr[4] = smc_read_1(sc, IAR4);
	eaddr[5] = smc_read_1(sc, IAR5);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setsoftc(ifp, sc);
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setinitfn(ifp, smc_init);
	if_setioctlfn(ifp, smc_ioctl);
	if_setstartfn(ifp, smc_start);
	if_setsendqlen(ifp, ifqmaxlen);
	if_setsendqready(ifp);

	if_setcapabilities(ifp, if_getcapenable(ifp) );

#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif

	ether_ifattach(ifp, eaddr);

	/* Set up taskqueue */
	TASK_INIT(&sc->smc_intr, SMC_INTR_PRIORITY, smc_task_intr, ifp);
	NET_TASK_INIT(&sc->smc_rx, SMC_RX_PRIORITY, smc_task_rx, ifp);
	TASK_INIT(&sc->smc_tx, SMC_TX_PRIORITY, smc_task_tx, ifp);
	sc->smc_tq = taskqueue_create_fast("smc_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->smc_tq);
	taskqueue_start_threads(&sc->smc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->smc_dev));

	/* Mask all interrupts. */
	sc->smc_mask = 0;
	smc_write_1(sc, MSK, 0);

	/* Wire up interrupt */
	error = bus_setup_intr(dev, sc->smc_irq,
	    INTR_TYPE_NET|INTR_MPSAFE, smc_intr, NULL, sc, &sc->smc_ih);
	if (error != 0)
		goto done;

done:
	if (error != 0)
		smc_detach(dev);
	return (error);
}
/*
 * Detach the device, also used as the error-unwind path of smc_attach():
 * every teardown step is guarded so it is safe to call with a partially
 * initialized softc.  Always returns 0.
 */
int
smc_detach(device_t dev)
{
	int	type;
	struct smc_softc	*sc;

	sc = device_get_softc(dev);
	SMC_LOCK(sc);
	smc_stop(sc);
	SMC_UNLOCK(sc);

	if (sc->smc_ifp != NULL) {
		ether_ifdetach(sc->smc_ifp);
	}

	callout_drain(&sc->smc_watchdog);
	callout_drain(&sc->smc_mii_tick_ch);

#ifdef DEVICE_POLLING
	/*
	 * Bug fix: the original expression "sc->smc_if_getcapenable(ifp)"
	 * was not valid C (no such softc member, and 'ifp' is not in
	 * scope here); use the ifnet accessor on sc->smc_ifp instead.
	 */
	if (if_getcapenable(sc->smc_ifp) & IFCAP_POLLING)
		ether_poll_deregister(sc->smc_ifp);
#endif

	if (sc->smc_ih != NULL)
		bus_teardown_intr(sc->smc_dev, sc->smc_irq, sc->smc_ih);

	if (sc->smc_tq != NULL) {
		taskqueue_drain(sc->smc_tq, &sc->smc_intr);
		taskqueue_drain(sc->smc_tq, &sc->smc_rx);
		taskqueue_drain(sc->smc_tq, &sc->smc_tx);
		taskqueue_free(sc->smc_tq);
		sc->smc_tq = NULL;
	}

	if (sc->smc_ifp != NULL) {
		if_free(sc->smc_ifp);
	}

	if (sc->smc_miibus != NULL) {
		device_delete_child(sc->smc_dev, sc->smc_miibus);
		bus_generic_detach(sc->smc_dev);
	}

	if (sc->smc_reg != NULL) {
		type = SYS_RES_IOPORT;
		if (sc->smc_usemem)
			type = SYS_RES_MEMORY;

		bus_release_resource(sc->smc_dev, type, sc->smc_reg_rid,
		    sc->smc_reg);
	}

	if (sc->smc_irq != NULL)
		bus_release_resource(sc->smc_dev, SYS_RES_IRQ, sc->smc_irq_rid,
		   sc->smc_irq);

	if (mtx_initialized(&sc->smc_mtx))
		mtx_destroy(&sc->smc_mtx);

	return (0);
}
/*
 * Core device methods shared by the bus-specific front ends.
 * NOTE(review): there is deliberately no device_probe entry here --
 * presumably the bus attachments (which reuse smc_probe/smc_attach)
 * supply their own probe method; confirm against the front-end drivers.
 */
static device_method_t smc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_attach,	smc_attach),
	DEVMETHOD(device_detach,	smc_detach),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	smc_miibus_readreg),
	DEVMETHOD(miibus_writereg,	smc_miibus_writereg),
	DEVMETHOD(miibus_statchg,	smc_miibus_statchg),

	{ 0, 0 }
};

driver_t smc_driver = {
	"smc",
	smc_methods,
	sizeof(struct smc_softc),
};

DRIVER_MODULE(miibus, smc, miibus_driver, 0, 0);
/*
 * if_start entry point: take the driver lock and hand off to the
 * locked transmit-start routine.
 */
static void
smc_start(if_t ifp)
{
	struct smc_softc *sc = if_getsoftc(ifp);

	SMC_LOCK(sc);
	smc_start_locked(ifp);
	SMC_UNLOCK(sc);
}
/*
 * Dequeue the next packet and ask the chip's MMU for transmit memory.
 * If the allocation completes quickly the TX task is queued; otherwise
 * the ALLOC interrupt is unmasked and the TX task will run when the
 * chip signals completion.  Over-size packets are dropped.
 *
 * Must be called with the driver lock held.
 */
static void
smc_start_locked(if_t ifp)
{
	struct smc_softc	*sc;
	struct mbuf		*m;
	u_int			len, npages, spin_count;

	sc = if_getsoftc(ifp);
	SMC_ASSERT_LOCKED(sc);

	/* A transmit is already in flight. */
	if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE)
		return;
	if (if_sendq_empty(ifp))
		return;

	/*
	 * Grab the next packet.  If it's too big, drop it.
	 */
	m = if_dequeue(ifp);
	len = m_length(m, NULL);
	len += (len & 1);	/* round up to an even byte count */
	if (len > ETHER_MAX_LEN - ETHER_CRC_LEN) {
		if_printf(ifp, "large packet discarded\n");
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return;		/* XXX readcheck? */
	}

	/*
	 * Flag that we're busy.
	 */
	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
	sc->smc_pending = m;

	/*
	 * Work out how many 256 byte "pages" we need.  We have to include the
	 * control data for the packet in this calculation.
	 */
	npages = (len + PKT_CTRL_DATA_LEN) >> 8;
	if (npages == 0)
		npages = 1;

	/*
	 * Request memory.
	 */
	smc_select_bank(sc, 2);
	smc_mmu_wait(sc);
	smc_write_2(sc, MMUCR, MMUCR_CMD_TX_ALLOC | npages);

	/*
	 * Spin briefly to see if the allocation succeeds.
	 */
	spin_count = TX_ALLOC_WAIT_TIME;
	do {
		if (smc_read_1(sc, IST) & ALLOC_INT) {
			smc_write_1(sc, ACK, ALLOC_INT);
			break;
		}
	} while (--spin_count);

	/*
	 * If the allocation is taking too long, unmask the alloc interrupt
	 * and wait.
	 */
	if (spin_count == 0) {
		sc->smc_mask |= ALLOC_INT;
		if ((if_getcapenable(ifp) & IFCAP_POLLING) == 0)
			smc_write_1(sc, MSK, sc->smc_mask);
		return;
	}

	/* Allocation succeeded immediately; copy the packet out now. */
	taskqueue_enqueue(sc->smc_tq, &sc->smc_tx);
}
/*
 * Deferred transmit task: copy the pending mbuf chain into the packet
 * buffer the chip's MMU allocated, queue it for transmission, and kick
 * the transmit watchdog.  If the allocation failed the packet is put
 * back on the send queue and the whole transmit attempt is retried.
 */
static void
smc_task_tx(void *context, int pending)
{
	if_t			ifp;
	struct smc_softc	*sc;
	struct mbuf		*m, *m0;
	u_int			packet, len;
	int			last_len;
	uint8_t			*data;

	(void)pending;
	ifp = (if_t)context;
	sc = if_getsoftc(ifp);

	SMC_LOCK(sc);

	if (sc->smc_pending == NULL) {
		SMC_UNLOCK(sc);
		goto next_packet;
	}

	m = m0 = sc->smc_pending;
	sc->smc_pending = NULL;
	smc_select_bank(sc, 2);

	/*
	 * Check the allocation result.
	 */
	packet = smc_read_1(sc, ARR);

	/*
	 * If the allocation failed, requeue the packet and retry.
	 */
	if (packet & ARR_FAILED) {
		if_sendq_prepend(ifp, m);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		smc_start_locked(ifp);
		SMC_UNLOCK(sc);
		return;
	}

	/*
	 * Tell the device to write to our packet number.
	 */
	smc_write_1(sc, PNR, packet);
	smc_write_2(sc, PTR, 0 | PTR_AUTO_INCR);

	/*
	 * Tell the device how long the packet is (including control data).
	 */
	len = m_length(m, 0);
	len += PKT_CTRL_DATA_LEN;
	smc_write_2(sc, DATA0, 0);	/* status word placeholder */
	smc_write_2(sc, DATA0, len);

	/*
	 * Push the data out to the device.
	 */
	data = NULL;
	last_len = 0;
	for (; m != NULL; m = m->m_next) {
		data = mtod(m, uint8_t *);
		smc_write_multi_2(sc, DATA0, (uint16_t *)data, m->m_len / 2);
		last_len = m->m_len;
	}

	/*
	 * Push out the control byte and the odd byte if needed.
	 */
	if ((len & 1) != 0 && data != NULL)
		smc_write_2(sc, DATA0, (CTRL_ODD << 8) | data[last_len - 1]);
	else
		smc_write_2(sc, DATA0, 0);

	/*
	 * Unmask the TX empty interrupt.
	 */
	sc->smc_mask |= TX_EMPTY_INT;
	if ((if_getcapenable(ifp) & IFCAP_POLLING) == 0)
		smc_write_1(sc, MSK, sc->smc_mask);

	/*
	 * Enqueue the packet.
	 */
	smc_mmu_wait(sc);
	smc_write_2(sc, MMUCR, MMUCR_CMD_ENQUEUE);
	callout_reset(&sc->smc_watchdog, hz * 2, smc_watchdog, sc);

	/*
	 * Finish up.
	 */
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	SMC_UNLOCK(sc);
	BPF_MTAP(ifp, m0);
	m_freem(m0);

next_packet:
	/*
	 * See if there's anything else to do.
	 */
	smc_start(ifp);
}
/*
 * Deferred receive task: drain the chip's RX FIFO into a chain of
 * cluster mbufs, releasing each on-chip packet buffer as it is copied
 * out, then re-enable the receive interrupt and hand the collected
 * packets to the stack outside the driver lock.
 */
static void
smc_task_rx(void *context, int pending)
{
	u_int			packet, status, len;
	uint8_t			*data;
	if_t			ifp;
	struct smc_softc	*sc;
	struct mbuf		*m, *mhead, *mtail;

	(void)pending;
	ifp = (if_t)context;
	sc = if_getsoftc(ifp);
	mhead = mtail = NULL;

	SMC_LOCK(sc);

	packet = smc_read_1(sc, FIFO_RX);
	while ((packet & FIFO_EMPTY) == 0) {
		/*
		 * Grab an mbuf and attach a cluster.
		 */
		MGETHDR(m, M_NOWAIT, MT_DATA);
		if (m == NULL) {
			break;
		}
		if (!(MCLGET(m, M_NOWAIT))) {
			m_freem(m);
			break;
		}

		/*
		 * Point to the start of the packet.
		 */
		smc_select_bank(sc, 2);
		smc_write_1(sc, PNR, packet);
		smc_write_2(sc, PTR, 0 | PTR_READ | PTR_RCV | PTR_AUTO_INCR);

		/*
		 * Grab status and packet length.
		 */
		status = smc_read_2(sc, DATA0);
		len = smc_read_2(sc, DATA0) & RX_LEN_MASK;
		/* Strip the status/length words and CRC; add back the odd
		 * byte when the chip flags an odd-length frame. */
		len -= 6;
		if (status & RX_ODDFRM)
			len += 1;

		/*
		 * Check for errors.
		 */
		if (status & (RX_TOOSHORT | RX_TOOLNG | RX_BADCRC | RX_ALGNERR)) {
			smc_mmu_wait(sc);
			smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			m_freem(m);
			break;
		}

		/*
		 * Set the mbuf up the way we want it.
		 */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len + 2; /* XXX: Is this right? */
		m_adj(m, ETHER_ALIGN);

		/*
		 * Pull the packet out of the device.  Make sure we're in the
		 * right bank first as things may have changed while we were
		 * allocating our mbuf.
		 */
		smc_select_bank(sc, 2);
		smc_write_1(sc, PNR, packet);
		smc_write_2(sc, PTR, 4 | PTR_READ | PTR_RCV | PTR_AUTO_INCR);
		data = mtod(m, uint8_t *);
		smc_read_multi_2(sc, DATA0, (uint16_t *)data, len >> 1);
		if (len & 1) {
			data += len & ~1;
			*data = smc_read_1(sc, DATA0);
		}

		/*
		 * Tell the device we're done.
		 */
		smc_mmu_wait(sc);
		smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE);
		/* NOTE(review): m cannot be NULL here (checked above and
		 * never reassigned); this test looks dead. */
		if (m == NULL) {
			break;
		}

		if (mhead == NULL) {
			mhead = mtail = m;
			m->m_next = NULL;
		} else {
			mtail->m_next = m;
			mtail = m;
		}
		packet = smc_read_1(sc, FIFO_RX);
	}

	/* Re-arm the receive interrupt before dropping the lock. */
	sc->smc_mask |= RCV_INT;
	if ((if_getcapenable(ifp) & IFCAP_POLLING) == 0)
		smc_write_1(sc, MSK, sc->smc_mask);

	SMC_UNLOCK(sc);

	/* Pass the collected packets up the stack, unlocked. */
	while (mhead != NULL) {
		m = mhead;
		mhead = mhead->m_next;
		m->m_next = NULL;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		if_input(ifp, m);
	}
}
#ifdef DEVICE_POLLING
static int
smc_poll(if_t ifp, enum poll_cmd cmd, int count)
{
struct smc_softc *sc;
sc = if_getsoftc(ifp);
SMC_LOCK(sc);
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
SMC_UNLOCK(sc);
return (0);
}
SMC_UNLOCK(sc);
if (cmd == POLL_AND_CHECK_STATUS)
taskqueue_enqueue(sc->smc_tq, &sc->smc_intr);
return (0);
}
#endif
/*
 * Interrupt filter (runs in primary interrupt context): mask the chip's
 * interrupts so it stops asserting the line, then defer all real work
 * to the smc_task_intr taskqueue handler.  The bank register is saved
 * and restored because the interrupt may preempt register access code
 * that is in a different bank.
 */
static int
smc_intr(void *context)
{
	struct smc_softc	*sc;
	uint32_t curbank;

	sc = (struct smc_softc *)context;

	/*
	 * Save current bank and restore later in this function
	 */
	curbank = (smc_read_2(sc, BSR) & BSR_BANK_MASK);

	/*
	 * Block interrupts in order to let smc_task_intr to kick in
	 */
	smc_select_bank(sc, 2);
	smc_write_1(sc, MSK, 0);

	/* Restore bank */
	smc_select_bank(sc, curbank);

	taskqueue_enqueue(sc->smc_tq, &sc->smc_intr);
	return (FILTER_HANDLED);
}
/*
 * Deferred interrupt task: decode the interrupt status bits masked by
 * sc->smc_mask, handle TX errors, dispatch the RX and TX tasks, count
 * overruns and collisions, then rewrite the interrupt mask (which
 * smc_intr cleared to silence the chip).
 */
static void
smc_task_intr(void *context, int pending)
{
	struct smc_softc	*sc;
	if_t			ifp;
	u_int			status, packet, counter, tcr;

	(void)pending;
	ifp = (if_t)context;
	sc = if_getsoftc(ifp);

	SMC_LOCK(sc);

	smc_select_bank(sc, 2);

	/*
	 * Find out what interrupts are flagged.
	 */
	status = smc_read_1(sc, IST) & sc->smc_mask;

	/*
	 * Transmit error
	 */
	if (status & TX_INT) {
		/*
		 * Kill off the packet if there is one and re-enable transmit.
		 */
		packet = smc_read_1(sc, FIFO_TX);
		if ((packet & FIFO_EMPTY) == 0) {
			callout_stop(&sc->smc_watchdog);
			smc_select_bank(sc, 2);
			smc_write_1(sc, PNR, packet);
			smc_write_2(sc, PTR, 0 | PTR_READ | 
			    PTR_AUTO_INCR);
			smc_select_bank(sc, 0);
			tcr = smc_read_2(sc, EPHSR);
#if 0
			if ((tcr & EPHSR_TX_SUC) == 0)
				device_printf(sc->smc_dev,
				    "bad packet\n");
#endif
			smc_select_bank(sc, 2);
			smc_mmu_wait(sc);
			smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE_PKT);

			/* A TX error disables the transmitter; turn it
			 * back on (with padding) in the bank-0 TCR. */
			smc_select_bank(sc, 0);
			tcr = smc_read_2(sc, TCR);
			tcr |= TCR_TXENA | TCR_PAD_EN;
			smc_write_2(sc, TCR, tcr);
			smc_select_bank(sc, 2);
			taskqueue_enqueue(sc->smc_tq, &sc->smc_tx);
		}

		/*
		 * Ack the interrupt.
		 */
		smc_write_1(sc, ACK, TX_INT);
	}

	/*
	 * Receive
	 */
	if (status & RCV_INT) {
		smc_write_1(sc, ACK, RCV_INT);
		/* Masked until smc_task_rx has drained the FIFO. */
		sc->smc_mask &= ~RCV_INT;
		taskqueue_enqueue(sc->smc_tq, &sc->smc_rx);
	}

	/*
	 * Allocation
	 */
	if (status & ALLOC_INT) {
		smc_write_1(sc, ACK, ALLOC_INT);
		sc->smc_mask &= ~ALLOC_INT;
		taskqueue_enqueue(sc->smc_tq, &sc->smc_tx);
	}

	/*
	 * Receive overrun
	 */
	if (status & RX_OVRN_INT) {
		smc_write_1(sc, ACK, RX_OVRN_INT);
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
	}

	/*
	 * Transmit empty
	 */
	if (status & TX_EMPTY_INT) {
		smc_write_1(sc, ACK, TX_EMPTY_INT);
		sc->smc_mask &= ~TX_EMPTY_INT;
		callout_stop(&sc->smc_watchdog);

		/*
		 * Update collision stats.
		 */
		smc_select_bank(sc, 0);
		counter = smc_read_2(sc, ECR);
		smc_select_bank(sc, 2);
		if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
		    ((counter & ECR_SNGLCOL_MASK) >> ECR_SNGLCOL_SHIFT) +
		    ((counter & ECR_MULCOL_MASK) >> ECR_MULCOL_SHIFT));

		/*
		 * See if there are any packets to transmit.
		 */
		taskqueue_enqueue(sc->smc_tq, &sc->smc_tx);
	}

	/*
	 * Update the interrupt mask.
	 */
	smc_select_bank(sc, 2);
	if ((if_getcapenable(ifp) & IFCAP_POLLING) == 0)
		smc_write_1(sc, MSK, sc->smc_mask);

	SMC_UNLOCK(sc);
}
/*
 * mii_bitbang glue: read the raw MGMT register (bank 3).  Caller must
 * hold the driver lock and have selected bank 3.
 */
static uint32_t
smc_mii_bitbang_read(device_t dev)
{
	struct smc_softc	*sc;
	uint32_t		val;

	sc = device_get_softc(dev);
	SMC_ASSERT_LOCKED(sc);
	KASSERT((smc_read_2(sc, BSR) & BSR_BANK_MASK) == 3,
	    ("%s: smc_mii_bitbang_read called with bank %d (!= 3)",
	    device_get_nameunit(sc->smc_dev),
	    smc_read_2(sc, BSR) & BSR_BANK_MASK));

	val = smc_read_2(sc, MGMT);
	smc_barrier(sc, MGMT, 2,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	return (val);
}
/*
 * mii_bitbang glue: write the raw MGMT register (bank 3).  Caller must
 * hold the driver lock and have selected bank 3.
 */
static void
smc_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct smc_softc	*sc;

	sc = device_get_softc(dev);
	SMC_ASSERT_LOCKED(sc);
	KASSERT((smc_read_2(sc, BSR) & BSR_BANK_MASK) == 3,
	    ("%s: smc_mii_bitbang_write called with bank %d (!= 3)",
	    device_get_nameunit(sc->smc_dev),
	    smc_read_2(sc, BSR) & BSR_BANK_MASK));

	smc_write_2(sc, MGMT, val);
	smc_barrier(sc, MGMT, 2,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
/* miibus method: read PHY register 'reg' of PHY 'phy' via bit-banged MII. */
int
smc_miibus_readreg(device_t dev, int phy, int reg)
{
	struct smc_softc	*sc;
	int			val;

	sc = device_get_softc(dev);

	SMC_LOCK(sc);

	/* The MGMT register used for bit-banging lives in bank 3. */
	smc_select_bank(sc, 3);

	val = mii_bitbang_readreg(dev, &smc_mii_bitbang_ops, phy, reg);

	SMC_UNLOCK(sc);
	return (val);
}
/* miibus method: write 'data' to PHY register 'reg' via bit-banged MII. */
int
smc_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct smc_softc	*sc;

	sc = device_get_softc(dev);

	SMC_LOCK(sc);

	/* The MGMT register used for bit-banging lives in bank 3. */
	smc_select_bank(sc, 3);

	mii_bitbang_writereg(dev, &smc_mii_bitbang_ops, phy, reg, data);

	SMC_UNLOCK(sc);
	return (0);
}
/*
 * miibus method: media status changed -- mirror the negotiated duplex
 * into the chip's TCR software-full-duplex bit (bank 0).
 */
void
smc_miibus_statchg(device_t dev)
{
	struct smc_softc	*sc;
	struct mii_data		*mii;
	uint16_t		tcr;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->smc_miibus);

	SMC_LOCK(sc);

	smc_select_bank(sc, 0);
	tcr = smc_read_2(sc, TCR);

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		tcr |= TCR_SWFDUP;
	else
		tcr &= ~TCR_SWFDUP;

	smc_write_2(sc, TCR, tcr);

	SMC_UNLOCK(sc);
}
/* ifmedia "set" handler: hand the new selection to the MII layer. */
static int
smc_mii_ifmedia_upd(if_t ifp)
{
	struct smc_softc *sc = if_getsoftc(ifp);

	/* No PHY bus attached: nothing to reprogram. */
	if (sc->smc_miibus == NULL)
		return (ENXIO);

	return (mii_mediachg(device_get_softc(sc->smc_miibus)));
}
/* ifmedia "status" handler: report link/media state from the PHY. */
static void
smc_mii_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct smc_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii;

	if (sc->smc_miibus == NULL)
		return;

	mii = device_get_softc(sc->smc_miibus);
	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
/*
 * Periodic (1 Hz) MII housekeeping callout: drive mii_tick() and
 * re-arm ourselves.
 */
static void
smc_mii_tick(void *context)
{
	struct smc_softc *sc;

	sc = (struct smc_softc *)context;

	if (sc->smc_miibus == NULL)
		return;

	/*
	 * NOTE(review): this drops a lock that is not visibly acquired in
	 * this function -- presumably the callout was initialized with
	 * callout_init_mtx() so the softc mutex is held on entry.  Confirm;
	 * if so, the early return above exits with the lock still held.
	 */
	SMC_UNLOCK(sc);

	mii_tick(device_get_softc(sc->smc_miibus));
	callout_reset(&sc->smc_mii_tick_ch, hz, smc_mii_tick, sc);
}
/* Kick the PHY into reprogramming the currently-selected media. */
static void
smc_mii_mediachg(struct smc_softc *sc)
{

	if (sc->smc_miibus != NULL)
		mii_mediachg(device_get_softc(sc->smc_miibus));
}
/*
 * Forward SIOC[GS]IFMEDIA ioctls to the MII layer's ifmedia handler.
 * Returns EINVAL when no PHY bus is attached.
 */
static int
smc_mii_mediaioctl(struct smc_softc *sc, struct ifreq *ifr, u_long command)
{
	struct mii_data *mii;

	if (sc->smc_miibus == NULL)
		return (EINVAL);

	mii = device_get_softc(sc->smc_miibus);
	return (ifmedia_ioctl(sc->smc_ifp, ifr, &mii->mii_media, command));
}
/*
 * Bring the chip to a quiescent, known state: mask interrupts,
 * soft-reset the core, disable TX/RX and reset the MMU.  The register
 * accesses below are bank-switched and order-dependent, so keep the
 * sequence intact.
 */
static void
smc_reset(struct smc_softc *sc)
{
	u_int ctr;

	SMC_ASSERT_LOCKED(sc);

	smc_select_bank(sc, 2);

	/*
	 * Mask all interrupts.
	 */
	smc_write_1(sc, MSK, 0);

	/*
	 * Tell the device to reset.
	 */
	smc_select_bank(sc, 0);
	smc_write_2(sc, RCR, RCR_SOFT_RST);

	/*
	 * Set up the configuration register.
	 */
	smc_select_bank(sc, 1);
	smc_write_2(sc, CR, CR_EPH_POWER_EN);
	/* Brief settle time after powering the EPH block -- TODO confirm. */
	DELAY(1);

	/*
	 * Turn off transmit and receive.
	 */
	smc_select_bank(sc, 0);
	smc_write_2(sc, TCR, 0);
	smc_write_2(sc, RCR, 0);

	/*
	 * Set up the control register.
	 */
	smc_select_bank(sc, 1);
	ctr = smc_read_2(sc, CTRL);
	ctr |= CTRL_LE_ENABLE | CTRL_AUTO_RELEASE;
	smc_write_2(sc, CTRL, ctr);

	/*
	 * Reset the MMU.
	 */
	smc_select_bank(sc, 2);
	smc_mmu_wait(sc);
	smc_write_2(sc, MMUCR, MMUCR_CMD_MMU_RESET);
}
/*
 * Enable the MAC: program the PHY control/LED register, turn on the
 * transmitter and receiver, and set up the interrupt mask.  Called
 * with the softc lock held.
 */
static void
smc_enable(struct smc_softc *sc)
{
	if_t ifp;

	SMC_ASSERT_LOCKED(sc);
	ifp = sc->smc_ifp;

	/*
	 * Set up the receive/PHY control register.
	 */
	smc_select_bank(sc, 0);
	smc_write_2(sc, RPCR, RPCR_ANEG | (RPCR_LED_LINK_ANY << RPCR_LSA_SHIFT)
	    | (RPCR_LED_ACT_ANY << RPCR_LSB_SHIFT));

	/*
	 * Set up the transmit and receive control registers.
	 */
	smc_write_2(sc, TCR, TCR_TXENA | TCR_PAD_EN);
	smc_write_2(sc, RCR, RCR_RXEN | RCR_STRIP_CRC);

	/*
	 * Set up the interrupt mask.
	 */
	smc_select_bank(sc, 2);
	sc->smc_mask = EPH_INT | RX_OVRN_INT | RCV_INT | TX_INT;
	/*
	 * NOTE(review): the interrupt-task path re-enables MSK only when
	 * IFCAP_POLLING is *disabled*, yet here MSK is written only when
	 * it is *enabled*.  One of the two conditions looks inverted --
	 * confirm the intended DEVICE_POLLING semantics before changing.
	 */
	if ((if_getcapenable(ifp) & IFCAP_POLLING) != 0)
		smc_write_1(sc, MSK, sc->smc_mask);
}
/*
 * Stop the interface: cancel callouts, mask all interrupts, deregister
 * polling, disable TX/RX and clear IFF_DRV_RUNNING.  Called with the
 * softc lock held.
 */
static void
smc_stop(struct smc_softc *sc)
{

	SMC_ASSERT_LOCKED(sc);

	/*
	 * Turn off callouts.
	 */
	callout_stop(&sc->smc_watchdog);
	callout_stop(&sc->smc_mii_tick_ch);

	/*
	 * Mask all interrupts.
	 */
	smc_select_bank(sc, 2);
	sc->smc_mask = 0;
	smc_write_1(sc, MSK, 0);
#ifdef DEVICE_POLLING
	ether_poll_deregister(sc->smc_ifp);
	/*
	 * Bug fix: this previously referenced an undeclared 'ifp', which
	 * does not compile with DEVICE_POLLING; use sc->smc_ifp as the
	 * rest of the function does.
	 */
	if_setcapenablebit(sc->smc_ifp, 0, IFCAP_POLLING);
#endif

	/*
	 * Disable transmit and receive.
	 */
	smc_select_bank(sc, 0);
	smc_write_2(sc, TCR, 0);
	smc_write_2(sc, RCR, 0);

	if_setdrvflagbits(sc->smc_ifp, 0, IFF_DRV_RUNNING);
}
/*
 * Transmit watchdog callout: log the timeout and hand recovery to the
 * interrupt taskqueue.
 */
static void
smc_watchdog(void *arg)
{
	struct smc_softc *sc = arg;

	device_printf(sc->smc_dev, "watchdog timeout\n");
	taskqueue_enqueue(sc->smc_tq, &sc->smc_intr);
}
/* ifnet if_init entry point: initialize the chip under the softc lock. */
static void
smc_init(void *context)
{
	struct smc_softc *sc = context;

	SMC_LOCK(sc);
	smc_init_locked(sc);
	SMC_UNLOCK(sc);
}
/*
 * Locked body of smc_init(): reset and enable the chip, mark the
 * interface running, restart transmission and (re)arm the MII tick.
 */
static void
smc_init_locked(struct smc_softc *sc)
{
	if_t ifp;

	SMC_ASSERT_LOCKED(sc);
	ifp = sc->smc_ifp;

	/* Nothing to do if the interface is already up. */
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;

	smc_reset(sc);
	smc_enable(sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	/* See if any packets queued up while we were down. */
	smc_start_locked(ifp);

	if (sc->smc_mii_tick != NULL)
		callout_reset(&sc->smc_mii_tick_ch, hz, sc->smc_mii_tick, sc);

#ifdef DEVICE_POLLING
	/*
	 * The lock is dropped around ether_poll_register(), presumably
	 * because it must not be called with the driver mutex held --
	 * confirm against the polling KPI.
	 */
	SMC_UNLOCK(sc);
	ether_poll_register(smc_poll, ifp);
	SMC_LOCK(sc);
	if_setcapenablebit(ifp, IFCAP_POLLING, 0);
#endif
}
/*
 * ifnet ioctl handler: interface flags, multicast (unimplemented) and
 * media requests; everything else goes to ether_ioctl().
 */
static int
smc_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct smc_softc *sc;
	int error;

	sc = if_getsoftc(ifp);
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		/* IFF_UP cleared while running: bring the interface down. */
		if ((if_getflags(ifp) & IFF_UP) == 0 &&
		    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			SMC_LOCK(sc);
			smc_stop(sc);
			SMC_UNLOCK(sc);
		} else {
			/* Otherwise (re)initialize and reprogram media. */
			smc_init(sc);
			if (sc->smc_mii_mediachg != NULL)
				sc->smc_mii_mediachg(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Multicast filter programming is not implemented yet. */
		/* XXX
		SMC_LOCK(sc);
		smc_setmcast(sc);
		SMC_UNLOCK(sc);
		*/
		error = EINVAL;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Media ioctls are handled by the PHY layer, if attached. */
		if (sc->smc_mii_mediaioctl == NULL) {
			error = EINVAL;
			break;
		}
		sc->smc_mii_mediaioctl(sc, (struct ifreq *)data, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
diff --git a/sys/dev/ste/if_ste.c b/sys/dev/ste/if_ste.c
index 7c7eb02ccfb7..06e25175c617 100644
--- a/sys/dev/ste/if_ste.c
+++ b/sys/dev/ste/if_ste.c
@@ -1,2131 +1,2126 @@
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) 1997, 1998, 1999
* Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/ste/if_stereg.h>
/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
/* Kernel module dependencies: PCI bus, Ethernet framework and MII layer. */
MODULE_DEPEND(ste, pci, 1, 1, 1);
MODULE_DEPEND(ste, ether, 1, 1, 1);
MODULE_DEPEND(ste, miibus, 1, 1, 1);
/* Define to show Tx error status. */
#define STE_SHOW_TXERRORS
/*
* Various supported device vendors/types and their names.
*/
/* Supported PCI vendor/device ID pairs; terminated by a NULL entry. */
static const struct ste_type ste_devs[] = {
	{ ST_VENDORID, ST_DEVICEID_ST201_1, "Sundance ST201 10/100BaseTX" },
	{ ST_VENDORID, ST_DEVICEID_ST201_2, "Sundance ST201 10/100BaseTX" },
	{ DL_VENDORID, DL_DEVICEID_DL10050, "D-Link DL10050 10/100BaseTX" },
	{ 0, 0, NULL }
};
static int ste_attach(device_t);
static int ste_detach(device_t);
static int ste_probe(device_t);
static int ste_resume(device_t);
static int ste_shutdown(device_t);
static int ste_suspend(device_t);
static int ste_dma_alloc(struct ste_softc *);
static void ste_dma_free(struct ste_softc *);
static void ste_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int ste_eeprom_wait(struct ste_softc *);
static int ste_encap(struct ste_softc *, struct mbuf **,
struct ste_chain *);
static int ste_ifmedia_upd(if_t);
static void ste_ifmedia_sts(if_t, struct ifmediareq *);
static void ste_init(void *);
static void ste_init_locked(struct ste_softc *);
static int ste_init_rx_list(struct ste_softc *);
static void ste_init_tx_list(struct ste_softc *);
static void ste_intr(void *);
static int ste_ioctl(if_t, u_long, caddr_t);
static uint32_t ste_mii_bitbang_read(device_t);
static void ste_mii_bitbang_write(device_t, uint32_t);
static int ste_miibus_readreg(device_t, int, int);
static void ste_miibus_statchg(device_t);
static int ste_miibus_writereg(device_t, int, int, int);
static int ste_newbuf(struct ste_softc *, struct ste_chain_onefrag *);
static int ste_read_eeprom(struct ste_softc *, uint16_t *, int, int);
static void ste_reset(struct ste_softc *);
static void ste_restart_tx(struct ste_softc *);
static int ste_rxeof(struct ste_softc *, int);
static void ste_rxfilter(struct ste_softc *);
static void ste_setwol(struct ste_softc *);
static void ste_start(if_t);
static void ste_start_locked(if_t);
static void ste_stats_clear(struct ste_softc *);
static void ste_stats_update(struct ste_softc *);
static void ste_stop(struct ste_softc *);
static void ste_sysctl_node(struct ste_softc *);
static void ste_tick(void *);
static void ste_txeoc(struct ste_softc *);
static void ste_txeof(struct ste_softc *);
static void ste_wait(struct ste_softc *);
static void ste_watchdog(struct ste_softc *);
/*
* MII bit-bang glue
*/
/* Accessors and STE_PHYCTL bit assignments for the mii_bitbang(4) glue. */
static const struct mii_bitbang_ops ste_mii_bitbang_ops = {
	ste_mii_bitbang_read,
	ste_mii_bitbang_write,
	{
		STE_PHYCTL_MDATA, /* MII_BIT_MDO */
		STE_PHYCTL_MDATA, /* MII_BIT_MDI */
		STE_PHYCTL_MCLK, /* MII_BIT_MDC */
		STE_PHYCTL_MDIR, /* MII_BIT_DIR_HOST_PHY */
		0, /* MII_BIT_DIR_PHY_HOST */
	}
};
/* newbus method table, driver declaration and CSR bit helpers. */
static device_method_t ste_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ste_probe),
	DEVMETHOD(device_attach, ste_attach),
	DEVMETHOD(device_detach, ste_detach),
	DEVMETHOD(device_shutdown, ste_shutdown),
	DEVMETHOD(device_suspend, ste_suspend),
	DEVMETHOD(device_resume, ste_resume),
	/* MII interface */
	DEVMETHOD(miibus_readreg, ste_miibus_readreg),
	DEVMETHOD(miibus_writereg, ste_miibus_writereg),
	DEVMETHOD(miibus_statchg, ste_miibus_statchg),
	DEVMETHOD_END
};

static driver_t ste_driver = {
	"ste",
	ste_methods,
	sizeof(struct ste_softc)
};

DRIVER_MODULE(ste, pci, ste_driver, 0, 0);
/* Attach miibus as a child bus so PHY drivers can bind. */
DRIVER_MODULE(miibus, ste, miibus_driver, 0, 0);

/* Read-modify-write helpers for 4/2/1-byte CSR accesses. */
#define STE_SETBIT4(sc, reg, x) \
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
#define STE_CLRBIT4(sc, reg, x) \
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))
#define STE_SETBIT2(sc, reg, x) \
	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | (x))
#define STE_CLRBIT2(sc, reg, x) \
	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~(x))
#define STE_SETBIT1(sc, reg, x) \
	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | (x))
#define STE_CLRBIT1(sc, reg, x) \
	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~(x))
/*
* Read the MII serial port for the MII bit-bang module.
*/
static uint32_t
ste_mii_bitbang_read(device_t dev)
{
	struct ste_softc *sc = device_get_softc(dev);
	uint32_t phyctl;

	/* Sample the PHY control register, then fence the access. */
	phyctl = CSR_READ_1(sc, STE_PHYCTL);
	CSR_BARRIER(sc, STE_PHYCTL, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	return (phyctl);
}
/*
* Write the MII serial port for the MII bit-bang module.
*/
static void
ste_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct ste_softc *sc = device_get_softc(dev);

	/* Drive the PHY control register, then fence the access. */
	CSR_WRITE_1(sc, STE_PHYCTL, val);
	CSR_BARRIER(sc, STE_PHYCTL, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
/* Read a PHY register via the generic MII bit-bang helper. */
static int
ste_miibus_readreg(device_t dev, int phy, int reg)
{
	int val;

	val = mii_bitbang_readreg(dev, &ste_mii_bitbang_ops, phy, reg);
	return (val);
}
/* Write a PHY register via the generic MII bit-bang helper. */
static int
ste_miibus_writereg(device_t dev, int phy, int reg, int data)
{

	mii_bitbang_writereg(dev, &ste_mii_bitbang_ops, phy, reg, data);

	return (0);
}
/*
 * Media status change callback: track link state in STE_FLAG_LINK and
 * program the MAC's duplex setting to match the resolved media.
 */
static void
ste_miibus_statchg(device_t dev)
{
	struct ste_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	uint16_t cfg;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->ste_miibus);
	ifp = sc->ste_ifp;
	/* Ignore callbacks before attach completes or while stopped. */
	if (mii == NULL || ifp == NULL ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	sc->ste_flags &= ~STE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_100_FX:
		case IFM_100_T4:
			sc->ste_flags |= STE_FLAG_LINK;
			/* FALLTHROUGH */
		default:
			break;
		}
	}

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->ste_flags & STE_FLAG_LINK) != 0) {
		cfg = CSR_READ_2(sc, STE_MACCTL0);
		cfg &= ~(STE_MACCTL0_FLOWCTL_ENABLE | STE_MACCTL0_FULLDUPLEX);
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
			/*
			 * ST201 data sheet says driver should enable receiving
			 * MAC control frames bit of receive mode register to
			 * receive flow-control frames but the register has no
			 * such bits. In addition the controller has no ability
			 * to send pause frames so it should be handled in
			 * driver. Implementing pause timer handling in driver
			 * layer is not trivial, so don't enable flow-control
			 * here.
			 */
			cfg |= STE_MACCTL0_FULLDUPLEX;
		}
		CSR_WRITE_2(sc, STE_MACCTL0, cfg);
	}
}
/*
 * ifmedia "set" handler: reset every attached PHY and let the MII
 * layer apply the new media selection.
 */
static int
ste_ifmedia_upd(if_t ifp)
{
	struct ste_softc *sc = if_getsoftc(ifp);
	struct mii_softc *miisc;
	struct mii_data *mii;
	int rv;

	STE_LOCK(sc);
	mii = device_get_softc(sc->ste_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	rv = mii_mediachg(mii);
	STE_UNLOCK(sc);

	return (rv);
}
/* ifmedia "status" handler: report link/media state when the interface is up. */
static void
ste_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct ste_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii = device_get_softc(sc->ste_miibus);

	STE_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		mii_pollstat(mii);
		ifmr->ifm_active = mii->mii_media_active;
		ifmr->ifm_status = mii->mii_media_status;
	}
	STE_UNLOCK(sc);
}
/* Spin (up to STE_TIMEOUT microseconds) until the DMA engine finishes halting. */
static void
ste_wait(struct ste_softc *sc)
{
	int spin;

	for (spin = 0; spin < STE_TIMEOUT; spin++) {
		if ((CSR_READ_4(sc, STE_DMACTL) &
		    STE_DMACTL_DMA_HALTINPROG) == 0)
			break;
		DELAY(1);
	}

	if (spin == STE_TIMEOUT)
		device_printf(sc->ste_dev, "command never completed!\n");
}
/*
* The EEPROM is slow: give it time to come ready after issuing
* it a command.
*/
/*
 * Wait for the EEPROM to go non-busy after a command; the part is
 * slow, so poll in 1 ms steps.  Returns 0 when ready, 1 on timeout.
 */
static int
ste_eeprom_wait(struct ste_softc *sc)
{
	int tries;

	DELAY(1000);
	for (tries = 0; tries < 100; tries++) {
		if ((CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY) == 0)
			return (0);
		DELAY(1000);
	}

	device_printf(sc->ste_dev, "eeprom failed to come ready\n");
	return (1);
}
/*
* Read a sequence of words from the EEPROM. Note that ethernet address
* data is stored in the EEPROM in network byte order.
*/
/*
 * Read 'cnt' 16-bit words from the EEPROM starting at word offset
 * 'off' into 'dest'.  Returns 0 on success, 1 on EEPROM timeout.
 */
static int
ste_read_eeprom(struct ste_softc *sc, uint16_t *dest, int off, int cnt)
{
	int i;

	if (ste_eeprom_wait(sc))
		return (1);

	for (i = 0; i < cnt; i++) {
		CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i));
		if (ste_eeprom_wait(sc) != 0)
			return (1);
		dest[i] = le16toh(CSR_READ_2(sc, STE_EEPROM_DATA));
	}

	return (0);
}
/*
 * if_foreach_llmaddr() callback: fold one link-level multicast address
 * into the 64-bit hash filter (low 6 bits of the big-endian CRC pick
 * the bit).
 */
static u_int
ste_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *hashes = arg;
	int bit;

	bit = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) & 0x3F;
	hashes[bit >> 5] |= 1 << (bit & 0x1F);

	return (1);
}
/*
 * Program the receive filter (unicast/broadcast/promiscuous/multicast
 * hash) from the current interface flags and multicast list.
 */
static void
ste_rxfilter(struct ste_softc *sc)
{
	if_t ifp;
	uint32_t hashes[2] = { 0, 0 };
	uint8_t rxcfg;

	STE_LOCK_ASSERT(sc);

	ifp = sc->ste_ifp;
	rxcfg = CSR_READ_1(sc, STE_RX_MODE);
	/* Always accept frames for our own station address. */
	rxcfg |= STE_RXMODE_UNICAST;
	rxcfg &= ~(STE_RXMODE_ALLMULTI | STE_RXMODE_MULTIHASH |
	    STE_RXMODE_BROADCAST | STE_RXMODE_PROMISC);
	if (if_getflags(ifp) & IFF_BROADCAST)
		rxcfg |= STE_RXMODE_BROADCAST;
	if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		if ((if_getflags(ifp) & IFF_ALLMULTI) != 0)
			rxcfg |= STE_RXMODE_ALLMULTI;
		if ((if_getflags(ifp) & IFF_PROMISC) != 0)
			rxcfg |= STE_RXMODE_PROMISC;
		/* All-multi/promiscuous modes bypass the hash filter. */
		goto chipit;
	}

	rxcfg |= STE_RXMODE_MULTIHASH;
	/* Now program new ones. */
	if_foreach_llmaddr(ifp, ste_hash_maddr, hashes);

chipit:
	/* Load the 64-bit hash into the four 16-bit MAR registers. */
	CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF);
	CSR_WRITE_2(sc, STE_RX_MODE, rxcfg);
	/* Read back, presumably to flush the posted write -- confirm. */
	CSR_READ_1(sc, STE_RX_MODE);
}
#ifdef DEVICE_POLLING
static poll_handler_t ste_poll, ste_poll_locked;
/* DEVICE_POLLING entry point: run the locked poll body if we are up. */
static int
ste_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct ste_softc *sc = if_getsoftc(ifp);
	int rx_npkts = 0;

	STE_LOCK(sc);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		rx_npkts = ste_poll_locked(ifp, cmd, count);
	STE_UNLOCK(sc);

	return (rx_npkts);
}
/*
 * Locked poll body: harvest up to 'count' received frames, reclaim
 * transmits and, for POLL_AND_CHECK_STATUS, handle status events.
 * Returns the number of packets processed on the receive side.
 */
static int
ste_poll_locked(if_t ifp, enum poll_cmd cmd, int count)
{
	struct ste_softc *sc = if_getsoftc(ifp);
	int rx_npkts;

	STE_LOCK_ASSERT(sc);

	rx_npkts = ste_rxeof(sc, count);
	ste_txeof(sc);
	ste_txeoc(sc);
	if (!if_sendq_empty(ifp))
		ste_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		uint16_t status;

		/* Reading STE_ISR_ACK also clears STE_IMR (see ste_intr()). */
		status = CSR_READ_2(sc, STE_ISR_ACK);

		if (status & STE_ISR_STATS_OFLOW)
			ste_stats_update(sc);

		if (status & STE_ISR_HOSTERR) {
			/* Host error: force a full reinitialization. */
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			ste_init_locked(sc);
		}
	}

	return (rx_npkts);
}
#endif /* DEVICE_POLLING */
/*
 * Interrupt handler: acknowledge the chip, process RX/TX completions,
 * implement software Rx interrupt moderation via the countdown timer,
 * and re-enable the interrupt mask on the way out.
 */
static void
ste_intr(void *xsc)
{
	struct ste_softc *sc;
	if_t ifp;
	uint16_t intrs, status;

	sc = xsc;
	STE_LOCK(sc);
	ifp = sc->ste_ifp;

#ifdef DEVICE_POLLING
	/* In polling mode the interrupt path does nothing. */
	if (if_getcapenable(ifp) & IFCAP_POLLING) {
		STE_UNLOCK(sc);
		return;
	}
#endif
	/* Reading STE_ISR_ACK clears STE_IMR register. */
	status = CSR_READ_2(sc, STE_ISR_ACK);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		STE_UNLOCK(sc);
		return;
	}

	intrs = STE_INTRS;
	/* All-ones presumably means the device vanished -- confirm. */
	if (status == 0xFFFF || (status & intrs) == 0)
		goto done;

	/* While the Rx moderation timer is armed, mask Rx DMA interrupts. */
	if (sc->ste_int_rx_act > 0) {
		status &= ~STE_ISR_RX_DMADONE;
		intrs &= ~STE_IMR_RX_DMADONE;
	}

	if ((status & (STE_ISR_SOFTINTR | STE_ISR_RX_DMADONE)) != 0) {
		ste_rxeof(sc, -1);
		/*
		 * The controller has no ability to Rx interrupt
		 * moderation feature. Receiving 64 bytes frames
		 * from wire generates too many interrupts which in
		 * turn make system useless to process other useful
		 * things. Fortunately ST201 supports single shot
		 * timer so use the timer to implement Rx interrupt
		 * moderation in driver. This adds more register
		 * access but it greatly reduces number of Rx
		 * interrupts under high network load.
		 */
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
		    (sc->ste_int_rx_mod != 0)) {
			if ((status & STE_ISR_RX_DMADONE) != 0) {
				CSR_WRITE_2(sc, STE_COUNTDOWN,
				    STE_TIMER_USECS(sc->ste_int_rx_mod));
				intrs &= ~STE_IMR_RX_DMADONE;
				sc->ste_int_rx_act = 1;
			} else {
				intrs |= STE_IMR_RX_DMADONE;
				sc->ste_int_rx_act = 0;
			}
		}
	}

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		if ((status & STE_ISR_TX_DMADONE) != 0)
			ste_txeof(sc);
		if ((status & STE_ISR_TX_DONE) != 0)
			ste_txeoc(sc);
		if ((status & STE_ISR_STATS_OFLOW) != 0)
			ste_stats_update(sc);
		if ((status & STE_ISR_HOSTERR) != 0) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			ste_init_locked(sc);
			STE_UNLOCK(sc);
			return;
		}
		if (!if_sendq_empty(ifp))
			ste_start_locked(ifp);
		/*
		 * NOTE(review): the 'done:' label sits inside this if-block
		 * and the 'goto done' above jumps into it.  That is legal C,
		 * but it means the mask is rewritten even on the
		 * spurious/masked path -- confirm this placement is
		 * intentional.
		 */
done:
		/* Re-enable interrupts */
		CSR_WRITE_2(sc, STE_IMR, intrs);
	}
	STE_UNLOCK(sc);
}
/*
* A frame has been uploaded: pass the resulting mbuf chain up to
* the higher level protocols.
*/
/*
 * Walk the receive ring, handing completed frames to the stack.
 * 'count' limits how many frames to take in polling mode (-1 from the
 * interrupt path means unlimited).  Returns the number of ring slots
 * processed.
 */
static int
ste_rxeof(struct ste_softc *sc, int count)
{
	struct mbuf *m;
	if_t ifp;
	struct ste_chain_onefrag *cur_rx;
	uint32_t rxstat;
	int total_len, rx_npkts;

	ifp = sc->ste_ifp;

	/* Pick up descriptor updates the chip has DMA'd into memory. */
	bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag,
	    sc->ste_cdata.ste_rx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	cur_rx = sc->ste_cdata.ste_rx_head;
	for (rx_npkts = 0; rx_npkts < STE_RX_LIST_CNT; rx_npkts++,
	    cur_rx = cur_rx->ste_next) {
		rxstat = le32toh(cur_rx->ste_ptr->ste_status);
		/* Stop at the first descriptor the chip still owns. */
		if ((rxstat & STE_RXSTAT_DMADONE) == 0)
			break;
#ifdef DEVICE_POLLING
		if (if_getcapenable(ifp) & IFCAP_POLLING) {
			if (count == 0)
				break;
			count--;
		}
#endif
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
			break;
		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & STE_RXSTAT_FRAME_ERR) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		/* No errors; receive the packet. */
		m = cur_rx->ste_mbuf;
		total_len = STE_RX_BYTES(rxstat);

		/*
		 * Try to conjure up a new mbuf cluster. If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue. This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		if (ste_newbuf(sc, cur_rx) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;

		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		/* Drop the driver lock around the stack input call. */
		STE_UNLOCK(sc);
		if_input(ifp, m);
		STE_LOCK(sc);
	}

	if (rx_npkts > 0) {
		sc->ste_cdata.ste_rx_head = cur_rx;
		/* Give the cleared descriptors back to the chip. */
		bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag,
		    sc->ste_cdata.ste_rx_list_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	return (rx_npkts);
}
/*
 * Drain the transmit status FIFO, accounting errors and restarting the
 * transmitter after underruns or collisions.
 */
static void
ste_txeoc(struct ste_softc *sc)
{
	uint16_t txstat;
	if_t ifp;

	STE_LOCK_ASSERT(sc);

	ifp = sc->ste_ifp;

	/*
	 * STE_TX_STATUS register implements a queue of up to 31
	 * transmit status byte. Writing an arbitrary value to the
	 * register will advance the queue to the next transmit
	 * status byte. This means if driver does not read
	 * STE_TX_STATUS register after completing sending more
	 * than 31 frames the controller would be stalled so driver
	 * should re-wake the Tx MAC. This is the most severe
	 * limitation of ST201 based controller.
	 */
	for (;;) {
		txstat = CSR_READ_2(sc, STE_TX_STATUS);
		/* Queue drained: no more completed frames. */
		if ((txstat & STE_TXSTATUS_TXDONE) == 0)
			break;
		if ((txstat & (STE_TXSTATUS_UNDERRUN |
		    STE_TXSTATUS_EXCESSCOLLS | STE_TXSTATUS_RECLAIMERR |
		    STE_TXSTATUS_STATSOFLOW)) != 0) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
#ifdef STE_SHOW_TXERRORS
			device_printf(sc->ste_dev, "TX error : 0x%b\n",
			    txstat & 0xFF, STE_ERR_BITS);
#endif
			/* On underrun, raise the Tx start threshold and reinit. */
			if ((txstat & STE_TXSTATUS_UNDERRUN) != 0 &&
			    sc->ste_tx_thresh < STE_PACKET_SIZE) {
				sc->ste_tx_thresh += STE_MIN_FRAMELEN;
				if (sc->ste_tx_thresh > STE_PACKET_SIZE)
					sc->ste_tx_thresh = STE_PACKET_SIZE;
				device_printf(sc->ste_dev,
				    "TX underrun, increasing TX"
				    " start threshold to %d bytes\n",
				    sc->ste_tx_thresh);
				/* Make sure to disable active DMA cycles. */
				STE_SETBIT4(sc, STE_DMACTL,
				    STE_DMACTL_TXDMA_STALL);
				ste_wait(sc);
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				ste_init_locked(sc);
				break;
			}
			/* Restart Tx. */
			ste_restart_tx(sc);
		}
		/*
		 * Advance to next status and ACK TxComplete
		 * interrupt. ST201 data sheet was wrong here, to
		 * get next Tx status, we have to write both
		 * STE_TX_STATUS and STE_TX_FRAMEID register.
		 * Otherwise controller returns the same status
		 * as well as not acknowledge Tx completion
		 * interrupt.
		 */
		CSR_WRITE_2(sc, STE_TX_STATUS, txstat);
	}
}
/*
 * Once-a-second housekeeping callout: drive the MII state machine,
 * reclaim finished transmits, pull statistics and run the Tx watchdog.
 * Runs with the softc lock held (asserted below) and re-arms itself.
 */
static void
ste_tick(void *arg)
{
	struct ste_softc *sc;
	struct mii_data *mii;

	sc = (struct ste_softc *)arg;

	STE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->ste_miibus);
	mii_tick(mii);
	/*
	 * ukphy(4) does not seem to generate CB that reports
	 * resolved link state so if we know we lost a link,
	 * explicitly check the link state.
	 */
	if ((sc->ste_flags & STE_FLAG_LINK) == 0)
		ste_miibus_statchg(sc->ste_dev);
	/*
	 * Because we are not generating Tx completion
	 * interrupt for every frame, reclaim transmitted
	 * buffers here.
	 */
	ste_txeof(sc);
	ste_txeoc(sc);
	ste_stats_update(sc);
	ste_watchdog(sc);
	callout_reset(&sc->ste_callout, hz, ste_tick, sc);
}
/*
 * Reclaim descriptors for frames the chip has finished DMA'ing:
 * unload DMA maps, free mbufs and advance the consumer index.
 */
static void
ste_txeof(struct ste_softc *sc)
{
	if_t ifp;
	struct ste_chain *cur_tx;
	uint32_t txstat;
	int idx;

	STE_LOCK_ASSERT(sc);

	ifp = sc->ste_ifp;
	idx = sc->ste_cdata.ste_tx_cons;
	/* Ring empty: nothing pending. */
	if (idx == sc->ste_cdata.ste_tx_prod)
		return;

	bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
	    sc->ste_cdata.ste_tx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (idx != sc->ste_cdata.ste_tx_prod) {
		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
		txstat = le32toh(cur_tx->ste_ptr->ste_ctl);
		/* Stop at the first descriptor the chip has not finished. */
		if ((txstat & STE_TXCTL_DMADONE) == 0)
			break;
		bus_dmamap_sync(sc->ste_cdata.ste_tx_tag, cur_tx->ste_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->ste_cdata.ste_tx_tag, cur_tx->ste_map);
		KASSERT(cur_tx->ste_mbuf != NULL,
		    ("%s: freeing NULL mbuf!\n", __func__));
		m_freem(cur_tx->ste_mbuf);
		cur_tx->ste_mbuf = NULL;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		sc->ste_cdata.ste_tx_cnt--;
		STE_INC(idx, STE_TX_LIST_CNT);
	}

	sc->ste_cdata.ste_tx_cons = idx;
	/* Cancel the watchdog once the ring fully drains. */
	if (sc->ste_cdata.ste_tx_cnt == 0)
		sc->ste_timer = 0;
}
/*
 * Read and discard every hardware statistics counter; the counters
 * appear to be clear-on-read (ste_stats_update() accumulates the same
 * registers), so this zeroes them -- confirm against the ST201 sheet.
 */
static void
ste_stats_clear(struct ste_softc *sc)
{

	STE_LOCK_ASSERT(sc);

	/* Rx stats. */
	CSR_READ_2(sc, STE_STAT_RX_OCTETS_LO);
	CSR_READ_2(sc, STE_STAT_RX_OCTETS_HI);
	CSR_READ_2(sc, STE_STAT_RX_FRAMES);
	CSR_READ_1(sc, STE_STAT_RX_BCAST);
	CSR_READ_1(sc, STE_STAT_RX_MCAST);
	CSR_READ_1(sc, STE_STAT_RX_LOST);
	/* Tx stats. */
	CSR_READ_2(sc, STE_STAT_TX_OCTETS_LO);
	CSR_READ_2(sc, STE_STAT_TX_OCTETS_HI);
	CSR_READ_2(sc, STE_STAT_TX_FRAMES);
	CSR_READ_1(sc, STE_STAT_TX_BCAST);
	CSR_READ_1(sc, STE_STAT_TX_MCAST);
	CSR_READ_1(sc, STE_STAT_CARRIER_ERR);
	CSR_READ_1(sc, STE_STAT_SINGLE_COLLS);
	CSR_READ_1(sc, STE_STAT_MULTI_COLLS);
	CSR_READ_1(sc, STE_STAT_LATE_COLLS);
	CSR_READ_1(sc, STE_STAT_TX_DEFER);
	CSR_READ_1(sc, STE_STAT_TX_EXDEFER);
	CSR_READ_1(sc, STE_STAT_TX_ABORT);
}
static void
ste_stats_update(struct ste_softc *sc)
{
if_t ifp;
struct ste_hw_stats *stats;
uint32_t val;
STE_LOCK_ASSERT(sc);
ifp = sc->ste_ifp;
stats = &sc->ste_stats;
/* Rx stats. */
val = (uint32_t)CSR_READ_2(sc, STE_STAT_RX_OCTETS_LO) |
((uint32_t)CSR_READ_2(sc, STE_STAT_RX_OCTETS_HI)) << 16;
val &= 0x000FFFFF;
stats->rx_bytes += val;
stats->rx_frames += CSR_READ_2(sc, STE_STAT_RX_FRAMES);
stats->rx_bcast_frames += CSR_READ_1(sc, STE_STAT_RX_BCAST);
stats->rx_mcast_frames += CSR_READ_1(sc, STE_STAT_RX_MCAST);
stats->rx_lost_frames += CSR_READ_1(sc, STE_STAT_RX_LOST);
/* Tx stats. */
val = (uint32_t)CSR_READ_2(sc, STE_STAT_TX_OCTETS_LO) |
((uint32_t)CSR_READ_2(sc, STE_STAT_TX_OCTETS_HI)) << 16;
val &= 0x000FFFFF;
stats->tx_bytes += val;
stats->tx_frames += CSR_READ_2(sc, STE_STAT_TX_FRAMES);
stats->tx_bcast_frames += CSR_READ_1(sc, STE_STAT_TX_BCAST);
stats->tx_mcast_frames += CSR_READ_1(sc, STE_STAT_TX_MCAST);
stats->tx_carrsense_errs += CSR_READ_1(sc, STE_STAT_CARRIER_ERR);
val = CSR_READ_1(sc, STE_STAT_SINGLE_COLLS);
stats->tx_single_colls += val;
if_inc_counter(ifp, IFCOUNTER_COLLISIONS, val);
val = CSR_READ_1(sc, STE_STAT_MULTI_COLLS);
stats->tx_multi_colls += val;
if_inc_counter(ifp, IFCOUNTER_COLLISIONS, val);
val += CSR_READ_1(sc, STE_STAT_LATE_COLLS);
stats->tx_late_colls += val;
if_inc_counter(ifp, IFCOUNTER_COLLISIONS, val);
stats->tx_frames_defered += CSR_READ_1(sc, STE_STAT_TX_DEFER);
stats->tx_excess_defers += CSR_READ_1(sc, STE_STAT_TX_EXDEFER);
stats->tx_abort += CSR_READ_1(sc, STE_STAT_TX_ABORT);
}
/*
* Probe for a Sundance ST201 chip. Check the PCI vendor and device
* IDs against our list and return a device name if we find a match.
*/
/*
 * Match the PCI vendor/device pair against the ste_devs table and set
 * the device description on a hit.
 */
static int
ste_probe(device_t dev)
{
	const struct ste_type *t;

	for (t = ste_devs; t->ste_name != NULL; t++) {
		if (pci_get_vendor(dev) == t->ste_vid &&
		    pci_get_device(dev) == t->ste_did) {
			device_set_desc(dev, t->ste_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
/*
* Attach the interface. Allocate softc structures, do ifmedia
* setup and ethernet/BPF attach.
*/
static int
ste_attach(device_t dev)
{
	struct ste_softc *sc;
	if_t ifp;
	uint16_t eaddr[ETHER_ADDR_LEN / 2];
	int error = 0, phy, pmc, prefer_iomap, rid;

	sc = device_get_softc(dev);
	sc->ste_dev = dev;

	/*
	 * Only use one PHY since this chip reports multiple
	 * Note on the DFE-550 the PHY is at 1 on the DFE-580
	 * it is at 0 & 1. It is rev 0x12.
	 */
	if (pci_get_vendor(dev) == DL_VENDORID &&
	    pci_get_device(dev) == DL_DEVICEID_DL10050 &&
	    pci_get_revid(dev) == 0x12 )
		sc->ste_flags |= STE_FLAG_ONE_PHY;

	mtx_init(&sc->ste_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Prefer memory space register mapping over IO space but use
	 * IO space for a device that is known to have issues on memory
	 * mapping.
	 */
	prefer_iomap = 0;
	if (pci_get_device(dev) == ST_DEVICEID_ST201_1)
		prefer_iomap = 1;
	else
		resource_int_value(device_get_name(sc->ste_dev),
		    device_get_unit(sc->ste_dev), "prefer_iomap",
		    &prefer_iomap);
	if (prefer_iomap == 0) {
		sc->ste_res_id = PCIR_BAR(1);
		sc->ste_res_type = SYS_RES_MEMORY;
		sc->ste_res = bus_alloc_resource_any(dev, sc->ste_res_type,
		    &sc->ste_res_id, RF_ACTIVE);
	}
	if (prefer_iomap || sc->ste_res == NULL) {
		/* Fall back to (or prefer) the I/O port BAR. */
		sc->ste_res_id = PCIR_BAR(0);
		sc->ste_res_type = SYS_RES_IOPORT;
		sc->ste_res = bus_alloc_resource_any(dev, sc->ste_res_type,
		    &sc->ste_res_id, RF_ACTIVE);
	}
	if (sc->ste_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt */
	rid = 0;
	sc->ste_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->ste_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	callout_init_mtx(&sc->ste_callout, &sc->ste_mtx, 0);

	/* Reset the adapter. */
	ste_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	if (ste_read_eeprom(sc, eaddr, STE_EEADDR_NODE0, ETHER_ADDR_LEN / 2)) {
		device_printf(dev, "failed to read station address\n");
		error = ENXIO;
		goto fail;
	}
	ste_sysctl_node(sc);

	if ((error = ste_dma_alloc(sc)) != 0)
		goto fail;

	ifp = sc->ste_ifp = if_alloc(IFT_ETHER);
-	if (ifp == NULL) {
-		device_printf(dev, "can not if_alloc()\n");
-		error = ENOSPC;
-		goto fail;
-	}
	/* Do MII setup. */
	phy = MII_PHY_ANY;
	if ((sc->ste_flags & STE_FLAG_ONE_PHY) != 0)
		phy = 0;
	error = mii_attach(dev, &sc->ste_miibus, ifp, ste_ifmedia_upd,
	    ste_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, ste_ioctl);
	if_setstartfn(ifp, ste_start);
	if_setinitfn(ifp, ste_init);
	if_setsendqlen(ifp, STE_TX_LIST_CNT - 1);
	if_setsendqready(ifp);
	sc->ste_tx_thresh = STE_TXSTART_THRESH;

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, (uint8_t *)eaddr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
		if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->ste_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ste_intr, sc, &sc->ste_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	/* ste_detach() is safe on partially-initialized state. */
	if (error)
		ste_detach(dev);

	return (error);
}
/*
* Shutdown hardware and free up resources. This can be called any
* time after the mutex has been initialized. It is called in both
* the error case in attach and the normal detach case so it needs
* to be careful about only freeing resources that have actually been
* allocated.
*/
static int
ste_detach(device_t dev)
{
	struct ste_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->ste_mtx), ("ste mutex not initialized"));
	ifp = sc->ste_ifp;

#ifdef DEVICE_POLLING
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		STE_LOCK(sc);
		ste_stop(sc);
		STE_UNLOCK(sc);
		/* Wait for any in-flight tick callout to finish. */
		callout_drain(&sc->ste_callout);
	}
	if (sc->ste_miibus)
		device_delete_child(dev, sc->ste_miibus);
	bus_generic_detach(dev);

	/* Release resources in reverse order of acquisition. */
	if (sc->ste_intrhand)
		bus_teardown_intr(dev, sc->ste_irq, sc->ste_intrhand);
	if (sc->ste_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ste_irq);
	if (sc->ste_res)
		bus_release_resource(dev, sc->ste_res_type, sc->ste_res_id,
		    sc->ste_res);

	if (ifp)
		if_free(ifp);

	ste_dma_free(sc);
	mtx_destroy(&sc->ste_mtx);

	return (0);
}
struct ste_dmamap_arg {
	bus_addr_t	ste_busaddr;
};

/*
 * busdma load callback: record the single DMA segment's bus address
 * into the caller-supplied ste_dmamap_arg.  Nothing is recorded on error.
 */
static void
ste_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ste_dmamap_arg *dmap_arg = (struct ste_dmamap_arg *)arg;

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	dmap_arg->ste_busaddr = segs[0].ds_addr;
}
/*
 * Create all busdma tags, allocate and load the Tx/Rx descriptor lists,
 * and create per-buffer DMA maps.  On any failure the accumulated error
 * is returned; the caller is expected to invoke ste_dma_free() (via
 * ste_detach()) to release whatever was successfully set up.
 */
static int
ste_dma_alloc(struct ste_softc *sc)
{
	struct ste_chain *txc;
	struct ste_chain_onefrag *rxc;
	struct ste_dmamap_arg ctx;
	int error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->ste_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ste_cdata.ste_parent_tag);
	if (error != 0) {
		device_printf(sc->ste_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx descriptor list. */
	error = bus_dma_tag_create(
	    sc->ste_cdata.ste_parent_tag, /* parent */
	    STE_DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    STE_TX_LIST_SZ,		/* maxsize */
	    1,				/* nsegments */
	    STE_TX_LIST_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ste_cdata.ste_tx_list_tag);
	if (error != 0) {
		device_printf(sc->ste_dev,
		    "could not create Tx list DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx descriptor list. */
	error = bus_dma_tag_create(
	    sc->ste_cdata.ste_parent_tag, /* parent */
	    STE_DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    STE_RX_LIST_SZ,		/* maxsize */
	    1,				/* nsegments */
	    STE_RX_LIST_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ste_cdata.ste_rx_list_tag);
	if (error != 0) {
		device_printf(sc->ste_dev,
		    "could not create Rx list DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx buffers (up to STE_MAXFRAGS segments). */
	error = bus_dma_tag_create(
	    sc->ste_cdata.ste_parent_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * STE_MAXFRAGS,	/* maxsize */
	    STE_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ste_cdata.ste_tx_tag);
	if (error != 0) {
		device_printf(sc->ste_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx buffers (one cluster per descriptor). */
	error = bus_dma_tag_create(
	    sc->ste_cdata.ste_parent_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ste_cdata.ste_rx_tag);
	if (error != 0) {
		device_printf(sc->ste_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx list. */
	error = bus_dmamem_alloc(sc->ste_cdata.ste_tx_list_tag,
	    (void **)&sc->ste_ldata.ste_tx_list,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->ste_cdata.ste_tx_list_map);
	if (error != 0) {
		device_printf(sc->ste_dev,
		    "could not allocate DMA'able memory for Tx list.\n");
		goto fail;
	}
	ctx.ste_busaddr = 0;
	error = bus_dmamap_load(sc->ste_cdata.ste_tx_list_tag,
	    sc->ste_cdata.ste_tx_list_map, sc->ste_ldata.ste_tx_list,
	    STE_TX_LIST_SZ, ste_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.ste_busaddr == 0) {
		device_printf(sc->ste_dev,
		    "could not load DMA'able memory for Tx list.\n");
		goto fail;
	}
	sc->ste_ldata.ste_tx_list_paddr = ctx.ste_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx list. */
	error = bus_dmamem_alloc(sc->ste_cdata.ste_rx_list_tag,
	    (void **)&sc->ste_ldata.ste_rx_list,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->ste_cdata.ste_rx_list_map);
	if (error != 0) {
		device_printf(sc->ste_dev,
		    "could not allocate DMA'able memory for Rx list.\n");
		goto fail;
	}
	ctx.ste_busaddr = 0;
	error = bus_dmamap_load(sc->ste_cdata.ste_rx_list_tag,
	    sc->ste_cdata.ste_rx_list_map, sc->ste_ldata.ste_rx_list,
	    STE_RX_LIST_SZ, ste_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.ste_busaddr == 0) {
		device_printf(sc->ste_dev,
		    "could not load DMA'able memory for Rx list.\n");
		goto fail;
	}
	sc->ste_ldata.ste_rx_list_paddr = ctx.ste_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		txc = &sc->ste_cdata.ste_tx_chain[i];
		txc->ste_ptr = NULL;
		txc->ste_mbuf = NULL;
		txc->ste_next = NULL;
		txc->ste_phys = 0;
		txc->ste_map = NULL;
		error = bus_dmamap_create(sc->ste_cdata.ste_tx_tag, 0,
		    &txc->ste_map);
		if (error != 0) {
			device_printf(sc->ste_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	/* The spare map is used by ste_newbuf() for lossless buffer swap. */
	if ((error = bus_dmamap_create(sc->ste_cdata.ste_rx_tag, 0,
	    &sc->ste_cdata.ste_rx_sparemap)) != 0) {
		device_printf(sc->ste_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		rxc = &sc->ste_cdata.ste_rx_chain[i];
		rxc->ste_ptr = NULL;
		rxc->ste_mbuf = NULL;
		rxc->ste_next = NULL;
		rxc->ste_map = NULL;
		error = bus_dmamap_create(sc->ste_cdata.ste_rx_tag, 0,
		    &rxc->ste_map);
		if (error != 0) {
			device_printf(sc->ste_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}
/*
 * Release everything ste_dma_alloc() set up: per-buffer maps, descriptor
 * list memory/maps, and all DMA tags.  Safe to call with a partially
 * initialized softc; every resource is checked before being freed and
 * the pointer/paddr fields are cleared afterwards.
 */
static void
ste_dma_free(struct ste_softc *sc)
{
	struct ste_chain *txc;
	struct ste_chain_onefrag *rxc;
	int i;

	/* Tx buffers. */
	if (sc->ste_cdata.ste_tx_tag != NULL) {
		for (i = 0; i < STE_TX_LIST_CNT; i++) {
			txc = &sc->ste_cdata.ste_tx_chain[i];
			if (txc->ste_map != NULL) {
				bus_dmamap_destroy(sc->ste_cdata.ste_tx_tag,
				    txc->ste_map);
				txc->ste_map = NULL;
			}
		}
		bus_dma_tag_destroy(sc->ste_cdata.ste_tx_tag);
		sc->ste_cdata.ste_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->ste_cdata.ste_rx_tag != NULL) {
		for (i = 0; i < STE_RX_LIST_CNT; i++) {
			rxc = &sc->ste_cdata.ste_rx_chain[i];
			if (rxc->ste_map != NULL) {
				bus_dmamap_destroy(sc->ste_cdata.ste_rx_tag,
				    rxc->ste_map);
				rxc->ste_map = NULL;
			}
		}
		if (sc->ste_cdata.ste_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->ste_cdata.ste_rx_tag,
			    sc->ste_cdata.ste_rx_sparemap);
			sc->ste_cdata.ste_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->ste_cdata.ste_rx_tag);
		sc->ste_cdata.ste_rx_tag = NULL;
	}
	/* Tx descriptor list: unload map, free memory, destroy tag. */
	if (sc->ste_cdata.ste_tx_list_tag != NULL) {
		if (sc->ste_ldata.ste_tx_list_paddr != 0)
			bus_dmamap_unload(sc->ste_cdata.ste_tx_list_tag,
			    sc->ste_cdata.ste_tx_list_map);
		if (sc->ste_ldata.ste_tx_list != NULL)
			bus_dmamem_free(sc->ste_cdata.ste_tx_list_tag,
			    sc->ste_ldata.ste_tx_list,
			    sc->ste_cdata.ste_tx_list_map);
		sc->ste_ldata.ste_tx_list = NULL;
		sc->ste_ldata.ste_tx_list_paddr = 0;
		bus_dma_tag_destroy(sc->ste_cdata.ste_tx_list_tag);
		sc->ste_cdata.ste_tx_list_tag = NULL;
	}
	/* Rx descriptor list: same teardown sequence as Tx. */
	if (sc->ste_cdata.ste_rx_list_tag != NULL) {
		if (sc->ste_ldata.ste_rx_list_paddr != 0)
			bus_dmamap_unload(sc->ste_cdata.ste_rx_list_tag,
			    sc->ste_cdata.ste_rx_list_map);
		if (sc->ste_ldata.ste_rx_list != NULL)
			bus_dmamem_free(sc->ste_cdata.ste_rx_list_tag,
			    sc->ste_ldata.ste_rx_list,
			    sc->ste_cdata.ste_rx_list_map);
		sc->ste_ldata.ste_rx_list = NULL;
		sc->ste_ldata.ste_rx_list_paddr = 0;
		bus_dma_tag_destroy(sc->ste_cdata.ste_rx_list_tag);
		sc->ste_cdata.ste_rx_list_tag = NULL;
	}
	/* Parent tag goes last, after all child tags are destroyed. */
	if (sc->ste_cdata.ste_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->ste_cdata.ste_parent_tag);
		sc->ste_cdata.ste_parent_tag = NULL;
	}
}
/*
 * Attach a fresh mbuf cluster to the given Rx slot.  The cluster is
 * loaded through the spare DMA map first, so if allocation or load
 * fails the slot keeps its current buffer intact.  On success the
 * slot's old map becomes the new spare and the hardware descriptor is
 * re-armed with the new buffer's address.
 */
static int
ste_newbuf(struct ste_softc *sc, struct ste_chain_onefrag *rxc)
{
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int error, nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/* Reserve ETHER_ALIGN leading bytes to align the payload. */
	m_adj(m, ETHER_ALIGN);

	if ((error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_rx_tag,
	    sc->ste_cdata.ste_rx_sparemap, m, segs, &nsegs, 0)) != 0) {
		m_freem(m);
		return (error);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	/* Release the old buffer's DMA mapping, if any. */
	if (rxc->ste_mbuf != NULL) {
		bus_dmamap_sync(sc->ste_cdata.ste_rx_tag, rxc->ste_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->ste_cdata.ste_rx_tag, rxc->ste_map);
	}
	/* Swap the spare map into the slot. */
	map = rxc->ste_map;
	rxc->ste_map = sc->ste_cdata.ste_rx_sparemap;
	sc->ste_cdata.ste_rx_sparemap = map;
	bus_dmamap_sync(sc->ste_cdata.ste_rx_tag, rxc->ste_map,
	    BUS_DMASYNC_PREREAD);
	rxc->ste_mbuf = m;
	/* Re-arm the descriptor for the controller. */
	rxc->ste_ptr->ste_status = 0;
	rxc->ste_ptr->ste_frag.ste_addr = htole32(segs[0].ds_addr);
	rxc->ste_ptr->ste_frag.ste_len = htole32(segs[0].ds_len |
	    STE_FRAG_LAST);
	return (0);
}
static int
ste_init_rx_list(struct ste_softc *sc)
{
struct ste_chain_data *cd;
struct ste_list_data *ld;
int error, i;
sc->ste_int_rx_act = 0;
cd = &sc->ste_cdata;
ld = &sc->ste_ldata;
bzero(ld->ste_rx_list, STE_RX_LIST_SZ);
for (i = 0; i < STE_RX_LIST_CNT; i++) {
cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
error = ste_newbuf(sc, &cd->ste_rx_chain[i]);
if (error != 0)
return (error);
if (i == (STE_RX_LIST_CNT - 1)) {
cd->ste_rx_chain[i].ste_next = &cd->ste_rx_chain[0];
ld->ste_rx_list[i].ste_next =
htole32(ld->ste_rx_list_paddr +
(sizeof(struct ste_desc_onefrag) * 0));
} else {
cd->ste_rx_chain[i].ste_next = &cd->ste_rx_chain[i + 1];
ld->ste_rx_list[i].ste_next =
htole32(ld->ste_rx_list_paddr +
(sizeof(struct ste_desc_onefrag) * (i + 1)));
}
}
cd->ste_rx_head = &cd->ste_rx_chain[0];
bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag,
sc->ste_cdata.ste_rx_list_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
return (0);
}
/*
 * Reset the Tx descriptor ring: clear all descriptors, link the
 * software chain and precomputed physical next-pointers into a circle,
 * and reset the producer/consumer bookkeeping.
 */
static void
ste_init_tx_list(struct ste_softc *sc)
{
	struct ste_chain_data *cd;
	struct ste_list_data *ld;
	int idx, nxt;

	cd = &sc->ste_cdata;
	ld = &sc->ste_ldata;
	bzero(ld->ste_tx_list, STE_TX_LIST_SZ);
	for (idx = 0; idx < STE_TX_LIST_CNT; idx++) {
		cd->ste_tx_chain[idx].ste_ptr = &ld->ste_tx_list[idx];
		cd->ste_tx_chain[idx].ste_mbuf = NULL;
		/* Wrap the last descriptor back to the first. */
		nxt = (idx + 1) % STE_TX_LIST_CNT;
		cd->ste_tx_chain[idx].ste_next = &cd->ste_tx_chain[nxt];
		cd->ste_tx_chain[idx].ste_phys = htole32(STE_ADDR_LO(
		    ld->ste_tx_list_paddr + sizeof(struct ste_desc) * nxt));
	}
	cd->ste_last_tx = NULL;
	cd->ste_tx_prod = 0;
	cd->ste_tx_cons = 0;
	cd->ste_tx_cnt = 0;
	/* Push the cleared ring out to the hardware's view. */
	bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
	    sc->ste_cdata.ste_tx_list_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * if_init entry point: take the softc lock and run the locked
 * initialization path.
 */
static void
ste_init(void *xsc)
{
	struct ste_softc *sc = xsc;

	STE_LOCK(sc);
	ste_init_locked(sc);
	STE_UNLOCK(sc);
}
/*
 * Bring the chip up: reset it, program the station address, set up the
 * Rx/Tx rings and DMA pointers, configure thresholds and the Rx filter,
 * enable MAC Rx/Tx and interrupts, and start the tick callout.
 * Must be called with the softc lock held; a no-op if already running.
 */
static void
ste_init_locked(struct ste_softc *sc)
{
	if_t ifp;
	struct mii_data *mii;
	uint8_t val;
	int i;

	STE_LOCK_ASSERT(sc);
	ifp = sc->ste_ifp;
	mii = device_get_softc(sc->ste_miibus);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;

	ste_stop(sc);
	/* Reset the chip to a known state. */
	ste_reset(sc);

	/* Init our MAC address */
	/* Station address registers are written 16 bits at a time. */
	for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
		CSR_WRITE_2(sc, STE_PAR0 + i,
		    ((if_getlladdr(sc->ste_ifp)[i] & 0xff) |
		     if_getlladdr(sc->ste_ifp)[i + 1] << 8));
	}

	/* Init RX list */
	if (ste_init_rx_list(sc) != 0) {
		device_printf(sc->ste_dev,
		    "initialization failed: no memory for RX buffers\n");
		ste_stop(sc);
		return;
	}

	/* Set RX polling interval */
	CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64);

	/* Init TX descriptors */
	ste_init_tx_list(sc);

	/* Clear and disable WOL. */
	val = CSR_READ_1(sc, STE_WAKE_EVENT);
	val &= ~(STE_WAKEEVENT_WAKEPKT_ENB | STE_WAKEEVENT_MAGICPKT_ENB |
	    STE_WAKEEVENT_LINKEVT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB);
	CSR_WRITE_1(sc, STE_WAKE_EVENT, val);

	/* Set the TX freethresh value */
	CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, STE_PACKET_SIZE >> 8);

	/* Set the TX start threshold for best performance. */
	CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);

	/* Set the TX reclaim threshold. */
	CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (STE_PACKET_SIZE >> 4));

	/* Accept VLAN length packets */
	CSR_WRITE_2(sc, STE_MAX_FRAMELEN, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Set up the RX filter. */
	ste_rxfilter(sc);

	/* Load the address of the RX list. */
	/* DMA engine must be stalled while its list pointer changes. */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
	    STE_ADDR_LO(sc->ste_ldata.ste_rx_list_paddr));
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);

	/* Set TX polling interval(defer until we TX first packet). */
	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);

	/* Load address of the TX list */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	ste_wait(sc);
	/* Select 3.2us timer. */
	STE_CLRBIT4(sc, STE_DMACTL, STE_DMACTL_COUNTDOWN_SPEED |
	    STE_DMACTL_COUNTDOWN_MODE);

	/* Enable receiver and transmitter */
	CSR_WRITE_2(sc, STE_MACCTL0, 0);
	CSR_WRITE_2(sc, STE_MACCTL1, 0);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE);

	/* Enable stats counters. */
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE);
	/* Clear stats counters. */
	ste_stats_clear(sc);

	CSR_WRITE_2(sc, STE_COUNTDOWN, 0);
	CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		CSR_WRITE_2(sc, STE_IMR, 0);
	else
#endif
	/* Enable interrupts. */
	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);

	sc->ste_flags &= ~STE_FLAG_LINK;
	/* Switch to the current media. */
	mii_mediachg(mii);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	callout_reset(&sc->ste_callout, hz, ste_tick, sc);
}
/*
 * Stop the chip and reclaim all in-flight buffers: mask interrupts,
 * stall DMA, disable the MAC Rx/Tx paths, harvest final statistics and
 * free every mbuf still attached to the Rx/Tx rings.  Must be called
 * with the softc lock held.
 */
static void
ste_stop(struct ste_softc *sc)
{
	if_t ifp;
	struct ste_chain_onefrag *cur_rx;
	struct ste_chain *cur_tx;
	uint32_t val;
	int i;

	STE_LOCK_ASSERT(sc);
	ifp = sc->ste_ifp;

	callout_stop(&sc->ste_callout);
	sc->ste_timer = 0;
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING|IFF_DRV_OACTIVE));

	CSR_WRITE_2(sc, STE_IMR, 0);
	CSR_WRITE_2(sc, STE_COUNTDOWN, 0);
	/* Stop pending DMA. */
	val = CSR_READ_4(sc, STE_DMACTL);
	val |= STE_DMACTL_TXDMA_STALL | STE_DMACTL_RXDMA_STALL;
	CSR_WRITE_4(sc, STE_DMACTL, val);
	ste_wait(sc);
	/* Disable auto-polling. */
	CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 0);
	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);
	/* Nullify DMA address to stop any further DMA. */
	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR, 0);
	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
	/* Stop TX/RX MAC. */
	val = CSR_READ_2(sc, STE_MACCTL1);
	val |= STE_MACCTL1_TX_DISABLE | STE_MACCTL1_RX_DISABLE |
	    STE_MACCTL1_STATS_DISABLE;
	CSR_WRITE_2(sc, STE_MACCTL1, val);
	/* Poll until the MAC acknowledges the disable bits. */
	for (i = 0; i < STE_TIMEOUT; i++) {
		DELAY(10);
		if ((CSR_READ_2(sc, STE_MACCTL1) & (STE_MACCTL1_TX_DISABLE |
		    STE_MACCTL1_RX_DISABLE | STE_MACCTL1_STATS_DISABLE)) == 0)
			break;
	}
	if (i == STE_TIMEOUT)
		device_printf(sc->ste_dev, "Stopping MAC timed out\n");
	/* Acknowledge any pending interrupts. */
	CSR_READ_2(sc, STE_ISR_ACK);
	ste_stats_update(sc);

	/* Free any mbufs still attached to the Rx ring. */
	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		cur_rx = &sc->ste_cdata.ste_rx_chain[i];
		if (cur_rx->ste_mbuf != NULL) {
			bus_dmamap_sync(sc->ste_cdata.ste_rx_tag,
			    cur_rx->ste_map, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->ste_cdata.ste_rx_tag,
			    cur_rx->ste_map);
			m_freem(cur_rx->ste_mbuf);
			cur_rx->ste_mbuf = NULL;
		}
	}
	/* Free any mbufs still attached to the Tx ring. */
	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		cur_tx = &sc->ste_cdata.ste_tx_chain[i];
		if (cur_tx->ste_mbuf != NULL) {
			bus_dmamap_sync(sc->ste_cdata.ste_tx_tag,
			    cur_tx->ste_map, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->ste_cdata.ste_tx_tag,
			    cur_tx->ste_map);
			m_freem(cur_tx->ste_mbuf);
			cur_tx->ste_mbuf = NULL;
		}
	}
}
/*
 * Issue a full ASIC reset (global, Rx, Tx, DMA, FIFO, network,
 * auto-init, host and external reset bits all at once), then poll
 * until the chip clears its reset-busy flag.
 */
static void
ste_reset(struct ste_softc *sc)
{
	uint32_t ctl;
	int i;

	ctl = CSR_READ_4(sc, STE_ASICCTL);
	ctl |= STE_ASICCTL_GLOBAL_RESET | STE_ASICCTL_RX_RESET |
	    STE_ASICCTL_TX_RESET | STE_ASICCTL_DMA_RESET |
	    STE_ASICCTL_FIFO_RESET | STE_ASICCTL_NETWORK_RESET |
	    STE_ASICCTL_AUTOINIT_RESET |STE_ASICCTL_HOST_RESET |
	    STE_ASICCTL_EXTRESET_RESET;
	CSR_WRITE_4(sc, STE_ASICCTL, ctl);
	/* Read back to flush the posted write. */
	CSR_READ_4(sc, STE_ASICCTL);
	/*
	 * Due to the need of accessing EEPROM controller can take
	 * up to 1ms to complete the global reset.
	 */
	DELAY(1000);

	for (i = 0; i < STE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
			break;
		DELAY(10);
	}
	if (i == STE_TIMEOUT)
		device_printf(sc->ste_dev, "global reset never completed\n");
}
/*
 * Re-enable the MAC transmitter after a Tx error, retrying until the
 * hardware reports the transmitter as enabled or STE_TIMEOUT attempts
 * have been made.
 */
static void
ste_restart_tx(struct ste_softc *sc)
{
	uint16_t mac;
	int i;

	for (i = 0; i < STE_TIMEOUT; i++) {
		mac = CSR_READ_2(sc, STE_MACCTL1);
		mac |= STE_MACCTL1_TX_ENABLE;
		CSR_WRITE_2(sc, STE_MACCTL1, mac);
		/* Read back to see whether the enable actually took. */
		mac = CSR_READ_2(sc, STE_MACCTL1);
		if ((mac & STE_MACCTL1_TX_ENABLED) != 0)
			break;
		DELAY(10);
	}

	if (i == STE_TIMEOUT)
		/* Fix: message was missing its terminating newline. */
		device_printf(sc->ste_dev, "starting Tx failed\n");
}
/*
 * ifnet ioctl handler.  Handles interface flag changes, multicast
 * list updates, media selection (delegated to the MII layer) and
 * capability toggles (polling, WOL); everything else goes to
 * ether_ioctl().
 */
static int
ste_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct ste_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error = 0, mask;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;

	switch (command) {
	case SIOCSIFFLAGS:
		STE_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			/*
			 * If only PROMISC/ALLMULTI changed while running,
			 * reprogram the Rx filter instead of a full re-init.
			 */
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
			    ((if_getflags(ifp) ^ sc->ste_if_flags) &
			     (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				ste_rxfilter(sc);
			else
				ste_init_locked(sc);
		} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			ste_stop(sc);
		sc->ste_if_flags = if_getflags(ifp);
		STE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		STE_LOCK(sc);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			ste_rxfilter(sc);
		STE_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->ste_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		STE_LOCK(sc);
		/* mask holds the capabilities the caller wants toggled. */
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0 &&
		    (IFCAP_POLLING & if_getcapabilities(ifp)) != 0) {
			if_togglecapenable(ifp, IFCAP_POLLING);
			if ((IFCAP_POLLING & if_getcapenable(ifp)) != 0) {
				error = ether_poll_register(ste_poll, ifp);
				if (error != 0) {
					STE_UNLOCK(sc);
					break;
				}
				/* Disable interrupts. */
				CSR_WRITE_2(sc, STE_IMR, 0);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
			}
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0)
			if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
		STE_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*
 * Map an outgoing mbuf chain for DMA and fill in the Tx descriptor's
 * fragment list.  If the chain has more than STE_MAXFRAGS segments it
 * is collapsed first.  On failure the mbuf is freed and *m_head set to
 * NULL (except for transient load errors, where the caller may retry).
 * Returns 0 on success with the producer index and Tx count advanced.
 */
static int
ste_encap(struct ste_softc *sc, struct mbuf **m_head, struct ste_chain *txc)
{
	struct ste_frag *frag;
	struct mbuf *m;
	struct ste_desc *desc;
	bus_dma_segment_t txsegs[STE_MAXFRAGS];
	int error, i, nsegs;

	STE_LOCK_ASSERT(sc);
	M_ASSERTPKTHDR((*m_head));

	error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_tx_tag,
	    txc->ste_map, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		/* Too many fragments: collapse the chain and retry once. */
		m = m_collapse(*m_head, M_NOWAIT, STE_MAXFRAGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_tx_tag,
		    txc->ste_map, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	bus_dmamap_sync(sc->ste_cdata.ste_tx_tag, txc->ste_map,
	    BUS_DMASYNC_PREWRITE);

	desc = txc->ste_ptr;
	for (i = 0; i < nsegs; i++) {
		frag = &desc->ste_frags[i];
		frag->ste_addr = htole32(STE_ADDR_LO(txsegs[i].ds_addr));
		frag->ste_len = htole32(txsegs[i].ds_len);
	}
	/* Mark the final fragment for the hardware. */
	desc->ste_frags[i - 1].ste_len |= htole32(STE_FRAG_LAST);
	/*
	 * Because we use Tx polling we can't chain multiple
	 * Tx descriptors here. Otherwise we race with controller.
	 */
	desc->ste_next = 0;
	/* Request a Tx interrupt only every STE_TX_INTR_FRAMES frames. */
	if ((sc->ste_cdata.ste_tx_prod % STE_TX_INTR_FRAMES) == 0)
		desc->ste_ctl = htole32(STE_TXCTL_ALIGN_DIS |
		    STE_TXCTL_DMAINTR);
	else
		desc->ste_ctl = htole32(STE_TXCTL_ALIGN_DIS);
	txc->ste_mbuf = *m_head;
	STE_INC(sc->ste_cdata.ste_tx_prod, STE_TX_LIST_CNT);
	sc->ste_cdata.ste_tx_cnt++;

	return (0);
}
/*
 * if_start entry point: take the softc lock and run the locked
 * transmit path.
 */
static void
ste_start(if_t ifp)
{
	struct ste_softc *sc = if_getsoftc(ifp);

	STE_LOCK(sc);
	ste_start_locked(ifp);
	STE_UNLOCK(sc);
}
/*
 * Drain the interface send queue: encapsulate each packet into the Tx
 * ring and hand it to the DMA engine.  The first packet after the ring
 * was idle (re)loads the hardware Tx list pointer; subsequent packets
 * are chained by patching the previous descriptor's next pointer.
 * Must be called with the softc lock held.
 */
static void
ste_start_locked(if_t ifp)
{
	struct ste_softc *sc;
	struct ste_chain *cur_tx;
	struct mbuf *m_head = NULL;
	int enq;

	sc = if_getsoftc(ifp);
	STE_LOCK_ASSERT(sc);

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->ste_flags & STE_FLAG_LINK) == 0)
		return;

	for (enq = 0; !if_sendq_empty(ifp);) {
		if (sc->ste_cdata.ste_tx_cnt == STE_TX_LIST_CNT - 1) {
			/*
			 * Controller may have cached copy of the last used
			 * next ptr so we have to reserve one TFD to avoid
			 * TFD overruns.
			 */
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;
		cur_tx = &sc->ste_cdata.ste_tx_chain[sc->ste_cdata.ste_tx_prod];
		if (ste_encap(sc, &m_head, cur_tx) != 0) {
			/* Put a retryable packet back on the queue. */
			if (m_head == NULL)
				break;
			if_sendq_prepend(ifp, m_head);
			break;
		}
		if (sc->ste_cdata.ste_last_tx == NULL) {
			/* Ring was idle: point the DMA engine at our list. */
			bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
			    sc->ste_cdata.ste_tx_list_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
			ste_wait(sc);
			CSR_WRITE_4(sc, STE_TX_DMALIST_PTR,
			    STE_ADDR_LO(sc->ste_ldata.ste_tx_list_paddr));
			CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64);
			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
			ste_wait(sc);
		} else {
			/* Chain onto the previously queued descriptor. */
			sc->ste_cdata.ste_last_tx->ste_ptr->ste_next =
			    sc->ste_cdata.ste_last_tx->ste_phys;
			bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
			    sc->ste_cdata.ste_tx_list_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		}
		sc->ste_cdata.ste_last_tx = cur_tx;

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	/* Arm the watchdog if we queued anything. */
	if (enq > 0)
		sc->ste_timer = STE_TX_TIMEOUT;
}
/*
 * Per-tick Tx watchdog.  Decrements the armed timer; when it expires,
 * records an output error, reclaims whatever completed, re-initializes
 * the chip and restarts transmission.  Must be called with the softc
 * lock held.
 */
static void
ste_watchdog(struct ste_softc *sc)
{
	if_t ifp;

	STE_LOCK_ASSERT(sc);

	/* Timer not armed. */
	if (sc->ste_timer == 0)
		return;
	/* Armed but not yet expired. */
	if (--sc->ste_timer != 0)
		return;

	ifp = sc->ste_ifp;
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_printf(ifp, "watchdog timeout\n");

	/* Reclaim anything that did complete, then restart the chip. */
	ste_txeof(sc);
	ste_txeoc(sc);
	ste_rxeof(sc, -1);
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	ste_init_locked(sc);

	if (!if_sendq_empty(ifp))
		ste_start_locked(ifp);
}
/*
 * Shutdown is handled exactly like suspend: stop the chip and arm
 * wake-on-LAN if enabled.
 */
static int
ste_shutdown(device_t dev)
{

	return (ste_suspend(dev));
}
/*
 * Suspend method: stop the chip and program wake-on-LAN state under
 * the softc lock.
 */
static int
ste_suspend(device_t dev)
{
	struct ste_softc *sc;

	sc = device_get_softc(dev);

	STE_LOCK(sc);
	ste_stop(sc);
	ste_setwol(sc);
	STE_UNLOCK(sc);

	return (0);
}
/*
 * Resume method: clear any pending PME status left from suspend/WOL
 * and re-initialize the interface if it was up.
 */
static int
ste_resume(device_t dev)
{
	struct ste_softc *sc;
	if_t ifp;
	int pmc;
	uint16_t pmstat;

	sc = device_get_softc(dev);
	STE_LOCK(sc);
	if (pci_find_cap(sc->ste_dev, PCIY_PMG, &pmc) == 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->ste_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->ste_dev,
			    pmc + PCIR_POWER_STATUS, pmstat, 2);
		}
	}
	ifp = sc->ste_ifp;
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		/* Force a full re-init by clearing RUNNING first. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		ste_init_locked(sc);
	}
	STE_UNLOCK(sc);

	return (0);
}
/* Helpers to attach read-only 32/64-bit statistic leaves. */
#define	STE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define	STE_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)

/*
 * Create the device's sysctl tree: the int_rx_mod interrupt-moderation
 * knob (seeded from the device hints) and a "stats" subtree exposing
 * the Rx and Tx MAC counters kept in sc->ste_stats.
 */
static void
ste_sysctl_node(struct ste_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct ste_hw_stats *stats;

	stats = &sc->ste_stats;
	ctx = device_get_sysctl_ctx(sc->ste_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ste_dev));

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_rx_mod",
	    CTLFLAG_RW, &sc->ste_int_rx_mod, 0, "ste RX interrupt moderation");
	/* Pull in device tunables. */
	sc->ste_int_rx_mod = STE_IM_RX_TIMER_DEFAULT;
	resource_int_value(device_get_name(sc->ste_dev),
	    device_get_unit(sc->ste_dev), "int_rx_mod", &sc->ste_int_rx_mod);

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "STE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	STE_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->rx_bytes, "Good octets");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_frames, "Good frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->rx_bcast_frames, "Good broadcast frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->rx_mcast_frames, "Good multicast frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "lost_frames",
	    &stats->rx_lost_frames, "Lost frames");

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	STE_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->tx_bytes, "Good octets");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->tx_bcast_frames, "Good broadcast frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->tx_mcast_frames, "Good multicast frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "carrier_errs",
	    &stats->tx_carrsense_errs, "Carrier sense errors");
	STE_SYSCTL_STAT_ADD32(ctx, child, "single_colls",
	    &stats->tx_single_colls, "Single collisions");
	STE_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	STE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	STE_SYSCTL_STAT_ADD32(ctx, child, "defers",
	    &stats->tx_frames_defered, "Frames with deferrals");
	/* Fix: description typo "derferrals" -> "deferrals". */
	STE_SYSCTL_STAT_ADD32(ctx, child, "excess_defers",
	    &stats->tx_excess_defers, "Frames with excessive deferrals");
	STE_SYSCTL_STAT_ADD32(ctx, child, "abort",
	    &stats->tx_abort, "Aborted frames due to Excessive collisions");
}

#undef STE_SYSCTL_STAT_ADD32
#undef STE_SYSCTL_STAT_ADD64
/*
 * Program wake-on-LAN state for suspend/shutdown: enable magic-packet
 * wake and request PME when IFCAP_WOL_MAGIC is enabled, otherwise make
 * sure all wake events and PME are off.  Must be called with the softc
 * lock held.
 */
static void
ste_setwol(struct ste_softc *sc)
{
	if_t ifp;
	uint16_t pmstat;
	uint8_t val;
	int pmc;

	STE_LOCK_ASSERT(sc);

	if (pci_find_cap(sc->ste_dev, PCIY_PMG, &pmc) != 0) {
		/* No PM capability: just disable WOL in the chip. */
		/* Disable WOL. */
		CSR_READ_1(sc, STE_WAKE_EVENT);
		CSR_WRITE_1(sc, STE_WAKE_EVENT, 0);
		return;
	}

	ifp = sc->ste_ifp;
	val = CSR_READ_1(sc, STE_WAKE_EVENT);
	val &= ~(STE_WAKEEVENT_WAKEPKT_ENB | STE_WAKEEVENT_MAGICPKT_ENB |
	    STE_WAKEEVENT_LINKEVT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB);
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
		val |= STE_WAKEEVENT_MAGICPKT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB;
	CSR_WRITE_1(sc, STE_WAKE_EVENT, val);
	/* Request PME. */
	pmstat = pci_read_config(sc->ste_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->ste_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}
diff --git a/sys/dev/stge/if_stge.c b/sys/dev/stge/if_stge.c
index 170d9e4da94c..97c2b751d476 100644
--- a/sys/dev/stge/if_stge.c
+++ b/sys/dev/stge/if_stge.c
@@ -1,2598 +1,2592 @@
/* $NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $ */
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2001 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Device driver for the Sundance Tech. TC9021 10/100/1000
* Ethernet controller.
*/
#include <sys/cdefs.h>
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/stge/if_stgereg.h>
#define STGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
MODULE_DEPEND(stge, pci, 1, 1, 1);
MODULE_DEPEND(stge, ether, 1, 1, 1);
MODULE_DEPEND(stge, miibus, 1, 1, 1);
/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
/*
* Devices supported by this driver.
*/
/* PCI vendor/device ID table used by stge_probe() to match hardware. */
static const struct stge_product {
	uint16_t	stge_vendorid;	/* PCI vendor ID */
	uint16_t	stge_deviceid;	/* PCI device ID */
	const char	*stge_name;	/* probe description string */
} stge_products[] = {
	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST1023,
	  "Sundance ST-1023 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST2021,
	  "Sundance ST-2021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021,
	  "Tamarack TC9021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Tamarack TC9021 Gigabit Ethernet" },

	/*
	 * The Sundance sample boards use the Sundance vendor ID,
	 * but the Tamarack product ID.
	 */
	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_DLINK,		DEVICEID_DLINK_DL4000,
	  "D-Link DL-4000 Gigabit Ethernet" },

	{ VENDOR_ANTARES,	DEVICEID_ANTARES_TC9021,
	  "Antares Gigabit Ethernet" }
};
/* Forward declarations: device lifecycle methods. */
static int	stge_probe(device_t);
static int	stge_attach(device_t);
static int	stge_detach(device_t);
static int	stge_shutdown(device_t);
static int	stge_suspend(device_t);
static int	stge_resume(device_t);

/* Forward declarations: datapath and ifnet entry points. */
static int	stge_encap(struct stge_softc *, struct mbuf **);
static void	stge_start(if_t);
static void	stge_start_locked(if_t);
static void	stge_watchdog(struct stge_softc *);
static int	stge_ioctl(if_t, u_long, caddr_t);
static void	stge_init(void *);
static void	stge_init_locked(struct stge_softc *);
static void	stge_vlan_setup(struct stge_softc *);
static void	stge_stop(struct stge_softc *);
static void	stge_start_tx(struct stge_softc *);
static void	stge_start_rx(struct stge_softc *);
static void	stge_stop_tx(struct stge_softc *);
static void	stge_stop_rx(struct stge_softc *);
static void	stge_reset(struct stge_softc *, uint32_t);
static int	stge_eeprom_wait(struct stge_softc *);
static void	stge_read_eeprom(struct stge_softc *, int, uint16_t *);
static void	stge_tick(void *);
static void	stge_stats_update(struct stge_softc *);
static void	stge_set_filter(struct stge_softc *);
static void	stge_set_multi(struct stge_softc *);
static void	stge_link_task(void *, int);
static void	stge_intr(void *);
static __inline int stge_tx_error(struct stge_softc *);
static void	stge_txeof(struct stge_softc *);
static int	stge_rxeof(struct stge_softc *);
static __inline void stge_discard_rxbuf(struct stge_softc *, int);
static int	stge_newbuf(struct stge_softc *, int);
#ifndef __NO_STRICT_ALIGNMENT
static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
#endif

/* Forward declarations: MII interface and media handling. */
static int	stge_miibus_readreg(device_t, int, int);
static int	stge_miibus_writereg(device_t, int, int, int);
static void	stge_miibus_statchg(device_t);
static int	stge_mediachange(if_t);
static void	stge_mediastatus(if_t, struct ifmediareq *);

/* Forward declarations: DMA setup and ring initialization. */
static void	stge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int	stge_dma_alloc(struct stge_softc *);
static void	stge_dma_free(struct stge_softc *);
static void	stge_dma_wait(struct stge_softc *);
static void	stge_init_tx_ring(struct stge_softc *);
static int	stge_init_rx_ring(struct stge_softc *);
#ifdef DEVICE_POLLING
static int	stge_poll(if_t, enum poll_cmd, int);
#endif

/* Forward declarations: WOL and sysctl handlers. */
static void	stge_setwol(struct stge_softc *);
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);
/*
 * MII bit-bang glue: maps the generic mii_bitbang(4) pin roles onto
 * the TC9021 PhyCtrl register bits.  MDO and MDI share the same pin
 * (PC_MgmtData); direction is selected via PC_MgmtDir.
 */
static uint32_t stge_mii_bitbang_read(device_t);
static void stge_mii_bitbang_write(device_t, uint32_t);
static const struct mii_bitbang_ops stge_mii_bitbang_ops = {
stge_mii_bitbang_read,
stge_mii_bitbang_write,
{
PC_MgmtData, /* MII_BIT_MDO */
PC_MgmtData, /* MII_BIT_MDI */
PC_MgmtClk, /* MII_BIT_MDC */
PC_MgmtDir, /* MII_BIT_DIR_HOST_PHY */
0, /* MII_BIT_DIR_PHY_HOST */
}
};
/* newbus device and miibus method tables for the stge(4) driver. */
static device_method_t stge_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, stge_probe),
DEVMETHOD(device_attach, stge_attach),
DEVMETHOD(device_detach, stge_detach),
DEVMETHOD(device_shutdown, stge_shutdown),
DEVMETHOD(device_suspend, stge_suspend),
DEVMETHOD(device_resume, stge_resume),
/* MII interface */
DEVMETHOD(miibus_readreg, stge_miibus_readreg),
DEVMETHOD(miibus_writereg, stge_miibus_writereg),
DEVMETHOD(miibus_statchg, stge_miibus_statchg),
DEVMETHOD_END
};
static driver_t stge_driver = {
"stge",
stge_methods,
sizeof(struct stge_softc)
};
/* Register on the PCI bus and attach a miibus child for the PHY. */
DRIVER_MODULE(stge, pci, stge_driver, 0, 0);
DRIVER_MODULE(miibus, stge, miibus_driver, 0, 0);
/*
 * Bus resource layouts: I/O-port mapping uses BAR(0), memory mapping
 * uses BAR(1).  stge_attach() picks one based on the BAR contents.
 * Index 1 of sc_res is the (shareable) interrupt in both layouts.
 */
static struct resource_spec stge_res_spec_io[] = {
{ SYS_RES_IOPORT, PCIR_BAR(0), RF_ACTIVE },
{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
{ -1, 0, 0 }
};
static struct resource_spec stge_res_spec_mem[] = {
{ SYS_RES_MEMORY, PCIR_BAR(1), RF_ACTIVE },
{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
{ -1, 0, 0 }
};
/*
 * stge_mii_bitbang_read: [mii bit-bang interface function]
 *
 * Read the MII serial port for the MII bit-bang module, with a full
 * read/write barrier so pin samples are observed in program order.
 */
static uint32_t
stge_mii_bitbang_read(device_t dev)
{
	struct stge_softc *sc = device_get_softc(dev);
	uint32_t pins;

	pins = CSR_READ_1(sc, STGE_PhyCtrl);
	CSR_BARRIER(sc, STGE_PhyCtrl, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (pins);
}
/*
 * stge_mii_bitbang_write: [mii bit-bang interface function]
 *
 * Write the MII serial port pins for the MII bit-bang module, with a
 * full read/write barrier after the store.
 */
static void
stge_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct stge_softc *sc = device_get_softc(dev);

	CSR_WRITE_1(sc, STGE_PhyCtrl, val);
	CSR_BARRIER(sc, STGE_PhyCtrl, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
/*
 * stge_miibus_readreg: [mii interface function]
 *
 * Read a PHY register on the MII of the TC9021.  The pseudo-register
 * STGE_PhyCtrl is served directly from the MAC so ip1000phy(4) can
 * read it; everything else goes through the bit-bang module.  Both
 * paths hold the MII mutex around the hardware access.
 */
static int
stge_miibus_readreg(device_t dev, int phy, int reg)
{
struct stge_softc *sc;
int error, val;
sc = device_get_softc(dev);
if (reg == STGE_PhyCtrl) {
/* XXX allow ip1000phy read STGE_PhyCtrl register. */
STGE_MII_LOCK(sc);
error = CSR_READ_1(sc, STGE_PhyCtrl);
STGE_MII_UNLOCK(sc);
return (error);
}
STGE_MII_LOCK(sc);
val = mii_bitbang_readreg(dev, &stge_mii_bitbang_ops, phy, reg);
STGE_MII_UNLOCK(sc);
return (val);
}
/*
 * stge_miibus_writereg: [mii interface function]
 *
 * Write a PHY register on the MII of the TC9021 under the MII mutex.
 * Always reports success.
 */
static int
stge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct stge_softc *sc = device_get_softc(dev);

	STGE_MII_LOCK(sc);
	mii_bitbang_writereg(dev, &stge_mii_bitbang_ops, phy, reg, val);
	STGE_MII_UNLOCK(sc);
	return (0);
}
/*
 * stge_miibus_statchg: [mii interface function]
 *
 * Callback from MII layer when media changes.  The actual MACCtrl
 * reprogramming is deferred to stge_link_task() on the SWI taskqueue
 * rather than done inline here.
 */
static void
stge_miibus_statchg(device_t dev)
{
struct stge_softc *sc;
sc = device_get_softc(dev);
taskqueue_enqueue(taskqueue_swi, &sc->sc_link_task);
}
/*
 * stge_mediastatus: [ifmedia interface function]
 *
 * Get the current interface media status: poll the PHY via the MII
 * layer, then report its status and active-media words to the caller.
 */
static void
stge_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
struct stge_softc *sc;
struct mii_data *mii;
sc = if_getsoftc(ifp);
mii = device_get_softc(sc->sc_miibus);
mii_pollstat(mii);
ifmr->ifm_status = mii->mii_media_status;
ifmr->ifm_active = mii->mii_media_active;
}
/*
 * stge_mediachange: [ifmedia interface function]
 *
 * Set hardware to the newly-selected media via the MII layer.
 * Always reports success.
 */
static int
stge_mediachange(if_t ifp)
{
	struct stge_softc *sc = if_getsoftc(ifp);

	mii_mediachg(device_get_softc(sc->sc_miibus));
	return (0);
}
/*
 * stge_eeprom_wait:
 *
 * Poll the EEPROM controller until it reports idle.  Returns 0 once
 * idle, 1 if still busy after STGE_TIMEOUT 1ms polls.
 */
static int
stge_eeprom_wait(struct stge_softc *sc)
{
	int tries;

	for (tries = 0; tries < STGE_TIMEOUT; tries++) {
		DELAY(1000);
		if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
			return (0);
	}
	return (1);
}
/*
 * stge_read_eeprom:
 *
 * Read one 16-bit word at `offset' from the serial EEPROM into *data.
 * Waits for the controller before issuing the read opcode and again
 * before latching the data; timeouts are logged but the read result
 * is returned regardless.
 */
static void
stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
{
if (stge_eeprom_wait(sc))
device_printf(sc->sc_dev, "EEPROM failed to come ready\n");
CSR_WRITE_2(sc, STGE_EepromCtrl,
EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
if (stge_eeprom_wait(sc))
device_printf(sc->sc_dev, "EEPROM read timed out\n");
*data = CSR_READ_2(sc, STGE_EepromData);
}
/*
 * stge_probe:
 *
 * Match the adapter's PCI vendor/device pair against the
 * supported-device table and set the device description on a hit.
 */
static int
stge_probe(device_t dev)
{
	const struct stge_product *sp;
	uint16_t vendor, devid;
	int i;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (i = 0, sp = stge_products; i < nitems(stge_products);
	    i++, sp++) {
		if (sp->stge_vendorid == vendor &&
		    sp->stge_deviceid == devid) {
			device_set_desc(dev, sp->stge_name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}
/*
 * stge_attach:
 *
 * Attach a TC9021: initialize locks and deferred-work primitives,
 * map the register BAR, create sysctl tunables, allocate DMA rings,
 * reset the chip, read the station address, set up the ifnet and
 * PHY, and hook the interrupt.  Any failure jumps to `fail', which
 * unwinds via stge_detach().
 */
static int
stge_attach(device_t dev)
{
struct stge_softc *sc;
if_t ifp;
uint8_t enaddr[ETHER_ADDR_LEN];
int error, flags, i;
uint16_t cmd;
uint32_t val;
error = 0;
sc = device_get_softc(dev);
sc->sc_dev = dev;
mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF);
mtx_init(&sc->sc_mii_mtx, "stge_mii_mutex", NULL, MTX_DEF);
callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
TASK_INIT(&sc->sc_link_task, 0, stge_link_task, sc);
/*
* Map the device.
*/
pci_enable_busmaster(dev);
cmd = pci_read_config(dev, PCIR_COMMAND, 2);
val = pci_read_config(dev, PCIR_BAR(1), 4);
/*
* NOTE(review): selecting the memory spec when PCI_BAR_IO(BAR1) is
* true looks inverted, but matches long-standing driver behavior --
* confirm against working hardware before changing.
*/
if (PCI_BAR_IO(val))
sc->sc_spec = stge_res_spec_mem;
else {
val = pci_read_config(dev, PCIR_BAR(0), 4);
if (!PCI_BAR_IO(val)) {
device_printf(sc->sc_dev, "couldn't locate IO BAR\n");
error = ENXIO;
goto fail;
}
sc->sc_spec = stge_res_spec_io;
}
error = bus_alloc_resources(dev, sc->sc_spec, sc->sc_res);
if (error != 0) {
device_printf(dev, "couldn't allocate %s resources\n",
sc->sc_spec == stge_res_spec_mem ? "memory" : "I/O");
goto fail;
}
sc->sc_rev = pci_get_revid(dev);
/* Export the Rx interrupt moderation knobs. */
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"rxint_nframe", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
&sc->sc_rxint_nframe, 0, sysctl_hw_stge_rxint_nframe, "I",
"stge rx interrupt nframe");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"rxint_dmawait", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
&sc->sc_rxint_dmawait, 0, sysctl_hw_stge_rxint_dmawait, "I",
"stge rx interrupt dmawait");
/* Pull in device tunables. */
sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
error = resource_int_value(device_get_name(dev), device_get_unit(dev),
"rxint_nframe", &sc->sc_rxint_nframe);
if (error == 0) {
if (sc->sc_rxint_nframe < STGE_RXINT_NFRAME_MIN ||
sc->sc_rxint_nframe > STGE_RXINT_NFRAME_MAX) {
device_printf(dev, "rxint_nframe value out of range; "
"using default: %d\n", STGE_RXINT_NFRAME_DEFAULT);
sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
}
}
sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
error = resource_int_value(device_get_name(dev), device_get_unit(dev),
"rxint_dmawait", &sc->sc_rxint_dmawait);
if (error == 0) {
if (sc->sc_rxint_dmawait < STGE_RXINT_DMAWAIT_MIN ||
sc->sc_rxint_dmawait > STGE_RXINT_DMAWAIT_MAX) {
device_printf(dev, "rxint_dmawait value out of range; "
"using default: %d\n", STGE_RXINT_DMAWAIT_DEFAULT);
sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
}
}
if ((error = stge_dma_alloc(sc)) != 0)
goto fail;
/*
* Determine if we're copper or fiber. It affects how we
* reset the card.
*/
if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
sc->sc_usefiber = 1;
else
sc->sc_usefiber = 0;
/* Load LED configuration from EEPROM. */
stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led);
/*
* Reset the chip to a known state.
*/
STGE_LOCK(sc);
stge_reset(sc, STGE_RESET_FULL);
STGE_UNLOCK(sc);
/*
* Reading the station address from the EEPROM doesn't seem
* to work, at least on my sample boards. Instead, since
* the reset sequence does AutoInit, read it from the station
* address registers. For Sundance 1023 you can only read it
* from EEPROM.
*/
if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) {
uint16_t v;
v = CSR_READ_2(sc, STGE_StationAddress0);
enaddr[0] = v & 0xff;
enaddr[1] = v >> 8;
v = CSR_READ_2(sc, STGE_StationAddress1);
enaddr[2] = v & 0xff;
enaddr[3] = v >> 8;
v = CSR_READ_2(sc, STGE_StationAddress2);
enaddr[4] = v & 0xff;
enaddr[5] = v >> 8;
sc->sc_stge1023 = 0;
} else {
uint16_t myaddr[ETHER_ADDR_LEN / 2];
for (i = 0; i <ETHER_ADDR_LEN / 2; i++) {
stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
&myaddr[i]);
myaddr[i] = le16toh(myaddr[i]);
}
bcopy(myaddr, enaddr, sizeof(enaddr));
sc->sc_stge1023 = 1;
}
ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(sc->sc_dev, "failed to if_alloc()\n");
- error = ENXIO;
- goto fail;
- }
-
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setioctlfn(ifp, stge_ioctl);
if_setstartfn(ifp, stge_start);
if_setinitfn(ifp, stge_init);
if_setsendqlen(ifp, STGE_TX_RING_CNT - 1);
if_setsendqready(ifp);
/* Revision B3 and earlier chips have checksum bug. */
if (sc->sc_rev >= 0x0c) {
if_sethwassist(ifp, STGE_CSUM_FEATURES);
if_setcapabilities(ifp, IFCAP_HWCSUM);
} else {
if_sethwassist(ifp, 0);
if_setcapabilities(ifp, 0);
}
if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
if_setcapenable(ifp, if_getcapabilities(ifp));
/*
* Read some important bits from the PhyCtrl register.
*/
sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
(PC_PhyDuplexPolarity | PC_PhyLnkPolarity);
/* Set up MII bus. */
flags = MIIF_DOPAUSE;
if (sc->sc_rev >= 0x40 && sc->sc_rev <= 0x4e)
flags |= MIIF_MACPRIV0;
error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, stge_mediachange,
stge_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
flags);
if (error != 0) {
device_printf(sc->sc_dev, "attaching PHYs failed\n");
goto fail;
}
ether_ifattach(ifp, enaddr);
/* VLAN capability setup */
if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING, 0);
if (sc->sc_rev >= 0x0c)
if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
if_setcapenable(ifp, if_getcapabilities(ifp));
#ifdef DEVICE_POLLING
if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif
/*
* Tell the upper layer(s) we support long frames.
* Must appear after the call to ether_ifattach() because
* ether_ifattach() sets ifi_hdrlen to the default value.
*/
if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
/*
* The manual recommends disabling early transmit, so we
* do. It's disabled anyway, if using IP checksumming,
* since the entire packet must be in the FIFO in order
* for the chip to perform the checksum.
*/
sc->sc_txthresh = 0x0fff;
/*
* Disable MWI if the PCI layer tells us to.
*/
sc->sc_DMACtrl = 0;
if ((cmd & PCIM_CMD_MWRICEN) == 0)
sc->sc_DMACtrl |= DMAC_MWIDisable;
/*
* Hookup IRQ
*/
error = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_NET | INTR_MPSAFE,
NULL, stge_intr, sc, &sc->sc_ih);
if (error != 0) {
ether_ifdetach(ifp);
device_printf(sc->sc_dev, "couldn't set up IRQ\n");
sc->sc_ifp = NULL;
goto fail;
}
fail:
if (error != 0)
stge_detach(dev);
return (error);
}
/*
 * stge_detach:
 *
 * Detach the interface: stop the hardware, drain the tick callout
 * and link task, detach the ifnet and PHY, and release DMA, IRQ and
 * bus resources.  Also serves as the error-unwind path from
 * stge_attach(), so every step is guarded against state that was
 * never initialized.
 */
static int
stge_detach(device_t dev)
{
struct stge_softc *sc;
if_t ifp;
sc = device_get_softc(dev);
ifp = sc->sc_ifp;
#ifdef DEVICE_POLLING
if (ifp && if_getcapenable(ifp) & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
if (device_is_attached(dev)) {
STGE_LOCK(sc);
/* XXX */
sc->sc_detach = 1;
stge_stop(sc);
STGE_UNLOCK(sc);
callout_drain(&sc->sc_tick_ch);
taskqueue_drain(taskqueue_swi, &sc->sc_link_task);
ether_ifdetach(ifp);
}
if (sc->sc_miibus != NULL) {
device_delete_child(dev, sc->sc_miibus);
sc->sc_miibus = NULL;
}
bus_generic_detach(dev);
stge_dma_free(sc);
if (ifp != NULL) {
if_free(ifp);
sc->sc_ifp = NULL;
}
/* Tear down the interrupt before releasing sc_res[1] below. */
if (sc->sc_ih) {
bus_teardown_intr(dev, sc->sc_res[1], sc->sc_ih);
sc->sc_ih = NULL;
}
if (sc->sc_spec)
bus_release_resources(dev, sc->sc_spec, sc->sc_res);
mtx_destroy(&sc->sc_mii_mtx);
mtx_destroy(&sc->sc_mtx);
return (0);
}
/* Result container for stge_dmamap_cb(): the loaded bus address. */
struct stge_dmamap_arg {
bus_addr_t stge_busaddr;
};
/*
 * bus_dmamap_load() callback: record the bus address of the single
 * segment into the caller-supplied stge_dmamap_arg; on error leave
 * the argument untouched.
 */
static void
stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct stge_dmamap_arg *ctx = arg;

	if (error != 0)
		return;
	ctx->stge_busaddr = segs[0].ds_addr;
}
/*
 * stge_dma_alloc:
 *
 * Create the busdma tag hierarchy (parent, Tx/Rx descriptor rings,
 * Tx/Rx buffers), allocate and load the ring memory, and create
 * per-descriptor maps plus a spare Rx map.  Returns 0 or a busdma
 * errno; partially-created state is released by stge_dma_free().
 */
static int
stge_dma_alloc(struct stge_softc *sc)
{
struct stge_dmamap_arg ctx;
struct stge_txdesc *txd;
struct stge_rxdesc *rxd;
int error, i;
/* create parent tag. */
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),/* parent */
1, 0, /* algnmnt, boundary */
STGE_DMA_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
0, /* nsegments */
BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->sc_cdata.stge_parent_tag);
if (error != 0) {
device_printf(sc->sc_dev, "failed to create parent DMA tag\n");
goto fail;
}
/* create tag for Tx ring. */
error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
STGE_RING_ALIGN, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
STGE_TX_RING_SZ, /* maxsize */
1, /* nsegments */
STGE_TX_RING_SZ, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->sc_cdata.stge_tx_ring_tag);
if (error != 0) {
device_printf(sc->sc_dev,
"failed to allocate Tx ring DMA tag\n");
goto fail;
}
/* create tag for Rx ring. */
error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
STGE_RING_ALIGN, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
STGE_RX_RING_SZ, /* maxsize */
1, /* nsegments */
STGE_RX_RING_SZ, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->sc_cdata.stge_rx_ring_tag);
if (error != 0) {
device_printf(sc->sc_dev,
"failed to allocate Rx ring DMA tag\n");
goto fail;
}
/* create tag for Tx buffers. */
error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
1, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
MCLBYTES * STGE_MAXTXSEGS, /* maxsize */
STGE_MAXTXSEGS, /* nsegments */
MCLBYTES, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->sc_cdata.stge_tx_tag);
if (error != 0) {
device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n");
goto fail;
}
/* create tag for Rx buffers. */
error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
1, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
MCLBYTES, /* maxsize */
1, /* nsegments */
MCLBYTES, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->sc_cdata.stge_rx_tag);
if (error != 0) {
device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n");
goto fail;
}
/* allocate DMA'able memory and load the DMA map for Tx ring. */
error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag,
(void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT |
BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdata.stge_tx_ring_map);
if (error != 0) {
device_printf(sc->sc_dev,
"failed to allocate DMA'able memory for Tx ring\n");
goto fail;
}
ctx.stge_busaddr = 0;
error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag,
sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring,
STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
if (error != 0 || ctx.stge_busaddr == 0) {
device_printf(sc->sc_dev,
"failed to load DMA'able memory for Tx ring\n");
goto fail;
}
sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr;
/* allocate DMA'able memory and load the DMA map for Rx ring. */
error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag,
(void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT |
BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdata.stge_rx_ring_map);
if (error != 0) {
device_printf(sc->sc_dev,
"failed to allocate DMA'able memory for Rx ring\n");
goto fail;
}
ctx.stge_busaddr = 0;
error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag,
sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring,
STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
if (error != 0 || ctx.stge_busaddr == 0) {
device_printf(sc->sc_dev,
"failed to load DMA'able memory for Rx ring\n");
goto fail;
}
sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr;
/* create DMA maps for Tx buffers. */
for (i = 0; i < STGE_TX_RING_CNT; i++) {
txd = &sc->sc_cdata.stge_txdesc[i];
txd->tx_m = NULL;
txd->tx_dmamap = 0;
error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0,
&txd->tx_dmamap);
if (error != 0) {
device_printf(sc->sc_dev,
"failed to create Tx dmamap\n");
goto fail;
}
}
/* create DMA maps for Rx buffers. */
if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
&sc->sc_cdata.stge_rx_sparemap)) != 0) {
device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n");
goto fail;
}
for (i = 0; i < STGE_RX_RING_CNT; i++) {
rxd = &sc->sc_cdata.stge_rxdesc[i];
rxd->rx_m = NULL;
rxd->rx_dmamap = 0;
error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
&rxd->rx_dmamap);
if (error != 0) {
device_printf(sc->sc_dev,
"failed to create Rx dmamap\n");
goto fail;
}
}
fail:
return (error);
}
/*
 * stge_dma_free:
 *
 * Release everything stge_dma_alloc() created: unload and free the
 * Tx/Rx rings, destroy the per-descriptor and spare maps, and tear
 * down all tags.  Safe to call on partially-allocated state; every
 * step checks whether its resource exists.
 */
static void
stge_dma_free(struct stge_softc *sc)
{
struct stge_txdesc *txd;
struct stge_rxdesc *rxd;
int i;
/* Tx ring */
if (sc->sc_cdata.stge_tx_ring_tag) {
if (sc->sc_rdata.stge_tx_ring_paddr)
bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag,
sc->sc_cdata.stge_tx_ring_map);
if (sc->sc_rdata.stge_tx_ring)
bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag,
sc->sc_rdata.stge_tx_ring,
sc->sc_cdata.stge_tx_ring_map);
sc->sc_rdata.stge_tx_ring = NULL;
sc->sc_rdata.stge_tx_ring_paddr = 0;
bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag);
sc->sc_cdata.stge_tx_ring_tag = NULL;
}
/* Rx ring */
if (sc->sc_cdata.stge_rx_ring_tag) {
if (sc->sc_rdata.stge_rx_ring_paddr)
bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag,
sc->sc_cdata.stge_rx_ring_map);
if (sc->sc_rdata.stge_rx_ring)
bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag,
sc->sc_rdata.stge_rx_ring,
sc->sc_cdata.stge_rx_ring_map);
sc->sc_rdata.stge_rx_ring = NULL;
sc->sc_rdata.stge_rx_ring_paddr = 0;
bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag);
sc->sc_cdata.stge_rx_ring_tag = NULL;
}
/* Tx buffers */
if (sc->sc_cdata.stge_tx_tag) {
for (i = 0; i < STGE_TX_RING_CNT; i++) {
txd = &sc->sc_cdata.stge_txdesc[i];
if (txd->tx_dmamap) {
bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
txd->tx_dmamap);
txd->tx_dmamap = 0;
}
}
bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
sc->sc_cdata.stge_tx_tag = NULL;
}
/* Rx buffers */
if (sc->sc_cdata.stge_rx_tag) {
for (i = 0; i < STGE_RX_RING_CNT; i++) {
rxd = &sc->sc_cdata.stge_rxdesc[i];
if (rxd->rx_dmamap) {
bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
rxd->rx_dmamap);
rxd->rx_dmamap = 0;
}
}
if (sc->sc_cdata.stge_rx_sparemap) {
bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
sc->sc_cdata.stge_rx_sparemap);
sc->sc_cdata.stge_rx_sparemap = 0;
}
bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
sc->sc_cdata.stge_rx_tag = NULL;
}
/* Parent tag goes last, after all children are destroyed. */
if (sc->sc_cdata.stge_parent_tag) {
bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag);
sc->sc_cdata.stge_parent_tag = NULL;
}
}
/*
 * stge_shutdown:
 *
 * Make sure the interface is stopped at reboot time.  Reuses the
 * suspend path so WOL state is also programmed.
 */
static int
stge_shutdown(device_t dev)
{
return (stge_suspend(dev));
}
/*
 * stge_setwol:
 *
 * Program Wake On LAN: clear all wake-event bits, then enable
 * magic-packet wake when IFCAP_WOL_MAGIC is set on the interface,
 * and finally reset Tx so nothing is transmitted while asleep.
 * Called with the softc lock held.
 */
static void
stge_setwol(struct stge_softc *sc)
{
if_t ifp;
uint8_t v;
STGE_LOCK_ASSERT(sc);
ifp = sc->sc_ifp;
v = CSR_READ_1(sc, STGE_WakeEvent);
/* Disable all WOL bits. */
v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable |
WE_WakeOnLanEnable);
if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
v |= WE_MagicPktEnable | WE_WakeOnLanEnable;
CSR_WRITE_1(sc, STGE_WakeEvent, v);
/* Reset Tx and prevent transmission. */
CSR_WRITE_4(sc, STGE_AsicCtrl,
CSR_READ_4(sc, STGE_AsicCtrl) | AC_TxReset);
/*
* TC9021 automatically reset link speed to 100Mbps when it's put
* into sleep so there is no need to try to resetting link speed.
*/
}
/*
 * stge_suspend:
 *
 * Stop the interface, mark it suspended, and arm Wake On LAN before
 * the system goes to sleep.
 */
static int
stge_suspend(device_t dev)
{
	struct stge_softc *sc = device_get_softc(dev);

	STGE_LOCK(sc);
	stge_stop(sc);
	sc->sc_suspended = 1;
	stge_setwol(sc);
	STGE_UNLOCK(sc);
	return (0);
}
/*
 * stge_resume:
 *
 * Wake the interface after suspend: clear the WOL event bits so
 * special frames no longer interfere with normal Rx, reinitialize
 * if the interface was up, and clear the suspended flag.
 */
static int
stge_resume(device_t dev)
{
struct stge_softc *sc;
if_t ifp;
uint8_t v;
sc = device_get_softc(dev);
STGE_LOCK(sc);
/*
* Clear WOL bits, so special frames wouldn't interfere
* normal Rx operation anymore.
*/
v = CSR_READ_1(sc, STGE_WakeEvent);
v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable |
WE_WakeOnLanEnable);
CSR_WRITE_1(sc, STGE_WakeEvent, v);
ifp = sc->sc_ifp;
if (if_getflags(ifp) & IFF_UP)
stge_init_locked(sc);
sc->sc_suspended = 0;
STGE_UNLOCK(sc);
return (0);
}
/*
 * stge_dma_wait:
 *
 * Spin until the Tx DMA engine reports idle; log a warning if it is
 * still busy after STGE_TIMEOUT polls.
 */
static void
stge_dma_wait(struct stge_softc *sc)
{
	int tries;

	for (tries = 0; tries < STGE_TIMEOUT; tries++) {
		DELAY(2);
		if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
			break;
	}
	if (tries == STGE_TIMEOUT)
		device_printf(sc->sc_dev, "DMA wait timed out\n");
}
/*
 * stge_encap:
 *
 * DMA-map the mbuf chain *m_head and build one transmit frame
 * descriptor for it.  If the chain needs more than STGE_MAXTXSEGS
 * segments it is collapsed first; on unrecoverable mapping failure
 * the chain is freed and *m_head set to NULL.  Returns 0 on success
 * or an errno (ENOBUFS when no free descriptor is available).
 * Called with the softc lock held.
 */
static int
stge_encap(struct stge_softc *sc, struct mbuf **m_head)
{
struct stge_txdesc *txd;
struct stge_tfd *tfd;
struct mbuf *m;
bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
int error, i, nsegs, si;
uint64_t csum_flags, tfc;
STGE_LOCK_ASSERT(sc);
if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL)
return (ENOBUFS);
error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
if (error == EFBIG) {
/* Too many segments: collapse the chain and retry once. */
m = m_collapse(*m_head, M_NOWAIT, STGE_MAXTXSEGS);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;
return (ENOMEM);
}
*m_head = m;
error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
if (error != 0) {
m_freem(*m_head);
*m_head = NULL;
return (error);
}
} else if (error != 0)
return (error);
if (nsegs == 0) {
m_freem(*m_head);
*m_head = NULL;
return (EIO);
}
m = *m_head;
/* Translate mbuf checksum-offload requests into TFD bits. */
csum_flags = 0;
if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
if (m->m_pkthdr.csum_flags & CSUM_IP)
csum_flags |= TFD_IPChecksumEnable;
if (m->m_pkthdr.csum_flags & CSUM_TCP)
csum_flags |= TFD_TCPChecksumEnable;
else if (m->m_pkthdr.csum_flags & CSUM_UDP)
csum_flags |= TFD_UDPChecksumEnable;
}
si = sc->sc_cdata.stge_tx_prod;
tfd = &sc->sc_rdata.stge_tx_ring[si];
for (i = 0; i < nsegs; i++)
tfd->tfd_frags[i].frag_word0 =
htole64(FRAG_ADDR(txsegs[i].ds_addr) |
FRAG_LEN(txsegs[i].ds_len));
sc->sc_cdata.stge_tx_cnt++;
tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
TFD_FragCount(nsegs) | csum_flags;
if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
tfc |= TFD_TxDMAIndicate;
/* Update producer index. */
sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;
/* Check if we have a VLAN tag to insert. */
if (m->m_flags & M_VLANTAG)
tfc |= (TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vtag));
tfd->tfd_control = htole64(tfc);
/* Update Tx Queue. */
STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q);
STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q);
txd->tx_m = m;
/* Sync descriptors. */
bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
sc->sc_cdata.stge_tx_ring_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
return (0);
}
/*
 * stge_start: [ifnet interface function]
 *
 * Start packet transmission on the interface: a locked wrapper
 * around stge_start_locked().
 */
static void
stge_start(if_t ifp)
{
	struct stge_softc *sc = if_getsoftc(ifp);

	STGE_LOCK(sc);
	stge_start_locked(ifp);
	STGE_UNLOCK(sc);
}
/*
 * stge_start_locked:
 *
 * Dequeue packets and hand them to stge_encap() until the send queue
 * empties or the Tx ring reaches its high-water mark; on ring-full the
 * packet is requeued and IFF_DRV_OACTIVE is set.  If anything was
 * queued, kick the Tx DMA poll and arm the watchdog.  Requires the
 * softc lock; no-op unless running and link is up.
 */
static void
stge_start_locked(if_t ifp)
{
struct stge_softc *sc;
struct mbuf *m_head;
int enq;
sc = if_getsoftc(ifp);
STGE_LOCK_ASSERT(sc);
if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
IFF_DRV_RUNNING || sc->sc_link == 0)
return;
for (enq = 0; !if_sendq_empty(ifp); ) {
if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) {
if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
break;
}
m_head = if_dequeue(ifp);
if (m_head == NULL)
break;
/*
* Pack the data into the transmit ring. If we
* don't have room, set the OACTIVE flag and wait
* for the NIC to drain the ring.
*/
if (stge_encap(sc, &m_head)) {
if (m_head == NULL)
break;
if_sendq_prepend(ifp, m_head);
if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
break;
}
enq++;
/*
* If there's a BPF listener, bounce a copy of this frame
* to him.
*/
ETHER_BPF_MTAP(ifp, m_head);
}
if (enq > 0) {
/* Transmit */
CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow);
/* Set a timeout in case the chip goes out to lunch. */
sc->sc_watchdog_timer = 5;
}
}
/*
 * stge_watchdog:
 *
 * Watchdog timer handler.  Decrements the per-softc timer armed by
 * stge_start_locked(); when it expires, counts an output error and
 * reinitializes the chip, then restarts transmission if packets are
 * still queued.  Requires the softc lock.
 */
static void
stge_watchdog(struct stge_softc *sc)
{
if_t ifp;
STGE_LOCK_ASSERT(sc);
if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
return;
ifp = sc->sc_ifp;
if_printf(sc->sc_ifp, "device timeout\n");
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
stge_init_locked(sc);
if (!if_sendq_empty(ifp))
stge_start_locked(ifp);
}
/*
 * stge_ioctl: [ifnet interface function]
 *
 * Handle control requests from the operator: MTU changes (up to
 * STGE_JUMBO_MTU, reinitializing if running), interface flags,
 * multicast filter updates, media selection, and capability toggles
 * (polling, hardware checksum, WOL, VLAN tagging).  Everything else
 * falls through to ether_ioctl().
 */
static int
stge_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
struct stge_softc *sc;
struct ifreq *ifr;
struct mii_data *mii;
int error, mask;
sc = if_getsoftc(ifp);
ifr = (struct ifreq *)data;
error = 0;
switch (cmd) {
case SIOCSIFMTU:
if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU)
error = EINVAL;
else if (if_getmtu(ifp) != ifr->ifr_mtu) {
if_setmtu(ifp, ifr->ifr_mtu);
STGE_LOCK(sc);
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
stge_init_locked(sc);
}
STGE_UNLOCK(sc);
}
break;
case SIOCSIFFLAGS:
STGE_LOCK(sc);
if ((if_getflags(ifp) & IFF_UP) != 0) {
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
/* Only reprogram the filter on a PROMISC change. */
if (((if_getflags(ifp) ^ sc->sc_if_flags)
& IFF_PROMISC) != 0)
stge_set_filter(sc);
} else {
if (sc->sc_detach == 0)
stge_init_locked(sc);
}
} else {
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
stge_stop(sc);
}
sc->sc_if_flags = if_getflags(ifp);
STGE_UNLOCK(sc);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
STGE_LOCK(sc);
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
stge_set_multi(sc);
STGE_UNLOCK(sc);
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
mii = device_get_softc(sc->sc_miibus);
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
break;
case SIOCSIFCAP:
mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
if ((mask & IFCAP_POLLING) != 0) {
if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
error = ether_poll_register(stge_poll, ifp);
if (error != 0)
break;
STGE_LOCK(sc);
CSR_WRITE_2(sc, STGE_IntEnable, 0);
if_setcapenablebit(ifp, IFCAP_POLLING, 0);
STGE_UNLOCK(sc);
} else {
error = ether_poll_deregister(ifp);
if (error != 0)
break;
STGE_LOCK(sc);
CSR_WRITE_2(sc, STGE_IntEnable,
sc->sc_IntEnable);
if_setcapenablebit(ifp, 0, IFCAP_POLLING);
STGE_UNLOCK(sc);
}
}
#endif
if ((mask & IFCAP_HWCSUM) != 0) {
if_togglecapenable(ifp, IFCAP_HWCSUM);
if ((IFCAP_HWCSUM & if_getcapenable(ifp)) != 0 &&
(IFCAP_HWCSUM & if_getcapabilities(ifp)) != 0)
if_sethwassist(ifp, STGE_CSUM_FEATURES);
else
if_sethwassist(ifp, 0);
}
if ((mask & IFCAP_WOL) != 0 &&
(if_getcapabilities(ifp) & IFCAP_WOL) != 0) {
if ((mask & IFCAP_WOL_MAGIC) != 0)
if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
}
if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
STGE_LOCK(sc);
stge_vlan_setup(sc);
STGE_UNLOCK(sc);
}
}
VLAN_CAPABILITIES(ifp);
break;
default:
error = ether_ioctl(ifp, cmd, data);
break;
}
return (error);
}
/*
 * stge_link_task:
 *
 * Taskqueue handler enqueued by stge_miibus_statchg(): records the
 * new link state from the MII layer and reprograms the MACCtrl
 * duplex/flow-control bits.  If the duplex setting changed, Tx and
 * Rx are reset and the reset is polled to completion.
 */
static void
stge_link_task(void *arg, int pending)
{
struct stge_softc *sc;
struct mii_data *mii;
uint32_t v, ac;
int i;
sc = (struct stge_softc *)arg;
STGE_LOCK(sc);
mii = device_get_softc(sc->sc_miibus);
if (mii->mii_media_status & IFM_ACTIVE) {
if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
sc->sc_link = 1;
} else
sc->sc_link = 0;
sc->sc_MACCtrl = 0;
if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
sc->sc_MACCtrl |= MC_DuplexSelect;
if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_RXPAUSE) != 0)
sc->sc_MACCtrl |= MC_RxFlowControlEnable;
if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_TXPAUSE) != 0)
sc->sc_MACCtrl |= MC_TxFlowControlEnable;
/*
* Update STGE_MACCtrl register depending on link status.
* (duplex, flow control etc)
*/
v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
v |= sc->sc_MACCtrl;
CSR_WRITE_4(sc, STGE_MACCtrl, v);
if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
/* Duplex setting changed, reset Tx/Rx functions. */
ac = CSR_READ_4(sc, STGE_AsicCtrl);
ac |= AC_TxReset | AC_RxReset;
CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
for (i = 0; i < STGE_TIMEOUT; i++) {
DELAY(100);
if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
break;
}
if (i == STGE_TIMEOUT)
device_printf(sc->sc_dev, "reset failed to complete\n");
}
STGE_UNLOCK(sc);
}
/*
 * stge_tx_error:
 *
 * Drain TxStatus and handle transmit error conditions.  Returns -1
 * when a Tx underrun requires a full reinitialization by the caller,
 * 0 otherwise.  Max/late collisions are recovered in place by
 * re-enabling the Tx MAC.
 */
static __inline int
stge_tx_error(struct stge_softc *sc)
{
uint32_t txstat;
int error;
for (error = 0;;) {
txstat = CSR_READ_4(sc, STGE_TxStatus);
if ((txstat & TS_TxComplete) == 0)
break;
/* Tx underrun */
if ((txstat & TS_TxUnderrun) != 0) {
/*
* XXX
* There should be a more better way to recover
* from Tx underrun instead of a full reset.
*/
if (sc->sc_nerr++ < STGE_MAXERR)
device_printf(sc->sc_dev, "Tx underrun, "
"resetting...\n");
if (sc->sc_nerr == STGE_MAXERR)
device_printf(sc->sc_dev, "too many errors; "
"not reporting any more\n");
error = -1;
break;
}
/* Maximum/Late collisions, Re-enable Tx MAC. */
if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
CSR_WRITE_4(sc, STGE_MACCtrl,
(CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
MC_TxEnable);
}
return (error);
}
/*
 * stge_intr:
 *
 * Interrupt service routine.  Acks and processes interrupt causes in
 * a loop (Rx/Tx completion, Tx errors); host interface errors or an
 * unrecoverable Tx error force a reinitialization.  Skipped entirely
 * while polling is enabled or the device is suspended.
 */
static void
stge_intr(void *arg)
{
struct stge_softc *sc;
if_t ifp;
int reinit;
uint16_t status;
sc = (struct stge_softc *)arg;
ifp = sc->sc_ifp;
STGE_LOCK(sc);
#ifdef DEVICE_POLLING
if ((if_getcapenable(ifp) & IFCAP_POLLING) != 0)
goto done_locked;
#endif
status = CSR_READ_2(sc, STGE_IntStatus);
if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
goto done_locked;
/* Disable interrupts. */
for (reinit = 0;;) {
/* Reading IntStatusAck also acknowledges pending causes. */
status = CSR_READ_2(sc, STGE_IntStatusAck);
status &= sc->sc_IntEnable;
if (status == 0)
break;
/* Host interface errors. */
if ((status & IS_HostError) != 0) {
device_printf(sc->sc_dev,
"Host interface error, resetting...\n");
reinit = 1;
goto force_init;
}
/* Receive interrupts. */
if ((status & IS_RxDMAComplete) != 0) {
stge_rxeof(sc);
if ((status & IS_RFDListEnd) != 0)
CSR_WRITE_4(sc, STGE_DMACtrl,
DMAC_RxDMAPollNow);
}
/* Transmit interrupts. */
if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
stge_txeof(sc);
/* Transmission errors.*/
if ((status & IS_TxComplete) != 0) {
if ((reinit = stge_tx_error(sc)) != 0)
break;
}
}
force_init:
if (reinit != 0) {
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
stge_init_locked(sc);
}
/* Re-enable interrupts. */
CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
/* Try to get more packets going. */
if (!if_sendq_empty(ifp))
stge_start_locked(ifp);
done_locked:
STGE_UNLOCK(sc);
}
/*
 * stge_txeof:
 *
 *	Helper; handle transmit interrupts.
 *
 *	Walks the Tx ring from the consumer index, reclaiming every
 *	descriptor the chip has marked done (TFD_TFDDone): the DMA map is
 *	unloaded, the transmitted mbuf freed, and the tx descriptor moved
 *	from the busy queue back to the free queue.
 */
static void
stge_txeof(struct stge_softc *sc)
{
	if_t ifp;
	struct stge_txdesc *txd;
	uint64_t control;
	int cons;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	if (txd == NULL)
		return;
	/* Pick up descriptor status words written back by the chip. */
	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (cons = sc->sc_cdata.stge_tx_cons;;
	    cons = (cons + 1) % STGE_TX_RING_CNT) {
		if (sc->sc_cdata.stge_tx_cnt <= 0)
			break;
		control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
		/* Stop at the first descriptor the chip still owns. */
		if ((control & TFD_TFDDone) == 0)
			break;
		sc->sc_cdata.stge_tx_cnt--;
		/* A slot opened up; allow the start routine to queue again. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

		bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);

		/* Output counter is updated with statistics register */
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
		txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	}
	sc->sc_cdata.stge_tx_cons = cons;
	/* Everything reclaimed: cancel the watchdog countdown. */
	if (sc->sc_cdata.stge_tx_cnt == 0)
		sc->sc_watchdog_timer = 0;

	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * stge_discard_rxbuf:
 *
 *	Hand the Rx descriptor at `idx' back to the chip by clearing its
 *	status word; the mbuf already loaded at that slot is reused.
 */
static __inline void
stge_discard_rxbuf(struct stge_softc *sc, int idx)
{

	sc->sc_rdata.stge_rx_ring[idx].rfd_status = 0;
}
#ifndef __NO_STRICT_ALIGNMENT
/*
 * It seems that TC9021's DMA engine has alignment restrictions in
 * DMA scatter operations. The first DMA segment has no address
 * alignment restrictions but the rest should be aligned on 4(?) bytes
 * boundary. Otherwise it would corrupt random memory. Since we don't
 * know which one is used for the first segment in advance we simply
 * don't align at all.
 * To avoid copying over an entire frame to align, we allocate a new
 * mbuf and copy ethernet header to the new mbuf. The new mbuf is
 * prepended into the existing mbuf chain.
 *
 * Returns the (possibly new) head of the chain, or NULL if a header
 * mbuf could not be allocated (in which case the chain is freed).
 */
static __inline struct mbuf *
stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
{
	struct mbuf *n;

	n = NULL;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		/*
		 * Room in the cluster: shift the header in place.
		 * bcopy() handles the overlapping regions here.
		 */
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		n = m;
	} else {
		/* No room: prepend a fresh header mbuf instead. */
		MGETHDR(n, M_NOWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			/* Move the packet header to the new chain head. */
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
		} else
			m_freem(m);
	}

	return (n);
}
#endif
/*
 * stge_rxeof:
 *
 *	Helper; handle receive interrupts.
 *
 *	Walks the Rx ring reclaiming completed RFDs, reassembles
 *	multi-descriptor frames into an mbuf chain, fills in checksum
 *	and VLAN metadata, and hands complete frames to the stack.
 *	Returns the number of frames delivered (used by the poll path).
 */
static int
stge_rxeof(struct stge_softc *sc)
{
	if_t ifp;
	struct stge_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint64_t status64;
	uint32_t status;
	int cons, prog, rx_npkts;

	STGE_LOCK_ASSERT(sc);

	rx_npkts = 0;
	ifp = sc->sc_ifp;

	/* Pick up RFD status words written back by the chip. */
	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD);
	prog = 0;
	for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT;
	    prog++, cons = (cons + 1) % STGE_RX_RING_CNT) {
		status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status);
		status = RFD_RxStatus(status64);
		/* Stop at the first descriptor the chip still owns. */
		if ((status & RFD_RFDDone) == 0)
			break;
#ifdef DEVICE_POLLING
		if (if_getcapenable(ifp) & IFCAP_POLLING) {
			if (sc->sc_cdata.stge_rxcycles <= 0)
				break;
			sc->sc_cdata.stge_rxcycles--;
		}
#endif
		/*
		 * NOTE(review): prog is also incremented in the loop
		 * header, so each processed RFD counts twice toward the
		 * STGE_RX_RING_CNT bound, capping one pass at roughly
		 * half the ring -- confirm this is intentional.
		 */
		prog++;
		rxd = &sc->sc_cdata.stge_rxdesc[cons];
		mp = rxd->rx_m;

		/*
		 * If the packet had an error, drop it. Note we count
		 * the error later in the periodic stats update.
		 */
		if ((status & RFD_FrameEnd) != 0 && (status &
		    (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
		    RFD_RxAlignmentError | RFD_RxFCSError |
		    RFD_RxLengthError)) != 0) {
			/* Recycle the buffer and discard any partial chain. */
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}
		/*
		 * Add a new receive buffer to the ring.
		 */
		if (stge_newbuf(sc, cons) != 0) {
			/* No replacement mbuf: drop frame, keep old buffer. */
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}

		/*
		 * The final descriptor of a frame carries the total DMA'd
		 * length; earlier fragments were full-sized buffers.
		 */
		if ((status & RFD_FrameEnd) != 0)
			mp->m_len = RFD_RxDMAFrameLen(status) -
			    sc->sc_cdata.stge_rxlen;
		sc->sc_cdata.stge_rxlen += mp->m_len;

		/* Chain mbufs. */
		if (sc->sc_cdata.stge_rxhead == NULL) {
			sc->sc_cdata.stge_rxhead = mp;
			sc->sc_cdata.stge_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->sc_cdata.stge_rxtail->m_next = mp;
			sc->sc_cdata.stge_rxtail = mp;
		}

		if ((status & RFD_FrameEnd) != 0) {
			m = sc->sc_cdata.stge_rxhead;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = sc->sc_cdata.stge_rxlen;

			/* Drop frames larger than the configured maximum. */
			if (m->m_pkthdr.len > sc->sc_if_framesize) {
				m_freem(m);
				STGE_RXCHAIN_RESET(sc);
				continue;
			}
			/*
			 * Set the incoming checksum information for
			 * the packet.
			 */
			if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
				if ((status & RFD_IPDetected) != 0) {
					m->m_pkthdr.csum_flags |=
						CSUM_IP_CHECKED;
					if ((status & RFD_IPError) == 0)
						m->m_pkthdr.csum_flags |=
						    CSUM_IP_VALID;
				}
				if (((status & RFD_TCPDetected) != 0 &&
				    (status & RFD_TCPError) == 0) ||
				    ((status & RFD_UDPDetected) != 0 &&
				    (status & RFD_UDPError) == 0)) {
					m->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

#ifndef __NO_STRICT_ALIGNMENT
			/* Jumbo Rx is multi-segment; see stge_fixup_rx(). */
			if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
				if ((m = stge_fixup_rx(sc, m)) == NULL) {
					STGE_RXCHAIN_RESET(sc);
					continue;
				}
			}
#endif
			/* Check for VLAN tagged packets. */
			if ((status & RFD_VLANDetected) != 0 &&
			    (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
				m->m_pkthdr.ether_vtag = RFD_TCI(status64);
				m->m_flags |= M_VLANTAG;
			}
			/* Drop the lock across the hand-off to the stack. */
			STGE_UNLOCK(sc);
			/* Pass it on. */
			if_input(ifp, m);
			STGE_LOCK(sc);
			rx_npkts++;

			STGE_RXCHAIN_RESET(sc);
		}
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->sc_cdata.stge_rx_cons = cons;
		bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
		    sc->sc_cdata.stge_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	return (rx_npkts);
}
#ifdef DEVICE_POLLING
/*
 * stge_poll:
 *
 *	DEVICE_POLLING entry point.  Processes up to `count' Rx frames,
 *	reclaims Tx descriptors and, for POLL_AND_CHECK_STATUS, also
 *	handles error conditions that would normally raise interrupts.
 *	Returns the number of Rx packets delivered.
 */
static int
stge_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct stge_softc *sc;
	uint16_t status;
	int rx_npkts;

	rx_npkts = 0;
	sc = if_getsoftc(ifp);
	STGE_LOCK(sc);
	/* Nothing to do if the interface is down. */
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		STGE_UNLOCK(sc);
		return (rx_npkts);
	}

	/* Budget for this poll pass; consumed by stge_rxeof(). */
	sc->sc_cdata.stge_rxcycles = count;
	rx_npkts = stge_rxeof(sc);
	stge_txeof(sc);

	if (cmd == POLL_AND_CHECK_STATUS) {
		status = CSR_READ_2(sc, STGE_IntStatus);
		status &= sc->sc_IntEnable;
		if (status != 0) {
			/* Host interface error: full reinitialization. */
			if ((status & IS_HostError) != 0) {
				device_printf(sc->sc_dev,
				    "Host interface error, resetting...\n");
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				stge_init_locked(sc);
			}
			/* Tx error: reset only when stge_tx_error asks. */
			if ((status & IS_TxComplete) != 0) {
				if (stge_tx_error(sc) != 0) {
					if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
					stge_init_locked(sc);
				}
			}
		}
	}

	/* Try to get more packets going. */
	if (!if_sendq_empty(ifp))
		stge_start_locked(ifp);

	STGE_UNLOCK(sc);
	return (rx_npkts);
}
#endif	/* DEVICE_POLLING */
/*
 * stge_tick:
 *
 *	One second timer: drive the MII state machine, refresh the
 *	statistics counters, reclaim stale Tx descriptors and run the
 *	watchdog, then re-arm ourselves.
 */
static void
stge_tick(void *arg)
{
	struct stge_softc *sc = arg;
	struct mii_data *mii;

	STGE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->sc_miibus);
	mii_tick(mii);

	/* Update statistics counters. */
	stge_stats_update(sc);

	/*
	 * Reclaim any pending Tx descriptors to release mbufs in a
	 * timely manner as we don't generate Tx completion interrupts
	 * for every frame.  This limits the delay to a maximum of one
	 * second.
	 */
	if (sc->sc_cdata.stge_tx_cnt != 0)
		stge_txeof(sc);

	stge_watchdog(sc);

	/* Schedule the next tick one second from now. */
	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
}
/*
 * stge_stats_update:
 *
 *	Read the TC9021 statistics counters.
 *
 *	Folds the hardware frame/error/collision counters into the
 *	ifnet counters.
 */
static void
stge_stats_update(struct stge_softc *sc)
{
	if_t ifp;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	/*
	 * The octet counters are read but their values discarded --
	 * presumably reading clears/advances the hardware statistics
	 * block; TODO confirm against the TC9021 data sheet.
	 */
	CSR_READ_4(sc,STGE_OctetRcvOk);

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, CSR_READ_4(sc, STGE_FramesRcvdOk));

	if_inc_counter(ifp, IFCOUNTER_IERRORS, CSR_READ_2(sc, STGE_FramesLostRxErrors));

	CSR_READ_4(sc, STGE_OctetXmtdOk);

	if_inc_counter(ifp, IFCOUNTER_OPACKETS, CSR_READ_4(sc, STGE_FramesXmtdOk));

	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
	    CSR_READ_4(sc, STGE_LateCollisions) +
	    CSR_READ_4(sc, STGE_MultiColFrames) +
	    CSR_READ_4(sc, STGE_SingleColFrames));

	if_inc_counter(ifp, IFCOUNTER_OERRORS,
	    CSR_READ_2(sc, STGE_FramesAbortXSColls) +
	    CSR_READ_2(sc, STGE_FramesWEXDeferal));
}
/*
 * stge_reset:
 *
 *	Perform a soft reset on the TC9021.
 *
 *	`how' selects the scope: STGE_RESET_TX, STGE_RESET_RX, or
 *	STGE_RESET_FULL (global reset of DMA, FIFO, network and host
 *	blocks).  Also reprograms the LED and PHY-set registers
 *	afterwards, following the Linux IPG driver.
 */
static void
stge_reset(struct stge_softc *sc, uint32_t how)
{
	uint32_t ac;
	uint8_t v;
	int i, dv;

	STGE_LOCK_ASSERT(sc);

	dv = 5000;
	ac = CSR_READ_4(sc, STGE_AsicCtrl);
	switch (how) {
	case STGE_RESET_TX:
		ac |= AC_TxReset | AC_FIFO;
		dv = 100;
		break;
	case STGE_RESET_RX:
		ac |= AC_RxReset | AC_FIFO;
		dv = 100;
		break;
	case STGE_RESET_FULL:
	default:
		/*
		 * Only assert RstOut if we're fiber. We need GMII clocks
		 * to be present in order for the reset to complete on fiber
		 * cards.
		 */
		ac |= AC_GlobalReset | AC_RxReset | AC_TxReset |
		    AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
		    (sc->sc_usefiber ? AC_RstOut : 0);
		break;
	}

	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);

	/* Account for reset problem at 10Mbps. */
	DELAY(dv);

	/* Wait for the reset-busy bit to clear. */
	for (i = 0; i < STGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
			break;
		DELAY(dv);
	}

	if (i == STGE_TIMEOUT)
		device_printf(sc->sc_dev, "reset failed to complete\n");

	/* Set LED, from Linux IPG driver. */
	ac = CSR_READ_4(sc, STGE_AsicCtrl);
	ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1);
	if ((sc->sc_led & 0x01) != 0)
		ac |= AC_LEDMode;
	/*
	 * NOTE(review): the 0x03 mask also matches bit 0; if only bit 1
	 * was intended the mask would be 0x02 -- confirm against the
	 * Linux ipg driver this was copied from.
	 */
	if ((sc->sc_led & 0x03) != 0)
		ac |= AC_LEDModeBit1;
	if ((sc->sc_led & 0x08) != 0)
		ac |= AC_LEDSpeed;
	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);

	/* Set PHY, from Linux IPG driver */
	v = CSR_READ_1(sc, STGE_PhySet);
	v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet);
	v |= ((sc->sc_led & 0x70) >> 4);
	CSR_WRITE_1(sc, STGE_PhySet, v);
}
/*
 * stge_init: [ ifnet interface function ]
 *
 *	Initialize the interface.  Locked wrapper around
 *	stge_init_locked().
 */
static void
stge_init(void *xsc)
{
	struct stge_softc *sc = xsc;

	STGE_LOCK(sc);
	stge_init_locked(sc);
	STGE_UNLOCK(sc);
}
/*
 * stge_init_locked:
 *
 *	Bring the chip up: cancel pending I/O, reset to a known state,
 *	program the station address, descriptor rings, DMA/interrupt
 *	parameters and MAC control, then enable Tx/Rx and start the
 *	one-second timer.  Called with the softc lock held.
 */
static void
stge_init_locked(struct stge_softc *sc)
{
	if_t ifp;
	struct mii_data *mii;
	uint16_t eaddr[3];
	uint32_t v;
	int error;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;
	/* Nothing to do if we are already up. */
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;
	mii = device_get_softc(sc->sc_miibus);

	/*
	 * Cancel any pending I/O.
	 */
	stge_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	stge_reset(sc, STGE_RESET_FULL);

	/* Init descriptors. */
	error = stge_init_rx_ring(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "initialization failed: no memory for rx buffers\n");
		stge_stop(sc);
		goto out;
	}
	stge_init_tx_ring(sc);

	/* Set the station address. */
	bcopy(if_getlladdr(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
	CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
	CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));

	/*
	 * Set the statistics masks. Disable all the RMON stats,
	 * and disable selected stats in the non-RMON stats registers.
	 */
	CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
	CSR_WRITE_4(sc, STGE_StatisticsMask,
	    (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
	    (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
	    (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
	    (1U << 21));

	/* Set up the receive filter. */
	stge_set_filter(sc);
	/* Program multicast filter. */
	stge_set_multi(sc);

	/*
	 * Give the transmit and receive ring to the chip.
	 */
	CSR_WRITE_4(sc, STGE_TFDListPtrHi,
	    STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
	CSR_WRITE_4(sc, STGE_TFDListPtrLo,
	    STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));

	CSR_WRITE_4(sc, STGE_RFDListPtrHi,
	    STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
	CSR_WRITE_4(sc, STGE_RFDListPtrLo,
	    STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));

	/*
	 * Initialize the Tx auto-poll period. It's OK to make this number
	 * large (255 is the max, but we use 127) -- we explicitly kick the
	 * transmit engine when there's actually a packet.
	 */
	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);

	/* ..and the Rx auto-poll period. */
	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);

	/* Initialize the Tx start threshold. */
	CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);

	/* Rx DMA thresholds, from Linux */
	CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);

	/* Rx early threshold, from Linux */
	CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);

	/* Tx DMA thresholds, from Linux */
	CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);

	/*
	 * Initialize the Rx DMA interrupt control register. We
	 * request an interrupt after every incoming packet, but
	 * defer it for sc_rxint_dmawait us. When the number of
	 * interrupts pending reaches STGE_RXINT_NFRAME, we stop
	 * deferring the interrupt, and signal it immediately.
	 */
	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
	    RDIC_RxFrameCount(sc->sc_rxint_nframe) |
	    RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));

	/*
	 * Initialize the interrupt mask.
	 */
	sc->sc_IntEnable = IS_HostError | IS_TxComplete |
	    IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if ((if_getcapenable(ifp) & IFCAP_POLLING) != 0)
		CSR_WRITE_2(sc, STGE_IntEnable, 0);
	else
#endif
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/*
	 * Configure the DMA engine.
	 * XXX Should auto-tune TxBurstLimit.
	 */
	CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));

	/*
	 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
	 * FIFO, and send an un-PAUSE frame when we reach 3056 bytes
	 * in the Rx FIFO.
	 */
	CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
	CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);

	/*
	 * Set the maximum frame size.
	 */
	sc->sc_if_framesize = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
	CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);

	/*
	 * Initialize MacCtrl -- do it before setting the media,
	 * as setting the media will actually program the register.
	 *
	 * Note: We have to poke the IFS value before poking
	 * anything else.
	 */
	/* Tx/Rx MAC should be disabled before programming IFS.*/
	CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));

	stge_vlan_setup(sc);

	if (sc->sc_rev >= 6) {		/* >= B.2 */
		/* Multi-frag frame bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);

		/* Tx Poll Now bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
		/* Tx Poll Now bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
	}

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);

	/*
	 * It seems that transmitting frames without checking the state of
	 * Rx/Tx MAC wedge the hardware.
	 */
	stge_start_tx(sc);
	stge_start_rx(sc);

	sc->sc_link = 0;
	/*
	 * Set the current media.
	 */
	mii_mediachg(mii);

	/*
	 * Start the one second MII clock.
	 */
	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);

	/*
	 * ...all done!
	 */
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

 out:
	if (error != 0)
		device_printf(sc->sc_dev, "interface not running\n");
}
/*
 * stge_vlan_setup:
 *
 *	Program MC_AutoVLANuntagging from the interface's current
 *	IFCAP_VLAN_HWTAGGING capability.
 *
 *	The NIC always copies a VLAN tag regardless of the STGE_MACCtrl
 *	MC_AutoVLANuntagging bit.  MC_AutoVLANtagging selects which VLAN
 *	source to use between STGE_VLANTag and TFC; however the TFC
 *	TFD_VLANTagInsert bit has priority over MC_AutoVLANtagging, so
 *	we always use TFC instead of the STGE_VLANTag register.
 */
static void
stge_vlan_setup(struct stge_softc *sc)
{
	uint32_t mctl;

	mctl = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((if_getcapenable(sc->sc_ifp) & IFCAP_VLAN_HWTAGGING) != 0)
		mctl |= MC_AutoVLANuntagging;
	else
		mctl &= ~MC_AutoVLANuntagging;

	CSR_WRITE_4(sc, STGE_MACCtrl, mctl);
}
/*
 * Stop transmission on the interface.
 *
 * Cancels the tick callout, masks interrupts, halts the Rx/Tx MACs and
 * DMA engines, releases every mbuf still held by the rings, and marks
 * the interface down.  Called with the softc lock held.
 */
static void
stge_stop(struct stge_softc *sc)
{
	if_t ifp;
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	uint32_t v;
	int i;

	STGE_LOCK_ASSERT(sc);
	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_tick_ch);
	sc->sc_watchdog_timer = 0;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_2(sc, STGE_IntEnable, 0);

	/*
	 * Stop receiver, transmitter, and stats update.
	 */
	stge_stop_rx(sc);
	stge_stop_tx(sc);
	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v |= MC_StatisticsDisable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);

	/*
	 * Stop the transmit and receive DMA.
	 */
	stge_dma_wait(sc);
	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		rxd = &sc->sc_cdata.stge_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->sc_cdata.stge_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
        }
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		txd = &sc->sc_cdata.stge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->sc_cdata.stge_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
        }

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->sc_ifp;
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->sc_link = 0;
}
/*
 * stge_start_tx:
 *
 *	Enable the Tx MAC and its DMA poll timer, then wait (up to
 *	STGE_TIMEOUT * 10us) for the chip to report it running.
 */
static void
stge_start_tx(struct stge_softc *sc)
{
	uint32_t mctl;
	int n;

	mctl = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((mctl & MC_TxEnabled) != 0)
		return;		/* Already running. */

	CSR_WRITE_4(sc, STGE_MACCtrl, mctl | MC_TxEnable);
	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
	for (n = 0; n < STGE_TIMEOUT; n++) {
		DELAY(10);
		mctl = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((mctl & MC_TxEnabled) != 0)
			return;
	}
	device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
}
/*
 * stge_start_rx:
 *
 *	Enable the Rx MAC and its DMA poll timer, then wait (up to
 *	STGE_TIMEOUT * 10us) for the chip to report it running.
 */
static void
stge_start_rx(struct stge_softc *sc)
{
	uint32_t mctl;
	int n;

	mctl = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((mctl & MC_RxEnabled) != 0)
		return;		/* Already running. */

	CSR_WRITE_4(sc, STGE_MACCtrl, mctl | MC_RxEnable);
	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
	for (n = 0; n < STGE_TIMEOUT; n++) {
		DELAY(10);
		mctl = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((mctl & MC_RxEnabled) != 0)
			return;
	}
	device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
}
/*
 * stge_stop_tx:
 *
 *	Disable the Tx MAC and wait (up to STGE_TIMEOUT * 10us) for the
 *	chip to report it stopped.
 */
static void
stge_stop_tx(struct stge_softc *sc)
{
	uint32_t mctl;
	int n;

	mctl = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((mctl & MC_TxEnabled) == 0)
		return;		/* Already stopped. */

	CSR_WRITE_4(sc, STGE_MACCtrl, mctl | MC_TxDisable);
	for (n = 0; n < STGE_TIMEOUT; n++) {
		DELAY(10);
		mctl = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((mctl & MC_TxEnabled) == 0)
			return;
	}
	device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
}
/*
 * stge_stop_rx:
 *
 *	Disable the Rx MAC and wait (up to STGE_TIMEOUT * 10us) for the
 *	chip to report it stopped.
 */
static void
stge_stop_rx(struct stge_softc *sc)
{
	uint32_t mctl;
	int n;

	mctl = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((mctl & MC_RxEnabled) == 0)
		return;		/* Already stopped. */

	CSR_WRITE_4(sc, STGE_MACCtrl, mctl | MC_RxDisable);
	for (n = 0; n < STGE_TIMEOUT; n++) {
		DELAY(10);
		mctl = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((mctl & MC_RxEnabled) == 0)
			return;
	}
	device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
}
/*
 * stge_init_tx_ring:
 *
 *	Reset the Tx descriptor ring: all descriptors on the free queue,
 *	producer/consumer indices at zero, and each descriptor's next
 *	pointer linked circularly (the last one points back to the
 *	first).  TFD_TFDDone is preset so the chip treats every slot as
 *	available.
 */
static void
stge_init_tx_ring(struct stge_softc *sc)
{
	struct stge_ring_data *rd;
	struct stge_txdesc *txd;
	bus_addr_t addr;
	int i;

	STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
	STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);

	sc->sc_cdata.stge_tx_prod = 0;
	sc->sc_cdata.stge_tx_cons = 0;
	sc->sc_cdata.stge_tx_cnt = 0;

	rd = &sc->sc_rdata;
	bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		/* Link the ring circularly. */
		if (i == (STGE_TX_RING_CNT - 1))
			addr = STGE_TX_RING_ADDR(sc, 0);
		else
			addr = STGE_TX_RING_ADDR(sc, i + 1);
		rd->stge_tx_ring[i].tfd_next = htole64(addr);
		rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
		txd = &sc->sc_cdata.stge_txdesc[i];
		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
	}

	/* Publish the initialized ring to the device. */
	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * stge_init_rx_ring:
 *
 *	Reset the Rx descriptor ring: load a fresh mbuf into every slot,
 *	link the descriptors circularly and clear their status words.
 *	Returns 0 on success or ENOBUFS when an mbuf could not be
 *	allocated.
 */
static int
stge_init_rx_ring(struct stge_softc *sc)
{
	struct stge_ring_data *rd;
	bus_addr_t addr;
	int i;

	sc->sc_cdata.stge_rx_cons = 0;
	STGE_RXCHAIN_RESET(sc);

	rd = &sc->sc_rdata;
	bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		if (stge_newbuf(sc, i) != 0)
			return (ENOBUFS);
		/* Link the ring circularly. */
		if (i == (STGE_RX_RING_CNT - 1))
			addr = STGE_RX_RING_ADDR(sc, 0);
		else
			addr = STGE_RX_RING_ADDR(sc, i + 1);
		rd->stge_rx_ring[i].rfd_next = htole64(addr);
		rd->stge_rx_ring[i].rfd_status = 0;
	}

	/* Publish the initialized ring to the device. */
	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
/*
 * stge_newbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 *
 *	Allocates a cluster mbuf, loads it via the spare DMA map and,
 *	only on success, swaps it into slot `idx' (the old map becomes
 *	the new spare).  On failure the slot is left untouched and
 *	ENOBUFS is returned, so the caller can recycle the old buffer.
 */
static int
stge_newbuf(struct stge_softc *sc, int idx)
{
	struct stge_rxdesc *rxd;
	struct stge_rfd *rfd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/*
	 * The hardware requires 4bytes aligned DMA address when JUMBO
	 * frame is used.
	 */
	if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
		m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_rx_tag,
	    sc->sc_cdata.stge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->sc_cdata.stge_rxdesc[idx];
	/* Tear down the previous buffer's mapping, if any. */
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the slot's map with the spare map. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
	sc->sc_cdata.stge_rx_sparemap = map;
	bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	/* Point the descriptor at the new buffer and re-arm it. */
	rfd = &sc->sc_rdata.stge_rx_ring[idx];
	rfd->rfd_frag.frag_word0 =
	    htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len));
	rfd->rfd_status = 0;

	return (0);
}
/*
 * stge_set_filter:
 *
 *	Set up the receive filter.  Unicast reception is always enabled;
 *	broadcast and promiscuous reception track the interface flags.
 */
static void
stge_set_filter(struct stge_softc *sc)
{
	uint16_t rxmode;
	if_t ifp;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	rxmode = CSR_READ_2(sc, STGE_ReceiveMode);
	rxmode &= ~(RM_ReceiveBroadcast | RM_ReceiveAllFrames);
	rxmode |= RM_ReceiveUnicast;
	if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
		rxmode |= RM_ReceiveBroadcast;
	if ((if_getflags(ifp) & IFF_PROMISC) != 0)
		rxmode |= RM_ReceiveAllFrames;

	CSR_WRITE_2(sc, STGE_ReceiveMode, rxmode);
}
/*
 * Per-address callback for if_foreach_llmaddr(): fold one link-level
 * multicast address into the 64-bit hash filter bitmap passed via
 * `arg'.  Always returns 1 so the caller counts the addresses.
 */
static u_int
stge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *mchash = arg;
	uint32_t bit;

	/* The filter indexes on the 6 low-order bits of the BE CRC. */
	bit = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) & 0x3f;
	mchash[bit >> 5] |= 1 << (bit & 0x1f);

	return (1);
}
/*
 * stge_set_multi:
 *
 *	Program the 64-bit multicast hash filter from the interface's
 *	link-level multicast address list.  Promiscuous/allmulti modes
 *	bypass the hash entirely.
 */
static void
stge_set_multi(struct stge_softc *sc)
{
	if_t ifp;
	uint32_t mchash[2];
	uint16_t mode;
	int count;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	mode = CSR_READ_2(sc, STGE_ReceiveMode);
	if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		/* Promisc/allmulti: accept everything, skip the hash. */
		if ((if_getflags(ifp) & IFF_PROMISC) != 0)
			mode |= RM_ReceiveAllFrames;
		else if ((if_getflags(ifp) & IFF_ALLMULTI) != 0)
			mode |= RM_ReceiveMulticast;
		CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
		return;
	}

	/* clear existing filters. */
	CSR_WRITE_4(sc, STGE_HashTable0, 0);
	CSR_WRITE_4(sc, STGE_HashTable1, 0);

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64 bit multicast hash table.  The
	 * high order bits select the register, while the rest of the bits
	 * select the bit within the register.
	 */
	bzero(mchash, sizeof(mchash));
	count = if_foreach_llmaddr(ifp, stge_hash_maddr, mchash);

	mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
	/* Enable hash filtering only when there are addresses to match. */
	if (count > 0)
		mode |= RM_ReceiveMulticastHash;
	else
		mode &= ~RM_ReceiveMulticastHash;

	CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
	CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
}
/*
 * Generic handler for bounded integer sysctls: accept a new value only
 * when it lies within [low, high], otherwise return EINVAL.
 */
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int err, val;

	if (arg1 == NULL)
		return (EINVAL);
	val = *(int *)arg1;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);
	if (val < low || val > high)
		return (EINVAL);
	*(int *)arg1 = val;

	return (0);
}
/* Sysctl handler bounding the Rx interrupt frame-coalescing count. */
static int
sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
}
/* Sysctl handler bounding the Rx interrupt deferral time (us). */
static int
sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
}
diff --git a/sys/dev/sume/if_sume.c b/sys/dev/sume/if_sume.c
index 8d6223cb54d1..319853ac3c7a 100644
--- a/sys/dev/sume/if_sume.c
+++ b/sys/dev/sume/if_sume.c
@@ -1,1598 +1,1588 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2015 Bjoern A. Zeeb
* Copyright (c) 2020 Denis Salopek
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-11-C-0249
* ("MRC2"), as part of the DARPA MRC research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <machine/bus.h>
#include "adapter.h"
#define PCI_VENDOR_ID_XILINX 0x10ee
#define PCI_DEVICE_ID_SUME 0x7028

/* SUME bus driver interface */
static int sume_probe(device_t);
static int sume_attach(device_t);
static int sume_detach(device_t);

/* newbus method table for the sume(4) driver. */
static device_method_t sume_methods[] = {
	DEVMETHOD(device_probe,		sume_probe),
	DEVMETHOD(device_attach,	sume_attach),
	DEVMETHOD(device_detach,	sume_detach),
	DEVMETHOD_END
};

/* Driver description passed to the newbus framework. */
static driver_t sume_driver = {
	"sume",
	sume_methods,
	sizeof(struct sume_adapter)
};
/*
* The DMA engine for SUME generates interrupts for each RX/TX transaction.
* Depending on the channel (0 if packet transaction, 1 if register transaction)
* the used bits of the interrupt vector will be the lowest or the second lowest
* 5 bits.
*
* When receiving packets from SUME (RX):
* (1) SUME received a packet on one of the interfaces.
* (2) SUME generates an interrupt vector, bit 00001 is set (channel 0 - new RX
* transaction).
* (3) We read the length of the incoming packet and the offset along with the
* 'last' flag from the SUME registers.
* (4) We prepare for the DMA transaction by setting the bouncebuffer on the
* address buf_addr. For now, this is how it's done:
* - First 3*sizeof(uint32_t) bytes are: lower and upper 32 bits of physical
* address where we want the data to arrive (buf_addr[0] and buf_addr[1]),
* and length of incoming data (buf_addr[2]).
* - Data will start right after, at buf_addr+3*sizeof(uint32_t). The
* physical address buf_hw_addr is a block of contiguous memory mapped to
* buf_addr, so we can set the incoming data's physical address (buf_addr[0]
* and buf_addr[1]) to buf_hw_addr+3*sizeof(uint32_t).
* (5) We notify SUME that the bouncebuffer is ready for the transaction by
* writing the lower/upper physical address buf_hw_addr to the SUME
* registers RIFFA_TX_SG_ADDR_LO_REG_OFF and RIFFA_TX_SG_ADDR_HI_REG_OFF as
* well as the number of segments to the register RIFFA_TX_SG_LEN_REG_OFF.
* (6) SUME generates an interrupt vector, bit 00010 is set (channel 0 -
* bouncebuffer received).
* (7) SUME generates an interrupt vector, bit 00100 is set (channel 0 -
* transaction is done).
* (8) SUME can do both steps (6) and (7) using the same interrupt.
* (9) We read the first 16 bytes (metadata) of the received data and note the
* incoming interface so we can later forward it to the right one in the OS
* (sume0, sume1, sume2 or sume3).
* (10) We create an mbuf and copy the data from the bouncebuffer to the mbuf
* and set the mbuf rcvif to the incoming interface.
* (11) We forward the mbuf to the appropriate interface via ifp->if_input.
*
* When sending packets to SUME (TX):
* (1) The OS calls sume_if_start() function on TX.
* (2) We get the mbuf packet data and copy it to the
* buf_addr+3*sizeof(uint32_t) + metadata 16 bytes.
* (3) We create the metadata based on the output interface and copy it to the
* buf_addr+3*sizeof(uint32_t).
* (4) We write the offset/last and length of the packet to the SUME registers
* RIFFA_RX_OFFLAST_REG_OFF and RIFFA_RX_LEN_REG_OFF.
* (5) We fill the bouncebuffer by filling the first 3*sizeof(uint32_t) bytes
* with the physical address and length just as in RX step (4).
* (6) We notify SUME that the bouncebuffer is ready by writing to SUME
* registers RIFFA_RX_SG_ADDR_LO_REG_OFF, RIFFA_RX_SG_ADDR_HI_REG_OFF and
* RIFFA_RX_SG_LEN_REG_OFF just as in RX step (5).
* (7) SUME generates an interrupt vector, bit 01000 is set (channel 0 -
* bouncebuffer is read).
* (8) SUME generates an interrupt vector, bit 10000 is set (channel 0 -
* transaction is done).
* (9) SUME can do both steps (7) and (8) using the same interrupt.
*
* Internal registers
* Every module in the SUME hardware has its own set of internal registers
* (IDs, for debugging and statistic purposes, etc.). Their base addresses are
* defined in 'projects/reference_nic/hw/tcl/reference_nic_defines.tcl' and the
* offsets to different memory locations of every module are defined in their
* corresponding folder inside the library. These registers can be RO/RW and
* there is a special method to fetch/change this data over 1 or 2 DMA
* transactions. For writing, by calling the sume_module_reg_write(). For
* reading, by calling the sume_module_reg_write() and then
* sume_module_reg_read(). Check those functions for more information.
*/
MALLOC_DECLARE(M_SUME);
MALLOC_DEFINE(M_SUME, "sume", "NetFPGA SUME device driver");

/* Forward declarations for helpers defined later in this file. */
static void check_tx_queues(struct sume_adapter *);
static void sume_fill_bb_desc(struct sume_adapter *, struct riffa_chnl_dir *,
    uint64_t);

/* Unit number allocator -- presumably for sumeN naming; see attach. */
static struct unrhdr *unr;

/* PCI device IDs this driver attaches to (vendor is always Xilinx). */
static struct {
	uint16_t device;
	char *desc;
} sume_pciids[] = {
	{PCI_DEVICE_ID_SUME, "NetFPGA SUME reference NIC"},
};
/*
 * Read one 32-bit RIFFA register.  Register offsets are given in 32-bit
 * words, so shift left by two to form the byte offset for bus_space.
 */
static inline uint32_t
read_reg(struct sume_adapter *adapter, int offset)
{
	uint32_t val;

	val = bus_space_read_4(adapter->bt, adapter->bh, offset << 2);
	return (val);
}
/*
 * Write one 32-bit RIFFA register.  As in read_reg(), 'offset' is a
 * word offset and is converted to a byte offset for bus_space.
 */
static inline void
write_reg(struct sume_adapter *adapter, int offset, uint32_t val)
{
	bus_addr_t byte_off = offset << 2;

	bus_space_write_4(adapter->bt, adapter->bh, byte_off, val);
}
/*
 * Device probe: accept only Xilinx-vendor devices whose device ID is in
 * the sume_pciids table, and set the bus description on a match.
 */
static int
sume_probe(device_t dev)
{
	uint16_t device_id;
	int i;

	if (pci_get_vendor(dev) != PCI_VENDOR_ID_XILINX)
		return (ENXIO);

	device_id = pci_get_device(dev);
	for (i = 0; i < nitems(sume_pciids); i++) {
		if (device_id != sume_pciids[i].device)
			continue;
		device_set_desc(dev, sume_pciids[i].desc);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}
/*
 * Building mbuf for packet received from SUME. We expect to receive 'len'
 * bytes of data (including metadata) written from the bouncebuffer address
 * buf_addr+3*sizeof(uint32_t). Metadata will tell us which SUME interface
 * received the packet (sport will be 1, 2, 4 or 8), the packet length (plen),
 * and the magic word needs to be 0xcafe. When we have the packet data, we
 * create an mbuf and copy the data to it using m_copyback() function, set the
 * correct interface to rcvif and return the mbuf to be later sent to the OS
 * with if_input.
 *
 * Returns NULL (after bumping the adapter/interface error counters) when
 * the frame is short, corrupted, addressed to an invalid port, when the
 * target interface is down, or when no mbuf can be allocated.
 */
static struct mbuf *
sume_rx_build_mbuf(struct sume_adapter *adapter, uint32_t len)
{
	struct nf_priv *nf_priv;
	struct mbuf *m;
	if_t ifp = NULL;
	int np;
	uint16_t dport, plen, magic;
	device_t dev = adapter->dev;
	uint8_t *indata = (uint8_t *)
	    adapter->recv[SUME_RIFFA_CHANNEL_DATA]->buf_addr +
	    sizeof(struct nf_bb_desc);
	struct nf_metadata *mdata = (struct nf_metadata *) indata;

	/* The metadata header is 16 bytes. */
	if (len < sizeof(struct nf_metadata)) {
		device_printf(dev, "short frame (%d)\n", len);
		adapter->packets_err++;
		adapter->bytes_err += len;
		return (NULL);
	}

	dport = le16toh(mdata->dport);
	plen = le16toh(mdata->plen);
	magic = le16toh(mdata->magic);

	if (sizeof(struct nf_metadata) + plen > len ||
	    magic != SUME_RIFFA_MAGIC) {
		device_printf(dev, "corrupted packet (%zd + %d > %d || magic "
		    "0x%04x != 0x%04x)\n", sizeof(struct nf_metadata), plen,
		    len, magic, SUME_RIFFA_MAGIC);
		/* Account for the bad frame, as the other error paths do. */
		adapter->packets_err++;
		adapter->bytes_err += len;
		return (NULL);
	}

	/*
	 * We got the packet from one of the even bits.  Guard both ends of
	 * the range: ffs() returns 0 when no even bit is set (np == -1),
	 * and np == SUME_NPORTS would index one past the end of
	 * adapter->ifp[].
	 */
	np = (ffs(dport & SUME_DPORT_MASK) >> 1) - 1;
	if (np < 0 || np >= SUME_NPORTS) {
		device_printf(dev, "invalid destination port 0x%04x (%d)\n",
		    dport, np);
		adapter->packets_err++;
		adapter->bytes_err += plen;
		return (NULL);
	}
	ifp = adapter->ifp[np];
	nf_priv = if_getsoftc(ifp);
	nf_priv->stats.rx_packets++;
	nf_priv->stats.rx_bytes += plen;

	/* If the interface is down, well, we are done. */
	if (!(if_getflags(ifp) & IFF_UP)) {
		nf_priv->stats.ifc_down_packets++;
		nf_priv->stats.ifc_down_bytes += plen;
		return (NULL);
	}

	if (adapter->sume_debug)
		printf("Building mbuf with length: %d\n", plen);

	m = m_getm(NULL, plen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		adapter->packets_err++;
		adapter->bytes_err += plen;
		return (NULL);
	}

	/* Copy the data in at the right offset. */
	m_copyback(m, 0, plen, (void *) (indata + sizeof(struct nf_metadata)));
	m->m_pkthdr.rcvif = ifp;

	return (m);
}
/*
 * SUME interrupt handler for when we get a valid interrupt from the board.
 * Theoretically, we can receive interrupt for any of the available channels,
 * but RIFFA DMA uses only 2: 0 and 1, so we use only vect0. The vector is a 32
 * bit number, using 5 bits for every channel, the least significant bits
 * correspond to channel 0 and the next 5 bits correspond to channel 1. Vector
 * bits for RX/TX are:
 * RX
 * bit 0 - new transaction from SUME
 * bit 1 - SUME received our bouncebuffer address
 * bit 2 - SUME copied the received data to our bouncebuffer, transaction done
 * TX
 * bit 3 - SUME received our bouncebuffer address
 * bit 4 - SUME copied the data from our bouncebuffer, transaction done
 *
 * There are two finite state machines (one for TX, one for RX). We loop
 * through channels 0 and 1 to check and our current state and which interrupt
 * bit is set.
 * TX
 * SUME_RIFFA_CHAN_STATE_IDLE: waiting for the first TX transaction.
 * SUME_RIFFA_CHAN_STATE_READY: we prepared (filled with data) the bouncebuffer
 * and triggered the SUME for the TX transaction. Waiting for interrupt bit 3
 * to go to the next state.
 * SUME_RIFFA_CHAN_STATE_READ: waiting for interrupt bit 4 (for SUME to send
 * our packet). Then we get the length of the sent data and go back to the
 * IDLE state.
 * RX
 * SUME_RIFFA_CHAN_STATE_IDLE: waiting for the interrupt bit 0 (new RX
 * transaction). When we get it, we prepare our bouncebuffer for reading and
 * trigger the SUME to start the transaction. Go to the next state.
 * SUME_RIFFA_CHAN_STATE_READY: waiting for the interrupt bit 1 (SUME got our
 * bouncebuffer). Go to the next state.
 * SUME_RIFFA_CHAN_STATE_READ: SUME copied data and our bouncebuffer is ready,
 * we can build the mbuf and go back to the IDLE state.
 */
static void
sume_intr_handler(void *arg)
{
	struct sume_adapter *adapter = arg;
	uint32_t vect, vect0, len;
	int ch, loops;
	device_t dev = adapter->dev;
	struct mbuf *m = NULL;
	if_t ifp = NULL;
	struct riffa_chnl_dir *send, *recv;

	SUME_LOCK(adapter);

	/* Reading the IRQ register also acknowledges the interrupt. */
	vect0 = read_reg(adapter, RIFFA_IRQ_REG0_OFF);
	if ((vect0 & SUME_INVALID_VECT) != 0) {
		SUME_UNLOCK(adapter);
		return;
	}

	/*
	 * We only have one interrupt for all channels and no way
	 * to quickly lookup for which channel(s) we got an interrupt?
	 */
	for (ch = 0; ch < SUME_RIFFA_CHANNELS; ch++) {
		/* Each channel owns a 5-bit slice of the vector. */
		vect = vect0 >> (5 * ch);
		send = adapter->send[ch];
		recv = adapter->recv[ch];
		loops = 0;

		/*
		 * TX state machine: advance while TX bits remain set;
		 * 'loops' bounds the iteration in case a bit is never
		 * consumed (e.g. after entering recovery).
		 */
		while ((vect & (SUME_MSI_TXBUF | SUME_MSI_TXDONE)) &&
		    loops <= 5) {
			if (adapter->sume_debug)
				device_printf(dev, "TX ch %d state %u vect = "
				    "0x%08x\n", ch, send->state, vect);
			switch (send->state) {
			case SUME_RIFFA_CHAN_STATE_IDLE:
				break;
			case SUME_RIFFA_CHAN_STATE_READY:
				if (!(vect & SUME_MSI_TXBUF)) {
					device_printf(dev, "ch %d unexpected "
					    "interrupt in send+3 state %u: "
					    "vect = 0x%08x\n", ch, send->state,
					    vect);
					send->recovery = 1;
					break;
				}
				send->state = SUME_RIFFA_CHAN_STATE_READ;
				vect &= ~SUME_MSI_TXBUF;
				break;
			case SUME_RIFFA_CHAN_STATE_READ:
				if (!(vect & SUME_MSI_TXDONE)) {
					device_printf(dev, "ch %d unexpected "
					    "interrupt in send+4 state %u: "
					    "vect = 0x%08x\n", ch, send->state,
					    vect);
					send->recovery = 1;
					break;
				}
				send->state = SUME_RIFFA_CHAN_STATE_LEN;
				/*
				 * Read (and thereby drain) the transfer
				 * length register; the value itself is not
				 * used on the TX side.
				 */
				len = read_reg(adapter, RIFFA_CHNL_REG(ch,
				    RIFFA_RX_TNFR_LEN_REG_OFF));
				if (ch == SUME_RIFFA_CHANNEL_DATA) {
					send->state =
					    SUME_RIFFA_CHAN_STATE_IDLE;
					check_tx_queues(adapter);
				} else if (ch == SUME_RIFFA_CHANNEL_REG)
					wakeup(&send->event);
				else {
					device_printf(dev, "ch %d unexpected "
					    "interrupt in send+4 state %u: "
					    "vect = 0x%08x\n", ch, send->state,
					    vect);
					send->recovery = 1;
				}
				vect &= ~SUME_MSI_TXDONE;
				break;
			case SUME_RIFFA_CHAN_STATE_LEN:
				break;
			default:
				device_printf(dev, "unknown TX state!\n");
			}
			loops++;
		}

		/*
		 * NOTE(review): this prints when recovery IS set, but the
		 * message text says "not in recovery" -- looks inverted;
		 * confirm intended wording before changing the string.
		 */
		if ((vect & (SUME_MSI_TXBUF | SUME_MSI_TXDONE)) &&
		    send->recovery)
			device_printf(dev, "ch %d ignoring vect = 0x%08x "
			    "during TX; not in recovery; state = %d loops = "
			    "%d\n", ch, vect, send->state, loops);

		loops = 0;
		/* RX state machine, bounded the same way as TX above. */
		while ((vect & (SUME_MSI_RXQUE | SUME_MSI_RXBUF |
		    SUME_MSI_RXDONE)) && loops < 5) {
			if (adapter->sume_debug)
				device_printf(dev, "RX ch %d state %u vect = "
				    "0x%08x\n", ch, recv->state, vect);
			switch (recv->state) {
			case SUME_RIFFA_CHAN_STATE_IDLE:
				if (!(vect & SUME_MSI_RXQUE)) {
					device_printf(dev, "ch %d unexpected "
					    "interrupt in recv+0 state %u: "
					    "vect = 0x%08x\n", ch, recv->state,
					    vect);
					recv->recovery = 1;
					break;
				}
				uint32_t max_ptr;

				/* Clear recovery state. */
				recv->recovery = 0;

				/* Get offset and length. */
				recv->offlast = read_reg(adapter,
				    RIFFA_CHNL_REG(ch,
				    RIFFA_TX_OFFLAST_REG_OFF));
				recv->len = read_reg(adapter, RIFFA_CHNL_REG(ch,
				    RIFFA_TX_LEN_REG_OFF));

				/* Boundary checks. */
				max_ptr = (uint32_t)((uintptr_t)recv->buf_addr
				    + SUME_RIFFA_OFFSET(recv->offlast)
				    + SUME_RIFFA_LEN(recv->len) - 1);
				if (max_ptr <
				    (uint32_t)((uintptr_t)recv->buf_addr))
					device_printf(dev, "receive buffer "
					    "wrap-around overflow.\n");
				if (SUME_RIFFA_OFFSET(recv->offlast) +
				    SUME_RIFFA_LEN(recv->len) >
				    adapter->sg_buf_size)
					device_printf(dev, "receive buffer too"
					    " small.\n");

				/* Fill the bouncebuf "descriptor". */
				sume_fill_bb_desc(adapter, recv,
				    SUME_RIFFA_LEN(recv->len));

				bus_dmamap_sync(recv->ch_tag, recv->ch_map,
				    BUS_DMASYNC_PREREAD |
				    BUS_DMASYNC_PREWRITE);
				/*
				 * Hand the scatter/gather descriptor to the
				 * hardware; the SG_LEN write triggers DMA.
				 */
				write_reg(adapter, RIFFA_CHNL_REG(ch,
				    RIFFA_TX_SG_ADDR_LO_REG_OFF),
				    SUME_RIFFA_LO_ADDR(recv->buf_hw_addr));
				write_reg(adapter, RIFFA_CHNL_REG(ch,
				    RIFFA_TX_SG_ADDR_HI_REG_OFF),
				    SUME_RIFFA_HI_ADDR(recv->buf_hw_addr));
				write_reg(adapter, RIFFA_CHNL_REG(ch,
				    RIFFA_TX_SG_LEN_REG_OFF),
				    4 * recv->num_sg);
				bus_dmamap_sync(recv->ch_tag, recv->ch_map,
				    BUS_DMASYNC_POSTREAD |
				    BUS_DMASYNC_POSTWRITE);

				recv->state = SUME_RIFFA_CHAN_STATE_READY;
				vect &= ~SUME_MSI_RXQUE;
				break;
			case SUME_RIFFA_CHAN_STATE_READY:
				if (!(vect & SUME_MSI_RXBUF)) {
					device_printf(dev, "ch %d unexpected "
					    "interrupt in recv+1 state %u: "
					    "vect = 0x%08x\n", ch, recv->state,
					    vect);
					recv->recovery = 1;
					break;
				}
				recv->state = SUME_RIFFA_CHAN_STATE_READ;
				vect &= ~SUME_MSI_RXBUF;
				break;
			case SUME_RIFFA_CHAN_STATE_READ:
				if (!(vect & SUME_MSI_RXDONE)) {
					device_printf(dev, "ch %d unexpected "
					    "interrupt in recv+2 state %u: "
					    "vect = 0x%08x\n", ch, recv->state,
					    vect);
					recv->recovery = 1;
					break;
				}
				len = read_reg(adapter, RIFFA_CHNL_REG(ch,
				    RIFFA_TX_TNFR_LEN_REG_OFF));

				/* Remember, len and recv->len are words. */
				if (ch == SUME_RIFFA_CHANNEL_DATA) {
					m = sume_rx_build_mbuf(adapter,
					    len << 2);
					recv->state =
					    SUME_RIFFA_CHAN_STATE_IDLE;
				} else if (ch == SUME_RIFFA_CHANNEL_REG)
					wakeup(&recv->event);
				else {
					device_printf(dev, "ch %d unexpected "
					    "interrupt in recv+2 state %u: "
					    "vect = 0x%08x\n", ch, recv->state,
					    vect);
					recv->recovery = 1;
				}
				vect &= ~SUME_MSI_RXDONE;
				break;
			case SUME_RIFFA_CHAN_STATE_LEN:
				break;
			default:
				device_printf(dev, "unknown RX state!\n");
			}
			loops++;
		}

		/* See NOTE(review) on the TX message above; same wording. */
		if ((vect & (SUME_MSI_RXQUE | SUME_MSI_RXBUF |
		    SUME_MSI_RXDONE)) && recv->recovery) {
			device_printf(dev, "ch %d ignoring vect = 0x%08x "
			    "during RX; not in recovery; state = %d, loops = "
			    "%d\n", ch, vect, recv->state, loops);

			/* Clean the unfinished transaction. */
			if (ch == SUME_RIFFA_CHANNEL_REG &&
			    vect & SUME_MSI_RXDONE) {
				read_reg(adapter, RIFFA_CHNL_REG(ch,
				    RIFFA_TX_TNFR_LEN_REG_OFF));
				recv->recovery = 0;
			}
		}
	}
	SUME_UNLOCK(adapter);

	/* Deliver the received packet outside the adapter lock. */
	if (m != NULL) {
		ifp = m->m_pkthdr.rcvif;
		if_input(ifp, m);
	}
}
/*
 * As we cannot disable interrupt generation, ignore early interrupts by
 * waiting for the adapter to go into the 'running' state.
 */
static int
sume_intr_filter(void *arg)
{
	struct sume_adapter *adapter = arg;

	return (adapter->running == 0 ?
	    FILTER_STRAY : FILTER_SCHEDULE_THREAD);
}
/*
 * Map BAR0, set up the MSI interrupt, tune PCIe device/link control bits
 * and validate the RIFFA configuration register.  Returns 0 on success or
 * an errno.  NOTE(review): resources acquired here (BAR, MSI, IRQ) are
 * not released on the error paths -- presumably detach handles teardown;
 * verify against the detach routine.
 */
static int
sume_probe_riffa_pci(struct sume_adapter *adapter)
{
	device_t dev = adapter->dev;
	int error, count, capmem;
	uint32_t reg, devctl, linkctl;

	pci_enable_busmaster(dev);

	adapter->rid = PCIR_BAR(0);
	adapter->bar0_addr = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &adapter->rid, RF_ACTIVE);
	if (adapter->bar0_addr == NULL) {
		device_printf(dev, "unable to allocate bus resource: "
		    "BAR0 address\n");
		return (ENXIO);
	}
	adapter->bt = rman_get_bustag(adapter->bar0_addr);
	adapter->bh = rman_get_bushandle(adapter->bar0_addr);
	adapter->bar0_len = rman_get_size(adapter->bar0_addr);
	/* The RIFFA register window is exactly 1 KiB. */
	if (adapter->bar0_len != 1024) {
		device_printf(dev, "BAR0 resource length %lu != 1024\n",
		    adapter->bar0_len);
		return (ENXIO);
	}

	count = pci_msi_count(dev);
	error = pci_alloc_msi(dev, &count);
	if (error) {
		device_printf(dev, "unable to allocate bus resource: PCI "
		    "MSI\n");
		return (error);
	}

	adapter->irq.rid = 1; /* Should be 1, thus says pci_alloc_msi() */
	adapter->irq.res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &adapter->irq.rid, RF_SHAREABLE | RF_ACTIVE);
	if (adapter->irq.res == NULL) {
		device_printf(dev, "unable to allocate bus resource: IRQ "
		    "memory\n");
		return (ENXIO);
	}

	error = bus_setup_intr(dev, adapter->irq.res, INTR_MPSAFE |
	    INTR_TYPE_NET, sume_intr_filter, sume_intr_handler, adapter,
	    &adapter->irq.tag);
	if (error) {
		device_printf(dev, "failed to setup interrupt for rid %d, name"
		    " %s: %d\n", adapter->irq.rid, "SUME_INTR", error);
		return (ENXIO);
	}

	if (pci_find_cap(dev, PCIY_EXPRESS, &capmem) != 0) {
		device_printf(dev, "PCI not PCIe capable\n");
		return (ENXIO);
	}

	/* Enable extended tags, ID-based ordering and RCB for throughput. */
	devctl = pci_read_config(dev, capmem + PCIER_DEVICE_CTL, 2);
	pci_write_config(dev, capmem + PCIER_DEVICE_CTL, (devctl |
	    PCIEM_CTL_EXT_TAG_FIELD), 2);

	devctl = pci_read_config(dev, capmem + PCIER_DEVICE_CTL2, 2);
	pci_write_config(dev, capmem + PCIER_DEVICE_CTL2, (devctl |
	    PCIEM_CTL2_ID_ORDERED_REQ_EN), 2);

	linkctl = pci_read_config(dev, capmem + PCIER_LINK_CTL, 2);
	pci_write_config(dev, capmem + PCIER_LINK_CTL, (linkctl |
	    PCIEM_LINK_CTL_RCB), 2);

	/* Decode the RIFFA info register (bus width scales SG/buffers). */
	reg = read_reg(adapter, RIFFA_INFO_REG_OFF);
	adapter->num_sg = RIFFA_SG_ELEMS * ((reg >> 19) & 0xf);
	adapter->sg_buf_size = RIFFA_SG_BUF_SIZE * ((reg >> 19) & 0xf);

	error = ENODEV;
	/* Check bus master is enabled. */
	if (((reg >> 4) & 0x1) != 1) {
		device_printf(dev, "bus master not enabled: %d\n",
		    (reg >> 4) & 0x1);
		return (error);
	}
	/* Check link parameters are valid. */
	if (((reg >> 5) & 0x3f) == 0 || ((reg >> 11) & 0x3) == 0) {
		device_printf(dev, "link parameters not valid: %d %d\n",
		    (reg >> 5) & 0x3f, (reg >> 11) & 0x3);
		return (error);
	}
	/* Check # of channels are within valid range. */
	if ((reg & 0xf) == 0 || (reg & 0xf) > RIFFA_MAX_CHNLS) {
		device_printf(dev, "number of channels out of range: %d\n",
		    reg & 0xf);
		return (error);
	}
	/* Check bus width. */
	if (((reg >> 19) & 0xf) == 0 ||
	    ((reg >> 19) & 0xf) > RIFFA_MAX_BUS_WIDTH_PARAM) {
		device_printf(dev, "bus width out of range: %d\n",
		    (reg >> 19) & 0xf);
		return (error);
	}

	device_printf(dev, "[riffa] # of channels: %d\n",
	    reg & 0xf);
	device_printf(dev, "[riffa] bus interface width: %d\n",
	    ((reg >> 19) & 0xf) << 5);
	device_printf(dev, "[riffa] bus master enabled: %d\n",
	    (reg >> 4) & 0x1);
	device_printf(dev, "[riffa] negotiated link width: %d\n",
	    (reg >> 5) & 0x3f);
	device_printf(dev, "[riffa] negotiated rate width: %d MTs\n",
	    ((reg >> 11) & 0x3) * 2500);
	device_printf(dev, "[riffa] max downstream payload: %d B\n",
	    128 << ((reg >> 13) & 0x7));
	device_printf(dev, "[riffa] max upstream payload: %d B\n",
	    128 << ((reg >> 16) & 0x7));

	return (0);
}
/*
 * If there is no sume_if_init, the ether_ioctl panics.
 * Intentionally empty: the hardware needs no per-open initialization,
 * but ether_ioctl() requires a non-NULL if_init callback.
 */
static void
sume_if_init(void *sc)
{
}
/*
 * Write the address and length for our incoming / outgoing transaction.
 * The descriptor sits at the start of the bounce buffer and points the
 * hardware at the payload area that follows it; 'len' is in bytes and is
 * stored as a count of 32-bit words.
 */
static void
sume_fill_bb_desc(struct sume_adapter *adapter, struct riffa_chnl_dir *p,
    uint64_t len)
{
	struct nf_bb_desc *desc;
	uint64_t payload_hw;

	desc = (struct nf_bb_desc *) p->buf_addr;
	payload_hw = p->buf_hw_addr + sizeof(struct nf_bb_desc);

	desc->lower = payload_hw;
	desc->upper = payload_hw >> 32;
	desc->len = len >> 2;
}
/*
 * Module register locked write.
 * Caller holds the adapter lock and has already filled the payload area
 * of the REG-channel send bounce buffer and set send->len (in words).
 * The register writes below must stay in this order: the SG_LEN write is
 * what triggers the DMA.
 */
static int
sume_modreg_write_locked(struct sume_adapter *adapter)
{
	struct riffa_chnl_dir *send = adapter->send[SUME_RIFFA_CHANNEL_REG];

	/* Let the FPGA know about the transfer. */
	write_reg(adapter, RIFFA_CHNL_REG(SUME_RIFFA_CHANNEL_REG,
	    RIFFA_RX_OFFLAST_REG_OFF), SUME_OFFLAST);
	write_reg(adapter, RIFFA_CHNL_REG(SUME_RIFFA_CHANNEL_REG,
	    RIFFA_RX_LEN_REG_OFF), send->len); /* words */

	/* Fill the bouncebuf "descriptor". */
	sume_fill_bb_desc(adapter, send, SUME_RIFFA_LEN(send->len));

	/* Update the state before initiating the DMA to avoid races. */
	send->state = SUME_RIFFA_CHAN_STATE_READY;

	bus_dmamap_sync(send->ch_tag, send->ch_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* DMA. */
	write_reg(adapter, RIFFA_CHNL_REG(SUME_RIFFA_CHANNEL_REG,
	    RIFFA_RX_SG_ADDR_LO_REG_OFF),
	    SUME_RIFFA_LO_ADDR(send->buf_hw_addr));
	write_reg(adapter, RIFFA_CHNL_REG(SUME_RIFFA_CHANNEL_REG,
	    RIFFA_RX_SG_ADDR_HI_REG_OFF),
	    SUME_RIFFA_HI_ADDR(send->buf_hw_addr));
	write_reg(adapter, RIFFA_CHNL_REG(SUME_RIFFA_CHANNEL_REG,
	    RIFFA_RX_SG_LEN_REG_OFF), 4 * send->num_sg);
	bus_dmamap_sync(send->ch_tag, send->ch_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	return (0);
}
/*
 * Request a register read or write (depending on optype).
 * If optype is set (0x1f) this will result in a register write,
 * otherwise this will result in a register read request at the given
 * address and the result will need to be DMAed back.
 *
 * Returns 0 on success, EBUSY when the REG send channel is in use,
 * EFAULT when triggering the DMA fails, or EWOULDBLOCK when a read
 * request timed out / was interrupted.
 */
static int
sume_module_reg_write(struct nf_priv *nf_priv, struct sume_ifreq *sifr,
    uint32_t optype)
{
	struct sume_adapter *adapter = nf_priv->adapter;
	struct riffa_chnl_dir *send = adapter->send[SUME_RIFFA_CHANNEL_REG];
	struct nf_regop_data *data;
	int error;

	/*
	 * 1. Make sure the channel is free; otherwise return EBUSY.
	 * 2. Prepare the memory in the bounce buffer (which we always
	 *    use for regs).
	 * 3. Start the DMA process.
	 * 4. Sleep and wait for result and return success or error.
	 */
	SUME_LOCK(adapter);
	if (send->state != SUME_RIFFA_CHAN_STATE_IDLE) {
		SUME_UNLOCK(adapter);
		return (EBUSY);
	}

	data = (struct nf_regop_data *) (send->buf_addr +
	    sizeof(struct nf_bb_desc));
	data->addr = htole32(sifr->addr);
	data->val = htole32(sifr->val);
	/* Tag to identify request. */
	data->rtag = htole32(++send->rtag);
	data->optype = htole32(optype);
	send->len = sizeof(struct nf_regop_data) / 4; /* words */

	error = sume_modreg_write_locked(adapter);
	if (error) {
		SUME_UNLOCK(adapter);
		return (EFAULT);
	}

	/* Timeout after 1s. */
	if (send->state != SUME_RIFFA_CHAN_STATE_LEN)
		error = msleep(&send->event, &adapter->lock, 0,
		    "Waiting recv finish", 1 * hz);

	/*
	 * This was a write so we are done; were interrupted, or timed out.
	 * (msleep() returns EWOULDBLOCK on timeout, which is a non-zero
	 * error like any other, so a single error != 0 test covers it.)
	 */
	if (optype != SUME_MR_READ || error != 0) {
		send->state = SUME_RIFFA_CHAN_STATE_IDLE;
		if (optype == SUME_MR_READ)
			error = EWOULDBLOCK;
		else
			error = 0;
	} else
		error = 0;

	/*
	 * For read requests we will update state once we are done
	 * having read the result to avoid any two outstanding
	 * transactions, or we need a queue and validate tags,
	 * which is a lot of work for a low priority, infrequent
	 * event.
	 */

	SUME_UNLOCK(adapter);
	return (error);
}
/*
 * Module register read.
 * Completes a read request previously started by sume_module_reg_write()
 * with SUME_MR_READ: waits for the REG-channel RX DMA to finish, copies
 * the value into sifr->val and returns both channel state machines to
 * IDLE.  Returns 0 on success or EWOULDBLOCK on timeout.
 */
static int
sume_module_reg_read(struct nf_priv *nf_priv, struct sume_ifreq *sifr)
{
	struct sume_adapter *adapter = nf_priv->adapter;
	struct riffa_chnl_dir *recv = adapter->recv[SUME_RIFFA_CHANNEL_REG];
	struct riffa_chnl_dir *send = adapter->send[SUME_RIFFA_CHANNEL_REG];
	struct nf_regop_data *data;
	int error = 0;

	/*
	 * 0. Sleep waiting for result if needed (unless condition is
	 *    true already).
	 * 1. Read DMA results.
	 * 2. Update state on *TX* to IDLE to allow next read to start.
	 */
	SUME_LOCK(adapter);
	bus_dmamap_sync(recv->ch_tag, recv->ch_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * We only need to be woken up at the end of the transaction.
	 * Timeout after 1s.
	 */
	if (recv->state != SUME_RIFFA_CHAN_STATE_READ)
		error = msleep(&recv->event, &adapter->lock, 0,
		    "Waiting transaction finish", 1 * hz);

	if (recv->state != SUME_RIFFA_CHAN_STATE_READ || error == EWOULDBLOCK) {
		SUME_UNLOCK(adapter);
		device_printf(adapter->dev, "wait error: %d\n", error);
		return (EWOULDBLOCK);
	}

	bus_dmamap_sync(recv->ch_tag, recv->ch_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Read reply data and validate address and tag.
	 * Note: we do access the send side without lock but the state
	 * machine does prevent the data from changing.
	 */
	data = (struct nf_regop_data *) (recv->buf_addr +
	    sizeof(struct nf_bb_desc));

	/* A tag mismatch is logged but the value is still returned. */
	if (le32toh(data->rtag) != send->rtag)
		device_printf(adapter->dev, "rtag error: 0x%08x 0x%08x\n",
		    le32toh(data->rtag), send->rtag);

	sifr->val = le32toh(data->val);
	recv->state = SUME_RIFFA_CHAN_STATE_IDLE;

	/* We are done. */
	send->state = SUME_RIFFA_CHAN_STATE_IDLE;

	SUME_UNLOCK(adapter);
	return (0);
}
/*
 * Read value from a module register and return it to a sume_ifreq:
 * issue the read request, then collect the DMAed-back result.
 */
static int
get_modreg_value(struct nf_priv *nf_priv, struct sume_ifreq *sifr)
{
	int rc;

	rc = sume_module_reg_write(nf_priv, sifr, SUME_MR_READ);
	if (rc != 0)
		return (rc);

	return (sume_module_reg_read(nf_priv, sifr));
}
/*
 * ifnet ioctl handler: media queries, the driver-private register
 * read/write ioctls, and everything else via ether_ioctl().
 */
static int
sume_if_ioctl(if_t ifp, unsigned long cmd, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *) data;
	struct nf_priv *nf_priv = if_getsoftc(ifp);
	struct sume_ifreq sifr;
	int error = 0;

	switch (cmd) {
	case SIOCGIFMEDIA:
	case SIOCGIFXMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &nf_priv->media, cmd);
		break;
	case SUME_IOCTL_CMD_WRITE_REG:
		error = copyin(ifr_data_get_ptr(ifr), &sifr, sizeof(sifr));
		if (error) {
			error = EINVAL;
			break;
		}
		error = sume_module_reg_write(nf_priv, &sifr, SUME_MR_WRITE);
		break;
	case SUME_IOCTL_CMD_READ_REG:
		error = copyin(ifr_data_get_ptr(ifr), &sifr, sizeof(sifr));
		if (error) {
			error = EINVAL;
			break;
		}

		error = get_modreg_value(nf_priv, &sifr);
		if (error)
			break;

		error = copyout(&sifr, ifr_data_get_ptr(ifr), sizeof(sifr));
		if (error)
			error = EINVAL;

		break;
	case SIOCSIFFLAGS:
		/* Silence tcpdump 'promisc mode not supported' warning. */
		if (if_getflags(ifp) & IFF_PROMISC)
			break;
		/* FALLTHROUGH: non-promisc flag changes go to ether_ioctl. */
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
/*
 * ifmedia change callback: only Ethernet media is accepted; update the
 * interface baudrate to match the selected media word.
 */
static int
sume_media_change(if_t ifp)
{
	struct nf_priv *nf_priv = if_getsoftc(ifp);
	struct ifmedia *ifm = &nf_priv->media;
	int media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10G_SR)
		media = IFM_ETHER | IFM_10G_SR;
	else
		media = ifm->ifm_media;
	if_setbaudrate(ifp, ifmedia_baudrate(media));

	return (0);
}
/*
 * Poll the port's status register over the REG channel and propagate any
 * link-state transition (UP/DOWN) to the network stack.
 */
static void
sume_update_link_status(if_t ifp)
{
	struct nf_priv *nf_priv = if_getsoftc(ifp);
	struct sume_adapter *adapter = nf_priv->adapter;
	struct sume_ifreq sifr;
	int link_status;

	sifr.addr = SUME_STATUS_ADDR(nf_priv->port);
	sifr.val = 0;

	/* If the hardware read fails, leave the cached state untouched. */
	if (get_modreg_value(nf_priv, &sifr) != 0)
		return;

	link_status = SUME_LINK_STATUS(sifr.val);

	if (link_status == 0 && nf_priv->link_up != 0) {
		if_link_state_change(ifp, LINK_STATE_DOWN);
		nf_priv->link_up = 0;
		if (adapter->sume_debug)
			device_printf(adapter->dev, "port %d link state "
			    "changed to DOWN\n", nf_priv->unit);
	} else if (link_status != 0 && nf_priv->link_up == 0) {
		nf_priv->link_up = 1;
		if_link_state_change(ifp, LINK_STATE_UP);
		if (adapter->sume_debug)
			device_printf(adapter->dev, "port %d link state "
			    "changed to UP\n", nf_priv->unit);
	}
}
/*
 * ifmedia status callback: report the active media and, after refreshing
 * the link state from hardware, whether the link is up (IFM_ACTIVE).
 */
static void
sume_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct nf_priv *nf_priv = if_getsoftc(ifp);
	struct ifmedia *ifm = &nf_priv->media;

	if (ifm->ifm_cur->ifm_media == (IFM_ETHER | IFM_10G_SR) &&
	    (if_getflags(ifp) & IFF_UP))
		ifmr->ifm_active = IFM_ETHER | IFM_10G_SR;
	else
		ifmr->ifm_active = ifm->ifm_cur->ifm_media;

	ifmr->ifm_status |= IFM_AVALID;

	/* Refresh link state from hardware before reporting it. */
	sume_update_link_status(ifp);
	if (nf_priv->link_up)
		ifmr->ifm_status |= IFM_ACTIVE;
}
/*
 * Packet to transmit. We take the packet data from the mbuf and copy it to the
 * bouncebuffer address buf_addr+3*sizeof(uint32_t)+16. The 16 bytes before the
 * packet data are for metadata: sport/dport (depending on our source
 * interface), packet length and magic 0xcafe. We tell the SUME about the
 * transfer, fill the first 3*sizeof(uint32_t) bytes of the bouncebuffer with
 * the information about the start and length of the packet and trigger the
 * transaction.
 *
 * Called with the adapter lock held and the DATA send channel IDLE.
 * Returns 0 when a packet was handed to the hardware, EINVAL when the
 * queue was empty, ENOMEM when the packet did not fit.
 */
static int
sume_if_start_locked(if_t ifp)
{
	struct mbuf *m;
	struct nf_priv *nf_priv = if_getsoftc(ifp);
	struct sume_adapter *adapter = nf_priv->adapter;
	struct riffa_chnl_dir *send = adapter->send[SUME_RIFFA_CHANNEL_DATA];
	uint8_t *outbuf;
	struct nf_metadata *mdata;
	int plen = SUME_MIN_PKT_SIZE;

	KASSERT(mtx_owned(&adapter->lock), ("SUME lock not owned"));
	KASSERT(send->state == SUME_RIFFA_CHAN_STATE_IDLE,
	    ("SUME not in IDLE state"));

	m = if_dequeue(ifp);
	if (m == NULL)
		return (EINVAL);

	/* Packets large enough do not need to be padded */
	if (m->m_pkthdr.len > SUME_MIN_PKT_SIZE)
		plen = m->m_pkthdr.len;

	if (adapter->sume_debug)
		device_printf(adapter->dev, "sending %d bytes to %s%d\n", plen,
		    SUME_ETH_DEVICE_NAME, nf_priv->unit);

	outbuf = (uint8_t *) send->buf_addr + sizeof(struct nf_bb_desc);
	mdata = (struct nf_metadata *) outbuf;

	/* Clear the recovery flag. */
	send->recovery = 0;

	/*
	 * Make sure we fit with the 16 bytes nf_metadata.
	 * NOTE(review): the bb_desc header is not included in this size
	 * check -- confirm sg_buf_size already accounts for it.
	 */
	if (m->m_pkthdr.len + sizeof(struct nf_metadata) >
	    adapter->sg_buf_size) {
		device_printf(adapter->dev, "packet too big for bounce buffer "
		    "(%d)\n", m->m_pkthdr.len);
		m_freem(m);
		nf_priv->stats.tx_dropped++;
		return (ENOMEM);
	}

	bus_dmamap_sync(send->ch_tag, send->ch_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Zero out the padded data */
	if (m->m_pkthdr.len < SUME_MIN_PKT_SIZE)
		bzero(outbuf + sizeof(struct nf_metadata), SUME_MIN_PKT_SIZE);
	/* Skip the first 16 bytes for the metadata. */
	m_copydata(m, 0, m->m_pkthdr.len, outbuf + sizeof(struct nf_metadata));
	/* Round up to full 32-bit words. */
	send->len = (sizeof(struct nf_metadata) + plen + 3) / 4;

	/* Fill in the metadata: CPU(DMA) ports are odd, MAC ports are even. */
	mdata->sport = htole16(1 << (nf_priv->port * 2 + 1));
	mdata->dport = htole16(1 << (nf_priv->port * 2));
	mdata->plen = htole16(plen);
	mdata->magic = htole16(SUME_RIFFA_MAGIC);
	mdata->t1 = htole32(0);
	mdata->t2 = htole32(0);

	/* Let the FPGA know about the transfer. */
	write_reg(adapter, RIFFA_CHNL_REG(SUME_RIFFA_CHANNEL_DATA,
	    RIFFA_RX_OFFLAST_REG_OFF), SUME_OFFLAST);
	write_reg(adapter, RIFFA_CHNL_REG(SUME_RIFFA_CHANNEL_DATA,
	    RIFFA_RX_LEN_REG_OFF), send->len);

	/* Fill the bouncebuf "descriptor". */
	sume_fill_bb_desc(adapter, send, SUME_RIFFA_LEN(send->len));

	/* Update the state before intiating the DMA to avoid races. */
	send->state = SUME_RIFFA_CHAN_STATE_READY;

	/* DMA.  The SG_LEN write triggers the transfer. */
	write_reg(adapter, RIFFA_CHNL_REG(SUME_RIFFA_CHANNEL_DATA,
	    RIFFA_RX_SG_ADDR_LO_REG_OFF),
	    SUME_RIFFA_LO_ADDR(send->buf_hw_addr));
	write_reg(adapter, RIFFA_CHNL_REG(SUME_RIFFA_CHANNEL_DATA,
	    RIFFA_RX_SG_ADDR_HI_REG_OFF),
	    SUME_RIFFA_HI_ADDR(send->buf_hw_addr));
	write_reg(adapter, RIFFA_CHNL_REG(SUME_RIFFA_CHANNEL_DATA,
	    RIFFA_RX_SG_LEN_REG_OFF), 4 * send->num_sg);
	bus_dmamap_sync(send->ch_tag, send->ch_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	nf_priv->stats.tx_packets++;
	nf_priv->stats.tx_bytes += plen;

	/* We can free as long as we use the bounce buffer. */
	m_freem(m);

	adapter->last_ifc = nf_priv->port;

	/* Reset watchdog counter. */
	adapter->wd_counter = 0;

	return (0);
}
/*
 * ifnet start callback: kick a transmit if the adapter is running, the
 * interface is up, and the DATA send channel is free.
 */
static void
sume_if_start(if_t ifp)
{
	struct nf_priv *nf_priv = if_getsoftc(ifp);
	struct sume_adapter *adapter = nf_priv->adapter;
	struct riffa_chnl_dir *tx;

	if (adapter->running == 0 || (if_getflags(ifp) & IFF_UP) == 0)
		return;

	SUME_LOCK(adapter);
	tx = adapter->send[SUME_RIFFA_CHANNEL_DATA];
	if (tx->state == SUME_RIFFA_CHAN_STATE_IDLE)
		sume_if_start_locked(ifp);
	SUME_UNLOCK(adapter);
}
/*
 * We call this function at the end of every TX transaction to check for
 * remaining packets in the TX queues for every UP interface.  Scanning
 * starts at the port after the last one serviced, giving round-robin
 * fairness, and stops at the first successful transmit.
 */
static void
check_tx_queues(struct sume_adapter *adapter)
{
	int off, start;
	if_t ifp;

	KASSERT(mtx_owned(&adapter->lock), ("SUME lock not owned"));

	start = adapter->last_ifc;
	for (off = 1; off <= SUME_NPORTS; off++) {
		ifp = adapter->ifp[(start + off) % SUME_NPORTS];
		if ((if_getflags(ifp) & IFF_UP) == 0)
			continue;
		if (sume_if_start_locked(ifp) == 0)
			break;
	}
}
-static int
+static void
sume_ifp_alloc(struct sume_adapter *adapter, uint32_t port)
{
if_t ifp;
struct nf_priv *nf_priv = malloc(sizeof(struct nf_priv), M_SUME,
M_ZERO | M_WAITOK);
ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(adapter->dev, "cannot allocate ifnet\n");
- return (ENOMEM);
- }
-
adapter->ifp[port] = ifp;
if_setsoftc(ifp, nf_priv);
nf_priv->adapter = adapter;
nf_priv->unit = alloc_unr(unr);
nf_priv->port = port;
nf_priv->link_up = 0;
if_initname(ifp, SUME_ETH_DEVICE_NAME, nf_priv->unit);
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setinitfn(ifp, sume_if_init);
if_setstartfn(ifp, sume_if_start);
if_setioctlfn(ifp, sume_if_ioctl);
uint8_t hw_addr[ETHER_ADDR_LEN] = DEFAULT_ETHER_ADDRESS;
hw_addr[ETHER_ADDR_LEN-1] = nf_priv->unit;
ether_ifattach(ifp, hw_addr);
ifmedia_init(&nf_priv->media, IFM_IMASK, sume_media_change,
sume_media_status);
ifmedia_add(&nf_priv->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
ifmedia_set(&nf_priv->media, IFM_ETHER | IFM_10G_SR);
if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
-
- return (0);
}
/*
 * bus_dmamap_load(9) callback: on success, store the bus address of the
 * single expected segment through the bus_addr_t pointer in 'arg'.
 */
static void
callback_dma(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	bus_addr_t *out = arg;

	if (err != 0)
		return;

	KASSERT(nseg == 1, ("%d segments returned!", nseg));
	*out = segs[0].ds_addr;
}
/*
 * Allocate the RIFFA DMA state for one direction ("recv" or "send"):
 * the per-channel riffa_chnl_dir array, a DMA tag and map per channel,
 * and one contiguous bounce buffer loaded for hardware access.
 *
 * malloc(9) with M_WAITOK cannot return NULL, so the former NULL checks
 * were dead code and have been dropped.  Returns 0 on success or a
 * bus_dma(9) error; partially-allocated resources are left for the
 * caller's teardown path.
 */
static int
sume_probe_riffa_buffer(const struct sume_adapter *adapter,
    struct riffa_chnl_dir ***p, const char *dir)
{
	struct riffa_chnl_dir **rp;
	bus_addr_t hw_addr;
	int err, ch;
	device_t dev = adapter->dev;

	*p = malloc(SUME_RIFFA_CHANNELS * sizeof(struct riffa_chnl_dir *),
	    M_SUME, M_ZERO | M_WAITOK);
	rp = *p;

	/* Allocate the chnl_dir structs themselves. */
	for (ch = 0; ch < SUME_RIFFA_CHANNELS; ch++) {
		/* One direction. */
		rp[ch] = malloc(sizeof(struct riffa_chnl_dir), M_SUME,
		    M_ZERO | M_WAITOK);

		err = bus_dma_tag_create(bus_get_dma_tag(dev),
		    4, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    adapter->sg_buf_size,	/* maxsize */
		    1,				/* nsegments */
		    adapter->sg_buf_size,	/* maxsegsize */
		    0,				/* flags */
		    NULL,			/* lockfunc */
		    NULL,			/* lockfuncarg */
		    &rp[ch]->ch_tag);
		if (err) {
			device_printf(dev, "bus_dma_tag_create(%s[%d]) "
			    "failed.\n", dir, ch);
			return (err);
		}

		err = bus_dmamem_alloc(rp[ch]->ch_tag, (void **)
		    &rp[ch]->buf_addr, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
		    BUS_DMA_ZERO, &rp[ch]->ch_map);
		if (err) {
			device_printf(dev, "bus_dmamem_alloc(%s[%d]) failed.\n",
			    dir, ch);
			return (err);
		}
		bzero(rp[ch]->buf_addr, adapter->sg_buf_size);

		err = bus_dmamap_load(rp[ch]->ch_tag, rp[ch]->ch_map,
		    rp[ch]->buf_addr, adapter->sg_buf_size, callback_dma,
		    &hw_addr, BUS_DMA_NOWAIT);
		if (err) {
			device_printf(dev, "bus_dmamap_load(%s[%d]) failed.\n",
			    dir, ch);
			return (err);
		}
		rp[ch]->buf_hw_addr = hw_addr;
		rp[ch]->num_sg = 1;
		rp[ch]->state = SUME_RIFFA_CHAN_STATE_IDLE;

		rp[ch]->rtag = SUME_INIT_RTAG;
	}

	return (0);
}
/*
 * Allocate RIFFA DMA buffers for both directions; stop at the first
 * failure and return its error.
 */
static int
sume_probe_riffa_buffers(struct sume_adapter *adapter)
{
	int error;

	error = sume_probe_riffa_buffer(adapter, &adapter->recv, "recv");
	if (error != 0)
		return (error);

	return (sume_probe_riffa_buffer(adapter, &adapter->send, "send"));
}
/*
 * Register the adapter-wide sysctl nodes (debug knob, RX error counters)
 * and a per-interface subtree with its statistics counters.
 */
static void
sume_sysctl_init(struct sume_adapter *adapter)
{
	device_t dev = adapter->dev;
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	struct sysctl_oid *tmp_tree;
	char namebuf[MAX_IFC_NAME_LEN];
	int i;

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "sume", CTLFLAG_RW,
	    0, "SUME top-level tree");
	if (tree == NULL) {
		device_printf(dev, "SYSCTL_ADD_NODE failed.\n");
		return;
	}
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug", CTLFLAG_RW,
	    &adapter->sume_debug, 0, "debug int leaf");

	/* total RX error stats */
	SYSCTL_ADD_U64(ctx, child, OID_AUTO, "rx_epkts",
	    CTLFLAG_RD, &adapter->packets_err, 0, "rx errors");
	SYSCTL_ADD_U64(ctx, child, OID_AUTO, "rx_ebytes",
	    CTLFLAG_RD, &adapter->bytes_err, 0, "rx error bytes");

	for (i = SUME_NPORTS - 1; i >= 0; i--) {
		if_t ifp = adapter->ifp[i];
		if (ifp == NULL)
			continue;

		struct nf_priv *nf_priv = if_getsoftc(ifp);

		snprintf(namebuf, MAX_IFC_NAME_LEN, "%s%d",
		    SUME_ETH_DEVICE_NAME, nf_priv->unit);
		tmp_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RW, 0, "SUME ifc tree");
		if (tmp_tree == NULL) {
			device_printf(dev, "SYSCTL_ADD_NODE failed.\n");
			return;
		}

		/* Packets dropped by down interface. */
		SYSCTL_ADD_U64(ctx, SYSCTL_CHILDREN(tmp_tree), OID_AUTO,
		    "ifc_down_bytes", CTLFLAG_RD,
		    &nf_priv->stats.ifc_down_bytes, 0, "ifc_down bytes");
		SYSCTL_ADD_U64(ctx, SYSCTL_CHILDREN(tmp_tree), OID_AUTO,
		    "ifc_down_packets", CTLFLAG_RD,
		    &nf_priv->stats.ifc_down_packets, 0, "ifc_down packets");

		/* HW RX stats */
		SYSCTL_ADD_U64(ctx, SYSCTL_CHILDREN(tmp_tree), OID_AUTO,
		    "hw_rx_packets", CTLFLAG_RD, &nf_priv->stats.hw_rx_packets,
		    0, "hw_rx packets");

		/* HW TX stats */
		SYSCTL_ADD_U64(ctx, SYSCTL_CHILDREN(tmp_tree), OID_AUTO,
		    "hw_tx_packets", CTLFLAG_RD, &nf_priv->stats.hw_tx_packets,
		    0, "hw_tx packets");

		/* RX stats */
		SYSCTL_ADD_U64(ctx, SYSCTL_CHILDREN(tmp_tree), OID_AUTO,
		    "rx_bytes", CTLFLAG_RD, &nf_priv->stats.rx_bytes, 0,
		    "rx bytes");
		SYSCTL_ADD_U64(ctx, SYSCTL_CHILDREN(tmp_tree), OID_AUTO,
		    "rx_dropped", CTLFLAG_RD, &nf_priv->stats.rx_dropped, 0,
		    "rx dropped");
		SYSCTL_ADD_U64(ctx, SYSCTL_CHILDREN(tmp_tree), OID_AUTO,
		    "rx_packets", CTLFLAG_RD, &nf_priv->stats.rx_packets, 0,
		    "rx packets");

		/* TX stats */
		SYSCTL_ADD_U64(ctx, SYSCTL_CHILDREN(tmp_tree), OID_AUTO,
		    "tx_bytes", CTLFLAG_RD, &nf_priv->stats.tx_bytes, 0,
		    "tx bytes");
		SYSCTL_ADD_U64(ctx, SYSCTL_CHILDREN(tmp_tree), OID_AUTO,
		    "tx_dropped", CTLFLAG_RD, &nf_priv->stats.tx_dropped, 0,
		    "tx dropped");
		SYSCTL_ADD_U64(ctx, SYSCTL_CHILDREN(tmp_tree), OID_AUTO,
		    "tx_packets", CTLFLAG_RD, &nf_priv->stats.tx_packets, 0,
		    "tx packets");
	}
}
/*
 * Once-per-second housekeeping callout: enqueue the statistics task and
 * watch the TX DMA channel, resetting the RIFFA state if it has been
 * stuck non-idle for three consecutive ticks.
 */
static void
sume_local_timer(void *arg)
{
struct sume_adapter *adapter = arg;
/* Adapter is being torn down; do not reschedule. */
if (!adapter->running)
return;
/* Do the (potentially sleeping) stats work in task context. */
taskqueue_enqueue(adapter->tq, &adapter->stat_task);
SUME_LOCK(adapter);
if (adapter->send[SUME_RIFFA_CHANNEL_DATA]->state !=
SUME_RIFFA_CHAN_STATE_IDLE && ++adapter->wd_counter >= 3) {
/* Resetting interfaces if stuck for 3 seconds. */
device_printf(adapter->dev, "TX stuck, resetting adapter.\n");
/* Reading the info register resets the HW (see sume_attach()). */
read_reg(adapter, RIFFA_INFO_REG_OFF);
adapter->send[SUME_RIFFA_CHANNEL_DATA]->state =
SUME_RIFFA_CHAN_STATE_IDLE;
adapter->wd_counter = 0;
check_tx_queues(adapter);
}
SUME_UNLOCK(adapter);
/* Re-arm for one second from now. */
callout_reset(&adapter->timer, 1 * hz, sume_local_timer, adapter);
}
static void
sume_get_stats(void *context, int pending)
{
struct sume_adapter *adapter = context;
int i;
for (i = 0; i < SUME_NPORTS; i++) {
if_t ifp = adapter->ifp[i];
if (if_getflags(ifp) & IFF_UP) {
struct nf_priv *nf_priv = if_getsoftc(ifp);
struct sume_ifreq sifr;
sume_update_link_status(ifp);
/* Get RX counter. */
sifr.addr = SUME_STAT_RX_ADDR(nf_priv->port);
sifr.val = 0;
if (!get_modreg_value(nf_priv, &sifr))
nf_priv->stats.hw_rx_packets += sifr.val;
/* Get TX counter. */
sifr.addr = SUME_STAT_TX_ADDR(nf_priv->port);
sifr.val = 0;
if (!get_modreg_value(nf_priv, &sifr))
nf_priv->stats.hw_tx_packets += sifr.val;
}
}
}
/*
 * Attach the SUME adapter: probe the RIFFA PCI core and its DMA buffers,
 * create the per-port network interfaces, register sysctls, reset the
 * hardware, then start the statistics task and the watchdog callout.
 * On failure sume_detach() is called to unwind partially set-up state.
 */
static int
sume_attach(device_t dev)
{
struct sume_adapter *adapter = device_get_softc(dev);
adapter->dev = dev;
int error, i;
mtx_init(&adapter->lock, "Global lock", NULL, MTX_DEF);
adapter->running = 0;
/* OK finish up RIFFA. */
error = sume_probe_riffa_pci(adapter);
if (error != 0)
goto error;
error = sume_probe_riffa_buffers(adapter);
if (error != 0)
goto error;
/* Now do the network interfaces. */
/*
 * NOTE(review): the '-'/'+' prefixed lines below are unified-diff
 * residue from the source dump, not C code; the '+' lines are the
 * current code.  Per-port allocation failures are deliberately
 * ignored here, so adapter->ifp[] entries may remain NULL.
 */
- for (i = 0; i < SUME_NPORTS; i++) {
- error = sume_ifp_alloc(adapter, i);
- if (error != 0)
- goto error;
- }
+ for (i = 0; i < SUME_NPORTS; i++)
+ sume_ifp_alloc(adapter, i);
/* Register stats and register sysctls. */
sume_sysctl_init(adapter);
/* Reset the HW. */
read_reg(adapter, RIFFA_INFO_REG_OFF);
/* Ready to go, "enable" IRQ. */
adapter->running = 1;
callout_init(&adapter->timer, 1);
TASK_INIT(&adapter->stat_task, 0, sume_get_stats, adapter);
/* NOTE(review): taskqueue_create() can fail with M_NOWAIT -- confirm. */
adapter->tq = taskqueue_create("sume_stats", M_NOWAIT,
taskqueue_thread_enqueue, &adapter->tq);
taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s stattaskq",
device_get_nameunit(adapter->dev));
callout_reset(&adapter->timer, 1 * hz, sume_local_timer, adapter);
return (0);
error:
sume_detach(dev);
return (error);
}
/*
 * Free one array of per-channel RIFFA direction descriptors: release the
 * DMA memory of every loaded channel, then the descriptor itself.
 */
static void
sume_remove_riffa_buffer(const struct sume_adapter *adapter,
    struct riffa_chnl_dir **pp)
{
	struct riffa_chnl_dir *dir;
	int idx;

	for (idx = 0; idx < SUME_RIFFA_CHANNELS; idx++) {
		dir = pp[idx];
		if (dir == NULL)
			continue;
		/* Only channels with DMA memory carry a hardware address. */
		if (dir->buf_hw_addr != 0) {
			bus_dmamem_free(dir->ch_tag, dir->buf_addr,
			    dir->ch_map);
			dir->buf_hw_addr = 0;
		}
		free(dir, M_SUME);
	}
}
/*
 * Tear down both the send and receive RIFFA buffer sets, if present,
 * clearing the adapter pointers so a repeat call is harmless.
 */
static void
sume_remove_riffa_buffers(struct sume_adapter *adapter)
{
	struct riffa_chnl_dir **dirs;

	dirs = adapter->send;
	if (dirs != NULL) {
		sume_remove_riffa_buffer(adapter, dirs);
		free(dirs, M_SUME);
		adapter->send = NULL;
	}

	dirs = adapter->recv;
	if (dirs != NULL) {
		sume_remove_riffa_buffer(adapter, dirs);
		free(dirs, M_SUME);
		adapter->recv = NULL;
	}
}
/*
 * Detach the SUME device: stop the watchdog/stats machinery, detach and
 * free every network interface, release the RIFFA DMA buffers, IRQ and
 * BAR resources, and destroy the adapter lock.  Also used as the error
 * path of sume_attach(), so every step must tolerate partially
 * initialized state.
 */
static int
sume_detach(device_t dev)
{
struct sume_adapter *adapter = device_get_softc(dev);
int i;
struct nf_priv *nf_priv;
KASSERT(mtx_initialized(&adapter->lock), ("SUME mutex not "
"initialized"));
/* Stop sume_local_timer() from rescheduling itself. */
adapter->running = 0;
/* Drain the stats callout and task queue. */
callout_drain(&adapter->timer);
if (adapter->tq) {
taskqueue_drain(adapter->tq, &adapter->stat_task);
taskqueue_free(adapter->tq);
}
for (i = 0; i < SUME_NPORTS; i++) {
if_t ifp = adapter->ifp[i];
/* Ports whose allocation failed in attach are NULL. */
if (ifp == NULL)
continue;
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
nf_priv = if_getsoftc(ifp);
if (if_getflags(ifp) & IFF_UP)
if_down(ifp);
ifmedia_removeall(&nf_priv->media);
/* Return the unit number to the global allocator. */
free_unr(unr, nf_priv->unit);
if_setflagbits(ifp, 0, IFF_UP);
ether_ifdetach(ifp);
if_free(ifp);
free(nf_priv, M_SUME);
}
sume_remove_riffa_buffers(adapter);
/* Release interrupt, MSI and BAR resources, each guarded. */
if (adapter->irq.tag)
bus_teardown_intr(dev, adapter->irq.res, adapter->irq.tag);
if (adapter->irq.res)
bus_release_resource(dev, SYS_RES_IRQ, adapter->irq.rid,
adapter->irq.res);
pci_release_msi(dev);
if (adapter->bar0_addr)
bus_release_resource(dev, SYS_RES_MEMORY, adapter->rid,
adapter->bar0_addr);
mtx_destroy(&adapter->lock);
return (0);
}
/*
 * Module load/unload hook: create and destroy the global interface
 * unit-number allocator.
 */
static int
mod_event(module_t mod, int cmd, void *arg)
{
	if (cmd == MOD_LOAD)
		unr = new_unrhdr(0, INT_MAX, NULL);
	else if (cmd == MOD_UNLOAD)
		delete_unrhdr(unr);
	return (0);
}
DRIVER_MODULE(sume, pci, sume_driver, mod_event, NULL);
MODULE_VERSION(sume, 1);
diff --git a/sys/dev/ti/if_ti.c b/sys/dev/ti/if_ti.c
index 6f88862d8009..14f7d353303f 100644
--- a/sys/dev/ti/if_ti.c
+++ b/sys/dev/ti/if_ti.c
@@ -1,4042 +1,4037 @@
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) 1997, 1998, 1999
* Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Alteon Networks Tigon PCI gigabit ethernet driver for FreeBSD.
* Manuals, sample driver and firmware source kits are available
* from http://www.alteon.com/support/openkits.
*
* Written by Bill Paul <wpaul@ctr.columbia.edu>
* Electrical Engineering Department
* Columbia University, New York City
*/
/*
* The Alteon Networks Tigon chip contains an embedded R4000 CPU,
* gigabit MAC, dual DMA channels and a PCI interface unit. NICs
* using the Tigon may have anywhere from 512K to 2MB of SRAM. The
* Tigon supports hardware IP, TCP and UDP checksumming, multicast
* filtering and jumbo (9014 byte) frames. The hardware is largely
* controlled by firmware, which must be loaded into the NIC during
* initialization.
*
* The Tigon 2 contains 2 R4000 CPUs and requires a newer firmware
* revision, which supports new features such as extended commands,
* extended jumbo receive ring descriptors and a mini receive ring.
*
* Alteon Networks is to be commended for releasing such a vast amount
* of development material for the Tigon NIC without requiring an NDA
* (although they really should have done it a long time ago). With
* any luck, the other vendors will finally wise up and follow Alteon's
* stellar example.
*
* The firmware for the Tigon 1 and 2 NICs is compiled directly into
* this driver by #including it as a C header file. This bloats the
* driver somewhat, but it's the easiest method considering that the
* driver code and firmware code need to be kept in sync. The source
* for the firmware is not provided with the FreeBSD distribution since
* compiling it requires a GNU toolchain targeted for mips-sgi-irix5.3.
*
* The following people deserve special thanks:
* - Terry Murphy of 3Com, for providing a 3c985 Tigon 1 board
* for testing
* - Raymond Lee of Netgear, for providing a pair of Netgear
* GA620 Tigon 2 boards for testing
* - Ulf Zimmermann, for bringing the GA260 to my attention and
* convincing me to write this driver.
* - Andrew Gallatin for providing FreeBSD/Alpha support.
*/
#include <sys/cdefs.h>
#include "opt_ti.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/sf_buf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/bpf.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#ifdef TI_SF_BUF_JUMBO
#include <vm/vm.h>
#include <vm/vm_page.h>
#endif
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <sys/tiio.h>
#include <dev/ti/if_tireg.h>
#include <dev/ti/ti_fw.h>
#include <dev/ti/ti_fw2.h>
#include <sys/sysctl.h>
#define TI_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
/*
* We can only turn on header splitting if we're using extended receive
* BDs.
*/
#if defined(TI_JUMBO_HDRSPLIT) && !defined(TI_SF_BUF_JUMBO)
#error "options TI_JUMBO_HDRSPLIT requires TI_SF_BUF_JUMBO"
#endif /* TI_JUMBO_HDRSPLIT && !TI_SF_BUF_JUMBO */
/* Byte-swap direction selector for ti_bcopy_swap(). */
typedef enum {
TI_SWAP_HTON,
TI_SWAP_NTOH
} ti_swap_type;
/*
* Various supported device vendors/types and their names.
*/
/* PCI vendor/device IDs matched by ti_probe(); zero entry terminates. */
static const struct ti_type ti_devs[] = {
{ ALT_VENDORID, ALT_DEVICEID_ACENIC,
"Alteon AceNIC 1000baseSX Gigabit Ethernet" },
{ ALT_VENDORID, ALT_DEVICEID_ACENIC_COPPER,
"Alteon AceNIC 1000baseT Gigabit Ethernet" },
{ TC_VENDORID, TC_DEVICEID_3C985,
"3Com 3c985-SX Gigabit Ethernet" },
{ NG_VENDORID, NG_DEVICEID_GA620,
"Netgear GA620 1000baseSX Gigabit Ethernet" },
{ NG_VENDORID, NG_DEVICEID_GA620T,
"Netgear GA620 1000baseT Gigabit Ethernet" },
{ SGI_VENDORID, SGI_DEVICEID_TIGON,
"Silicon Graphics Gigabit Ethernet" },
{ DEC_VENDORID, DEC_DEVICEID_FARALLON_PN9000SX,
"Farallon PN9000SX Gigabit Ethernet" },
{ 0, 0, NULL }
};
/* Character-device entry points for the ti(4) debugging ioctl node. */
static d_open_t ti_open;
static d_close_t ti_close;
static d_ioctl_t ti_ioctl2;
static struct cdevsw ti_cdevsw = {
.d_version = D_VERSION,
.d_flags = 0,
.d_open = ti_open,
.d_close = ti_close,
.d_ioctl = ti_ioctl2,
.d_name = "ti",
};
static int ti_probe(device_t);
static int ti_attach(device_t);
static int ti_detach(device_t);
static void ti_txeof(struct ti_softc *);
static void ti_rxeof(struct ti_softc *);
static int ti_encap(struct ti_softc *, struct mbuf **);
static void ti_intr(void *);
static void ti_start(if_t);
static void ti_start_locked(if_t);
static int ti_ioctl(if_t, u_long, caddr_t);
static uint64_t ti_get_counter(if_t, ift_counter);
static void ti_init(void *);
static void ti_init_locked(void *);
static void ti_init2(struct ti_softc *);
static void ti_stop(struct ti_softc *);
static void ti_watchdog(void *);
static int ti_shutdown(device_t);
static int ti_ifmedia_upd(if_t);
static int ti_ifmedia_upd_locked(struct ti_softc *);
static void ti_ifmedia_sts(if_t, struct ifmediareq *);
static uint32_t ti_eeprom_putbyte(struct ti_softc *, int);
static uint8_t ti_eeprom_getbyte(struct ti_softc *, int, uint8_t *);
static int ti_read_eeprom(struct ti_softc *, caddr_t, int, int);
static u_int ti_add_mcast(void *, struct sockaddr_dl *, u_int);
static u_int ti_del_mcast(void *, struct sockaddr_dl *, u_int);
static void ti_setmulti(struct ti_softc *);
static void ti_mem_read(struct ti_softc *, uint32_t, uint32_t, void *);
static void ti_mem_write(struct ti_softc *, uint32_t, uint32_t, void *);
static void ti_mem_zero(struct ti_softc *, uint32_t, uint32_t);
static int ti_copy_mem(struct ti_softc *, uint32_t, uint32_t, caddr_t, int,
int);
static int ti_copy_scratch(struct ti_softc *, uint32_t, uint32_t, caddr_t,
int, int, int);
static int ti_bcopy_swap(const void *, void *, size_t, ti_swap_type);
static void ti_loadfw(struct ti_softc *);
static void ti_cmd(struct ti_softc *, struct ti_cmd_desc *);
static void ti_cmd_ext(struct ti_softc *, struct ti_cmd_desc *, caddr_t, int);
static void ti_handle_events(struct ti_softc *);
static void ti_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int ti_dma_alloc(struct ti_softc *);
static void ti_dma_free(struct ti_softc *);
static int ti_dma_ring_alloc(struct ti_softc *, bus_size_t, bus_size_t,
bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
static void ti_dma_ring_free(struct ti_softc *, bus_dma_tag_t *, uint8_t **,
bus_dmamap_t, bus_addr_t *);
static int ti_newbuf_std(struct ti_softc *, int);
static int ti_newbuf_mini(struct ti_softc *, int);
static int ti_newbuf_jumbo(struct ti_softc *, int, struct mbuf *);
static int ti_init_rx_ring_std(struct ti_softc *);
static void ti_free_rx_ring_std(struct ti_softc *);
static int ti_init_rx_ring_jumbo(struct ti_softc *);
static void ti_free_rx_ring_jumbo(struct ti_softc *);
static int ti_init_rx_ring_mini(struct ti_softc *);
static void ti_free_rx_ring_mini(struct ti_softc *);
static void ti_free_tx_ring(struct ti_softc *);
static int ti_init_tx_ring(struct ti_softc *);
static void ti_discard_std(struct ti_softc *, int);
#ifndef TI_SF_BUF_JUMBO
static void ti_discard_jumbo(struct ti_softc *, int);
#endif
static void ti_discard_mini(struct ti_softc *, int);
static int ti_64bitslot_war(struct ti_softc *);
static int ti_chipinit(struct ti_softc *);
static int ti_gibinit(struct ti_softc *);
#ifdef TI_JUMBO_HDRSPLIT
static __inline void ti_hdr_split(struct mbuf *top, int hdr_len, int pkt_len,
int idx);
#endif /* TI_JUMBO_HDRSPLIT */
static void ti_sysctl_node(struct ti_softc *);
/* device(9) method table and newbus/module registration glue. */
static device_method_t ti_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, ti_probe),
DEVMETHOD(device_attach, ti_attach),
DEVMETHOD(device_detach, ti_detach),
DEVMETHOD(device_shutdown, ti_shutdown),
{ 0, 0 }
};
static driver_t ti_driver = {
"ti",
ti_methods,
sizeof(struct ti_softc)
};
DRIVER_MODULE(ti, pci, ti_driver, 0, 0);
MODULE_DEPEND(ti, pci, 1, 1, 1);
MODULE_DEPEND(ti, ether, 1, 1, 1);
/*
* Send an instruction or address to the EEPROM, check for ACK.
*/
/*
 * Bit-bang one byte out to the EEPROM, MSB first, then sample the ACK
 * bit on the data-in line.  Callers treat a non-zero return as "no ACK"
 * (failure).
 */
static uint32_t
ti_eeprom_putbyte(struct ti_softc *sc, int byte)
{
	uint32_t ack;
	int bit;

	/* Drive the data line: enter TX mode. */
	TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);

	/* Shift the byte out, strobing the clock once per bit. */
	for (bit = 0x80; bit != 0; bit >>= 1) {
		if ((byte & bit) != 0)
			TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
		else
			TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
		DELAY(1);
		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
		DELAY(1);
		TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
	}

	/* Release the data line so the EEPROM can answer. */
	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);

	/* One more clock pulse while sampling DIN for the ACK. */
	TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
	ack = CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN;
	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);

	return (ack);
}
/*
* Read a byte of data stored in the EEPROM at address 'addr.'
* We have to send two address bytes since the EEPROM can hold
* more than 256 bytes of data.
*/
/*
 * Clock a byte out of the EEPROM at "addr" into "*dest" using the
 * write-address / read-data command sequence.  Returns 0 on success,
 * 1 if the device fails to ACK any command or address byte.
 */
static uint8_t
ti_eeprom_getbyte(struct ti_softc *sc, int addr, uint8_t *dest)
{
int i;
uint8_t byte = 0;
EEPROM_START;
/*
 * Send write control code to EEPROM.
 */
if (ti_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
device_printf(sc->ti_dev,
"failed to send write command, status: %x\n",
CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
return (1);
}
/*
 * Send first byte of address of byte we want to read.
 */
if (ti_eeprom_putbyte(sc, (addr >> 8) & 0xFF)) {
device_printf(sc->ti_dev, "failed to send address, status: %x\n",
CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
return (1);
}
/*
 * Send second byte address of byte we want to read.
 */
if (ti_eeprom_putbyte(sc, addr & 0xFF)) {
device_printf(sc->ti_dev, "failed to send address, status: %x\n",
CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
return (1);
}
EEPROM_STOP;
EEPROM_START;
/*
 * Send read control code to EEPROM.
 */
if (ti_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
device_printf(sc->ti_dev,
"failed to send read command, status: %x\n",
CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
return (1);
}
/*
 * Start reading bits from EEPROM.
 */
/* Release the data line; the EEPROM now drives it. */
TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
/* Shift the byte in, MSB first, sampling DIN on each clock high. */
for (i = 0x80; i; i >>= 1) {
TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
DELAY(1);
if (CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN)
byte |= i;
TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
DELAY(1);
}
EEPROM_STOP;
/*
 * No ACK generated for read, so just return byte.
 */
*dest = byte;
return (0);
}
/*
* Read a sequence of bytes from the EEPROM.
*/
/*
 * Read "cnt" consecutive bytes from the EEPROM starting at offset "off"
 * into "dest".  Returns 0 on success, 1 on the first byte that fails.
 */
static int
ti_read_eeprom(struct ti_softc *sc, caddr_t dest, int off, int cnt)
{
	uint8_t val;
	int idx;

	for (idx = 0; idx < cnt; idx++) {
		val = 0;
		if (ti_eeprom_getbyte(sc, off + idx, &val) != 0)
			return (1);
		dest[idx] = val;
	}
	return (0);
}
/*
* NIC memory read function.
* Can be used to copy data from NIC local memory.
*/
/*
 * NIC memory read function.
 * Copy "len" bytes of NIC local memory starting at "addr" into "buf",
 * moving the shared-memory window over the region one chunk at a time.
 */
static void
ti_mem_read(struct ti_softc *sc, uint32_t addr, uint32_t len, void *buf)
{
	char *dst;
	int chunk, off, remain;

	off = addr;
	remain = len;
	dst = buf;
	while (remain != 0) {
		/* Short tails take everything left; otherwise stop at the
		 * end of the current window. */
		if (remain < TI_WINLEN)
			chunk = remain;
		else
			chunk = TI_WINLEN - (off % TI_WINLEN);
		CSR_WRITE_4(sc, TI_WINBASE, rounddown2(off, TI_WINLEN));
		bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
		    TI_WINDOW + (off & (TI_WINLEN - 1)), (uint32_t *)dst,
		    chunk / 4);
		dst += chunk;
		off += chunk;
		remain -= chunk;
	}
}
/*
* NIC memory write function.
* Can be used to copy data into NIC local memory.
*/
/*
 * NIC memory write function.
 * Copy "len" bytes from host buffer "buf" into NIC local memory at
 * "addr", one shared-memory window chunk at a time.
 */
static void
ti_mem_write(struct ti_softc *sc, uint32_t addr, uint32_t len, void *buf)
{
	char *src;
	int chunk, off, remain;

	off = addr;
	remain = len;
	src = buf;
	while (remain != 0) {
		/* Short tails take everything left; otherwise stop at the
		 * end of the current window. */
		if (remain < TI_WINLEN)
			chunk = remain;
		else
			chunk = TI_WINLEN - (off % TI_WINLEN);
		CSR_WRITE_4(sc, TI_WINBASE, rounddown2(off, TI_WINLEN));
		bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
		    TI_WINDOW + (off & (TI_WINLEN - 1)), (uint32_t *)src,
		    chunk / 4);
		src += chunk;
		off += chunk;
		remain -= chunk;
	}
}
/*
 * NIC memory clear function.
 * Can be used to clear a section of NIC local memory.
 */
/*
 * Zero "len" bytes of NIC local memory starting at "addr", one
 * shared-memory window chunk at a time.
 */
static void
ti_mem_zero(struct ti_softc *sc, uint32_t addr, uint32_t len)
{
	int chunk, off, remain;

	off = addr;
	remain = len;
	while (remain != 0) {
		/* Short tails take everything left; otherwise stop at the
		 * end of the current window. */
		if (remain < TI_WINLEN)
			chunk = remain;
		else
			chunk = TI_WINLEN - (off % TI_WINLEN);
		CSR_WRITE_4(sc, TI_WINBASE, rounddown2(off, TI_WINLEN));
		bus_space_set_region_4(sc->ti_btag, sc->ti_bhandle,
		    TI_WINDOW + (off & (TI_WINLEN - 1)), 0, chunk / 4);
		off += chunk;
		remain -= chunk;
	}
}
/*
 * Copy "len" bytes between host buffer "buf" and NIC local memory at
 * "tigon_addr" through the shared-memory window.  "useraddr" selects
 * copyin()/copyout() (dropping the lock around them) versus bcopy();
 * "readdata" non-zero means NIC->host.  Word data is byte-swapped via
 * ti_bcopy_swap(); a trailing non-word-aligned residue is handled with
 * a read/modify/write.  Returns 0 or an errno from copyin()/copyout().
 */
static int
ti_copy_mem(struct ti_softc *sc, uint32_t tigon_addr, uint32_t len,
caddr_t buf, int useraddr, int readdata)
{
int segptr, segsize, cnt;
caddr_t ptr;
uint32_t origwin;
int error, resid, segresid;
int first_pass;
TI_LOCK_ASSERT(sc);
error = 0;
/*
 * At the moment, we don't handle non-aligned cases, we just bail.
 * If this proves to be a problem, it will be fixed.
 */
if (readdata == 0 && (tigon_addr & 0x3) != 0) {
device_printf(sc->ti_dev, "%s: tigon address %#x isn't "
"word-aligned\n", __func__, tigon_addr);
device_printf(sc->ti_dev, "%s: unaligned writes aren't "
"yet supported\n", __func__);
return (EINVAL);
}
/* Round the start down to a word; the leading slack is "segresid". */
segptr = tigon_addr & ~0x3;
segresid = tigon_addr - segptr;
/*
 * This is the non-aligned amount left over that we'll need to
 * copy.
 */
resid = len & 0x3;
/* Add in the left over amount at the front of the buffer */
resid += segresid;
cnt = len & ~0x3;
/*
 * If resid + segresid is >= 4, add multiples of 4 to the count and
 * decrease the residual by that much.
 */
cnt += resid & ~0x3;
resid -= resid & ~0x3;
ptr = buf;
first_pass = 1;
/*
 * Save the old window base value.
 */
origwin = CSR_READ_4(sc, TI_WINBASE);
while (cnt != 0 && error == 0) {
bus_size_t ti_offset;
if (cnt < TI_WINLEN)
segsize = cnt;
else
segsize = TI_WINLEN - (segptr % TI_WINLEN);
CSR_WRITE_4(sc, TI_WINBASE, rounddown2(segptr, TI_WINLEN));
ti_offset = TI_WINDOW + (segptr & (TI_WINLEN -1));
if (readdata) {
bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
ti_offset, (uint32_t *)sc->ti_membuf, segsize >> 2);
if (useraddr) {
/*
 * Yeah, this is a little on the kludgy
 * side, but at least this code is only
 * used for debugging.
 */
ti_bcopy_swap(sc->ti_membuf, sc->ti_membuf2,
segsize, TI_SWAP_NTOH);
TI_UNLOCK(sc);
/* First pass skips the leading "segresid" slack bytes. */
if (first_pass) {
error = copyout(
&sc->ti_membuf2[segresid], ptr,
segsize - segresid);
first_pass = 0;
} else
error = copyout(sc->ti_membuf2, ptr,
segsize);
TI_LOCK(sc);
} else {
if (first_pass) {
ti_bcopy_swap(sc->ti_membuf,
sc->ti_membuf2, segsize,
TI_SWAP_NTOH);
TI_UNLOCK(sc);
bcopy(&sc->ti_membuf2[segresid], ptr,
segsize - segresid);
TI_LOCK(sc);
first_pass = 0;
} else
ti_bcopy_swap(sc->ti_membuf, ptr,
segsize, TI_SWAP_NTOH);
}
} else {
if (useraddr) {
TI_UNLOCK(sc);
error = copyin(ptr, sc->ti_membuf2, segsize);
TI_LOCK(sc);
ti_bcopy_swap(sc->ti_membuf2, sc->ti_membuf,
segsize, TI_SWAP_HTON);
} else
ti_bcopy_swap(ptr, sc->ti_membuf, segsize,
TI_SWAP_HTON);
if (error == 0) {
bus_space_write_region_4(sc->ti_btag,
sc->ti_bhandle, ti_offset,
(uint32_t *)sc->ti_membuf, segsize >> 2);
}
}
segptr += segsize;
ptr += segsize;
cnt -= segsize;
}
/*
 * Handle leftover, non-word-aligned bytes.
 */
if (resid != 0 && error == 0) {
uint32_t tmpval, tmpval2;
bus_size_t ti_offset;
/*
 * Set the segment pointer.
 */
CSR_WRITE_4(sc, TI_WINBASE, rounddown2(segptr, TI_WINLEN));
ti_offset = TI_WINDOW + (segptr & (TI_WINLEN - 1));
/*
 * First, grab whatever is in our source/destination.
 * We'll obviously need this for reads, but also for
 * writes, since we'll be doing read/modify/write.
 */
bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
ti_offset, &tmpval, 1);
/*
 * Next, translate this from little-endian to big-endian
 * (at least on i386 boxes).
 */
tmpval2 = ntohl(tmpval);
if (readdata) {
/*
 * If we're reading, just copy the leftover number
 * of bytes from the host byte order buffer to
 * the user's buffer.
 */
if (useraddr) {
TI_UNLOCK(sc);
error = copyout(&tmpval2, ptr, resid);
TI_LOCK(sc);
} else
bcopy(&tmpval2, ptr, resid);
} else {
/*
 * If we're writing, first copy the bytes to be
 * written into the network byte order buffer,
 * leaving the rest of the buffer with whatever was
 * originally in there. Then, swap the bytes
 * around into host order and write them out.
 *
 * XXX KDM the read side of this has been verified
 * to work, but the write side of it has not been
 * verified. So user beware.
 */
if (useraddr) {
TI_UNLOCK(sc);
error = copyin(ptr, &tmpval2, resid);
TI_LOCK(sc);
} else
bcopy(ptr, &tmpval2, resid);
if (error == 0) {
tmpval = htonl(tmpval2);
bus_space_write_region_4(sc->ti_btag,
sc->ti_bhandle, ti_offset, &tmpval, 1);
}
}
}
/* Restore the window base saved on entry. */
CSR_WRITE_4(sc, TI_WINBASE, origwin);
return (error);
}
/*
 * Copy "len" bytes (word-aligned, as must be "tigon_addr") between host
 * buffer "buf" and the scratchpad of CPU "cpu" via the TI_SRAM_ADDR /
 * TI_SRAM_DATA register pair.  "useraddr" selects copyin()/copyout()
 * versus bcopy(); "readdata" non-zero means NIC->host.  Returns 0 or an
 * errno from copyin()/copyout().
 */
static int
ti_copy_scratch(struct ti_softc *sc, uint32_t tigon_addr, uint32_t len,
    caddr_t buf, int useraddr, int readdata, int cpu)
{
	uint32_t segptr;
	int cnt, error;
	uint32_t tmpval, tmpval2;
	caddr_t ptr;

	TI_LOCK_ASSERT(sc);

	/*
	 * At the moment, we don't handle non-aligned cases, we just bail.
	 * If this proves to be a problem, it will be fixed.
	 */
	if (tigon_addr & 0x3) {
		device_printf(sc->ti_dev, "%s: tigon address %#x "
		    "isn't word-aligned\n", __func__, tigon_addr);
		return (EINVAL);
	}

	if (len & 0x3) {
		device_printf(sc->ti_dev, "%s: transfer length %d "
		    "isn't word-aligned\n", __func__, len);
		return (EINVAL);
	}

	segptr = tigon_addr;
	cnt = len;
	ptr = buf;

	/*
	 * Fix: "error" was previously read by the loop condition without
	 * ever being initialized (undefined behavior).
	 */
	error = 0;

	while (cnt && error == 0) {
		CSR_WRITE_4(sc, CPU_REG(TI_SRAM_ADDR, cpu), segptr);
		if (readdata) {
			tmpval2 = CSR_READ_4(sc, CPU_REG(TI_SRAM_DATA, cpu));
			tmpval = ntohl(tmpval2);

			/*
			 * Note: when debugging unoptimized firmware builds
			 * (seen with Alteon 12.3.15, GCC 2.7.2.1, binutils
			 * 2.9.1), a bogus stack value of 0xc0017c is
			 * sometimes read back instead of the scratchpad
			 * contents; the underlying data is unaffected, only
			 * the value read out.  Flag both raw and swapped
			 * occurrences.  (Original note by KDM, 3/7/2000.)
			 */
			if (tmpval2 == 0xc0017c)
				device_printf(sc->ti_dev, "found 0xc0017c at "
				    "%#x (tmpval2)\n", segptr);

			if (tmpval == 0xc0017c)
				device_printf(sc->ti_dev, "found 0xc0017c at "
				    "%#x (tmpval)\n", segptr);

			if (useraddr)
				error = copyout(&tmpval, ptr, 4);
			else
				bcopy(&tmpval, ptr, 4);
		} else {
			if (useraddr)
				error = copyin(ptr, &tmpval2, 4);
			else
				bcopy(ptr, &tmpval2, 4);

			if (error == 0) {
				tmpval = htonl(tmpval2);
				CSR_WRITE_4(sc, CPU_REG(TI_SRAM_DATA, cpu),
				    tmpval);
			}
		}

		cnt -= 4;
		segptr += 4;
		ptr += 4;
	}

	return (error);
}
/*
 * Copy "len" bytes (which must be a multiple of 4) from "src" to "dst",
 * byte-swapping each 32-bit word according to "swap_type".  Returns 0 on
 * success, -1 if the length is not 32-bit aligned.
 */
static int
ti_bcopy_swap(const void *src, void *dst, size_t len, ti_swap_type swap_type)
{
	const uint8_t *in;
	uint8_t *out;
	size_t left;

	if (len & 0x3) {
		/* Fix: %zu is the correct conversion for size_t (%zd is
		 * for signed sizes; a mismatch is undefined behavior). */
		printf("ti_bcopy_swap: length %zu isn't 32-bit aligned\n",
		    len);
		return (-1);
	}

	in = src;
	out = dst;
	for (left = len; left != 0; left -= 4) {
		if (swap_type == TI_SWAP_NTOH)
			*(uint32_t *)out = ntohl(*(const uint32_t *)in);
		else
			*(uint32_t *)out = htonl(*(const uint32_t *)in);
		in += 4;
		out += 4;
	}

	return (0);
}
/*
* Load firmware image into the NIC. Check that the firmware revision
* is acceptable and see if we want the firmware for the Tigon 1 or
* Tigon 2.
*/
/*
 * Download the compiled-in firmware matching the hardware revision
 * (Tigon 1 or Tigon 2) into NIC memory, zero its BSS/SBSS sections and
 * set the CPU program counter to the firmware entry point.  Bails out
 * if the embedded image version disagrees with TI_FIRMWARE_*.
 */
static void
ti_loadfw(struct ti_softc *sc)
{
TI_LOCK_ASSERT(sc);
switch (sc->ti_hwrev) {
case TI_HWREV_TIGON:
/* Refuse to load a firmware image we were not built against. */
if (tigonFwReleaseMajor != TI_FIRMWARE_MAJOR ||
tigonFwReleaseMinor != TI_FIRMWARE_MINOR ||
tigonFwReleaseFix != TI_FIRMWARE_FIX) {
device_printf(sc->ti_dev, "firmware revision mismatch; "
"want %d.%d.%d, got %d.%d.%d\n",
TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR,
TI_FIRMWARE_FIX, tigonFwReleaseMajor,
tigonFwReleaseMinor, tigonFwReleaseFix);
return;
}
ti_mem_write(sc, tigonFwTextAddr, tigonFwTextLen, tigonFwText);
ti_mem_write(sc, tigonFwDataAddr, tigonFwDataLen, tigonFwData);
ti_mem_write(sc, tigonFwRodataAddr, tigonFwRodataLen,
tigonFwRodata);
ti_mem_zero(sc, tigonFwBssAddr, tigonFwBssLen);
ti_mem_zero(sc, tigonFwSbssAddr, tigonFwSbssLen);
CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigonFwStartAddr);
break;
case TI_HWREV_TIGON_II:
if (tigon2FwReleaseMajor != TI_FIRMWARE_MAJOR ||
tigon2FwReleaseMinor != TI_FIRMWARE_MINOR ||
tigon2FwReleaseFix != TI_FIRMWARE_FIX) {
device_printf(sc->ti_dev, "firmware revision mismatch; "
"want %d.%d.%d, got %d.%d.%d\n",
TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR,
TI_FIRMWARE_FIX, tigon2FwReleaseMajor,
tigon2FwReleaseMinor, tigon2FwReleaseFix);
return;
}
ti_mem_write(sc, tigon2FwTextAddr, tigon2FwTextLen,
tigon2FwText);
ti_mem_write(sc, tigon2FwDataAddr, tigon2FwDataLen,
tigon2FwData);
ti_mem_write(sc, tigon2FwRodataAddr, tigon2FwRodataLen,
tigon2FwRodata);
ti_mem_zero(sc, tigon2FwBssAddr, tigon2FwBssLen);
ti_mem_zero(sc, tigon2FwSbssAddr, tigon2FwSbssLen);
CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigon2FwStartAddr);
break;
default:
device_printf(sc->ti_dev,
"can't load firmware: unknown hardware rev\n");
break;
}
}
/*
* Send the NIC a command via the command ring.
*/
/*
 * Post a single-slot command descriptor on the NIC command ring and
 * advance the saved producer index.
 */
static void
ti_cmd(struct ti_softc *sc, struct ti_cmd_desc *cmd)
{
	int slot;

	slot = sc->ti_cmd_saved_prodidx;
	/* Write the descriptor into the shared-memory command ring. */
	CSR_WRITE_4(sc, TI_GCR_CMDRING + (slot * 4), *(uint32_t *)(cmd));
	TI_INC(slot, TI_CMD_RING_CNT);
	/* Publish the new producer index to the NIC mailbox. */
	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, slot);
	sc->ti_cmd_saved_prodidx = slot;
}
/*
* Send the NIC an extended command. The 'len' parameter specifies the
* number of command slots to include after the initial command.
*/
/*
 * Post an extended command: the initial descriptor followed by "len"
 * extra 32-bit argument slots taken from "arg", then publish the new
 * producer index.
 */
static void
ti_cmd_ext(struct ti_softc *sc, struct ti_cmd_desc *cmd, caddr_t arg, int len)
{
	int slot, n;

	slot = sc->ti_cmd_saved_prodidx;
	CSR_WRITE_4(sc, TI_GCR_CMDRING + (slot * 4), *(uint32_t *)(cmd));
	TI_INC(slot, TI_CMD_RING_CNT);
	for (n = 0; n < len; n++) {
		CSR_WRITE_4(sc, TI_GCR_CMDRING + (slot * 4),
		    *(uint32_t *)(&arg[n * 4]));
		TI_INC(slot, TI_CMD_RING_CNT);
	}
	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, slot);
	sc->ti_cmd_saved_prodidx = slot;
}
/*
* Handle events that have triggered interrupts.
*/
/*
 * Drain the NIC event ring: handle link-state changes, firmware-up and
 * error events, advancing the saved consumer index and echoing it back
 * to the hardware after each event.
 */
static void
ti_handle_events(struct ti_softc *sc)
{
struct ti_event_desc *e;
if (sc->ti_rdata.ti_event_ring == NULL)
return;
bus_dmamap_sync(sc->ti_cdata.ti_event_ring_tag,
sc->ti_cdata.ti_event_ring_map, BUS_DMASYNC_POSTREAD);
/* Process until our consumer index catches the NIC's producer. */
while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) {
e = &sc->ti_rdata.ti_event_ring[sc->ti_ev_saved_considx];
switch (TI_EVENT_EVENT(e)) {
case TI_EV_LINKSTAT_CHANGED:
sc->ti_linkstat = TI_EVENT_CODE(e);
if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) {
if_link_state_change(sc->ti_ifp, LINK_STATE_UP);
if_setbaudrate(sc->ti_ifp, IF_Mbps(100));
if (bootverbose)
device_printf(sc->ti_dev,
"10/100 link up\n");
} else if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) {
if_link_state_change(sc->ti_ifp, LINK_STATE_UP);
if_setbaudrate(sc->ti_ifp, IF_Gbps(1UL));
if (bootverbose)
device_printf(sc->ti_dev,
"gigabit link up\n");
} else if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) {
if_link_state_change(sc->ti_ifp,
LINK_STATE_DOWN);
if_setbaudrate(sc->ti_ifp, 0);
if (bootverbose)
device_printf(sc->ti_dev,
"link down\n");
}
break;
case TI_EV_ERROR:
if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_INVAL_CMD)
device_printf(sc->ti_dev, "invalid command\n");
else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_UNIMP_CMD)
device_printf(sc->ti_dev, "unknown command\n");
else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_BADCFG)
device_printf(sc->ti_dev, "bad config data\n");
break;
case TI_EV_FIRMWARE_UP:
/* Firmware finished booting; run second-stage init. */
ti_init2(sc);
break;
case TI_EV_STATS_UPDATED:
case TI_EV_RESET_JUMBO_RING:
case TI_EV_MCAST_UPDATED:
/* Who cares. */
break;
default:
device_printf(sc->ti_dev, "unknown event: %d\n",
TI_EVENT_EVENT(e));
break;
}
/* Advance the consumer index. */
TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT);
CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, sc->ti_ev_saved_considx);
}
bus_dmamap_sync(sc->ti_cdata.ti_event_ring_tag,
sc->ti_cdata.ti_event_ring_map, BUS_DMASYNC_PREREAD);
}
/* Context handed to ti_dma_map_addr() to capture the mapped address. */
struct ti_dmamap_arg {
bus_addr_t ti_busaddr;
};
/*
 * bus_dmamap_load() callback: record the bus address of the single DMA
 * segment.  On error the context is left untouched (caller pre-zeroes
 * it); more than one segment is a programming error.
 */
static void
ti_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
struct ti_dmamap_arg *ctx;
if (error)
return;
KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
ctx = arg;
ctx->ti_busaddr = segs->ds_addr;
}
/*
 * Allocate one descriptor ring: create a bus_dma tag with the given
 * alignment, allocate zeroed DMA-coherent memory and load it, returning
 * the bus address in *paddr.  "msg" names the ring in error messages.
 * On failure the partially created resources are left in place for
 * ti_dma_ring_free() to reclaim.
 */
static int
ti_dma_ring_alloc(struct ti_softc *sc, bus_size_t alignment, bus_size_t maxsize,
bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr,
const char *msg)
{
struct ti_dmamap_arg ctx;
int error;
error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag,
alignment, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
if (error != 0) {
device_printf(sc->ti_dev,
"could not create %s dma tag\n", msg);
return (error);
}
/* Allocate DMA'able memory for ring. */
error = bus_dmamem_alloc(*tag, (void **)ring,
BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
if (error != 0) {
device_printf(sc->ti_dev,
"could not allocate DMA'able memory for %s\n", msg);
return (error);
}
/* Load the address of the ring. */
ctx.ti_busaddr = 0;
error = bus_dmamap_load(*tag, *map, *ring, maxsize, ti_dma_map_addr,
&ctx, BUS_DMA_NOWAIT);
if (error != 0) {
device_printf(sc->ti_dev,
"could not load DMA'able memory for %s\n", msg);
return (error);
}
*paddr = ctx.ti_busaddr;
return (0);
}
/*
 * Release a ring created by ti_dma_ring_alloc(): unload the DMA map,
 * free the ring memory and destroy the tag.  Every step is guarded so
 * partially allocated rings can be freed safely.
 */
static void
ti_dma_ring_free(struct ti_softc *sc, bus_dma_tag_t *tag, uint8_t **ring,
bus_dmamap_t map, bus_addr_t *paddr)
{
/* A non-zero bus address means the map is still loaded. */
if (*paddr != 0) {
bus_dmamap_unload(*tag, map);
*paddr = 0;
}
if (*ring != NULL) {
bus_dmamem_free(*tag, *ring, map);
*ring = NULL;
}
if (*tag) {
bus_dma_tag_destroy(*tag);
*tag = NULL;
}
}
/*
 * Allocate all DMA resources for the controller: the parent tag, the
 * shared control areas (GIB, status block, event ring), the RX
 * descriptor rings, and the per-buffer mbuf tags and maps.  The mini
 * RX ring and the host-resident TX ring exist only on the Tigon 2, so
 * those are skipped on Tigon 1.
 *
 * Returns 0 on success or a busdma errno.  On failure the caller is
 * expected to invoke ti_dma_free(), which copes with a partially
 * completed allocation.
 *
 * Fixes relative to the previous revision:
 *  - the status block's diagnostic label was a copy-paste of
 *    "event ring"; it now reads "status block".
 *  - all failure paths now propagate the busdma error code instead of
 *    a mix of 'error' and hard-coded ENOMEM (callers only test for
 *    non-zero, so this is backward compatible).
 */
static int
ti_dma_alloc(struct ti_softc *sc)
{
	bus_addr_t lowaddr;
	int i, error;

	/* Restrict DMA below 4GB unless 64-bit addressing (DAC) is on. */
	lowaddr = BUS_SPACE_MAXADDR;
	if (sc->ti_dac == 0)
		lowaddr = BUS_SPACE_MAXADDR_32BIT;

	error = bus_dma_tag_create(bus_get_dma_tag(sc->ti_dev), 1, 0, lowaddr,
	    BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->ti_cdata.ti_parent_tag);
	if (error != 0) {
		device_printf(sc->ti_dev,
		    "could not allocate parent dma tag\n");
		return (error);
	}

	/* General information block. */
	error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, sizeof(struct ti_gib),
	    &sc->ti_cdata.ti_gib_tag, (uint8_t **)&sc->ti_rdata.ti_info,
	    &sc->ti_cdata.ti_gib_map, &sc->ti_rdata.ti_info_paddr, "GIB");
	if (error)
		return (error);

	/* Producer/consumer status */
	error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, sizeof(struct ti_status),
	    &sc->ti_cdata.ti_status_tag, (uint8_t **)&sc->ti_rdata.ti_status,
	    &sc->ti_cdata.ti_status_map, &sc->ti_rdata.ti_status_paddr,
	    "status block");
	if (error)
		return (error);

	/* Event ring */
	error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, TI_EVENT_RING_SZ,
	    &sc->ti_cdata.ti_event_ring_tag,
	    (uint8_t **)&sc->ti_rdata.ti_event_ring,
	    &sc->ti_cdata.ti_event_ring_map, &sc->ti_rdata.ti_event_ring_paddr,
	    "event ring");
	if (error)
		return (error);

	/* Command ring lives in shared memory so no need to create DMA area. */

	/* Standard RX ring */
	error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, TI_STD_RX_RING_SZ,
	    &sc->ti_cdata.ti_rx_std_ring_tag,
	    (uint8_t **)&sc->ti_rdata.ti_rx_std_ring,
	    &sc->ti_cdata.ti_rx_std_ring_map,
	    &sc->ti_rdata.ti_rx_std_ring_paddr, "RX ring");
	if (error)
		return (error);

	/* Jumbo RX ring */
	error = ti_dma_ring_alloc(sc, TI_JUMBO_RING_ALIGN, TI_JUMBO_RX_RING_SZ,
	    &sc->ti_cdata.ti_rx_jumbo_ring_tag,
	    (uint8_t **)&sc->ti_rdata.ti_rx_jumbo_ring,
	    &sc->ti_cdata.ti_rx_jumbo_ring_map,
	    &sc->ti_rdata.ti_rx_jumbo_ring_paddr, "jumbo RX ring");
	if (error)
		return (error);

	/* RX return ring */
	error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, TI_RX_RETURN_RING_SZ,
	    &sc->ti_cdata.ti_rx_return_ring_tag,
	    (uint8_t **)&sc->ti_rdata.ti_rx_return_ring,
	    &sc->ti_cdata.ti_rx_return_ring_map,
	    &sc->ti_rdata.ti_rx_return_ring_paddr, "RX return ring");
	if (error)
		return (error);

	/* Create DMA tag for standard RX mbufs. */
	error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
	    MCLBYTES, 0, NULL, NULL, &sc->ti_cdata.ti_rx_std_tag);
	if (error) {
		device_printf(sc->ti_dev, "could not allocate RX dma tag\n");
		return (error);
	}

	/* Create DMA tag for jumbo RX mbufs. */
#ifdef TI_SF_BUF_JUMBO
	/*
	 * The VM system will take care of providing aligned pages.  Alignment
	 * is set to 1 here so that busdma resources won't be wasted.
	 */
	error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, PAGE_SIZE * 4, 4,
	    PAGE_SIZE, 0, NULL, NULL, &sc->ti_cdata.ti_rx_jumbo_tag);
#else
	error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MJUM9BYTES, 1,
	    MJUM9BYTES, 0, NULL, NULL, &sc->ti_cdata.ti_rx_jumbo_tag);
#endif
	if (error) {
		device_printf(sc->ti_dev,
		    "could not allocate jumbo RX dma tag\n");
		return (error);
	}

	/* Create DMA tag for TX mbufs. */
	error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, 1,
	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * TI_MAXTXSEGS, TI_MAXTXSEGS, MCLBYTES, 0, NULL, NULL,
	    &sc->ti_cdata.ti_tx_tag);
	if (error) {
		device_printf(sc->ti_dev, "could not allocate TX dma tag\n");
		return (error);
	}

	/* Create DMA maps for RX buffers. */
	for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->ti_cdata.ti_rx_std_tag, 0,
		    &sc->ti_cdata.ti_rx_std_maps[i]);
		if (error) {
			device_printf(sc->ti_dev,
			    "could not create DMA map for RX\n");
			return (error);
		}
	}
	error = bus_dmamap_create(sc->ti_cdata.ti_rx_std_tag, 0,
	    &sc->ti_cdata.ti_rx_std_sparemap);
	if (error) {
		device_printf(sc->ti_dev,
		    "could not create spare DMA map for RX\n");
		return (error);
	}

	/* Create DMA maps for jumbo RX buffers. */
	for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->ti_cdata.ti_rx_jumbo_tag, 0,
		    &sc->ti_cdata.ti_rx_jumbo_maps[i]);
		if (error) {
			device_printf(sc->ti_dev,
			    "could not create DMA map for jumbo RX\n");
			return (error);
		}
	}
	error = bus_dmamap_create(sc->ti_cdata.ti_rx_jumbo_tag, 0,
	    &sc->ti_cdata.ti_rx_jumbo_sparemap);
	if (error) {
		device_printf(sc->ti_dev,
		    "could not create spare DMA map for jumbo RX\n");
		return (error);
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < TI_TX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->ti_cdata.ti_tx_tag, 0,
		    &sc->ti_cdata.ti_txdesc[i].tx_dmamap);
		if (error) {
			device_printf(sc->ti_dev,
			    "could not create DMA map for TX\n");
			return (error);
		}
	}

	/* Mini ring and TX ring is not available on Tigon 1. */
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		return (0);

	/* TX ring */
	error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, TI_TX_RING_SZ,
	    &sc->ti_cdata.ti_tx_ring_tag, (uint8_t **)&sc->ti_rdata.ti_tx_ring,
	    &sc->ti_cdata.ti_tx_ring_map, &sc->ti_rdata.ti_tx_ring_paddr,
	    "TX ring");
	if (error)
		return (error);

	/* Mini RX ring */
	error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, TI_MINI_RX_RING_SZ,
	    &sc->ti_cdata.ti_rx_mini_ring_tag,
	    (uint8_t **)&sc->ti_rdata.ti_rx_mini_ring,
	    &sc->ti_cdata.ti_rx_mini_ring_map,
	    &sc->ti_rdata.ti_rx_mini_ring_paddr, "mini RX ring");
	if (error)
		return (error);

	/* Create DMA tag for mini RX mbufs. */
	error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MHLEN, 1,
	    MHLEN, 0, NULL, NULL, &sc->ti_cdata.ti_rx_mini_tag);
	if (error) {
		device_printf(sc->ti_dev,
		    "could not allocate mini RX dma tag\n");
		return (error);
	}

	/* Create DMA maps for mini RX buffers. */
	for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->ti_cdata.ti_rx_mini_tag, 0,
		    &sc->ti_cdata.ti_rx_mini_maps[i]);
		if (error) {
			device_printf(sc->ti_dev,
			    "could not create DMA map for mini RX\n");
			return (error);
		}
	}
	error = bus_dmamap_create(sc->ti_cdata.ti_rx_mini_tag, 0,
	    &sc->ti_cdata.ti_rx_mini_sparemap);
	if (error) {
		device_printf(sc->ti_dev,
		    "could not create spare DMA map for mini RX\n");
		return (error);
	}

	return (0);
}
/*
 * Release everything ti_dma_alloc() created.  Per-buffer maps are
 * destroyed first, then each tag, then the descriptor rings and shared
 * control areas, and finally the parent tag.  Every step is guarded by
 * a NULL check and NULLs the handle afterwards, so this is safe to
 * call after a partial/failed ti_dma_alloc() and is idempotent.
 */
static void
ti_dma_free(struct ti_softc *sc)
{
	int i;

	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_rx_std_maps[i]) {
			bus_dmamap_destroy(sc->ti_cdata.ti_rx_std_tag,
			    sc->ti_cdata.ti_rx_std_maps[i]);
			sc->ti_cdata.ti_rx_std_maps[i] = NULL;
		}
	}
	if (sc->ti_cdata.ti_rx_std_sparemap) {
		bus_dmamap_destroy(sc->ti_cdata.ti_rx_std_tag,
		    sc->ti_cdata.ti_rx_std_sparemap);
		sc->ti_cdata.ti_rx_std_sparemap = NULL;
	}
	if (sc->ti_cdata.ti_rx_std_tag) {
		bus_dma_tag_destroy(sc->ti_cdata.ti_rx_std_tag);
		sc->ti_cdata.ti_rx_std_tag = NULL;
	}

	/* Destroy DMA maps for jumbo RX buffers. */
	for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_rx_jumbo_maps[i]) {
			bus_dmamap_destroy(sc->ti_cdata.ti_rx_jumbo_tag,
			    sc->ti_cdata.ti_rx_jumbo_maps[i]);
			sc->ti_cdata.ti_rx_jumbo_maps[i] = NULL;
		}
	}
	if (sc->ti_cdata.ti_rx_jumbo_sparemap) {
		bus_dmamap_destroy(sc->ti_cdata.ti_rx_jumbo_tag,
		    sc->ti_cdata.ti_rx_jumbo_sparemap);
		sc->ti_cdata.ti_rx_jumbo_sparemap = NULL;
	}
	if (sc->ti_cdata.ti_rx_jumbo_tag) {
		bus_dma_tag_destroy(sc->ti_cdata.ti_rx_jumbo_tag);
		sc->ti_cdata.ti_rx_jumbo_tag = NULL;
	}

	/*
	 * Destroy DMA maps for mini RX buffers.  On Tigon 1 the mini
	 * resources were never allocated; the NULL guards cover that.
	 */
	for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_rx_mini_maps[i]) {
			bus_dmamap_destroy(sc->ti_cdata.ti_rx_mini_tag,
			    sc->ti_cdata.ti_rx_mini_maps[i]);
			sc->ti_cdata.ti_rx_mini_maps[i] = NULL;
		}
	}
	if (sc->ti_cdata.ti_rx_mini_sparemap) {
		bus_dmamap_destroy(sc->ti_cdata.ti_rx_mini_tag,
		    sc->ti_cdata.ti_rx_mini_sparemap);
		sc->ti_cdata.ti_rx_mini_sparemap = NULL;
	}
	if (sc->ti_cdata.ti_rx_mini_tag) {
		bus_dma_tag_destroy(sc->ti_cdata.ti_rx_mini_tag);
		sc->ti_cdata.ti_rx_mini_tag = NULL;
	}

	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < TI_TX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_txdesc[i].tx_dmamap) {
			bus_dmamap_destroy(sc->ti_cdata.ti_tx_tag,
			    sc->ti_cdata.ti_txdesc[i].tx_dmamap);
			sc->ti_cdata.ti_txdesc[i].tx_dmamap = NULL;
		}
	}
	if (sc->ti_cdata.ti_tx_tag) {
		bus_dma_tag_destroy(sc->ti_cdata.ti_tx_tag);
		sc->ti_cdata.ti_tx_tag = NULL;
	}

	/* Destroy standard RX ring. */
	ti_dma_ring_free(sc, &sc->ti_cdata.ti_rx_std_ring_tag,
	    (void *)&sc->ti_rdata.ti_rx_std_ring,
	    sc->ti_cdata.ti_rx_std_ring_map,
	    &sc->ti_rdata.ti_rx_std_ring_paddr);
	/* Destroy jumbo RX ring. */
	ti_dma_ring_free(sc, &sc->ti_cdata.ti_rx_jumbo_ring_tag,
	    (void *)&sc->ti_rdata.ti_rx_jumbo_ring,
	    sc->ti_cdata.ti_rx_jumbo_ring_map,
	    &sc->ti_rdata.ti_rx_jumbo_ring_paddr);
	/* Destroy mini RX ring. */
	ti_dma_ring_free(sc, &sc->ti_cdata.ti_rx_mini_ring_tag,
	    (void *)&sc->ti_rdata.ti_rx_mini_ring,
	    sc->ti_cdata.ti_rx_mini_ring_map,
	    &sc->ti_rdata.ti_rx_mini_ring_paddr);
	/* Destroy RX return ring. */
	ti_dma_ring_free(sc, &sc->ti_cdata.ti_rx_return_ring_tag,
	    (void *)&sc->ti_rdata.ti_rx_return_ring,
	    sc->ti_cdata.ti_rx_return_ring_map,
	    &sc->ti_rdata.ti_rx_return_ring_paddr);
	/* Destroy TX ring. */
	ti_dma_ring_free(sc, &sc->ti_cdata.ti_tx_ring_tag,
	    (void *)&sc->ti_rdata.ti_tx_ring, sc->ti_cdata.ti_tx_ring_map,
	    &sc->ti_rdata.ti_tx_ring_paddr);
	/* Destroy status block. */
	ti_dma_ring_free(sc, &sc->ti_cdata.ti_status_tag,
	    (void *)&sc->ti_rdata.ti_status, sc->ti_cdata.ti_status_map,
	    &sc->ti_rdata.ti_status_paddr);
	/* Destroy event ring. */
	ti_dma_ring_free(sc, &sc->ti_cdata.ti_event_ring_tag,
	    (void *)&sc->ti_rdata.ti_event_ring,
	    sc->ti_cdata.ti_event_ring_map, &sc->ti_rdata.ti_event_ring_paddr);
	/* Destroy GIB */
	ti_dma_ring_free(sc, &sc->ti_cdata.ti_gib_tag,
	    (void *)&sc->ti_rdata.ti_info, sc->ti_cdata.ti_gib_map,
	    &sc->ti_rdata.ti_info_paddr);

	/* Destroy the parent tag. */
	if (sc->ti_cdata.ti_parent_tag) {
		bus_dma_tag_destroy(sc->ti_cdata.ti_parent_tag);
		sc->ti_cdata.ti_parent_tag = NULL;
	}
}
/*
 * Initialize a standard receive ring descriptor.
 *
 * Allocates a fresh 2K cluster for ring slot 'i', DMA-loads it via the
 * spare map, releases any mbuf previously occupying the slot, and then
 * swaps the spare map with the slot's map.  The load-into-spare-first
 * ordering means the old buffer is only torn down once the replacement
 * is known to be loadable, so the slot never ends up empty on failure.
 * Returns 0 on success, ENOBUFS or a busdma errno otherwise.
 */
static int
ti_newbuf_std(struct ti_softc *sc, int i)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[1];
	struct mbuf *m;
	struct ti_rx_desc *r;
	int error, nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/* Offset the payload so the IP header lands 32-bit aligned. */
	m_adj(m, ETHER_ALIGN);
	error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_rx_std_tag,
	    sc->ti_cdata.ti_rx_std_sparemap, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	/* Tear down the previous occupant of this slot, if any. */
	if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) {
		bus_dmamap_sync(sc->ti_cdata.ti_rx_std_tag,
		    sc->ti_cdata.ti_rx_std_maps[i], BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->ti_cdata.ti_rx_std_tag,
		    sc->ti_cdata.ti_rx_std_maps[i]);
	}
	/* The spare map (now loaded) becomes the slot map and vice versa. */
	map = sc->ti_cdata.ti_rx_std_maps[i];
	sc->ti_cdata.ti_rx_std_maps[i] = sc->ti_cdata.ti_rx_std_sparemap;
	sc->ti_cdata.ti_rx_std_sparemap = map;
	sc->ti_cdata.ti_rx_std_chain[i] = m;
	/* Fill in the buffer descriptor the NIC will read. */
	r = &sc->ti_rdata.ti_rx_std_ring[i];
	ti_hostaddr64(&r->ti_addr, segs[0].ds_addr);
	r->ti_len = segs[0].ds_len;
	r->ti_type = TI_BDTYPE_RECV_BD;
	r->ti_flags = 0;
	r->ti_vlan_tag = 0;
	r->ti_tcp_udp_cksum = 0;
	if (if_getcapenable(sc->ti_ifp) & IFCAP_RXCSUM)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
	r->ti_idx = i;
	bus_dmamap_sync(sc->ti_cdata.ti_rx_std_tag,
	    sc->ti_cdata.ti_rx_std_maps[i], BUS_DMASYNC_PREREAD);
	return (0);
}
/*
 * Initialize a mini receive ring descriptor.  This only applies to
 * the Tigon 2.
 *
 * Same spare-map swap protocol as ti_newbuf_std(), but backed by a
 * plain MHLEN header mbuf (no cluster) and flagged for the mini ring.
 * Returns 0 on success, ENOBUFS or a busdma errno otherwise.
 */
static int
ti_newbuf_mini(struct ti_softc *sc, int i)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[1];
	struct mbuf *m;
	struct ti_rx_desc *r;
	int error, nsegs;

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MHLEN;
	/* Offset the payload so the IP header lands 32-bit aligned. */
	m_adj(m, ETHER_ALIGN);
	error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_rx_mini_tag,
	    sc->ti_cdata.ti_rx_mini_sparemap, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	/* Tear down the previous occupant of this slot, if any. */
	if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) {
		bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_tag,
		    sc->ti_cdata.ti_rx_mini_maps[i], BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->ti_cdata.ti_rx_mini_tag,
		    sc->ti_cdata.ti_rx_mini_maps[i]);
	}
	/* The spare map (now loaded) becomes the slot map and vice versa. */
	map = sc->ti_cdata.ti_rx_mini_maps[i];
	sc->ti_cdata.ti_rx_mini_maps[i] = sc->ti_cdata.ti_rx_mini_sparemap;
	sc->ti_cdata.ti_rx_mini_sparemap = map;
	sc->ti_cdata.ti_rx_mini_chain[i] = m;
	/* Fill in the buffer descriptor the NIC will read. */
	r = &sc->ti_rdata.ti_rx_mini_ring[i];
	ti_hostaddr64(&r->ti_addr, segs[0].ds_addr);
	r->ti_len = segs[0].ds_len;
	r->ti_type = TI_BDTYPE_RECV_BD;
	r->ti_flags = TI_BDFLAG_MINI_RING;
	r->ti_vlan_tag = 0;
	r->ti_tcp_udp_cksum = 0;
	if (if_getcapenable(sc->ti_ifp) & IFCAP_RXCSUM)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
	r->ti_idx = i;
	bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_tag,
	    sc->ti_cdata.ti_rx_mini_maps[i], BUS_DMASYNC_PREREAD);
	return (0);
}
#ifndef TI_SF_BUF_JUMBO
/*
 * Initialize a jumbo receive ring descriptor (non-sf_buf variant).
 *
 * Allocates a single 9K jumbo cluster via m_getjcl() and installs it in
 * ring slot 'i' using the same spare-map swap protocol as
 * ti_newbuf_std().  The 'dummy' parameter exists only to keep the same
 * signature as the TI_SF_BUF_JUMBO variant and is ignored.
 * Returns 0 on success, ENOBUFS or a busdma errno otherwise.
 */
static int
ti_newbuf_jumbo(struct ti_softc *sc, int i, struct mbuf *dummy)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[1];
	struct mbuf *m;
	struct ti_rx_desc *r;
	int error, nsegs;

	(void)dummy;
	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
	/* Offset the payload so the IP header lands 32-bit aligned. */
	m_adj(m, ETHER_ALIGN);
	error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_rx_jumbo_tag,
	    sc->ti_cdata.ti_rx_jumbo_sparemap, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	/* Tear down the previous occupant of this slot, if any. */
	if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) {
		bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_tag,
		    sc->ti_cdata.ti_rx_jumbo_maps[i], BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->ti_cdata.ti_rx_jumbo_tag,
		    sc->ti_cdata.ti_rx_jumbo_maps[i]);
	}
	/* The spare map (now loaded) becomes the slot map and vice versa. */
	map = sc->ti_cdata.ti_rx_jumbo_maps[i];
	sc->ti_cdata.ti_rx_jumbo_maps[i] = sc->ti_cdata.ti_rx_jumbo_sparemap;
	sc->ti_cdata.ti_rx_jumbo_sparemap = map;
	sc->ti_cdata.ti_rx_jumbo_chain[i] = m;
	/* Fill in the buffer descriptor the NIC will read. */
	r = &sc->ti_rdata.ti_rx_jumbo_ring[i];
	ti_hostaddr64(&r->ti_addr, segs[0].ds_addr);
	r->ti_len = segs[0].ds_len;
	r->ti_type = TI_BDTYPE_RECV_JUMBO_BD;
	r->ti_flags = TI_BDFLAG_JUMBO_RING;
	r->ti_vlan_tag = 0;
	r->ti_tcp_udp_cksum = 0;
	if (if_getcapenable(sc->ti_ifp) & IFCAP_RXCSUM)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
	r->ti_idx = i;
	bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_tag,
	    sc->ti_cdata.ti_rx_jumbo_maps[i], BUS_DMASYNC_PREREAD);
	return (0);
}
#else
#if (PAGE_SIZE == 4096)
#define NPAYLOAD 2
#else
#define NPAYLOAD 1
#endif
#define TCP_HDR_LEN (52 + sizeof(struct ether_header))
#define UDP_HDR_LEN (28 + sizeof(struct ether_header))
#define NFS_HDR_LEN (UDP_HDR_LEN)
static int HDR_LEN = TCP_HDR_LEN;
/*
* Initialize a jumbo receive ring descriptor. This allocates
* a jumbo buffer from the pool managed internally by the driver.
*/
static int
ti_newbuf_jumbo(struct ti_softc *sc, int idx, struct mbuf *m_old)
{
bus_dmamap_t map;
struct mbuf *cur, *m_new = NULL;
struct mbuf *m[3] = {NULL, NULL, NULL};
struct ti_rx_desc_ext *r;
vm_page_t frame;
/* 1 extra buf to make nobufs easy*/
struct sf_buf *sf[3] = {NULL, NULL, NULL};
int i;
bus_dma_segment_t segs[4];
int nsegs;
if (m_old != NULL) {
m_new = m_old;
cur = m_old->m_next;
for (i = 0; i <= NPAYLOAD; i++){
m[i] = cur;
cur = cur->m_next;
}
} else {
/* Allocate the mbufs. */
MGETHDR(m_new, M_NOWAIT, MT_DATA);
if (m_new == NULL) {
device_printf(sc->ti_dev, "mbuf allocation failed "
"-- packet dropped!\n");
goto nobufs;
}
MGET(m[NPAYLOAD], M_NOWAIT, MT_DATA);
if (m[NPAYLOAD] == NULL) {
device_printf(sc->ti_dev, "cluster mbuf allocation "
"failed -- packet dropped!\n");
goto nobufs;
}
if (!(MCLGET(m[NPAYLOAD], M_NOWAIT))) {
device_printf(sc->ti_dev, "mbuf allocation failed "
"-- packet dropped!\n");
goto nobufs;
}
m[NPAYLOAD]->m_len = MCLBYTES;
for (i = 0; i < NPAYLOAD; i++){
MGET(m[i], M_NOWAIT, MT_DATA);
if (m[i] == NULL) {
device_printf(sc->ti_dev, "mbuf allocation "
"failed -- packet dropped!\n");
goto nobufs;
}
frame = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
VM_ALLOC_WIRED);
if (frame == NULL) {
device_printf(sc->ti_dev, "buffer allocation "
"failed -- packet dropped!\n");
printf(" index %d page %d\n", idx, i);
goto nobufs;
}
sf[i] = sf_buf_alloc(frame, SFB_NOWAIT);
if (sf[i] == NULL) {
vm_page_unwire_noq(frame);
vm_page_free(frame);
device_printf(sc->ti_dev, "buffer allocation "
"failed -- packet dropped!\n");
printf(" index %d page %d\n", idx, i);
goto nobufs;
}
}
for (i = 0; i < NPAYLOAD; i++){
/* Attach the buffer to the mbuf. */
m[i]->m_data = (void *)sf_buf_kva(sf[i]);
m[i]->m_len = PAGE_SIZE;
MEXTADD(m[i], sf_buf_kva(sf[i]), PAGE_SIZE,
sf_mext_free, (void*)sf_buf_kva(sf[i]), sf[i],
0, EXT_DISPOSABLE);
m[i]->m_next = m[i+1];
}
/* link the buffers to the header */
m_new->m_next = m[0];
m_new->m_data += ETHER_ALIGN;
if (sc->ti_hdrsplit)
m_new->m_len = MHLEN - ETHER_ALIGN;
else
m_new->m_len = HDR_LEN;
m_new->m_pkthdr.len = NPAYLOAD * PAGE_SIZE + m_new->m_len;
}
/* Set up the descriptor. */
r = &sc->ti_rdata.ti_rx_jumbo_ring[idx];
sc->ti_cdata.ti_rx_jumbo_chain[idx] = m_new;
map = sc->ti_cdata.ti_rx_jumbo_maps[i];
if (bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_rx_jumbo_tag, map, m_new,
segs, &nsegs, 0))
return (ENOBUFS);
if ((nsegs < 1) || (nsegs > 4))
return (ENOBUFS);
ti_hostaddr64(&r->ti_addr0, segs[0].ds_addr);
r->ti_len0 = m_new->m_len;
ti_hostaddr64(&r->ti_addr1, segs[1].ds_addr);
r->ti_len1 = PAGE_SIZE;
ti_hostaddr64(&r->ti_addr2, segs[2].ds_addr);
r->ti_len2 = m[1]->m_ext.ext_size; /* could be PAGE_SIZE or MCLBYTES */
if (PAGE_SIZE == 4096) {
ti_hostaddr64(&r->ti_addr3, segs[3].ds_addr);
r->ti_len3 = MCLBYTES;
} else {
r->ti_len3 = 0;
}
r->ti_type = TI_BDTYPE_RECV_JUMBO_BD;
r->ti_flags = TI_BDFLAG_JUMBO_RING|TI_RCB_FLAG_USE_EXT_RX_BD;
if (if_getcapenable(sc->ti_ifp) & IFCAP_RXCSUM)
r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM|TI_BDFLAG_IP_CKSUM;
r->ti_idx = idx;
bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_tag, map, BUS_DMASYNC_PREREAD);
return (0);
nobufs:
/*
* Warning! :
* This can only be called before the mbufs are strung together.
* If the mbufs are strung together, m_freem() will free the chain,
* so that the later mbufs will be freed multiple times.
*/
if (m_new)
m_freem(m_new);
for (i = 0; i < 3; i++) {
if (m[i])
m_freem(m[i]);
if (sf[i])
sf_mext_free((void *)sf_buf_kva(sf[i]), sf[i]);
}
return (ENOBUFS);
}
#endif
/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB or memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
ti_init_rx_ring_std(struct ti_softc *sc)
{
	int i;
	/*
	 * 'cmd' looks unused but is referenced by the TI_DO_CMD expansion
	 * inside TI_UPDATE_STDPROD (Tigon 1 path) -- do not remove.
	 */
	struct ti_cmd_desc cmd;

	/* Populate every slot; bail on the first allocation failure. */
	for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
		if (ti_newbuf_std(sc, i) != 0)
			return (ENOBUFS);
	}

	/* Tell the NIC the ring is fully stocked. */
	sc->ti_std = TI_STD_RX_RING_CNT - 1;
	TI_UPDATE_STDPROD(sc, TI_STD_RX_RING_CNT - 1);

	return (0);
}
/*
 * Release every mbuf on the standard RX ring (sync, unload, free) and
 * clear the descriptor ring so the NIC sees no stale entries.
 */
static void
ti_free_rx_ring_std(struct ti_softc *sc)
{
	bus_dmamap_t map;
	int i;

	for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) {
			map = sc->ti_cdata.ti_rx_std_maps[i];
			bus_dmamap_sync(sc->ti_cdata.ti_rx_std_tag, map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->ti_cdata.ti_rx_std_tag, map);
			m_freem(sc->ti_cdata.ti_rx_std_chain[i]);
			sc->ti_cdata.ti_rx_std_chain[i] = NULL;
		}
	}
	/* Zero the descriptors and push the change to the device. */
	bzero(sc->ti_rdata.ti_rx_std_ring, TI_STD_RX_RING_SZ);
	bus_dmamap_sync(sc->ti_cdata.ti_rx_std_ring_tag,
	    sc->ti_cdata.ti_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
}
/*
 * Stock the jumbo RX ring and advance the producer index to the last
 * slot.  Returns ENOBUFS if any buffer allocation fails.
 */
static int
ti_init_rx_ring_jumbo(struct ti_softc *sc)
{
	/*
	 * 'cmd' looks unused but is referenced by the TI_DO_CMD expansion
	 * inside TI_UPDATE_JUMBOPROD (Tigon 1 path) -- do not remove.
	 */
	struct ti_cmd_desc cmd;
	int i;

	for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
		if (ti_newbuf_jumbo(sc, i, NULL) != 0)
			return (ENOBUFS);
	}

	sc->ti_jumbo = TI_JUMBO_RX_RING_CNT - 1;
	TI_UPDATE_JUMBOPROD(sc, TI_JUMBO_RX_RING_CNT - 1);

	return (0);
}
/*
 * Release every mbuf chain on the jumbo RX ring (sync, unload, free)
 * and clear the descriptor ring so the NIC sees no stale entries.
 */
static void
ti_free_rx_ring_jumbo(struct ti_softc *sc)
{
	bus_dmamap_t map;
	int i;

	for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) {
			map = sc->ti_cdata.ti_rx_jumbo_maps[i];
			bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_tag, map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->ti_cdata.ti_rx_jumbo_tag, map);
			m_freem(sc->ti_cdata.ti_rx_jumbo_chain[i]);
			sc->ti_cdata.ti_rx_jumbo_chain[i] = NULL;
		}
	}
	/* Zero the descriptors and push the change to the device. */
	bzero(sc->ti_rdata.ti_rx_jumbo_ring, TI_JUMBO_RX_RING_SZ);
	bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_ring_tag,
	    sc->ti_cdata.ti_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
}
/*
 * Stock the mini RX ring (Tigon 2 only) and advance its producer
 * index.  Returns ENOBUFS if any buffer allocation fails.
 */
static int
ti_init_rx_ring_mini(struct ti_softc *sc)
{
	int i;

	for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
		if (ti_newbuf_mini(sc, i) != 0)
			return (ENOBUFS);
	}

	sc->ti_mini = TI_MINI_RX_RING_CNT - 1;
	TI_UPDATE_MINIPROD(sc, TI_MINI_RX_RING_CNT - 1);

	return (0);
}
/*
 * Release every mbuf on the mini RX ring and clear the descriptor
 * ring.  The early return covers Tigon 1, where the mini ring is
 * never allocated and ti_rx_mini_ring stays NULL.
 */
static void
ti_free_rx_ring_mini(struct ti_softc *sc)
{
	bus_dmamap_t map;
	int i;

	if (sc->ti_rdata.ti_rx_mini_ring == NULL)
		return;

	for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) {
			map = sc->ti_cdata.ti_rx_mini_maps[i];
			bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_tag, map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->ti_cdata.ti_rx_mini_tag, map);
			m_freem(sc->ti_cdata.ti_rx_mini_chain[i]);
			sc->ti_cdata.ti_rx_mini_chain[i] = NULL;
		}
	}
	/* Zero the descriptors and push the change to the device. */
	bzero(sc->ti_rdata.ti_rx_mini_ring, TI_MINI_RX_RING_SZ);
	bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_ring_tag,
	    sc->ti_cdata.ti_rx_mini_ring_map, BUS_DMASYNC_PREWRITE);
}
/*
 * Free any mbufs still pending transmission and clear the TX ring.
 * The early return covers Tigon 1, where the host-resident TX ring is
 * never allocated and ti_tx_ring stays NULL.
 */
static void
ti_free_tx_ring(struct ti_softc *sc)
{
	struct ti_txdesc *txd;
	int i;

	if (sc->ti_rdata.ti_tx_ring == NULL)
		return;

	for (i = 0; i < TI_TX_RING_CNT; i++) {
		txd = &sc->ti_cdata.ti_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->ti_cdata.ti_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
	/* Zero the descriptors and push the change to the device. */
	bzero(sc->ti_rdata.ti_tx_ring, TI_TX_RING_SZ);
	bus_dmamap_sync(sc->ti_cdata.ti_tx_ring_tag,
	    sc->ti_cdata.ti_tx_ring_map, BUS_DMASYNC_PREWRITE);
}
static int
ti_init_tx_ring(struct ti_softc *sc)
{
struct ti_txdesc *txd;
int i;
STAILQ_INIT(&sc->ti_cdata.ti_txfreeq);
STAILQ_INIT(&sc->ti_cdata.ti_txbusyq);
for (i = 0; i < TI_TX_RING_CNT; i++) {
txd = &sc->ti_cdata.ti_txdesc[i];
STAILQ_INSERT_TAIL(&sc->ti_cdata.ti_txfreeq, txd, tx_q);
}
sc->ti_txcnt = 0;
sc->ti_tx_saved_considx = 0;
sc->ti_tx_saved_prodidx = 0;
CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, 0);
return (0);
}
/*
 * The Tigon 2 firmware has a new way to add/delete multicast addresses,
 * but we have to support the old way too so that Tigon 1 cards will
 * work.
 */
/*
 * if_foreach_llmaddr() callback: program one multicast address into the
 * NIC.  Tigon 1 takes the address via the MAR registers plus a command;
 * Tigon 2 takes it inline in an extended command.  Returns 1 if the
 * address was programmed (counted by the caller), 0 otherwise.  'count'
 * is the running tally supplied by if_foreach_llmaddr() and is unused.
 */
static u_int
ti_add_mcast(void *arg, struct sockaddr_dl *sdl, u_int count)
{
	struct ti_softc *sc = arg;
	/* 'cmd' is referenced by the TI_DO_CMD* macro expansions below. */
	struct ti_cmd_desc cmd;
	uint16_t *m;
	uint32_t ext[2] = {0, 0};

	m = (uint16_t *)LLADDR(sdl);

	switch (sc->ti_hwrev) {
	case TI_HWREV_TIGON:
		CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0]));
		CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2]));
		TI_DO_CMD(TI_CMD_ADD_MCAST_ADDR, 0, 0);
		break;
	case TI_HWREV_TIGON_II:
		ext[0] = htons(m[0]);
		ext[1] = (htons(m[1]) << 16) | htons(m[2]);
		TI_DO_CMD_EXT(TI_CMD_EXT_ADD_MCAST, 0, 0, (caddr_t)&ext, 2);
		break;
	default:
		device_printf(sc->ti_dev, "unknown hwrev\n");
		return (0);
	}
	return (1);
}
/*
 * if_foreach_llmaddr() callback: remove one multicast address from the
 * NIC.  Mirror image of ti_add_mcast() using the DEL commands.
 * Returns 1 if the address was removed, 0 on unknown hardware.
 */
static u_int
ti_del_mcast(void *arg, struct sockaddr_dl *sdl, u_int count)
{
	struct ti_softc *sc = arg;
	/* 'cmd' is referenced by the TI_DO_CMD* macro expansions below. */
	struct ti_cmd_desc cmd;
	uint16_t *m;
	uint32_t ext[2] = {0, 0};

	m = (uint16_t *)LLADDR(sdl);

	switch (sc->ti_hwrev) {
	case TI_HWREV_TIGON:
		CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0]));
		CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2]));
		TI_DO_CMD(TI_CMD_DEL_MCAST_ADDR, 0, 0);
		break;
	case TI_HWREV_TIGON_II:
		ext[0] = htons(m[0]);
		ext[1] = (htons(m[1]) << 16) | htons(m[2]);
		TI_DO_CMD_EXT(TI_CMD_EXT_DEL_MCAST, 0, 0, (caddr_t)&ext, 2);
		break;
	default:
		device_printf(sc->ti_dev, "unknown hwrev\n");
		return (0);
	}
	return (1);
}
/*
 * Configure the Tigon's multicast address filter.
 *
 * The actual multicast table management is a bit of a pain, thanks to
 * slight brain damage on the part of both Alteon and us. With our
 * multicast code, we are only alerted when the multicast address table
 * changes and at that point we only have the current list of addresses:
 * we only know the current state, not the previous state, so we don't
 * actually know what addresses were removed or added. The firmware has
 * state, but we can't get our grubby mits on it, and there is no 'delete
 * all multicast addresses' command. Hence, we have to maintain our own
 * state so we know what addresses have been programmed into the NIC at
 * any given time.
 */
static void
ti_setmulti(struct ti_softc *sc)
{
	if_t ifp;
	/* 'cmd' is referenced by the TI_DO_CMD macro expansions below. */
	struct ti_cmd_desc cmd;
	uint32_t intrs;

	TI_LOCK_ASSERT(sc);

	ifp = sc->ti_ifp;

	/* ALLMULTI short-circuits the per-address programming below. */
	if (if_getflags(ifp) & IFF_ALLMULTI) {
		TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_ENB, 0);
		return;
	} else {
		TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_DIS, 0);
	}

	/* Disable interrupts. */
	intrs = CSR_READ_4(sc, TI_MB_HOSTINTR);
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

	/* First, zot all the existing filters. */
	if_foreach_llmaddr(ifp, ti_del_mcast, sc);

	/* Now program new ones. */
	if_foreach_llmaddr(ifp, ti_add_mcast, sc);

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs);
}
/*
 * Check to see if the BIOS has configured us for a 64 bit slot when
 * we aren't actually in one.  If we detect this condition, we can work
 * around it on the Tigon 2 by setting a bit in the PCI state register,
 * but for the Tigon 1 we must give up and abort the interface attach.
 *
 * Returns 0 when no workaround is needed (or after applying it on
 * Tigon 2), EINVAL on an unfixable Tigon 1.
 */
static int
ti_64bitslot_war(struct ti_softc *sc)
{

	/* Nothing to do if the NIC already knows it is on a 32-bit bus. */
	if ((CSR_READ_4(sc, TI_PCI_STATE) & TI_PCISTATE_32BIT_BUS) != 0)
		return (0);

	/*
	 * Probe: write a test pattern at 0x600 and see whether it shows
	 * up at 0x604, which only happens under the misconfiguration.
	 */
	CSR_WRITE_4(sc, 0x600, 0);
	CSR_WRITE_4(sc, 0x604, 0);
	CSR_WRITE_4(sc, 0x600, 0x5555AAAA);
	if (CSR_READ_4(sc, 0x604) != 0x5555AAAA)
		return (0);

	/* Misconfigured: fatal on Tigon 1, correctable on Tigon 2. */
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		return (EINVAL);
	TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_32BIT_BUS);
	return (0);
}
/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 *
 * Returns 0 on success, ENODEV on self-test failure or an unsupported
 * chip revision, EINVAL when a Tigon 1 is caught in the 64-bit-slot
 * misconfiguration (see ti_64bitslot_war()).
 */
static int
ti_chipinit(struct ti_softc *sc)
{
	uint32_t cacheline;
	/*
	 * NOTE(review): pci_writemax is never changed from 0 before the
	 * TI_SETBIT below, so that SETBIT is currently a no-op -- it
	 * looks like a placeholder for per-cacheline write-max tuning.
	 */
	uint32_t pci_writemax = 0;
	uint32_t hdrsplit;

	/* Initialize link to down state. */
	sc->ti_linkstat = TI_EV_CODE_LINK_DOWN;

	/* Set endianness before we access any non-PCI registers. */
#if 0 && BYTE_ORDER == BIG_ENDIAN
	CSR_WRITE_4(sc, TI_MISC_HOST_CTL,
	    TI_MHC_BIGENDIAN_INIT | (TI_MHC_BIGENDIAN_INIT << 24));
#else
	CSR_WRITE_4(sc, TI_MISC_HOST_CTL,
	    TI_MHC_LITTLEENDIAN_INIT | (TI_MHC_LITTLEENDIAN_INIT << 24));
#endif

	/* Check the ROM failed bit to see if self-tests passed. */
	if (CSR_READ_4(sc, TI_CPU_STATE) & TI_CPUSTATE_ROMFAIL) {
		device_printf(sc->ti_dev, "board self-diagnostics failed!\n");
		return (ENODEV);
	}

	/* Halt the CPU. */
	TI_SETBIT(sc, TI_CPU_STATE, TI_CPUSTATE_HALT);

	/* Figure out the hardware revision. */
	switch (CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_CHIP_REV_MASK) {
	case TI_REV_TIGON_I:
		sc->ti_hwrev = TI_HWREV_TIGON;
		break;
	case TI_REV_TIGON_II:
		sc->ti_hwrev = TI_HWREV_TIGON_II;
		break;
	default:
		device_printf(sc->ti_dev, "unsupported chip revision\n");
		return (ENODEV);
	}

	/* Do special setup for Tigon 2. */
	if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
		TI_SETBIT(sc, TI_CPU_CTL_B, TI_CPUSTATE_HALT);
		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_SRAM_BANK_512K);
		TI_SETBIT(sc, TI_MISC_CONF, TI_MCR_SRAM_SYNCHRONOUS);
	}

	/*
	 * We don't have firmware source for the Tigon 1, so Tigon 1 boards
	 * can't do header splitting.
	 */
#ifdef TI_JUMBO_HDRSPLIT
	if (sc->ti_hwrev != TI_HWREV_TIGON)
		sc->ti_hdrsplit = 1;
	else
		device_printf(sc->ti_dev,
		    "can't do header splitting on a Tigon I board\n");
#endif /* TI_JUMBO_HDRSPLIT */

	/* Set up the PCI state register. */
	CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD|TI_PCI_WRITE_CMD);
	if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
		TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_USE_MEM_RD_MULT);
	}

	/* Clear the read/write max DMA parameters. */
	TI_CLRBIT(sc, TI_PCI_STATE, (TI_PCISTATE_WRITE_MAXDMA|
	    TI_PCISTATE_READ_MAXDMA));

	/* Get cache line size. */
	cacheline = CSR_READ_4(sc, TI_PCI_BIST) & 0xFF;

	/*
	 * If the system has set enabled the PCI memory write
	 * and invalidate command in the command register, set
	 * the write max parameter accordingly. This is necessary
	 * to use MWI with the Tigon 2.
	 */
	if (CSR_READ_4(sc, TI_PCI_CMDSTAT) & PCIM_CMD_MWIEN) {
		switch (cacheline) {
		case 1:
		case 4:
		case 8:
		case 16:
		case 32:
		case 64:
			break;
		default:
			/* Disable PCI memory write and invalidate. */
			if (bootverbose)
				device_printf(sc->ti_dev, "cache line size %d"
				    " not supported; disabling PCI MWI\n",
				    cacheline);
			CSR_WRITE_4(sc, TI_PCI_CMDSTAT, CSR_READ_4(sc,
			    TI_PCI_CMDSTAT) & ~PCIM_CMD_MWIEN);
			break;
		}
	}

	TI_SETBIT(sc, TI_PCI_STATE, pci_writemax);

	/* This sets the min dma param all the way up (0xff). */
	TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_MINDMA);

	if (sc->ti_hdrsplit)
		hdrsplit = TI_OPMODE_JUMBO_HDRSPLIT;
	else
		hdrsplit = 0;

	/* Configure DMA variables. */
#if BYTE_ORDER == BIG_ENDIAN
	CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_BD |
	    TI_OPMODE_BYTESWAP_DATA | TI_OPMODE_WORDSWAP_BD |
	    TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB |
	    TI_OPMODE_DONT_FRAG_JUMBO | hdrsplit);
#else /* BYTE_ORDER */
	CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_DATA|
	    TI_OPMODE_WORDSWAP_BD|TI_OPMODE_DONT_FRAG_JUMBO|
	    TI_OPMODE_WARN_ENB|TI_OPMODE_FATAL_ENB | hdrsplit);
#endif /* BYTE_ORDER */

	/*
	 * Only allow 1 DMA channel to be active at a time.
	 * I don't think this is a good idea, but without it
	 * the firmware racks up lots of nicDmaReadRingFull
	 * errors.  This is not compatible with hardware checksums.
	 */
	if ((if_getcapenable(sc->ti_ifp) & (IFCAP_TXCSUM | IFCAP_RXCSUM)) == 0)
		TI_SETBIT(sc, TI_GCR_OPMODE, TI_OPMODE_1_DMA_ACTIVE);

	/* Recommended settings from Tigon manual. */
	CSR_WRITE_4(sc, TI_GCR_DMA_WRITECFG, TI_DMA_STATE_THRESH_8W);
	CSR_WRITE_4(sc, TI_GCR_DMA_READCFG, TI_DMA_STATE_THRESH_8W);

	if (ti_64bitslot_war(sc)) {
		device_printf(sc->ti_dev, "bios thinks we're in a 64 bit slot, "
		    "but we aren't");
		return (EINVAL);
	}

	return (0);
}
/*
* Initialize the general information block and firmware, and
* start the CPU(s) running.
*/
static int
ti_gibinit(struct ti_softc *sc)
{
if_t ifp;
struct ti_rcb *rcb;
int i;
TI_LOCK_ASSERT(sc);
ifp = sc->ti_ifp;
/* Disable interrupts for now. */
CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
/* Tell the chip where to find the general information block. */
CSR_WRITE_4(sc, TI_GCR_GENINFO_HI,
(uint64_t)sc->ti_rdata.ti_info_paddr >> 32);
CSR_WRITE_4(sc, TI_GCR_GENINFO_LO,
sc->ti_rdata.ti_info_paddr & 0xFFFFFFFF);
/* Load the firmware into SRAM. */
ti_loadfw(sc);
/* Set up the contents of the general info and ring control blocks. */
/* Set up the event ring and producer pointer. */
bzero(sc->ti_rdata.ti_event_ring, TI_EVENT_RING_SZ);
rcb = &sc->ti_rdata.ti_info->ti_ev_rcb;
ti_hostaddr64(&rcb->ti_hostaddr, sc->ti_rdata.ti_event_ring_paddr);
rcb->ti_flags = 0;
ti_hostaddr64(&sc->ti_rdata.ti_info->ti_ev_prodidx_ptr,
sc->ti_rdata.ti_status_paddr +
offsetof(struct ti_status, ti_ev_prodidx_r));
sc->ti_ev_prodidx.ti_idx = 0;
CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, 0);
sc->ti_ev_saved_considx = 0;
/* Set up the command ring and producer mailbox. */
rcb = &sc->ti_rdata.ti_info->ti_cmd_rcb;
ti_hostaddr64(&rcb->ti_hostaddr, TI_GCR_NIC_ADDR(TI_GCR_CMDRING));
rcb->ti_flags = 0;
rcb->ti_max_len = 0;
for (i = 0; i < TI_CMD_RING_CNT; i++) {
CSR_WRITE_4(sc, TI_GCR_CMDRING + (i * 4), 0);
}
CSR_WRITE_4(sc, TI_GCR_CMDCONS_IDX, 0);
CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, 0);
sc->ti_cmd_saved_prodidx = 0;
/*
* Assign the address of the stats refresh buffer.
* We re-use the current stats buffer for this to
* conserve memory.
*/
bzero(&sc->ti_rdata.ti_info->ti_stats, sizeof(struct ti_stats));
ti_hostaddr64(&sc->ti_rdata.ti_info->ti_refresh_stats_ptr,
sc->ti_rdata.ti_info_paddr + offsetof(struct ti_gib, ti_stats));
/* Set up the standard receive ring. */
rcb = &sc->ti_rdata.ti_info->ti_std_rx_rcb;
ti_hostaddr64(&rcb->ti_hostaddr, sc->ti_rdata.ti_rx_std_ring_paddr);
rcb->ti_max_len = TI_FRAMELEN;
rcb->ti_flags = 0;
if (if_getcapenable(ifp) & IFCAP_RXCSUM)
rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING)
rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
/* Set up the jumbo receive ring. */
rcb = &sc->ti_rdata.ti_info->ti_jumbo_rx_rcb;
ti_hostaddr64(&rcb->ti_hostaddr, sc->ti_rdata.ti_rx_jumbo_ring_paddr);
#ifndef TI_SF_BUF_JUMBO
rcb->ti_max_len = MJUM9BYTES - ETHER_ALIGN;
rcb->ti_flags = 0;
#else
rcb->ti_max_len = PAGE_SIZE;
rcb->ti_flags = TI_RCB_FLAG_USE_EXT_RX_BD;
#endif
if (if_getcapenable(ifp) & IFCAP_RXCSUM)
rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING)
rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
/*
* Set up the mini ring. Only activated on the
* Tigon 2 but the slot in the config block is
* still there on the Tigon 1.
*/
rcb = &sc->ti_rdata.ti_info->ti_mini_rx_rcb;
ti_hostaddr64(&rcb->ti_hostaddr, sc->ti_rdata.ti_rx_mini_ring_paddr);
rcb->ti_max_len = MHLEN - ETHER_ALIGN;
if (sc->ti_hwrev == TI_HWREV_TIGON)
rcb->ti_flags = TI_RCB_FLAG_RING_DISABLED;
else
rcb->ti_flags = 0;
if (if_getcapenable(ifp) & IFCAP_RXCSUM)
rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING)
rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
/*
* Set up the receive return ring.
*/
rcb = &sc->ti_rdata.ti_info->ti_return_rcb;
ti_hostaddr64(&rcb->ti_hostaddr, sc->ti_rdata.ti_rx_return_ring_paddr);
rcb->ti_flags = 0;
rcb->ti_max_len = TI_RETURN_RING_CNT;
ti_hostaddr64(&sc->ti_rdata.ti_info->ti_return_prodidx_ptr,
sc->ti_rdata.ti_status_paddr +
offsetof(struct ti_status, ti_return_prodidx_r));
/*
* Set up the tx ring. Note: for the Tigon 2, we have the option
* of putting the transmit ring in the host's address space and
* letting the chip DMA it instead of leaving the ring in the NIC's
* memory and accessing it through the shared memory region. We
* do this for the Tigon 2, but it doesn't work on the Tigon 1,
* so we have to revert to the shared memory scheme if we detect
* a Tigon 1 chip.
*/
CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE);
if (sc->ti_rdata.ti_tx_ring != NULL)
bzero(sc->ti_rdata.ti_tx_ring, TI_TX_RING_SZ);
rcb = &sc->ti_rdata.ti_info->ti_tx_rcb;
if (sc->ti_hwrev == TI_HWREV_TIGON)
rcb->ti_flags = 0;
else
rcb->ti_flags = TI_RCB_FLAG_HOST_RING;
if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING)
rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
if (if_getcapenable(ifp) & IFCAP_TXCSUM)
rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
rcb->ti_max_len = TI_TX_RING_CNT;
if (sc->ti_hwrev == TI_HWREV_TIGON)
ti_hostaddr64(&rcb->ti_hostaddr, TI_TX_RING_BASE);
else
ti_hostaddr64(&rcb->ti_hostaddr,
sc->ti_rdata.ti_tx_ring_paddr);
ti_hostaddr64(&sc->ti_rdata.ti_info->ti_tx_considx_ptr,
sc->ti_rdata.ti_status_paddr +
offsetof(struct ti_status, ti_tx_considx_r));
bus_dmamap_sync(sc->ti_cdata.ti_gib_tag, sc->ti_cdata.ti_gib_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->ti_cdata.ti_status_tag, sc->ti_cdata.ti_status_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->ti_cdata.ti_event_ring_tag,
sc->ti_cdata.ti_event_ring_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
if (sc->ti_rdata.ti_tx_ring != NULL)
bus_dmamap_sync(sc->ti_cdata.ti_tx_ring_tag,
sc->ti_cdata.ti_tx_ring_map, BUS_DMASYNC_PREWRITE);
/* Set up tunables */
#if 0
if (if_getmtu(ifp) > ETHERMTU + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)
CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS,
(sc->ti_rx_coal_ticks / 10));
else
#endif
CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, sc->ti_rx_coal_ticks);
CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, sc->ti_tx_coal_ticks);
CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks);
CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, sc->ti_rx_max_coal_bds);
CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, sc->ti_tx_max_coal_bds);
CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, sc->ti_tx_buf_ratio);
/* Turn interrupts on. */
CSR_WRITE_4(sc, TI_GCR_MASK_INTRS, 0);
CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);
/* Start CPU. */
TI_CLRBIT(sc, TI_CPU_STATE, (TI_CPUSTATE_HALT|TI_CPUSTATE_STEP));
return (0);
}
/*
* Probe for a Tigon chip. Check the PCI vendor and device IDs
* against our list and return its name if we find a match.
*/
/*
 * Probe for a Tigon chip: walk the ti_devs vendor/device table and
 * claim the device with a default probe priority on a match.
 */
static int
ti_probe(device_t dev)
{
    const struct ti_type *t;
    uint16_t vid, did;

    /* Read the PCI ids once instead of on every comparison. */
    vid = pci_get_vendor(dev);
    did = pci_get_device(dev);
    for (t = ti_devs; t->ti_name != NULL; t++) {
        if (t->ti_vid == vid && t->ti_did == did) {
            device_set_desc(dev, t->ti_name);
            return (BUS_PROBE_DEFAULT);
        }
    }
    return (ENXIO);
}
/*
 * Attach routine: allocate softc resources, map the register BAR,
 * reset and probe the chip, read the station address from EEPROM,
 * set up DMA memory, the ifnet and ifmedia state, the /dev/ti%d
 * control device, and finally hook the interrupt.  On any failure
 * we fall through to "fail:" which calls ti_detach() to release
 * whatever was actually allocated.
 *
 * Note: the obsolete NULL check on if_alloc() has been dropped —
 * if_alloc(9) cannot fail (it sleeps for memory), and the removed
 * lines were diff residue that is not valid C.
 */
static int
ti_attach(device_t dev)
{
    if_t ifp;
    struct ti_softc *sc;
    int error = 0, rid;
    u_char eaddr[6];

    sc = device_get_softc(dev);
    sc->ti_dev = dev;

    mtx_init(&sc->ti_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
        MTX_DEF);
    callout_init_mtx(&sc->ti_watchdog, &sc->ti_mtx, 0);
    ifmedia_init(&sc->ifmedia, IFM_IMASK, ti_ifmedia_upd, ti_ifmedia_sts);
    ifp = sc->ti_ifp = if_alloc(IFT_ETHER);
    if_sethwassist(ifp, TI_CSUM_FEATURES);
    if_setcapabilities(ifp, IFCAP_TXCSUM | IFCAP_RXCSUM);
    if_setcapenable(ifp, if_getcapabilities(sc->ti_ifp));

    /*
     * Map control/status registers.
     */
    pci_enable_busmaster(dev);
    rid = PCIR_BAR(0);
    sc->ti_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
        RF_ACTIVE);
    if (sc->ti_res == NULL) {
        device_printf(dev, "couldn't map memory\n");
        error = ENXIO;
        goto fail;
    }
    sc->ti_btag = rman_get_bustag(sc->ti_res);
    sc->ti_bhandle = rman_get_bushandle(sc->ti_res);

    /* Allocate interrupt */
    rid = 0;
    sc->ti_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
        RF_SHAREABLE | RF_ACTIVE);
    if (sc->ti_irq == NULL) {
        device_printf(dev, "couldn't map interrupt\n");
        error = ENXIO;
        goto fail;
    }

    if (ti_chipinit(sc)) {
        device_printf(dev, "chip initialization failed\n");
        error = ENXIO;
        goto fail;
    }

    /* Zero out the NIC's on-board SRAM. */
    ti_mem_zero(sc, 0x2000, 0x100000 - 0x2000);

    /* Init again -- zeroing memory may have clobbered some registers. */
    if (ti_chipinit(sc)) {
        device_printf(dev, "chip initialization failed\n");
        error = ENXIO;
        goto fail;
    }

    /*
     * Get station address from the EEPROM. Note: the manual states
     * that the MAC address is at offset 0x8c, however the data is
     * stored as two longwords (since that's how it's loaded into
     * the NIC). This means the MAC address is actually preceded
     * by two zero bytes. We need to skip over those.
     */
    if (ti_read_eeprom(sc, eaddr, TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
        device_printf(dev, "failed to read station address\n");
        error = ENXIO;
        goto fail;
    }

    /* Allocate working area for memory dump. */
    sc->ti_membuf = malloc(sizeof(uint8_t) * TI_WINLEN, M_DEVBUF, M_NOWAIT);
    sc->ti_membuf2 = malloc(sizeof(uint8_t) * TI_WINLEN, M_DEVBUF,
        M_NOWAIT);
    if (sc->ti_membuf == NULL || sc->ti_membuf2 == NULL) {
        device_printf(dev, "cannot allocate memory buffer\n");
        error = ENOMEM;
        goto fail;
    }
    if ((error = ti_dma_alloc(sc)) != 0)
        goto fail;

    /*
     * We really need a better way to tell a 1000baseTX card
     * from a 1000baseSX one, since in theory there could be
     * OEMed 1000baseTX cards from lame vendors who aren't
     * clever enough to change the PCI ID. For the moment
     * though, the AceNIC is the only copper card available.
     */
    if (pci_get_vendor(dev) == ALT_VENDORID &&
        pci_get_device(dev) == ALT_DEVICEID_ACENIC_COPPER)
        sc->ti_copper = 1;
    /* Ok, it's not the only copper card available. */
    if (pci_get_vendor(dev) == NG_VENDORID &&
        pci_get_device(dev) == NG_DEVICEID_GA620T)
        sc->ti_copper = 1;

    /* Set default tunable values. */
    ti_sysctl_node(sc);

    /* Set up ifnet structure */
    if_setsoftc(ifp, sc);
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
    if_setioctlfn(ifp, ti_ioctl);
    if_setstartfn(ifp, ti_start);
    if_setinitfn(ifp, ti_init);
    if_setgetcounterfn(ifp, ti_get_counter);
    if_setbaudrate(ifp, IF_Gbps(1UL));
    if_setsendqlen(ifp, TI_TX_RING_CNT - 1);
    if_setsendqready(ifp);

    /* Set up ifmedia support. */
    if (sc->ti_copper) {
        /*
         * Copper cards allow manual 10/100 mode selection,
         * but not manual 1000baseTX mode selection. Why?
         * Because currently there's no way to specify the
         * master/slave setting through the firmware interface,
         * so Alteon decided to just bag it and handle it
         * via autonegotiation.
         */
        ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
        ifmedia_add(&sc->ifmedia,
            IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
        ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
        ifmedia_add(&sc->ifmedia,
            IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
        ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_T, 0, NULL);
        ifmedia_add(&sc->ifmedia,
            IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
    } else {
        /* Fiber cards don't support 10/100 modes. */
        ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
        ifmedia_add(&sc->ifmedia,
            IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
    }
    ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
    ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO);

    /*
     * We're assuming here that card initialization is a sequential
     * thing. If it isn't, multiple cards probing at the same time
     * could stomp on the list of softcs here.
     */

    /* Register the device */
    sc->dev = make_dev(&ti_cdevsw, device_get_unit(dev), UID_ROOT,
        GID_OPERATOR, 0600, "ti%d", device_get_unit(dev));
    sc->dev->si_drv1 = sc;

    /*
     * Call MI attach routine.
     */
    ether_ifattach(ifp, eaddr);

    /* VLAN capability setup. */
    if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM |
        IFCAP_VLAN_HWTAGGING, 0);
    if_setcapenable(ifp, if_getcapabilities(ifp));
    /* Tell the upper layer we support VLAN over-sized frames. */
    if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

    /* Driver supports link state tracking. */
    if_setcapabilitiesbit(ifp, IFCAP_LINKSTATE, 0);
    if_setcapenablebit(ifp, IFCAP_LINKSTATE, 0);

    /* Hook interrupt last to avoid having to lock softc */
    error = bus_setup_intr(dev, sc->ti_irq, INTR_TYPE_NET|INTR_MPSAFE,
        NULL, ti_intr, sc, &sc->ti_intrhand);

    if (error) {
        device_printf(dev, "couldn't set up irq\n");
        goto fail;
    }

fail:
    if (error)
        ti_detach(dev);

    return (error);
}
/*
* Shutdown hardware and free up resources. This can be called any
* time after the mutex has been initialized. It is called in both
* the error case in attach and the normal detach case so it needs
* to be careful about only freeing resources that have actually been
* allocated.
*/
static int
ti_detach(device_t dev)
{
struct ti_softc *sc;
if_t ifp;
sc = device_get_softc(dev);
/* Remove the /dev/ti%d control node created by ti_attach(), if any. */
if (sc->dev)
destroy_dev(sc->dev);
/* The mutex is initialized first thing in attach, so it must exist. */
KASSERT(mtx_initialized(&sc->ti_mtx), ("ti mutex not initialized"));
ifp = sc->ti_ifp;
/*
 * Only unhook from the network stack and stop the chip if the
 * ether_ifattach() in ti_attach() actually completed.
 */
if (device_is_attached(dev)) {
ether_ifdetach(ifp);
TI_LOCK(sc);
ti_stop(sc);
TI_UNLOCK(sc);
}
/* These should only be active if attach succeeded */
callout_drain(&sc->ti_watchdog);
bus_generic_detach(dev);
ti_dma_free(sc);
ifmedia_removeall(&sc->ifmedia);
/*
 * Each resource below is released only if it was allocated; this
 * function doubles as the error-unwind path for ti_attach().
 */
if (sc->ti_intrhand)
bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand);
if (sc->ti_irq)
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq);
if (sc->ti_res) {
bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
sc->ti_res);
}
if (ifp)
if_free(ifp);
if (sc->ti_membuf)
free(sc->ti_membuf, M_DEVBUF);
if (sc->ti_membuf2)
free(sc->ti_membuf2, M_DEVBUF);
/* Destroy the lock last, after everything that might take it. */
mtx_destroy(&sc->ti_mtx);
return (0);
}
#ifdef TI_JUMBO_HDRSPLIT
/*
* If hdr_len is 0, that means that header splitting wasn't done on
* this packet for some reason. The two most likely reasons are that
* the protocol isn't a supported protocol for splitting, or this
* packet had a fragment offset that wasn't 0.
*
* The header length, if it is non-zero, will always be the length of
* the headers on the packet, but that length could be longer than the
* first mbuf. So we take the minimum of the two as the actual
* length.
*/
static __inline void
ti_hdr_split(struct mbuf *top, int hdr_len, int pkt_len, int idx)
{
int i = 0;
int lengths[4] = {0, 0, 0, 0};
struct mbuf *m, *mp;
/* Trim the first mbuf down to just the protocol headers, if split. */
if (hdr_len != 0)
top->m_len = min(hdr_len, top->m_len);
pkt_len -= top->m_len;
lengths[i++] = top->m_len;
mp = top;
/*
 * Walk the chain, clamping each mbuf to the bytes remaining; mp
 * trails one behind m so we can terminate the chain afterwards.
 * NOTE(review): ext_size is shrunk alongside m_len here — assumes
 * the pages were attached per-mbuf by ti_newbuf_jumbo().
 */
for (m = top->m_next; m && pkt_len; m = m->m_next) {
m->m_len = m->m_ext.ext_size = min(m->m_len, pkt_len);
pkt_len -= m->m_len;
lengths[i++] = m->m_len;
mp = m;
}
#if 0
if (hdr_len != 0)
printf("got split packet: ");
else
printf("got non-split packet: ");
printf("%d,%d,%d,%d = %d\n", lengths[0],
lengths[1], lengths[2], lengths[3],
lengths[0] + lengths[1] + lengths[2] +
lengths[3]);
#endif
/* All of pkt_len must have been consumed by the chain. */
if (pkt_len)
panic("header splitting didn't");
/* Free any surplus mbufs past the packet and cut them loose. */
if (m) {
m_freem(m);
mp->m_next = NULL;
}
if (mp->m_next != NULL)
panic("ti_hdr_split: last mbuf in chain should be null");
}
#endif /* TI_JUMBO_HDRSPLIT */
/*
 * Recycle a standard-ring receive descriptor in place: reinitialize
 * slot i so the chip can reuse the already-attached mbuf cluster.
 */
static void
ti_discard_std(struct ti_softc *sc, int i)
{
    struct ti_rx_desc *desc;

    desc = &sc->ti_rdata.ti_rx_std_ring[i];
    desc->ti_type = TI_BDTYPE_RECV_BD;
    desc->ti_len = MCLBYTES - ETHER_ALIGN;
    desc->ti_idx = i;
    desc->ti_vlan_tag = 0;
    desc->ti_tcp_udp_cksum = 0;
    /* Ask the NIC for checksum offload only when RXCSUM is enabled. */
    if ((if_getcapenable(sc->ti_ifp) & IFCAP_RXCSUM) != 0)
        desc->ti_flags = TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
    else
        desc->ti_flags = 0;
}
/*
 * Recycle a mini-ring receive descriptor in place: reinitialize
 * slot i so the chip can reuse the already-attached small mbuf.
 */
static void
ti_discard_mini(struct ti_softc *sc, int i)
{
    struct ti_rx_desc *desc;

    desc = &sc->ti_rdata.ti_rx_mini_ring[i];
    desc->ti_type = TI_BDTYPE_RECV_BD;
    desc->ti_len = MHLEN - ETHER_ALIGN;
    desc->ti_idx = i;
    desc->ti_vlan_tag = 0;
    desc->ti_tcp_udp_cksum = 0;
    /* Mini-ring flag always set; add csum offload bits if enabled. */
    if ((if_getcapenable(sc->ti_ifp) & IFCAP_RXCSUM) != 0)
        desc->ti_flags = TI_BDFLAG_MINI_RING |
            TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
    else
        desc->ti_flags = TI_BDFLAG_MINI_RING;
}
#ifndef TI_SF_BUF_JUMBO
/*
 * Recycle a jumbo-ring receive descriptor in place: reinitialize
 * slot i so the chip can reuse the already-attached 9k cluster.
 */
static void
ti_discard_jumbo(struct ti_softc *sc, int i)
{
    struct ti_rx_desc *desc;

    desc = &sc->ti_rdata.ti_rx_jumbo_ring[i];
    desc->ti_type = TI_BDTYPE_RECV_JUMBO_BD;
    desc->ti_len = MJUM9BYTES - ETHER_ALIGN;
    desc->ti_idx = i;
    desc->ti_vlan_tag = 0;
    desc->ti_tcp_udp_cksum = 0;
    /* Jumbo-ring flag always set; add csum offload bits if enabled. */
    if ((if_getcapenable(sc->ti_ifp) & IFCAP_RXCSUM) != 0)
        desc->ti_flags = TI_BDFLAG_JUMBO_RING |
            TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
    else
        desc->ti_flags = TI_BDFLAG_JUMBO_RING;
}
#endif
/*
* Frame reception handling. This is called if there's a frame
* on the receive return list.
*
* Note: we have to be able to handle three possibilities here:
* 1) the frame is from the mini receive ring (can only happen)
* on Tigon 2 boards)
* 2) the frame is from the jumbo receive ring
* 3) the frame is from the standard receive ring
*/
static void
ti_rxeof(struct ti_softc *sc)
{
if_t ifp;
#ifdef TI_SF_BUF_JUMBO
bus_dmamap_t map;
#endif
/* NOTE(review): cmd appears unused here directly — presumably the
 * TI_UPDATE_*PROD macros below expand to a firmware command using
 * it on Tigon 1; verify against the macro definitions. */
struct ti_cmd_desc cmd;
int jumbocnt, minicnt, stdcnt, ti_len;
TI_LOCK_ASSERT(sc);
ifp = sc->ti_ifp;
/* Sync the rings we may touch before reading chip-produced data. */
bus_dmamap_sync(sc->ti_cdata.ti_rx_std_ring_tag,
sc->ti_cdata.ti_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
/* The jumbo ring is only in play when the MTU requires jumbo frames. */
if (if_getmtu(ifp) > ETHERMTU + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)
bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_ring_tag,
sc->ti_cdata.ti_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
/* Mini ring exists only on Tigon 2 (NULL otherwise). */
if (sc->ti_rdata.ti_rx_mini_ring != NULL)
bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_ring_tag,
sc->ti_cdata.ti_rx_mini_ring_map, BUS_DMASYNC_POSTWRITE);
bus_dmamap_sync(sc->ti_cdata.ti_rx_return_ring_tag,
sc->ti_cdata.ti_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
jumbocnt = minicnt = stdcnt = 0;
/* Drain the return ring up to the chip's producer index. */
while (sc->ti_rx_saved_considx != sc->ti_return_prodidx.ti_idx) {
struct ti_rx_desc *cur_rx;
uint32_t rxidx;
struct mbuf *m = NULL;
uint16_t vlan_tag = 0;
int have_tag = 0;
cur_rx =
&sc->ti_rdata.ti_rx_return_ring[sc->ti_rx_saved_considx];
rxidx = cur_rx->ti_idx;
ti_len = cur_rx->ti_len;
TI_INC(sc->ti_rx_saved_considx, TI_RETURN_RING_CNT);
if (cur_rx->ti_flags & TI_BDFLAG_VLAN_TAG) {
have_tag = 1;
vlan_tag = cur_rx->ti_vlan_tag;
}
/* Dispatch on origin ring: jumbo, mini, or standard. */
if (cur_rx->ti_flags & TI_BDFLAG_JUMBO_RING) {
jumbocnt++;
TI_INC(sc->ti_jumbo, TI_JUMBO_RX_RING_CNT);
m = sc->ti_cdata.ti_rx_jumbo_chain[rxidx];
#ifndef TI_SF_BUF_JUMBO
/* On error or allocation failure, recycle the slot in place. */
if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
ti_discard_jumbo(sc, rxidx);
continue;
}
if (ti_newbuf_jumbo(sc, rxidx, NULL) != 0) {
if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
ti_discard_jumbo(sc, rxidx);
continue;
}
m->m_len = ti_len;
#else /* !TI_SF_BUF_JUMBO */
/* sf_buf path: unload the old map before handing the mbuf up. */
sc->ti_cdata.ti_rx_jumbo_chain[rxidx] = NULL;
map = sc->ti_cdata.ti_rx_jumbo_maps[rxidx];
bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_tag, map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->ti_cdata.ti_rx_jumbo_tag, map);
if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
ti_newbuf_jumbo(sc, sc->ti_jumbo, m);
continue;
}
if (ti_newbuf_jumbo(sc, sc->ti_jumbo, NULL) == ENOBUFS) {
if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
ti_newbuf_jumbo(sc, sc->ti_jumbo, m);
continue;
}
#ifdef TI_JUMBO_HDRSPLIT
/* NOTE(review): TI_HOSTADDR(cur_rx->ti_addr) is passed as the
 * header length — presumably the firmware stores it there in
 * header-split mode; confirm against the Tigon firmware docs. */
if (sc->ti_hdrsplit)
ti_hdr_split(m, TI_HOSTADDR(cur_rx->ti_addr),
ti_len, rxidx);
else
#endif /* TI_JUMBO_HDRSPLIT */
m_adj(m, ti_len - m->m_pkthdr.len);
#endif /* TI_SF_BUF_JUMBO */
} else if (cur_rx->ti_flags & TI_BDFLAG_MINI_RING) {
minicnt++;
TI_INC(sc->ti_mini, TI_MINI_RX_RING_CNT);
m = sc->ti_cdata.ti_rx_mini_chain[rxidx];
if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
ti_discard_mini(sc, rxidx);
continue;
}
if (ti_newbuf_mini(sc, rxidx) != 0) {
if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
ti_discard_mini(sc, rxidx);
continue;
}
m->m_len = ti_len;
} else {
stdcnt++;
TI_INC(sc->ti_std, TI_STD_RX_RING_CNT);
m = sc->ti_cdata.ti_rx_std_chain[rxidx];
if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
ti_discard_std(sc, rxidx);
continue;
}
if (ti_newbuf_std(sc, rxidx) != 0) {
if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
ti_discard_std(sc, rxidx);
continue;
}
m->m_len = ti_len;
}
m->m_pkthdr.len = ti_len;
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
m->m_pkthdr.rcvif = ifp;
/* Translate the chip's checksum results into mbuf csum flags. */
if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
if (cur_rx->ti_flags & TI_BDFLAG_IP_CKSUM) {
m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
if ((cur_rx->ti_ip_cksum ^ 0xffff) == 0)
m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
}
if (cur_rx->ti_flags & TI_BDFLAG_TCP_UDP_CKSUM) {
m->m_pkthdr.csum_data =
cur_rx->ti_tcp_udp_cksum;
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
}
}
/*
* If we received a packet with a vlan tag,
* tag it before passing the packet upward.
*/
if (have_tag) {
m->m_pkthdr.ether_vtag = vlan_tag;
m->m_flags |= M_VLANTAG;
}
/* Drop the lock across if_input() to avoid LOR with the stack. */
TI_UNLOCK(sc);
if_input(ifp, m);
TI_LOCK(sc);
}
bus_dmamap_sync(sc->ti_cdata.ti_rx_return_ring_tag,
sc->ti_cdata.ti_rx_return_ring_map, BUS_DMASYNC_PREREAD);
/* Only necessary on the Tigon 1. */
if (sc->ti_hwrev == TI_HWREV_TIGON)
CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX,
sc->ti_rx_saved_considx);
/* Push updated producer indices only for rings we actually refilled. */
if (stdcnt > 0) {
bus_dmamap_sync(sc->ti_cdata.ti_rx_std_ring_tag,
sc->ti_cdata.ti_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
TI_UPDATE_STDPROD(sc, sc->ti_std);
}
if (minicnt > 0) {
bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_ring_tag,
sc->ti_cdata.ti_rx_mini_ring_map, BUS_DMASYNC_PREWRITE);
TI_UPDATE_MINIPROD(sc, sc->ti_mini);
}
if (jumbocnt > 0) {
bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_ring_tag,
sc->ti_cdata.ti_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
TI_UPDATE_JUMBOPROD(sc, sc->ti_jumbo);
}
}
/*
 * Transmit completion: reclaim descriptors and mbufs for frames the
 * chip has finished sending, up to the consumer index it reported.
 */
static void
ti_txeof(struct ti_softc *sc)
{
struct ti_txdesc *txd;
struct ti_tx_desc txdesc;
struct ti_tx_desc *cur_tx = NULL;
if_t ifp;
int idx;
ifp = sc->ti_ifp;
/* Nothing in flight: nothing to reclaim. */
txd = STAILQ_FIRST(&sc->ti_cdata.ti_txbusyq);
if (txd == NULL)
return;
/* Host-resident tx ring (Tigon 2 only) must be synced before reading. */
if (sc->ti_rdata.ti_tx_ring != NULL)
bus_dmamap_sync(sc->ti_cdata.ti_tx_ring_tag,
sc->ti_cdata.ti_tx_ring_map, BUS_DMASYNC_POSTWRITE);
/*
* Go through our tx ring and free mbufs for those
* frames that have been sent.
*/
for (idx = sc->ti_tx_saved_considx; idx != sc->ti_tx_considx.ti_idx;
TI_INC(idx, TI_TX_RING_CNT)) {
/* Tigon 1 keeps the ring in NIC SRAM; read it back by hand. */
if (sc->ti_hwrev == TI_HWREV_TIGON) {
ti_mem_read(sc, TI_TX_RING_BASE + idx * sizeof(txdesc),
sizeof(txdesc), &txdesc);
cur_tx = &txdesc;
} else
cur_tx = &sc->ti_rdata.ti_tx_ring[idx];
sc->ti_txcnt--;
if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
/* Only the descriptor carrying TI_BDFLAG_END owns the mbuf. */
if ((cur_tx->ti_flags & TI_BDFLAG_END) == 0)
continue;
bus_dmamap_sync(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap);
if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
m_freem(txd->tx_m);
txd->tx_m = NULL;
/* Move the soft descriptor from the busy queue to the free queue. */
STAILQ_REMOVE_HEAD(&sc->ti_cdata.ti_txbusyq, tx_q);
STAILQ_INSERT_TAIL(&sc->ti_cdata.ti_txfreeq, txd, tx_q);
txd = STAILQ_FIRST(&sc->ti_cdata.ti_txbusyq);
}
sc->ti_tx_saved_considx = idx;
/* Ring fully drained: disarm the watchdog. */
if (sc->ti_txcnt == 0)
sc->ti_timer = 0;
}
/*
 * Interrupt handler: ack the interrupt, service RX/TX completions and
 * firmware events, then re-enable interrupts if the interface is up.
 */
static void
ti_intr(void *xsc)
{
struct ti_softc *sc;
if_t ifp;
sc = xsc;
TI_LOCK(sc);
ifp = sc->ti_ifp;
/* Make sure this is really our interrupt. */
if (!(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_INTSTATE)) {
TI_UNLOCK(sc);
return;
}
/* Ack interrupt and stop others from occurring. */
CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
/* Sync the shared status block before consuming indices from it. */
bus_dmamap_sync(sc->ti_cdata.ti_status_tag,
sc->ti_cdata.ti_status_map, BUS_DMASYNC_POSTREAD);
/* Check RX return ring producer/consumer */
ti_rxeof(sc);
/* Check TX ring producer/consumer */
ti_txeof(sc);
bus_dmamap_sync(sc->ti_cdata.ti_status_tag,
sc->ti_cdata.ti_status_map, BUS_DMASYNC_PREREAD);
}
ti_handle_events(sc);
/* Re-check RUNNING: ti_handle_events() may have changed state. */
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
/* Re-enable interrupts. */
CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);
/* Kick the transmitter if frames queued up while we serviced. */
if (!if_sendq_empty(ifp))
ti_start_locked(ifp);
}
TI_UNLOCK(sc);
}
/*
 * if_get_counter method: collisions are summed from the four dot3
 * collision counters in the firmware-maintained stats block; all
 * other counters fall through to the stack's defaults.
 */
static uint64_t
ti_get_counter(if_t ifp, ift_counter cnt)
{
switch (cnt) {
case IFCOUNTER_COLLISIONS:
{
struct ti_softc *sc;
struct ti_stats *s;
uint64_t rv;
sc = if_getsoftc(ifp);
s = &sc->ti_rdata.ti_info->ti_stats;
TI_LOCK(sc);
/* The stats live in DMA'd memory; sync before and after reading. */
bus_dmamap_sync(sc->ti_cdata.ti_gib_tag,
sc->ti_cdata.ti_gib_map, BUS_DMASYNC_POSTREAD);
rv = s->dot3StatsSingleCollisionFrames +
s->dot3StatsMultipleCollisionFrames +
s->dot3StatsExcessiveCollisions +
s->dot3StatsLateCollisions;
bus_dmamap_sync(sc->ti_cdata.ti_gib_tag,
sc->ti_cdata.ti_gib_map, BUS_DMASYNC_PREREAD);
TI_UNLOCK(sc);
return (rv);
}
default:
return (if_get_counter_default(ifp, cnt));
}
}
/*
* Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
* pointers to descriptors.
*/
static int
ti_encap(struct ti_softc *sc, struct mbuf **m_head)
{
struct ti_txdesc *txd;
struct ti_tx_desc *f;
struct ti_tx_desc txdesc;
struct mbuf *m;
bus_dma_segment_t txsegs[TI_MAXTXSEGS];
uint16_t csum_flags;
int error, frag, i, nseg;
/* No free soft descriptors: caller must back off and retry later. */
if ((txd = STAILQ_FIRST(&sc->ti_cdata.ti_txfreeq)) == NULL)
return (ENOBUFS);
error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap,
*m_head, txsegs, &nseg, 0);
/* Too many segments: defrag into fewer mbufs and retry the load once. */
if (error == EFBIG) {
m = m_defrag(*m_head, M_NOWAIT);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;
return (ENOMEM);
}
*m_head = m;
error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_tx_tag,
txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
if (error) {
m_freem(*m_head);
*m_head = NULL;
return (error);
}
} else if (error != 0)
return (error);
if (nseg == 0) {
m_freem(*m_head);
*m_head = NULL;
return (EIO);
}
/* Not enough ring slots: unload and let the caller requeue the mbuf. */
if (sc->ti_txcnt + nseg >= TI_TX_RING_CNT) {
bus_dmamap_unload(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap);
return (ENOBUFS);
}
bus_dmamap_sync(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap,
BUS_DMASYNC_PREWRITE);
m = *m_head;
/* Translate mbuf csum requests into descriptor offload flags. */
csum_flags = 0;
if (m->m_pkthdr.csum_flags & CSUM_IP)
csum_flags |= TI_BDFLAG_IP_CKSUM;
if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
csum_flags |= TI_BDFLAG_TCP_UDP_CKSUM;
frag = sc->ti_tx_saved_prodidx;
/* Fill one descriptor per DMA segment. */
for (i = 0; i < nseg; i++) {
/* Tigon 1: ring is in NIC SRAM; build in txdesc and copy out. */
if (sc->ti_hwrev == TI_HWREV_TIGON) {
bzero(&txdesc, sizeof(txdesc));
f = &txdesc;
} else
f = &sc->ti_rdata.ti_tx_ring[frag];
ti_hostaddr64(&f->ti_addr, txsegs[i].ds_addr);
f->ti_len = txsegs[i].ds_len;
f->ti_flags = csum_flags;
if (m->m_flags & M_VLANTAG) {
f->ti_flags |= TI_BDFLAG_VLAN_TAG;
f->ti_vlan_tag = m->m_pkthdr.ether_vtag;
} else {
f->ti_vlan_tag = 0;
}
if (sc->ti_hwrev == TI_HWREV_TIGON)
ti_mem_write(sc, TI_TX_RING_BASE + frag *
sizeof(txdesc), sizeof(txdesc), &txdesc);
TI_INC(frag, TI_TX_RING_CNT);
}
sc->ti_tx_saved_prodidx = frag;
/* set TI_BDFLAG_END on the last descriptor */
/* Step frag back to the last descriptor written (modular -1). */
frag = (frag + TI_TX_RING_CNT - 1) % TI_TX_RING_CNT;
if (sc->ti_hwrev == TI_HWREV_TIGON) {
/* txdesc still holds the last descriptor's contents here. */
txdesc.ti_flags |= TI_BDFLAG_END;
ti_mem_write(sc, TI_TX_RING_BASE + frag * sizeof(txdesc),
sizeof(txdesc), &txdesc);
} else
sc->ti_rdata.ti_tx_ring[frag].ti_flags |= TI_BDFLAG_END;
/* Commit: move the soft descriptor to the busy queue. */
STAILQ_REMOVE_HEAD(&sc->ti_cdata.ti_txfreeq, tx_q);
STAILQ_INSERT_TAIL(&sc->ti_cdata.ti_txbusyq, txd, tx_q);
txd->tx_m = m;
sc->ti_txcnt += nseg;
return (0);
}
/*
 * if_start method: take the softc lock and hand off to the
 * locked transmit path.
 */
static void
ti_start(if_t ifp)
{
    struct ti_softc *sc = if_getsoftc(ifp);

    TI_LOCK(sc);
    ti_start_locked(ifp);
    TI_UNLOCK(sc);
}
/*
* Main transmit routine. To avoid having to do mbuf copies, we put pointers
* to the mbuf data regions directly in the transmit descriptors.
*/
static void
ti_start_locked(if_t ifp)
{
struct ti_softc *sc;
struct mbuf *m_head = NULL;
int enq = 0;
sc = if_getsoftc(ifp);
/* Dequeue while frames remain and at least 16 ring slots are spare. */
for (; !if_sendq_empty(ifp) &&
sc->ti_txcnt < (TI_TX_RING_CNT - 16);) {
m_head = if_dequeue(ifp);
if (m_head == NULL)
break;
/*
* Pack the data into the transmit ring. If we
* don't have room, set the OACTIVE flag and wait
* for the NIC to drain the ring.
*/
if (ti_encap(sc, &m_head)) {
/* m_head == NULL means ti_encap() already freed the mbuf. */
if (m_head == NULL)
break;
if_sendq_prepend(ifp, m_head);
if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
break;
}
enq++;
/*
* If there's a BPF listener, bounce a copy of this frame
* to him.
*/
ETHER_BPF_MTAP(ifp, m_head);
}
/* Only touch hardware if we actually enqueued something. */
if (enq > 0) {
if (sc->ti_rdata.ti_tx_ring != NULL)
bus_dmamap_sync(sc->ti_cdata.ti_tx_ring_tag,
sc->ti_cdata.ti_tx_ring_map, BUS_DMASYNC_PREWRITE);
/* Transmit */
CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, sc->ti_tx_saved_prodidx);
/*
* Set a timeout in case the chip goes out to lunch.
*/
sc->ti_timer = 5;
}
}
/*
 * if_init method: take the softc lock and run the locked
 * initialization path.
 */
static void
ti_init(void *xsc)
{
    struct ti_softc *sc = xsc;

    TI_LOCK(sc);
    ti_init_locked(sc);
    TI_UNLOCK(sc);
}
/*
 * Locked initialization: stop any pending activity, then load the
 * general info block and firmware.  The remainder of bring-up
 * continues in ti_init2() once the firmware signals readiness.
 */
static void
ti_init_locked(void *xsc)
{
    struct ti_softc *sc = xsc;

    /* Nothing to do if the interface is already running. */
    if ((if_getdrvflags(sc->ti_ifp) & IFF_DRV_RUNNING) != 0)
        return;

    /* Cancel pending I/O and flush buffers. */
    ti_stop(sc);

    /* Init the gen info block, ring control blocks and firmware. */
    if (ti_gibinit(sc) != 0)
        device_printf(sc->ti_dev, "initialization failure\n");
}
/*
 * Second-stage initialization, run once the firmware is up: program
 * MTU, MAC address, filters and the receive rings via firmware
 * commands, then mark the interface running and set the media.
 */
static void ti_init2(struct ti_softc *sc)
{
struct ti_cmd_desc cmd;
if_t ifp;
uint8_t *ea;
struct ifmedia *ifm;
int tmp;
TI_LOCK_ASSERT(sc);
ifp = sc->ti_ifp;
/* Specify MTU and interface index. */
CSR_WRITE_4(sc, TI_GCR_IFINDEX, device_get_unit(sc->ti_dev));
CSR_WRITE_4(sc, TI_GCR_IFMTU, if_getmtu(ifp) +
ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
TI_DO_CMD(TI_CMD_UPDATE_GENCOM, 0, 0);
/* Load our MAC address. */
/* The 6-byte address is split across two 32-bit registers, high first. */
ea = if_getlladdr(sc->ti_ifp);
CSR_WRITE_4(sc, TI_GCR_PAR0, (ea[0] << 8) | ea[1]);
CSR_WRITE_4(sc, TI_GCR_PAR1,
(ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5]);
TI_DO_CMD(TI_CMD_SET_MAC_ADDR, 0, 0);
/* Enable or disable promiscuous mode as needed. */
if (if_getflags(ifp) & IFF_PROMISC) {
TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_ENB, 0);
} else {
TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_DIS, 0);
}
/* Program multicast filter. */
ti_setmulti(sc);
/*
* If this is a Tigon 1, we should tell the
* firmware to use software packet filtering.
*/
if (sc->ti_hwrev == TI_HWREV_TIGON) {
TI_DO_CMD(TI_CMD_FDR_FILTERING, TI_CMD_CODE_FILT_ENB, 0);
}
/* Init RX ring. */
if (ti_init_rx_ring_std(sc) != 0) {
/* XXX */
device_printf(sc->ti_dev, "no memory for std Rx buffers.\n");
return;
}
/* Init jumbo RX ring. */
/* Only needed when the configured MTU requires jumbo frames. */
if (if_getmtu(ifp) > ETHERMTU + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) {
if (ti_init_rx_ring_jumbo(sc) != 0) {
/* XXX */
device_printf(sc->ti_dev,
"no memory for jumbo Rx buffers.\n");
return;
}
}
/*
* If this is a Tigon 2, we can also configure the
* mini ring.
*/
if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
if (ti_init_rx_ring_mini(sc) != 0) {
/* XXX */
device_printf(sc->ti_dev,
"no memory for mini Rx buffers.\n");
return;
}
}
CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 0);
sc->ti_rx_saved_considx = 0;
/* Init TX ring. */
ti_init_tx_ring(sc);
/* Tell firmware we're alive. */
TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_UP, 0);
/* Enable host interrupts. */
CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);
if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
callout_reset(&sc->ti_watchdog, hz, ti_watchdog, sc);
/*
* Make sure to set media properly. We have to do this
* here since we have to issue commands in order to set
* the link negotiation and we can't issue commands until
* the firmware is running.
*/
/* Temporarily substitute the current media so upd uses it, then restore. */
ifm = &sc->ifmedia;
tmp = ifm->ifm_media;
ifm->ifm_media = ifm->ifm_cur->ifm_media;
ti_ifmedia_upd_locked(sc);
ifm->ifm_media = tmp;
}
/*
* Set media options.
*/
/*
 * ifmedia "set media" entry point: take the softc lock and defer
 * to the locked implementation.
 */
static int
ti_ifmedia_upd(if_t ifp)
{
    struct ti_softc *sc = if_getsoftc(ifp);
    int error;

    TI_LOCK(sc);
    error = ti_ifmedia_upd_locked(sc);
    TI_UNLOCK(sc);

    return (error);
}
/*
 * Program the gigabit and 10/100 link-control registers for the
 * requested media, then ask the firmware to negotiate.  Flow control
 * is receive-only throughout; the transmit side is disabled because
 * it hangs both Tigon revisions (see the comments below).
 */
static int
ti_ifmedia_upd_locked(struct ti_softc *sc)
{
struct ifmedia *ifm;
struct ti_cmd_desc cmd;
uint32_t flowctl;
ifm = &sc->ifmedia;
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
return (EINVAL);
flowctl = 0;
switch (IFM_SUBTYPE(ifm->ifm_media)) {
case IFM_AUTO:
/*
* Transmit flow control doesn't work on the Tigon 1.
*/
flowctl = TI_GLNK_RX_FLOWCTL_Y;
/*
* Transmit flow control can also cause problems on the
* Tigon 2, apparently with both the copper and fiber
* boards. The symptom is that the interface will just
* hang. This was reproduced with Alteon 180 switches.
*/
#if 0
if (sc->ti_hwrev != TI_HWREV_TIGON)
flowctl |= TI_GLNK_TX_FLOWCTL_Y;
#endif
/* Advertise everything on both link registers for autoneg. */
CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
TI_GLNK_FULL_DUPLEX| flowctl |
TI_GLNK_AUTONEGENB|TI_GLNK_ENB);
flowctl = TI_LNK_RX_FLOWCTL_Y;
#if 0
if (sc->ti_hwrev != TI_HWREV_TIGON)
flowctl |= TI_LNK_TX_FLOWCTL_Y;
#endif
CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_100MB|TI_LNK_10MB|
TI_LNK_FULL_DUPLEX|TI_LNK_HALF_DUPLEX| flowctl |
TI_LNK_AUTONEGENB|TI_LNK_ENB);
TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
TI_CMD_CODE_NEGOTIATE_BOTH, 0);
break;
case IFM_1000_SX:
case IFM_1000_T:
/* Fixed gigabit: enable only the gigabit link register. */
flowctl = TI_GLNK_RX_FLOWCTL_Y;
#if 0
if (sc->ti_hwrev != TI_HWREV_TIGON)
flowctl |= TI_GLNK_TX_FLOWCTL_Y;
#endif
CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
flowctl |TI_GLNK_ENB);
CSR_WRITE_4(sc, TI_GCR_LINK, 0);
if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
TI_SETBIT(sc, TI_GCR_GLINK, TI_GLNK_FULL_DUPLEX);
}
TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
TI_CMD_CODE_NEGOTIATE_GIGABIT, 0);
break;
case IFM_100_FX:
case IFM_10_FL:
case IFM_100_TX:
case IFM_10_T:
/* Fixed 10/100: enable only the 10/100 link register. */
flowctl = TI_LNK_RX_FLOWCTL_Y;
#if 0
if (sc->ti_hwrev != TI_HWREV_TIGON)
flowctl |= TI_LNK_TX_FLOWCTL_Y;
#endif
CSR_WRITE_4(sc, TI_GCR_GLINK, 0);
CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_ENB|TI_LNK_PREF|flowctl);
if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX ||
IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_100MB);
} else {
TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_10MB);
}
if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_FULL_DUPLEX);
} else {
TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_HALF_DUPLEX);
}
TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
TI_CMD_CODE_NEGOTIATE_10_100, 0);
break;
}
return (0);
}
/*
* Report current media status.
*/
/*
 * Report current media status from the link state the firmware
 * reported via events, plus the live link-status registers.
 */
static void
ti_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
struct ti_softc *sc;
uint32_t media = 0;
sc = if_getsoftc(ifp);
TI_LOCK(sc);
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
/* Link down: report only "valid, ethernet" with no active subtype. */
if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) {
TI_UNLOCK(sc);
return;
}
ifmr->ifm_status |= IFM_ACTIVE;
if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) {
/* Gigabit link: subtype depends on copper vs. fiber board. */
media = CSR_READ_4(sc, TI_GCR_GLINK_STAT);
if (sc->ti_copper)
ifmr->ifm_active |= IFM_1000_T;
else
ifmr->ifm_active |= IFM_1000_SX;
if (media & TI_GLNK_FULL_DUPLEX)
ifmr->ifm_active |= IFM_FDX;
else
ifmr->ifm_active |= IFM_HDX;
} else if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) {
/* 10/100 link: speed bits come from the 10/100 status register. */
media = CSR_READ_4(sc, TI_GCR_LINK_STAT);
if (sc->ti_copper) {
if (media & TI_LNK_100MB)
ifmr->ifm_active |= IFM_100_TX;
if (media & TI_LNK_10MB)
ifmr->ifm_active |= IFM_10_T;
} else {
if (media & TI_LNK_100MB)
ifmr->ifm_active |= IFM_100_FX;
if (media & TI_LNK_10MB)
ifmr->ifm_active |= IFM_10_FL;
}
if (media & TI_LNK_FULL_DUPLEX)
ifmr->ifm_active |= IFM_FDX;
if (media & TI_LNK_HALF_DUPLEX)
ifmr->ifm_active |= IFM_HDX;
}
TI_UNLOCK(sc);
}
/*
 * Standard ifnet ioctl handler: MTU changes, interface flags,
 * multicast list updates, media selection and capability toggles.
 * Returns 0 on success or an errno.
 */
static int
ti_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct ti_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *) data;
	struct ti_cmd_desc cmd;		/* scratch descriptor used by TI_DO_CMD() */
	int mask, error = 0;

	switch (command) {
	case SIOCSIFMTU:
		TI_LOCK(sc);
		/* Accept anything from ETHERMIN up to the jumbo limit. */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > TI_JUMBO_MTU)
			error = EINVAL;
		else {
			if_setmtu(ifp, ifr->ifr_mtu);
			/* Re-init so the new MTU takes effect in hardware. */
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				ti_init_locked(sc);
			}
		}
		TI_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		TI_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
			    if_getflags(ifp) & IFF_PROMISC &&
			    !(sc->ti_if_flags & IFF_PROMISC)) {
				TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
				    TI_CMD_CODE_PROMISC_ENB, 0);
			} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
			    !(if_getflags(ifp) & IFF_PROMISC) &&
			    sc->ti_if_flags & IFF_PROMISC) {
				TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
				    TI_CMD_CODE_PROMISC_DIS, 0);
			} else
				ti_init_locked(sc);
		} else {
			/* Interface going down: stop the chip. */
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				ti_stop(sc);
			}
		}
		/* Remember flags so the next call can detect deltas. */
		sc->ti_if_flags = if_getflags(ifp);
		TI_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		TI_LOCK(sc);
		/* Reprogram the multicast filter only while running. */
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			ti_setmulti(sc);
		TI_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		break;
	case SIOCSIFCAP:
		TI_LOCK(sc);
		/* Bits the caller wants toggled relative to current state. */
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
			if_togglecapenable(ifp, IFCAP_TXCSUM);
			/* Keep hwassist in sync with the TXCSUM toggle. */
			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
				if_sethwassistbits(ifp, TI_CSUM_FEATURES, 0);
			else
				if_sethwassistbits(ifp, 0, TI_CSUM_FEATURES);
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM) != 0)
			if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
		/* Checksum/VLAN-tag changes require a chip re-init. */
		if ((mask & (IFCAP_TXCSUM | IFCAP_RXCSUM |
		    IFCAP_VLAN_HWTAGGING)) != 0) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				ti_init_locked(sc);
			}
		}
		TI_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*
 * Character-device open: flag that a debugger is attached so the
 * watchdog skips resets while the chip may be halted.
 */
static int
ti_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct ti_softc *sc = dev->si_drv1;

	if (sc == NULL)
		return (ENODEV);

	TI_LOCK(sc);
	sc->ti_flags |= TI_FLAG_DEBUGING;
	TI_UNLOCK(sc);

	return (0);
}
/*
 * Character-device close: clear the debugging flag set by ti_open().
 */
static int
ti_close(struct cdev *dev, int flag, int fmt, struct thread *td)
{
	struct ti_softc *sc = dev->si_drv1;

	if (sc == NULL)
		return (ENODEV);

	TI_LOCK(sc);
	sc->ti_flags &= ~TI_FLAG_DEBUGING;
	TI_UNLOCK(sc);

	return (0);
}
/*
 * This ioctl routine goes along with the Tigon character device.
 * It exposes NIC statistics, tunable parameters, firmware tracing,
 * and raw register/memory access for debugging.
 *
 * Fix: in the ALT_READ/WRITE_TG_MEM handler, the Tigon I "invalid
 * memory range" path used a bare `break` that exited the switch while
 * TI_LOCK was still held, leaking the lock.  The branch is now
 * restructured so TI_UNLOCK always runs.
 */
static int
ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td)
{
	struct ti_softc *sc;
	int error;

	sc = dev->si_drv1;
	if (sc == NULL)
		return (ENODEV);

	error = 0;

	switch (cmd) {
	case TIIOCGETSTATS:
	{
		struct ti_stats *outstats;

		outstats = (struct ti_stats *)addr;

		/* Snapshot the statistics block out of the shared GIB. */
		TI_LOCK(sc);
		bus_dmamap_sync(sc->ti_cdata.ti_gib_tag,
		    sc->ti_cdata.ti_gib_map, BUS_DMASYNC_POSTREAD);
		bcopy(&sc->ti_rdata.ti_info->ti_stats, outstats,
		    sizeof(struct ti_stats));
		bus_dmamap_sync(sc->ti_cdata.ti_gib_tag,
		    sc->ti_cdata.ti_gib_map, BUS_DMASYNC_PREREAD);
		TI_UNLOCK(sc);
		break;
	}
	case TIIOCGETPARAMS:
	{
		struct ti_params *params;

		params = (struct ti_params *)addr;

		/* Return the current coalescing/statistics tunables. */
		TI_LOCK(sc);
		params->ti_stat_ticks = sc->ti_stat_ticks;
		params->ti_rx_coal_ticks = sc->ti_rx_coal_ticks;
		params->ti_tx_coal_ticks = sc->ti_tx_coal_ticks;
		params->ti_rx_max_coal_bds = sc->ti_rx_max_coal_bds;
		params->ti_tx_max_coal_bds = sc->ti_tx_max_coal_bds;
		params->ti_tx_buf_ratio = sc->ti_tx_buf_ratio;
		params->param_mask = TI_PARAM_ALL;
		TI_UNLOCK(sc);
		break;
	}
	case TIIOCSETPARAMS:
	{
		struct ti_params *params;

		params = (struct ti_params *)addr;

		/* Apply only the tunables selected by param_mask. */
		TI_LOCK(sc);
		if (params->param_mask & TI_PARAM_STAT_TICKS) {
			sc->ti_stat_ticks = params->ti_stat_ticks;
			CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks);
		}

		if (params->param_mask & TI_PARAM_RX_COAL_TICKS) {
			sc->ti_rx_coal_ticks = params->ti_rx_coal_ticks;
			CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS,
			    sc->ti_rx_coal_ticks);
		}

		if (params->param_mask & TI_PARAM_TX_COAL_TICKS) {
			sc->ti_tx_coal_ticks = params->ti_tx_coal_ticks;
			CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS,
			    sc->ti_tx_coal_ticks);
		}

		if (params->param_mask & TI_PARAM_RX_COAL_BDS) {
			sc->ti_rx_max_coal_bds = params->ti_rx_max_coal_bds;
			CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD,
			    sc->ti_rx_max_coal_bds);
		}

		if (params->param_mask & TI_PARAM_TX_COAL_BDS) {
			sc->ti_tx_max_coal_bds = params->ti_tx_max_coal_bds;
			CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD,
			    sc->ti_tx_max_coal_bds);
		}

		if (params->param_mask & TI_PARAM_TX_BUF_RATIO) {
			sc->ti_tx_buf_ratio = params->ti_tx_buf_ratio;
			CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO,
			    sc->ti_tx_buf_ratio);
		}
		TI_UNLOCK(sc);
		break;
	}
	case TIIOCSETTRACE: {
		ti_trace_type trace_type;

		trace_type = *(ti_trace_type *)addr;

		/*
		 * Set tracing to whatever the user asked for. Setting
		 * this register to 0 should have the effect of disabling
		 * tracing.
		 */
		TI_LOCK(sc);
		CSR_WRITE_4(sc, TI_GCR_NIC_TRACING, trace_type);
		TI_UNLOCK(sc);
		break;
	}
	case TIIOCGETTRACE: {
		struct ti_trace_buf *trace_buf;
		uint32_t trace_start, cur_trace_ptr, trace_len;

		trace_buf = (struct ti_trace_buf *)addr;

		TI_LOCK(sc);
		trace_start = CSR_READ_4(sc, TI_GCR_NICTRACE_START);
		cur_trace_ptr = CSR_READ_4(sc, TI_GCR_NICTRACE_PTR);
		trace_len = CSR_READ_4(sc, TI_GCR_NICTRACE_LEN);
#if 0
		if_printf(sc->ti_ifp, "trace_start = %#x, cur_trace_ptr = %#x, "
		    "trace_len = %d\n", trace_start,
		    cur_trace_ptr, trace_len);
		if_printf(sc->ti_ifp, "trace_buf->buf_len = %d\n",
		    trace_buf->buf_len);
#endif
		/* Copy as much of the NIC trace as fits the user buffer. */
		error = ti_copy_mem(sc, trace_start, min(trace_len,
		    trace_buf->buf_len), (caddr_t)trace_buf->buf, 1, 1);
		if (error == 0) {
			trace_buf->fill_len = min(trace_len,
			    trace_buf->buf_len);
			/*
			 * NOTE(review): the wrapped case computes
			 * trace_start - cur_trace_ptr; verify the intended
			 * offset direction before relying on it.
			 */
			if (cur_trace_ptr < trace_start)
				trace_buf->cur_trace_ptr =
				    trace_start - cur_trace_ptr;
			else
				trace_buf->cur_trace_ptr =
				    cur_trace_ptr - trace_start;
		} else
			trace_buf->fill_len = 0;
		TI_UNLOCK(sc);
		break;
	}

	/*
	 * For debugging, five ioctls are needed:
	 * ALT_ATTACH
	 * ALT_READ_TG_REG
	 * ALT_WRITE_TG_REG
	 * ALT_READ_TG_MEM
	 * ALT_WRITE_TG_MEM
	 */
	case ALT_ATTACH:
		/*
		 * From what I can tell, Alteon's Solaris Tigon driver
		 * only has one character device, so you have to attach
		 * to the Tigon board you're interested in. This seems
		 * like a not-so-good way to do things, since unless you
		 * subsequently specify the unit number of the device
		 * you're interested in every ioctl, you'll only be
		 * able to debug one board at a time.
		 */
		break;
	case ALT_READ_TG_MEM:
	case ALT_WRITE_TG_MEM:
	{
		struct tg_mem *mem_param;
		uint32_t sram_end, scratch_end;

		mem_param = (struct tg_mem *)addr;

		/* Memory-map limits differ between Tigon I and II. */
		if (sc->ti_hwrev == TI_HWREV_TIGON) {
			sram_end = TI_END_SRAM_I;
			scratch_end = TI_END_SCRATCH_I;
		} else {
			sram_end = TI_END_SRAM_II;
			scratch_end = TI_END_SCRATCH_II;
		}

		/*
		 * For now, we'll only handle accessing regular SRAM,
		 * nothing else.
		 */
		TI_LOCK(sc);
		if (mem_param->tgAddr >= TI_BEG_SRAM &&
		    mem_param->tgAddr + mem_param->len <= sram_end) {
			/*
			 * In this instance, we always copy to/from user
			 * space, so the user space argument is set to 1.
			 */
			error = ti_copy_mem(sc, mem_param->tgAddr,
			    mem_param->len, mem_param->userAddr, 1,
			    cmd == ALT_READ_TG_MEM ? 1 : 0);
		} else if (mem_param->tgAddr >= TI_BEG_SCRATCH &&
		    mem_param->tgAddr <= scratch_end) {
			error = ti_copy_scratch(sc, mem_param->tgAddr,
			    mem_param->len, mem_param->userAddr, 1,
			    cmd == ALT_READ_TG_MEM ? 1 : 0, TI_PROCESSOR_A);
		} else if (mem_param->tgAddr >= TI_BEG_SCRATCH_B_DEBUG &&
		    mem_param->tgAddr <= TI_BEG_SCRATCH_B_DEBUG) {
			/*
			 * NOTE(review): both bounds compare against
			 * TI_BEG_SCRATCH_B_DEBUG, so only that single
			 * address can match; an end-of-range constant was
			 * probably intended -- confirm against the Tigon
			 * register map before changing.
			 *
			 * Tigon I has no processor-B scratch window; report
			 * EINVAL without leaking TI_LOCK (previously a bare
			 * `break` escaped the switch with the lock held).
			 */
			if (sc->ti_hwrev == TI_HWREV_TIGON) {
				if_printf(sc->ti_ifp,
				    "invalid memory range for Tigon I\n");
				error = EINVAL;
			} else {
				error = ti_copy_scratch(sc, mem_param->tgAddr -
				    TI_SCRATCH_DEBUG_OFF, mem_param->len,
				    mem_param->userAddr, 1,
				    cmd == ALT_READ_TG_MEM ? 1 : 0,
				    TI_PROCESSOR_B);
			}
		} else {
			if_printf(sc->ti_ifp, "memory address %#x len %d is "
			    "out of supported range\n",
			    mem_param->tgAddr, mem_param->len);
			error = EINVAL;
		}
		TI_UNLOCK(sc);
		break;
	}
	case ALT_READ_TG_REG:
	case ALT_WRITE_TG_REG:
	{
		struct tg_reg *regs;
		uint32_t tmpval;

		regs = (struct tg_reg *)addr;

		/*
		 * Make sure the address in question isn't out of range.
		 */
		if (regs->addr > TI_REG_MAX) {
			error = EINVAL;
			break;
		}
		TI_LOCK(sc);
		if (cmd == ALT_READ_TG_REG) {
			bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
			    regs->addr, &tmpval, 1);
			regs->data = ntohl(tmpval);
#if 0
			if ((regs->addr == TI_CPU_STATE)
			    || (regs->addr == TI_CPU_CTL_B)) {
				if_printf(sc->ti_ifp, "register %#x = %#x\n",
				    regs->addr, tmpval);
			}
#endif
		} else {
			tmpval = htonl(regs->data);
			bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
			    regs->addr, &tmpval, 1);
		}
		TI_UNLOCK(sc);
		break;
	}
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
/*
 * Per-second watchdog callout.  Resets the interface when the
 * transmit timer armed by the TX path expires.
 */
static void
ti_watchdog(void *arg)
{
	struct ti_softc *sc = arg;
	if_t ifp;

	TI_LOCK_ASSERT(sc);

	/* Re-arm ourselves for the next second. */
	callout_reset(&sc->ti_watchdog, hz, ti_watchdog, sc);

	/* Timer disarmed? */
	if (sc->ti_timer == 0)
		return;
	/* Armed but not yet expired? */
	if (--sc->ti_timer > 0)
		return;

	/*
	 * When we're debugging, the chip is often stopped for long periods
	 * of time, and that would normally cause the watchdog timer to fire.
	 * Since that impedes debugging, we don't want to do that.
	 */
	if (sc->ti_flags & TI_FLAG_DEBUGING)
		return;

	ifp = sc->ti_ifp;
	if_printf(ifp, "watchdog timeout -- resetting\n");
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	ti_init_locked(sc);

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
ti_stop(struct ti_softc *sc)
{
	if_t ifp;
	struct ti_cmd_desc cmd;		/* scratch descriptor for TI_DO_CMD() */

	TI_LOCK_ASSERT(sc);

	ifp = sc->ti_ifp;

	/* Disable host interrupts. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
	/*
	 * Tell firmware we're shutting down.
	 */
	TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_DOWN, 0);

	/* Halt and reinitialize. */
	if (ti_chipinit(sc) == 0) {
		/* Zero NIC-local memory from 0x2000 up to 1 MB. */
		ti_mem_zero(sc, 0x2000, 0x100000 - 0x2000);
		/* XXX ignore init errors. */
		ti_chipinit(sc);
	}

	/* Free the RX lists. */
	ti_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	ti_free_rx_ring_jumbo(sc);

	/* Free mini RX list. */
	ti_free_rx_ring_mini(sc);

	/* Free TX buffers. */
	ti_free_tx_ring(sc);

	/* Reset the host's cached producer/consumer indexes. */
	sc->ti_ev_prodidx.ti_idx = 0;
	sc->ti_return_prodidx.ti_idx = 0;
	sc->ti_tx_considx.ti_idx = 0;
	sc->ti_tx_saved_considx = TI_TXCONS_UNSET;

	/* Mark the interface down and disarm the watchdog. */
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	callout_stop(&sc->ti_watchdog);
}
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
ti_shutdown(device_t dev)
{
	struct ti_softc *sc = device_get_softc(dev);

	TI_LOCK(sc);
	ti_chipinit(sc);
	TI_UNLOCK(sc);

	return (0);
}
static void
ti_sysctl_node(struct ti_softc *sc)
{
struct sysctl_ctx_list *ctx;
struct sysctl_oid_list *child;
char tname[32];
ctx = device_get_sysctl_ctx(sc->ti_dev);
child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ti_dev));
/* Use DAC */
sc->ti_dac = 1;
snprintf(tname, sizeof(tname), "dev.ti.%d.dac",
device_get_unit(sc->ti_dev));
TUNABLE_INT_FETCH(tname, &sc->ti_dac);
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_coal_ticks", CTLFLAG_RW,
&sc->ti_rx_coal_ticks, 0, "Receive coalcesced ticks");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_max_coal_bds", CTLFLAG_RW,
&sc->ti_rx_max_coal_bds, 0, "Receive max coalcesced BDs");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_coal_ticks", CTLFLAG_RW,
&sc->ti_tx_coal_ticks, 0, "Send coalcesced ticks");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_max_coal_bds", CTLFLAG_RW,
&sc->ti_tx_max_coal_bds, 0, "Send max coalcesced BDs");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_buf_ratio", CTLFLAG_RW,
&sc->ti_tx_buf_ratio, 0,
"Ratio of NIC memory devoted to TX buffer");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "stat_ticks", CTLFLAG_RW,
&sc->ti_stat_ticks, 0,
"Number of clock ticks for statistics update interval");
/* Pull in device tunables. */
sc->ti_rx_coal_ticks = 170;
resource_int_value(device_get_name(sc->ti_dev),
device_get_unit(sc->ti_dev), "rx_coal_ticks",
&sc->ti_rx_coal_ticks);
sc->ti_rx_max_coal_bds = 64;
resource_int_value(device_get_name(sc->ti_dev),
device_get_unit(sc->ti_dev), "rx_max_coal_bds",
&sc->ti_rx_max_coal_bds);
sc->ti_tx_coal_ticks = TI_TICKS_PER_SEC / 500;
resource_int_value(device_get_name(sc->ti_dev),
device_get_unit(sc->ti_dev), "tx_coal_ticks",
&sc->ti_tx_coal_ticks);
sc->ti_tx_max_coal_bds = 32;
resource_int_value(device_get_name(sc->ti_dev),
device_get_unit(sc->ti_dev), "tx_max_coal_bds",
&sc->ti_tx_max_coal_bds);
sc->ti_tx_buf_ratio = 21;
resource_int_value(device_get_name(sc->ti_dev),
device_get_unit(sc->ti_dev), "tx_buf_ratio",
&sc->ti_tx_buf_ratio);
sc->ti_stat_ticks = 2 * TI_TICKS_PER_SEC;
resource_int_value(device_get_name(sc->ti_dev),
device_get_unit(sc->ti_dev), "stat_ticks",
&sc->ti_stat_ticks);
}
diff --git a/sys/dev/tsec/if_tsec.c b/sys/dev/tsec/if_tsec.c
index 1c03110e1889..80e42116ed9f 100644
--- a/sys/dev/tsec/if_tsec.c
+++ b/sys/dev/tsec/if_tsec.c
@@ -1,1927 +1,1921 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski
* Copyright (C) 2006-2007 Semihalf, Piotr Kruszynski
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Freescale integrated Three-Speed Ethernet Controller (TSEC) driver.
*/
#include <sys/cdefs.h>
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <machine/bus.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/tsec/if_tsec.h>
#include <dev/tsec/if_tsecreg.h>
static int tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag,
bus_dmamap_t *dmap, bus_size_t dsize, void **vaddr, void *raddr,
const char *dname);
static void tsec_dma_ctl(struct tsec_softc *sc, int state);
static void tsec_encap(if_t ifp, struct tsec_softc *sc,
struct mbuf *m0, uint16_t fcb_flags, int *start_tx);
static void tsec_free_dma(struct tsec_softc *sc);
static void tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr);
static int tsec_ifmedia_upd(if_t ifp);
static void tsec_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr);
static int tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
struct mbuf **mbufp, uint32_t *paddr);
static void tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs,
int nseg, int error);
static void tsec_intrs_ctl(struct tsec_softc *sc, int state);
static void tsec_init(void *xsc);
static void tsec_init_locked(struct tsec_softc *sc);
static int tsec_ioctl(if_t ifp, u_long command, caddr_t data);
static void tsec_reset_mac(struct tsec_softc *sc);
static void tsec_setfilter(struct tsec_softc *sc);
static void tsec_set_mac_address(struct tsec_softc *sc);
static void tsec_start(if_t ifp);
static void tsec_start_locked(if_t ifp);
static void tsec_stop(struct tsec_softc *sc);
static void tsec_tick(void *arg);
static void tsec_watchdog(struct tsec_softc *sc);
static void tsec_add_sysctls(struct tsec_softc *sc);
static int tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS);
static int tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS);
static void tsec_set_rxic(struct tsec_softc *sc);
static void tsec_set_txic(struct tsec_softc *sc);
static int tsec_receive_intr_locked(struct tsec_softc *sc, int count);
static void tsec_transmit_intr_locked(struct tsec_softc *sc);
static void tsec_error_intr_locked(struct tsec_softc *sc, int count);
static void tsec_offload_setup(struct tsec_softc *sc);
static void tsec_offload_process_frame(struct tsec_softc *sc,
struct mbuf *m);
static void tsec_setup_multicast(struct tsec_softc *sc);
static int tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu);
DRIVER_MODULE(miibus, tsec, miibus_driver, 0, 0);
MODULE_DEPEND(tsec, ether, 1, 1, 1);
MODULE_DEPEND(tsec, miibus, 1, 1, 1);
struct mtx tsec_phy_mtx;
/*
 * Bus-independent attach: quiesce the hardware, allocate DMA rings
 * and mbuf maps, create the ifnet, attach the PHY(s) and register
 * with the ethernet layer.  Returns 0 or an errno; on any failure
 * tsec_detach() is called to release whatever was already set up.
 */
int
tsec_attach(struct tsec_softc *sc)
{
	uint8_t hwaddr[ETHER_ADDR_LEN];
	if_t ifp;
	int error = 0;
	int i;

	/* Initialize global (because potentially shared) MII lock */
	if (!mtx_initialized(&tsec_phy_mtx))
		mtx_init(&tsec_phy_mtx, "tsec mii", NULL, MTX_DEF);

	/* Reset all TSEC counters */
	TSEC_TX_RX_COUNTERS_INIT(sc);

	/* Stop DMA engine if enabled by firmware */
	tsec_dma_ctl(sc, 0);

	/* Reset MAC */
	tsec_reset_mac(sc);

	/* Disable interrupts for now */
	tsec_intrs_ctl(sc, 0);

	/* Configure defaults for interrupts coalescing */
	sc->rx_ic_time = 768;
	sc->rx_ic_count = 16;
	sc->tx_ic_time = 768;
	sc->tx_ic_count = 16;
	tsec_set_rxic(sc);
	tsec_set_txic(sc);
	tsec_add_sysctls(sc);

	/* Allocate a busdma tag and DMA safe memory for TX descriptors. */
	error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_tx_dtag,
	    &sc->tsec_tx_dmap, sizeof(*sc->tsec_tx_vaddr) * TSEC_TX_NUM_DESC,
	    (void **)&sc->tsec_tx_vaddr, &sc->tsec_tx_raddr, "TX");
	if (error) {
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Allocate a busdma tag and DMA safe memory for RX descriptors. */
	error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_rx_dtag,
	    &sc->tsec_rx_dmap, sizeof(*sc->tsec_rx_vaddr) * TSEC_RX_NUM_DESC,
	    (void **)&sc->tsec_rx_vaddr, &sc->tsec_rx_raddr, "RX");
	if (error) {
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Allocate a busdma tag for TX mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    TSEC_TXBUFFER_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES * (TSEC_TX_NUM_DESC - 1),	/* maxsize */
	    TSEC_TX_MAX_DMA_SEGS,		/* nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->tsec_tx_mtag);			/* dmat */
	if (error) {
		device_printf(sc->dev, "failed to allocate busdma tag "
		    "(tx mbufs)\n");
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Allocate a busdma tag for RX mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    TSEC_RXBUFFER_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES,				/* maxsize */
	    1,					/* nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->tsec_rx_mtag);			/* dmat */
	if (error) {
		device_printf(sc->dev, "failed to allocate busdma tag "
		    "(rx mbufs)\n");
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Create TX busdma maps */
	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
		error = bus_dmamap_create(sc->tsec_tx_mtag, 0,
		    &sc->tx_bufmap[i].map);
		if (error) {
			device_printf(sc->dev, "failed to init TX ring\n");
			tsec_detach(sc);
			return (ENXIO);
		}
		sc->tx_bufmap[i].map_initialized = 1;
	}

	/* Create RX busdma maps and zero mbuf handlers */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		error = bus_dmamap_create(sc->tsec_rx_mtag, 0,
		    &sc->rx_data[i].map);
		if (error) {
			device_printf(sc->dev, "failed to init RX ring\n");
			tsec_detach(sc);
			return (ENXIO);
		}
		sc->rx_data[i].mbuf = NULL;
	}

	/* Create mbufs for RX buffers */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		error = tsec_new_rxbuf(sc->tsec_rx_mtag, sc->rx_data[i].map,
		    &sc->rx_data[i].mbuf, &sc->rx_data[i].paddr);
		if (error) {
			device_printf(sc->dev, "can't load rx DMA map %d, "
			    "error = %d\n", i, error);
			tsec_detach(sc);
			return (error);
		}
	}

	/* Create network interface for upper layers */
	ifp = sc->tsec_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(sc->dev, "if_alloc() failed\n");
- tsec_detach(sc);
- return (ENOMEM);
- }
-
	/*
	 * NOTE(review): the lines above are diff-removal residue from the
	 * rendered change; if_alloc(9) no longer returns NULL, so the
	 * NULL check was deleted.
	 */
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
	if_setinitfn(ifp, tsec_init);
	if_setstartfn(ifp, tsec_start);
	if_setioctlfn(ifp, tsec_ioctl);

	if_setsendqlen(ifp, TSEC_TX_NUM_DESC - 1);
	if_setsendqready(ifp);

	if_setcapabilities(ifp, IFCAP_VLAN_MTU);
	if (sc->is_etsec)
		if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);

	if_setcapenable(ifp, if_getcapabilities(ifp));

#ifdef DEVICE_POLLING
	/* Advertise that polling is supported */
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif

	/* Attach PHY(s) */
	error = mii_attach(sc->dev, &sc->tsec_miibus, ifp, tsec_ifmedia_upd,
	    tsec_ifmedia_sts, BMSR_DEFCAPMASK, sc->phyaddr, MII_OFFSET_ANY,
	    0);
	if (error) {
		device_printf(sc->dev, "attaching PHYs failed\n");
		if_free(ifp);
		sc->tsec_ifp = NULL;
		tsec_detach(sc);
		return (error);
	}
	sc->tsec_mii = device_get_softc(sc->tsec_miibus);

	/* Set MAC address */
	tsec_get_hwaddr(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);

	return (0);
}
int
tsec_detach(struct tsec_softc *sc)
{
if (sc->tsec_ifp != NULL) {
#ifdef DEVICE_POLLING
if (if_getcapenable(sc->tsec_ifp) & IFCAP_POLLING)
ether_poll_deregister(sc->tsec_ifp);
#endif
/* Stop TSEC controller and free TX queue */
if (sc->sc_rres)
tsec_shutdown(sc->dev);
/* Detach network interface */
ether_ifdetach(sc->tsec_ifp);
if_free(sc->tsec_ifp);
sc->tsec_ifp = NULL;
}
/* Free DMA resources */
tsec_free_dma(sc);
return (0);
}
/*
 * Stop the controller under the global lock (shutdown method).
 */
int
tsec_shutdown(device_t dev)
{
	struct tsec_softc *sc = device_get_softc(dev);

	TSEC_GLOBAL_LOCK(sc);
	tsec_stop(sc);
	TSEC_GLOBAL_UNLOCK(sc);

	return (0);
}
/* Suspend method stub; always reports success. */
int
tsec_suspend(device_t dev)
{

	/* TODO not implemented! */
	return (0);
}
/* Resume method stub; always reports success. */
int
tsec_resume(device_t dev)
{

	/* TODO not implemented! */
	return (0);
}
/*
 * ifnet init entry point: take the global lock and run the real
 * initialization.
 */
static void
tsec_init(void *xsc)
{
	struct tsec_softc *sc = xsc;

	TSEC_GLOBAL_LOCK(sc);
	tsec_init_locked(sc);
	TSEC_GLOBAL_UNLOCK(sc);
}
/*
 * Poll MIIMIND until none of the given flag bits are set.
 * Returns nonzero if we ran out of retries (timeout).
 */
static int
tsec_mii_wait(struct tsec_softc *sc, uint32_t flags)
{
	int tries;

	/*
	 * The status indicators are not set immediately after a command.
	 * Discard the first value.
	 */
	TSEC_PHY_READ(sc, TSEC_REG_MIIMIND);

	for (tries = TSEC_READ_RETRY; tries != 0; tries--) {
		if ((TSEC_PHY_READ(sc, TSEC_REG_MIIMIND) & flags) == 0)
			return (0);
		if (tries > 1)
			DELAY(TSEC_READ_DELAY);
	}

	return (1);
}
/*
 * Bring the controller fully up following the MPC8555E reference
 * manual's 27-step initialization sequence.  Must be called with the
 * global lock held; a no-op if the interface is already running.
 */
static void
tsec_init_locked(struct tsec_softc *sc)
{
	struct tsec_desc *tx_desc = sc->tsec_tx_vaddr;
	struct tsec_desc *rx_desc = sc->tsec_rx_vaddr;
	if_t ifp = sc->tsec_ifp;
	uint32_t val, i;
	int timeout;

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	TSEC_GLOBAL_LOCK_ASSERT(sc);
	tsec_stop(sc);

	/*
	 * These steps are according to the MPC8555E PowerQUICCIII RM:
	 * 14.7 Initialization/Application Information
	 */

	/* Step 1: soft reset MAC */
	tsec_reset_mac(sc);

	/* Step 2: Initialize MACCFG2 */
	TSEC_WRITE(sc, TSEC_REG_MACCFG2,
	    TSEC_MACCFG2_FULLDUPLEX |	/* Full Duplex = 1 */
	    TSEC_MACCFG2_PADCRC |	/* PAD/CRC append */
	    TSEC_MACCFG2_GMII |		/* I/F Mode bit */
	    TSEC_MACCFG2_PRECNT		/* Preamble count = 7 */
	);

	/* Step 3: Initialize ECNTRL
	 * While the documentation states that R100M is ignored if RPM is
	 * not set, it does seem to be needed to get the orange boxes to
	 * work (which have a Marvell 88E1111 PHY). Go figure.
	 */

	/*
	 * XXX kludge - use circumstancial evidence to program ECNTRL
	 * correctly. Ideally we need some board information to guide
	 * us here.
	 */
	i = TSEC_READ(sc, TSEC_REG_ID2);
	val = (i & 0xffff)
	    ? (TSEC_ECNTRL_TBIM | TSEC_ECNTRL_SGMIIM)	/* Sumatra */
	    : TSEC_ECNTRL_R100M;			/* Orange + CDS */
	TSEC_WRITE(sc, TSEC_REG_ECNTRL, TSEC_ECNTRL_STEN | val);

	/* Step 4: Initialize MAC station address */
	tsec_set_mac_address(sc);

	/*
	 * Step 5: Assign a Physical address to the TBI so as to not conflict
	 * with the external PHY physical address
	 */
	TSEC_WRITE(sc, TSEC_REG_TBIPA, 5);

	TSEC_PHY_LOCK(sc);

	/* Step 6: Reset the management interface */
	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_RESETMGMT);

	/* Step 7: Setup the MII Mgmt clock speed */
	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_CLKDIV28);

	/* Step 8: Read MII Mgmt indicator register and check for Busy = 0 */
	timeout = tsec_mii_wait(sc, TSEC_MIIMIND_BUSY);

	TSEC_PHY_UNLOCK(sc);
	if (timeout) {
		/* Give up rather than program a wedged management block. */
		if_printf(ifp, "tsec_init_locked(): Mgmt busy timeout\n");
		return;
	}

	/* Step 9: Setup the MII Mgmt */
	mii_mediachg(sc->tsec_mii);

	/* Step 10: Clear IEVENT register */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, 0xffffffff);

	/* Step 11: Enable interrupts */
#ifdef DEVICE_POLLING
	/*
	 * ...only if polling is not turned on. Disable interrupts explicitly
	 * if polling is enabled.
	 */
	if (if_getcapenable(ifp) & IFCAP_POLLING )
		tsec_intrs_ctl(sc, 0);
	else
#endif /* DEVICE_POLLING */
	tsec_intrs_ctl(sc, 1);

	/* Step 12: Initialize IADDRn */
	TSEC_WRITE(sc, TSEC_REG_IADDR0, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR1, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR2, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR3, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR4, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR5, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR6, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR7, 0);

	/* Step 13: Initialize GADDRn */
	TSEC_WRITE(sc, TSEC_REG_GADDR0, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR1, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR2, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR3, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR4, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR5, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR6, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR7, 0);

	/* Step 14: Initialize RCTRL */
	TSEC_WRITE(sc, TSEC_REG_RCTRL, 0);

	/* Step 15: Initialize DMACTRL */
	tsec_dma_ctl(sc, 1);

	/* Step 16: Initialize FIFO_PAUSE_CTRL */
	TSEC_WRITE(sc, TSEC_REG_FIFO_PAUSE_CTRL, TSEC_FIFO_PAUSE_CTRL_EN);

	/*
	 * Step 17: Initialize transmit/receive descriptor rings.
	 * Initialize TBASE and RBASE.
	 */
	TSEC_WRITE(sc, TSEC_REG_TBASE, sc->tsec_tx_raddr);
	TSEC_WRITE(sc, TSEC_REG_RBASE, sc->tsec_rx_raddr);

	/* TX ring starts empty; wrap bit on the last descriptor. */
	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
		tx_desc[i].bufptr = 0;
		tx_desc[i].length = 0;
		tx_desc[i].flags = ((i == TSEC_TX_NUM_DESC - 1) ?
		    TSEC_TXBD_W : 0);
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* RX ring pre-loaded with the buffers allocated at attach. */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		rx_desc[i].bufptr = sc->rx_data[i].paddr;
		rx_desc[i].length = 0;
		rx_desc[i].flags = TSEC_RXBD_E | TSEC_RXBD_I |
		    ((i == TSEC_RX_NUM_DESC - 1) ? TSEC_RXBD_W : 0);
	}
	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Step 18: Initialize the maximum receive buffer length */
	TSEC_WRITE(sc, TSEC_REG_MRBLR, MCLBYTES);

	/* Step 19: Configure ethernet frame sizes */
	TSEC_WRITE(sc, TSEC_REG_MINFLR, TSEC_MIN_FRAME_SIZE);
	tsec_set_mtu(sc, if_getmtu(ifp));

	/* Step 20: Enable Rx and RxBD sdata snooping */
	TSEC_WRITE(sc, TSEC_REG_ATTR, TSEC_ATTR_RDSEN | TSEC_ATTR_RBDSEN);
	TSEC_WRITE(sc, TSEC_REG_ATTRELI, 0);

	/* Step 21: Reset collision counters in hardware */
	TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);

	/* Step 22: Mask all CAM interrupts */
	TSEC_WRITE(sc, TSEC_REG_MON_CAM1, 0xffffffff);
	TSEC_WRITE(sc, TSEC_REG_MON_CAM2, 0xffffffff);

	/* Step 23: Enable Rx and Tx */
	val = TSEC_READ(sc, TSEC_REG_MACCFG1);
	val |= (TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, val);

	/* Step 24: Reset TSEC counters for Tx and Rx rings */
	TSEC_TX_RX_COUNTERS_INIT(sc);

	/* Step 25: Setup TCP/IP Off-Load engine */
	if (sc->is_etsec)
		tsec_offload_setup(sc);

	/* Step 26: Setup multicast filters */
	tsec_setup_multicast(sc);

	/* Step 27: Activate network interface */
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	sc->tsec_if_flags = if_getflags(ifp);
	sc->tsec_watchdog = 0;

	/* Schedule watchdog timeout */
	callout_reset(&sc->tsec_callout, hz, tsec_tick, sc);
}
/*
 * Program the MAC station address registers.  The hardware expects
 * the address bytes in reverse order, so the lladdr is byte-swapped
 * into a two-word buffer first.
 */
static void
tsec_set_mac_address(struct tsec_softc *sc)
{
	uint32_t macbuf[2] = { 0, 0 };
	char *dst, *src;
	int i;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	KASSERT((ETHER_ADDR_LEN <= sizeof(macbuf)),
	    ("tsec_set_mac_address: (%d <= %zd", ETHER_ADDR_LEN,
	    sizeof(macbuf)));

	dst = (char *)macbuf;
	src = (char *)if_getlladdr(sc->tsec_ifp);

	/* Correct order of MAC address bytes */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		dst[ETHER_ADDR_LEN - 1 - i] = src[i];

	/* Initialize MAC station address MACSTNADDR2 and MACSTNADDR1 */
	TSEC_WRITE(sc, TSEC_REG_MACSTNADDR2, macbuf[1]);
	TSEC_WRITE(sc, TSEC_REG_MACSTNADDR1, macbuf[0]);
}
/*
 * DMA control function, if argument state is:
 * 0 - DMA engine will be disabled
 * 1 - DMA engine will be enabled
 *
 * The internal pseudo-state 1000 reuses the "enable" register setup
 * to clear the graceful-stop bits without restarting transmission;
 * it is only ever passed by the state==0 path below.
 */
static void
tsec_dma_ctl(struct tsec_softc *sc, int state)
{
	device_t dev;
	uint32_t dma_flags, timeout;

	dev = sc->dev;
	dma_flags = TSEC_READ(sc, TSEC_REG_DMACTRL);

	switch (state) {
	case 0:
		/* Temporarily clear stop graceful stop bits. */
		tsec_dma_ctl(sc, 1000);

		/* Set it again */
		dma_flags |= (TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
		break;
	case 1000:
	case 1:
		/* Set write with response (WWR), wait (WOP) and snoop bits */
		dma_flags |= (TSEC_DMACTRL_TDSEN | TSEC_DMACTRL_TBDSEN |
		    DMACTRL_WWR | DMACTRL_WOP);

		/* Clear graceful stop bits */
		dma_flags &= ~(TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
		break;
	default:
		device_printf(dev, "tsec_dma_ctl(): unknown state value: %d\n",
		    state);
	}

	TSEC_WRITE(sc, TSEC_REG_DMACTRL, dma_flags);

	switch (state) {
	case 0:
		/* Wait for DMA stop */
		timeout = TSEC_READ_RETRY;
		while (--timeout && (!(TSEC_READ(sc, TSEC_REG_IEVENT) &
		    (TSEC_IEVENT_GRSC | TSEC_IEVENT_GTSC))))
			DELAY(TSEC_READ_DELAY);

		if (timeout == 0)
			device_printf(dev, "tsec_dma_ctl(): timeout!\n");
		break;
	case 1:
		/* Restart transmission function */
		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
	}
}
/*
 * Interrupts control function, if argument state is:
 * 0 - all TSEC interrupts will be masked
 * 1 - all TSEC interrupts will be unmasked
 */
static void
tsec_intrs_ctl(struct tsec_softc *sc, int state)
{
	device_t dev = sc->dev;

	if (state == 0) {
		/* Mask everything. */
		TSEC_WRITE(sc, TSEC_REG_IMASK, 0);
	} else if (state == 1) {
		/* Unmask the full set of events the driver services. */
		TSEC_WRITE(sc, TSEC_REG_IMASK, TSEC_IMASK_BREN |
		    TSEC_IMASK_RXCEN | TSEC_IMASK_BSYEN | TSEC_IMASK_EBERREN |
		    TSEC_IMASK_BTEN | TSEC_IMASK_TXEEN | TSEC_IMASK_TXBEN |
		    TSEC_IMASK_TXFEN | TSEC_IMASK_XFUNEN | TSEC_IMASK_RXFEN);
	} else {
		device_printf(dev, "tsec_intrs_ctl(): unknown state value: %d\n",
		    state);
	}
}
/*
 * Pulse the MACCFG1 soft-reset bit: set it, then clear it again.
 */
static void
tsec_reset_mac(struct tsec_softc *sc)
{
	uint32_t cfg;

	/* Set soft reset bit */
	cfg = TSEC_READ(sc, TSEC_REG_MACCFG1);
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, cfg | TSEC_MACCFG1_SOFT_RESET);

	/* Clear soft reset bit */
	cfg = TSEC_READ(sc, TSEC_REG_MACCFG1);
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, cfg & ~TSEC_MACCFG1_SOFT_RESET);
}
/*
 * Transmit watchdog: when the TX timer armed by the start path
 * expires, log it, bump OERRORS, and reinitialize the controller.
 */
static void
tsec_watchdog(struct tsec_softc *sc)
{
	if_t ifp;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	/* Disarmed? */
	if (sc->tsec_watchdog == 0)
		return;
	/* Armed but not yet expired? */
	if (--sc->tsec_watchdog > 0)
		return;

	ifp = sc->tsec_ifp;
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_printf(ifp, "watchdog timeout\n");

	tsec_stop(sc);
	tsec_init_locked(sc);
}
/*
 * ifnet start entry point: run the locked start routine under the
 * transmit lock.
 */
static void
tsec_start(if_t ifp)
{
	struct tsec_softc *sc;

	sc = if_getsoftc(ifp);
	TSEC_TRANSMIT_LOCK(sc);
	tsec_start_locked(ifp);
	TSEC_TRANSMIT_UNLOCK(sc);
}
/*
 * Dequeue packets from the interface send queue and hand them to
 * tsec_encap() until the queue is empty or TX descriptors run out.
 * Prepends the TX frame control block (FCB) when checksum offload is
 * requested.  Transmit lock must be held; a no-op while the link is down.
 */
static void
tsec_start_locked(if_t ifp)
{
	struct tsec_softc *sc;
	struct mbuf *m0;
	struct tsec_tx_fcb *tx_fcb;
	int csum_flags;
	int start_tx;
	uint16_t fcb_flags;

	sc = if_getsoftc(ifp);
	start_tx = 0;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	if (sc->tsec_link == 0)
		return;

	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		/* Need a full worst-case set of descriptors per packet. */
		if (TSEC_FREE_TX_DESC(sc) < TSEC_TX_MAX_DMA_SEGS) {
			/* No free descriptors */
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		/* Get packet from the queue */
		m0 = if_dequeue(ifp);
		if (m0 == NULL)
			break;

		/* Insert TCP/IP Off-load frame control block */
		fcb_flags = 0;
		csum_flags = m0->m_pkthdr.csum_flags;
		if (csum_flags) {
			/* M_PREPEND may replace m0; NULL means allocation failed. */
			M_PREPEND(m0, sizeof(struct tsec_tx_fcb), M_NOWAIT);
			if (m0 == NULL)
				break;

			if (csum_flags & CSUM_IP)
				fcb_flags |= TSEC_TX_FCB_IP4 |
				    TSEC_TX_FCB_CSUM_IP;

			if (csum_flags & CSUM_TCP)
				fcb_flags |= TSEC_TX_FCB_TCP |
				    TSEC_TX_FCB_CSUM_TCP_UDP;

			if (csum_flags & CSUM_UDP)
				fcb_flags |= TSEC_TX_FCB_UDP |
				    TSEC_TX_FCB_CSUM_TCP_UDP;

			tx_fcb = mtod(m0, struct tsec_tx_fcb *);
			tx_fcb->flags = fcb_flags;
			tx_fcb->l3_offset = ETHER_HDR_LEN;
			tx_fcb->l4_offset = sizeof(struct ip);
		}

		tsec_encap(ifp, sc, m0, fcb_flags, &start_tx);
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (start_tx) {
		/* Enable transmitter and watchdog timer */
		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
		sc->tsec_watchdog = 5;
	}
}
/*
 * Map mbuf chain m0 for DMA (defragmenting once on EFBIG) and fill TX
 * descriptors for it.  On mapping failure the mbuf is freed and
 * *start_tx is left untouched; on success the driver takes ownership of
 * m0 and sets *start_tx.  Transmit lock must be held.
 */
static void
tsec_encap(if_t ifp, struct tsec_softc *sc, struct mbuf *m0,
    uint16_t fcb_flags, int *start_tx)
{
	bus_dma_segment_t segs[TSEC_TX_MAX_DMA_SEGS];
	int error, i, nsegs;
	struct tsec_bufmap *tx_bufmap;
	uint32_t tx_idx;
	uint16_t flags;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	tx_idx = sc->tx_idx_head;
	tx_bufmap = &sc->tx_bufmap[tx_idx];

	/* Create mapping in DMA memory */
	error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag, tx_bufmap->map, m0,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments! Defrag and try again. */
		struct mbuf *m = m_defrag(m0, M_NOWAIT);

		if (m == NULL) {
			m_freem(m0);
			return;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag,
		    tx_bufmap->map, m0, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	if (error != 0) {
		/* Give up. */
		m_freem(m0);
		return;
	}

	bus_dmamap_sync(sc->tsec_tx_mtag, tx_bufmap->map,
	    BUS_DMASYNC_PREWRITE);
	/* The mbuf is recorded in the packet's first bufmap slot. */
	tx_bufmap->mbuf = m0;

	/*
	 * Fill in the TX descriptors back to front so that READY bit in first
	 * descriptor is set last.
	 */
	tx_idx = (tx_idx + (uint32_t)nsegs) & (TSEC_TX_NUM_DESC - 1);
	sc->tx_idx_head = tx_idx;
	flags = TSEC_TXBD_L | TSEC_TXBD_I | TSEC_TXBD_R | TSEC_TXBD_TC;
	for (i = nsegs - 1; i >= 0; i--) {
		struct tsec_desc *tx_desc;

		tx_idx = (tx_idx - 1) & (TSEC_TX_NUM_DESC - 1);
		tx_desc = &sc->tsec_tx_vaddr[tx_idx];
		tx_desc->length = segs[i].ds_len;
		tx_desc->bufptr = segs[i].ds_addr;

		if (i == 0) {
			/*
			 * Barrier before the first descriptor's R bit is
			 * published, so hardware never sees a half-written
			 * chain.
			 */
			wmb();

			if (fcb_flags != 0)
				flags |= TSEC_TXBD_TOE;
		}

		/*
		 * Set flags:
		 *   - wrap
		 *   - checksum
		 *   - ready to send
		 *   - transmit the CRC sequence after the last data byte
		 *   - interrupt after the last buffer
		 */
		tx_desc->flags = (tx_idx == (TSEC_TX_NUM_DESC - 1) ?
		    TSEC_TXBD_W : 0) | flags;

		/* L (last) and I (interrupt) apply only to the final segment. */
		flags &= ~(TSEC_TXBD_L | TSEC_TXBD_I);
	}

	BPF_MTAP(ifp, m0);
	*start_tx = 1;
}
/*
 * Program the RCTRL promiscuous-mode bit from the interface's
 * IFF_PROMISC flag.
 */
static void
tsec_setfilter(struct tsec_softc *sc)
{
	uint32_t rctrl;

	rctrl = TSEC_READ(sc, TSEC_REG_RCTRL);

	/* Promiscuous mode */
	if (if_getflags(sc->tsec_ifp) & IFF_PROMISC)
		rctrl |= TSEC_RCTRL_PROM;
	else
		rctrl &= ~TSEC_RCTRL_PROM;

	TSEC_WRITE(sc, TSEC_REG_RCTRL, rctrl);
}
#ifdef DEVICE_POLLING
static poll_handler_t tsec_poll;

/*
 * polling(4) handler: services TX completions and up to "count" RX
 * frames; on POLL_AND_CHECK_STATUS it also runs the error handler and
 * acknowledges every pending IEVENT bit.  Returns the number of RX
 * packets delivered to the stack.
 */
static int
tsec_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	uint32_t ie;
	struct tsec_softc *sc = if_getsoftc(ifp);
	int rx_npkts;

	rx_npkts = 0;

	TSEC_GLOBAL_LOCK(sc);
	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
		TSEC_GLOBAL_UNLOCK(sc);
		return (rx_npkts);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		tsec_error_intr_locked(sc, count);

		/* Clear all events reported */
		ie = TSEC_READ(sc, TSEC_REG_IEVENT);
		TSEC_WRITE(sc, TSEC_REG_IEVENT, ie);
	}

	tsec_transmit_intr_locked(sc);

	/* Downgrade: keep only the receive lock for the RX pass. */
	TSEC_GLOBAL_TO_RECEIVE_LOCK(sc);

	rx_npkts = tsec_receive_intr_locked(sc, count);

	TSEC_RECEIVE_UNLOCK(sc);

	return (rx_npkts);
}
#endif /* DEVICE_POLLING */
/*
 * Interface ioctl handler: MTU, interface flags, multicast, media and
 * capability requests; anything else is forwarded to ether_ioctl().
 * Returns 0 or an errno value.
 */
static int
tsec_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct tsec_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error = 0;

	switch (command) {
	case SIOCSIFMTU:
		TSEC_GLOBAL_LOCK(sc);
		if (tsec_set_mtu(sc, ifr->ifr_mtu))
			if_setmtu(ifp, ifr->ifr_mtu);
		else
			error = EINVAL;
		TSEC_GLOBAL_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		TSEC_GLOBAL_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/* Reprogram only what actually changed. */
				if ((sc->tsec_if_flags ^ if_getflags(ifp)) &
				    IFF_PROMISC)
					tsec_setfilter(sc);

				if ((sc->tsec_if_flags ^ if_getflags(ifp)) &
				    IFF_ALLMULTI)
					tsec_setup_multicast(sc);
			} else
				tsec_init_locked(sc);
		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			tsec_stop(sc);

		sc->tsec_if_flags = if_getflags(ifp);
		TSEC_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			TSEC_GLOBAL_LOCK(sc);
			tsec_setup_multicast(sc);
			TSEC_GLOBAL_UNLOCK(sc);
		}
		/*
		 * Bug fix: this case used to fall through into the media
		 * ioctls, so ifmedia_ioctl() returned EINVAL for multicast
		 * requests even after the hash table was updated.
		 */
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->tsec_mii->mii_media,
		    command);
		break;
	case SIOCSIFCAP:
		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
		if ((mask & IFCAP_HWCSUM) && sc->is_etsec) {
			TSEC_GLOBAL_LOCK(sc);
			if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
			if_setcapenablebit(ifp, IFCAP_HWCSUM & ifr->ifr_reqcap, 0);
			tsec_offload_setup(sc);
			TSEC_GLOBAL_UNLOCK(sc);
		}
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(tsec_poll, ifp);
				if (error)
					return (error);

				TSEC_GLOBAL_LOCK(sc);
				/* Disable interrupts */
				tsec_intrs_ctl(sc, 0);
				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
				TSEC_GLOBAL_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				TSEC_GLOBAL_LOCK(sc);
				/* Enable interrupts */
				tsec_intrs_ctl(sc, 1);
				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
				TSEC_GLOBAL_UNLOCK(sc);
			}
		}
#endif
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}

	/* Flush buffers if not empty */
	if (if_getflags(ifp) & IFF_UP)
		tsec_start(ifp);
	return (error);
}
/*
 * ifmedia "change media" callback: forward the request to the MII
 * layer under the transmit lock.  Always reports success.
 */
static int
tsec_ifmedia_upd(if_t ifp)
{
	struct tsec_softc *sc;

	sc = if_getsoftc(ifp);
	TSEC_TRANSMIT_LOCK(sc);
	mii_mediachg(sc->tsec_mii);
	TSEC_TRANSMIT_UNLOCK(sc);

	return (0);
}
/*
 * ifmedia "get status" callback: poll the PHY and report the active
 * media and status words.
 */
static void
tsec_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct tsec_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	TSEC_TRANSMIT_LOCK(sc);

	mii = sc->tsec_mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	TSEC_TRANSMIT_UNLOCK(sc);
}
/*
 * Allocate a fresh RX mbuf cluster and load it into "map", first
 * unloading any previous mbuf in *mbufp.  On success stores the new
 * mbuf and its bus address; returns ENOBUFS when no cluster is
 * available.  Panics if the cluster maps to more than one DMA segment.
 */
static int
tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
    uint32_t *paddr)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	int error, nsegs;

	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

	new_mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MCLBYTES);
	if (new_mbuf == NULL)
		return (ENOBUFS);
	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

	/* Release the previous buffer's mapping before reusing the map. */
	if (*mbufp) {
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
	}

	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
	    BUS_DMA_NOWAIT);
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (nsegs != 1 || error)
		panic("tsec_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);

#if 0
	/* Disabled non-panicking error path kept for reference. */
	if (error) {
		printf("tsec: bus_dmamap_load_mbuf_sg() returned: %d!\n",
		    error);
		m_freem(new_mbuf);
		return (ENOBUFS);
	}
#endif
#if 0
	KASSERT(((seg->ds_addr) & (TSEC_RXBUFFER_ALIGNMENT-1)) == 0,
	    ("Wrong alignment of RX buffer!"));
#endif
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

	(*mbufp) = new_mbuf;
	(*paddr) = seg->ds_addr;
	return (0);
}
/*
 * bus_dmamap_load() callback: record the single segment's bus address
 * in the caller-supplied location.
 */
static void
tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *addrp = arg;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	*addrp = segs->ds_addr;
}
/*
 * Allocate one descriptor ring: create a busdma tag, allocate zeroed
 * DMA-safe memory of dsize bytes, and load it to learn the bus address
 * (delivered through tsec_map_dma_addr into raddr).  On failure all
 * partially acquired resources are released, *vaddr is set to NULL and
 * ENXIO is returned; returns 0 on success.
 */
static int
tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag, bus_dmamap_t *dmap,
    bus_size_t dsize, void **vaddr, void *raddr, const char *dname)
{
	int error;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    PAGE_SIZE, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    dsize, 1,				/* maxsize, nsegments */
	    dsize, 0,				/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    dtag);				/* dmat */

	if (error) {
		device_printf(dev, "failed to allocate busdma %s tag\n",
		    dname);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	error = bus_dmamem_alloc(*dtag, vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    dmap);
	if (error) {
		device_printf(dev, "failed to allocate %s DMA safe memory\n",
		    dname);
		bus_dma_tag_destroy(*dtag);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	error = bus_dmamap_load(*dtag, *dmap, *vaddr, dsize,
	    tsec_map_dma_addr, raddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(dev, "cannot get address of the %s "
		    "descriptors\n", dname);
		bus_dmamem_free(*dtag, *vaddr, *dmap);
		bus_dma_tag_destroy(*dtag);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	return (0);
}
/*
 * Release one descriptor ring set up by tsec_alloc_dma_desc(): sync,
 * unload, free the memory and destroy the tag.  Safe to call with
 * vaddr == NULL (ring was never allocated).
 */
static void
tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr)
{
	if (vaddr == NULL)
		return;

	/* Unmap descriptors from DMA memory */
	bus_dmamap_sync(dtag, dmap, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dtag, dmap);

	/* Free descriptors memory */
	bus_dmamem_free(dtag, vaddr, dmap);

	/* Destroy descriptors tag */
	bus_dma_tag_destroy(dtag);
}
/*
 * Tear down all DMA state: TX maps and tag, RX mbufs/maps and tag, and
 * both descriptor rings.
 */
static void
tsec_free_dma(struct tsec_softc *sc)
{
	int i;

	/* Free TX maps */
	for (i = 0; i < TSEC_TX_NUM_DESC; i++)
		if (sc->tx_bufmap[i].map_initialized)
			bus_dmamap_destroy(sc->tsec_tx_mtag,
			    sc->tx_bufmap[i].map);
	/* Destroy tag for TX mbufs */
	bus_dma_tag_destroy(sc->tsec_tx_mtag);

	/* Free RX mbufs and maps */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		if (sc->rx_data[i].mbuf) {
			/* Unload buffer from DMA */
			bus_dmamap_sync(sc->tsec_rx_mtag, sc->rx_data[i].map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->tsec_rx_mtag,
			    sc->rx_data[i].map);

			/* Free buffer */
			m_freem(sc->rx_data[i].mbuf);
		}
		/* Destroy map for this buffer */
		if (sc->rx_data[i].map != NULL)
			bus_dmamap_destroy(sc->tsec_rx_mtag,
			    sc->rx_data[i].map);
	}
	/* Destroy tag for RX mbufs */
	bus_dma_tag_destroy(sc->tsec_rx_mtag);

	/* Unload TX/RX descriptors */
	tsec_free_dma_desc(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    sc->tsec_tx_vaddr);
	tsec_free_dma_desc(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    sc->tsec_rx_vaddr);
}
/*
 * Bring the interface down: stop the tick callout, mask interrupts,
 * halt DMA, free every queued TX mbuf and disable the MAC RX/TX paths.
 * Global lock must be held.
 */
static void
tsec_stop(struct tsec_softc *sc)
{
	if_t ifp;
	uint32_t tmpval;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	/* Disable interface and watchdog timer */
	callout_stop(&sc->tsec_callout);
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->tsec_watchdog = 0;

	/* Disable all interrupts and stop DMA */
	tsec_intrs_ctl(sc, 0);
	tsec_dma_ctl(sc, 0);

	/* Remove pending data from TX queue */
	while (sc->tx_idx_tail != sc->tx_idx_head) {
		bus_dmamap_sync(sc->tsec_tx_mtag,
		    sc->tx_bufmap[sc->tx_idx_tail].map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tsec_tx_mtag,
		    sc->tx_bufmap[sc->tx_idx_tail].map);
		/* mbuf may be NULL for non-head slots; m_freem(NULL) is ok. */
		m_freem(sc->tx_bufmap[sc->tx_idx_tail].mbuf);
		sc->tx_idx_tail = (sc->tx_idx_tail + 1)
		    & (TSEC_TX_NUM_DESC - 1);
	}

	/* Disable RX and TX */
	tmpval = TSEC_READ(sc, TSEC_REG_MACCFG1);
	tmpval &= ~(TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, tmpval);
	DELAY(10);
}
/*
 * Once-per-second callout: run the TX watchdog, poll the PHY via
 * mii_tick(), and restart transmission if the link just came up while
 * packets are queued.  Reschedules itself for one second later.
 */
static void
tsec_tick(void *arg)
{
	struct tsec_softc *sc = arg;
	if_t ifp;
	int link;

	TSEC_GLOBAL_LOCK(sc);

	tsec_watchdog(sc);

	ifp = sc->tsec_ifp;
	link = sc->tsec_link;

	/* May update sc->tsec_link via the statchg callback. */
	mii_tick(sc->tsec_mii);

	/* Link transitioned down -> up during mii_tick(): kick TX. */
	if (link == 0 && sc->tsec_link == 1 &&
	    (!if_sendq_empty(ifp)))
		tsec_start_locked(ifp);

	/* Schedule another timeout one second from now. */
	callout_reset(&sc->tsec_callout, hz, tsec_tick, sc);

	TSEC_GLOBAL_UNLOCK(sc);
}
/*
 * This is the core RX routine. It replenishes mbufs in the descriptor and
 * sends data which have been dma'ed into host memory to upper layer.
 *
 * Loops at most count times if count is >= 0, or until done if count < 0.
 */
static int
tsec_receive_intr_locked(struct tsec_softc *sc, int count)
{
	struct tsec_desc *rx_desc;
	if_t ifp;
	struct rx_data_type *rx_data;
	struct mbuf *m;
	uint32_t i;
	int c, rx_npkts;
	uint16_t flags;

	TSEC_RECEIVE_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;
	rx_data = sc->rx_data;
	rx_npkts = 0;

	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (c = 0; ; c++) {
		/* Budget exhausted (count counts down only when >= 0). */
		if (count >= 0 && count-- == 0)
			break;

		rx_desc = TSEC_GET_CUR_RX_DESC(sc);
		flags = rx_desc->flags;

		/* Check if there is anything to receive */
		if ((flags & TSEC_RXBD_E) || (c >= TSEC_RX_NUM_DESC)) {
			/*
			 * Avoid generating another interrupt
			 */
			if (flags & TSEC_RXBD_E)
				TSEC_WRITE(sc, TSEC_REG_IEVENT,
				    TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);
			/*
			 * We didn't consume current descriptor and have to
			 * return it to the queue
			 */
			TSEC_BACK_CUR_RX_DESC(sc);
			break;
		}

		/* Error-marked descriptor: recycle it and drop any partial frame. */
		if (flags & (TSEC_RXBD_LG | TSEC_RXBD_SH | TSEC_RXBD_NO |
		    TSEC_RXBD_CR | TSEC_RXBD_OV | TSEC_RXBD_TR)) {
			rx_desc->length = 0;
			rx_desc->flags = (rx_desc->flags &
			    ~TSEC_RXBD_ZEROONINIT) | TSEC_RXBD_E | TSEC_RXBD_I;

			if (sc->frame != NULL) {
				m_free(sc->frame);
				sc->frame = NULL;
			}

			continue;
		}

		/* Ok... process frame */
		i = TSEC_GET_CUR_RX_DESC_CNT(sc);
		m = rx_data[i].mbuf;
		m->m_len = rx_desc->length;

		if (sc->frame != NULL) {
			/*
			 * Multi-descriptor frame in progress: on the last
			 * fragment the descriptor length covers the whole
			 * frame, so subtract what is already accumulated.
			 */
			if ((flags & TSEC_RXBD_L) != 0)
				m->m_len -= m_length(sc->frame, NULL);

			m->m_flags &= ~M_PKTHDR;
			m_cat(sc->frame, m);
		} else {
			sc->frame = m;
		}

		m = NULL;

		/* Frame is complete only when the Last bit is set. */
		if ((flags & TSEC_RXBD_L) != 0) {
			m = sc->frame;
			sc->frame = NULL;
		}

		if (tsec_new_rxbuf(sc->tsec_rx_mtag, rx_data[i].map,
		    &rx_data[i].mbuf, &rx_data[i].paddr)) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			/*
			 * We ran out of mbufs; didn't consume current
			 * descriptor and have to return it to the queue.
			 */
			TSEC_BACK_CUR_RX_DESC(sc);
			break;
		}

		/* Attach new buffer to descriptor and clear flags */
		rx_desc->bufptr = rx_data[i].paddr;
		rx_desc->length = 0;
		rx_desc->flags = (rx_desc->flags & ~TSEC_RXBD_ZEROONINIT) |
		    TSEC_RXBD_E | TSEC_RXBD_I;

		if (m != NULL) {
			m->m_pkthdr.rcvif = ifp;

			m_fixhdr(m);
			m_adj(m, -ETHER_CRC_LEN);

			if (sc->is_etsec)
				tsec_offload_process_frame(sc, m);

			/* Drop the lock across the stack input call. */
			TSEC_RECEIVE_UNLOCK(sc);
			if_input(ifp, m);
			TSEC_RECEIVE_LOCK(sc);
			rx_npkts++;
		}
	}

	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Make sure TSEC receiver is not halted.
	 *
	 * Various conditions can stop the TSEC receiver, but not all are
	 * signaled and handled by error interrupt, so make sure the receiver
	 * is running. Writing to TSEC_REG_RSTAT restarts the receiver when
	 * halted, and is harmless if already running.
	 */
	TSEC_WRITE(sc, TSEC_REG_RSTAT, TSEC_RSTAT_QHLT);
	return (rx_npkts);
}
/*
 * RX interrupt handler: acknowledge the RX events and drain the whole
 * ring (count = -1).  A no-op while polling(4) owns the device.
 */
void
tsec_receive_intr(void *arg)
{
	struct tsec_softc *sc = arg;

	TSEC_RECEIVE_LOCK(sc);

#ifdef DEVICE_POLLING
	if (if_getcapenable(sc->tsec_ifp) & IFCAP_POLLING) {
		TSEC_RECEIVE_UNLOCK(sc);
		return;
	}
#endif

	/* Confirm the interrupt was received by driver */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);
	tsec_receive_intr_locked(sc, -1);

	TSEC_RECEIVE_UNLOCK(sc);
}
/*
 * Reclaim completed TX descriptors: update collision statistics, unmap
 * and free transmitted mbufs, clear OACTIVE and restart transmission.
 * Transmit lock must be held.
 */
static void
tsec_transmit_intr_locked(struct tsec_softc *sc)
{
	if_t ifp;
	uint32_t tx_idx;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	/* Update collision statistics */
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, TSEC_READ(sc, TSEC_REG_MON_TNCL));

	/* Reset collision counters in hardware */
	TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);

	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	tx_idx = sc->tx_idx_tail;
	while (tx_idx != sc->tx_idx_head) {
		struct tsec_desc *tx_desc;
		struct tsec_bufmap *tx_bufmap;

		tx_desc = &sc->tsec_tx_vaddr[tx_idx];
		if (tx_desc->flags & TSEC_TXBD_R) {
			/* Hardware still owns this descriptor; stop here. */
			break;
		}

		tx_bufmap = &sc->tx_bufmap[tx_idx];
		tx_idx = (tx_idx + 1) & (TSEC_TX_NUM_DESC - 1);
		/* Only the packet's first bufmap slot carries the mbuf. */
		if (tx_bufmap->mbuf == NULL)
			continue;

		/*
		 * This is the last buf in this packet, so unmap and free it.
		 */
		bus_dmamap_sync(sc->tsec_tx_mtag, tx_bufmap->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tsec_tx_mtag, tx_bufmap->map);
		m_freem(tx_bufmap->mbuf);
		tx_bufmap->mbuf = NULL;

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	}
	sc->tx_idx_tail = tx_idx;
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	tsec_start_locked(ifp);

	/* Ring fully drained: disarm the watchdog. */
	if (sc->tx_idx_tail == sc->tx_idx_head)
		sc->tsec_watchdog = 0;
}
/*
 * TX interrupt handler: acknowledge the TX events and reclaim finished
 * descriptors.  A no-op while polling(4) owns the device.
 */
void
tsec_transmit_intr(void *arg)
{
	struct tsec_softc *sc = arg;

	TSEC_TRANSMIT_LOCK(sc);

#ifdef DEVICE_POLLING
	if (if_getcapenable(sc->tsec_ifp) & IFCAP_POLLING) {
		TSEC_TRANSMIT_UNLOCK(sc);
		return;
	}
#endif

	/* Confirm the interrupt was received by driver */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_TXB | TSEC_IEVENT_TXF);
	tsec_transmit_intr_locked(sc);

	TSEC_TRANSMIT_UNLOCK(sc);
}
/*
 * Handle error-class events: read IEVENT, acknowledge the error bits,
 * bump the relevant counters and restart hardware where needed (TX
 * halt on TXE, full reinit on a system bus error).  The "count"
 * argument is unused; it exists so the polling path can share this
 * entry point.  Global lock must be held.
 */
static void
tsec_error_intr_locked(struct tsec_softc *sc, int count)
{
	if_t ifp;
	uint32_t eflags;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	eflags = TSEC_READ(sc, TSEC_REG_IEVENT);

	/*
	 * Clear events bits in hardware.
	 * NOTE(review): TSEC_IEVENT_BABR is tested below but is not part
	 * of this acknowledge mask — confirm it is cleared elsewhere.
	 */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXC | TSEC_IEVENT_BSY |
	    TSEC_IEVENT_EBERR | TSEC_IEVENT_MSRO | TSEC_IEVENT_BABT |
	    TSEC_IEVENT_TXC | TSEC_IEVENT_TXE | TSEC_IEVENT_LC |
	    TSEC_IEVENT_CRL | TSEC_IEVENT_XFUN);

	/* Check transmitter errors */
	if (eflags & TSEC_IEVENT_TXE) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

		if (eflags & TSEC_IEVENT_LC)
			if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);

		/* Restart the (halted) transmitter. */
		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
	}

	/* Check for discarded frame due to a lack of buffers */
	if (eflags & TSEC_IEVENT_BSY) {
		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
	}

	if (if_getflags(ifp) & IFF_DEBUG)
		if_printf(ifp, "tsec_error_intr(): event flags: 0x%x\n",
		    eflags);

	/* System bus error: full reinitialization. */
	if (eflags & TSEC_IEVENT_EBERR) {
		/*
		 * Bug fix: the adjacent string literals used to concatenate
		 * to "duringDMA" — a space was missing.
		 */
		if_printf(ifp, "System bus error occurred during "
		    "DMA transaction (flags: 0x%x)\n", eflags);
		tsec_init_locked(sc);
	}

	if (eflags & TSEC_IEVENT_BABT)
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

	if (eflags & TSEC_IEVENT_BABR)
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
}
/*
 * Error interrupt handler: delegate to tsec_error_intr_locked() with no
 * packet budget, under the global lock.
 */
void
tsec_error_intr(void *arg)
{
	struct tsec_softc *sc = arg;

	TSEC_GLOBAL_LOCK(sc);

	tsec_error_intr_locked(sc, -1);

	TSEC_GLOBAL_UNLOCK(sc);
}
/*
 * MII bus read: start a read cycle for (phy, reg), wait for completion
 * and return the MIIMSTAT value.  A timeout is only logged; whatever
 * the status register holds is still returned.
 */
int
tsec_miibus_readreg(device_t dev, int phy, int reg)
{
	struct tsec_softc *sc;
	int timeout;
	int rv;

	sc = device_get_softc(dev);

	TSEC_PHY_LOCK();
	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg);
	/* Pulse the read-cycle bit: clear, then set. */
	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCOM, 0);
	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCOM, TSEC_MIIMCOM_READCYCLE);

	timeout = tsec_mii_wait(sc, TSEC_MIIMIND_NOTVALID | TSEC_MIIMIND_BUSY);
	rv = TSEC_PHY_READ(sc, TSEC_REG_MIIMSTAT);
	TSEC_PHY_UNLOCK();

	if (timeout)
		device_printf(dev, "Timeout while reading from PHY!\n");

	return (rv);
}
/*
 * MII bus write: write "value" to (phy, reg) and wait for the cycle to
 * complete.  Always returns 0; a timeout is only logged.
 */
int
tsec_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct tsec_softc *sc;
	int timeout;

	sc = device_get_softc(dev);

	TSEC_PHY_LOCK();
	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg);
	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCON, value);
	timeout = tsec_mii_wait(sc, TSEC_MIIMIND_BUSY);
	TSEC_PHY_UNLOCK();

	if (timeout)
		device_printf(dev, "Timeout while writing to PHY!\n");

	return (0);
}
/*
 * MII status-change callback: program the MACCFG2 duplex and interface
 * mode (GMII vs MII) to match the negotiated media, cache the link
 * state in sc->tsec_link, and update the reduced-mode R100M bit where
 * applicable.
 */
void
tsec_miibus_statchg(device_t dev)
{
	struct tsec_softc *sc;
	struct mii_data *mii;
	uint32_t ecntrl, id, tmp;
	int link;

	sc = device_get_softc(dev);
	mii = sc->tsec_mii;
	link = ((mii->mii_media_status & IFM_ACTIVE) ? 1 : 0);

	tmp = TSEC_READ(sc, TSEC_REG_MACCFG2) & ~TSEC_MACCFG2_IF;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		tmp |= TSEC_MACCFG2_FULLDUPLEX;
	else
		tmp &= ~TSEC_MACCFG2_FULLDUPLEX;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		/* Gigabit media uses the GMII interface mode. */
		tmp |= TSEC_MACCFG2_GMII;
		sc->tsec_link = link;
		break;
	case IFM_100_TX:
	case IFM_10_T:
		tmp |= TSEC_MACCFG2_MII;
		sc->tsec_link = link;
		break;
	case IFM_NONE:
		if (link)
			device_printf(dev, "No speed selected but link "
			    "active!\n");
		sc->tsec_link = 0;
		return;
	default:
		sc->tsec_link = 0;
		device_printf(dev, "Unknown speed (%d), link %s!\n",
		    IFM_SUBTYPE(mii->mii_media_active),
		    ((link) ? "up" : "down"));
		return;
	}
	TSEC_WRITE(sc, TSEC_REG_MACCFG2, tmp);

	/* XXX kludge - use circumstantial evidence for reduced mode. */
	id = TSEC_READ(sc, TSEC_REG_ID2);
	if (id & 0xffff) {
		ecntrl = TSEC_READ(sc, TSEC_REG_ECNTRL) & ~TSEC_ECNTRL_R100M;
		ecntrl |= (tmp & TSEC_MACCFG2_MII) ? TSEC_ECNTRL_R100M : 0;
		TSEC_WRITE(sc, TSEC_REG_ECNTRL, ecntrl);
	}
}
/*
 * Attach the dev.tsec.<unit>.int_coal sysctl subtree with the four
 * interrupt-coalescing knobs (RX/TX time and frame-count thresholds);
 * see the large comment below for semantics.
 */
static void
tsec_add_sysctls(struct tsec_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "TSEC Interrupts coalescing");
	children = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, TSEC_IC_RX,
	    tsec_sysctl_ic_time, "I", "IC RX time threshold (0-65535)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_count",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, TSEC_IC_RX,
	    tsec_sysctl_ic_count, "I", "IC RX frame count threshold (0-255)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, TSEC_IC_TX,
	    tsec_sysctl_ic_time, "I", "IC TX time threshold (0-65535)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_count",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, TSEC_IC_TX,
	    tsec_sysctl_ic_count, "I", "IC TX frame count threshold (0-255)");
}
/*
* With Interrupt Coalescing (IC) active, a transmit/receive frame
* interrupt is raised either upon:
*
* - threshold-defined period of time elapsed, or
* - threshold-defined number of frames is received/transmitted,
* whichever occurs first.
*
* The following sysctls regulate IC behaviour (for TX/RX separately):
*
* dev.tsec.<unit>.int_coal.rx_time
* dev.tsec.<unit>.int_coal.rx_count
* dev.tsec.<unit>.int_coal.tx_time
* dev.tsec.<unit>.int_coal.tx_count
*
* Values:
*
* - 0 for either time or count disables IC on the given TX/RX path
*
* - count: 1-255 (expresses frame count number; note that value of 1 is
* effectively IC off)
*
 * - time: 1-65535 (value corresponds to a real time period and is
 * expressed in units equivalent to 64 TSEC interface clocks, i.e. one timer
 * threshold unit is 26.5 us, 2.56 us, or 512 ns, corresponding to 10 Mbps,
 * 100 Mbps, or 1 Gbps, respectively). For detailed discussion consult the
 * TSEC reference manual.
*/
static int
tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS)
{
int error;
uint32_t time;
struct tsec_softc *sc = (struct tsec_softc *)arg1;
time = (arg2 == TSEC_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
error = sysctl_handle_int(oidp, &time, 0, req);
if (error != 0)
return (error);
if (time > 65535)
return (EINVAL);
TSEC_IC_LOCK(sc);
if (arg2 == TSEC_IC_RX) {
sc->rx_ic_time = time;
tsec_set_rxic(sc);
} else {
sc->tx_ic_time = time;
tsec_set_txic(sc);
}
TSEC_IC_UNLOCK(sc);
return (0);
}
static int
tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS)
{
int error;
uint32_t count;
struct tsec_softc *sc = (struct tsec_softc *)arg1;
count = (arg2 == TSEC_IC_RX) ? sc->rx_ic_count : sc->tx_ic_count;
error = sysctl_handle_int(oidp, &count, 0, req);
if (error != 0)
return (error);
if (count > 255)
return (EINVAL);
TSEC_IC_LOCK(sc);
if (arg2 == TSEC_IC_RX) {
sc->rx_ic_count = count;
tsec_set_rxic(sc);
} else {
sc->tx_ic_count = count;
tsec_set_txic(sc);
}
TSEC_IC_UNLOCK(sc);
return (0);
}
/*
 * Program the RX interrupt-coalescing register from the cached
 * thresholds; either threshold at zero disables coalescing entirely.
 */
static void
tsec_set_rxic(struct tsec_softc *sc)
{
	uint32_t val;

	if (sc->rx_ic_count == 0 || sc->rx_ic_time == 0) {
		/* Disable RX IC */
		val = 0;
	} else {
		/* Enable bit | frame-count threshold | timer threshold. */
		val = 0x80000000 | (sc->rx_ic_count << 21) | sc->rx_ic_time;
	}
	TSEC_WRITE(sc, TSEC_REG_RXIC, val);
}
/*
 * Program the TX interrupt-coalescing register from the cached
 * thresholds; either threshold at zero disables coalescing entirely.
 */
static void
tsec_set_txic(struct tsec_softc *sc)
{
	uint32_t val;

	if (sc->tx_ic_count == 0 || sc->tx_ic_time == 0) {
		/* Disable TX IC */
		val = 0;
	} else {
		/* Enable bit | frame-count threshold | timer threshold. */
		val = 0x80000000 | (sc->tx_ic_count << 21) | sc->tx_ic_time;
	}
	TSEC_WRITE(sc, TSEC_REG_TXIC, val);
}
/*
 * Program the eTSEC checksum-offload registers according to the
 * interface's enabled capabilities and set if_hwassist to match.
 * Global lock must be held.
 */
static void
tsec_offload_setup(struct tsec_softc *sc)
{
	if_t ifp = sc->tsec_ifp;
	uint32_t reg;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	/*
	 * NOTE(review): the TCTRL csum-enable bits are set regardless of
	 * IFCAP_TXCSUM; only if_hwassist gates their use — confirm intended.
	 */
	reg = TSEC_READ(sc, TSEC_REG_TCTRL);
	reg |= TSEC_TCTRL_IPCSEN | TSEC_TCTRL_TUCSEN;

	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
		if_sethwassist(ifp, TSEC_CHECKSUM_FEATURES);
	else
		if_sethwassist(ifp, 0);

	TSEC_WRITE(sc, TSEC_REG_TCTRL, reg);

	reg = TSEC_READ(sc, TSEC_REG_RCTRL);
	reg &= ~(TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN | TSEC_RCTRL_PRSDEP);
	/* Always parse at least L2 and extract VLAN tags. */
	reg |= TSEC_RCTRL_PRSDEP_PARSE_L2 | TSEC_RCTRL_VLEX;

	if (if_getcapenable(ifp) & IFCAP_RXCSUM)
		reg |= TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN |
		    TSEC_RCTRL_PRSDEP_PARSE_L234;

	TSEC_WRITE(sc, TSEC_REG_RCTRL, reg);
}
/*
 * Translate the eTSEC RX frame control block (FCB) prepended to the
 * frame into mbuf checksum/VLAN metadata, then strip the FCB from the
 * data.  Receive lock must be held.
 */
static void
tsec_offload_process_frame(struct tsec_softc *sc, struct mbuf *m)
{
	struct tsec_rx_fcb rx_fcb;
	int csum_flags = 0;
	int protocol, flags;

	TSEC_RECEIVE_LOCK_ASSERT(sc);

	m_copydata(m, 0, sizeof(struct tsec_rx_fcb), (caddr_t)(&rx_fcb));
	flags = rx_fcb.flags;
	protocol = rx_fcb.protocol;

	/* IP header checksum verified by hardware? */
	if (TSEC_RX_FCB_IP_CSUM_CHECKED(flags)) {
		csum_flags |= CSUM_IP_CHECKED;

		if ((flags & TSEC_RX_FCB_IP_CSUM_ERROR) == 0)
			csum_flags |= CSUM_IP_VALID;
	}

	/* TCP/UDP checksum verified and correct? */
	if ((protocol == IPPROTO_TCP || protocol == IPPROTO_UDP) &&
	    TSEC_RX_FCB_TCP_UDP_CSUM_CHECKED(flags) &&
	    (flags & TSEC_RX_FCB_TCP_UDP_CSUM_ERROR) == 0) {
		csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
	}

	m->m_pkthdr.csum_flags = csum_flags;

	/* Hardware-extracted VLAN tag, if present. */
	if (flags & TSEC_RX_FCB_VLAN) {
		m->m_pkthdr.ether_vtag = rx_fcb.vlan;
		m->m_flags |= M_VLANTAG;
	}

	/* Remove the FCB so the stack sees the Ethernet header first. */
	m_adj(m, sizeof(struct tsec_rx_fcb));
}
/*
 * if_foreach_llmaddr() callback: hash one multicast address into the
 * 256-bit group-address table (8 x 32-bit words, MSB-first within each
 * word).  Always returns 1 so the caller counts processed addresses.
 */
static u_int
tsec_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *tbl = arg;
	uint32_t bit;

	bit = (ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 24) & 0xFF;
	tbl[bit >> 5] |= 1 << (0x1F - (bit & 0x1F));
	return (1);
}
/*
 * Program the group-address hash registers: all-ones for IFF_ALLMULTI,
 * otherwise the hash of every configured link-layer multicast address.
 * Global lock must be held.
 */
static void
tsec_setup_multicast(struct tsec_softc *sc)
{
	uint32_t ht[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	if_t ifp;
	int i;

	TSEC_GLOBAL_LOCK_ASSERT(sc);
	ifp = sc->tsec_ifp;

	if (if_getflags(ifp) & IFF_ALLMULTI) {
		/* Accept every multicast frame. */
		for (i = 0; i < 8; i++)
			TSEC_WRITE(sc, TSEC_REG_GADDR(i), 0xFFFFFFFF);
		return;
	}

	if_foreach_llmaddr(ifp, tsec_hash_maddr, &ht);
	for (i = 0; i < 8; i++)
		TSEC_WRITE(sc, TSEC_REG_GADDR(i), ht[i]);
}
/*
 * Validate and program a new MTU: convert it to a full frame length
 * (header + VLAN + CRC) and write MAXFRM if it fits the hardware
 * limits.  Returns the frame length on success, 0 when out of range.
 * Global lock must be held.
 */
static int
tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu)
{
	unsigned int frame;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	frame = mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;
	if (frame < TSEC_MIN_FRAME_SIZE || frame > TSEC_MAX_FRAME_SIZE)
		return (0);

	TSEC_WRITE(sc, TSEC_REG_MAXFRM, frame);
	return (frame);
}
diff --git a/sys/dev/usb/net/if_usie.c b/sys/dev/usb/net/if_usie.c
index 6f5c207ce42c..193ed956595a 100644
--- a/sys/dev/usb/net/if_usie.c
+++ b/sys/dev/usb/net/if_usie.c
@@ -1,1611 +1,1606 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2011 Anybots Inc
* written by Akinori Furukoshi <moonlightakkiy@yahoo.ca>
* - ucom part is based on u3g.c
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/sockio.h>
#include <sys/socket.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_var.h>
#include <machine/bus.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/netisr.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/udp.h>
#include <net80211/ieee80211_ioctl.h>
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>
#include <dev/usb/usb_cdc.h>
#include "usbdevs.h"
#define USB_DEBUG_VAR usie_debug
#include <dev/usb/usb_debug.h>
#include <dev/usb/usb_process.h>
#include <dev/usb/usb_msctest.h>
#include <dev/usb/serial/usb_serial.h>
#include <dev/usb/net/if_usievar.h>
#ifdef USB_DEBUG
/* Debug verbosity knob, exported as hw.usb.usie.debug (tunable). */
static int usie_debug = 0;

static SYSCTL_NODE(_hw_usb, OID_AUTO, usie, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "sierra USB modem");
SYSCTL_INT(_hw_usb_usie, OID_AUTO, debug, CTLFLAG_RWTUN, &usie_debug, 0,
    "usie debug level");
#endif
/* Sierra Wireless Direct IP modems */
/* USB vendor/product match table used for probing and PNP info. */
static const STRUCT_USB_HOST_ID usie_devs[] = {
#define USIE_DEV(v, d) { \
	USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##d) }
	USIE_DEV(SIERRA, MC8700),
	USIE_DEV(SIERRA, TRUINSTALL),
	USIE_DEV(AIRPRIME, USB308),
#undef USIE_DEV
};
/*
 * Forward declarations: newbus device methods, ucom(4) serial hooks,
 * USB transfer callbacks and the network-interface glue.
 */
static device_probe_t usie_probe;
static device_attach_t usie_attach;
static device_detach_t usie_detach;
static void usie_free_softc(struct usie_softc *);

static void usie_free(struct ucom_softc *);
static void usie_uc_update_line_state(struct ucom_softc *, uint8_t);
static void usie_uc_cfg_get_status(struct ucom_softc *, uint8_t *, uint8_t *);
static void usie_uc_cfg_set_dtr(struct ucom_softc *, uint8_t);
static void usie_uc_cfg_set_rts(struct ucom_softc *, uint8_t);
static void usie_uc_cfg_open(struct ucom_softc *);
static void usie_uc_cfg_close(struct ucom_softc *);
static void usie_uc_start_read(struct ucom_softc *);
static void usie_uc_stop_read(struct ucom_softc *);
static void usie_uc_start_write(struct ucom_softc *);
static void usie_uc_stop_write(struct ucom_softc *);

static usb_callback_t usie_uc_tx_callback;
static usb_callback_t usie_uc_rx_callback;
static usb_callback_t usie_uc_status_callback;
static usb_callback_t usie_if_tx_callback;
static usb_callback_t usie_if_rx_callback;
static usb_callback_t usie_if_status_callback;

static void usie_if_sync_to(void *);
static void usie_if_sync_cb(void *, int);
static void usie_if_status_cb(void *, int);

static void usie_if_start(if_t);
static int usie_if_output(if_t, struct mbuf *,
	const struct sockaddr *, struct route *);

static void usie_if_init(void *);
static void usie_if_stop(struct usie_softc *);
static int usie_if_ioctl(if_t, u_long, caddr_t);

static int usie_do_request(struct usie_softc *, struct usb_device_request *, void *);
static int usie_if_cmd(struct usie_softc *, uint8_t);
static void usie_cns_req(struct usie_softc *, uint32_t, uint16_t);
static void usie_cns_rsp(struct usie_softc *, struct usie_cns *);
static void usie_hip_rsp(struct usie_softc *, uint8_t *, uint32_t);
static int usie_driver_loaded(struct module *, int, void *);
/* USB transfer configuration for the ucom (serial) side: one interrupt
 * status pipe plus bulk RX/TX pipes. */
static const struct usb_config usie_uc_config[USIE_UC_N_XFER] = {
	[USIE_UC_STATUS] = {
		.type = UE_INTERRUPT,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = 0,	/* use wMaxPacketSize */
		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
		.callback = &usie_uc_status_callback,
	},

	[USIE_UC_RX] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = USIE_BUFSIZE,
		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,.proxy_buffer = 1,},
		.callback = &usie_uc_rx_callback,
	},

	[USIE_UC_TX] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.bufsize = USIE_BUFSIZE,
		.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
		.callback = &usie_uc_tx_callback,
	}
};
/*
 * USB transfer layout for the Direct IP network interface:
 * interrupt-IN notifications plus a bulk IN/OUT data pair.
 */
static const struct usb_config usie_if_config[USIE_IF_N_XFER] = {
	[USIE_IF_STATUS] = {
		.type = UE_INTERRUPT,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = 0,		/* use wMaxPacketSize */
		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
		.callback = &usie_if_status_callback,
	},

	[USIE_IF_RX] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = USIE_BUFSIZE,
		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
		.callback = &usie_if_rx_callback,
	},

	[USIE_IF_TX] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		/* large enough for a full mbuf cluster plus descriptor */
		.bufsize = MAX(USIE_BUFSIZE, MCLBYTES),
		.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
		.callback = &usie_if_tx_callback,
	}
};
/* newbus device method table */
static device_method_t usie_methods[] = {
	DEVMETHOD(device_probe, usie_probe),
	DEVMETHOD(device_attach, usie_attach),
	DEVMETHOD(device_detach, usie_detach),
	DEVMETHOD_END
};
/* newbus driver descriptor */
static driver_t usie_driver = {
	.name = "usie",
	.methods = usie_methods,
	.size = sizeof(struct usie_softc),
};
/* usb_dev_configured eventhandler tag, registered at MOD_LOAD */
static eventhandler_tag usie_etag;

DRIVER_MODULE(usie, uhub, usie_driver, usie_driver_loaded, NULL);
MODULE_DEPEND(usie, ucom, 1, 1, 1);
MODULE_DEPEND(usie, usb, 1, 1, 1);
MODULE_VERSION(usie, 1);
USB_PNP_HOST_INFO(usie_devs);
/* callbacks handed to the generic ucom (serial) layer */
static const struct ucom_callback usie_uc_callback = {
	.ucom_cfg_get_status = &usie_uc_cfg_get_status,
	.ucom_cfg_set_dtr = &usie_uc_cfg_set_dtr,
	.ucom_cfg_set_rts = &usie_uc_cfg_set_rts,
	.ucom_cfg_open = &usie_uc_cfg_open,
	.ucom_cfg_close = &usie_uc_cfg_close,
	.ucom_start_read = &usie_uc_start_read,
	.ucom_stop_read = &usie_uc_stop_read,
	.ucom_start_write = &usie_uc_start_write,
	.ucom_stop_write = &usie_uc_stop_write,
	.ucom_free = &usie_free,
};
/*
 * usb_dev_configured event handler.  When a matching device enumerates
 * in its initial USB mass-storage ("driver CD") mode, send a
 * vendor-specific request so it re-enumerates as a modem.
 */
static void
usie_autoinst(void *arg, struct usb_device *udev,
    struct usb_attach_arg *uaa)
{
	struct usb_interface *iface;
	struct usb_interface_descriptor *id;
	struct usb_device_request req;
	int err;

	if (uaa->dev_state != UAA_DEV_READY)
		return;

	iface = usbd_get_iface(udev, 0);
	if (iface == NULL)
		return;

	/* only act while the device exposes a mass-storage interface */
	id = iface->idesc;
	if (id == NULL || id->bInterfaceClass != UICLASS_MASS)
		return;

	if (usbd_lookup_id_by_uaa(usie_devs, sizeof(usie_devs), uaa) != 0)
		return;			/* no device match */

	if (bootverbose) {
		DPRINTF("Ejecting %s %s\n",
		    usb_get_manufacturer(udev),
		    usb_get_product(udev));
	}

	/*
	 * Vendor-specific mode-switch request.  NOTE(review): the
	 * UR_SET_INTERFACE / UF_DEVICE_REMOTE_WAKEUP / UHF_PORT_CONNECTION
	 * constants are presumably reused here for their numeric values
	 * only — confirm against Sierra Wireless documentation.
	 */
	req.bmRequestType = UT_VENDOR;
	req.bRequest = UR_SET_INTERFACE;
	USETW(req.wValue, UF_DEVICE_REMOTE_WAKEUP);
	USETW(req.wIndex, UHF_PORT_CONNECTION);
	USETW(req.wLength, 0);

	/* at this moment there is no mutex */
	err = usbd_do_request_flags(udev, NULL, &req,
	    NULL, 0, NULL, 250 /* ms */ );

	/* success, mark the udev as disappearing */
	if (err == 0)
		uaa->dev_state = UAA_DEV_EJECTING;
}
/*
 * Probe: accept only host-mode devices that expose the expected
 * configuration, interface index and vendor class, and that appear in
 * the usie ID table.
 */
static int
usie_probe(device_t self)
{
	struct usb_attach_arg *uaa = device_get_ivars(self);

	if (uaa->usb_mode != USB_MODE_HOST ||
	    uaa->info.bConfigIndex != USIE_CNFG_INDEX ||
	    uaa->info.bIfaceIndex != USIE_IFACE_INDEX ||
	    uaa->info.bInterfaceClass != UICLASS_VENDOR)
		return (ENXIO);

	return (usbd_lookup_id_by_uaa(usie_devs, sizeof(usie_devs), uaa));
}
/*
 * Attach: power the modem up, probe its firmware attributes, set up
 * the multiplexed serial ports and the Direct IP network interface.
 *
 * Fix: the range contained leftover unified-diff deletion markers
 * (the removed, dead NULL check after if_alloc(), which cannot fail
 * on modern FreeBSD); those non-C lines are dropped here.
 */
static int
usie_attach(device_t self)
{
	struct usie_softc *sc = device_get_softc(self);
	struct usb_attach_arg *uaa = device_get_ivars(self);
	if_t ifp;
	struct usb_interface *iface;
	struct usb_interface_descriptor *id;
	struct usb_device_request req;
	int err;
	uint16_t fwattr;
	uint8_t iface_index;
	uint8_t ifidx;
	uint8_t start;

	device_set_usb_desc(self);
	sc->sc_udev = uaa->device;
	sc->sc_dev = self;

	mtx_init(&sc->sc_mtx, "usie", MTX_NETWORK_LOCK, MTX_DEF);
	ucom_ref(&sc->sc_super_ucom);

	TASK_INIT(&sc->sc_if_status_task, 0, usie_if_status_cb, sc);
	TASK_INIT(&sc->sc_if_sync_task, 0, usie_if_sync_cb, sc);

	usb_callout_init_mtx(&sc->sc_if_sync_ch, &sc->sc_mtx, 0);

	mtx_lock(&sc->sc_mtx);

	/* set power mode to D0 */
	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
	req.bRequest = USIE_POWER;
	USETW(req.wValue, 0);
	USETW(req.wIndex, 0);
	USETW(req.wLength, 0);
	if (usie_do_request(sc, &req, NULL)) {
		mtx_unlock(&sc->sc_mtx);
		goto detach;
	}

	/* read firmware attributes */
	fwattr = 0;
	req.bmRequestType = UT_READ_VENDOR_DEVICE;
	req.bRequest = USIE_FW_ATTR;
	USETW(req.wValue, 0);
	USETW(req.wIndex, 0);
	USETW(req.wLength, sizeof(fwattr));
	if (usie_do_request(sc, &req, &fwattr)) {
		mtx_unlock(&sc->sc_mtx);
		goto detach;
	}
	mtx_unlock(&sc->sc_mtx);

	/* check DHCP support */
	DPRINTF("fwattr=%x\n", fwattr);
	if (!(fwattr & USIE_FW_DHCP)) {
		device_printf(self, "DHCP is not supported. A firmware upgrade might be needed.\n");
	}

	/* find available interfaces */
	sc->sc_nucom = 0;
	for (ifidx = 0; ifidx < USIE_IFACE_MAX; ifidx++) {
		iface = usbd_get_iface(uaa->device, ifidx);
		if (iface == NULL)
			break;

		id = usbd_get_interface_descriptor(iface);
		if ((id == NULL) || (id->bInterfaceClass != UICLASS_VENDOR))
			continue;

		/* setup Direct IP transfer */
		if (id->bInterfaceNumber >= 7 && id->bNumEndpoints == 3) {
			sc->sc_if_ifnum = id->bInterfaceNumber;
			iface_index = ifidx;

			DPRINTF("ifnum=%d, ifidx=%d\n",
			    sc->sc_if_ifnum, ifidx);

			err = usbd_transfer_setup(uaa->device,
			    &iface_index, sc->sc_if_xfer, usie_if_config,
			    USIE_IF_N_XFER, sc, &sc->sc_mtx);
			if (err == 0)
				continue;

			device_printf(self,
			    "could not allocate USB transfers on "
			    "iface_index=%d, err=%s\n",
			    iface_index, usbd_errstr(err));
			goto detach;
		}

		/* setup ucom */
		if (sc->sc_nucom >= USIE_UCOM_MAX)
			continue;

		usbd_set_parent_iface(uaa->device, ifidx,
		    uaa->info.bIfaceIndex);

		DPRINTF("NumEndpoints=%d bInterfaceNumber=%d\n",
		    id->bNumEndpoints, id->bInterfaceNumber);

		/* two-endpoint interfaces have no status pipe */
		if (id->bNumEndpoints == 2) {
			sc->sc_uc_xfer[sc->sc_nucom][0] = NULL;
			start = 1;
		} else
			start = 0;

		err = usbd_transfer_setup(uaa->device, &ifidx,
		    sc->sc_uc_xfer[sc->sc_nucom] + start,
		    usie_uc_config + start, USIE_UC_N_XFER - start,
		    &sc->sc_ucom[sc->sc_nucom], &sc->sc_mtx);
		if (err != 0) {
			DPRINTF("usbd_transfer_setup error=%s\n", usbd_errstr(err));
			continue;
		}

		mtx_lock(&sc->sc_mtx);
		for (; start < USIE_UC_N_XFER; start++)
			usbd_xfer_set_stall(sc->sc_uc_xfer[sc->sc_nucom][start]);
		mtx_unlock(&sc->sc_mtx);

		sc->sc_uc_ifnum[sc->sc_nucom] = id->bInterfaceNumber;

		sc->sc_nucom++;		/* found a port */
	}

	if (sc->sc_nucom == 0) {
		device_printf(self, "no comports found\n");
		goto detach;
	}

	err = ucom_attach(&sc->sc_super_ucom, sc->sc_ucom,
	    sc->sc_nucom, sc, &usie_uc_callback, &sc->sc_mtx);
	if (err != 0) {
		DPRINTF("ucom_attach failed\n");
		goto detach;
	}
	DPRINTF("Found %d interfaces.\n", sc->sc_nucom);

	/* setup ifnet (Direct IP) */
	sc->sc_ifp = ifp = if_alloc(IFT_OTHER);
	if_initname(ifp, "usie", device_get_unit(self));
	if_setsoftc(ifp, sc);
	if_setmtu(ifp, USIE_MTU_MAX);
	if_setflagbits(ifp, IFF_NOARP, 0);
	if_setinitfn(ifp, usie_if_init);
	if_setioctlfn(ifp, usie_if_ioctl);
	if_setstartfn(ifp, usie_if_start);
	if_setoutputfn(ifp, usie_if_output);
	if_setsendqlen(ifp, ifqmaxlen);
	if_setsendqready(ifp);

	if_attach(ifp);
	bpfattach(ifp, DLT_RAW, 0);

	if (fwattr & USIE_PM_AUTO) {
		usbd_set_power_mode(uaa->device, USB_POWER_MODE_SAVE);
		DPRINTF("enabling automatic suspend and resume\n");
	} else {
		usbd_set_power_mode(uaa->device, USB_POWER_MODE_ON);
		DPRINTF("USB power is always ON\n");
	}

	DPRINTF("device attached\n");
	return (0);

detach:
	usie_detach(self);
	return (ENOMEM);
}
/*
 * Detach: tear down the network interface, the ucom ports and all USB
 * transfers, then drop the softc reference taken in attach.
 */
static int
usie_detach(device_t self)
{
	struct usie_softc *sc = device_get_softc(self);
	uint8_t x;

	/* detach ifnet */
	if (sc->sc_ifp != NULL) {
		usie_if_stop(sc);
		usbd_transfer_unsetup(sc->sc_if_xfer, USIE_IF_N_XFER);
		bpfdetach(sc->sc_ifp);
		if_detach(sc->sc_ifp);
		if_free(sc->sc_ifp);
		sc->sc_ifp = NULL;
	}

	/* detach ucom */
	if (sc->sc_nucom > 0)
		ucom_detach(&sc->sc_super_ucom, sc->sc_ucom);

	/*
	 * stop all USB transfers; the if_xfer unsetup is a harmless
	 * no-op when it already ran in the ifnet branch above
	 */
	usbd_transfer_unsetup(sc->sc_if_xfer, USIE_IF_N_XFER);

	for (x = 0; x != USIE_UCOM_MAX; x++)
		usbd_transfer_unsetup(sc->sc_uc_xfer[x], USIE_UC_N_XFER);

	/* keep the softc alive until the last ucom reference is gone */
	device_claim_softc(self);

	usie_free_softc(sc);

	return (0);
}
UCOM_UNLOAD_DRAIN(usie);
/*
 * Drop one reference on the shared ucom refcount; the last holder
 * destroys the mutex and releases the softc.
 */
static void
usie_free_softc(struct usie_softc *sc)
{
	if (!ucom_unref(&sc->sc_super_ucom))
		return;

	mtx_destroy(&sc->sc_mtx);
	device_free_softc(sc);
}
/* ucom_free callback: release one softc reference */
static void
usie_free(struct ucom_softc *ucom)
{
	usie_free_softc(ucom->sc_parent);
}
/*
 * Push the DTR/RTS line state of one serial subunit to the modem via
 * a class-specific control request.  Skipped for subunits that have
 * no status pipe (the transfer pointer is NULL for those).
 */
static void
usie_uc_update_line_state(struct ucom_softc *ucom, uint8_t ls)
{
	struct usie_softc *sc = ucom->sc_parent;
	struct usb_device_request req;

	if (sc->sc_uc_xfer[ucom->sc_subunit][USIE_UC_STATUS] == NULL)
		return;

	req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
	req.bRequest = USIE_LINK_STATE;
	USETW(req.wValue, ls);
	USETW(req.wIndex, sc->sc_uc_ifnum[ucom->sc_subunit]);
	USETW(req.wLength, 0);

	DPRINTF("sc_uc_ifnum=%d\n", sc->sc_uc_ifnum[ucom->sc_subunit]);

	usie_do_request(sc, &req, NULL);
}
/* Report the modem/line status cached by the status callback. */
static void
usie_uc_cfg_get_status(struct ucom_softc *ucom, uint8_t *lsr, uint8_t *msr)
{
	struct usie_softc *parent = ucom->sc_parent;

	*lsr = parent->sc_lsr;
	*msr = parent->sc_msr;
}
/* Translate the boolean DTR request into the device line-state bit. */
static void
usie_uc_cfg_set_dtr(struct ucom_softc *ucom, uint8_t flag)
{
	usie_uc_update_line_state(ucom, flag ? USIE_LS_DTR : 0);
}
/* Translate the boolean RTS request into the device line-state bit. */
static void
usie_uc_cfg_set_rts(struct ucom_softc *ucom, uint8_t flag)
{
	usie_uc_update_line_state(ucom, flag ? USIE_LS_RTS : 0);
}
/* Port open: start polling the interrupt status pipe. */
static void
usie_uc_cfg_open(struct ucom_softc *ucom)
{
	struct usie_softc *parent = ucom->sc_parent;

	/* usbd_transfer_start() is NULL safe */
	usbd_transfer_start(parent->sc_uc_xfer[ucom->sc_subunit][USIE_UC_STATUS]);
}
/* Port close: stop polling the interrupt status pipe. */
static void
usie_uc_cfg_close(struct ucom_softc *ucom)
{
	struct usie_softc *parent = ucom->sc_parent;

	usbd_transfer_stop(parent->sc_uc_xfer[ucom->sc_subunit][USIE_UC_STATUS]);
}
/* Kick the bulk-IN transfer for this serial subunit. */
static void
usie_uc_start_read(struct ucom_softc *ucom)
{
	struct usie_softc *parent = ucom->sc_parent;

	usbd_transfer_start(parent->sc_uc_xfer[ucom->sc_subunit][USIE_UC_RX]);
}
/* Halt the bulk-IN transfer for this serial subunit. */
static void
usie_uc_stop_read(struct ucom_softc *ucom)
{
	struct usie_softc *parent = ucom->sc_parent;

	usbd_transfer_stop(parent->sc_uc_xfer[ucom->sc_subunit][USIE_UC_RX]);
}
/* Kick the bulk-OUT transfer for this serial subunit. */
static void
usie_uc_start_write(struct ucom_softc *ucom)
{
	struct usie_softc *parent = ucom->sc_parent;

	usbd_transfer_start(parent->sc_uc_xfer[ucom->sc_subunit][USIE_UC_TX]);
}
/* Halt the bulk-OUT transfer for this serial subunit. */
static void
usie_uc_stop_write(struct ucom_softc *ucom)
{
	struct usie_softc *parent = ucom->sc_parent;

	usbd_transfer_stop(parent->sc_uc_xfer[ucom->sc_subunit][USIE_UC_TX]);
}
/*
 * Bulk-IN completion for a ucom port.  Data arriving on the HIP
 * control port (ucom == sc->sc_ucom, i.e. subunit 0) may carry a
 * framed CnS response, which is diverted to usie_hip_rsp() instead of
 * the TTY layer.
 */
static void
usie_uc_rx_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct ucom_softc *ucom = usbd_xfer_softc(xfer);
	struct usie_softc *sc = ucom->sc_parent;
	struct usb_page_cache *pc;
	uint32_t actlen;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		pc = usbd_xfer_get_frame(xfer, 0);

		/* handle CnS response */
		if (ucom == sc->sc_ucom && actlen >= USIE_HIPCNS_MIN) {
			DPRINTF("transferred=%u\n", actlen);

			/* peek one byte: is it really a CnS reply frame? */
			usbd_copy_out(pc, 0, sc->sc_resp_temp, 1);

			if (sc->sc_resp_temp[0] == USIE_HIP_FRM_CHR) {
				/* clamp actlen to the response buffer size */
				if (actlen > USIE_BUFSIZE)
					actlen = USIE_BUFSIZE;

				/* get complete message */
				usbd_copy_out(pc, 0, sc->sc_resp_temp, actlen);
				usie_hip_rsp(sc, sc->sc_resp_temp, actlen);

				/* resubmit; do not hand this to the TTY */
				goto tr_setup;
			}
			/* else call ucom_put_data() */
		}

		/* standard ucom transfer */
		ucom_put_data(ucom, pc, 0, actlen);

		/* fall through */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		break;

	default:			/* Error */
		if (error != USB_ERR_CANCELLED) {
			/* clear stall and retry */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}
/*
 * Bulk-OUT completion for a ucom port.  A pending CnS request mbuf
 * (attached to the transfer via usbd_xfer_set_priv() by
 * usie_cns_req()) takes priority over regular TTY output.
 */
static void
usie_uc_tx_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct ucom_softc *ucom = usbd_xfer_softc(xfer);
	struct usb_page_cache *pc;
	uint32_t actlen;

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
	case USB_ST_SETUP:
tr_setup:
		pc = usbd_xfer_get_frame(xfer, 0);

		/* handle CnS request */
		struct mbuf *m = usbd_xfer_get_priv(xfer);

		if (m != NULL) {
			/*
			 * The payload is copied into the transfer buffer,
			 * so the mbuf can be freed right after submit.
			 */
			usbd_m_copy_in(pc, 0, m, 0, m->m_pkthdr.len);
			usbd_xfer_set_frame_len(xfer, 0, m->m_pkthdr.len);
			usbd_xfer_set_priv(xfer, NULL);
			usbd_transfer_submit(xfer);
			m_freem(m);
			break;
		}

		/* standard ucom transfer */
		if (ucom_get_data(ucom, pc, 0, USIE_BUFSIZE, &actlen)) {
			usbd_xfer_set_frame_len(xfer, 0, actlen);
			usbd_transfer_submit(xfer);
		}
		break;

	default:			/* Error */
		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}
/*
 * Interrupt-IN completion for a ucom port.  Parses a CDC-style
 * notification; bmRequestType 0xa1 with bRequest 0x20 carries the
 * modem line state in a 16-bit little-endian parameter.
 */
static void
usie_uc_status_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct usb_page_cache *pc;
	struct {
		struct usb_device_request req;
		uint16_t param;
	} st;
	uint32_t actlen;
	uint16_t param;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		DPRINTFN(4, "info received, actlen=%u\n", actlen);

		if (actlen < sizeof(st)) {
			DPRINTF("data too short actlen=%u\n", actlen);
			goto tr_setup;
		}

		pc = usbd_xfer_get_frame(xfer, 0);
		usbd_copy_out(pc, 0, &st, sizeof(st));

		if (st.req.bmRequestType == 0xa1 && st.req.bRequest == 0x20) {
			struct ucom_softc *ucom = usbd_xfer_softc(xfer);
			struct usie_softc *sc = ucom->sc_parent;

			param = le16toh(st.param);
			DPRINTF("param=%x\n", param);
			/*
			 * Rebuild the cached status from scratch.
			 * NOTE(review): every bit, including RTS/DTR, is
			 * folded into sc_msr (sc_lsr stays 0) and USIE_CTS
			 * is mapped inverted — presumably device-specific;
			 * confirm against the hardware documentation.
			 */
			sc->sc_msr = sc->sc_lsr = 0;

			sc->sc_msr |= (param & USIE_DCD) ? SER_DCD : 0;
			sc->sc_msr |= (param & USIE_DSR) ? SER_DSR : 0;
			sc->sc_msr |= (param & USIE_RI) ? SER_RI : 0;
			sc->sc_msr |= (param & USIE_CTS) ? 0 : SER_CTS;
			sc->sc_msr |= (param & USIE_RTS) ? SER_RTS : 0;
			sc->sc_msr |= (param & USIE_DTR) ? SER_DTR : 0;
		}

		/* fall through */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		break;

	default:			/* Error */
		DPRINTF("USB transfer error, %s\n",
		    usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}
/*
 * Direct IP bulk-IN completion.  A jumbo mbuf cluster is loaded
 * straight into the USB transfer (zero-copy); a completed buffer is
 * parsed outside the driver mutex because a single transfer may
 * aggregate several HIP-framed IP packets.
 */
static void
usie_if_rx_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct epoch_tracker et;
	struct usie_softc *sc = usbd_xfer_softc(xfer);
	if_t ifp = sc->sc_ifp;
	struct mbuf *m0;
	struct mbuf *m = NULL;
	struct usie_desc *rxd;
	uint32_t actlen;
	uint16_t err;
	uint16_t pkt;
	uint16_t ipl;
	uint16_t len;
	uint16_t diff;
	uint8_t pad;
	uint8_t ipv;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		DPRINTFN(15, "rx done, actlen=%u\n", actlen);

		if (actlen < sizeof(struct usie_hip)) {
			DPRINTF("data too short %u\n", actlen);
			goto tr_setup;
		}

		/* take ownership of the filled mbuf; parsed below */
		m = sc->sc_rxm;
		sc->sc_rxm = NULL;

		/* fall through */
	case USB_ST_SETUP:
tr_setup:

		if (sc->sc_rxm == NULL) {
			sc->sc_rxm = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
			    MJUMPAGESIZE /* could be bigger than MCLBYTES */ );
		}
		if (sc->sc_rxm == NULL) {
			DPRINTF("could not allocate Rx mbuf\n");
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			usbd_xfer_set_stall(xfer);
			usbd_xfer_set_frames(xfer, 0);
		} else {
			/*
			 * Directly loading a mbuf cluster into DMA to
			 * save some data copying. This works because
			 * there is only one cluster.
			 */
			usbd_xfer_set_frame_data(xfer, 0,
			    mtod(sc->sc_rxm, caddr_t), MIN(MJUMPAGESIZE, USIE_RXSZ_MAX));
			usbd_xfer_set_frames(xfer, 1);
		}
		usbd_transfer_submit(xfer);
		break;

	default:			/* Error */
		DPRINTF("USB transfer error, %s\n", usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			goto tr_setup;
		}
		if (sc->sc_rxm != NULL) {
			m_freem(sc->sc_rxm);
			sc->sc_rxm = NULL;
		}
		break;
	}

	if (m == NULL)
		return;

	/* frame parsing runs without the driver mutex held */
	mtx_unlock(&sc->sc_mtx);

	m->m_pkthdr.len = m->m_len = actlen;

	/*
	 * NOTE(review): "pkt" is initialized here but never incremented,
	 * so the IPACKETS counter update at the bottom is always zero —
	 * confirm whether that is intentional.
	 */
	err = pkt = 0;

	/* HW can aggregate multiple frames in a single USB xfer */
	NET_EPOCH_ENTER(et);
	for (;;) {
		rxd = mtod(m, struct usie_desc *);

		len = be16toh(rxd->hip.len) & USIE_HIP_IP_LEN_MASK;
		pad = (rxd->hip.id & USIE_HIP_PAD) ? 1 : 0;
		ipl = (len - pad - ETHER_HDR_LEN);
		/* unsigned wrap-around catches undersized frames */
		if (ipl >= len) {
			DPRINTF("Corrupt frame\n");
			m_freem(m);
			break;
		}
		diff = sizeof(struct usie_desc) + ipl + pad;

		if (((rxd->hip.id & USIE_HIP_MASK) != USIE_HIP_IP) ||
		    (be16toh(rxd->desc_type) & USIE_TYPE_MASK) != USIE_IP_RX) {
			DPRINTF("received wrong type of packet\n");
			/* skip this sub-frame and keep scanning */
			m->m_data += diff;
			m->m_pkthdr.len = (m->m_len -= diff);
			err++;
			if (m->m_pkthdr.len > 0)
				continue;
			m_freem(m);
			break;
		}

		switch (be16toh(rxd->ethhdr.ether_type)) {
		case ETHERTYPE_IP:
			ipv = NETISR_IP;
			break;
#ifdef INET6
		case ETHERTYPE_IPV6:
			ipv = NETISR_IPV6;
			break;
#endif
		default:
			DPRINTF("unsupported ether type\n");
			err++;
			break;
		}

		/* the last packet: dispatch the remaining mbuf in place */
		if (m->m_pkthdr.len <= diff) {
			m->m_data += (sizeof(struct usie_desc) + pad);
			m->m_pkthdr.len = m->m_len = ipl;
			m->m_pkthdr.rcvif = ifp;
			BPF_MTAP(sc->sc_ifp, m);
			netisr_dispatch(ipv, m);
			break;
		}
		/* copy aggregated frames to another mbuf */
		m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (__predict_false(m0 == NULL)) {
			DPRINTF("could not allocate mbuf\n");
			err++;
			m_freem(m);
			break;
		}
		m_copydata(m, sizeof(struct usie_desc) + pad, ipl, mtod(m0, caddr_t));
		m0->m_pkthdr.rcvif = ifp;
		m0->m_pkthdr.len = m0->m_len = ipl;

		BPF_MTAP(sc->sc_ifp, m0);
		netisr_dispatch(ipv, m0);

		/* advance to the next aggregated sub-frame */
		m->m_data += diff;
		m->m_pkthdr.len = (m->m_len -= diff);
	}
	NET_EPOCH_EXIT(et);

	mtx_lock(&sc->sc_mtx);

	if_inc_counter(ifp, IFCOUNTER_IERRORS, err);
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, pkt);
}
/*
 * Direct IP bulk-OUT completion.  Dequeues the next packet from the
 * interface send queue, prepends the cached HIP/Ethernet TX
 * descriptor and submits the transfer.
 *
 * Fix: an oversized packet was dropped with a bare "break", leaking
 * the dequeued mbuf; it is now freed before bailing out.
 */
static void
usie_if_tx_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct usie_softc *sc = usbd_xfer_softc(xfer);
	struct usb_page_cache *pc;
	if_t ifp = sc->sc_ifp;
	struct mbuf *m;
	uint16_t size;

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		DPRINTFN(11, "transfer complete\n");
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);

		/* fall through */
	case USB_ST_SETUP:
tr_setup:
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
			break;

		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		if (m->m_pkthdr.len > (int)(MCLBYTES - ETHER_HDR_LEN +
		    ETHER_CRC_LEN - sizeof(sc->sc_txd))) {
			DPRINTF("packet len is too big: %d\n",
			    m->m_pkthdr.len);
			/* free the oversized packet instead of leaking it */
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			break;
		}
		pc = usbd_xfer_get_frame(xfer, 0);

		/* fill in the running TX descriptor's length field */
		sc->sc_txd.hip.len = htobe16(m->m_pkthdr.len +
		    ETHER_HDR_LEN + ETHER_CRC_LEN);

		size = sizeof(sc->sc_txd);

		usbd_copy_in(pc, 0, &sc->sc_txd, size);
		usbd_m_copy_in(pc, size, m, 0, m->m_pkthdr.len);
		usbd_xfer_set_frame_len(xfer, 0, m->m_pkthdr.len +
		    size + ETHER_CRC_LEN);

		BPF_MTAP(ifp, m);

		/* data was copied into the transfer buffer above */
		m_freem(m);

		usbd_transfer_submit(xfer);
		break;

	default:			/* Error */
		DPRINTF("USB transfer error, %s\n",
		    usbd_errstr(error));
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			goto tr_setup;
		}
		break;
	}
}
/*
 * Direct IP interrupt-IN completion.  When the modem signals a
 * pending encapsulated response, queue usie_if_status_cb() to fetch
 * and parse it in process context.
 */
static void
usie_if_status_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct usie_softc *sc = usbd_xfer_softc(xfer);
	struct usb_page_cache *pc;
	struct usb_cdc_notification cdc;
	uint32_t actlen;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		DPRINTFN(4, "info received, actlen=%d\n", actlen);

		/* only the header is needed, not usb_cdc_notification.data[16] */
		if (actlen < (sizeof(cdc) - 16)) {
			DPRINTF("data too short %d\n", actlen);
			goto tr_setup;
		}
		pc = usbd_xfer_get_frame(xfer, 0);
		usbd_copy_out(pc, 0, &cdc, (sizeof(cdc) - 16));

		DPRINTFN(4, "bNotification=%x\n", cdc.bNotification);

		if (cdc.bNotification & UCDC_N_RESPONSE_AVAILABLE) {
			/* defer the control transfer to a taskqueue */
			taskqueue_enqueue(taskqueue_thread,
			    &sc->sc_if_status_task);
		}

		/* fall through */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		break;

	default:			/* Error */
		DPRINTF("USB transfer error, %s\n",
		    usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}
/* Callout handler: defer the periodic sync command to process context. */
static void
usie_if_sync_to(void *arg)
{
	struct usie_softc *softc = arg;

	taskqueue_enqueue(taskqueue_thread, &softc->sc_if_sync_task);
}
/*
 * Taskqueue handler: send the host-to-modem sync command and re-arm
 * the two-second sync callout.
 */
static void
usie_if_sync_cb(void *arg, int pending)
{
	struct usie_softc *sc = arg;
	int i;

	mtx_lock(&sc->sc_mtx);

	/* the sync command is deliberately issued twice */
	for (i = 0; i != 2; i++)
		usie_if_cmd(sc, USIE_HIP_SYNC2M);

	usb_callout_reset(&sc->sc_if_sync_ch, 2 * hz, usie_if_sync_to, sc);

	mtx_unlock(&sc->sc_mtx);
}
/*
 * Taskqueue handler: fetch the encapsulated response announced by the
 * interrupt pipe and act on the HIP message it contains (sync
 * handshake, restart notification or an UMTS link-state record).
 */
static void
usie_if_status_cb(void *arg, int pending)
{
	struct usie_softc *sc = arg;
	if_t ifp = sc->sc_ifp;
	struct usb_device_request req;
	struct usie_hip *hip;
	struct usie_lsi *lsi;
	uint16_t actlen;
	uint8_t ntries;
	uint8_t pad;

	mtx_lock(&sc->sc_mtx);

	req.bmRequestType = UT_READ_CLASS_INTERFACE;
	req.bRequest = UCDC_GET_ENCAPSULATED_RESPONSE;
	USETW(req.wValue, 0);
	USETW(req.wIndex, sc->sc_if_ifnum);
	USETW(req.wLength, sizeof(sc->sc_status_temp));

	/* retry the control read up to 10 times */
	for (ntries = 0; ntries != 10; ntries++) {
		int err;

		err = usbd_do_request_flags(sc->sc_udev,
		    &sc->sc_mtx, &req, sc->sc_status_temp, USB_SHORT_XFER_OK,
		    &actlen, USB_DEFAULT_TIMEOUT);
		if (err == 0)
			break;

		DPRINTF("Control request failed: %s %d/10\n",
		    usbd_errstr(err), ntries);

		usb_pause_mtx(&sc->sc_mtx, USB_MS_TO_TICKS(10));
	}

	if (ntries == 10) {
		mtx_unlock(&sc->sc_mtx);
		DPRINTF("Timeout\n");
		return;
	}

	hip = (struct usie_hip *)sc->sc_status_temp;

	pad = (hip->id & USIE_HIP_PAD) ? 1 : 0;

	DPRINTF("hip.id=%x hip.len=%d actlen=%u pad=%d\n",
	    hip->id, be16toh(hip->len), actlen, pad);

	switch (hip->id & USIE_HIP_MASK) {
	case USIE_HIP_SYNC2H:
		/* modem-initiated sync: acknowledge */
		usie_if_cmd(sc, USIE_HIP_SYNC2M);
		break;
	case USIE_HIP_RESTR:
		usb_callout_stop(&sc->sc_if_sync_ch);
		break;
	case USIE_HIP_UMTS:
		/* link-state information follows the (padded) header */
		lsi = (struct usie_lsi *)(
		    sc->sc_status_temp + sizeof(struct usie_hip) + pad);

		DPRINTF("lsi.proto=%x lsi.len=%d\n", lsi->proto,
		    be16toh(lsi->len));

		if (lsi->proto != USIE_LSI_UMTS)
			break;

		if (lsi->area == USIE_LSI_AREA_NO ||
		    lsi->area == USIE_LSI_AREA_NODATA) {
			device_printf(sc->sc_dev, "no service available\n");
			break;
		}
		if (lsi->state == USIE_LSI_STATE_IDLE) {
			DPRINTF("lsi.state=%x\n", lsi->state);
			break;
		}
		DPRINTF("ctx=%x\n", hip->param);
		/* remember the PDP context for outgoing TX descriptors */
		sc->sc_txd.hip.param = hip->param;

		/* record addressing info and mark the interface up */
		sc->sc_net.addr_len = lsi->pdp_addr_len;
		memcpy(&sc->sc_net.dns1_addr, &lsi->dns1_addr, 16);
		memcpy(&sc->sc_net.dns2_addr, &lsi->dns2_addr, 16);
		memcpy(sc->sc_net.pdp_addr, lsi->pdp_addr, 16);
		memcpy(sc->sc_net.gw_addr, lsi->gw_addr, 16);
		if_setflagbits(ifp, IFF_UP, 0);
		if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);

		device_printf(sc->sc_dev, "IP Addr=%d.%d.%d.%d\n",
		    *lsi->pdp_addr, *(lsi->pdp_addr + 1),
		    *(lsi->pdp_addr + 2), *(lsi->pdp_addr + 3));
		device_printf(sc->sc_dev, "Gateway Addr=%d.%d.%d.%d\n",
		    *lsi->gw_addr, *(lsi->gw_addr + 1),
		    *(lsi->gw_addr + 2), *(lsi->gw_addr + 3));
		device_printf(sc->sc_dev, "Prim NS Addr=%d.%d.%d.%d\n",
		    *lsi->dns1_addr, *(lsi->dns1_addr + 1),
		    *(lsi->dns1_addr + 2), *(lsi->dns1_addr + 3));
		device_printf(sc->sc_dev, "Scnd NS Addr=%d.%d.%d.%d\n",
		    *lsi->dns2_addr, *(lsi->dns2_addr + 1),
		    *(lsi->dns2_addr + 2), *(lsi->dns2_addr + 3));

		usie_cns_req(sc, USIE_CNS_ID_RSSI, USIE_CNS_OB_RSSI);
		break;

	case USIE_HIP_RCGI:
		/* ignore, workaround for sloppy windows */
		break;
	default:
		DPRINTF("undefined msgid: %x\n", hip->id);
		break;
	}

	mtx_unlock(&sc->sc_mtx);
}
/* ifnet start routine: kick the Direct IP TX transfer. */
static void
usie_if_start(if_t ifp)
{
	struct usie_softc *sc = if_getsoftc(ifp);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		DPRINTF("Not running\n");
		return;
	}

	mtx_lock(&sc->sc_mtx);
	usbd_transfer_start(sc->sc_if_xfer[USIE_IF_TX]);
	mtx_unlock(&sc->sc_mtx);

	DPRINTFN(3, "interface started\n");
}
/*
 * ifnet output routine: accept IPv4/IPv6 packets, silently drop
 * AF_UNSPEC (dhclient probes) and reject everything else.
 *
 * Fix: "case AF_INET6;" used a semicolon instead of a colon, which is
 * a syntax error whenever INET6 is defined.
 */
static int
usie_if_output(if_t ifp, struct mbuf *m, const struct sockaddr *dst,
    struct route *ro)
{
	int err;

	DPRINTF("proto=%x\n", dst->sa_family);

	switch (dst->sa_family) {
#ifdef INET6
	case AF_INET6:
		/* FALLTHROUGH */
#endif
	case AF_INET:
		break;

		/* silently drop dhclient packets */
	case AF_UNSPEC:
		m_freem(m);
		return (0);

		/* drop other packet types */
	default:
		m_freem(m);
		return (EAFNOSUPPORT);
	}

	err = if_transmit(ifp, m);
	if (err) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		return (ENOBUFS);
	}
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);

	return (0);
}
/*
 * ifnet init routine: prime the TX descriptor template, clear stalls
 * on the Direct IP transfers, start reception and — if the link is
 * not up yet — ask the modem to initialize the connection.
 */
static void
usie_if_init(void *arg)
{
	struct usie_softc *sc = arg;
	if_t ifp = sc->sc_ifp;
	uint8_t i;

	mtx_lock(&sc->sc_mtx);

	/* write tx descriptor */
	sc->sc_txd.hip.id = USIE_HIP_CTX;
	sc->sc_txd.hip.param = 0;	/* init value */
	sc->sc_txd.desc_type = htobe16(USIE_IP_TX);

	for (i = 0; i != USIE_IF_N_XFER; i++)
		usbd_xfer_set_stall(sc->sc_if_xfer[i]);

	usbd_transfer_start(sc->sc_uc_xfer[USIE_HIP_IF][USIE_UC_RX]);
	usbd_transfer_start(sc->sc_if_xfer[USIE_IF_STATUS]);
	usbd_transfer_start(sc->sc_if_xfer[USIE_IF_RX]);

	/* if not running, initiate the modem */
	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
		usie_cns_req(sc, USIE_CNS_ID_INIT, USIE_CNS_OB_LINK_UPDATE);

	mtx_unlock(&sc->sc_mtx);

	DPRINTF("ifnet initialized\n");
}
/*
 * Stop the Direct IP interface: drain the sync callout, ask the modem
 * to tear the link down, halt all Direct IP transfers and shut the
 * device down.
 */
static void
usie_if_stop(struct usie_softc *sc)
{
	usb_callout_drain(&sc->sc_if_sync_ch);

	mtx_lock(&sc->sc_mtx);

	/* usie_cns_req() clears IFF_* flags */
	usie_cns_req(sc, USIE_CNS_ID_STOP, USIE_CNS_OB_LINK_UPDATE);

	usbd_transfer_stop(sc->sc_if_xfer[USIE_IF_TX]);
	usbd_transfer_stop(sc->sc_if_xfer[USIE_IF_RX]);
	usbd_transfer_stop(sc->sc_if_xfer[USIE_IF_STATUS]);

	/* shutdown device */
	usie_if_cmd(sc, USIE_HIP_DOWN);

	mtx_unlock(&sc->sc_mtx);
}
/*
 * ifnet ioctl handler.  Besides the usual up/down handling, it
 * answers a minimal subset of 802.11 queries so that ifconfig can
 * display the cellular signal strength.
 */
static int
usie_if_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct usie_softc *sc = if_getsoftc(ifp);
	struct ieee80211req *ireq;
	struct ieee80211req_sta_info si;
	struct ifmediareq *ifmr;

	switch (cmd) {
	case SIOCSIFFLAGS:
		if (if_getflags(ifp) & IFF_UP) {
			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
				usie_if_init(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				usie_if_stop(sc);
		}
		break;

	case SIOCSIFCAP:
		if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
			device_printf(sc->sc_dev,
			    "Connect to the network first.\n");
			break;
		}
		/* refresh the cached RSSI value */
		mtx_lock(&sc->sc_mtx);
		usie_cns_req(sc, USIE_CNS_ID_RSSI, USIE_CNS_OB_RSSI);
		mtx_unlock(&sc->sc_mtx);
		break;

	case SIOCG80211:
		ireq = (struct ieee80211req *)data;

		if (ireq->i_type != IEEE80211_IOC_STA_INFO)
			break;

		memset(&si, 0, sizeof(si));
		si.isi_len = sizeof(si);
		/*
		 * ifconfig expects RSSI in 0.5dBm units
		 * relative to the noise floor.
		 */
		si.isi_rssi = 2 * sc->sc_rssi;
		/*
		 * NOTE(review): the +8 skips a header in the userland
		 * buffer layout — confirm against ieee80211_ioctl usage.
		 */
		if (copyout(&si, (uint8_t *)ireq->i_data + 8,
		    sizeof(struct ieee80211req_sta_info)))
			DPRINTF("copyout failed\n");
		DPRINTF("80211\n");
		break;

	case SIOCGIFMEDIA:		/* to fool ifconfig */
		ifmr = (struct ifmediareq *)data;
		ifmr->ifm_count = 1;
		DPRINTF("media\n");
		break;

	case SIOCSIFADDR:
		break;

	default:
		return (EINVAL);
	}
	return (0);
}
/*
 * Issue a control request with up to ten retries, pausing 10 ms
 * between attempts.  Returns the last USB error (0 on success).
 * The driver mutex must be held.
 */
static int
usie_do_request(struct usie_softc *sc, struct usb_device_request *req,
    void *data)
{
	int rv = 0;
	int attempt;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	for (attempt = 0; attempt != 10; attempt++) {
		rv = usbd_do_request(sc->sc_udev,
		    &sc->sc_mtx, req, data);
		if (rv == 0)
			break;

		DPRINTF("Control request failed: %s %d/10\n",
		    usbd_errstr(rv), attempt);

		usb_pause_mtx(&sc->sc_mtx, USB_MS_TO_TICKS(10));
	}
	return (rv);
}
/*
 * Send a bare HIP command (no payload) as an encapsulated command on
 * the Direct IP control interface.
 */
static int
usie_if_cmd(struct usie_softc *sc, uint8_t cmd)
{
	struct usie_hip msg = {
		.len = 0,
		.id = cmd,
		.param = 0,
	};
	struct usb_device_request req;

	req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
	req.bRequest = UCDC_SEND_ENCAPSULATED_COMMAND;
	USETW(req.wValue, 0);
	USETW(req.wIndex, sc->sc_if_ifnum);
	USETW(req.wLength, sizeof(msg));

	DPRINTF("cmd=%x\n", cmd);

	return (usie_do_request(sc, &req, &msg));
}
/*
 * Build and send a CnS (Control and Status) request to the modem over
 * the HIP control port.  The framed message is handed to the TX
 * callback through the transfer's private pointer and dropped if a
 * request is already pending.
 *
 * Fix: the "unsupported object" path returned without freeing the
 * already-allocated mbuf, leaking it on every bad call.
 */
static void
usie_cns_req(struct usie_softc *sc, uint32_t id, uint16_t obj)
{
	if_t ifp = sc->sc_ifp;
	struct mbuf *m;
	struct usb_xfer *xfer;
	struct usie_hip *hip;
	struct usie_cns *cns;
	uint8_t *param;
	uint8_t *tmp;
	uint8_t cns_len;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (__predict_false(m == NULL)) {
		DPRINTF("could not allocate mbuf\n");
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		return;
	}
	/* to align usie_hip{} on 32 bit */
	m->m_data += 3;
	param = mtod(m, uint8_t *);
	*param++ = USIE_HIP_FRM_CHR;
	hip = (struct usie_hip *)param;
	cns = (struct usie_cns *)(hip + 1);

	tmp = param + USIE_HIPCNS_MIN - 2;

	switch (obj) {
	case USIE_CNS_OB_LINK_UPDATE:
		cns_len = 2;
		cns->op = USIE_CNS_OP_SET;
		*tmp++ = 1;	/* profile ID, always use 1 for now */
		*tmp++ = id == USIE_CNS_ID_INIT ? 1 : 0;
		break;

	case USIE_CNS_OB_PROF_WRITE:
		cns_len = 245;
		cns->op = USIE_CNS_OP_SET;
		*tmp++ = 1;	/* profile ID, always use 1 for now */
		*tmp++ = 2;
		memcpy(tmp, &sc->sc_net, 34);
		/*
		 * NOTE(review): tmp+35 / 245-36 look off by one relative
		 * to the 34-byte copy above — confirm against the CnS
		 * profile layout before changing.
		 */
		memset(tmp + 35, 0, 245 - 36);
		tmp += 243;
		break;

	case USIE_CNS_OB_RSSI:
		cns_len = 0;
		cns->op = USIE_CNS_OP_REQ;
		break;

	default:
		DPRINTF("unsupported CnS object type\n");
		m_freem(m);	/* don't leak the mbuf on unknown object */
		return;
	}
	*tmp = USIE_HIP_FRM_CHR;	/* closing frame marker */

	hip->len = htobe16(sizeof(struct usie_cns) + cns_len);
	hip->id = USIE_HIP_CNS2M;
	hip->param = 0;		/* none for CnS */

	cns->obj = htobe16(obj);
	cns->id = htobe32(id);
	cns->len = cns_len;
	cns->rsv0 = cns->rsv1 = 0;	/* always '0' */

	param = (uint8_t *)(cns + 1);
	DPRINTF("param: %16D\n", param, ":");

	m->m_pkthdr.len = m->m_len = USIE_HIPCNS_MIN + cns_len + 2;

	xfer = sc->sc_uc_xfer[USIE_HIP_IF][USIE_UC_TX];

	if (usbd_xfer_get_priv(xfer) == NULL) {
		usbd_xfer_set_priv(xfer, m);
		usbd_transfer_start(xfer);
	} else {
		DPRINTF("Dropped CNS event\n");
		m_freem(m);
	}
}
/*
 * Handle a parsed CnS response: link-state changes drive the sync
 * callout and interface flags; RSSI responses update the cached
 * signal level.
 */
static void
usie_cns_rsp(struct usie_softc *sc, struct usie_cns *cns)
{
	if_t ifp = sc->sc_ifp;

	DPRINTF("received CnS\n");

	switch (be16toh(cns->obj)) {
	case USIE_CNS_OB_LINK_UPDATE:
		if (be32toh(cns->id) & USIE_CNS_ID_INIT)
			/* link coming up: start the periodic sync */
			usie_if_sync_to(sc);
		else if (be32toh(cns->id) & USIE_CNS_ID_STOP) {
			/* link torn down: mark the interface down */
			if_setflagbits(ifp, 0, IFF_UP);
			if_setdrvflagbits(ifp, 0,
			    IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
		} else
			DPRINTF("undefined link update\n");
		break;

	case USIE_CNS_OB_RSSI:
		/* payload is a big-endian 16-bit signal level */
		sc->sc_rssi = be16toh(*(int16_t *)(cns + 1));
		if (sc->sc_rssi <= 0)
			device_printf(sc->sc_dev, "No signal\n");
		else {
			device_printf(sc->sc_dev, "RSSI=%ddBm\n",
			    sc->sc_rssi - 110);
		}
		break;

	case USIE_CNS_OB_PROF_WRITE:
		break;

	case USIE_CNS_OB_PDP_READ:
		break;

	default:
		DPRINTF("undefined CnS\n");
		break;
	}
}
/*
 * Parse a buffer of HIP frames.  Frames are delimited by
 * USIE_HIP_FRM_CHR and byte-stuffed with USIE_HIP_ESC_CHR (the
 * following byte is XORed with 0x20); each unstuffed frame is checked
 * and, for CnS replies, forwarded to usie_cns_rsp().
 */
static void
usie_hip_rsp(struct usie_softc *sc, uint8_t *rsp, uint32_t len)
{
	struct usie_hip *hip;
	struct usie_cns *cns;
	uint32_t i;
	uint32_t j;
	uint32_t off;
	uint8_t tmp[USIE_HIPCNS_MAX] __aligned(4);

	for (off = 0; (off + USIE_HIPCNS_MIN) <= len; off++) {
		uint8_t pad;

		/* skip leading frame delimiter characters */
		while ((off < len) && (rsp[off] == USIE_HIP_FRM_CHR))
			off++;

		/* unstuff the bytes into "tmp" */
		for (i = j = 0; ((i + off) < len) &&
		    (j < USIE_HIPCNS_MAX); i++) {
			if (rsp[i + off] == USIE_HIP_FRM_CHR)
				break;

			if (rsp[i + off] == USIE_HIP_ESC_CHR) {
				if ((i + off + 1) >= len)
					break;
				tmp[j++] = rsp[i++ + off + 1] ^ 0x20;
			} else {
				tmp[j++] = rsp[i + off];
			}
		}

		off += i;

		DPRINTF("frame len=%d\n", j);

		if (j < sizeof(struct usie_hip)) {
			DPRINTF("too little data\n");
			break;
		}
		/*
		 * Make sure we are not reading the stack if something
		 * is wrong.
		 */
		memset(tmp + j, 0, sizeof(tmp) - j);

		hip = (struct usie_hip *)tmp;

		DPRINTF("hip: len=%d msgID=%02x, param=%02x\n",
		    be16toh(hip->len), hip->id, hip->param);

		pad = (hip->id & USIE_HIP_PAD) ? 1 : 0;

		if ((hip->id & USIE_HIP_MASK) == USIE_HIP_CNS2H) {
			cns = (struct usie_cns *)(((uint8_t *)(hip + 1)) + pad);

			if (j < (sizeof(struct usie_cns) +
			    sizeof(struct usie_hip) + pad)) {
				DPRINTF("too little data\n");
				break;
			}
			DPRINTF("cns: obj=%04x, op=%02x, rsv0=%02x, "
			    "app=%08x, rsv1=%02x, len=%d\n",
			    be16toh(cns->obj), cns->op, cns->rsv0,
			    be32toh(cns->id), cns->rsv1, cns->len);

			if (cns->op & USIE_CNS_OP_ERR)
				DPRINTF("CnS error response\n");
			else
				usie_cns_rsp(sc, cns);

			/* set up i/j for the debug payload dump below */
			i = sizeof(struct usie_hip) + pad + sizeof(struct usie_cns);
			j = cns->len;
		} else {
			i = sizeof(struct usie_hip) + pad;
			j = be16toh(hip->len);
		}
#ifdef USB_DEBUG
		if (usie_debug == 0)
			continue;

		/* dump the frame payload byte by byte */
		while (i < USIE_HIPCNS_MAX && j > 0) {
			DPRINTF("param[0x%02x] = 0x%02x\n", i, tmp[i]);
			i++;
			j--;
		}
#endif
	}
}
/*
 * Module event handler: register/deregister the usb_dev_configured
 * autoinstall hook used for mass-storage mode switching.
 */
static int
usie_driver_loaded(struct module *mod, int what, void *arg)
{
	int rv = 0;

	switch (what) {
	case MOD_LOAD:
		/* register autoinstall handler */
		usie_etag = EVENTHANDLER_REGISTER(usb_dev_configured,
		    usie_autoinst, NULL, EVENTHANDLER_PRI_ANY);
		break;
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(usb_dev_configured, usie_etag);
		break;
	default:
		rv = EOPNOTSUPP;
		break;
	}
	return (rv);
}
diff --git a/sys/dev/usb/net/uhso.c b/sys/dev/usb/net/uhso.c
index 300f982abeae..24135f6ccd5a 100644
--- a/sys/dev/usb/net/uhso.c
+++ b/sys/dev/usb/net/uhso.c
@@ -1,1932 +1,1928 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2010 Fredrik Lindberg <fli@shapeshifter.se>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/tty.h>
#include <sys/sysctl.h>
#include <sys/condvar.h>
#include <sys/sx.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <machine/bus.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/netisr.h>
#include <net/bpf.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>
#include <dev/usb/usb_cdc.h>
#include "usbdevs.h"
#define USB_DEBUG_VAR uhso_debug
#include <dev/usb/usb_debug.h>
#include <dev/usb/usb_process.h>
#include <dev/usb/usb_busdma.h>
#include <dev/usb/usb_msctest.h>
#include <dev/usb/serial/usb_serial.h>
struct uhso_tty {
struct uhso_softc *ht_sc;
struct usb_xfer *ht_xfer[3];
int ht_muxport; /* Mux. port no */
int ht_open;
char ht_name[32];
};
/*
 * Per-device software context. One instance is attached per probed
 * USB interface; depending on sc_type it carries a network interface,
 * a multiplexed serial port, a bulk serial port, or a combination.
 */
struct uhso_softc {
	device_t sc_dev;		/* our device node */
	struct usb_device *sc_udev;	/* underlying USB device */
	struct mtx sc_mtx;		/* protects xfers and if-queue state */
	uint32_t sc_type; /* Interface definition (UHSO_IFACE_SPEC bits) */
	int sc_radio;			/* cached radio on/off state */
	struct usb_xfer *sc_xfer[3];	/* mux intr or bulk serial endpoints */
	uint8_t sc_iface_no;
	uint8_t sc_iface_index;
	/* Control pipe */
	struct usb_xfer * sc_ctrl_xfer[2];
	uint8_t sc_ctrl_iface_no;
	/* Network */
	struct usb_xfer *sc_if_xfer[2];	/* raw IP read/write transfers */
	if_t sc_ifp;			/* network interface, NULL if none */
	struct mbuf *sc_mwait; /* Partial packet */
	size_t sc_waitlen; /* No. of outstanding bytes */
	struct mbufq sc_rxq;		/* received-packet queue */
	struct callout sc_c;		/* rx flush callout */
	/* TTY related structures */
	struct ucom_super_softc sc_super_ucom;
	int sc_ttys;			/* number of allocated TTYs */
	struct uhso_tty *sc_tty;	/* parallel array, index matches sc_ucom */
	struct ucom_softc *sc_ucom;
	int sc_msr;			/* modem status register shadow */
	int sc_lsr;			/* line status register shadow */
	int sc_line;			/* control line state (DTR/RTS bits) */
};
#define UHSO_MAX_MTU 2048
/*
* There are mainly two type of cards floating around.
* The first one has 2,3 or 4 interfaces with a multiplexed serial port
* and packet interface on the first interface and bulk serial ports
* on the others.
* The second type of card has several other interfaces, their purpose
* can be detected during run-time.
*/
#define UHSO_IFACE_SPEC(usb_type, port, port_type) \
(((usb_type) << 24) | ((port) << 16) | (port_type))
#define UHSO_IFACE_USB_TYPE(x) ((x >> 24) & 0xff)
#define UHSO_IFACE_PORT(x) ((x >> 16) & 0xff)
#define UHSO_IFACE_PORT_TYPE(x) (x & 0xff)
/*
* USB interface types
*/
#define UHSO_IF_NET 0x01 /* Network packet interface */
#define UHSO_IF_MUX 0x02 /* Multiplexed serial port */
#define UHSO_IF_BULK 0x04 /* Bulk interface */
/*
* Port types
*/
#define UHSO_PORT_UNKNOWN 0x00
#define UHSO_PORT_SERIAL 0x01 /* Serial port */
#define UHSO_PORT_NETWORK 0x02 /* Network packet interface */
/*
* Multiplexed serial port destination sub-port names
*/
#define UHSO_MPORT_TYPE_CTL 0x00 /* Control port */
#define UHSO_MPORT_TYPE_APP 0x01 /* Application */
#define UHSO_MPORT_TYPE_PCSC 0x02
#define UHSO_MPORT_TYPE_GPS 0x03
#define UHSO_MPORT_TYPE_APP2 0x04 /* Secondary application */
#define UHSO_MPORT_TYPE_MAX UHSO_MPORT_TYPE_APP2
#define UHSO_MPORT_TYPE_NOMAX 8 /* Max number of mux ports */
/*
* Port definitions
* Note that these definitions are arbitrary and do not match the values
* returned by the auto config descriptor.
*/
#define UHSO_PORT_TYPE_UNKNOWN 0x00
#define UHSO_PORT_TYPE_CTL 0x01
#define UHSO_PORT_TYPE_APP 0x02
#define UHSO_PORT_TYPE_APP2 0x03
#define UHSO_PORT_TYPE_MODEM 0x04
#define UHSO_PORT_TYPE_NETWORK 0x05
#define UHSO_PORT_TYPE_DIAG 0x06
#define UHSO_PORT_TYPE_DIAG2 0x07
#define UHSO_PORT_TYPE_GPS 0x08
#define UHSO_PORT_TYPE_GPSCTL 0x09
#define UHSO_PORT_TYPE_PCSC 0x0a
#define UHSO_PORT_TYPE_MSD 0x0b
#define UHSO_PORT_TYPE_VOICE 0x0c
#define UHSO_PORT_TYPE_MAX 0x0c
static eventhandler_tag uhso_etag;
/* Overall port type */
static char *uhso_port[] = {
"Unknown",
"Serial",
"Network",
"Network/Serial"
};
/*
* Map between interface port type read from device and description type.
* The position in this array is a direct map to the auto config
* descriptor values.
*/
static unsigned char uhso_port_map[] = {
UHSO_PORT_TYPE_UNKNOWN,
UHSO_PORT_TYPE_DIAG,
UHSO_PORT_TYPE_GPS,
UHSO_PORT_TYPE_GPSCTL,
UHSO_PORT_TYPE_APP,
UHSO_PORT_TYPE_APP2,
UHSO_PORT_TYPE_CTL,
UHSO_PORT_TYPE_NETWORK,
UHSO_PORT_TYPE_MODEM,
UHSO_PORT_TYPE_MSD,
UHSO_PORT_TYPE_PCSC,
UHSO_PORT_TYPE_VOICE
};
static char uhso_port_map_max = sizeof(uhso_port_map) / sizeof(char);
static unsigned char uhso_mux_port_map[] = {
UHSO_PORT_TYPE_CTL,
UHSO_PORT_TYPE_APP,
UHSO_PORT_TYPE_PCSC,
UHSO_PORT_TYPE_GPS,
UHSO_PORT_TYPE_APP2
};
static char *uhso_port_type[] = {
"Unknown", /* Not a valid port */
"Control",
"Application",
"Application (Secondary)",
"Modem",
"Network",
"Diagnostic",
"Diagnostic (Secondary)",
"GPS",
"GPS Control",
"PC Smartcard",
"MSD",
"Voice",
};
static char *uhso_port_type_sysctl[] = {
"unknown",
"control",
"application",
"application",
"modem",
"network",
"diagnostic",
"diagnostic",
"gps",
"gps_control",
"pcsc",
"msd",
"voice",
};
#define UHSO_STATIC_IFACE 0x01
#define UHSO_AUTO_IFACE 0x02
/* ifnet device unit allocations */
static struct unrhdr *uhso_ifnet_unit = NULL;
static const STRUCT_USB_HOST_ID uhso_devs[] = {
#define UHSO_DEV(v,p,i) { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, i) }
/* Option GlobeTrotter MAX 7.2 with upgraded firmware */
UHSO_DEV(OPTION, GTMAX72, UHSO_STATIC_IFACE),
/* Option GlobeSurfer iCON 7.2 */
UHSO_DEV(OPTION, GSICON72, UHSO_STATIC_IFACE),
/* Option iCON 225 */
UHSO_DEV(OPTION, GTHSDPA, UHSO_STATIC_IFACE),
/* Option GlobeSurfer iCON HSUPA */
UHSO_DEV(OPTION, GSICONHSUPA, UHSO_STATIC_IFACE),
/* Option GlobeTrotter HSUPA */
UHSO_DEV(OPTION, GTHSUPA, UHSO_STATIC_IFACE),
/* GE40x */
UHSO_DEV(OPTION, GE40X, UHSO_AUTO_IFACE),
UHSO_DEV(OPTION, GE40X_1, UHSO_AUTO_IFACE),
UHSO_DEV(OPTION, GE40X_2, UHSO_AUTO_IFACE),
UHSO_DEV(OPTION, GE40X_3, UHSO_AUTO_IFACE),
/* Option GlobeSurfer iCON 401 */
UHSO_DEV(OPTION, ICON401, UHSO_AUTO_IFACE),
/* Option GlobeTrotter Module 382 */
UHSO_DEV(OPTION, GMT382, UHSO_AUTO_IFACE),
/* Option GTM661W */
UHSO_DEV(OPTION, GTM661W, UHSO_AUTO_IFACE),
/* Option iCON EDGE */
UHSO_DEV(OPTION, ICONEDGE, UHSO_STATIC_IFACE),
/* Option Module HSxPA */
UHSO_DEV(OPTION, MODHSXPA, UHSO_STATIC_IFACE),
/* Option iCON 321 */
UHSO_DEV(OPTION, ICON321, UHSO_STATIC_IFACE),
/* Option iCON 322 */
UHSO_DEV(OPTION, GTICON322, UHSO_STATIC_IFACE),
/* Option iCON 505 */
UHSO_DEV(OPTION, ICON505, UHSO_AUTO_IFACE),
/* Option iCON 452 */
UHSO_DEV(OPTION, ICON505, UHSO_AUTO_IFACE),
#undef UHSO_DEV
};
static SYSCTL_NODE(_hw_usb, OID_AUTO, uhso, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"USB uhso");
static int uhso_autoswitch = 1;
SYSCTL_INT(_hw_usb_uhso, OID_AUTO, auto_switch, CTLFLAG_RWTUN,
&uhso_autoswitch, 0, "Automatically switch to modem mode");
#ifdef USB_DEBUG
#ifdef UHSO_DEBUG
static int uhso_debug = UHSO_DEBUG;
#else
static int uhso_debug = -1;
#endif
SYSCTL_INT(_hw_usb_uhso, OID_AUTO, debug, CTLFLAG_RWTUN,
&uhso_debug, 0, "Debug level");
#define UHSO_DPRINTF(n, x, ...) {\
if (uhso_debug >= n) {\
printf("%s: " x, __func__, ##__VA_ARGS__);\
}\
}
#else
#define UHSO_DPRINTF(n, x, ...)
#endif
#ifdef UHSO_DEBUG_HEXDUMP
# define UHSO_HEXDUMP(_buf, _len) do { \
{ \
size_t __tmp; \
const char *__buf = (const char *)_buf; \
for (__tmp = 0; __tmp < _len; __tmp++) \
printf("%02hhx ", *__buf++); \
printf("\n"); \
} \
} while(0)
#else
# define UHSO_HEXDUMP(_buf, _len)
#endif
enum {
UHSO_MUX_ENDPT_INTR = 0,
UHSO_MUX_ENDPT_MAX
};
enum {
UHSO_CTRL_READ = 0,
UHSO_CTRL_WRITE,
UHSO_CTRL_MAX
};
enum {
UHSO_IFNET_READ = 0,
UHSO_IFNET_WRITE,
UHSO_IFNET_MAX
};
enum {
UHSO_BULK_ENDPT_READ = 0,
UHSO_BULK_ENDPT_WRITE,
UHSO_BULK_ENDPT_INTR,
UHSO_BULK_ENDPT_MAX
};
static usb_callback_t uhso_mux_intr_callback;
static usb_callback_t uhso_mux_read_callback;
static usb_callback_t uhso_mux_write_callback;
static usb_callback_t uhso_bs_read_callback;
static usb_callback_t uhso_bs_write_callback;
static usb_callback_t uhso_bs_intr_callback;
static usb_callback_t uhso_ifnet_read_callback;
static usb_callback_t uhso_ifnet_write_callback;
/* Config used for the default control pipes */
static const struct usb_config uhso_ctrl_config[UHSO_CTRL_MAX] = {
[UHSO_CTRL_READ] = {
.type = UE_CONTROL,
.endpoint = 0x00,
.direction = UE_DIR_ANY,
.flags = { .pipe_bof = 1, .short_xfer_ok = 1 },
.bufsize = sizeof(struct usb_device_request) + 1024,
.callback = &uhso_mux_read_callback
},
[UHSO_CTRL_WRITE] = {
.type = UE_CONTROL,
.endpoint = 0x00,
.direction = UE_DIR_ANY,
.flags = { .pipe_bof = 1, .force_short_xfer = 1 },
.bufsize = sizeof(struct usb_device_request) + 1024,
.timeout = 1000,
.callback = &uhso_mux_write_callback
}
};
/* Config for the multiplexed serial ports */
static const struct usb_config uhso_mux_config[UHSO_MUX_ENDPT_MAX] = {
[UHSO_MUX_ENDPT_INTR] = {
.type = UE_INTERRUPT,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_IN,
.flags = { .short_xfer_ok = 1 },
.bufsize = 0,
.callback = &uhso_mux_intr_callback,
}
};
/* Config for the raw IP-packet interface */
static const struct usb_config uhso_ifnet_config[UHSO_IFNET_MAX] = {
[UHSO_IFNET_READ] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_IN,
.flags = { .pipe_bof = 1, .short_xfer_ok = 1 },
.bufsize = MCLBYTES,
.callback = &uhso_ifnet_read_callback
},
[UHSO_IFNET_WRITE] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_OUT,
.flags = { .pipe_bof = 1, .force_short_xfer = 1 },
.bufsize = MCLBYTES,
.timeout = 5 * USB_MS_HZ,
.callback = &uhso_ifnet_write_callback
}
};
/* Config for interfaces with normal bulk serial ports */
static const struct usb_config uhso_bs_config[UHSO_BULK_ENDPT_MAX] = {
[UHSO_BULK_ENDPT_READ] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_IN,
.flags = { .pipe_bof = 1, .short_xfer_ok = 1 },
.bufsize = 4096,
.callback = &uhso_bs_read_callback
},
[UHSO_BULK_ENDPT_WRITE] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_OUT,
.flags = { .pipe_bof = 1, .force_short_xfer = 1 },
.bufsize = 8192,
.callback = &uhso_bs_write_callback
},
[UHSO_BULK_ENDPT_INTR] = {
.type = UE_INTERRUPT,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_IN,
.flags = { .short_xfer_ok = 1 },
.bufsize = 0,
.callback = &uhso_bs_intr_callback,
}
};
static int uhso_probe_iface(struct uhso_softc *, int,
int (*probe)(struct usb_device *, int));
static int uhso_probe_iface_auto(struct usb_device *, int);
static int uhso_probe_iface_static(struct usb_device *, int);
static int uhso_attach_muxserial(struct uhso_softc *, struct usb_interface *,
int type);
static int uhso_attach_bulkserial(struct uhso_softc *, struct usb_interface *,
int type);
static int uhso_attach_ifnet(struct uhso_softc *, struct usb_interface *,
int type);
static void uhso_test_autoinst(void *, struct usb_device *,
struct usb_attach_arg *);
static int uhso_driver_loaded(struct module *, int, void *);
static int uhso_radio_sysctl(SYSCTL_HANDLER_ARGS);
static int uhso_radio_ctrl(struct uhso_softc *, int);
static void uhso_free(struct ucom_softc *);
static void uhso_ucom_start_read(struct ucom_softc *);
static void uhso_ucom_stop_read(struct ucom_softc *);
static void uhso_ucom_start_write(struct ucom_softc *);
static void uhso_ucom_stop_write(struct ucom_softc *);
static void uhso_ucom_cfg_get_status(struct ucom_softc *, uint8_t *, uint8_t *);
static void uhso_ucom_cfg_set_dtr(struct ucom_softc *, uint8_t);
static void uhso_ucom_cfg_set_rts(struct ucom_softc *, uint8_t);
static void uhso_if_init(void *);
static void uhso_if_start(if_t);
static void uhso_if_stop(struct uhso_softc *);
static int uhso_if_ioctl(if_t, u_long, caddr_t);
static int uhso_if_output(if_t, struct mbuf *,
const struct sockaddr *, struct route *);
static void uhso_if_rxflush(void *);
static device_probe_t uhso_probe;
static device_attach_t uhso_attach;
static device_detach_t uhso_detach;
static void uhso_free_softc(struct uhso_softc *);
static device_method_t uhso_methods[] = {
DEVMETHOD(device_probe, uhso_probe),
DEVMETHOD(device_attach, uhso_attach),
DEVMETHOD(device_detach, uhso_detach),
{ 0, 0 }
};
static driver_t uhso_driver = {
.name = "uhso",
.methods = uhso_methods,
.size = sizeof(struct uhso_softc)
};
DRIVER_MODULE(uhso, uhub, uhso_driver, uhso_driver_loaded, NULL);
MODULE_DEPEND(uhso, ucom, 1, 1, 1);
MODULE_DEPEND(uhso, usb, 1, 1, 1);
MODULE_VERSION(uhso, 1);
USB_PNP_HOST_INFO(uhso_devs);
static struct ucom_callback uhso_ucom_callback = {
.ucom_cfg_get_status = &uhso_ucom_cfg_get_status,
.ucom_cfg_set_dtr = &uhso_ucom_cfg_set_dtr,
.ucom_cfg_set_rts = &uhso_ucom_cfg_set_rts,
.ucom_start_read = uhso_ucom_start_read,
.ucom_stop_read = uhso_ucom_stop_read,
.ucom_start_write = uhso_ucom_start_write,
.ucom_stop_write = uhso_ucom_stop_write,
.ucom_free = &uhso_free,
};
/*
 * Device probe. Accept only host-mode devices from the ID table, on the
 * first configuration and with a vendor-specific device class. For
 * auto-detect devices, additionally query the interface to verify it
 * actually reports a usable port before claiming it.
 */
static int
uhso_probe(device_t self)
{
	struct usb_attach_arg *uaa = device_get_ivars(self);
	int error;

	if (uaa->usb_mode != USB_MODE_HOST)
		return (ENXIO);
	if (uaa->info.bConfigIndex != 0)
		return (ENXIO);
	if (uaa->info.bDeviceClass != 0xff)
		return (ENXIO);

	error = usbd_lookup_id_by_uaa(uhso_devs, sizeof(uhso_devs), uaa);
	if (error != 0)
		return (error);

	/*
	 * Probe device to see if we are able to attach
	 * to this interface or not.
	 */
	if (USB_GET_DRIVER_INFO(uaa) == UHSO_AUTO_IFACE) {
		/* A zero result means "no usable port on this interface". */
		if (uhso_probe_iface_auto(uaa->device,
		    uaa->info.bIfaceNum) == 0)
			return (ENXIO);
	}
	/* error is 0 here: report a successful match. */
	return (error);
}
/*
 * Device attach. Sets up the control pipe, probes the interface
 * capabilities (statically or by querying the device), attaches the
 * network and/or serial sub-devices and publishes sysctl nodes that
 * describe the discovered ports.
 *
 * On any failure this routine detaches itself and returns ENXIO.
 */
static int
uhso_attach(device_t self)
{
	struct uhso_softc *sc = device_get_softc(self);
	struct usb_attach_arg *uaa = device_get_ivars(self);
	struct usb_interface_descriptor *id;
	struct sysctl_ctx_list *sctx;
	struct sysctl_oid *soid;
	struct sysctl_oid *tree = NULL, *tty_node;
	struct ucom_softc *ucom;
	struct uhso_tty *ht;
	int i, error, port;
	/*
	 * Use a proper function-pointer type instead of void *: converting
	 * between function pointers and void * is not valid standard C.
	 */
	int (*probe_f)(struct usb_device *, int);
	usb_error_t uerr;
	char *desc;

	sc->sc_dev = self;
	sc->sc_udev = uaa->device;
	mtx_init(&sc->sc_mtx, "uhso", NULL, MTX_DEF);
	mbufq_init(&sc->sc_rxq, INT_MAX);	/* XXXGL: sane maximum */
	ucom_ref(&sc->sc_super_ucom);

	sc->sc_radio = 1;

	id = usbd_get_interface_descriptor(uaa->iface);
	sc->sc_ctrl_iface_no = id->bInterfaceNumber;
	sc->sc_iface_no = uaa->info.bIfaceNum;
	sc->sc_iface_index = uaa->info.bIfaceIndex;

	/* Setup control pipe */
	uerr = usbd_transfer_setup(uaa->device,
	    &sc->sc_iface_index, sc->sc_ctrl_xfer,
	    uhso_ctrl_config, UHSO_CTRL_MAX, sc, &sc->sc_mtx);
	if (uerr) {
		device_printf(self, "Failed to setup control pipe: %s\n",
		    usbd_errstr(uerr));
		goto out;
	}

	/* Select the probe strategy based on the matched ID table entry. */
	if (USB_GET_DRIVER_INFO(uaa) == UHSO_STATIC_IFACE)
		probe_f = uhso_probe_iface_static;
	else if (USB_GET_DRIVER_INFO(uaa) == UHSO_AUTO_IFACE)
		probe_f = uhso_probe_iface_auto;
	else
		goto out;

	error = uhso_probe_iface(sc, uaa->info.bIfaceNum, probe_f);
	if (error != 0)
		goto out;

	sctx = device_get_sysctl_ctx(sc->sc_dev);
	soid = device_get_sysctl_tree(sc->sc_dev);

	SYSCTL_ADD_STRING(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "type",
	    CTLFLAG_RD, uhso_port[UHSO_IFACE_PORT(sc->sc_type)], 0,
	    "Port available at this interface");
	SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "radio",
	    CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, sc, 0,
	    uhso_radio_sysctl, "I", "Enable radio");

	/*
	 * The default interface description on most Option devices isn't
	 * very helpful. So we skip device_set_usb_desc and set the
	 * device description manually.
	 */
	device_set_desc_copy(self, uhso_port_type[UHSO_IFACE_PORT_TYPE(sc->sc_type)]);
	/* Announce device */
	device_printf(self, "<%s port> at <%s %s> on %s\n",
	    uhso_port_type[UHSO_IFACE_PORT_TYPE(sc->sc_type)],
	    usb_get_manufacturer(uaa->device),
	    usb_get_product(uaa->device),
	    device_get_nameunit(device_get_parent(self)));

	if (sc->sc_ttys > 0) {
		SYSCTL_ADD_INT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "ports",
		    CTLFLAG_RD, &sc->sc_ttys, 0, "Number of attached serial ports");

		tree = SYSCTL_ADD_NODE(sctx, SYSCTL_CHILDREN(soid), OID_AUTO,
		    "port", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Serial ports");
	}

	/*
	 * Loop through the number of found TTYs and create sysctl
	 * nodes for them.
	 */
	for (i = 0; i < sc->sc_ttys; i++) {
		ht = &sc->sc_tty[i];
		ucom = &sc->sc_ucom[i];

		/* Mux ports map via the bitmask position; others are direct. */
		if (UHSO_IFACE_USB_TYPE(sc->sc_type) & UHSO_IF_MUX)
			port = uhso_mux_port_map[ht->ht_muxport];
		else
			port = UHSO_IFACE_PORT_TYPE(sc->sc_type);

		desc = uhso_port_type_sysctl[port];

		tty_node = SYSCTL_ADD_NODE(sctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    desc, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");

		ht->ht_name[0] = 0;
		if (sc->sc_ttys == 1)
			snprintf(ht->ht_name, 32, "cuaU%d", ucom->sc_super->sc_unit);
		else {
			snprintf(ht->ht_name, 32, "cuaU%d.%d",
			    ucom->sc_super->sc_unit, ucom->sc_subunit);
		}

		desc = uhso_port_type[port];
		SYSCTL_ADD_STRING(sctx, SYSCTL_CHILDREN(tty_node), OID_AUTO,
		    "tty", CTLFLAG_RD, ht->ht_name, 0, "");
		SYSCTL_ADD_STRING(sctx, SYSCTL_CHILDREN(tty_node), OID_AUTO,
		    "desc", CTLFLAG_RD, desc, 0, "");

		if (bootverbose)
			device_printf(sc->sc_dev,
			    "\"%s\" port at %s\n", desc, ht->ht_name);
	}

	return (0);
out:
	uhso_detach(sc->sc_dev);
	return (ENXIO);
}
/*
 * Device detach. Tears down all USB transfers, the serial (ucom)
 * sub-devices and the network interface, in that order. Also used by
 * uhso_attach() to unwind a partial attach. The softc itself is
 * reference counted and released via uhso_free_softc().
 */
static int
uhso_detach(device_t self)
{
	struct uhso_softc *sc = device_get_softc(self);
	int i;

	usbd_transfer_unsetup(sc->sc_xfer, 3);
	usbd_transfer_unsetup(sc->sc_ctrl_xfer, UHSO_CTRL_MAX);
	if (sc->sc_ttys > 0) {
		ucom_detach(&sc->sc_super_ucom, sc->sc_ucom);

		/* Mux ports (ht_muxport != -1) own per-port control xfers. */
		for (i = 0; i < sc->sc_ttys; i++) {
			if (sc->sc_tty[i].ht_muxport != -1) {
				usbd_transfer_unsetup(sc->sc_tty[i].ht_xfer,
				    UHSO_CTRL_MAX);
			}
		}
	}

	if (sc->sc_ifp != NULL) {
		/* Stop the rx flush callout before dismantling the ifnet. */
		callout_drain(&sc->sc_c);
		free_unr(uhso_ifnet_unit, if_getdunit(sc->sc_ifp));
		mtx_lock(&sc->sc_mtx);
		uhso_if_stop(sc);
		mtx_unlock(&sc->sc_mtx);
		bpfdetach(sc->sc_ifp);
		if_detach(sc->sc_ifp);
		if_free(sc->sc_ifp);
		usbd_transfer_unsetup(sc->sc_if_xfer, UHSO_IFNET_MAX);
	}

	/* Keep the softc alive past detach; freed on last ucom unref. */
	device_claim_softc(self);

	uhso_free_softc(sc);

	return (0);
}
UCOM_UNLOAD_DRAIN(uhso);
/*
 * Drop one reference on the softc; the final reference releases the
 * TTY/ucom arrays, the mutex and the softc memory itself.
 */
static void
uhso_free_softc(struct uhso_softc *sc)
{
	if (!ucom_unref(&sc->sc_super_ucom))
		return;

	free(sc->sc_tty, M_USBDEV);
	free(sc->sc_ucom, M_USBDEV);
	mtx_destroy(&sc->sc_mtx);
	device_free_softc(sc);
}
/*
 * ucom "free" callback: releases the ucom layer's reference on our
 * softc once the TTY layer is done with it.
 */
static void
uhso_free(struct ucom_softc *ucom)
{

	uhso_free_softc(ucom->sc_parent);
}
/*
 * usb_dev_configured event handler: if a known device shows up as a
 * USB mass storage device (driver CD image), eject the virtual media
 * so the device re-enumerates in modem mode.
 */
static void
uhso_test_autoinst(void *arg, struct usb_device *udev,
    struct usb_attach_arg *uaa)
{
	struct usb_interface *iface;
	struct usb_interface_descriptor *id;

	/* Only consider freshly enumerated devices, and only if enabled. */
	if (!uhso_autoswitch || uaa->dev_state != UAA_DEV_READY)
		return;

	iface = usbd_get_iface(udev, 0);
	if (iface == NULL)
		return;

	id = iface->idesc;
	if (id == NULL || id->bInterfaceClass != UICLASS_MASS)
		return;

	if (usbd_lookup_id_by_uaa(uhso_devs, sizeof(uhso_devs), uaa) != 0)
		return;		/* no device match */

	if (usb_msc_eject(udev, 0, MSC_EJECT_REZERO) == 0) {
		/* success, mark the udev as disappearing */
		uaa->dev_state = UAA_DEV_EJECTING;
	}
}
/*
 * Module event handler: register/unregister the mode-switch event
 * handler and manage the unit number allocator used for ifnet names.
 */
static int
uhso_driver_loaded(struct module *mod, int what, void *arg)
{
	int rc = 0;

	if (what == MOD_LOAD) {
		/* register our autoinstall handler */
		uhso_etag = EVENTHANDLER_REGISTER(usb_dev_configured,
		    uhso_test_autoinst, NULL, EVENTHANDLER_PRI_ANY);
		/* create our unit allocator for inet devs */
		uhso_ifnet_unit = new_unrhdr(0, INT_MAX, NULL);
	} else if (what == MOD_UNLOAD) {
		EVENTHANDLER_DEREGISTER(usb_dev_configured, uhso_etag);
		delete_unrhdr(uhso_ifnet_unit);
	} else {
		rc = EOPNOTSUPP;
	}

	return (rc);
}
/*
* Probe the interface type by querying the device. The elements
* of an array indicates the capabilities of a particular interface.
* Returns a bit mask with the interface capabilities.
*/
/*
 * Probe the interface type by querying the device. The elements
 * of the returned array indicate the capabilities of a particular
 * interface. Returns a bit mask with the interface capabilities,
 * or 0 when the interface is unusable or the query fails.
 *
 * Note: the capability bytes come from the device and are untrusted.
 * They are kept unsigned and range-checked before being used as an
 * index into uhso_port_map (a signed char would let bytes >= 0x80
 * produce a negative array index).
 */
static int
uhso_probe_iface_auto(struct usb_device *udev, int index)
{
	struct usb_device_request req;
	usb_error_t uerr;
	uint16_t actlen = 0;
	unsigned char port;
	unsigned char buf[17] = {0};

	/* Vendor request 0x86 returns one capability byte per interface. */
	req.bmRequestType = UT_READ_VENDOR_DEVICE;
	req.bRequest = 0x86;
	USETW(req.wValue, 0);
	USETW(req.wIndex, 0);
	USETW(req.wLength, 17);

	uerr = usbd_do_request_flags(udev, NULL, &req, buf,
	    0, &actlen, USB_MS_HZ);
	if (uerr != 0) {
		printf("%s: usbd_do_request_flags failed, %s\n",
		    __func__, usbd_errstr(uerr));
		return (0);
	}

	UHSO_DPRINTF(1, "actlen=%d\n", actlen);
	UHSO_HEXDUMP(buf, 17);

	if (index < 0 || index > 16) {
		UHSO_DPRINTF(0, "Index %d out of range\n", index);
		return (0);
	}

	/* Validate the device-supplied byte before indexing the map. */
	if (buf[index] >= uhso_port_map_max)
		port = UHSO_PORT_TYPE_UNKNOWN;
	else
		port = uhso_port_map[buf[index]];

	UHSO_DPRINTF(1, "index=%d, type=%x[%s]\n", index, buf[index],
	    uhso_port_type[port]);

	switch (port) {
	case UHSO_PORT_TYPE_NETWORK:
		return (UHSO_IFACE_SPEC(UHSO_IF_NET | UHSO_IF_MUX,
		    UHSO_PORT_SERIAL | UHSO_PORT_NETWORK, port));
	case UHSO_PORT_TYPE_DIAG:
	case UHSO_PORT_TYPE_DIAG2:
	case UHSO_PORT_TYPE_GPS:
	case UHSO_PORT_TYPE_GPSCTL:
	case UHSO_PORT_TYPE_CTL:
	case UHSO_PORT_TYPE_APP:
	case UHSO_PORT_TYPE_APP2:
	case UHSO_PORT_TYPE_MODEM:
		return (UHSO_IFACE_SPEC(UHSO_IF_BULK,
		    UHSO_PORT_SERIAL, port));
	case UHSO_PORT_TYPE_MSD:
		return (0);
	case UHSO_PORT_TYPE_UNKNOWN:
	default:
		return (0);
	}
	return (0);
}
/*
* Returns the capabilities of interfaces for devices that don't
* support the automatic query.
* Returns a bit mask with the interface capabilities.
*/
/*
 * Returns the capabilities of interfaces for devices that don't
 * support the automatic query. The layout differs only at index 1
 * and 3 between the 3-interface and 4-interface card variants.
 * Returns a bit mask with the interface capabilities.
 */
static int
uhso_probe_iface_static(struct usb_device *udev, int index)
{
	struct usb_config_descriptor *cd;
	int four_ifaces;

	cd = usbd_get_config_descriptor(udev);
	four_ifaces = (cd->bNumInterface > 3);

	switch (index) {
	case 0:
		/* Combined network + multiplexed serial interface. */
		return (UHSO_IFACE_SPEC(UHSO_IF_NET | UHSO_IF_MUX,
		    UHSO_PORT_SERIAL | UHSO_PORT_NETWORK,
		    UHSO_PORT_TYPE_NETWORK));
	case 1:
		return (UHSO_IFACE_SPEC(UHSO_IF_BULK, UHSO_PORT_SERIAL,
		    four_ifaces ? UHSO_PORT_TYPE_DIAG2 : UHSO_PORT_TYPE_DIAG));
	case 2:
		return (UHSO_IFACE_SPEC(UHSO_IF_BULK, UHSO_PORT_SERIAL,
		    UHSO_PORT_TYPE_MODEM));
	case 3:
		/* Only the 4-interface cards expose a port here. */
		if (four_ifaces)
			return (UHSO_IFACE_SPEC(UHSO_IF_BULK,
			    UHSO_PORT_SERIAL, UHSO_PORT_TYPE_DIAG));
		break;
	default:
		break;
	}
	return (0);
}
/*
* Probes an interface for its particular capabilities and attaches if
* it's a supported interface.
*/
/*
 * Probes one interface with the supplied probe function and attaches
 * the matching sub-devices: network ifnet (optionally with a
 * multiplexed serial port on the same interface) or a bulk serial
 * port. Returns 0 on success, ENXIO on any failure.
 */
static int
uhso_probe_iface(struct uhso_softc *sc, int index,
    int (*probe)(struct usb_device *, int))
{
	struct usb_interface *iface;
	int type, error;

	UHSO_DPRINTF(1, "Probing for interface %d, probe_func=%p\n", index, probe);

	type = probe(sc->sc_udev, index);
	UHSO_DPRINTF(1, "Probe result %x\n", type);
	if (type <= 0)
		return (ENXIO);

	sc->sc_type = type;
	iface = usbd_get_iface(sc->sc_udev, index);

	if (UHSO_IFACE_PORT_TYPE(type) == UHSO_PORT_TYPE_NETWORK) {
		error = uhso_attach_ifnet(sc, iface, type);
		if (error) {
			UHSO_DPRINTF(1, "uhso_attach_ifnet failed");
			return (ENXIO);
		}

		/*
		 * If there is an additional interrupt endpoint on this
		 * interface then we most likely have a multiplexed serial port
		 * available.
		 */
		if (iface->idesc->bNumEndpoints < 3) {
			/* Network only: strip the mux/serial capability bits. */
			sc->sc_type = UHSO_IFACE_SPEC(
			    UHSO_IFACE_USB_TYPE(type) & ~UHSO_IF_MUX,
			    UHSO_IFACE_PORT(type) & ~UHSO_PORT_SERIAL,
			    UHSO_IFACE_PORT_TYPE(type));
			return (0);
		}

		UHSO_DPRINTF(1, "Trying to attach mux. serial\n");
		error = uhso_attach_muxserial(sc, iface, type);
		if (error == 0 && sc->sc_ttys > 0) {
			error = ucom_attach(&sc->sc_super_ucom, sc->sc_ucom,
			    sc->sc_ttys, sc, &uhso_ucom_callback, &sc->sc_mtx);
			if (error) {
				device_printf(sc->sc_dev, "ucom_attach failed\n");
				return (ENXIO);
			}
			ucom_set_pnpinfo_usb(&sc->sc_super_ucom, sc->sc_dev);

			/* Kick off the interrupt transfer that drives reads. */
			mtx_lock(&sc->sc_mtx);
			usbd_transfer_start(sc->sc_xfer[UHSO_MUX_ENDPT_INTR]);
			mtx_unlock(&sc->sc_mtx);
		}
	} else if ((UHSO_IFACE_USB_TYPE(type) & UHSO_IF_BULK) &&
	    UHSO_IFACE_PORT(type) & UHSO_PORT_SERIAL) {
		error = uhso_attach_bulkserial(sc, iface, type);
		if (error)
			return (ENXIO);

		error = ucom_attach(&sc->sc_super_ucom, sc->sc_ucom,
		    sc->sc_ttys, sc, &uhso_ucom_callback, &sc->sc_mtx);
		if (error) {
			device_printf(sc->sc_dev, "ucom_attach failed\n");
			return (ENXIO);
		}
		ucom_set_pnpinfo_usb(&sc->sc_super_ucom, sc->sc_dev);
	}
	else {
		UHSO_DPRINTF(0, "Unknown type %x\n", type);
		return (ENXIO);
	}

	return (0);
}
/*
 * Turn the radio on or off with a vendor control request
 * (0x82 = enable, 0x81 = disable). Returns the requested state on
 * success or -1 when the request fails.
 */
static int
uhso_radio_ctrl(struct uhso_softc *sc, int onoff)
{
	struct usb_device_request req;
	usb_error_t uerr;

	req.bmRequestType = UT_VENDOR;
	req.bRequest = (onoff != 0) ? 0x82 : 0x81;
	USETW(req.wValue, 0);
	USETW(req.wIndex, 0);
	USETW(req.wLength, 0);

	uerr = usbd_do_request(sc->sc_udev, NULL, &req, NULL);
	if (uerr == 0)
		return (onoff);

	device_printf(sc->sc_dev, "usbd_do_request_flags failed: %s\n",
	    usbd_errstr(uerr));
	return (-1);
}
static int
uhso_radio_sysctl(SYSCTL_HANDLER_ARGS)
{
struct uhso_softc *sc = arg1;
int error, radio;
radio = sc->sc_radio;
error = sysctl_handle_int(oidp, &radio, 0, req);
if (error)
return (error);
if (radio != sc->sc_radio) {
radio = radio != 0 ? 1 : 0;
error = uhso_radio_ctrl(sc, radio);
if (error != -1)
sc->sc_radio = radio;
}
return (0);
}
/*
* Expands allocated memory to fit an additional TTY.
* Two arrays are kept with matching indexes, one for ucom and one
* for our private data.
*/
/*
 * Expands allocated memory to fit an additional TTY.
 * Two arrays are kept with matching indexes, one for ucom and one
 * for our private data. Returns the index of the new TTY, or -1 on
 * allocation failure.
 */
static int
uhso_alloc_tty(struct uhso_softc *sc)
{

	sc->sc_ttys++;
	/* reallocf() frees the old buffer on failure, so nothing leaks. */
	sc->sc_tty = reallocf(sc->sc_tty, sizeof(struct uhso_tty) * sc->sc_ttys,
	    M_USBDEV, M_WAITOK | M_ZERO);
	if (sc->sc_tty == NULL)
		return (-1);

	sc->sc_ucom = reallocf(sc->sc_ucom,
	    sizeof(struct ucom_softc) * sc->sc_ttys, M_USBDEV, M_WAITOK | M_ZERO);
	if (sc->sc_ucom == NULL)
		return (-1);

	sc->sc_tty[sc->sc_ttys - 1].ht_sc = sc;

	UHSO_DPRINTF(1, "Allocated TTY %d\n", sc->sc_ttys - 1);
	return (sc->sc_ttys - 1);
}
/*
* Attach a multiplexed serial port
* Data is read/written with requests on the default control pipe. An interrupt
* endpoint returns when there is new data to be read.
*/
/*
 * Attach a multiplexed serial port.
 * Data is read/written with requests on the default control pipe. An
 * interrupt endpoint returns when there is new data to be read.
 */
static int
uhso_attach_muxserial(struct uhso_softc *sc, struct usb_interface *iface,
    int type)
{
	struct usb_descriptor *desc;
	int i, port, tty;
	usb_error_t uerr;

	/*
	 * The class specific interface (type 0x24) descriptor subtype field
	 * contains a bitmask that specifies which (and how many) ports that
	 * are available through this multiplexed serial port.
	 */
	desc = usbd_find_descriptor(sc->sc_udev, NULL,
	    iface->idesc->bInterfaceNumber, UDESC_CS_INTERFACE, 0xff, 0, 0);
	if (desc == NULL) {
		UHSO_DPRINTF(0, "Failed to find UDESC_CS_INTERFACE\n");
		return (ENXIO);
	}

	UHSO_DPRINTF(1, "Mux port mask %x\n", desc->bDescriptorSubtype);
	if (desc->bDescriptorSubtype == 0)
		return (ENXIO);

	/*
	 * The bitmask is one octet, loop through the number of
	 * bits that are set and create a TTY for each.
	 *
	 * NOTE(review): sc_tty is indexed by allocation order (tty) here,
	 * but the intr callback indexes sc_tty by the bit position; the
	 * two only coincide when the mask has no gaps — verify on hardware
	 * with a sparse port mask.
	 */
	for (i = 0; i < 8; i++) {
		port = (1 << i);
		if ((port & desc->bDescriptorSubtype) == port) {
			UHSO_DPRINTF(2, "Found mux port %x (%d)\n", port, i);
			tty = uhso_alloc_tty(sc);
			if (tty < 0)
				return (ENOMEM);
			sc->sc_tty[tty].ht_muxport = i;
			/* Per-port control transfers on the default pipe. */
			uerr = usbd_transfer_setup(sc->sc_udev,
			    &sc->sc_iface_index, sc->sc_tty[tty].ht_xfer,
			    uhso_ctrl_config, UHSO_CTRL_MAX, sc, &sc->sc_mtx);
			if (uerr) {
				device_printf(sc->sc_dev,
				    "Failed to setup control pipe: %s\n",
				    usbd_errstr(uerr));
				return (ENXIO);
			}
		}
	}

	/* Setup the intr. endpoint */
	uerr = usbd_transfer_setup(sc->sc_udev,
	    &iface->idesc->bInterfaceNumber, sc->sc_xfer,
	    uhso_mux_config, 1, sc, &sc->sc_mtx);
	if (uerr)
		return (ENXIO);

	return (0);
}
/*
* Interrupt callback for the multiplexed serial port. Indicates
* which serial port has data waiting.
*/
/*
 * Interrupt callback for the multiplexed serial port. The first byte
 * of the transfer is a bitmask indicating which sub-port has data
 * waiting; a control-pipe read is then issued for that port.
 */
static void
uhso_mux_intr_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct usb_page_cache *pc;
	struct usb_page_search res;
	struct uhso_softc *sc = usbd_xfer_softc(xfer);
	unsigned i, mux;

	UHSO_DPRINTF(3, "status %d\n", USB_GET_STATE(xfer));

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		/*
		 * The multiplexed port number can be found at the first byte.
		 * It contains a bit mask, we transform this in to an integer.
		 */
		pc = usbd_xfer_get_frame(xfer, 0);
		usbd_get_page(pc, 0, &res);

		i = *((unsigned char *)res.buffer);
		mux = 0;
		/* Highest set bit wins: mux becomes its bit position. */
		while (i >>= 1) {
			mux++;
		}

		UHSO_DPRINTF(3, "mux port %d (%d)\n", mux, i);
		/*
		 * NOTE(review): the check uses '>' against NOMAX (8) while
		 * the maximum bit position of a one-octet mask is 7, and
		 * sc_tty is indexed by bit position rather than allocation
		 * order — confirm against devices with sparse port masks.
		 */
		if (mux > UHSO_MPORT_TYPE_NOMAX)
			break;

		/* Issue a read for this serial port */
		usbd_xfer_set_priv(
		    sc->sc_tty[mux].ht_xfer[UHSO_CTRL_READ],
		    &sc->sc_tty[mux]);
		usbd_transfer_start(sc->sc_tty[mux].ht_xfer[UHSO_CTRL_READ]);
		break;
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		break;
	default:
		UHSO_DPRINTF(0, "error: %s\n", usbd_errstr(error));
		if (error == USB_ERR_CANCELLED)
			break;
		/* Clear the stall and retry the transfer. */
		usbd_xfer_set_stall(xfer);
		goto tr_setup;
	}
}
/*
 * Read callback for a multiplexed serial port. Data arrives in frame 1
 * of a two-frame control transfer (frame 0 holds the GET_ENCAPSULATED_
 * RESPONSE request). Received bytes are handed to the ucom layer; on
 * an empty read the interrupt transfer is restarted instead.
 */
static void
uhso_mux_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct uhso_softc *sc = usbd_xfer_softc(xfer);
	struct usb_page_cache *pc;
	struct usb_device_request req;
	struct uhso_tty *ht;
	int actlen, len;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	UHSO_DPRINTF(3, "status %d\n", USB_GET_STATE(xfer));

	ht = usbd_xfer_get_priv(xfer);
	UHSO_DPRINTF(3, "ht=%p open=%d\n", ht, ht->ht_open);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		/* Got data, send to ucom */
		pc = usbd_xfer_get_frame(xfer, 1);
		len = usbd_xfer_frame_len(xfer, 1);

		UHSO_DPRINTF(3, "got %d bytes on mux port %d\n", len,
		    ht->ht_muxport);
		if (len <= 0) {
			/* Nothing to deliver; wait for the next interrupt. */
			usbd_transfer_start(sc->sc_xfer[UHSO_MUX_ENDPT_INTR]);
			break;
		}

		/* Deliver data if the TTY is open, discard otherwise */
		if (ht->ht_open)
			ucom_put_data(&sc->sc_ucom[ht->ht_muxport], pc, 0, len);
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		/* Build the CDC request addressed to this mux sub-port. */
		memset(&req, 0, sizeof(struct usb_device_request));
		req.bmRequestType = UT_READ_CLASS_INTERFACE;
		req.bRequest = UCDC_GET_ENCAPSULATED_RESPONSE;
		USETW(req.wValue, 0);
		USETW(req.wIndex, ht->ht_muxport);
		USETW(req.wLength, 1024);

		pc = usbd_xfer_get_frame(xfer, 0);
		usbd_copy_in(pc, 0, &req, sizeof(req));

		usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
		usbd_xfer_set_frame_len(xfer, 1, 1024);
		usbd_xfer_set_frames(xfer, 2);
		usbd_transfer_submit(xfer);
		break;
	default:
		UHSO_DPRINTF(0, "error: %s\n", usbd_errstr(error));
		if (error == USB_ERR_CANCELLED)
			break;
		/* Clear the stall and retry the transfer. */
		usbd_xfer_set_stall(xfer);
		goto tr_setup;
	}
}
/*
 * Write callback for a multiplexed serial port. Pulls up to 32 bytes
 * of pending TTY output from ucom and sends them as frame 1 of a
 * two-frame control transfer (frame 0 carries the SEND_ENCAPSULATED_
 * COMMAND request addressed to this mux sub-port).
 */
static void
uhso_mux_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct uhso_softc *sc = usbd_xfer_softc(xfer);
	struct uhso_tty *ht;
	struct usb_page_cache *pc;
	struct usb_device_request req;
	int actlen;
	struct usb_page_search res;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	ht = usbd_xfer_get_priv(xfer);
	UHSO_DPRINTF(3, "status=%d, using mux port %d\n",
	    USB_GET_STATE(xfer), ht->ht_muxport);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		/* actlen includes the request header in frame 0. */
		UHSO_DPRINTF(3, "wrote %zd data bytes to muxport %d\n",
		    actlen - sizeof(struct usb_device_request) ,
		    ht->ht_muxport);
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		pc = usbd_xfer_get_frame(xfer, 1);
		/* Only submit when ucom actually has data queued. */
		if (ucom_get_data(&sc->sc_ucom[ht->ht_muxport], pc,
		    0, 32, &actlen)) {
			usbd_get_page(pc, 0, &res);
			memset(&req, 0, sizeof(struct usb_device_request));
			req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
			req.bRequest = UCDC_SEND_ENCAPSULATED_COMMAND;
			USETW(req.wValue, 0);
			USETW(req.wIndex, ht->ht_muxport);
			USETW(req.wLength, actlen);

			pc = usbd_xfer_get_frame(xfer, 0);
			usbd_copy_in(pc, 0, &req, sizeof(req));

			usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
			usbd_xfer_set_frame_len(xfer, 1, actlen);
			usbd_xfer_set_frames(xfer, 2);

			UHSO_DPRINTF(3, "Prepared %d bytes for transmit "
			    "on muxport %d\n", actlen, ht->ht_muxport);

			usbd_transfer_submit(xfer);
		}
		break;
	default:
		UHSO_DPRINTF(0, "error: %s\n", usbd_errstr(error));
		if (error == USB_ERR_CANCELLED)
			break;
		/* Clear the stall and retry the transfer. */
		usbd_xfer_set_stall(xfer);
		goto tr_setup;
	}
}
static int
uhso_attach_bulkserial(struct uhso_softc *sc, struct usb_interface *iface,
int type)
{
usb_error_t uerr;
int tty;
/* Try attaching RD/WR/INTR first */
uerr = usbd_transfer_setup(sc->sc_udev,
&iface->idesc->bInterfaceNumber, sc->sc_xfer,
uhso_bs_config, UHSO_BULK_ENDPT_MAX, sc, &sc->sc_mtx);
if (uerr) {
/* Try only RD/WR */
uerr = usbd_transfer_setup(sc->sc_udev,
&iface->idesc->bInterfaceNumber, sc->sc_xfer,
uhso_bs_config, UHSO_BULK_ENDPT_MAX - 1, sc, &sc->sc_mtx);
}
if (uerr) {
UHSO_DPRINTF(0, "usbd_transfer_setup failed");
return (-1);
}
tty = uhso_alloc_tty(sc);
if (tty < 0) {
usbd_transfer_unsetup(sc->sc_xfer, UHSO_BULK_ENDPT_MAX);
return (ENOMEM);
}
sc->sc_tty[tty].ht_muxport = -1;
return (0);
}
/*
 * Bulk-serial read callback: hand any received bytes to the ucom
 * layer and resubmit a full-length read.
 */
static void
uhso_bs_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct uhso_softc *sc = usbd_xfer_softc(xfer);
	struct usb_page_cache *pc;
	int actlen;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	UHSO_DPRINTF(3, "status %d, actlen=%d\n", USB_GET_STATE(xfer), actlen);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		/* Bulk serial ports always use the single ucom at index 0. */
		pc = usbd_xfer_get_frame(xfer, 0);
		ucom_put_data(&sc->sc_ucom[0], pc, 0, actlen);
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		break;
	default:
		UHSO_DPRINTF(0, "error: %s\n", usbd_errstr(error));
		if (error == USB_ERR_CANCELLED)
			break;
		/* Clear the stall and retry the transfer. */
		usbd_xfer_set_stall(xfer);
		goto tr_setup;
	}
}
/*
 * Bulk-out write callback for a bulk serial port: pull pending output
 * from the ucom TTY layer and submit it as one bulk transfer.
 *
 * Fix: removed a duplicate, unreachable "break;" statement that
 * followed the first break in the TRANSFERRED/SETUP case (dead code).
 */
static void
uhso_bs_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct uhso_softc *sc = usbd_xfer_softc(xfer);
	struct usb_page_cache *pc;
	int actlen;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	UHSO_DPRINTF(3, "status %d, actlen=%d\n", USB_GET_STATE(xfer), actlen);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
	case USB_ST_SETUP:
tr_setup:
		pc = usbd_xfer_get_frame(xfer, 0);
		/* Only submit when the TTY layer actually has data queued. */
		if (ucom_get_data(&sc->sc_ucom[0], pc, 0, 8192, &actlen)) {
			usbd_xfer_set_frame_len(xfer, 0, actlen);
			usbd_transfer_submit(xfer);
		}
		break;
	default:
		UHSO_DPRINTF(0, "error: %s\n", usbd_errstr(error));
		if (error == USB_ERR_CANCELLED)
			break;
		/* Clear the endpoint stall and retry. */
		usbd_xfer_set_stall(xfer);
		goto tr_setup;
	}
}
/*
 * Program the current DTR/RTS line state (sc_line) into the device with
 * a CDC SET_CONTROL_LINE_STATE class request.  No-op for interfaces
 * that are not bulk serial.
 */
static void
uhso_bs_cfg(struct uhso_softc *sc)
{
	struct usb_device_request req;
	usb_error_t err;

	if (!(UHSO_IFACE_USB_TYPE(sc->sc_type) & UHSO_IF_BULK))
		return;

	req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
	req.bRequest = UCDC_SET_CONTROL_LINE_STATE;
	USETW(req.wValue, sc->sc_line);
	USETW(req.wIndex, sc->sc_iface_no);
	USETW(req.wLength, 0);

	err = ucom_cfg_do_request(sc->sc_udev, &sc->sc_ucom[0], &req,
	    NULL, 0, 1000);
	if (err != 0) {
		device_printf(sc->sc_dev, "failed to set ctrl line state to "
		    "0x%02x: %s\n", sc->sc_line, usbd_errstr(err));
	}
}
/*
 * Interrupt-in callback for a bulk serial port: parse CDC notifications
 * and propagate modem status (RI/DSR/DCD) changes to the TTY layer.
 *
 * Fix: the USB_ST_SETUP/tr_setup path was missing the re-arm of the
 * transfer (set_frame_len + submit + break), so the case fell straight
 * into "default:" -- the interrupt endpoint was never (re)started and
 * the error path looped via "goto tr_setup" forever.
 */
static void
uhso_bs_intr_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct uhso_softc *sc = usbd_xfer_softc(xfer);
	struct usb_page_cache *pc;
	int actlen;
	struct usb_cdc_notification cdc;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
	UHSO_DPRINTF(3, "status %d, actlen=%d\n", USB_GET_STATE(xfer), actlen);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		/* Sanity-check the notification length before copying. */
		if (actlen < UCDC_NOTIFICATION_LENGTH) {
			UHSO_DPRINTF(0, "UCDC notification too short: %d\n", actlen);
			goto tr_setup;
		}
		else if (actlen > (int)sizeof(struct usb_cdc_notification)) {
			UHSO_DPRINTF(0, "UCDC notification too large: %d\n", actlen);
			actlen = sizeof(struct usb_cdc_notification);
		}

		pc = usbd_xfer_get_frame(xfer, 0);
		usbd_copy_out(pc, 0, &cdc, actlen);

		/* Ignore notifications meant for another interface. */
		if (UGETW(cdc.wIndex) != sc->sc_iface_no) {
			UHSO_DPRINTF(0, "Interface mismatch, got %d expected %d\n",
			    UGETW(cdc.wIndex), sc->sc_iface_no);
			goto tr_setup;
		}

		if (cdc.bmRequestType == UCDC_NOTIFICATION &&
		    cdc.bNotification == UCDC_N_SERIAL_STATE) {
			UHSO_DPRINTF(2, "notify = 0x%02x\n", cdc.data[0]);

			/* Rebuild the modem status bits from scratch. */
			sc->sc_msr = 0;
			sc->sc_lsr = 0;
			if (cdc.data[0] & UCDC_N_SERIAL_RI)
				sc->sc_msr |= SER_RI;
			if (cdc.data[0] & UCDC_N_SERIAL_DSR)
				sc->sc_msr |= SER_DSR;
			if (cdc.data[0] & UCDC_N_SERIAL_DCD)
				sc->sc_msr |= SER_DCD;

			ucom_status_change(&sc->sc_ucom[0]);
		}
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		/* Re-arm the interrupt transfer. */
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		break;
	default:
		if (error == USB_ERR_CANCELLED)
			break;
		usbd_xfer_set_stall(xfer);
		goto tr_setup;
	}
}
/*
 * ucom callback: report the cached line/modem status bits that
 * uhso_bs_intr_callback() maintains.
 */
static void
uhso_ucom_cfg_get_status(struct ucom_softc *ucom, uint8_t *lsr, uint8_t *msr)
{
	struct uhso_softc *sc = ucom->sc_parent;

	*lsr = sc->sc_lsr;
	*msr = sc->sc_msr;
}
/*
 * ucom callback: set or clear DTR in the cached line state and push the
 * new state to the device.  Only bulk serial interfaces support this.
 */
static void
uhso_ucom_cfg_set_dtr(struct ucom_softc *ucom, uint8_t onoff)
{
	struct uhso_softc *sc = ucom->sc_parent;

	if (UHSO_IFACE_USB_TYPE(sc->sc_type) & UHSO_IF_BULK) {
		if (onoff)
			sc->sc_line |= UCDC_LINE_DTR;
		else
			sc->sc_line &= ~UCDC_LINE_DTR;
		uhso_bs_cfg(sc);
	}
}
/*
 * ucom callback: set or clear RTS in the cached line state and push the
 * new state to the device.  Only bulk serial interfaces support this.
 */
static void
uhso_ucom_cfg_set_rts(struct ucom_softc *ucom, uint8_t onoff)
{
	struct uhso_softc *sc = ucom->sc_parent;

	if (UHSO_IFACE_USB_TYPE(sc->sc_type) & UHSO_IF_BULK) {
		if (onoff)
			sc->sc_line |= UCDC_LINE_RTS;
		else
			sc->sc_line &= ~UCDC_LINE_RTS;
		uhso_bs_cfg(sc);
	}
}
/*
 * ucom callback: mark the port open and start the appropriate receive
 * transfers (mux interrupt transfer, or bulk read plus optional
 * interrupt transfer).
 */
static void
uhso_ucom_start_read(struct ucom_softc *ucom)
{
	struct uhso_softc *sc = ucom->sc_parent;
	int usbtype = UHSO_IFACE_USB_TYPE(sc->sc_type);

	UHSO_DPRINTF(3, "unit=%d, subunit=%d\n",
	    ucom->sc_super->sc_unit, ucom->sc_subunit);

	if (usbtype & UHSO_IF_MUX) {
		sc->sc_tty[ucom->sc_subunit].ht_open = 1;
		usbd_transfer_start(sc->sc_xfer[UHSO_MUX_ENDPT_INTR]);
	} else if (usbtype & UHSO_IF_BULK) {
		sc->sc_tty[0].ht_open = 1;
		usbd_transfer_start(sc->sc_xfer[UHSO_BULK_ENDPT_READ]);
		if (sc->sc_xfer[UHSO_BULK_ENDPT_INTR] != NULL)
			usbd_transfer_start(sc->sc_xfer[UHSO_BULK_ENDPT_INTR]);
	}
}
/*
 * ucom callback: mark the port closed and stop the receive transfers.
 *
 * Fix: the bulk case called usbd_transfer_start() on the bulk read
 * endpoint, re-arming it instead of stopping it; it now calls
 * usbd_transfer_stop() like the mux and interrupt cases.
 */
static void
uhso_ucom_stop_read(struct ucom_softc *ucom)
{
	struct uhso_softc *sc = ucom->sc_parent;

	if (UHSO_IFACE_USB_TYPE(sc->sc_type) & UHSO_IF_MUX) {
		sc->sc_tty[ucom->sc_subunit].ht_open = 0;
		usbd_transfer_stop(
		    sc->sc_tty[ucom->sc_subunit].ht_xfer[UHSO_CTRL_READ]);
	}
	else if (UHSO_IFACE_USB_TYPE(sc->sc_type) & UHSO_IF_BULK) {
		sc->sc_tty[0].ht_open = 0;
		usbd_transfer_stop(sc->sc_xfer[UHSO_BULK_ENDPT_READ]);
		if (sc->sc_xfer[UHSO_BULK_ENDPT_INTR] != NULL)
			usbd_transfer_stop(sc->sc_xfer[UHSO_BULK_ENDPT_INTR]);
	}
}
/*
 * ucom callback: start the transmit path.  For muxed ports the per-port
 * control-write transfer carries the data; for bulk ports the shared
 * bulk-out transfer is used.
 */
static void
uhso_ucom_start_write(struct ucom_softc *ucom)
{
	struct uhso_softc *sc = ucom->sc_parent;
	int usbtype = UHSO_IFACE_USB_TYPE(sc->sc_type);

	if (usbtype & UHSO_IF_MUX) {
		UHSO_DPRINTF(3, "local unit %d\n", ucom->sc_subunit);
		usbd_transfer_start(sc->sc_xfer[UHSO_MUX_ENDPT_INTR]);
		/* Tag the control-write transfer with its port. */
		usbd_xfer_set_priv(
		    sc->sc_tty[ucom->sc_subunit].ht_xfer[UHSO_CTRL_WRITE],
		    &sc->sc_tty[ucom->sc_subunit]);
		usbd_transfer_start(
		    sc->sc_tty[ucom->sc_subunit].ht_xfer[UHSO_CTRL_WRITE]);
	} else if (usbtype & UHSO_IF_BULK) {
		usbd_transfer_start(sc->sc_xfer[UHSO_BULK_ENDPT_WRITE]);
	}
}
/*
 * ucom callback: stop the transmit path for a muxed or bulk port.
 */
static void
uhso_ucom_stop_write(struct ucom_softc *ucom)
{
	struct uhso_softc *sc = ucom->sc_parent;
	int usbtype = UHSO_IFACE_USB_TYPE(sc->sc_type);

	if (usbtype & UHSO_IF_MUX) {
		usbd_transfer_stop(
		    sc->sc_tty[ucom->sc_subunit].ht_xfer[UHSO_CTRL_WRITE]);
	} else if (usbtype & UHSO_IF_BULK) {
		usbd_transfer_stop(sc->sc_xfer[UHSO_BULK_ENDPT_WRITE]);
	}
}
/*
 * Attach a raw-IP network interface for a network data port.
 * Sets up the ifnet transfers, allocates an ifnet with its own unit
 * number and registers a "netif" sysctl pointing at it.
 *
 * Fix: removed the stale diff-residue lines ("- if (sc->sc_ifp == NULL)
 * ...") left in the file; if_alloc() cannot fail, so no NULL check is
 * required.
 *
 * Returns 0 on success, -1 on transfer setup failure.
 */
static int
uhso_attach_ifnet(struct uhso_softc *sc, struct usb_interface *iface, int type)
{
	if_t ifp;
	usb_error_t uerr;
	struct sysctl_ctx_list *sctx;
	struct sysctl_oid *soid;
	unsigned devunit;

	uerr = usbd_transfer_setup(sc->sc_udev,
	    &iface->idesc->bInterfaceNumber, sc->sc_if_xfer,
	    uhso_ifnet_config, UHSO_IFNET_MAX, sc, &sc->sc_mtx);
	if (uerr) {
		UHSO_DPRINTF(0, "usbd_transfer_setup failed: %s\n",
		    usbd_errstr(uerr));
		return (-1);
	}

	sc->sc_ifp = ifp = if_alloc(IFT_OTHER);

	/* Arm the deferred RX flush callout. */
	callout_init_mtx(&sc->sc_c, &sc->sc_mtx, 0);
	mtx_lock(&sc->sc_mtx);
	callout_reset(&sc->sc_c, 1, uhso_if_rxflush, sc);
	mtx_unlock(&sc->sc_mtx);

	/*
	 * We create our own unit numbers for ifnet devices because the
	 * USB interface unit numbers can be at arbitrary positions yielding
	 * odd looking device names.
	 */
	devunit = alloc_unr(uhso_ifnet_unit);

	if_initname(ifp, device_get_name(sc->sc_dev), devunit);
	if_setmtu(ifp, UHSO_MAX_MTU);
	if_setioctlfn(ifp, uhso_if_ioctl);
	if_setinitfn(ifp, uhso_if_init);
	if_setstartfn(ifp, uhso_if_start);
	if_setoutputfn(ifp, uhso_if_output);
	if_setflags(ifp, IFF_BROADCAST | IFF_MULTICAST | IFF_NOARP);
	if_setsoftc(ifp, sc);
	if_setsendqlen(ifp, ifqmaxlen);
	if_setsendqready(ifp);

	if_attach(ifp);
	bpfattach(ifp, DLT_RAW, 0);

	sctx = device_get_sysctl_ctx(sc->sc_dev);
	soid = device_get_sysctl_tree(sc->sc_dev);
	/* Unlocked read... */
	SYSCTL_ADD_CONST_STRING(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "netif",
	    CTLFLAG_RD, if_name(ifp), "Attached network interface");

	return (0);
}
/*
 * Bulk-in callback for the raw-IP network interface: copy the received
 * frame into an mbuf, queue it for deferred processing by
 * uhso_if_rxflush() and re-arm the transfer.
 *
 * Fix: m_getcl(M_NOWAIT, ...) may return NULL; the result was
 * previously dereferenced unchecked.  On allocation failure the frame
 * is now dropped (IQDROPS) and the transfer is still re-armed.
 */
static void
uhso_ifnet_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct uhso_softc *sc = usbd_xfer_softc(xfer);
	struct mbuf *m;
	struct usb_page_cache *pc;
	int actlen;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	UHSO_DPRINTF(3, "status=%d, actlen=%d\n", USB_GET_STATE(xfer), actlen);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		if (actlen > 0 && (if_getdrvflags(sc->sc_ifp) & IFF_DRV_RUNNING)) {
			pc = usbd_xfer_get_frame(xfer, 0);
			if (mbufq_full(&sc->sc_rxq))
				break;
			m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
			if (m == NULL) {
				/* Drop the frame, but keep receiving. */
				if_inc_counter(sc->sc_ifp,
				    IFCOUNTER_IQDROPS, 1);
				goto tr_setup;
			}
			usbd_copy_out(pc, 0, mtod(m, uint8_t *), actlen);
			m->m_pkthdr.len = m->m_len = actlen;

			/* Enqueue frame for further processing */
			mbufq_enqueue(&sc->sc_rxq, m);
			if (!callout_pending(&sc->sc_c) ||
			    !callout_active(&sc->sc_c)) {
				callout_schedule(&sc->sc_c, 1);
			}
		}
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		break;
	default:
		UHSO_DPRINTF(0, "error: %s\n", usbd_errstr(error));
		if (error == USB_ERR_CANCELLED)
			break;
		usbd_xfer_set_stall(xfer);
		goto tr_setup;
	}
}
/*
 * Deferred RX processing, called with mutex locked.
 *
 * Each frame we receive might contain several small ip-packets as well
 * as partial ip-packets. We need to separate/assemble them into individual
 * packets before sending them to the ip-layer.
 */
static void
uhso_if_rxflush(void *arg)
{
	struct epoch_tracker et;
	struct uhso_softc *sc = arg;
	if_t ifp = sc->sc_ifp;
	uint8_t *cp;
	struct mbuf *m, *m0, *mwait;
	struct ip *ip;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	uint16_t iplen;
	int isr;

	m = NULL;
	/* Pick up any partial packet left over from the previous flush. */
	mwait = sc->sc_mwait;
	NET_EPOCH_ENTER(et);
	for (;;) {
		if (m == NULL) {
			if ((m = mbufq_dequeue(&sc->sc_rxq)) == NULL)
				break;
			UHSO_DPRINTF(3, "dequeue m=%p, len=%d\n", m, m->m_len);
		}
		/* Drop the softc lock while parsing/dispatching. */
		mtx_unlock(&sc->sc_mtx);

		/* Do we have a partial packet waiting? */
		if (mwait != NULL) {
			m0 = mwait;
			mwait = NULL;
			UHSO_DPRINTF(3, "partial m0=%p(%d), concat w/ m=%p(%d)\n",
			    m0, m0->m_len, m, m->m_len);
			/* Append the new data to the deferred fragment. */
			m_catpkt(m0, m);
			m = m_pullup(m0, sizeof(struct ip));
			if (m == NULL) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				UHSO_DPRINTF(0, "m_pullup failed\n");
				mtx_lock(&sc->sc_mtx);
				continue;
			}
			UHSO_DPRINTF(3, "Constructed mbuf=%p, len=%d\n",
			    m, m->m_pkthdr.len);
		}
		cp = mtod(m, uint8_t *);
		ip = (struct ip *)cp;
#ifdef INET6
		ip6 = (struct ip6_hdr *)cp;
#endif
		/* Check for IPv4 */
		if (ip->ip_v == IPVERSION) {
			/*
			 * htons() is used here where ntohs() is meant;
			 * the two are identical byte swaps on all
			 * supported architectures, so the result is the
			 * host-order total length.
			 */
			iplen = htons(ip->ip_len);
			isr = NETISR_IP;
		}
#ifdef INET6
		/* Check for IPv6 */
		else if ((ip6->ip6_vfc & IPV6_VERSION_MASK) == IPV6_VERSION) {
			/*
			 * NOTE(review): ip6_plen is the payload length
			 * and excludes the 40-byte IPv6 header, while
			 * the comparisons below treat iplen as a total
			 * packet length -- verify IPv6 reassembly.
			 */
			iplen = htons(ip6->ip6_plen);
			isr = NETISR_IPV6;
		}
#endif
		else {
			UHSO_DPRINTF(0, "got unexpected ip version %d, "
			    "m=%p, len=%d\n", (*cp & 0xf0) >> 4, m, m->m_len);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			UHSO_HEXDUMP(cp, 4);
			m_freem(m);
			m = NULL;
			mtx_lock(&sc->sc_mtx);
			continue;
		}
		if (iplen == 0) {
			UHSO_DPRINTF(0, "Zero IP length\n");
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			m_freem(m);
			m = NULL;
			mtx_lock(&sc->sc_mtx);
			continue;
		}
		UHSO_DPRINTF(3, "m=%p, len=%d, cp=%p, iplen=%d\n",
		    m, m->m_pkthdr.len, cp, iplen);
		m0 = NULL;
		/* More IP packets in this mbuf */
		if (iplen < m->m_pkthdr.len) {
			m0 = m;
			/*
			 * Allocate a new mbuf for this IP packet and
			 * copy the IP-packet into it.
			 */
			m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
			memcpy(mtod(m, uint8_t *), mtod(m0, uint8_t *), iplen);
			m->m_pkthdr.len = m->m_len = iplen;

			/* Adjust the size of the original mbuf */
			m_adj(m0, iplen);
			m0 = m_defrag(m0, M_WAITOK);
			UHSO_DPRINTF(3, "New mbuf=%p, len=%d/%d, m0=%p, "
			    "m0_len=%d/%d\n", m, m->m_pkthdr.len, m->m_len,
			    m0, m0->m_pkthdr.len, m0->m_len);
		}
		else if (iplen > m->m_pkthdr.len) {
			/* Packet incomplete: defer until more data arrives. */
			UHSO_DPRINTF(3, "Deferred mbuf=%p, len=%d\n",
			    m, m->m_pkthdr.len);
			mwait = m;
			m = NULL;
			mtx_lock(&sc->sc_mtx);
			continue;
		}

		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		m->m_pkthdr.rcvif = ifp;

		/* Dispatch to IP layer */
		BPF_MTAP(sc->sc_ifp, m);
		M_SETFIB(m, if_getfib(ifp));
		netisr_dispatch(isr, m);
		/* Continue with the remainder of the frame, if any. */
		m = m0 != NULL ? m0 : NULL;
		mtx_lock(&sc->sc_mtx);
	}
	NET_EPOCH_EXIT(et);
	/* Remember any remaining partial packet for the next flush. */
	sc->sc_mwait = mwait;
}
/*
 * Bulk-out callback for the raw-IP network interface: dequeue the next
 * packet from the ifnet send queue and submit it.
 */
static void
uhso_ifnet_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct uhso_softc *sc = usbd_xfer_softc(xfer);
	if_t ifp = sc->sc_ifp;
	struct usb_page_cache *pc;
	struct mbuf *m;
	int actlen;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	UHSO_DPRINTF(3, "status %d, actlen=%d\n", USB_GET_STATE(xfer), actlen);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		/* FALLTHROUGH: try to send the next queued packet. */
	case USB_ST_SETUP:
tr_setup:
		m = if_dequeue(ifp);
		if (m == NULL)
			break;
		if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
		/*
		 * NOTE(review): packets larger than MCLBYTES are silently
		 * truncated here rather than dropped -- confirm intent.
		 */
		if (m->m_pkthdr.len > MCLBYTES)
			m->m_pkthdr.len = MCLBYTES;

		usbd_xfer_set_frame_len(xfer, 0, m->m_pkthdr.len);
		pc = usbd_xfer_get_frame(xfer, 0);
		usbd_m_copy_in(pc, 0, m, 0, m->m_pkthdr.len);
		usbd_transfer_submit(xfer);

		BPF_MTAP(ifp, m);
		/* Data was copied into the transfer buffer; free the mbuf. */
		m_freem(m);
		break;
	default:
		UHSO_DPRINTF(0, "error: %s\n", usbd_errstr(error));
		if (error == USB_ERR_CANCELLED)
			break;
		usbd_xfer_set_stall(xfer);
		goto tr_setup;
	}
}
/*
 * ifnet ioctl handler: bring the interface up or down on SIOCSIFFLAGS;
 * address and multicast changes are accepted as no-ops.  All other
 * requests are rejected.
 */
static int
uhso_if_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct uhso_softc *sc = if_getsoftc(ifp);
	int up, running;

	switch (cmd) {
	case SIOCSIFFLAGS:
		up = (if_getflags(ifp) & IFF_UP) != 0;
		running = (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0;
		if (up && !running) {
			uhso_if_init(sc);
		} else if (!up && running) {
			mtx_lock(&sc->sc_mtx);
			uhso_if_stop(sc);
			mtx_unlock(&sc->sc_mtx);
		}
		return (0);
	case SIOCSIFADDR:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		return (0);
	default:
		return (EINVAL);
	}
}
/*
 * ifnet init routine: (re)start the interface by stopping any running
 * transfers and marking it up and running.
 *
 * Fix: removed a redundant dead store ("ifp = sc->sc_ifp;") that
 * re-assigned the already-initialized local.
 */
static void
uhso_if_init(void *priv)
{
	struct uhso_softc *sc = priv;
	if_t ifp = sc->sc_ifp;

	mtx_lock(&sc->sc_mtx);
	uhso_if_stop(sc);
	if_setflagbits(ifp, IFF_UP, 0);
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	mtx_unlock(&sc->sc_mtx);

	UHSO_DPRINTF(2, "ifnet initialized\n");
}
/*
 * ifnet output routine: accept only IPv4/IPv6 packets and hand them to
 * the generic transmit path.
 *
 * NOTE(review): OPACKETS is incremented here and again in
 * uhso_ifnet_write_callback() when the transfer completes, so each
 * packet appears to be counted twice -- verify.
 */
static int
uhso_if_output(if_t ifp, struct mbuf *m0, const struct sockaddr *dst,
    struct route *ro)
{
	int error;

	/* Only IPv4/6 support */
	if (dst->sa_family != AF_INET
#ifdef INET6
	   && dst->sa_family != AF_INET6
#endif
	 ) {
		return (EAFNOSUPPORT);
	}

	error = if_transmit(ifp, m0);
	if (error) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		return (ENOBUFS);
	}
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	return (0);
}
/*
 * ifnet start routine: kick the read and write transfers if the
 * interface is running.
 */
static void
uhso_if_start(if_t ifp)
{
	struct uhso_softc *sc = if_getsoftc(ifp);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
		mtx_lock(&sc->sc_mtx);
		usbd_transfer_start(sc->sc_if_xfer[UHSO_IFNET_READ]);
		usbd_transfer_start(sc->sc_if_xfer[UHSO_IFNET_WRITE]);
		mtx_unlock(&sc->sc_mtx);
		UHSO_DPRINTF(3, "interface started\n");
	} else {
		UHSO_DPRINTF(1, "Not running\n");
	}
}
/*
 * Stop the network data transfers and clear the running/active flags.
 * Called with the softc mutex held (see uhso_if_init/uhso_if_ioctl).
 */
static void
uhso_if_stop(struct uhso_softc *sc)
{
	usbd_transfer_stop(sc->sc_if_xfer[UHSO_IFNET_READ]);
	usbd_transfer_stop(sc->sc_if_xfer[UHSO_IFNET_WRITE]);
	if_setdrvflagbits(sc->sc_ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
}
diff --git a/sys/dev/usb/net/usb_ethernet.c b/sys/dev/usb/net/usb_ethernet.c
index 6025412ebc9c..2f423f557569 100644
--- a/sys/dev/usb/net/usb_ethernet.c
+++ b/sys/dev/usb/net/usb_ethernet.c
@@ -1,666 +1,661 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2009 Andrew Thompson (thompsa@FreeBSD.org)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usb_process.h>
#include <dev/usb/net/usb_ethernet.h>
static SYSCTL_NODE(_net, OID_AUTO, ue, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"USB Ethernet parameters");
#define UE_LOCK(_ue) mtx_lock((_ue)->ue_mtx)
#define UE_UNLOCK(_ue) mtx_unlock((_ue)->ue_mtx)
#define UE_LOCK_ASSERT(_ue, t) mtx_assert((_ue)->ue_mtx, t)
MODULE_DEPEND(uether, usb, 1, 1, 1);
MODULE_DEPEND(uether, miibus, 1, 1, 1);
static struct unrhdr *ueunit;
static usb_proc_callback_t ue_attach_post_task;
static usb_proc_callback_t ue_promisc_task;
static usb_proc_callback_t ue_setmulti_task;
static usb_proc_callback_t ue_ifmedia_task;
static usb_proc_callback_t ue_tick_task;
static usb_proc_callback_t ue_start_task;
static usb_proc_callback_t ue_stop_task;
static void ue_init(void *);
static void ue_start(if_t);
static int ue_ifmedia_upd(if_t);
static void ue_watchdog(void *);
/*
 * Sleep on the ether mutex for "_ticks" ticks.
 *
 * Return values:
 * 0: success
 * Else: device has been detached
 */
uint8_t
uether_pause(struct usb_ether *ue, unsigned _ticks)
{
	if (usb_proc_is_gone(&ue->ue_tq))
		return (1);	/* nothing to do */

	usb_pause_mtx(ue->ue_mtx, _ticks);
	return (0);
}
/*
 * Queue a configuration task ("fn") on the per-device USB process using
 * the two-message ping-pong pair (t0, t1).  Start and stop tasks are
 * waited on so they complete synchronously.
 */
static void
ue_queue_command(struct usb_ether *ue,
    usb_proc_callback_t *fn,
    struct usb_proc_msg *t0, struct usb_proc_msg *t1)
{
	struct usb_ether_cfg_task *task;

	UE_LOCK_ASSERT(ue, MA_OWNED);

	if (usb_proc_is_gone(&ue->ue_tq)) {
		return;         /* nothing to do */
	}
	/*
	 * NOTE: The task cannot get executed before we drop the
	 * "sc_mtx" mutex. It is safe to update fields in the message
	 * structure after that the message got queued.
	 */
	task = (struct usb_ether_cfg_task *)
	  usb_proc_msignal(&ue->ue_tq, t0, t1);

	/* Setup callback and self pointers */
	task->hdr.pm_callback = fn;
	task->ue = ue;

	/*
	 * Start and stop must be synchronous!
	 */
	if ((fn == ue_start_task) || (fn == ue_stop_task))
		usb_proc_mwait(&ue->ue_tq, t0, t1);
}
/* Accessor: return the ifnet attached to this USB ethernet instance. */
if_t
uether_getifp(struct usb_ether *ue)
{
	return (ue->ue_ifp);
}
/* Accessor: return the MII data of the attached miibus child. */
struct mii_data *
uether_getmii(struct usb_ether *ue)
{
	return (device_get_softc(ue->ue_miibus));
}
/* Accessor: return the driver's private softc pointer. */
void *
uether_getsc(struct usb_ether *ue)
{
	return (ue->ue_sc);
}
/*
 * sysctl handler for net.ue.N.%parent: report the name and unit of the
 * parent USB device.
 */
static int
ue_sysctl_parent(SYSCTL_HANDLER_ARGS)
{
	struct usb_ether *ue = arg1;

	return (SYSCTL_OUT_STR(req, device_get_nameunit(ue->ue_dev)));
}
/*
 * Begin attaching a USB ethernet instance: validate the caller-supplied
 * fields, create the per-device USB process and queue the rest of the
 * attach work (ue_attach_post_task) to run asynchronously.
 *
 * Returns 0 on success or an errno on failure.
 */
int
uether_ifattach(struct usb_ether *ue)
{
	int error;

	/* check some critical parameters */
	if (ue->ue_dev == NULL || ue->ue_udev == NULL ||
	    ue->ue_mtx == NULL || ue->ue_methods == NULL)
		return (EINVAL);

	error = usb_proc_create(&ue->ue_tq, ue->ue_mtx,
	    device_get_nameunit(ue->ue_dev), USB_PRI_MED);
	if (error != 0) {
		device_printf(ue->ue_dev, "could not setup taskqueue\n");
		return (error);
	}

	/* fork rest of the attach code */
	UE_LOCK(ue);
	ue_queue_command(ue, ue_attach_post_task,
	    &ue->ue_sync_task[0].hdr,
	    &ue->ue_sync_task[1].hdr);
	UE_UNLOCK(ue);

	return (0);
}
/* Block until the deferred attach task (ue_attach_post_task) is done. */
void
uether_ifattach_wait(struct usb_ether *ue)
{

	UE_LOCK(ue);
	usb_proc_mwait(&ue->ue_tq,
	    &ue->ue_sync_task[0].hdr,
	    &ue->ue_sync_task[1].hdr);
	UE_UNLOCK(ue);
}
/*
 * Deferred attach work, run on the per-device USB process: call the
 * driver's post-attach hook, create and configure the ifnet, attach the
 * MII bus (if the driver provides MII methods) and register the per-
 * unit sysctl tree.  Entered with the ether lock held; drops it while
 * doing the allocation-heavy work and re-acquires it before returning.
 *
 * Fix: removed the stale diff-residue lines ("- if (ifp == NULL) ...")
 * left in the file; if_alloc() cannot fail, so no NULL check is
 * required.
 */
static void
ue_attach_post_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;
	if_t ifp;
	int error;
	char num[14];			/* sufficient for 32 bits */

	/* first call driver's post attach routine */
	ue->ue_methods->ue_attach_post(ue);

	UE_UNLOCK(ue);

	ue->ue_unit = alloc_unr(ueunit);
	usb_callout_init_mtx(&ue->ue_watchdog, ue->ue_mtx, 0);
	sysctl_ctx_init(&ue->ue_sysctl_ctx);
	mbufq_init(&ue->ue_rxq, 0 /* unlimited length */);

	error = 0;
	CURVNET_SET_QUIET(vnet0);
	ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, ue);
	if_initname(ifp, "ue", ue->ue_unit);
	if (ue->ue_methods->ue_attach_post_sub != NULL) {
		/* Driver handles its own ifnet configuration. */
		ue->ue_ifp = ifp;
		error = ue->ue_methods->ue_attach_post_sub(ue);
	} else {
		if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
		if (ue->ue_methods->ue_ioctl != NULL)
			if_setioctlfn(ifp, ue->ue_methods->ue_ioctl);
		else
			if_setioctlfn(ifp, uether_ioctl);
		if_setstartfn(ifp, ue_start);
		if_setinitfn(ifp, ue_init);
		if_setsendqlen(ifp, ifqmaxlen);
		if_setsendqready(ifp);
		ue->ue_ifp = ifp;

		if (ue->ue_methods->ue_mii_upd != NULL &&
		    ue->ue_methods->ue_mii_sts != NULL) {
			bus_topo_lock();
			error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
			    ue_ifmedia_upd, ue->ue_methods->ue_mii_sts,
			    BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
			bus_topo_unlock();
		}
	}

	if (error) {
		device_printf(ue->ue_dev, "attaching PHYs failed\n");
		goto fail;
	}

	if_printf(ifp, "<USB Ethernet> on %s\n", device_get_nameunit(ue->ue_dev));
	ether_ifattach(ifp, ue->ue_eaddr);
	/* Tell upper layer we support VLAN oversized frames. */
	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	CURVNET_RESTORE();

	snprintf(num, sizeof(num), "%u", ue->ue_unit);
	ue->ue_sysctl_oid = SYSCTL_ADD_NODE(&ue->ue_sysctl_ctx,
	    &SYSCTL_NODE_CHILDREN(_net, ue),
	    OID_AUTO, num, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
	SYSCTL_ADD_PROC(&ue->ue_sysctl_ctx,
	    SYSCTL_CHILDREN(ue->ue_sysctl_oid), OID_AUTO, "%parent",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, ue, 0,
	    ue_sysctl_parent, "A", "parent device");

	UE_LOCK(ue);
	return;

fail:
	CURVNET_RESTORE();

	/* drain mbuf queue */
	mbufq_drain(&ue->ue_rxq);

	/* free unit */
	free_unr(ueunit, ue->ue_unit);
	if (ue->ue_ifp != NULL) {
		if_free(ue->ue_ifp);
		ue->ue_ifp = NULL;
	}
	UE_LOCK(ue);
	return;
}
/*
 * Detach a USB ethernet instance.  Teardown ordering matters: drain the
 * task queue first so the ifnet pointer is stable, stop the interface
 * and its callouts, detach ethernet before the miibus (so user space
 * cannot issue MII calls into a dying device), then release resources.
 */
void
uether_ifdetach(struct usb_ether *ue)
{
	if_t ifp;

	/* wait for any post attach or other command to complete */
	usb_proc_drain(&ue->ue_tq);

	/* read "ifnet" pointer after taskqueue drain */
	ifp = ue->ue_ifp;

	if (ifp != NULL) {
		/* we are not running any more */
		UE_LOCK(ue);
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		UE_UNLOCK(ue);

		/* drain any callouts */
		usb_callout_drain(&ue->ue_watchdog);

		/*
		 * Detach ethernet first to stop miibus calls from
		 * user-space:
		 */
		ether_ifdetach(ifp);

		/* detach miibus */
		if (ue->ue_miibus != NULL) {
			bus_topo_lock();
			device_delete_child(ue->ue_dev, ue->ue_miibus);
			bus_topo_unlock();
		}

		/* free interface instance */
		if_free(ifp);

		/* free sysctl */
		sysctl_ctx_free(&ue->ue_sysctl_ctx);

		/* drain mbuf queue */
		mbufq_drain(&ue->ue_rxq);

		/* free unit */
		free_unr(ueunit, ue->ue_unit);
	}

	/* free taskqueue, if any */
	usb_proc_free(&ue->ue_tq);
}
/* Return non-zero once the device's USB process has been torn down. */
uint8_t
uether_is_gone(struct usb_ether *ue)
{
	return (usb_proc_is_gone(&ue->ue_tq));
}
/* Public wrapper around ue_init() for drivers that need an init hook. */
void
uether_init(void *arg)
{
	ue_init(arg);
}
/*
 * ifnet init routine: defer the actual start to the USB process by
 * queueing a synchronous start task.
 */
static void
ue_init(void *arg)
{
	struct usb_ether *ue;

	ue = arg;
	UE_LOCK(ue);
	ue_queue_command(ue, ue_start_task, &ue->ue_sync_task[0].hdr,
	    &ue->ue_sync_task[1].hdr);
	UE_UNLOCK(ue);
}
/*
 * Process-context start task: run the driver's init method and, if the
 * interface came up, arm the one-second watchdog when the driver has a
 * tick method.
 */
static void
ue_start_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;
	if_t ifp = ue->ue_ifp;

	UE_LOCK_ASSERT(ue, MA_OWNED);

	ue->ue_methods->ue_init(ue);

	/* The driver's init method sets IFF_DRV_RUNNING on success. */
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	if (ue->ue_methods->ue_tick != NULL)
		usb_callout_reset(&ue->ue_watchdog, hz, ue_watchdog, ue);
}
/*
 * Process-context stop task: cancel the watchdog and run the driver's
 * stop method.
 */
static void
ue_stop_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;

	UE_LOCK_ASSERT(ue, MA_OWNED);

	usb_callout_stop(&ue->ue_watchdog);

	ue->ue_methods->ue_stop(ue);
}
/* Public wrapper around ue_start() for drivers that need a start hook. */
void
uether_start(if_t ifp)
{
	ue_start(ifp);
}
/*
 * ifnet start routine: call the driver's start method under the ether
 * lock, but only while the interface is running.
 */
static void
ue_start(if_t ifp)
{
	struct usb_ether *ue;

	ue = if_getsoftc(ifp);
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
		UE_LOCK(ue);
		ue->ue_methods->ue_start(ue);
		UE_UNLOCK(ue);
	}
}
/* Process-context task: apply the promiscuous-mode setting. */
static void
ue_promisc_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;

	ue->ue_methods->ue_setpromisc(ue);
}
/* Process-context task: reprogram the multicast filter. */
static void
ue_setmulti_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;

	ue->ue_methods->ue_setmulti(ue);
}
/* Public wrapper around ue_ifmedia_upd() for driver use. */
int
uether_ifmedia_upd(if_t ifp)
{
	return (ue_ifmedia_upd(ifp));
}
/*
 * ifmedia change hook: the MII update must run in process context, so
 * queue an ifmedia task instead of calling the driver directly.
 * Always reports success; the task runs asynchronously.
 */
static int
ue_ifmedia_upd(if_t ifp)
{
	struct usb_ether *ue;

	ue = if_getsoftc(ifp);
	/* Defer to process context */
	UE_LOCK(ue);
	ue_queue_command(ue, ue_ifmedia_task, &ue->ue_media_task[0].hdr,
	    &ue->ue_media_task[1].hdr);
	UE_UNLOCK(ue);
	return (0);
}
/* Process-context task: run the driver's MII media-update method. */
static void
ue_ifmedia_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;
	if_t ifp = ue->ue_ifp;

	ue->ue_methods->ue_mii_upd(ifp);
}
/*
 * Per-second watchdog callout: queue the driver tick task and re-arm
 * itself while the interface remains running.
 */
static void
ue_watchdog(void *arg)
{
	struct usb_ether *ue = arg;
	if_t ifp = ue->ue_ifp;

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	ue_queue_command(ue, ue_tick_task,
	    &ue->ue_tick_task[0].hdr,
	    &ue->ue_tick_task[1].hdr);

	usb_callout_reset(&ue->ue_watchdog, hz, ue_watchdog, ue);
}
/* Process-context task: run the driver's periodic tick method. */
static void
ue_tick_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;
	if_t ifp = ue->ue_ifp;

	/* Skip the tick if the interface was stopped meanwhile. */
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	ue->ue_methods->ue_tick(ue);
}
/*
 * Default ifnet ioctl handler for USB ethernet drivers.  Flag and
 * multicast changes are deferred to the USB process via tasks; media
 * ioctls go to the miibus when one is attached; everything else falls
 * through to ether_ioctl().
 */
int
uether_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct usb_ether *ue = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		UE_LOCK(ue);
		if (if_getflags(ifp) & IFF_UP) {
			/* Already running: only promiscuity may change. */
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				ue_queue_command(ue, ue_promisc_task,
				    &ue->ue_promisc_task[0].hdr,
				    &ue->ue_promisc_task[1].hdr);
			else
				ue_queue_command(ue, ue_start_task,
				    &ue->ue_sync_task[0].hdr,
				    &ue->ue_sync_task[1].hdr);
		} else {
			ue_queue_command(ue, ue_stop_task,
			    &ue->ue_sync_task[0].hdr,
			    &ue->ue_sync_task[1].hdr);
		}
		UE_UNLOCK(ue);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		UE_LOCK(ue);
		ue_queue_command(ue, ue_setmulti_task,
		    &ue->ue_multi_task[0].hdr,
		    &ue->ue_multi_task[1].hdr);
		UE_UNLOCK(ue);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		if (ue->ue_miibus != NULL) {
			mii = device_get_softc(ue->ue_miibus);
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}
/*
 * Module event handler: allocate the global "ue" unit-number space on
 * load.
 *
 * NOTE(review): MOD_UNLOAD does not delete_unrhdr(ueunit), so the unit
 * allocator is leaked on unload -- confirm whether unload is supported.
 */
static int
uether_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		ueunit = new_unrhdr(0, INT_MAX, NULL);
		break;
	case MOD_UNLOAD:
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}
/* Module linkage for the "uether" helper module. */
static moduledata_t uether_mod = {
	"uether",
	uether_modevent,
	0
};
/*
 * Allocate a receive mbuf cluster sized to MCLBYTES with ETHER_ALIGN
 * bytes trimmed from the front so the IP header ends up aligned.
 * Returns NULL if no mbuf/cluster could be allocated.
 */
struct mbuf *
uether_newbuf(void)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (NULL);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);
	return (m);
}
/*
 * Finalize a driver-supplied received mbuf (set length and receive
 * interface, bump IPACKETS) and enqueue it for uether_rxflush().
 * Always returns 0.
 */
int
uether_rxmbuf(struct usb_ether *ue, struct mbuf *m,
    unsigned len)
{
	if_t ifp = ue->ue_ifp;

	UE_LOCK_ASSERT(ue, MA_OWNED);

	/* finalize mbuf */
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len = len;

	/* enqueue for later when the lock can be released */
	(void)mbufq_enqueue(&ue->ue_rxq, m);
	return (0);
}
/*
 * Copy a received frame out of a USB page cache into a freshly
 * allocated mbuf and enqueue it for delivery by uether_rxflush().
 *
 * Consistency fix: the finalize-and-enqueue tail duplicated the body of
 * uether_rxmbuf(); it now delegates to that function so the two paths
 * cannot drift apart.  Behavior is unchanged.
 *
 * Returns 0 on success, 1 if the length cannot fit a single aligned
 * cluster, or ENOMEM if no mbuf could be allocated.
 */
int
uether_rxbuf(struct usb_ether *ue, struct usb_page_cache *pc,
    unsigned offset, unsigned len)
{
	if_t ifp = ue->ue_ifp;
	struct mbuf *m;

	UE_LOCK_ASSERT(ue, MA_OWNED);

	/* Must hold at least an Ethernet header and fit one cluster. */
	if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN)
		return (1);

	m = uether_newbuf();
	if (m == NULL) {
		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
		return (ENOMEM);
	}

	usbd_copy_out(pc, offset, mtod(m, uint8_t *), len);

	/* finalize mbuf and enqueue for later, exactly as uether_rxmbuf() */
	return (uether_rxmbuf(ue, m, len));
}
/*
 * Deliver all queued receive mbufs to the network stack.  The queue is
 * snapshot under the ether lock, which is then dropped so if_input()
 * runs unlocked (inside a network epoch), and re-acquired on return.
 */
void
uether_rxflush(struct usb_ether *ue)
{
	if_t ifp = ue->ue_ifp;
	struct epoch_tracker et;
	struct mbuf *m, *n;

	UE_LOCK_ASSERT(ue, MA_OWNED);

	n = mbufq_flush(&ue->ue_rxq);
	UE_UNLOCK(ue);
	NET_EPOCH_ENTER(et);
	while ((m = n) != NULL) {
		/* Detach the packet from the chain before handing it up. */
		n = STAILQ_NEXT(m, m_stailqpkt);
		m->m_nextpkt = NULL;
		if_input(ifp, m);
	}
	NET_EPOCH_EXIT(et);
	UE_LOCK(ue);
}
/*
* USB net drivers are run by DRIVER_MODULE() thus SI_SUB_DRIVERS,
* SI_ORDER_MIDDLE. Run uether after that.
*/
DECLARE_MODULE(uether, uether_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);
MODULE_VERSION(uether, 1);
diff --git a/sys/dev/usb/usb_pf.c b/sys/dev/usb/usb_pf.c
index 4da59419a7c6..0e7a75d04d6a 100644
--- a/sys/dev/usb/usb_pf.c
+++ b/sys/dev/usb/usb_pf.c
@@ -1,537 +1,531 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 1990, 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from the Stanford/CMU enet packet filter,
* (net/enet.c) distributed as part of 4.3BSD, and code contributed
* to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
* Berkeley Laboratory.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifdef USB_GLOBAL_INCLUDE_FILE
#include USB_GLOBAL_INCLUDE_FILE
#else
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_clone.h>
#include <net/bpf.h>
#include <sys/sysctl.h>
#include <net/route.h>
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usb_busdma.h>
#include <dev/usb/usb_controller.h>
#include <dev/usb/usb_core.h>
#include <dev/usb/usb_process.h>
#include <dev/usb/usb_device.h>
#include <dev/usb/usb_bus.h>
#include <dev/usb/usb_pf.h>
#include <dev/usb/usb_transfer.h>
#endif /* USB_GLOBAL_INCLUDE_FILE */
static void usbpf_init(void *);
static void usbpf_uninit(void *);
static int usbpf_ioctl(if_t, u_long, caddr_t);
static int usbpf_clone_match(struct if_clone *, const char *);
static int usbpf_clone_create(struct if_clone *, char *, size_t,
struct ifc_data *, if_t *);
static int usbpf_clone_destroy(struct if_clone *, if_t, uint32_t);
static struct usb_bus *usbpf_ifname2ubus(const char *);
static uint32_t usbpf_aggregate_xferflags(struct usb_xfer_flags *);
static uint32_t usbpf_aggregate_status(struct usb_xfer_flags_int *);
static int usbpf_xfer_frame_is_read(struct usb_xfer *, uint32_t);
static uint32_t usbpf_xfer_precompute_size(struct usb_xfer *, int);
static struct if_clone *usbpf_cloner;
static const char usbusname[] = "usbus";
SYSINIT(usbpf_init, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, usbpf_init, NULL);
SYSUNINIT(usbpf_uninit, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, usbpf_uninit, NULL);
static void
usbpf_init(void *arg)
{
struct if_clone_addreq req = {
.match_f = usbpf_clone_match,
.create_f = usbpf_clone_create,
.destroy_f = usbpf_clone_destroy,
};
usbpf_cloner = ifc_attach_cloner(usbusname, &req);
}
/*
 * Unregister the cloner and destroy any usbus interfaces that are
 * still attached to existing USB busses.
 */
static void
usbpf_uninit(void *arg)
{
	devclass_t dc;
	device_t *devs;
	struct usb_bus *bus;
	int ndevs;
	int idx;

	if_clone_detach(usbpf_cloner);

	dc = devclass_find(usbusname);
	if (dc == NULL)
		return;
	if (devclass_get_devices(dc, &devs, &ndevs) != 0)
		return;
	for (idx = 0; idx != ndevs; idx++) {
		bus = device_get_softc(devs[idx]);
		if (bus == NULL || bus->ifp == NULL)
			continue;
		usbpf_clone_destroy(usbpf_cloner, bus->ifp, 0);
	}
	free(devs, M_TEMP);
}
/*
 * The usbus pseudo interfaces carry no configuration of their own,
 * so every ioctl is rejected.
 */
static int
usbpf_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	return (EINVAL);
}
/*
 * Translate an interface name like "usbus0" into the softc of the
 * matching USB bus, or NULL when the name does not resolve.
 */
static struct usb_bus *
usbpf_ifname2ubus(const char *ifname)
{
	devclass_t dc;
	device_t busdev;
	int unit;

	/* Name must begin with the "usbus" prefix. */
	if (strncmp(ifname, usbusname, sizeof(usbusname) - 1) != 0)
		return (NULL);
	if (ifc_name2unit(ifname, &unit) != 0 || unit < 0)
		return (NULL);
	dc = devclass_find(usbusname);
	if (dc == NULL)
		return (NULL);
	busdev = devclass_get_device(dc, unit);
	if (busdev == NULL)
		return (NULL);
	return (device_get_softc(busdev));
}
/*
 * Cloner match callback: accept the name only when it refers to an
 * existing USB bus that does not yet have a tap interface.
 */
static int
usbpf_clone_match(struct if_clone *ifc, const char *name)
{
	struct usb_bus *bus;

	bus = usbpf_ifname2ubus(name);
	return (bus != NULL && bus->ifp == NULL);
}
/*
 * Cloner create callback: allocate and attach the BPF tap interface
 * for the USB bus named by "name" (e.g. "usbus0").
 *
 * NOTE(review): failure paths return 1 rather than an errno value —
 * presumably the cloner framework only distinguishes zero/non-zero
 * here; confirm against the if_clone KPI before changing.
 */
static int
usbpf_clone_create(struct if_clone *ifc, char *name, size_t len,
    struct ifc_data *ifd, if_t *ifpp)
{
	int error;
	int unit;
	if_t ifp;
	struct usb_bus *ubus;

	error = ifc_name2unit(name, &unit);
	if (error)
		return (error);
	if (unit < 0)
		return (EINVAL);
	/* The named bus must exist and not be cloned already. */
	ubus = usbpf_ifname2ubus(name);
	if (ubus == NULL)
		return (1);
	if (ubus->ifp != NULL)
		return (1);

	error = ifc_alloc_unit(ifc, &unit);
	if (error) {
		device_printf(ubus->parent, "usbpf: Could not allocate "
		    "instance\n");
		return (error);
	}
	ifp = ubus->ifp = if_alloc(IFT_USB);
-	if (ifp == NULL) {
-		ifc_free_unit(ifc, unit);
-		device_printf(ubus->parent, "usbpf: Could not allocate "
-		    "instance\n");
-		return (ENOSPC);
-	}
	if_setsoftc(ifp, ubus);
	if_initname(ifp, usbusname, unit);
	if_setname(ifp, name);
	if_setioctlfn(ifp, usbpf_ioctl);
	if_attach(ifp);
	if_setflagbits(ifp, IFF_UP, 0);
	rt_ifmsg(ifp, IFF_UP);
	/*
	 * XXX According to the specification of DLT_USB, it indicates
	 * packets beginning with USB setup header. But not sure all
	 * packets would be.
	 */
	bpfattach(ifp, DLT_USB, USBPF_HDR_LEN);
	*ifpp = ifp;
	return (0);
}
/*
 * Cloner destroy callback: detach and free the tap interface and
 * release its unit number.
 */
static int
usbpf_clone_destroy(struct if_clone *ifc, if_t ifp, uint32_t flags)
{
	struct usb_bus *bus;
	int unit;

	bus = if_getsoftc(ifp);
	unit = if_getdunit(ifp);

	/*
	 * Lock USB before clearing the "ifp" pointer, to avoid
	 * clearing the pointer in the middle of a TAP operation:
	 */
	USB_BUS_LOCK(bus);
	bus->ifp = NULL;
	USB_BUS_UNLOCK(bus);

	bpfdetach(ifp);
	if_detach(ifp);
	if_free(ifp);
	ifc_free_unit(ifc, unit);
	return (0);
}
/* Bus attach hook: nothing to set up; just log when booting verbose. */
void
usbpf_attach(struct usb_bus *ubus)
{
	if (bootverbose != 0)
		device_printf(ubus->parent, "usbpf: Attached\n");
}
/* Bus detach hook: tear down the cloned tap interface, if any. */
void
usbpf_detach(struct usb_bus *ubus)
{
	if (ubus->ifp != NULL)
		usbpf_clone_destroy(usbpf_cloner, ubus->ifp, 0);
	if (bootverbose != 0)
		device_printf(ubus->parent, "usbpf: Detached\n");
}
/*
 * Collapse the transfer-flag bitfields into the 32-bit wire-format
 * flag word recorded in the usbpf packet header.
 */
static uint32_t
usbpf_aggregate_xferflags(struct usb_xfer_flags *flags)
{
	uint32_t res = 0;

	res |= (flags->force_short_xfer == 1) ? USBPF_FLAG_FORCE_SHORT_XFER : 0;
	res |= (flags->short_xfer_ok == 1) ? USBPF_FLAG_SHORT_XFER_OK : 0;
	res |= (flags->short_frames_ok == 1) ? USBPF_FLAG_SHORT_FRAMES_OK : 0;
	res |= (flags->pipe_bof == 1) ? USBPF_FLAG_PIPE_BOF : 0;
	res |= (flags->proxy_buffer == 1) ? USBPF_FLAG_PROXY_BUFFER : 0;
	res |= (flags->ext_buffer == 1) ? USBPF_FLAG_EXT_BUFFER : 0;
	res |= (flags->manual_status == 1) ? USBPF_FLAG_MANUAL_STATUS : 0;
	res |= (flags->no_pipe_ok == 1) ? USBPF_FLAG_NO_PIPE_OK : 0;
	res |= (flags->stall_pipe == 1) ? USBPF_FLAG_STALL_PIPE : 0;
	return (res);
}
/*
 * Collapse the internal transfer-status bitfields into the 32-bit
 * wire-format status word recorded in the usbpf packet header.
 */
static uint32_t
usbpf_aggregate_status(struct usb_xfer_flags_int *flags)
{
	uint32_t res = 0;

	res |= (flags->open == 1) ? USBPF_STATUS_OPEN : 0;
	res |= (flags->transferring == 1) ? USBPF_STATUS_TRANSFERRING : 0;
	res |= (flags->did_dma_delay == 1) ? USBPF_STATUS_DID_DMA_DELAY : 0;
	res |= (flags->did_close == 1) ? USBPF_STATUS_DID_CLOSE : 0;
	res |= (flags->draining == 1) ? USBPF_STATUS_DRAINING : 0;
	res |= (flags->started == 1) ? USBPF_STATUS_STARTED : 0;
	res |= (flags->bandwidth_reclaimed == 1) ? USBPF_STATUS_BW_RECLAIMED : 0;
	res |= (flags->control_xfr == 1) ? USBPF_STATUS_CONTROL_XFR : 0;
	res |= (flags->control_hdr == 1) ? USBPF_STATUS_CONTROL_HDR : 0;
	res |= (flags->control_act == 1) ? USBPF_STATUS_CONTROL_ACT : 0;
	res |= (flags->control_stall == 1) ? USBPF_STATUS_CONTROL_STALL : 0;
	res |= (flags->short_frames_ok == 1) ? USBPF_STATUS_SHORT_FRAMES_OK : 0;
	res |= (flags->short_xfer_ok == 1) ? USBPF_STATUS_SHORT_XFER_OK : 0;
#if USB_HAVE_BUSDMA
	res |= (flags->bdma_enable == 1) ? USBPF_STATUS_BDMA_ENABLE : 0;
	res |= (flags->bdma_no_post_sync == 1) ?
	    USBPF_STATUS_BDMA_NO_POST_SYNC : 0;
	res |= (flags->bdma_setup == 1) ? USBPF_STATUS_BDMA_SETUP : 0;
#endif
	res |= (flags->isochronous_xfr == 1) ? USBPF_STATUS_ISOCHRONOUS_XFR : 0;
	res |= (flags->curr_dma_set == 1) ? USBPF_STATUS_CURR_DMA_SET : 0;
	res |= (flags->can_cancel_immed == 1) ?
	    USBPF_STATUS_CAN_CANCEL_IMMED : 0;
	res |= (flags->doing_callback == 1) ? USBPF_STATUS_DOING_CALLBACK : 0;
	return (res);
}
/*
 * Decide whether the given frame of a transfer is a read (data moves
 * into host memory) or a write, for trace purposes.
 */
static int
usbpf_xfer_frame_is_read(struct usb_xfer *xfer, uint32_t frame)
{
	/*
	 * Special case: frame zero of a control transfer that still
	 * carries the setup header.  In device mode the controller
	 * writes the header to memory (a read from our side); in host
	 * mode it reads the header from memory.
	 */
	if (frame == 0 && xfer->flags_int.control_xfr != 0 &&
	    xfer->flags_int.control_hdr != 0) {
		if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
			return (1);
		return (0);
	}
	return (USB_GET_DATA_ISREAD(xfer));
}
/*
 * Compute the total trace record size for a transfer: fixed packet
 * header, one frame header per frame, plus aligned payload for every
 * frame whose data is present at this tap point (written data at
 * submit time, read data at completion time).
 */
static uint32_t
usbpf_xfer_precompute_size(struct usb_xfer *xfer, int type)
{
	uint32_t len;
	uint32_t nframes;
	uint32_t i;
	int isread;

	if (type == USBPF_XFERTAP_SUBMIT)
		nframes = xfer->nframes;
	else
		nframes = xfer->aframes;

	/* Fixed headers first. */
	len = USBPF_HDR_LEN + (USBPF_FRAME_HDR_LEN * nframes);

	/* Add aligned payload for each frame carrying data. */
	for (i = 0; i != nframes; i++) {
		isread = usbpf_xfer_frame_is_read(xfer, i);
		if ((isread && type != USBPF_XFERTAP_SUBMIT) ||
		    (!isread && type == USBPF_XFERTAP_SUBMIT))
			len += USBPF_FRAME_ALIGN(xfer->frlengths[i]);
	}
	return (len);
}
/*
 * Tap one USB transfer into BPF.  Builds a flat trace record (packet
 * header, then per-frame header plus optionally the frame payload)
 * and hands it to the bus' usbus interface.  "type" distinguishes
 * submit-time from completion-time taps and selects which frames and
 * payload directions are recorded.
 */
void
usbpf_xfertap(struct usb_xfer *xfer, int type)
{
	struct usb_bus *bus;
	struct usbpf_pkthdr *up;
	struct usbpf_framehdr *uf;
	usb_frlength_t offset;
	uint32_t totlen;
	uint32_t frame;
	uint32_t temp;
	uint32_t nframes;
	uint32_t x;
	uint8_t *buf;
	uint8_t *ptr;

	bus = xfer->xroot->bus;

	/* sanity checks: skip entirely when nobody is listening */
	if (bus->ifp == NULL || !bpf_peers_present_if(bus->ifp))
		return;

	totlen = usbpf_xfer_precompute_size(xfer, type);

	/* must match the frame count used in the size precomputation */
	if (type == USBPF_XFERTAP_SUBMIT)
		nframes = xfer->nframes;
	else
		nframes = xfer->aframes;

	/*
	 * XXX TODO XXX
	 *
	 * When BPF supports it we could pass a fragmented array of
	 * buffers avoiding the data copy operation here.
	 */
	buf = ptr = malloc(totlen, M_TEMP, M_NOWAIT);
	if (buf == NULL) {
		/* best effort: drop this trace record on allocation failure */
		device_printf(bus->parent, "usbpf: Out of memory\n");
		return;
	}

	up = (struct usbpf_pkthdr *)ptr;
	ptr += USBPF_HDR_LEN;

	/* fill out header (multi-byte fields are little-endian on the wire) */
	temp = device_get_unit(bus->bdev);
	up->up_totlen = htole32(totlen);
	up->up_busunit = htole32(temp);
	up->up_address = xfer->xroot->udev->device_index;
	if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
		up->up_mode = USBPF_MODE_DEVICE;
	else
		up->up_mode = USBPF_MODE_HOST;
	up->up_type = type;
	up->up_xfertype = xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE;
	temp = usbpf_aggregate_xferflags(&xfer->flags);
	up->up_flags = htole32(temp);
	temp = usbpf_aggregate_status(&xfer->flags_int);
	up->up_status = htole32(temp);
	temp = xfer->error;
	up->up_error = htole32(temp);
	temp = xfer->interval;
	up->up_interval = htole32(temp);
	up->up_frames = htole32(nframes);
	temp = xfer->max_packet_size;
	up->up_packet_size = htole32(temp);
	temp = xfer->max_packet_count;
	up->up_packet_count = htole32(temp);
	temp = xfer->endpointno;
	up->up_endpoint = htole32(temp);
	up->up_speed = xfer->xroot->udev->speed;

	/* clear reserved area */
	memset(up->up_reserved, 0, sizeof(up->up_reserved));

	/* init offset and frame */
	offset = 0;
	frame = 0;

	/* iterate all the USB frames and copy data, if any */
	for (x = 0; x != nframes; x++) {
		uint32_t length;
		int isread;

		/* get length */
		length = xfer->frlengths[x];

		/* get frame header pointer */
		uf = (struct usbpf_framehdr *)ptr;
		ptr += USBPF_FRAME_HDR_LEN;

		/* fill out packet header */
		uf->length = htole32(length);
		uf->flags = 0;

		/* get information about data read/write */
		isread = usbpf_xfer_frame_is_read(xfer, x);

		/*
		 * check if we need to copy any data: read payload only
		 * exists at completion, write payload only at submit
		 */
		if (isread) {
			if (type == USBPF_XFERTAP_SUBMIT)
				length = 0;
			else {
				uf->flags |= htole32(
				    USBPF_FRAMEFLAG_DATA_FOLLOWS);
			}
		} else {
			if (type != USBPF_XFERTAP_SUBMIT)
				length = 0;
			else {
				uf->flags |= htole32(
				    USBPF_FRAMEFLAG_DATA_FOLLOWS);
			}
		}

		/* check if data is read direction */
		if (isread)
			uf->flags |= htole32(USBPF_FRAMEFLAG_READ);

		/* copy USB data, if any */
		if (length != 0) {
			/* copy data */
			usbd_copy_out(&xfer->frbuffers[frame],
			    offset, ptr, length);

			/* align length */
			temp = USBPF_FRAME_ALIGN(length);

			/* zero pad up to the alignment boundary */
			if (temp != length)
				memset(ptr + length, 0, temp - length);

			ptr += temp;
		}
		/*
		 * NOTE(review): isochronous transfers apparently keep
		 * all frames in frbuffers[0] and advance by offset,
		 * while others use one buffer per frame — confirm
		 * against usbd_copy_out()/transfer layout.
		 */
		if (xfer->flags_int.isochronous_xfr) {
			offset += usbd_xfer_old_frame_length(xfer, x);
		} else {
			frame ++;
		}
	}

	bpf_tap_if(bus->ifp, buf, totlen);

	free(buf, M_TEMP);
}
diff --git a/sys/dev/vge/if_vge.c b/sys/dev/vge/if_vge.c
index 37606f28ba17..9f71049a150b 100644
--- a/sys/dev/vge/if_vge.c
+++ b/sys/dev/vge/if_vge.c
@@ -1,2933 +1,2927 @@
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) 2004
* Bill Paul <wpaul@windriver.com>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
/*
* VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
*
* Written by Bill Paul <wpaul@windriver.com>
* Senior Networking Software Engineer
* Wind River Systems
*/
/*
* The VIA Networking VT6122 is a 32bit, 33/66Mhz PCI device that
* combines a tri-speed ethernet MAC and PHY, with the following
* features:
*
* o Jumbo frame support up to 16K
* o Transmit and receive flow control
* o IPv4 checksum offload
* o VLAN tag insertion and stripping
* o TCP large send
* o 64-bit multicast hash table filter
* o 64 entry CAM filter
* o 16K RX FIFO and 48K TX FIFO memory
* o Interrupt moderation
*
* The VT6122 supports up to four transmit DMA queues. The descriptors
* in the transmit ring can address up to 7 data fragments; frames which
* span more than 7 data buffers must be coalesced, but in general the
* BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
* long. The receive descriptors address only a single buffer.
*
* There are two peculiar design issues with the VT6122. One is that
* receive data buffers must be aligned on a 32-bit boundary. This is
* not a problem where the VT6122 is used as a LOM device in x86-based
* systems, but on architectures that generate unaligned access traps, we
* have to do some copying.
*
* The other issue has to do with the way 64-bit addresses are handled.
* The DMA descriptors only allow you to specify 48 bits of addressing
* information. The remaining 16 bits are specified using one of the
* I/O registers. If you only have a 32-bit system, then this isn't
* an issue, but if you have a 64-bit system and more than 4GB of
* memory, you must have to make sure your network data buffers reside
* in the same 48-bit 'segment.'
*
* Special thanks to Ryan Fu at VIA Networking for providing documentation
* and sample NICs for testing.
*/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/bpf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
MODULE_DEPEND(vge, pci, 1, 1, 1);
MODULE_DEPEND(vge, ether, 1, 1, 1);
MODULE_DEPEND(vge, miibus, 1, 1, 1);
/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
#include <dev/vge/if_vgereg.h>
#include <dev/vge/if_vgevar.h>
/* Checksum offload capabilities advertised by this driver. */
#define VGE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)

/* Tunables */
/* Set hw.vge.msi_disable=1 to force legacy INTx interrupts. */
static int msi_disable = 0;
TUNABLE_INT("hw.vge.msi_disable", &msi_disable);

/*
 * The SQE error counter of MIB seems to report bogus value.
 * Vendor's workaround does not seem to work on PCIe based
 * controllers. Disable it until we find better workaround.
 */
#undef VGE_ENABLE_SQEERR

/*
 * Various supported device vendors/types and their names.
 * The all-zero entry terminates the table for vge_probe().
 */
static struct vge_type vge_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_61XX,
	    "VIA Networking Velocity Gigabit Ethernet" },
	{ 0, 0, NULL }
};
static int vge_attach(device_t);
static int vge_detach(device_t);
static int vge_probe(device_t);
static int vge_resume(device_t);
static int vge_shutdown(device_t);
static int vge_suspend(device_t);
static void vge_cam_clear(struct vge_softc *);
static int vge_cam_set(struct vge_softc *, uint8_t *);
static void vge_clrwol(struct vge_softc *);
static void vge_discard_rxbuf(struct vge_softc *, int);
static int vge_dma_alloc(struct vge_softc *);
static void vge_dma_free(struct vge_softc *);
static void vge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
#ifdef VGE_EEPROM
static void vge_eeprom_getword(struct vge_softc *, int, uint16_t *);
#endif
static int vge_encap(struct vge_softc *, struct mbuf **);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void
vge_fixup_rx(struct mbuf *);
#endif
static void vge_freebufs(struct vge_softc *);
static void vge_ifmedia_sts(if_t, struct ifmediareq *);
static int vge_ifmedia_upd(if_t);
static int vge_ifmedia_upd_locked(struct vge_softc *);
static void vge_init(void *);
static void vge_init_locked(struct vge_softc *);
static void vge_intr(void *);
static void vge_intr_holdoff(struct vge_softc *);
static int vge_ioctl(if_t, u_long, caddr_t);
static void vge_link_statchg(void *);
static int vge_miibus_readreg(device_t, int, int);
static int vge_miibus_writereg(device_t, int, int, int);
static void vge_miipoll_start(struct vge_softc *);
static void vge_miipoll_stop(struct vge_softc *);
static int vge_newbuf(struct vge_softc *, int);
static void vge_read_eeprom(struct vge_softc *, caddr_t, int, int, int);
static void vge_reset(struct vge_softc *);
static int vge_rx_list_init(struct vge_softc *);
static int vge_rxeof(struct vge_softc *, int);
static void vge_rxfilter(struct vge_softc *);
static void vge_setmedia(struct vge_softc *);
static void vge_setvlan(struct vge_softc *);
static void vge_setwol(struct vge_softc *);
static void vge_start(if_t);
static void vge_start_locked(if_t);
static void vge_stats_clear(struct vge_softc *);
static void vge_stats_update(struct vge_softc *);
static void vge_stop(struct vge_softc *);
static void vge_sysctl_node(struct vge_softc *);
static int vge_tx_list_init(struct vge_softc *);
static void vge_txeof(struct vge_softc *);
static void vge_watchdog(void *);
/* newbus method table: device lifecycle plus MII register access. */
static device_method_t vge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vge_probe),
	DEVMETHOD(device_attach,	vge_attach),
	DEVMETHOD(device_detach,	vge_detach),
	DEVMETHOD(device_suspend,	vge_suspend),
	DEVMETHOD(device_resume,	vge_resume),
	DEVMETHOD(device_shutdown,	vge_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vge_miibus_writereg),

	DEVMETHOD_END
};

static driver_t vge_driver = {
	"vge",
	vge_methods,
	sizeof(struct vge_softc)
};

/* Attach vge to the PCI bus and hang miibus off vge for the PHY. */
DRIVER_MODULE(vge, pci, vge_driver, 0, 0);
DRIVER_MODULE(miibus, vge, miibus_driver, 0, 0);
#ifdef VGE_EEPROM
/*
* Read a word of data stored in the EEPROM at address 'addr.'
*/
static void
vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest)
{
	int i;
	uint16_t word = 0;

	/*
	 * Enter EEPROM embedded programming mode. In order to
	 * access the EEPROM at all, we first have to set the
	 * EELOAD bit in the CHIPCFG2 register.
	 */
	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

	/* Select the address of the word we want to read */
	CSR_WRITE_1(sc, VGE_EEADDR, addr);

	/* Issue read command */
	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

	/* Wait for the done bit to be set. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
			break;
	}

	if (i == VGE_TIMEOUT) {
		/* On timeout report zero so the caller gets a defined value. */
		device_printf(sc->vge_dev, "EEPROM read timed out\n");
		*dest = 0;
		return;
	}

	/* Read the result */
	word = CSR_READ_2(sc, VGE_EERDDAT);

	/* Turn off EEPROM access mode. */
	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

	*dest = word;
}
#endif
/*
* Read a sequence of words from the EEPROM.
*/
static void
vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int i;
#ifdef VGE_EEPROM
	uint16_t word = 0, *ptr;

	/* Read 'cnt' 16-bit words starting at EEPROM word offset 'off'. */
	for (i = 0; i < cnt; i++) {
		vge_eeprom_getword(sc, off + i, &word);
		ptr = (uint16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
#else
	/*
	 * NOTE(review): without VGE_EEPROM this ignores off/cnt/swap
	 * and always copies the 6-byte station address out of the
	 * PAR0 registers — the only use the driver appears to make
	 * of this function.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
#endif
}
/*
 * Stop the chip's MII auto-poll state machine and wait until the MII
 * interface reports idle, so manual PHY register access is safe.
 */
static void
vge_miipoll_stop(struct vge_softc *sc)
{
	int i;

	/* Clearing MIICMD requests the poller to stop. */
	CSR_WRITE_1(sc, VGE_MIICMD, 0);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		/* IIDL is set once the MII interface is idle. */
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
}
/*
 * (Re)enable the chip's MII auto-poll state machine: idle the MII
 * interface, select the PHY status poll register, then start
 * automatic polling and verify it came up.
 */
static void
vge_miipoll_start(struct vge_softc *sc)
{
	int i;

	/* First, make sure we're idle. */

	CSR_WRITE_1(sc, VGE_MIICMD, 0);
	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
		return;
	}

	/* Now enable auto poll mode. */

	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

	/* And make sure it started. */

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		/* IIDL clears once polling is running again. */
		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "failed to start MII autopoll\n");
}
/*
 * miibus read method: read one PHY register by driving the MII
 * interface manually.  Auto-polling is paused for the duration.
 * Returns the register value, or 0 on timeout.
 */
static int
vge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vge_softc *sc;
	uint16_t val;
	int n;

	sc = device_get_softc(dev);
	val = 0;

	vge_miipoll_stop(sc);

	/* Select the register and kick off the read cycle. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

	/* The command bit self-clears when the cycle completes. */
	for (n = 0; n < VGE_TIMEOUT; n++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}

	if (n == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "MII read timed out\n");
	else
		val = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);
	return (val);
}
/*
 * miibus write method: write one PHY register by driving the MII
 * interface manually.  Auto-polling is paused for the duration.
 * Returns 0 on success or EIO on timeout.
 */
static int
vge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct vge_softc *sc;
	int n;
	int res;

	sc = device_get_softc(dev);
	res = 0;

	vge_miipoll_stop(sc);

	/* Select the register, load the data, start the write cycle. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);
	CSR_WRITE_2(sc, VGE_MIIDATA, data);
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* The command bit self-clears when the cycle completes. */
	for (n = 0; n < VGE_TIMEOUT; n++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}

	if (n == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "MII write timed out\n");
		res = EIO;
	}

	vge_miipoll_start(sc);
	return (res);
}
/*
 * Invalidate every entry in the 64-entry CAM address filter and the
 * CAM VLAN filter, and reset the next-free-entry index.
 */
static void
vge_cam_clear(struct vge_softc *sc)
{
	int i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * desired entries will be enabled as we fill the filter in.
	 */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Return the CAM page selector to the multicast-hash page. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->vge_camidx = 0;
}
/*
 * Program one MAC address into the next free CAM filter slot and set
 * its valid (mask) bit.  Returns 0 on success, ENOSPC when the CAM
 * is full, or EIO when the chip does not complete the write.
 */
static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
	int i, error = 0;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return (ENOSPC);

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wake for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "setting CAM filter failed\n");
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
	    1<<(sc->vge_camidx & 7));

	sc->vge_camidx++;

fail:
	/* Turn off access to CAM; reselect the multicast-hash page. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return (error);
}
/*
 * Program hardware VLAN tag stripping to match the interface's
 * IFCAP_VLAN_HWTAGGING capability setting.
 */
static void
vge_setvlan(struct vge_softc *sc)
{
	uint8_t rxcfg;

	VGE_LOCK_ASSERT(sc);

	rxcfg = CSR_READ_1(sc, VGE_RXCFG);
	if ((if_getcapenable(sc->vge_ifp) & IFCAP_VLAN_HWTAGGING) != 0)
		rxcfg |= VGE_VTAG_OPT2;
	else
		rxcfg &= ~VGE_VTAG_OPT2;
	CSR_WRITE_1(sc, VGE_RXCFG, rxcfg);
}
/*
 * if_foreach_llmaddr() callback: program one multicast address into
 * the CAM.  Returns 0 (stop counting) once the CAM is full.
 */
static u_int
vge_set_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct vge_softc *sc = arg;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return (0);

	(void)vge_cam_set(sc, LLADDR(sdl));
	return (1);
}
/*
 * if_foreach_llmaddr() callback: fold one multicast address into the
 * 64-bit hash filter held in two 32-bit words.
 */
static u_int
vge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *hashes = arg;
	uint32_t crc;

	/* Top 6 bits of the big-endian CRC select one of 64 hash bits. */
	crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
	hashes[crc >> 5] |= 1 << (crc & 31);
	return (1);
}
/*
* Program the multicast filter. We use the 64-entry CAM filter
* for perfect filtering. If there's more than 64 multicast addresses,
* we use the hash filter instead.
*/
static void
vge_rxfilter(struct vge_softc *sc)
{
	if_t ifp;
	uint32_t hashes[2];
	uint8_t rxcfg;

	VGE_LOCK_ASSERT(sc);

	/* First, zot all the multicast entries. */
	hashes[0] = 0;
	hashes[1] = 0;

	rxcfg = CSR_READ_1(sc, VGE_RXCTL);
	rxcfg &= ~(VGE_RXCTL_RX_MCAST | VGE_RXCTL_RX_BCAST |
	    VGE_RXCTL_RX_PROMISC);
	/*
	 * Always allow VLAN oversized frames and frames for
	 * this host.
	 */
	rxcfg |= VGE_RXCTL_RX_GIANT | VGE_RXCTL_RX_UCAST;

	ifp = sc->vge_ifp;
	if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
		rxcfg |= VGE_RXCTL_RX_BCAST;
	if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((if_getflags(ifp) & IFF_PROMISC) != 0)
			rxcfg |= VGE_RXCTL_RX_PROMISC;
		if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
			/* Accept all multicast via an all-ones hash. */
			hashes[0] = 0xFFFFFFFF;
			hashes[1] = 0xFFFFFFFF;
		}
		goto done;
	}

	/* Program the CAM with the current multicast membership. */
	vge_cam_clear(sc);

	/* Now program new ones */
	if_foreach_llmaddr(ifp, vge_set_maddr, sc);

	/* If there were too many addresses, use the hash filter. */
	if (sc->vge_camidx == VGE_CAM_MAXADDRS) {
		vge_cam_clear(sc);
		if_foreach_llmaddr(ifp, vge_hash_maddr, hashes);
	}

done:
	/* Enable multicast reception only when the hash is non-empty. */
	if (hashes[0] != 0 || hashes[1] != 0)
		rxcfg |= VGE_RXCTL_RX_MCAST;
	CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VGE_RXCTL, rxcfg);
}
/*
 * Soft-reset the controller and wait for the reset bit to self-clear;
 * on timeout fall back to a forced stop before settling.
 */
static void
vge_reset(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "soft reset timed out\n");
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
		DELAY(2000);
	}

	/* Give the chip time to settle after reset. */
	DELAY(5000);
}
/*
* Probe for a VIA gigabit chip. Check the PCI vendor and device
* IDs against our list and return a device name if we find a match.
*/
/*
 * Probe for a VIA gigabit chip: scan the device table for a matching
 * PCI vendor/device ID pair and set the device description on a hit.
 */
static int
vge_probe(device_t dev)
{
	struct vge_type *t;

	for (t = vge_devs; t->vge_name != NULL; t++) {
		if (pci_get_vendor(dev) == t->vge_vid &&
		    pci_get_device(dev) == t->vge_did) {
			device_set_desc(dev, t->vge_name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}
/*
* Map a single buffer address.
*/
struct vge_dmamap_arg {
	bus_addr_t vge_busaddr;
};

/*
 * bus_dmamap_load() callback: record the bus address of the single
 * DMA segment.  On error the argument is left untouched.
 */
static void
vge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct vge_dmamap_arg *ctx = arg;

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	ctx->vge_busaddr = segs[0].ds_addr;
}
/*
 * Create all DMA tags, allocate and load the Tx/Rx descriptor rings,
 * and create the per-buffer DMA maps.  If the rings end up crossing
 * a 4GB boundary the whole allocation is redone in 32-bit DMA mode.
 * Returns 0 on success; on failure the caller runs vge_dma_free().
 */
static int
vge_dma_alloc(struct vge_softc *sc)
{
	struct vge_dmamap_arg ctx;
	struct vge_txdesc *txd;
	struct vge_rxdesc *rxd;
	bus_addr_t lowaddr, tx_ring_end, rx_ring_end;
	int error, i;

	/*
	 * It seems old PCI controllers do not support DAC. DAC
	 * configuration can be enabled by accessing VGE_CHIPCFG3
	 * register but honor EEPROM configuration instead of
	 * blindly overriding DAC configuration. PCIe based
	 * controllers are supposed to support 64bit DMA so enable
	 * 64bit DMA on these controllers.
	 */
	if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
		lowaddr = BUS_SPACE_MAXADDR;
	else
		lowaddr = BUS_SPACE_MAXADDR_32BIT;

again:
	/* Create parent ring tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vge_cdata.vge_ring_tag);
	if (error != 0) {
		device_printf(sc->vge_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
	    VGE_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VGE_TX_LIST_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VGE_TX_LIST_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vge_cdata.vge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->vge_dev,
		    "could not allocate Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
	    VGE_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VGE_RX_LIST_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VGE_RX_LIST_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vge_cdata.vge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->vge_dev,
		    "could not allocate Rx ring DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag,
	    (void **)&sc->vge_rdata.vge_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vge_cdata.vge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->vge_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	ctx.vge_busaddr = 0;
	error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag,
	    sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring,
	    VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.vge_busaddr == 0) {
		device_printf(sc->vge_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag,
	    (void **)&sc->vge_rdata.vge_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vge_cdata.vge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->vge_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}
	ctx.vge_busaddr = 0;
	error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag,
	    sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring,
	    VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.vge_busaddr == 0) {
		device_printf(sc->vge_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr;

	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
	tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ;
	rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ;
	if ((VGE_ADDR_HI(tx_ring_end) !=
	    VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) ||
	    (VGE_ADDR_HI(rx_ring_end) !=
	    VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) ||
	    VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) {
		device_printf(sc->vge_dev, "4GB boundary crossed, "
		    "switching to 32bit DMA address mode.\n");
		vge_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}

	/* Buffer DMA is restricted to the 48-bit address window on PCIe. */
	if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
		lowaddr = VGE_BUF_DMA_MAXADDR;
	else
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	/* Create parent buffer tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vge_cdata.vge_buffer_tag);
	if (error != 0) {
		device_printf(sc->vge_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * VGE_MAXTXSEGS,	/* maxsize */
	    VGE_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vge_cdata.vge_tx_tag);
	if (error != 0) {
		device_printf(sc->vge_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
	    VGE_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vge_cdata.vge_rx_tag);
	if (error != 0) {
		device_printf(sc->vge_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		txd = &sc->vge_cdata.vge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->vge_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
	    &sc->vge_cdata.vge_rx_sparemap)) != 0) {
		device_printf(sc->vge_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		rxd = &sc->vge_cdata.vge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->vge_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}
/*
 * Tear down all DMA state created by vge_dma_alloc().  Safe to call
 * with partially-initialized state (each resource is checked before
 * being released), so it doubles as the attach-failure cleanup path.
 */
static void
vge_dma_free(struct vge_softc *sc)
{
	struct vge_txdesc *txd;
	struct vge_rxdesc *rxd;
	int i;

	/* Tx ring: unload, free the descriptor memory, then destroy the tag. */
	if (sc->vge_cdata.vge_tx_ring_tag != NULL) {
		if (sc->vge_rdata.vge_tx_ring_paddr)
			bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag,
			    sc->vge_cdata.vge_tx_ring_map);
		if (sc->vge_rdata.vge_tx_ring)
			bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag,
			    sc->vge_rdata.vge_tx_ring,
			    sc->vge_cdata.vge_tx_ring_map);
		sc->vge_rdata.vge_tx_ring = NULL;
		sc->vge_rdata.vge_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag);
		sc->vge_cdata.vge_tx_ring_tag = NULL;
	}
	/* Rx ring: same sequence as the Tx ring above. */
	if (sc->vge_cdata.vge_rx_ring_tag != NULL) {
		if (sc->vge_rdata.vge_rx_ring_paddr)
			bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag,
			    sc->vge_cdata.vge_rx_ring_map);
		if (sc->vge_rdata.vge_rx_ring)
			bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag,
			    sc->vge_rdata.vge_rx_ring,
			    sc->vge_cdata.vge_rx_ring_map);
		sc->vge_rdata.vge_rx_ring = NULL;
		sc->vge_rdata.vge_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag);
		sc->vge_cdata.vge_rx_ring_tag = NULL;
	}
	/* Tx buffers: destroy the per-descriptor maps before the tag. */
	if (sc->vge_cdata.vge_tx_tag != NULL) {
		for (i = 0; i < VGE_TX_DESC_CNT; i++) {
			txd = &sc->vge_cdata.vge_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag);
		sc->vge_cdata.vge_tx_tag = NULL;
	}
	/* Rx buffers: per-descriptor maps plus the spare map. */
	if (sc->vge_cdata.vge_rx_tag != NULL) {
		for (i = 0; i < VGE_RX_DESC_CNT; i++) {
			rxd = &sc->vge_cdata.vge_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->vge_cdata.vge_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
			    sc->vge_cdata.vge_rx_sparemap);
			sc->vge_cdata.vge_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag);
		sc->vge_cdata.vge_rx_tag = NULL;
	}
	/* Finally destroy the two parent tags (children are gone by now). */
	if (sc->vge_cdata.vge_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag);
		sc->vge_cdata.vge_buffer_tag = NULL;
	}
	if (sc->vge_cdata.vge_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag);
		sc->vge_cdata.vge_ring_tag = NULL;
	}
}
/*
* Attach the interface. Allocate softc structures, do ifmedia
* setup and ethernet/BPF attach.
*/
static int
vge_attach(device_t dev)
{
	u_char eaddr[ETHER_ADDR_LEN];
	struct vge_softc *sc;
	if_t ifp;
	int error = 0, cap, i, msic, rid;

	sc = device_get_softc(dev);
	sc->vge_dev = dev;

	mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = PCIR_BAR(1);
	sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->vge_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	/* PCIe parts lack jumbo frame support; plain PCI parts have it. */
	if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) {
		sc->vge_flags |= VGE_FLAG_PCIE;
		sc->vge_expcap = cap;
	} else
		sc->vge_flags |= VGE_FLAG_JUMBO;
	if (pci_find_cap(dev, PCIY_PMG, &cap) == 0) {
		sc->vge_flags |= VGE_FLAG_PMCAP;
		sc->vge_pmcap = cap;
	}
	rid = 0;
	msic = pci_msi_count(dev);
	if (msi_disable == 0 && msic > 0) {
		/* Only a single MSI message is ever used. */
		msic = 1;
		if (pci_alloc_msi(dev, &msic) == 0) {
			if (msic == 1) {
				sc->vge_flags |= VGE_FLAG_MSI;
				device_printf(dev, "Using %d MSI message\n",
				    msic);
				rid = 1;
			} else
				pci_release_msi(dev);
		}
	}

	/* Allocate interrupt (shareable only for legacy INTx). */
	sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    ((sc->vge_flags & VGE_FLAG_MSI) ? 0 : RF_SHAREABLE) | RF_ACTIVE);
	if (sc->vge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	vge_reset(sc);
	/* Reload EEPROM. */
	CSR_WRITE_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
			break;
	}
	if (i == VGE_TIMEOUT)
		device_printf(dev, "EEPROM reload timed out\n");
	/*
	 * Clear PACPI as EEPROM reload will set the bit. Otherwise
	 * MAC will receive magic packet which in turn confuses
	 * controller.
	 */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);

	/*
	 * Get station address from the EEPROM.
	 */
	vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);
	/*
	 * Save configured PHY address.
	 * It seems the PHY address of PCIe controllers just
	 * reflects media jump strapping status so we assume the
	 * internal PHY address of PCIe controller is at 1.
	 */
	if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
		sc->vge_phyaddr = 1;
	else
		sc->vge_phyaddr = CSR_READ_1(sc, VGE_MIICFG) &
		    VGE_MIICFG_PHYADDR;
	/* Clear WOL and take hardware from powerdown. */
	vge_clrwol(sc);
	vge_sysctl_node(sc);
	error = vge_dma_alloc(sc);
	if (error)
		goto fail;

	/* if_alloc(IFT_ETHER) cannot fail, so no NULL check is needed. */
	ifp = sc->vge_ifp = if_alloc(IFT_ETHER);

	vge_miipoll_start(sc);
	/* Do MII setup */
	error = mii_attach(dev, &sc->vge_miibus, ifp, vge_ifmedia_upd,
	    vge_ifmedia_sts, BMSR_DEFCAPMASK, sc->vge_phyaddr, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, vge_ioctl);
	if_setcapabilities(ifp, IFCAP_VLAN_MTU);
	if_setstartfn(ifp, vge_start);
	if_sethwassist(ifp, VGE_CSUM_FEATURES);
	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM |
	    IFCAP_VLAN_HWTAGGING, 0);
	if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0)
		if_setcapabilitiesbit(ifp, IFCAP_WOL, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif
	if_setinitfn(ifp, vge_init);
	if_setsendqlen(ifp, VGE_TX_DESC_CNT - 1);
	if_setsendqready(ifp);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Tell the upper layer(s) we support long frames. */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
	    NULL, vge_intr, sc, &sc->vge_intrhand);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		vge_detach(dev);

	return (error);
}
/*
* Shutdown hardware and free up resources. This can be called any
* time after the mutex has been initialized. It is called in both
* the error case in attach and the normal detach case so it needs
* to be careful about only freeing resources that have actually been
* allocated.
*/
static int
vge_detach(device_t dev)
{
	struct vge_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
	ifp = sc->vge_ifp;

#ifdef DEVICE_POLLING
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		/* Stop the hardware under the lock, then drain the timer. */
		VGE_LOCK(sc);
		vge_stop(sc);
		VGE_UNLOCK(sc);
		callout_drain(&sc->vge_watchdog);
	}
	if (sc->vge_miibus)
		device_delete_child(dev, sc->vge_miibus);
	bus_generic_detach(dev);

	if (sc->vge_intrhand)
		bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
	if (sc->vge_irq)
		/* MSI interrupts use rid 1, legacy INTx uses rid 0. */
		bus_release_resource(dev, SYS_RES_IRQ,
		    sc->vge_flags & VGE_FLAG_MSI ? 1 : 0, sc->vge_irq);
	if (sc->vge_flags & VGE_FLAG_MSI)
		pci_release_msi(dev);
	if (sc->vge_res)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(1), sc->vge_res);
	if (ifp)
		if_free(ifp);

	vge_dma_free(sc);
	mtx_destroy(&sc->vge_mtx);

	return (0);
}
/*
 * Recycle the Rx descriptor at slot 'prod' without handing the chip a
 * fresh mbuf; the existing buffer will simply be reused.
 */
static void
vge_discard_rxbuf(struct vge_softc *sc, int prod)
{
	struct vge_rxdesc *d;
	int n;

	d = &sc->vge_cdata.vge_rxdesc[prod];
	d->rx_desc->vge_sts = 0;
	d->rx_desc->vge_ctl = 0;

	/*
	 * The hardware wants its Rx ring replenished in groups of
	 * VGE_RXCHUNK descriptors, so only flip the OWN bits once a
	 * whole chunk boundary has been reached; walk backwards over
	 * the chunk via the rxd_prev links.
	 */
	if ((prod % VGE_RXCHUNK) != (VGE_RXCHUNK - 1))
		return;
	for (n = 0; n < VGE_RXCHUNK; n++) {
		d->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
		d = d->rxd_prev;
	}
	sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
}
/*
 * Load a fresh mbuf cluster into Rx slot 'prod'.  The spare DMA map is
 * loaded first so the old buffer is only torn down once the new load
 * has succeeded; on any failure the ring is left untouched.
 * Returns 0 on success or ENOBUFS.
 */
static int
vge_newbuf(struct vge_softc *sc, int prod)
{
	struct vge_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int i, nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	/*
	 * This is part of an evil trick to deal with strict-alignment
	 * architectures. The VIA chip requires RX buffers to be aligned
	 * on 32-bit boundaries, but that will hose strict-alignment
	 * architectures. To get around this, we leave some empty space
	 * at the start of each buffer and for non-strict-alignment hosts,
	 * we copy the buffer back two bytes to achieve word alignment.
	 * This is slightly more efficient than allocating a new buffer,
	 * copying the contents, and discarding the old buffer.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, VGE_RX_BUF_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag,
	    sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->vge_cdata.vge_rxdesc[prod];
	if (rxd->rx_m != NULL) {
		/* Release the old buffer's DMA state before replacing it. */
		bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the (now-loaded) spare map into the slot; old map becomes spare. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap;
	sc->vge_cdata.vge_rx_sparemap = map;
	bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	rxd->rx_desc->vge_sts = 0;
	rxd->rx_desc->vge_ctl = 0;
	rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
	rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) |
	    (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I);

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */
	if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
		for (i = VGE_RXCHUNK; i > 0; i--) {
			rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
			rxd = rxd->rxd_prev;
		}
		sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
	}

	return (0);
}
/*
 * Reset the Tx ring: clear the descriptors, rewind the producer and
 * consumer indices and re-link each software descriptor to its
 * hardware slot.  Always succeeds (returns 0).
 */
static int
vge_tx_list_init(struct vge_softc *sc)
{
	struct vge_ring_data *rd = &sc->vge_rdata;
	int slot;

	VGE_LOCK_ASSERT(sc);

	sc->vge_cdata.vge_tx_prodidx = 0;
	sc->vge_cdata.vge_tx_considx = 0;
	sc->vge_cdata.vge_tx_cnt = 0;

	bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ);
	for (slot = 0; slot < VGE_TX_DESC_CNT; slot++) {
		struct vge_txdesc *txd = &sc->vge_cdata.vge_txdesc[slot];

		txd->tx_m = NULL;
		txd->tx_desc = &rd->vge_tx_ring[slot];
	}
	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
	    sc->vge_cdata.vge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
/*
 * Reset the Rx ring: rewind indices, chain each descriptor to its
 * predecessor (the ring wraps, so slot 0 points at the last slot) and
 * load a fresh mbuf into every slot.  Returns 0 or ENOBUFS.
 */
static int
vge_rx_list_init(struct vge_softc *sc)
{
	struct vge_ring_data *rd = &sc->vge_rdata;
	int slot;

	VGE_LOCK_ASSERT(sc);

	sc->vge_cdata.vge_rx_prodidx = 0;
	sc->vge_cdata.vge_head = NULL;
	sc->vge_cdata.vge_tail = NULL;
	sc->vge_cdata.vge_rx_commit = 0;

	bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ);
	for (slot = 0; slot < VGE_RX_DESC_CNT; slot++) {
		struct vge_rxdesc *rxd = &sc->vge_cdata.vge_rxdesc[slot];

		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->vge_rx_ring[slot];
		/* Wrap-around predecessor link used for chunked OWN updates. */
		rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[
		    (slot + VGE_RX_DESC_CNT - 1) % VGE_RX_DESC_CNT];
		if (vge_newbuf(sc, slot) != 0)
			return (ENOBUFS);
	}
	bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
	    sc->vge_cdata.vge_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* vge_newbuf() bumped the commit counter; start from scratch. */
	sc->vge_cdata.vge_rx_commit = 0;

	return (0);
}
/*
 * Release every mbuf still owned by the Rx and Tx rings.  Pending Tx
 * frames are counted as output errors since they were never sent.
 */
static void
vge_freebufs(struct vge_softc *sc)
{
	if_t ifp;
	int slot;

	VGE_LOCK_ASSERT(sc);

	ifp = sc->vge_ifp;

	for (slot = 0; slot < VGE_RX_DESC_CNT; slot++) {
		struct vge_rxdesc *rxd = &sc->vge_cdata.vge_rxdesc[slot];

		if (rxd->rx_m == NULL)
			continue;
		bus_dmamap_sync(sc->vge_cdata.vge_rx_tag,
		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vge_cdata.vge_rx_tag,
		    rxd->rx_dmamap);
		m_freem(rxd->rx_m);
		rxd->rx_m = NULL;
	}

	for (slot = 0; slot < VGE_TX_DESC_CNT; slot++) {
		struct vge_txdesc *txd = &sc->vge_cdata.vge_txdesc[slot];

		if (txd->tx_m == NULL)
			continue;
		bus_dmamap_sync(sc->vge_cdata.vge_tx_tag,
		    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vge_cdata.vge_tx_tag,
		    txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	}
}
#ifndef __NO_STRICT_ALIGNMENT
/*
 * Shift the received frame down by ETHER_ALIGN (2) bytes so that the
 * IP header ends up 32-bit aligned on strict-alignment machines.  The
 * chip DMAs into a buffer offset by VGE_RX_BUF_ALIGN, so there is
 * always room in front of m_data.  The copy is done 16 bits at a time,
 * walking forward; since dst == src - 1 the overlap is safe in this
 * direction.
 */
static __inline void
vge_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	/* One extra word covers a possible odd trailing byte. */
	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif
/*
* RX handler. We support the reception of jumbo frames that have
* been fragmented across multiple 2K mbuf cluster buffers.
*/
/*
 * Drain up to 'count' frames from the Rx ring.  Frames larger than one
 * cluster arrive as a chain of SOF-marked fragments that are collected
 * in vge_head/vge_tail until the final fragment completes the packet.
 * Returns the number of descriptors processed.
 */
static int
vge_rxeof(struct vge_softc *sc, int count)
{
	struct mbuf *m;
	if_t ifp;
	int prod, prog, total_len;
	struct vge_rxdesc *rxd;
	struct vge_rx_desc *cur_rx;
	uint32_t rxstat, rxctl;

	VGE_LOCK_ASSERT(sc);

	ifp = sc->vge_ifp;

	bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
	    sc->vge_cdata.vge_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	prod = sc->vge_cdata.vge_rx_prodidx;
	for (prog = 0; count > 0 &&
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0;
	    VGE_RX_DESC_INC(prod)) {
		cur_rx = &sc->vge_rdata.vge_rx_ring[prod];
		rxstat = le32toh(cur_rx->vge_sts);
		/* OWN still set means the chip hasn't filled this slot yet. */
		if ((rxstat & VGE_RDSTS_OWN) != 0)
			break;
		count--;
		prog++;
		rxctl = le32toh(cur_rx->vge_ctl);
		total_len = VGE_RXBYTES(rxstat);
		rxd = &sc->vge_cdata.vge_rxdesc[prod];
		m = rxd->rx_m;

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if ((rxstat & VGE_RXPKT_SOF) != 0) {
			if (vge_newbuf(sc, prod) != 0) {
				/* No replacement buffer: drop whole chain. */
				if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
				VGE_CHAIN_RESET(sc);
				vge_discard_rxbuf(sc, prod);
				continue;
			}
			m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN;
			if (sc->vge_cdata.vge_head == NULL) {
				sc->vge_cdata.vge_head = m;
				sc->vge_cdata.vge_tail = m;
			} else {
				m->m_flags &= ~M_PKTHDR;
				sc->vge_cdata.vge_tail->m_next = m;
				sc->vge_cdata.vge_tail = m;
			}
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 * We also want to receive bad-checksummed frames and
		 * and frames with bad-length.
		 */
		if ((rxstat & VGE_RDSTS_RXOK) == 0 &&
		    (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR |
		    VGE_RDSTS_CSUMERR)) == 0) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			VGE_CHAIN_RESET(sc);
			vge_discard_rxbuf(sc, prod);
			continue;
		}

		if (vge_newbuf(sc, prod) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			VGE_CHAIN_RESET(sc);
			vge_discard_rxbuf(sc, prod);
			continue;
		}

		/* Chain received mbufs. */
		if (sc->vge_cdata.vge_head != NULL) {
			/* Final fragment carries the remainder of the frame. */
			m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->vge_cdata.vge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->vge_cdata.vge_tail->m_next = m;
			}
			m = sc->vge_cdata.vge_head;
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else {
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);
		}

#ifndef __NO_STRICT_ALIGNMENT
		vge_fixup_rx(m);
#endif
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */
		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
		    (rxctl & VGE_RDCTL_FRAG) == 0) {
			/* Check IP header checksum */
			if ((rxctl & VGE_RDCTL_IPPKT) != 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			/* Check TCP/UDP checksum */
			if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) &&
			    rxctl & VGE_RDCTL_PROTOCSUMOK) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		if ((rxstat & VGE_RDSTS_VTAG) != 0) {
			/*
			 * The 32-bit rxctl register is stored in little-endian.
			 * However, the 16-bit vlan tag is stored in big-endian,
			 * so we have to byte swap it.
			 */
			m->m_pkthdr.ether_vtag =
			    bswap16(rxctl & VGE_RDCTL_VLANID);
			m->m_flags |= M_VLANTAG;
		}

		/* Drop the lock across the stack upcall. */
		VGE_UNLOCK(sc);
		if_input(ifp, m);
		VGE_LOCK(sc);
		sc->vge_cdata.vge_head = NULL;
		sc->vge_cdata.vge_tail = NULL;
	}

	if (prog > 0) {
		sc->vge_cdata.vge_rx_prodidx = prod;
		bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
		    sc->vge_cdata.vge_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/* Update residue counter. */
		if (sc->vge_cdata.vge_rx_commit != 0) {
			CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT,
			    sc->vge_cdata.vge_rx_commit);
			sc->vge_cdata.vge_rx_commit = 0;
		}
	}
	return (prog);
}
/*
 * Reclaim completed Tx descriptors: unload and free the mbuf of every
 * descriptor the chip has given back (OWN clear) and clear the
 * watchdog timer once the ring is empty.
 */
static void
vge_txeof(struct vge_softc *sc)
{
	if_t ifp;
	struct vge_tx_desc *cur_tx;
	struct vge_txdesc *txd;
	uint32_t txstat;
	int cons, prod;

	VGE_LOCK_ASSERT(sc);

	ifp = sc->vge_ifp;

	if (sc->vge_cdata.vge_tx_cnt == 0)
		return;

	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
	    sc->vge_cdata.vge_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	cons = sc->vge_cdata.vge_tx_considx;
	prod = sc->vge_cdata.vge_tx_prodidx;
	for (; cons != prod; VGE_TX_DESC_INC(cons)) {
		cur_tx = &sc->vge_rdata.vge_tx_ring[cons];
		txstat = le32toh(cur_tx->vge_sts);
		/* Chip still owns this descriptor; stop reclaiming. */
		if ((txstat & VGE_TDSTS_OWN) != 0)
			break;
		sc->vge_cdata.vge_tx_cnt--;
		/* At least one slot freed; transmit may resume. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

		txd = &sc->vge_cdata.vge_txdesc[cons];
		bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap);

		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n",
		    __func__));
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		txd->tx_desc->vge_frag[0].vge_addrhi = 0;
	}
	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
	    sc->vge_cdata.vge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->vge_cdata.vge_tx_considx = cons;
	if (sc->vge_cdata.vge_tx_cnt == 0)
		sc->vge_timer = 0;
}
/*
 * Handle a link state change interrupt: propagate the new state to the
 * stack, reprogram flow control from the PHY status register and kick
 * the transmit queue when the link comes up.
 */
static void
vge_link_statchg(void *xsc)
{
	struct vge_softc *sc;
	if_t ifp;
	uint8_t physts;

	sc = xsc;
	ifp = sc->vge_ifp;
	VGE_LOCK_ASSERT(sc);

	physts = CSR_READ_1(sc, VGE_PHYSTS0);
	/* Ignore transient status while the PHY is still resetting. */
	if ((physts & VGE_PHYSTS_RESETSTS) == 0) {
		if ((physts & VGE_PHYSTS_LINK) == 0) {
			sc->vge_flags &= ~VGE_FLAG_LINK;
			if_link_state_change(sc->vge_ifp,
			    LINK_STATE_DOWN);
		} else {
			sc->vge_flags |= VGE_FLAG_LINK;
			if_link_state_change(sc->vge_ifp,
			    LINK_STATE_UP);
			/*
			 * Clear both flow-control enables (CRC2 is the
			 * bit-clear register), then re-enable per the
			 * negotiated full-duplex capabilities.
			 */
			CSR_WRITE_1(sc, VGE_CRC2, VGE_CR2_FDX_TXFLOWCTL_ENABLE |
			    VGE_CR2_FDX_RXFLOWCTL_ENABLE);
			if ((physts & VGE_PHYSTS_FDX) != 0) {
				if ((physts & VGE_PHYSTS_TXFLOWCAP) != 0)
					CSR_WRITE_1(sc, VGE_CRS2,
					    VGE_CR2_FDX_TXFLOWCTL_ENABLE);
				if ((physts & VGE_PHYSTS_RXFLOWCAP) != 0)
					CSR_WRITE_1(sc, VGE_CRS2,
					    VGE_CR2_FDX_RXFLOWCTL_ENABLE);
			}
			if (!if_sendq_empty(ifp))
				vge_start_locked(ifp);
		}
	}
	/*
	 * Restart MII auto-polling because link state change interrupt
	 * will disable it.
	 */
	vge_miipoll_start(sc);
}
#ifdef DEVICE_POLLING
/*
 * Polling-mode entry point: process up to 'count' Rx frames and any
 * completed Tx descriptors; on POLL_AND_CHECK_STATUS also service the
 * ISR for DMA stalls and Rx queue wedges.  Returns the number of Rx
 * packets processed.
 */
static int
vge_poll (if_t ifp, enum poll_cmd cmd, int count)
{
	struct vge_softc *sc = if_getsoftc(ifp);
	int rx_npkts = 0;

	VGE_LOCK(sc);
	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
		goto done;

	rx_npkts = vge_rxeof(sc, count);
	vge_txeof(sc);

	if (!if_sendq_empty(ifp))
		vge_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		uint32_t       status;
		status = CSR_READ_4(sc, VGE_ISR);
		/* 0xFFFFFFFF means the card has gone away. */
		if (status == 0xFFFFFFFF)
			goto done;
		if (status)
			CSR_WRITE_4(sc, VGE_ISR, status);

		/*
		 * XXX check behaviour on receiver stalls.
		 */

		if (status & VGE_ISR_TXDMA_STALL ||
		    status & VGE_ISR_RXDMA_STALL) {
			/* A DMA stall requires a full reinitialization. */
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			vge_init_locked(sc);
		}

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc, count);
			/* Restart and wake the Rx queue after the stall. */
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}
	}
done:
	VGE_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */
/*
 * Interrupt handler.  Masks chip interrupts, acknowledges and services
 * all pending causes (Rx, Tx, DMA stalls, link changes), then unmasks
 * and restarts transmission if the interface is still running.
 */
static void
vge_intr(void *arg)
{
	struct vge_softc *sc;
	if_t ifp;
	uint32_t status;

	sc = arg;
	VGE_LOCK(sc);

	ifp = sc->vge_ifp;
	if ((sc->vge_flags & VGE_FLAG_SUSPENDED) != 0 ||
	    (if_getflags(ifp) & IFF_UP) == 0) {
		VGE_UNLOCK(sc);
		return;
	}

#ifdef DEVICE_POLLING
	if  (if_getcapenable(ifp) & IFCAP_POLLING) {
		/* In polling mode only link-change interrupts are serviced. */
		status = CSR_READ_4(sc, VGE_ISR);
		CSR_WRITE_4(sc, VGE_ISR, status);
		if (status != 0xFFFFFFFF && (status & VGE_ISR_LINKSTS) != 0)
			vge_link_statchg(sc);
		VGE_UNLOCK(sc);
		return;
	}
#endif

	/* Disable interrupts */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	status = CSR_READ_4(sc, VGE_ISR);
	CSR_WRITE_4(sc, VGE_ISR, status | VGE_ISR_HOLDOFF_RELOAD);
	/* If the card has gone away the read returns 0xffff. */
	if (status == 0xFFFFFFFF || (status & VGE_INTRS) == 0)
		goto done;
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
			vge_rxeof(sc, VGE_RX_DESC_CNT);
		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			/* Rx stalled: drain the ring and restart the queue. */
			vge_rxeof(sc, VGE_RX_DESC_CNT);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}

		if (status & (VGE_ISR_TXOK0|VGE_ISR_TXOK_HIPRIO))
			vge_txeof(sc);

		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) {
			/* DMA stall: force a full reinitialization. */
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			vge_init_locked(sc);
		}

		if (status & VGE_ISR_LINKSTS)
			vge_link_statchg(sc);
	}
done:
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		/* Re-enable interrupts */
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

		if (!if_sendq_empty(ifp))
			vge_start_locked(ifp);
	}
	VGE_UNLOCK(sc);
}
/*
 * Encapsulate one frame into the next free Tx descriptor.  Pads short
 * frames (the chip does not autopad), loads the mbuf chain for DMA
 * (defragmenting on EFBIG), then fills in checksum/VLAN flags and the
 * fragment list and hands the descriptor to the hardware.
 *
 * On failure *m_head is freed and set to NULL (except for transient
 * bus_dma errors, where the caller keeps ownership).
 */
static int
vge_encap(struct vge_softc *sc, struct mbuf **m_head)
{
	struct vge_txdesc *txd;
	struct vge_tx_frag *frag;
	struct mbuf *m;
	bus_dma_segment_t txsegs[VGE_MAXTXSEGS];
	int error, i, nsegs, padlen;
	uint32_t cflags;

	VGE_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	/* Argh. This chip does not autopad short frames. */
	if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) {
		m = *m_head;
		padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len;
		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_NOWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		if (M_TRAILINGSPACE(m) < padlen) {
			/* Collapse the chain to make room for the pad. */
			m = m_defrag(m, M_NOWAIT);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		/*
		 * Manually pad short frames, and zero the pad space
		 * to avoid leaking data.
		 */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
		*m_head = m;
	}

	txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx];

	error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		/* Too many segments: collapse the chain and retry once. */
		m = m_collapse(*m_head, M_NOWAIT, VGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
		cflags |= VGE_TDCTL_IPCSUM;
	if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
		cflags |= VGE_TDCTL_TCPCSUM;
	if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
		cflags |= VGE_TDCTL_UDPCSUM;

	/* Configure VLAN. */
	if ((m->m_flags & M_VLANTAG) != 0)
		cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG;
	txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16);
	/*
	 * XXX
	 * Velocity family seems to support TSO but no information
	 * for MSS configuration is available. Also the number of
	 * fragments supported by a descriptor is too small to hold
	 * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF,
	 * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build
	 * longer chain of buffers but no additional information is
	 * available.
	 *
	 * When telling the chip how many segments there are, we
	 * must use nsegs + 1 instead of just nsegs. Darned if I
	 * know why. This also means we can't use the last fragment
	 * field of Tx descriptor.
	 */
	txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) |
	    VGE_TD_LS_NORM);
	for (i = 0; i < nsegs; i++) {
		frag = &txd->tx_desc->vge_frag[i];
		frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr));
		frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) |
		    (VGE_BUFLEN(txsegs[i].ds_len) << 16));
	}

	sc->vge_cdata.vge_tx_cnt++;
	VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx);

	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC);
	txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN);
	txd->tx_m = m;
	return (0);
}
/*
* Main transmit routine.
*/
/*
 * if_start entry point: just the locked wrapper around
 * vge_start_locked().
 */
static void
vge_start(if_t ifp)
{
	struct vge_softc *sc = if_getsoftc(ifp);

	VGE_LOCK(sc);
	vge_start_locked(ifp);
	VGE_UNLOCK(sc);
}
/*
 * Locked transmit path: dequeue frames from the send queue and
 * encapsulate them until the queue is empty or the Tx ring is full,
 * then kick the hardware Tx queue once and arm the watchdog.
 */
static void
vge_start_locked(if_t ifp)
{
	struct vge_softc *sc;
	struct vge_txdesc *txd;
	struct mbuf *m_head;
	int enq, idx;

	sc = if_getsoftc(ifp);

	VGE_LOCK_ASSERT(sc);

	/* Nothing to do without link or while stopped/backed up. */
	if ((sc->vge_flags & VGE_FLAG_LINK) == 0 ||
	    (if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	/* 'idx' trails the producer: it names the previous descriptor. */
	idx = sc->vge_cdata.vge_tx_prodidx;
	VGE_TX_DESC_DEC(idx);
	for (enq = 0; !if_sendq_empty(ifp) &&
	    sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) {
		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (vge_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		/* Chain the previous descriptor to the one just queued. */
		txd = &sc->vge_cdata.vge_txdesc[idx];
		txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q);
		VGE_TX_DESC_INC(idx);
		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
		    sc->vge_cdata.vge_tx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/* Issue a transmit command. */
		CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->vge_timer = 5;
	}
}
/*
 * if_init entry point: just the locked wrapper around
 * vge_init_locked().
 */
static void
vge_init(void *xsc)
{
	struct vge_softc *sc;

	sc = xsc;
	VGE_LOCK(sc);
	vge_init_locked(sc);
	VGE_UNLOCK(sc);
}
/*
 * Locked (re)initialization: stop and reset the chip, rebuild the
 * descriptor rings, program the MAC, filters and DMA queue addresses,
 * then start the MAC and enable interrupts.  Register programming
 * order follows the hardware's requirements; do not reorder.
 */
static void
vge_init_locked(struct vge_softc *sc)
{
	if_t ifp = sc->vge_ifp;
	int error, i;

	VGE_LOCK_ASSERT(sc);

	/* Already running; nothing to do. */
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vge_stop(sc);
	vge_reset(sc);
	vge_miipoll_start(sc);

	/*
	 * Initialize the RX and TX descriptors and mbufs.
	 */

	error = vge_rx_list_init(sc);
	if (error != 0) {
                device_printf(sc->vge_dev, "no memory for Rx buffers.\n");
                return;
	}
	vge_tx_list_init(sc);
	/* Clear MAC statistics. */
	vge_stats_clear(sc);
	/* Set our station address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_PAR0 + i, if_getlladdr(sc->vge_ifp)[i]);

	/*
	 * Set receive FIFO threshold. Also allow transmission and
	 * reception of VLAN tagged frames.
	 */
	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES);

	/* Set DMA burst length */
	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

	/* Set collision backoff algorithm */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

	/* Disable LPSEL field in priority resolution */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

	/*
	 * Load the addresses of the DMA queues into the chip.
	 * Note that we only use one transmit queue.
	 */

	CSR_WRITE_4(sc, VGE_TXDESC_HIADDR,
	    VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr));
	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
	    VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr));
	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);

	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
	    VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr));
	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);

	/* Configure interrupt moderation. */
	vge_intr_holdoff(sc);

	/* Enable and wake up the RX descriptor queue */
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

	/* Enable the TX descriptor queue */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

	/* Init the cam filter. */
	vge_cam_clear(sc);

	/* Set up receiver filter. */
	vge_rxfilter(sc);
	vge_setvlan(sc);

	/* Initialize pause timer. */
	CSR_WRITE_2(sc, VGE_TX_PAUSE_TIMER, 0xFFFF);
	/*
	 * Initialize flow control parameters.
	 *  TX XON high threshold : 48
	 *  TX pause low threshold : 24
	 *  Disable hald-duplex flow control
	 */
	CSR_WRITE_1(sc, VGE_CRC2, 0xFF);
	CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_XON_ENABLE | 0x0B);

	/* Enable jumbo frame reception (if desired) */

	/* Start the MAC. */
	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
	CSR_WRITE_1(sc, VGE_CRS0,
	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts except link state change if we are polling.
	 */
	if (if_getcapenable(ifp) & IFCAP_POLLING) {
		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING);
	} else	/* otherwise ... */
#endif
	{
	/*
	 * Enable interrupts.
	 */
		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
	}
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	sc->vge_flags &= ~VGE_FLAG_LINK;
	vge_ifmedia_upd_locked(sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
}
/*
* Set media options.
*/
/*
 * ifmedia "set media" callback: locked wrapper around
 * vge_ifmedia_upd_locked().
 */
static int
vge_ifmedia_upd(if_t ifp)
{
	struct vge_softc *sc = if_getsoftc(ifp);
	int rc;

	VGE_LOCK(sc);
	rc = vge_ifmedia_upd_locked(sc);
	VGE_UNLOCK(sc);

	return (rc);
}
/*
 * Apply the currently selected media: reset every PHY, program the
 * MAC's forced-mode bits to match, then let the MII layer switch media.
 */
static int
vge_ifmedia_upd_locked(struct vge_softc *sc)
{
	struct mii_data *mii = device_get_softc(sc->vge_miibus);
	struct mii_softc *phy;

	LIST_FOREACH(phy, &mii->mii_phys, mii_list)
		PHY_RESET(phy);
	vge_setmedia(sc);

	return (mii_mediachg(mii));
}
/*
* Report current media status.
*/
/*
 * ifmedia "get status" callback: poll the PHY and report the active
 * media and link status.  Does nothing while the interface is down.
 */
static void
vge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct vge_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii = device_get_softc(sc->vge_miibus);

	VGE_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		mii_pollstat(mii);
		ifmr->ifm_active = mii->mii_media_active;
		ifmr->ifm_status = mii->mii_media_status;
	}
	VGE_UNLOCK(sc);
}
/*
 * Program the MAC's forced-mode / forced-duplex bits in DIAGCTL to
 * agree with the media the user selected via ifmedia.
 */
static void
vge_setmedia(struct vge_softc *sc)
{
	struct mii_data *mii;
	struct ifmedia_entry *ife;

	mii = device_get_softc(sc->vge_miibus);
	ife = mii->mii_media.ifm_cur;

	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit. This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */

	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		} else {
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		}
		break;
	default:
		device_printf(sc->vge_dev, "unknown media type: %x\n",
		    IFM_SUBTYPE(ife->ifm_media));
		break;
	}
}
/*
 * Handle socket ioctl requests: MTU changes, interface flags,
 * multicast list updates, media selection, and capability toggles.
 * Anything unrecognized is handed off to ether_ioctl().
 */
static int
vge_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct vge_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (command) {
	case SIOCSIFMTU:
		VGE_LOCK(sc);
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VGE_JUMBO_MTU)
			error = EINVAL;
		else if (if_getmtu(ifp) != ifr->ifr_mtu) {
			/* Jumbo MTU only valid on jumbo-capable chips. */
			if (ifr->ifr_mtu > ETHERMTU &&
			    (sc->vge_flags & VGE_FLAG_JUMBO) == 0)
				error = EINVAL;
			else
				if_setmtu(ifp, ifr->ifr_mtu);
		}
		VGE_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		VGE_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			/*
			 * If only PROMISC/ALLMULTI changed while running,
			 * reprogram the RX filter rather than doing a full
			 * reinitialization.
			 */
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
			    ((if_getflags(ifp) ^ sc->vge_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				vge_rxfilter(sc);
			else
				vge_init_locked(sc);
		} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			vge_stop(sc);
		/* Remember flags so the next delta can be computed. */
		sc->vge_if_flags = if_getflags(ifp);
		VGE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VGE_LOCK(sc);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			vge_rxfilter(sc);
		VGE_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		/* Bits that differ from the currently enabled set. */
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(vge_poll, ifp);
				if (error)
					return (error);
				VGE_LOCK(sc);
				/* Disable interrupts */
				CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING);
				CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
				CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
				VGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				VGE_LOCK(sc);
				CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
				CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
				CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
				VGE_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		VGE_LOCK(sc);
		/* Keep hwassist flags in sync with the TXCSUM toggle. */
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
			if_togglecapenable(ifp, IFCAP_TXCSUM);
			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
				if_sethwassistbits(ifp, VGE_CSUM_FEATURES, 0);
			else
				if_sethwassistbits(ifp, 0, VGE_CSUM_FEATURES);
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if ((mask & IFCAP_WOL_UCAST) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL_UCAST) != 0)
			if_togglecapenable(ifp, IFCAP_WOL_UCAST);
		if ((mask & IFCAP_WOL_MCAST) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL_MCAST) != 0)
			if_togglecapenable(ifp, IFCAP_WOL_MCAST);
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0)
			if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM) != 0)
			if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
		/* VLAN tagging change must be pushed to the chip. */
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (IFCAP_VLAN_HWTAGGING & if_getcapabilities(ifp)) != 0) {
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
			vge_setvlan(sc);
		}
		VGE_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*
 * Per-second driver timer: refreshes MAC statistics, rearms itself,
 * and reinitializes the chip if a transmission has been pending for
 * longer than the watchdog interval.
 */
static void
vge_watchdog(void *arg)
{
	struct vge_softc *sc;
	if_t ifp;

	sc = arg;
	VGE_LOCK_ASSERT(sc);
	vge_stats_update(sc);
	callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
	/*
	 * vge_timer == 0 means nothing is in flight; a nonzero value
	 * is decremented each tick and only hitting zero here fires
	 * the timeout recovery below.
	 */
	if (sc->vge_timer == 0 || --sc->vge_timer > 0)
		return;

	ifp = sc->vge_ifp;
	if_printf(ifp, "watchdog timeout\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

	/* Reclaim whatever completed, then restart the interface. */
	vge_txeof(sc);
	vge_rxeof(sc, VGE_RX_DESC_CNT);

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	vge_init_locked(sc);
}
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vge_stop(struct vge_softc *sc)
{
	if_t ifp;

	VGE_LOCK_ASSERT(sc);
	ifp = sc->vge_ifp;
	sc->vge_timer = 0;
	callout_stop(&sc->vge_watchdog);

	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));

	/* Mask interrupts, halt the MAC, and ack any pending status. */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	/* Shut down the Tx/Rx queues and clear the Rx ring address. */
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	/* Capture final counter values before the chip goes quiet. */
	vge_stats_update(sc);
	VGE_CHAIN_RESET(sc);
	vge_txeof(sc);
	vge_freebufs(sc);
}
/*
 * Device suspend routine. Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
vge_suspend(device_t dev)
{
	struct vge_softc *sc;

	sc = device_get_softc(dev);
	VGE_LOCK(sc);
	vge_stop(sc);
	/* Arm Wake On LAN (or power down the PHY if no PME support). */
	vge_setwol(sc);
	sc->vge_flags |= VGE_FLAG_SUSPENDED;
	VGE_UNLOCK(sc);

	return (0);
}
/*
 * Device resume routine. Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
vge_resume(device_t dev)
{
	struct vge_softc *sc;
	if_t ifp;
	uint16_t pmstat;

	sc = device_get_softc(dev);
	VGE_LOCK(sc);
	if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->vge_dev,
		    sc->vge_pmcap + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->vge_dev,
			    sc->vge_pmcap + PCIR_POWER_STATUS, pmstat, 2);
		}
	}
	/* Wake the chip from sleep and disable WOL event sources. */
	vge_clrwol(sc);
	/* Restart MII auto-polling. */
	vge_miipoll_start(sc);
	ifp = sc->vge_ifp;
	/* Reinitialize interface if necessary. */
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		vge_init_locked(sc);
	}
	sc->vge_flags &= ~VGE_FLAG_SUSPENDED;
	VGE_UNLOCK(sc);

	return (0);
}
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
vge_shutdown(device_t dev)
{

	/* Suspend already stops the chip and arms WOL as configured. */
	return (vge_suspend(dev));
}
/* Shorthand for attaching one read-only 32-bit statistics counter. */
#define VGE_SYSCTL_STAT_ADD32(c, h, n, p, d) \
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

/*
 * Build the per-device sysctl tree: runtime tunables (interrupt
 * holdoff, Rx/Tx coalescing), their defaults from device hints, and
 * the Rx/Tx MAC statistics nodes.  Ends by clearing the hardware
 * MIB counters so the exported values start from zero.
 */
static void
vge_sysctl_node(struct vge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct vge_hw_stats *stats;

	stats = &sc->vge_stats;
	ctx = device_get_sysctl_ctx(sc->vge_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vge_dev));

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_holdoff",
	    CTLFLAG_RW, &sc->vge_int_holdoff, 0, "interrupt holdoff");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_coal_pkt",
	    CTLFLAG_RW, &sc->vge_rx_coal_pkt, 0, "rx coalescing packet");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_coal_pkt",
	    CTLFLAG_RW, &sc->vge_tx_coal_pkt, 0, "tx coalescing packet");

	/* Pull in device tunables. */
	sc->vge_int_holdoff = VGE_INT_HOLDOFF_DEFAULT;
	resource_int_value(device_get_name(sc->vge_dev),
	    device_get_unit(sc->vge_dev), "int_holdoff", &sc->vge_int_holdoff);
	sc->vge_rx_coal_pkt = VGE_RX_COAL_PKT_DEFAULT;
	resource_int_value(device_get_name(sc->vge_dev),
	    device_get_unit(sc->vge_dev), "rx_coal_pkt", &sc->vge_rx_coal_pkt);
	sc->vge_tx_coal_pkt = VGE_TX_COAL_PKT_DEFAULT;
	resource_int_value(device_get_name(sc->vge_dev),
	    device_get_unit(sc->vge_dev), "tx_coal_pkt", &sc->vge_tx_coal_pkt);

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "VGE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames",
	    &stats->rx_frames, "frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_good_frames, "Good frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
	    &stats->rx_fifo_oflows, "FIFO overflows");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "runts",
	    &stats->rx_runts, "Too short frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "runts_errs",
	    &stats->rx_runts_errs, "Too short frames with errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->rx_pkts_64, "64 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->rx_pkts_65_127, "65 to 127 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->rx_pkts_128_255, "128 to 255 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->rx_pkts_256_511, "256 to 511 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->rx_pkts_1519_max, "1519 to max frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max_errs",
	    &stats->rx_pkts_1519_max_errs, "1519 to max frames with error");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
	    &stats->rx_jumbos, "Jumbo frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
	    &stats->rx_crcerrs, "CRC errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
	    &stats->rx_alignerrs, "Alignment errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "nobufs",
	    &stats->rx_nobufs, "Frames with no buffer event");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs",
	    &stats->rx_symerrs, "Frames with symbol errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->rx_lenerrs, "Frames with length mismatched");

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_good_frames, "Good frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->tx_pkts_64, "64 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->tx_pkts_65_127, "65 to 127 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->tx_pkts_128_255, "128 to 255 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->tx_pkts_256_511, "256 to 511 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
	    &stats->tx_jumbos, "Jumbo frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "colls",
	    &stats->tx_colls, "Collisions");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_latecolls, "Late collisions");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause, "Pause frames");
#ifdef VGE_ENABLE_SQEERR
	VGE_SYSCTL_STAT_ADD32(ctx, child, "sqeerrs",
	    &stats->tx_sqeerrs, "SQE errors");
#endif
	/* Clear MAC statistics. */
	vge_stats_clear(sc);
}

#undef VGE_SYSCTL_STAT_ADD32
/*
 * Zero the hardware MIB counters: freeze them, issue the clear
 * command, wait for the chip to acknowledge it, then unfreeze.
 */
static void
vge_stats_clear(struct vge_softc *sc)
{
	int tries;

	/* Freeze the counters, then request the clear. */
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FREEZE);
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_CLR);
	tries = VGE_TIMEOUT;
	while (tries > 0) {
		DELAY(1);
		/* The chip drops the CLR bit once the clear completes. */
		if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_CLR) == 0)
			break;
		tries--;
	}
	if (tries == 0)
		device_printf(sc->vge_dev, "MIB clear timed out!\n");
	/* Unfreeze so the counters resume accumulating. */
	CSR_WRITE_1(sc, VGE_MIBCSR, CSR_READ_1(sc, VGE_MIBCSR) &
	    ~VGE_MIBCSR_FREEZE);
}
/*
 * Dump the hardware MIB counters and fold them into the software
 * statistics and the ifnet counters.  Called from vge_watchdog()
 * every second and once more from vge_stop().
 */
static void
vge_stats_update(struct vge_softc *sc)
{
	struct vge_hw_stats *stats;
	if_t ifp;
	uint32_t mib[VGE_MIB_CNT], val;
	int i;

	VGE_LOCK_ASSERT(sc);

	stats = &sc->vge_stats;
	ifp = sc->vge_ifp;

	/* Flush the current counter values into the MIB data port. */
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FLUSH);
	for (i = VGE_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_FLUSH) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->vge_dev, "MIB counter dump timed out!\n");
		vge_stats_clear(sc);
		return;
	}

	bzero(mib, sizeof(mib));
reset_idx:
	/* Set MIB read index to 0. */
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_RINI);
	for (i = 0; i < VGE_MIB_CNT; i++) {
		val = CSR_READ_4(sc, VGE_MIBDATA);
		/*
		 * Each data word carries its own index in the upper
		 * bits; a mismatch means the read sequence was
		 * disturbed, so restart the whole dump from index 0.
		 */
		if (i != VGE_MIB_DATA_IDX(val)) {
			/* Reading interrupted. */
			goto reset_idx;
		}
		mib[i] = val & VGE_MIB_DATA_MASK;
	}

	/* Rx stats. */
	stats->rx_frames += mib[VGE_MIB_RX_FRAMES];
	stats->rx_good_frames += mib[VGE_MIB_RX_GOOD_FRAMES];
	stats->rx_fifo_oflows += mib[VGE_MIB_RX_FIFO_OVERRUNS];
	stats->rx_runts += mib[VGE_MIB_RX_RUNTS];
	stats->rx_runts_errs += mib[VGE_MIB_RX_RUNTS_ERRS];
	stats->rx_pkts_64 += mib[VGE_MIB_RX_PKTS_64];
	stats->rx_pkts_65_127 += mib[VGE_MIB_RX_PKTS_65_127];
	stats->rx_pkts_128_255 += mib[VGE_MIB_RX_PKTS_128_255];
	stats->rx_pkts_256_511 += mib[VGE_MIB_RX_PKTS_256_511];
	stats->rx_pkts_512_1023 += mib[VGE_MIB_RX_PKTS_512_1023];
	stats->rx_pkts_1024_1518 += mib[VGE_MIB_RX_PKTS_1024_1518];
	stats->rx_pkts_1519_max += mib[VGE_MIB_RX_PKTS_1519_MAX];
	stats->rx_pkts_1519_max_errs += mib[VGE_MIB_RX_PKTS_1519_MAX_ERRS];
	stats->rx_jumbos += mib[VGE_MIB_RX_JUMBOS];
	stats->rx_crcerrs += mib[VGE_MIB_RX_CRCERRS];
	stats->rx_pause_frames += mib[VGE_MIB_RX_PAUSE];
	stats->rx_alignerrs += mib[VGE_MIB_RX_ALIGNERRS];
	stats->rx_nobufs += mib[VGE_MIB_RX_NOBUFS];
	stats->rx_symerrs += mib[VGE_MIB_RX_SYMERRS];
	stats->rx_lenerrs += mib[VGE_MIB_RX_LENERRS];

	/* Tx stats. */
	stats->tx_good_frames += mib[VGE_MIB_TX_GOOD_FRAMES];
	stats->tx_pkts_64 += mib[VGE_MIB_TX_PKTS_64];
	stats->tx_pkts_65_127 += mib[VGE_MIB_TX_PKTS_65_127];
	stats->tx_pkts_128_255 += mib[VGE_MIB_TX_PKTS_128_255];
	stats->tx_pkts_256_511 += mib[VGE_MIB_TX_PKTS_256_511];
	stats->tx_pkts_512_1023 += mib[VGE_MIB_TX_PKTS_512_1023];
	stats->tx_pkts_1024_1518 += mib[VGE_MIB_TX_PKTS_1024_1518];
	stats->tx_jumbos += mib[VGE_MIB_TX_JUMBOS];
	stats->tx_colls += mib[VGE_MIB_TX_COLLS];
	stats->tx_pause += mib[VGE_MIB_TX_PAUSE];
#ifdef VGE_ENABLE_SQEERR
	stats->tx_sqeerrs += mib[VGE_MIB_TX_SQEERRS];
#endif
	stats->tx_latecolls += mib[VGE_MIB_TX_LATECOLLS];

	/* Update counters in ifnet. */
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, mib[VGE_MIB_TX_GOOD_FRAMES]);

	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
	    mib[VGE_MIB_TX_COLLS] + mib[VGE_MIB_TX_LATECOLLS]);

	if_inc_counter(ifp, IFCOUNTER_OERRORS,
	    mib[VGE_MIB_TX_COLLS] + mib[VGE_MIB_TX_LATECOLLS]);

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, mib[VGE_MIB_RX_GOOD_FRAMES]);

	if_inc_counter(ifp, IFCOUNTER_IERRORS,
	    mib[VGE_MIB_RX_FIFO_OVERRUNS] +
	    mib[VGE_MIB_RX_RUNTS] +
	    mib[VGE_MIB_RX_RUNTS_ERRS] +
	    mib[VGE_MIB_RX_CRCERRS] +
	    mib[VGE_MIB_RX_ALIGNERRS] +
	    mib[VGE_MIB_RX_NOBUFS] +
	    mib[VGE_MIB_RX_SYMERRS] +
	    mib[VGE_MIB_RX_LENERRS]);
}
/*
 * Program interrupt moderation: the Tx/Rx interrupt suppression
 * thresholds and the global interrupt holdoff timer.
 */
static void
vge_intr_holdoff(struct vge_softc *sc)
{
	uint8_t intctl;

	VGE_LOCK_ASSERT(sc);

	/*
	 * Set Tx interrupt suppression threshold.
	 * It's possible to use single-shot timer in VGE_CRS1 register
	 * in Tx path such that driver can remove most of Tx completion
	 * interrupts. However this requires additional access to
	 * VGE_CRS1 register to reload the timer in addition to
	 * activating Tx kick command. Another downside is we don't know
	 * what single-shot timer value should be used in advance so
	 * reclaiming transmitted mbufs could be delayed a lot which in
	 * turn slows down Tx operation.
	 */
	CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_TXSUPPTHR);
	CSR_WRITE_1(sc, VGE_TXSUPPTHR, sc->vge_tx_coal_pkt);

	/* Set Rx interrupt suppression threshold. */
	CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, sc->vge_rx_coal_pkt);

	intctl = CSR_READ_1(sc, VGE_INTCTL1);
	intctl &= ~VGE_INTCTL_SC_RELOAD;
	intctl |= VGE_INTCTL_HC_RELOAD;
	/* A non-positive coalescing count disables suppression. */
	if (sc->vge_tx_coal_pkt <= 0)
		intctl |= VGE_INTCTL_TXINTSUP_DISABLE;
	else
		intctl &= ~VGE_INTCTL_TXINTSUP_DISABLE;
	if (sc->vge_rx_coal_pkt <= 0)
		intctl |= VGE_INTCTL_RXINTSUP_DISABLE;
	else
		intctl &= ~VGE_INTCTL_RXINTSUP_DISABLE;
	CSR_WRITE_1(sc, VGE_INTCTL1, intctl);

	/* Clear the holdoff enable (CR3 clear port) before programming. */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_HOLDOFF);
	if (sc->vge_int_holdoff > 0) {
		/* Set interrupt holdoff timer. */
		CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
		CSR_WRITE_1(sc, VGE_INTHOLDOFF,
		    VGE_INT_HOLDOFF_USEC(sc->vge_int_holdoff));
		/* Enable holdoff timer. */
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	}
}
static void
vge_setlinkspeed(struct vge_softc *sc)
{
struct mii_data *mii;
int aneg, i;
VGE_LOCK_ASSERT(sc);
mii = device_get_softc(sc->vge_miibus);
mii_pollstat(mii);
aneg = 0;
if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
(IFM_ACTIVE | IFM_AVALID)) {
switch IFM_SUBTYPE(mii->mii_media_active) {
case IFM_10_T:
case IFM_100_TX:
return;
case IFM_1000_T:
aneg++;
default:
break;
}
}
/* Clear forced MAC speed/duplex configuration. */
CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_100T2CR, 0);
vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_ANAR,
ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
BMCR_AUTOEN | BMCR_STARTNEG);
DELAY(1000);
if (aneg != 0) {
/* Poll link state until vge(4) get a 10/100 link. */
for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
mii_pollstat(mii);
if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
== (IFM_ACTIVE | IFM_AVALID)) {
switch (IFM_SUBTYPE(mii->mii_media_active)) {
case IFM_10_T:
case IFM_100_TX:
return;
default:
break;
}
}
VGE_UNLOCK(sc);
pause("vgelnk", hz);
VGE_LOCK(sc);
}
if (i == MII_ANEGTICKS_GIGE)
device_printf(sc->vge_dev, "establishing link failed, "
"WOL may not work!");
}
/*
* No link, force MAC to have 100Mbps, full-duplex link.
* This is the last resort and may/may not work.
*/
mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
}
/*
 * Arm Wake On LAN according to the interface's enabled WOL
 * capabilities and put the chip into its sleep state.  Called from
 * vge_suspend() with the driver lock held.
 */
static void
vge_setwol(struct vge_softc *sc)
{
	if_t ifp;
	uint16_t pmstat;
	uint8_t val;

	VGE_LOCK_ASSERT(sc);

	if ((sc->vge_flags & VGE_FLAG_PMCAP) == 0) {
		/* No PME capability, PHY power down. */
		vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
		    BMCR_PDOWN);
		vge_miipoll_stop(sc);
		return;
	}

	ifp = sc->vge_ifp;

	/* Clear WOL on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
	/* Disable WOL on magic/unicast packet. */
	CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
	CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM |
	    VGE_WOLCFG_PMEOVR);
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) {
		/* Drop to a 10/100 link so the PHY can wake us. */
		vge_setlinkspeed(sc);
		val = 0;
		if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) != 0)
			val |= VGE_WOLCR1_UCAST;
		if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
			val |= VGE_WOLCR1_MAGIC;
		CSR_WRITE_1(sc, VGE_WOLCR1S, val);
		val = 0;
		if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) != 0)
			val |= VGE_WOLCFG_SAM | VGE_WOLCFG_SAB;
		CSR_WRITE_1(sc, VGE_WOLCFGS, val | VGE_WOLCFG_PMEOVR);
		/* Disable MII auto-polling. */
		vge_miipoll_stop(sc);
	}
	/* Force the MAC link configuration chosen above. */
	CSR_SETBIT_1(sc, VGE_DIAGCTL,
	    VGE_DIAGCTL_MACFORCE | VGE_DIAGCTL_FDXFORCE);
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);

	/* Clear WOL status on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
	CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);

	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val |= VGE_STICKHW_SWPTAG;
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);

	/* Put hardware into sleep. */
	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val |= VGE_STICKHW_DS0 | VGE_STICKHW_DS1;
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);

	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->vge_dev, sc->vge_pmcap +
	    PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->vge_dev, sc->vge_pmcap + PCIR_POWER_STATUS,
	    pmstat, 2);
}
/*
 * Undo vge_setwol(): wake the chip from its sleep state and disable
 * all Wake On LAN event sources.  Called from vge_resume().
 */
static void
vge_clrwol(struct vge_softc *sc)
{
	uint8_t val;

	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val &= ~VGE_STICKHW_SWPTAG;
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);

	/* Disable WOL and clear power state indicator. */
	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1);
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);

	/* Return to normal (non-forced) MAC operation. */
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);

	/* Clear WOL on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
	/* Disable WOL on magic/unicast packet. */
	CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
	CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM |
	    VGE_WOLCFG_PMEOVR);
	/* Clear WOL status on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
	CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);
}
diff --git a/sys/dev/virtio/network/if_vtnet.c b/sys/dev/virtio/network/if_vtnet.c
index 9c14e688f364..0dc887b3d394 100644
--- a/sys/dev/virtio/network/if_vtnet.c
+++ b/sys/dev/virtio/network/if_vtnet.c
@@ -1,4479 +1,4469 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Driver for VirtIO network devices. */
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/msan.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
#include <machine/smp.h>
#include <vm/uma.h>
#include <net/debugnet.h>
#include <net/ethernet.h>
#include <net/pfil.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>
#include <net/bpf.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/network/virtio_net.h>
#include <dev/virtio/network/if_vtnetvar.h>
#include "virtio_if.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#if defined(INET) || defined(INET6)
#include <machine/in_cksum.h>
#endif
#ifdef __NO_STRICT_ALIGNMENT
#define VTNET_ETHER_ALIGN 0
#else /* Strict alignment */
#define VTNET_ETHER_ALIGN ETHER_ALIGN
#endif
static int vtnet_modevent(module_t, int, void *);
static int vtnet_probe(device_t);
static int vtnet_attach(device_t);
static int vtnet_detach(device_t);
static int vtnet_suspend(device_t);
static int vtnet_resume(device_t);
static int vtnet_shutdown(device_t);
static int vtnet_attach_completed(device_t);
static int vtnet_config_change(device_t);
static int vtnet_negotiate_features(struct vtnet_softc *);
static int vtnet_setup_features(struct vtnet_softc *);
static int vtnet_init_rxq(struct vtnet_softc *, int);
static int vtnet_init_txq(struct vtnet_softc *, int);
static int vtnet_alloc_rxtx_queues(struct vtnet_softc *);
static void vtnet_free_rxtx_queues(struct vtnet_softc *);
static int vtnet_alloc_rx_filters(struct vtnet_softc *);
static void vtnet_free_rx_filters(struct vtnet_softc *);
static int vtnet_alloc_virtqueues(struct vtnet_softc *);
-static int vtnet_alloc_interface(struct vtnet_softc *);
+static void vtnet_alloc_interface(struct vtnet_softc *);
static int vtnet_setup_interface(struct vtnet_softc *);
static int vtnet_ioctl_mtu(struct vtnet_softc *, u_int);
static int vtnet_ioctl_ifflags(struct vtnet_softc *);
static int vtnet_ioctl_multi(struct vtnet_softc *);
static int vtnet_ioctl_ifcap(struct vtnet_softc *, struct ifreq *);
static int vtnet_ioctl(if_t, u_long, caddr_t);
static uint64_t vtnet_get_counter(if_t, ift_counter);
static int vtnet_rxq_populate(struct vtnet_rxq *);
static void vtnet_rxq_free_mbufs(struct vtnet_rxq *);
static struct mbuf *
vtnet_rx_alloc_buf(struct vtnet_softc *, int , struct mbuf **);
static int vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *,
struct mbuf *, int);
static int vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
static int vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
static int vtnet_rxq_new_buf(struct vtnet_rxq *);
static int vtnet_rxq_csum_needs_csum(struct vtnet_rxq *, struct mbuf *,
uint16_t, int, struct virtio_net_hdr *);
static int vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *,
uint16_t, int, struct virtio_net_hdr *);
static int vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
struct virtio_net_hdr *);
static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
static void vtnet_rxq_discard_buf(struct vtnet_rxq *, struct mbuf *);
static int vtnet_rxq_merged_eof(struct vtnet_rxq *, struct mbuf *, int);
static void vtnet_rxq_input(struct vtnet_rxq *, struct mbuf *,
struct virtio_net_hdr *);
static int vtnet_rxq_eof(struct vtnet_rxq *);
static void vtnet_rx_vq_process(struct vtnet_rxq *rxq, int tries);
static void vtnet_rx_vq_intr(void *);
static void vtnet_rxq_tq_intr(void *, int);
static int vtnet_txq_intr_threshold(struct vtnet_txq *);
static int vtnet_txq_below_threshold(struct vtnet_txq *);
static int vtnet_txq_notify(struct vtnet_txq *);
static void vtnet_txq_free_mbufs(struct vtnet_txq *);
static int vtnet_txq_offload_ctx(struct vtnet_txq *, struct mbuf *,
int *, int *, int *);
static int vtnet_txq_offload_tso(struct vtnet_txq *, struct mbuf *, int,
int, struct virtio_net_hdr *);
static struct mbuf *
vtnet_txq_offload(struct vtnet_txq *, struct mbuf *,
struct virtio_net_hdr *);
static int vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **,
struct vtnet_tx_header *);
static int vtnet_txq_encap(struct vtnet_txq *, struct mbuf **, int);
#ifdef VTNET_LEGACY_TX
static void vtnet_start_locked(struct vtnet_txq *, if_t);
static void vtnet_start(if_t);
#else
static int vtnet_txq_mq_start_locked(struct vtnet_txq *, struct mbuf *);
static int vtnet_txq_mq_start(if_t, struct mbuf *);
static void vtnet_txq_tq_deferred(void *, int);
#endif
static void vtnet_txq_start(struct vtnet_txq *);
static void vtnet_txq_tq_intr(void *, int);
static int vtnet_txq_eof(struct vtnet_txq *);
static void vtnet_tx_vq_intr(void *);
static void vtnet_tx_start_all(struct vtnet_softc *);
#ifndef VTNET_LEGACY_TX
static void vtnet_qflush(if_t);
#endif
static int vtnet_watchdog(struct vtnet_txq *);
static void vtnet_accum_stats(struct vtnet_softc *,
struct vtnet_rxq_stats *, struct vtnet_txq_stats *);
static void vtnet_tick(void *);
static void vtnet_start_taskqueues(struct vtnet_softc *);
static void vtnet_free_taskqueues(struct vtnet_softc *);
static void vtnet_drain_taskqueues(struct vtnet_softc *);
static void vtnet_drain_rxtx_queues(struct vtnet_softc *);
static void vtnet_stop_rendezvous(struct vtnet_softc *);
static void vtnet_stop(struct vtnet_softc *);
static int vtnet_virtio_reinit(struct vtnet_softc *);
static void vtnet_init_rx_filters(struct vtnet_softc *);
static int vtnet_init_rx_queues(struct vtnet_softc *);
static int vtnet_init_tx_queues(struct vtnet_softc *);
static int vtnet_init_rxtx_queues(struct vtnet_softc *);
static void vtnet_set_active_vq_pairs(struct vtnet_softc *);
static void vtnet_update_rx_offloads(struct vtnet_softc *);
static int vtnet_reinit(struct vtnet_softc *);
static void vtnet_init_locked(struct vtnet_softc *, int);
static void vtnet_init(void *);
static void vtnet_free_ctrl_vq(struct vtnet_softc *);
static void vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
struct sglist *, int, int);
static int vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
static int vtnet_ctrl_guest_offloads(struct vtnet_softc *, uint64_t);
static int vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t);
static int vtnet_ctrl_rx_cmd(struct vtnet_softc *, uint8_t, bool);
static int vtnet_set_promisc(struct vtnet_softc *, bool);
static int vtnet_set_allmulti(struct vtnet_softc *, bool);
static void vtnet_rx_filter(struct vtnet_softc *);
static void vtnet_rx_filter_mac(struct vtnet_softc *);
static int vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void vtnet_rx_filter_vlan(struct vtnet_softc *);
static void vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void vtnet_register_vlan(void *, if_t, uint16_t);
static void vtnet_unregister_vlan(void *, if_t, uint16_t);
static void vtnet_update_speed_duplex(struct vtnet_softc *);
static int vtnet_is_link_up(struct vtnet_softc *);
static void vtnet_update_link_status(struct vtnet_softc *);
static int vtnet_ifmedia_upd(if_t);
static void vtnet_ifmedia_sts(if_t, struct ifmediareq *);
static void vtnet_get_macaddr(struct vtnet_softc *);
static void vtnet_set_macaddr(struct vtnet_softc *);
static void vtnet_attached_set_macaddr(struct vtnet_softc *);
static void vtnet_vlan_tag_remove(struct mbuf *);
static void vtnet_set_rx_process_limit(struct vtnet_softc *);
static void vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
struct sysctl_oid_list *, struct vtnet_rxq *);
static void vtnet_setup_txq_sysctl(struct sysctl_ctx_list *,
struct sysctl_oid_list *, struct vtnet_txq *);
static void vtnet_setup_queue_sysctl(struct vtnet_softc *);
static void vtnet_load_tunables(struct vtnet_softc *);
static void vtnet_setup_sysctl(struct vtnet_softc *);
static int vtnet_rxq_enable_intr(struct vtnet_rxq *);
static void vtnet_rxq_disable_intr(struct vtnet_rxq *);
static int vtnet_txq_enable_intr(struct vtnet_txq *);
static void vtnet_txq_disable_intr(struct vtnet_txq *);
static void vtnet_enable_rx_interrupts(struct vtnet_softc *);
static void vtnet_enable_tx_interrupts(struct vtnet_softc *);
static void vtnet_enable_interrupts(struct vtnet_softc *);
static void vtnet_disable_rx_interrupts(struct vtnet_softc *);
static void vtnet_disable_tx_interrupts(struct vtnet_softc *);
static void vtnet_disable_interrupts(struct vtnet_softc *);
static int vtnet_tunable_int(struct vtnet_softc *, const char *, int);
DEBUGNET_DEFINE(vtnet);
#define vtnet_htog16(_sc, _val) virtio_htog16(vtnet_modern(_sc), _val)
#define vtnet_htog32(_sc, _val) virtio_htog32(vtnet_modern(_sc), _val)
#define vtnet_htog64(_sc, _val) virtio_htog64(vtnet_modern(_sc), _val)
#define vtnet_gtoh16(_sc, _val) virtio_gtoh16(vtnet_modern(_sc), _val)
#define vtnet_gtoh32(_sc, _val) virtio_gtoh32(vtnet_modern(_sc), _val)
#define vtnet_gtoh64(_sc, _val) virtio_gtoh64(vtnet_modern(_sc), _val)
/*
 * Tunables.  All are read-once at boot (CTLFLAG_RDTUN) under the
 * hw.vtnet sysctl node.
 */
static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VirtIO Net driver parameters");

static int vtnet_csum_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN,
    &vtnet_csum_disable, 0, "Disables receive and send checksum offload");

static int vtnet_fixup_needs_csum = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, fixup_needs_csum, CTLFLAG_RDTUN,
    &vtnet_fixup_needs_csum, 0,
    "Calculate valid checksum for NEEDS_CSUM packets");

static int vtnet_tso_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN,
    &vtnet_tso_disable, 0, "Disables TSO");

static int vtnet_lro_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN,
    &vtnet_lro_disable, 0, "Disables hardware LRO");

static int vtnet_mq_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN,
    &vtnet_mq_disable, 0, "Disables multiqueue support");

static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS;
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN,
    &vtnet_mq_max_pairs, 0, "Maximum number of multiqueue pairs");

static int vtnet_tso_maxlen = IP_MAXPACKET;
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN,
    &vtnet_tso_maxlen, 0, "TSO burst limit");

static int vtnet_rx_process_limit = 1024;
SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &vtnet_rx_process_limit, 0,
    "Number of RX segments processed in one pass");

static int vtnet_lro_entry_count = 128;
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN,
    &vtnet_lro_entry_count, 0, "Software LRO entry count");

/* Enable sorted LRO, and the depth of the mbuf queue. */
static int vtnet_lro_mbufq_depth = 0;
/*
 * Use SYSCTL_INT, not SYSCTL_UINT: the backing variable is an int,
 * and every sibling tunable above is exported as SYSCTL_INT.
 */
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_mbufq_depth, CTLFLAG_RDTUN,
    &vtnet_lro_mbufq_depth, 0, "Depth of software LRO mbuf queue");
static uma_zone_t vtnet_tx_header_zone;
static struct virtio_feature_desc vtnet_feature_desc[] = {
{ VIRTIO_NET_F_CSUM, "TxChecksum" },
{ VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" },
{ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, "CtrlRxOffloads" },
{ VIRTIO_NET_F_MAC, "MAC" },
{ VIRTIO_NET_F_GSO, "TxGSO" },
{ VIRTIO_NET_F_GUEST_TSO4, "RxLROv4" },
{ VIRTIO_NET_F_GUEST_TSO6, "RxLROv6" },
{ VIRTIO_NET_F_GUEST_ECN, "RxLROECN" },
{ VIRTIO_NET_F_GUEST_UFO, "RxUFO" },
{ VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" },
{ VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" },
{ VIRTIO_NET_F_HOST_ECN, "TxTSOECN" },
{ VIRTIO_NET_F_HOST_UFO, "TxUFO" },
{ VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" },
{ VIRTIO_NET_F_STATUS, "Status" },
{ VIRTIO_NET_F_CTRL_VQ, "CtrlVq" },
{ VIRTIO_NET_F_CTRL_RX, "CtrlRxMode" },
{ VIRTIO_NET_F_CTRL_VLAN, "CtrlVLANFilter" },
{ VIRTIO_NET_F_CTRL_RX_EXTRA, "CtrlRxModeExtra" },
{ VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" },
{ VIRTIO_NET_F_MQ, "Multiqueue" },
{ VIRTIO_NET_F_CTRL_MAC_ADDR, "CtrlMacAddr" },
{ VIRTIO_NET_F_SPEED_DUPLEX, "SpeedDuplex" },
{ 0, NULL }
};
static device_method_t vtnet_methods[] = {
/* Device methods. */
DEVMETHOD(device_probe, vtnet_probe),
DEVMETHOD(device_attach, vtnet_attach),
DEVMETHOD(device_detach, vtnet_detach),
DEVMETHOD(device_suspend, vtnet_suspend),
DEVMETHOD(device_resume, vtnet_resume),
DEVMETHOD(device_shutdown, vtnet_shutdown),
/* VirtIO methods. */
DEVMETHOD(virtio_attach_completed, vtnet_attach_completed),
DEVMETHOD(virtio_config_change, vtnet_config_change),
DEVMETHOD_END
};
#ifdef DEV_NETMAP
#include <dev/netmap/if_vtnet_netmap.h>
#endif
static driver_t vtnet_driver = {
.name = "vtnet",
.methods = vtnet_methods,
.size = sizeof(struct vtnet_softc)
};
VIRTIO_DRIVER_MODULE(vtnet, vtnet_driver, vtnet_modevent, NULL);
MODULE_VERSION(vtnet, 1);
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(vtnet, netmap, 1, 1, 1);
#endif
VIRTIO_SIMPLE_PNPINFO(vtnet, VIRTIO_ID_NETWORK, "VirtIO Networking Adapter");
/*
 * Module event handler: create/destroy the module-wide UMA zone used to
 * allocate per-packet transmit headers, and refuse to quiesce while any
 * headers are still outstanding.
 */
static int
vtnet_modevent(module_t mod __unused, int type, void *unused __unused)
{
int error = 0;
static int loaded = 0;
switch (type) {
case MOD_LOAD:
/* Create the zone only on the first load. */
if (loaded++ == 0) {
vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr",
sizeof(struct vtnet_tx_header),
NULL, NULL, NULL, NULL, 0, 0);
#ifdef DEBUGNET
/*
* We need to allocate from this zone in the transmit path, so ensure
* that we have at least one item per header available.
* XXX add a separate zone like we do for mbufs? otherwise we may alloc
* buckets
*/
uma_zone_reserve(vtnet_tx_header_zone, DEBUGNET_MAX_IN_FLIGHT * 2);
uma_prealloc(vtnet_tx_header_zone, DEBUGNET_MAX_IN_FLIGHT * 2);
#endif
}
break;
case MOD_QUIESCE:
/* In-flight transmit headers mean the driver is still in use. */
if (uma_zone_get_cur(vtnet_tx_header_zone) > 0)
error = EBUSY;
break;
case MOD_UNLOAD:
/* Tear down the zone when the last reference goes away. */
if (--loaded == 0) {
uma_zdestroy(vtnet_tx_header_zone);
vtnet_tx_header_zone = NULL;
}
break;
case MOD_SHUTDOWN:
break;
default:
error = EOPNOTSUPP;
break;
}
return (error);
}
/*
 * Probe: match the device against our VirtIO network PNP table.
 */
static int
vtnet_probe(device_t dev)
{
	int result;

	result = VIRTIO_SIMPLE_PROBE(dev, vtnet);
	return (result);
}
/*
 * Device attach: initialize locking and tunables, allocate the ifnet,
 * negotiate features, allocate Rx filters, queue pairs and virtqueues,
 * attach the Ethernet interface, and hook up interrupts.  On any
 * failure, vtnet_detach() tears down whatever was completed.
 */
static int
vtnet_attach(device_t dev)
{
struct vtnet_softc *sc;
int error;
sc = device_get_softc(dev);
sc->vtnet_dev = dev;
virtio_set_feature_desc(dev, vtnet_feature_desc);
VTNET_CORE_LOCK_INIT(sc);
/* Tick callout shares the core mutex so it is synchronized with stop. */
callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);
vtnet_load_tunables(sc);
- error = vtnet_alloc_interface(sc);
- if (error) {
- device_printf(dev, "cannot allocate interface\n");
- goto fail;
- }
-
+ vtnet_alloc_interface(sc);
vtnet_setup_sysctl(sc);
error = vtnet_setup_features(sc);
if (error) {
device_printf(dev, "cannot setup features\n");
goto fail;
}
error = vtnet_alloc_rx_filters(sc);
if (error) {
device_printf(dev, "cannot allocate Rx filters\n");
goto fail;
}
error = vtnet_alloc_rxtx_queues(sc);
if (error) {
device_printf(dev, "cannot allocate queues\n");
goto fail;
}
error = vtnet_alloc_virtqueues(sc);
if (error) {
device_printf(dev, "cannot allocate virtqueues\n");
goto fail;
}
error = vtnet_setup_interface(sc);
if (error) {
device_printf(dev, "cannot setup interface\n");
goto fail;
}
error = virtio_setup_intr(dev, INTR_TYPE_NET);
if (error) {
device_printf(dev, "cannot setup interrupts\n");
/* Interface was ether-attached above; undo that before detach. */
ether_ifdetach(sc->vtnet_ifp);
goto fail;
}
#ifdef DEV_NETMAP
vtnet_netmap_attach(sc);
#endif
vtnet_start_taskqueues(sc);
/* The success path also falls through here with error == 0. */
fail:
if (error)
vtnet_detach(dev);
return (error);
}
/*
 * Device detach.  Also used as the error-unwind path from vtnet_attach(),
 * so every teardown step must tolerate partially-initialized state.
 */
static int
vtnet_detach(device_t dev)
{
struct vtnet_softc *sc;
if_t ifp;
sc = device_get_softc(dev);
ifp = sc->vtnet_ifp;
if (device_is_attached(dev)) {
VTNET_CORE_LOCK(sc);
vtnet_stop(sc);
VTNET_CORE_UNLOCK(sc);
/* Wait for the tick callout and taskqueues to finish. */
callout_drain(&sc->vtnet_tick_ch);
vtnet_drain_taskqueues(sc);
ether_ifdetach(ifp);
}
#ifdef DEV_NETMAP
netmap_detach(ifp);
#endif
if (sc->vtnet_pfil != NULL) {
pfil_head_unregister(sc->vtnet_pfil);
sc->vtnet_pfil = NULL;
}
vtnet_free_taskqueues(sc);
if (sc->vtnet_vlan_attach != NULL) {
EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
sc->vtnet_vlan_attach = NULL;
}
if (sc->vtnet_vlan_detach != NULL) {
EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
sc->vtnet_vlan_detach = NULL;
}
ifmedia_removeall(&sc->vtnet_media);
if (ifp != NULL) {
if_free(ifp);
sc->vtnet_ifp = NULL;
}
vtnet_free_rxtx_queues(sc);
vtnet_free_rx_filters(sc);
if (sc->vtnet_ctrl_vq != NULL)
vtnet_free_ctrl_vq(sc);
VTNET_CORE_LOCK_DESTROY(sc);
return (0);
}
/*
 * Quiesce the device ahead of a system suspend and mark it suspended.
 */
static int
vtnet_suspend(device_t dev)
{
	struct vtnet_softc *sc = device_get_softc(dev);

	VTNET_CORE_LOCK(sc);
	vtnet_stop(sc);
	sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
	VTNET_CORE_UNLOCK(sc);

	return (0);
}
/*
 * Resume after suspend: reinitialize the interface if it was up and
 * clear the suspended flag.
 */
static int
vtnet_resume(device_t dev)
{
	struct vtnet_softc *sc = device_get_softc(dev);
	if_t ifp = sc->vtnet_ifp;

	VTNET_CORE_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) != 0)
		vtnet_init_locked(sc, 0);
	sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
	VTNET_CORE_UNLOCK(sc);

	return (0);
}
/*
 * Shutdown: quiesce exactly as a suspend would; we simply never
 * expect to be resumed afterwards.
 */
static int
vtnet_shutdown(device_t dev)
{
	int error;

	error = vtnet_suspend(dev);
	return (error);
}
/*
 * Called by the VirtIO bus once attach has fully completed; pick up
 * the MAC address now that the device is usable.
 */
static int
vtnet_attach_completed(device_t dev)
{
	struct vtnet_softc *sc = device_get_softc(dev);

	VTNET_CORE_LOCK(sc);
	vtnet_attached_set_macaddr(sc);
	VTNET_CORE_UNLOCK(sc);

	return (0);
}
/*
 * Configuration-change interrupt: refresh link state and, if the link
 * is now active, restart the transmit queues.
 */
static int
vtnet_config_change(device_t dev)
{
	struct vtnet_softc *sc = device_get_softc(dev);

	VTNET_CORE_LOCK(sc);
	vtnet_update_link_status(sc);
	if (sc->vtnet_link_active != 0)
		vtnet_tx_start_all(sc);
	VTNET_CORE_UNLOCK(sc);

	return (0);
}
/*
 * Negotiate the feature set with the host.  Starts from the modern or
 * legacy feature mask, masks off features disabled by tunables, then
 * renegotiates whenever a device-reported config value (MTU, queue
 * pair count) is invalid or an unsupportable combination (LRO without
 * mergeable buffers or indirect descriptors) was accepted.
 */
static int
vtnet_negotiate_features(struct vtnet_softc *sc)
{
device_t dev;
uint64_t features, negotiated_features;
int no_csum;
dev = sc->vtnet_dev;
features = virtio_bus_is_modern(dev) ? VTNET_MODERN_FEATURES :
VTNET_LEGACY_FEATURES;
/*
* TSO and LRO are only available when their corresponding checksum
* offload feature is also negotiated.
*/
no_csum = vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable);
if (no_csum)
features &= ~(VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM);
if (no_csum || vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
features &= ~VTNET_TSO_FEATURES;
if (no_csum || vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
features &= ~VTNET_LRO_FEATURES;
#ifndef VTNET_LEGACY_TX
if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
features &= ~VIRTIO_NET_F_MQ;
#else
/* Multiqueue is pointless with the legacy single-queue transmit path. */
features &= ~VIRTIO_NET_F_MQ;
#endif
negotiated_features = virtio_negotiate_features(dev, features);
/* Drop the MTU feature (and renegotiate) if the device value is bogus. */
if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) {
uint16_t mtu;
mtu = virtio_read_dev_config_2(dev,
offsetof(struct virtio_net_config, mtu));
if (mtu < VTNET_MIN_MTU /* || mtu > VTNET_MAX_MTU */) {
device_printf(dev, "Invalid MTU value: %d. "
"MTU feature disabled.\n", mtu);
features &= ~VIRTIO_NET_F_MTU;
negotiated_features =
virtio_negotiate_features(dev, features);
}
}
/* Likewise for an out-of-spec max_virtqueue_pairs value. */
if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) {
uint16_t npairs;
npairs = virtio_read_dev_config_2(dev,
offsetof(struct virtio_net_config, max_virtqueue_pairs));
if (npairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
npairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
device_printf(dev, "Invalid max_virtqueue_pairs value: "
"%d. Multiqueue feature disabled.\n", npairs);
features &= ~VIRTIO_NET_F_MQ;
negotiated_features =
virtio_negotiate_features(dev, features);
}
}
if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
/*
* LRO without mergeable buffers requires special care. This
* is not ideal because every receive buffer must be large
* enough to hold the maximum TCP packet, the Ethernet header,
* and the header. This requires up to 34 descriptors with
* MCLBYTES clusters. If we do not have indirect descriptors,
* LRO is disabled since the virtqueue will not contain very
* many receive buffers.
*/
if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
device_printf(dev,
"Host LRO disabled since both mergeable buffers "
"and indirect descriptors were not negotiated\n");
features &= ~VTNET_LRO_FEATURES;
negotiated_features =
virtio_negotiate_features(dev, features);
} else
sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
}
sc->vtnet_features = negotiated_features;
sc->vtnet_negotiated_features = negotiated_features;
return (virtio_finalize_features(dev));
}
/*
 * Translate the negotiated feature bits into softc flags and derived
 * parameters: header size, Rx/Tx segment counts, maximum MTU, and the
 * number of requested virtqueue pairs.
 */
static int
vtnet_setup_features(struct vtnet_softc *sc)
{
device_t dev;
int error;
dev = sc->vtnet_dev;
error = vtnet_negotiate_features(sc);
if (error)
return (error);
if (virtio_with_feature(dev, VIRTIO_F_VERSION_1))
sc->vtnet_flags |= VTNET_FLAG_MODERN;
if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
sc->vtnet_flags |= VTNET_FLAG_INDIRECT;
if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
sc->vtnet_flags |= VTNET_FLAG_EVENT_IDX;
if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
/* This feature should always be negotiated. */
sc->vtnet_flags |= VTNET_FLAG_MAC;
}
/* Device-supplied MTU limit, if the MTU feature survived negotiation. */
if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) {
sc->vtnet_max_mtu = virtio_read_dev_config_2(dev,
offsetof(struct virtio_net_config, mtu));
} else
sc->vtnet_max_mtu = VTNET_MAX_MTU;
if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
} else if (vtnet_modern(sc)) {
/* This is identical to the mergeable header. */
sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_v1);
} else
sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
/* Pick how many descriptors each receive buffer chain consumes. */
if (vtnet_modern(sc) || sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_INLINE;
else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
sc->vtnet_rx_nsegs = VTNET_RX_SEGS_LRO_NOMRG;
else
sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_SEPARATE;
/*
* Favor "hardware" LRO if negotiated, but support software LRO as
* a fallback; there is usually little benefit (or worse) with both.
*/
if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) == 0 &&
virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6) == 0)
sc->vtnet_flags |= VTNET_FLAG_SW_LRO;
if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) ||
virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MAX;
else
sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MIN;
sc->vtnet_req_vq_pairs = 1;
sc->vtnet_max_vq_pairs = 1;
if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;
if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR))
sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) {
sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev,
offsetof(struct virtio_net_config,
max_virtqueue_pairs));
}
}
if (sc->vtnet_max_vq_pairs > 1) {
int req;
/*
* Limit the maximum number of requested queue pairs to the
* number of CPUs and the configured maximum.
*/
req = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
if (req < 0)
req = 1;
if (req == 0)
req = mp_ncpus;
if (req > sc->vtnet_max_vq_pairs)
req = sc->vtnet_max_vq_pairs;
if (req > mp_ncpus)
req = mp_ncpus;
if (req > 1) {
sc->vtnet_req_vq_pairs = req;
sc->vtnet_flags |= VTNET_FLAG_MQ;
}
}
return (0);
}
/*
 * Initialize one receive queue structure: mutex, scatter/gather list,
 * optional software LRO state, and its interrupt taskqueue.
 * Returns ENOMEM on allocation failure; vtnet_destroy_rxq() cleans up.
 */
static int
vtnet_init_rxq(struct vtnet_softc *sc, int id)
{
struct vtnet_rxq *rxq;
rxq = &sc->vtnet_rxqs[id];
snprintf(rxq->vtnrx_name, sizeof(rxq->vtnrx_name), "%s-rx%d",
device_get_nameunit(sc->vtnet_dev), id);
mtx_init(&rxq->vtnrx_mtx, rxq->vtnrx_name, NULL, MTX_DEF);
rxq->vtnrx_sc = sc;
rxq->vtnrx_id = id;
rxq->vtnrx_sg = sglist_alloc(sc->vtnet_rx_nsegs, M_NOWAIT);
if (rxq->vtnrx_sg == NULL)
return (ENOMEM);
#if defined(INET) || defined(INET6)
/* Software LRO state is only needed when hardware LRO is absent. */
if (vtnet_software_lro(sc)) {
if (tcp_lro_init_args(&rxq->vtnrx_lro, sc->vtnet_ifp,
sc->vtnet_lro_entry_count, sc->vtnet_lro_mbufq_depth) != 0)
return (ENOMEM);
}
#endif
NET_TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
taskqueue_thread_enqueue, &rxq->vtnrx_tq);
return (rxq->vtnrx_tq == NULL ? ENOMEM : 0);
}
/*
 * Initialize one transmit queue structure: mutex, scatter/gather list,
 * buf_ring (multiqueue transmit only), and its taskqueue.
 * Returns ENOMEM on allocation failure; vtnet_destroy_txq() cleans up.
 */
static int
vtnet_init_txq(struct vtnet_softc *sc, int id)
{
struct vtnet_txq *txq;
txq = &sc->vtnet_txqs[id];
snprintf(txq->vtntx_name, sizeof(txq->vtntx_name), "%s-tx%d",
device_get_nameunit(sc->vtnet_dev), id);
mtx_init(&txq->vtntx_mtx, txq->vtntx_name, NULL, MTX_DEF);
txq->vtntx_sc = sc;
txq->vtntx_id = id;
txq->vtntx_sg = sglist_alloc(sc->vtnet_tx_nsegs, M_NOWAIT);
if (txq->vtntx_sg == NULL)
return (ENOMEM);
#ifndef VTNET_LEGACY_TX
/* Per-queue buf_ring used by the if_transmit (multiqueue) path. */
txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF,
M_NOWAIT, &txq->vtntx_mtx);
if (txq->vtntx_br == NULL)
return (ENOMEM);
TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq);
#endif
TASK_INIT(&txq->vtntx_intrtask, 0, vtnet_txq_tq_intr, txq);
txq->vtntx_tq = taskqueue_create(txq->vtntx_name, M_NOWAIT,
taskqueue_thread_enqueue, &txq->vtntx_tq);
if (txq->vtntx_tq == NULL)
return (ENOMEM);
return (0);
}
static int
vtnet_alloc_rxtx_queues(struct vtnet_softc *sc)
{
int i, npairs, error;
npairs = sc->vtnet_max_vq_pairs;
sc->vtnet_rxqs = malloc(sizeof(struct vtnet_rxq) * npairs, M_DEVBUF,
M_NOWAIT | M_ZERO);
sc->vtnet_txqs = malloc(sizeof(struct vtnet_txq) * npairs, M_DEVBUF,
M_NOWAIT | M_ZERO);
if (sc->vtnet_rxqs == NULL || sc->vtnet_txqs == NULL)
return (ENOMEM);
for (i = 0; i < npairs; i++) {
error = vtnet_init_rxq(sc, i);
if (error)
return (error);
error = vtnet_init_txq(sc, i);
if (error)
return (error);
}
vtnet_set_rx_process_limit(sc);
vtnet_setup_queue_sysctl(sc);
return (0);
}
/*
 * Release the resources held by one receive queue structure.
 * Safe to call on a partially-initialized queue.
 */
static void
vtnet_destroy_rxq(struct vtnet_rxq *rxq)
{
	rxq->vtnrx_sc = NULL;
	rxq->vtnrx_id = -1;

#if defined(INET) || defined(INET6)
	tcp_lro_free(&rxq->vtnrx_lro);
#endif

	if (rxq->vtnrx_sg != NULL) {
		sglist_free(rxq->vtnrx_sg);
		rxq->vtnrx_sg = NULL;
	}

	/* The mutex may never have been initialized if attach failed early. */
	if (mtx_initialized(&rxq->vtnrx_mtx) != 0)
		mtx_destroy(&rxq->vtnrx_mtx);
}
/*
 * Release the resources held by one transmit queue structure.
 * Safe to call on a partially-initialized queue.
 */
static void
vtnet_destroy_txq(struct vtnet_txq *txq)
{
	txq->vtntx_sc = NULL;
	txq->vtntx_id = -1;

	if (txq->vtntx_sg != NULL) {
		sglist_free(txq->vtntx_sg);
		txq->vtntx_sg = NULL;
	}

#ifndef VTNET_LEGACY_TX
	if (txq->vtntx_br != NULL) {
		buf_ring_free(txq->vtntx_br, M_DEVBUF);
		txq->vtntx_br = NULL;
	}
#endif

	/* The mutex may never have been initialized if attach failed early. */
	if (mtx_initialized(&txq->vtntx_mtx) != 0)
		mtx_destroy(&txq->vtntx_mtx);
}
/*
 * Tear down and free every receive and transmit queue structure
 * allocated by vtnet_alloc_rxtx_queues().
 */
static void
vtnet_free_rxtx_queues(struct vtnet_softc *sc)
{
	int i;

	if (sc->vtnet_rxqs != NULL) {
		for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
			vtnet_destroy_rxq(&sc->vtnet_rxqs[i]);

		free(sc->vtnet_rxqs, M_DEVBUF);
		sc->vtnet_rxqs = NULL;
	}

	if (sc->vtnet_txqs != NULL) {
		for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
			vtnet_destroy_txq(&sc->vtnet_txqs[i]);

		free(sc->vtnet_txqs, M_DEVBUF);
		sc->vtnet_txqs = NULL;
	}
}
/*
 * Allocate the host-programmable MAC and VLAN filter tables, but only
 * for the filtering modes the negotiated features actually support.
 */
static int
vtnet_alloc_rx_filters(struct vtnet_softc *sc)
{
	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) != 0) {
		sc->vtnet_mac_filter = malloc(sizeof(struct vtnet_mac_filter),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
		if (sc->vtnet_mac_filter == NULL)
			return (ENOMEM);
	}

	if ((sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) != 0) {
		sc->vtnet_vlan_filter = malloc(VTNET_VLAN_FILTER_NWORDS *
		    sizeof(uint32_t), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (sc->vtnet_vlan_filter == NULL)
			return (ENOMEM);
	}

	return (0);
}
/*
 * Free the MAC and VLAN filter tables, if they were allocated.
 */
static void
vtnet_free_rx_filters(struct vtnet_softc *sc)
{
	free(sc->vtnet_mac_filter, M_DEVBUF);
	sc->vtnet_mac_filter = NULL;

	free(sc->vtnet_vlan_filter, M_DEVBUF);
	sc->vtnet_vlan_filter = NULL;
}
/*
 * Allocate all virtqueues: one Rx/Tx pair per supported queue pair
 * (interrupt handlers only for the pairs actually requested), plus the
 * control virtqueue when negotiated.  The vq_alloc_info array is laid
 * out Rx0,Tx0,Rx1,Tx1,...,ctrl to match the device's queue indexing.
 */
static int
vtnet_alloc_virtqueues(struct vtnet_softc *sc)
{
device_t dev;
struct vq_alloc_info *info;
struct vtnet_rxq *rxq;
struct vtnet_txq *txq;
int i, idx, nvqs, error;
dev = sc->vtnet_dev;
nvqs = sc->vtnet_max_vq_pairs * 2;
if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
nvqs++;
info = malloc(sizeof(struct vq_alloc_info) * nvqs, M_TEMP, M_NOWAIT);
if (info == NULL)
return (ENOMEM);
/* Queue pairs we intend to use get interrupt handlers. */
for (i = 0, idx = 0; i < sc->vtnet_req_vq_pairs; i++, idx += 2) {
rxq = &sc->vtnet_rxqs[i];
VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs,
vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
"%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);
txq = &sc->vtnet_txqs[i];
VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs,
vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
"%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
}
/* These queues will not be used so allocate the minimum resources. */
for (/**/; i < sc->vtnet_max_vq_pairs; i++, idx += 2) {
rxq = &sc->vtnet_rxqs[i];
VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, rxq, &rxq->vtnrx_vq,
"%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);
txq = &sc->vtnet_txqs[i];
VQ_ALLOC_INFO_INIT(&info[idx+1], 0, NULL, txq, &txq->vtntx_vq,
"%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
}
/* The control virtqueue is polled, so it takes no interrupt handler. */
if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL,
&sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev));
}
error = virtio_alloc_virtqueues(dev, nvqs, info);
free(info, M_TEMP);
return (error);
}
/*
 * Allocate the ifnet and bind it to the softc.  if_alloc() cannot fail,
 * which is why this no longer returns an error (see the diff context).
 */
-static int
+static void
vtnet_alloc_interface(struct vtnet_softc *sc)
{
device_t dev;
if_t ifp;
dev = sc->vtnet_dev;
ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL)
- return (ENOMEM);
-
sc->vtnet_ifp = ifp;
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
-
- return (0);
}
/*
 * Configure the ifnet from the negotiated features: transmit entry
 * points, checksum/TSO/LRO/VLAN capabilities, media, VLAN event
 * handlers, and finally attach the Ethernet interface and register the
 * pfil head.
 */
static int
vtnet_setup_interface(struct vtnet_softc *sc)
{
device_t dev;
struct pfil_head_args pa;
if_t ifp;
dev = sc->vtnet_dev;
ifp = sc->vtnet_ifp;
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setbaudrate(ifp, IF_Gbps(10));
if_setinitfn(ifp, vtnet_init);
if_setioctlfn(ifp, vtnet_ioctl);
if_setgetcounterfn(ifp, vtnet_get_counter);
#ifndef VTNET_LEGACY_TX
/* Multiqueue transmit path (if_transmit/qflush). */
if_settransmitfn(ifp, vtnet_txq_mq_start);
if_setqflushfn(ifp, vtnet_qflush);
#else
/* Legacy single-queue if_start path; sendq sized from the Tx VQ. */
struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq;
if_setstartfn(ifp, vtnet_start);
if_setsendqlen(ifp, virtqueue_size(vq) - 1);
if_setsendqready(ifp);
#endif
vtnet_get_macaddr(sc);
if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
if_setcapabilitiesbit(ifp, IFCAP_LINKSTATE, 0);
ifmedia_init(&sc->vtnet_media, 0, vtnet_ifmedia_upd, vtnet_ifmedia_sts);
ifmedia_add(&sc->vtnet_media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&sc->vtnet_media, IFM_ETHER | IFM_AUTO);
if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
int gso;
if_setcapabilitiesbit(ifp, IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6, 0);
gso = virtio_with_feature(dev, VIRTIO_NET_F_GSO);
if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0);
if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
if_setcapabilitiesbit(ifp, IFCAP_TSO6, 0);
if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
if (if_getcapabilities(ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
int tso_maxlen;
if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTSO, 0);
tso_maxlen = vtnet_tunable_int(sc, "tso_maxlen",
vtnet_tso_maxlen);
/* Leave headroom for an Ethernet + VLAN header in TSO bursts. */
if_sethwtsomax(ifp, tso_maxlen -
(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
if_sethwtsomaxsegcount(ifp, sc->vtnet_tx_nsegs - 1);
if_sethwtsomaxsegsize(ifp, PAGE_SIZE);
}
}
if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
if_setcapabilitiesbit(ifp, IFCAP_RXCSUM, 0);
#ifdef notyet
/* BMV: Rx checksums not distinguished between IPv4 and IPv6. */
if_setcapabilitiesbit(ifp, IFCAP_RXCSUM_IPV6, 0);
#endif
if (vtnet_tunable_int(sc, "fixup_needs_csum",
vtnet_fixup_needs_csum) != 0)
sc->vtnet_flags |= VTNET_FLAG_FIXUP_NEEDS_CSUM;
/* Support either "hardware" or software LRO. */
if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
}
if (if_getcapabilities(ifp) & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6)) {
/*
* VirtIO does not support VLAN tagging, but we can fake
* it by inserting and removing the 802.1Q header during
* transmit and receive. We are then able to do checksum
* offloading of VLAN frames.
*/
if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM, 0);
}
if (sc->vtnet_max_mtu >= ETHERMTU_JUMBO)
if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0);
if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
/*
* Capabilities after here are not enabled by default.
*/
if_setcapenable(ifp, if_getcapabilities(ifp));
if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0);
sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
}
ether_ifattach(ifp, sc->vtnet_hwaddr);
/* Tell the upper layer(s) we support long frames. */
if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
DEBUGNET_SET(ifp, vtnet);
pa.pa_version = PFIL_VERSION;
pa.pa_flags = PFIL_IN;
pa.pa_type = PFIL_TYPE_ETHERNET;
pa.pa_headname = if_name(ifp);
sc->vtnet_pfil = pfil_head_register(&pa);
return (0);
}
/*
 * Select the receive mbuf cluster size for a given MTU.  Mergeable
 * buffers always use page-sized clusters and the LRO_NOMRG chained
 * layout always uses MCLBYTES; otherwise the size is scaled from the
 * worst-case frame (header + Ethernet/VLAN header + MTU).
 */
static int
vtnet_rx_cluster_size(struct vtnet_softc *sc, int mtu)
{
	int framesz;

	if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
		return (MJUMPAGESIZE);
	if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
		return (MCLBYTES);

	/*
	 * Try to scale the receive mbuf cluster size from the MTU. We
	 * could also use the VQ size to influence the selected size,
	 * but that would only matter for very small queues.
	 */
	if (vtnet_modern(sc)) {
		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr_v1));
		framesz = sizeof(struct virtio_net_hdr_v1);
	} else
		framesz = sizeof(struct vtnet_rx_header);
	framesz += sizeof(struct ether_vlan_header) + mtu;

	/*
	 * Account for the alignment offsetting performed when receive
	 * buffers are allocated, so the chosen cluster still fits the MTU.
	 */
	if (VTNET_ETHER_ALIGN != 0 && sc->vtnet_hdr_size % 4 == 0)
		framesz += VTNET_ETHER_ALIGN;

	if (framesz <= MCLBYTES)
		return (MCLBYTES);
	if (framesz <= MJUMPAGESIZE)
		return (MJUMPAGESIZE);
	if (framesz <= MJUM9BYTES)
		return (MJUM9BYTES);

	/* Sane default; avoid 16KB clusters. */
	return (MCLBYTES);
}
/*
 * Handle SIOCSIFMTU.  Validates the new MTU against the device limit
 * and reinitializes the interface only when the new MTU requires a
 * different receive cluster size.
 */
static int
vtnet_ioctl_mtu(struct vtnet_softc *sc, u_int mtu)
{
	if_t ifp = sc->vtnet_ifp;
	int clustersz;

	VTNET_CORE_LOCK_ASSERT(sc);

	if (if_getmtu(ifp) == mtu)
		return (0);
	if (mtu < ETHERMIN || mtu > sc->vtnet_max_mtu)
		return (EINVAL);

	if_setmtu(ifp, mtu);

	clustersz = vtnet_rx_cluster_size(sc, mtu);
	if (clustersz != sc->vtnet_rx_clustersz &&
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		vtnet_init_locked(sc, 0);
	}

	return (0);
}
/*
 * Handle SIOCSIFFLAGS: bring the interface up or down, and reprogram
 * the host receive filter when PROMISC/ALLMULTI changed.  Always
 * records the current flags so the next change can be detected.
 */
static int
vtnet_ioctl_ifflags(struct vtnet_softc *sc)
{
if_t ifp;
int drv_running;
ifp = sc->vtnet_ifp;
drv_running = (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0;
VTNET_CORE_LOCK_ASSERT(sc);
if ((if_getflags(ifp) & IFF_UP) == 0) {
if (drv_running)
vtnet_stop(sc);
goto out;
}
if (!drv_running) {
vtnet_init_locked(sc, 0);
goto out;
}
/* Interface is up and running: only the filter flags may have changed. */
if ((if_getflags(ifp) ^ sc->vtnet_if_flags) &
(IFF_PROMISC | IFF_ALLMULTI)) {
if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
vtnet_rx_filter(sc);
else {
/*
* We don't support filtering out multicast, so
* ALLMULTI is always set.
*/
if_setflagbits(ifp, IFF_ALLMULTI, 0);
if_setflagbits(ifp, IFF_PROMISC, 0);
}
}
out:
sc->vtnet_if_flags = if_getflags(ifp);
return (0);
}
/*
 * Handle SIOCADDMULTI/SIOCDELMULTI: reprogram the host MAC filter,
 * but only when the control-Rx feature is present and the interface
 * is running.
 */
static int
vtnet_ioctl_multi(struct vtnet_softc *sc)
{
	if_t ifp = sc->vtnet_ifp;

	VTNET_CORE_LOCK_ASSERT(sc);

	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) != 0 &&
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		vtnet_rx_filter_mac(sc);

	return (0);
}
/*
 * Handle SIOCSIFCAP: toggle interface capabilities.
 *
 * Transmit offloads can be flipped directly.  Receive offloads require
 * either a device offload update (VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) or
 * a full reinit, except that toggling software-only LRO needs neither.
 * VLAN hardware filtering always requires a reinit.
 *
 * Fix: the original nested a second `if (mask & IFCAP_VLAN_HWFILTER)`
 * inside a block already guarded by the identical condition; the
 * redundant inner test is removed.
 */
static int
vtnet_ioctl_ifcap(struct vtnet_softc *sc, struct ifreq *ifr)
{
	if_t ifp;
	int mask, reinit, update;

	ifp = sc->vtnet_ifp;
	/* Only consider capabilities the interface actually advertises. */
	mask = (ifr->ifr_reqcap & if_getcapabilities(ifp)) ^ if_getcapenable(ifp);
	reinit = update = 0;

	VTNET_CORE_LOCK_ASSERT(sc);

	if (mask & IFCAP_TXCSUM)
		if_togglecapenable(ifp, IFCAP_TXCSUM);
	if (mask & IFCAP_TXCSUM_IPV6)
		if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
	if (mask & IFCAP_TSO4)
		if_togglecapenable(ifp, IFCAP_TSO4);
	if (mask & IFCAP_TSO6)
		if_togglecapenable(ifp, IFCAP_TSO6);

	if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) {
		/*
		 * These Rx features require the negotiated features to
		 * be updated. Avoid a full reinit if possible.
		 */
		if (sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
			update = 1;
		else
			reinit = 1;

		/* BMV: Avoid needless renegotiation for just software LRO. */
		if ((mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) ==
		    IFCAP_LRO && vtnet_software_lro(sc))
			reinit = update = 0;

		if (mask & IFCAP_RXCSUM)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if (mask & IFCAP_RXCSUM_IPV6)
			if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6);
		if (mask & IFCAP_LRO)
			if_togglecapenable(ifp, IFCAP_LRO);

		/*
		 * VirtIO does not distinguish between IPv4 and IPv6 checksums
		 * so treat them as a pair. Guest TSO (LRO) requires receive
		 * checksums.
		 */
		if (if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
			if_setcapenablebit(ifp, IFCAP_RXCSUM, 0);
#ifdef notyet
			if_setcapenablebit(ifp, IFCAP_RXCSUM_IPV6, 0);
#endif
		} else
			if_setcapenablebit(ifp, 0,
			    (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO));
	}

	if (mask & IFCAP_VLAN_HWFILTER) {
		/* This Rx feature requires renegotiation. */
		reinit = 1;
		if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER);
	}

	if (mask & IFCAP_VLAN_HWTSO)
		if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
	if (mask & IFCAP_VLAN_HWTAGGING)
		if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
		if (reinit) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			vtnet_init_locked(sc, 0);
		} else if (update)
			vtnet_update_rx_offloads(sc);
	}

	return (0);
}
/*
 * Interface ioctl entry point.  Dispatches to the per-request helpers
 * under the core lock; everything else is passed to ether_ioctl().
 */
static int
vtnet_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
struct vtnet_softc *sc;
struct ifreq *ifr;
int error;
sc = if_getsoftc(ifp);
ifr = (struct ifreq *) data;
error = 0;
switch (cmd) {
case SIOCSIFMTU:
VTNET_CORE_LOCK(sc);
error = vtnet_ioctl_mtu(sc, ifr->ifr_mtu);
VTNET_CORE_UNLOCK(sc);
break;
case SIOCSIFFLAGS:
VTNET_CORE_LOCK(sc);
error = vtnet_ioctl_ifflags(sc);
VTNET_CORE_UNLOCK(sc);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
VTNET_CORE_LOCK(sc);
error = vtnet_ioctl_multi(sc);
VTNET_CORE_UNLOCK(sc);
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
break;
case SIOCSIFCAP:
VTNET_CORE_LOCK(sc);
error = vtnet_ioctl_ifcap(sc, ifr);
VTNET_CORE_UNLOCK(sc);
/* Propagate capability changes to any VLAN interfaces. */
VLAN_CAPABILITIES(ifp);
break;
default:
error = ether_ioctl(ifp, cmd, data);
break;
}
/* Helpers must not return with the core lock held. */
VTNET_CORE_LOCK_ASSERT_NOTOWNED(sc);
return (error);
}
/*
 * Fill a receive virtqueue with buffers until it is full, then notify
 * the host.  When netmap owns the ring, its populate routine is used
 * instead (a negative return from it means "not in netmap mode").
 */
static int
vtnet_rxq_populate(struct vtnet_rxq *rxq)
{
struct virtqueue *vq;
int nbufs, error;
#ifdef DEV_NETMAP
error = vtnet_netmap_rxq_populate(rxq);
if (error >= 0)
return (error);
#endif /* DEV_NETMAP */
vq = rxq->vtnrx_vq;
/* ENOSPC is returned if the queue was already full (nbufs == 0). */
error = ENOSPC;
for (nbufs = 0; !virtqueue_full(vq); nbufs++) {
error = vtnet_rxq_new_buf(rxq);
if (error)
break;
}
if (nbufs > 0) {
virtqueue_notify(vq);
/*
* EMSGSIZE signifies the virtqueue did not have enough
* entries available to hold the last mbuf. This is not
* an error.
*/
if (error == EMSGSIZE)
error = 0;
}
return (error);
}
/*
 * Drain every buffer from a receive virtqueue.  When netmap owns the
 * ring the cookies are not real mbufs and must not be freed here, so
 * m_freem() is skipped in that case.
 */
static void
vtnet_rxq_free_mbufs(struct vtnet_rxq *rxq)
{
struct virtqueue *vq;
struct mbuf *m;
int last;
#ifdef DEV_NETMAP
struct netmap_kring *kring = netmap_kring_on(NA(rxq->vtnrx_sc->vtnet_ifp),
rxq->vtnrx_id, NR_RX);
#else /* !DEV_NETMAP */
void *kring = NULL;
#endif /* !DEV_NETMAP */
vq = rxq->vtnrx_vq;
last = 0;
while ((m = virtqueue_drain(vq, &last)) != NULL) {
/* kring != NULL means netmap owns the buffers. */
if (kring == NULL)
m_freem(m);
}
KASSERT(virtqueue_empty(vq),
("%s: mbufs remaining in rx queue %p", __func__, rxq));
}
/*
 * Allocate a receive buffer: a chain of nbufs cluster mbufs (chains
 * longer than one are only used in LRO_NOMRG mode).  Optionally returns
 * the tail of the chain through m_tailp.  Returns NULL on allocation
 * failure (the stat counter is bumped and any partial chain is freed).
 */
static struct mbuf *
vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
{
struct mbuf *m_head, *m_tail, *m;
int i, size;
m_head = NULL;
size = sc->vtnet_rx_clustersz;
KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
("%s: mbuf %d chain requested without LRO_NOMRG", __func__, nbufs));
for (i = 0; i < nbufs; i++) {
/* Only the first mbuf in the chain carries the pkthdr. */
m = m_getjcl(M_NOWAIT, MT_DATA, i == 0 ? M_PKTHDR : 0, size);
if (m == NULL) {
sc->vtnet_stats.mbuf_alloc_failed++;
m_freem(m_head);
return (NULL);
}
m->m_len = size;
/*
* Need to offset the mbuf if the header we're going to add
* will misalign.
*/
if (VTNET_ETHER_ALIGN != 0 && sc->vtnet_hdr_size % 4 == 0) {
m_adj(m, VTNET_ETHER_ALIGN);
}
if (m_head != NULL) {
m_tail->m_next = m;
m_tail = m;
} else
m_head = m_tail = m;
}
if (m_tailp != NULL)
*m_tailp = m_tail;
return (m_head);
}
/*
 * Slow path for when LRO without mergeable buffers is negotiated.
 *
 * Replace only the mbufs of the chain that the received frame (len0
 * bytes) actually consumed; any unused tail mbufs of the old chain are
 * spliced onto the freshly allocated replacement before it is enqueued.
 * On failure the original chain is restored so the caller can discard
 * it cleanly.
 */
static int
vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *rxq, struct mbuf *m0,
int len0)
{
struct vtnet_softc *sc;
struct mbuf *m, *m_prev, *m_new, *m_tail;
int len, clustersz, nreplace, error;
sc = rxq->vtnrx_sc;
clustersz = sc->vtnet_rx_clustersz;
/*
* Need to offset the mbuf if the header we're going to add will
* misalign, account for that here.
*/
if (VTNET_ETHER_ALIGN != 0 && sc->vtnet_hdr_size % 4 == 0)
clustersz -= VTNET_ETHER_ALIGN;
m_prev = NULL;
m_tail = NULL;
nreplace = 0;
m = m0;
len = len0;
/*
* Since these mbuf chains are so large, avoid allocating a complete
* replacement when the received frame did not consume the entire
* chain. Unused mbufs are moved to the tail of the replacement mbuf.
*/
while (len > 0) {
if (m == NULL) {
sc->vtnet_stats.rx_frame_too_large++;
return (EMSGSIZE);
}
/*
* Every mbuf should have the expected cluster size since that
* is also used to allocate the replacements.
*/
KASSERT(m->m_len == clustersz,
("%s: mbuf size %d not expected cluster size %d", __func__,
m->m_len, clustersz));
/* Trim the last consumed mbuf to the remaining frame length. */
m->m_len = MIN(m->m_len, len);
len -= m->m_len;
m_prev = m;
m = m->m_next;
nreplace++;
}
KASSERT(nreplace > 0 && nreplace <= sc->vtnet_rx_nmbufs,
("%s: invalid replacement mbuf count %d max %d", __func__,
nreplace, sc->vtnet_rx_nmbufs));
m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail);
if (m_new == NULL) {
/* Undo the trim above before reporting failure. */
m_prev->m_len = clustersz;
return (ENOBUFS);
}
/*
* Move any unused mbufs from the received mbuf chain onto the
* end of the replacement chain.
*/
if (m_prev->m_next != NULL) {
m_tail->m_next = m_prev->m_next;
m_prev->m_next = NULL;
}
error = vtnet_rxq_enqueue_buf(rxq, m_new);
if (error) {
/*
* The replacement is suppose to be an copy of the one
* dequeued so this is a very unexpected error.
*
* Restore the m0 chain to the original state if it was
* modified so we can then discard it.
*/
if (m_tail->m_next != NULL) {
m_prev->m_next = m_tail->m_next;
m_tail->m_next = NULL;
}
m_prev->m_len = clustersz;
sc->vtnet_stats.rx_enq_replacement_failed++;
m_freem(m_new);
}
return (error);
}
/*
 * Replace a received buffer with a fresh one so the original can be
 * handed up the stack.  The original mbuf's length is set to the
 * received frame length only after the replacement was successfully
 * enqueued.
 */
static int
vtnet_rxq_replace_buf(struct vtnet_rxq *rxq, struct mbuf *m, int len)
{
	struct vtnet_softc *sc = rxq->vtnrx_sc;
	struct mbuf *m_new;
	int error;

	/* Large multi-mbuf chains take the LRO_NOMRG slow path. */
	if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
		return (vtnet_rxq_replace_lro_nomrg_buf(rxq, m, len));

	MPASS(m->m_next == NULL);
	if (m->m_len < len)
		return (EMSGSIZE);

	m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
	if (m_new == NULL)
		return (ENOBUFS);

	error = vtnet_rxq_enqueue_buf(rxq, m_new);
	if (error != 0) {
		sc->vtnet_stats.rx_enq_replacement_failed++;
		m_freem(m_new);
	} else
		m->m_len = len;

	return (error);
}
/*
 * Post an Rx mbuf (or, with LRO_NOMRG, an mbuf chain) to the Rx
 * virtqueue.  Builds the scatter/gather list covering the virtio
 * header and the data area, then enqueues with the mbuf itself as
 * the dequeue cookie.  Returns 0 or an sglist/virtqueue errno.
 */
static int
vtnet_rxq_enqueue_buf(struct vtnet_rxq *rxq, struct mbuf *m)
{
	struct vtnet_softc *sc;
	struct sglist *sg;
	int header_inlined, error;
	sc = rxq->vtnrx_sc;
	sg = rxq->vtnrx_sg;
	/* Only the LRO_NOMRG configuration posts multi-mbuf chains. */
	KASSERT(m->m_next == NULL || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
	    ("%s: mbuf chain without LRO_NOMRG", __func__));
	VTNET_RXQ_LOCK_ASSERT(rxq);
	sglist_reset(sg);
	/*
	 * Modern (V1) and mergeable-buffer layouts keep the virtio header
	 * inline at the start of the buffer; the legacy layout uses a
	 * separate vtnet_rx_header with a pad gap before the frame data.
	 */
	header_inlined = vtnet_modern(sc) ||
	    (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) != 0; /* TODO: ANY_LAYOUT */
	/*
	 * Note: The mbuf has been already adjusted when we allocate it if we
	 * have to do strict alignment.
	 */
	if (header_inlined)
		error = sglist_append_mbuf(sg, m);
	else {
		struct vtnet_rx_header *rxhdr =
		    mtod(m, struct vtnet_rx_header *);
		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));
		/* Append the header and remaining mbuf data. */
		error = sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
		if (error)
			return (error);
		error = sglist_append(sg, &rxhdr[1],
		    m->m_len - sizeof(struct vtnet_rx_header));
		if (error)
			return (error);
		if (m->m_next != NULL)
			error = sglist_append_mbuf(sg, m->m_next);
	}
	if (error)
		return (error);
	/* All segments are device-writable (0 readable segments). */
	return (virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg));
}
static int
vtnet_rxq_new_buf(struct vtnet_rxq *rxq)
{
struct vtnet_softc *sc;
struct mbuf *m;
int error;
sc = rxq->vtnrx_sc;
m = vtnet_rx_alloc_buf(sc, sc->vtnet_rx_nmbufs, NULL);
if (m == NULL)
return (ENOBUFS);
error = vtnet_rxq_enqueue_buf(rxq, m);
if (error)
m_freem(m);
return (error);
}
/*
 * Handle a received packet marked VIRTIO_NET_HDR_F_NEEDS_CSUM: the host
 * provided only a partial (pseudo-header) checksum.  Either accept the
 * packet as validated, or finish the checksum in software when
 * VTNET_FLAG_FIXUP_NEEDS_CSUM is set.  Returns 0 on success, nonzero
 * when the packet cannot be handled (caller counts a csum failure).
 */
static int
vtnet_rxq_csum_needs_csum(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t etype,
    int hoff, struct virtio_net_hdr *hdr)
{
	struct vtnet_softc *sc;
	int error;
	sc = rxq->vtnrx_sc;
	/*
	 * NEEDS_CSUM corresponds to Linux's CHECKSUM_PARTIAL, but FreeBSD does
	 * not have an analogous CSUM flag. The checksum has been validated,
	 * but is incomplete (TCP/UDP pseudo header).
	 *
	 * The packet is likely from another VM on the same host that itself
	 * performed checksum offloading so Tx/Rx is basically a memcpy and
	 * the checksum has little value.
	 *
	 * Default to receiving the packet as-is for performance reasons, but
	 * this can cause issues if the packet is to be forwarded because it
	 * does not contain a valid checksum. This patch may be helpful:
	 * https://reviews.freebsd.org/D6611. In the meantime, have the driver
	 * compute the checksum if requested.
	 *
	 * BMV: Need to add an CSUM_PARTIAL flag?
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_FIXUP_NEEDS_CSUM) == 0) {
		error = vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr);
		return (error);
	}
	/*
	 * Compute the checksum in the driver so the packet will contain a
	 * valid checksum. The checksum is at csum_offset from csum_start.
	 */
	switch (etype) {
#if defined(INET) || defined(INET6)
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6: {
		int csum_off, csum_end;
		uint16_t csum;
		csum_off = hdr->csum_start + hdr->csum_offset;
		csum_end = csum_off + sizeof(uint16_t);
		/* Assume checksum will be in the first mbuf. */
		if (m->m_len < csum_end || m->m_pkthdr.len < csum_end)
			return (1);
		/*
		 * Like in_delayed_cksum()/in6_delayed_cksum(), compute the
		 * checksum and write it at the specified offset. We could
		 * try to verify the packet: csum_start should probably
		 * correspond to the start of the TCP/UDP header.
		 *
		 * BMV: Need to properly handle UDP with zero checksum. Is
		 * the IPv4 header checksum implicitly validated?
		 */
		csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start);
		*(uint16_t *)(mtodo(m, csum_off)) = csum;
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	}
#endif
	default:
		sc->vtnet_stats.rx_csum_bad_ethtype++;
		return (1);
	}
	return (0);
}
/*
 * Handle a received packet whose checksum the host declared fully
 * validated (VIRTIO_NET_HDR_F_DATA_VALID).  TCP/UDP packets are marked
 * checksum-verified; other protocols are left for the stack to verify
 * itself.  Always returns 0.
 */
static int
vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m,
    uint16_t etype, int hoff, struct virtio_net_hdr *hdr __unused)
{
#if 0
	struct vtnet_softc *sc;
#endif
	int protocol;
#if 0
	sc = rxq->vtnrx_sc;
#endif
	/* Determine the transport protocol from the L3 header. */
	switch (etype) {
#if defined(INET)
	case ETHERTYPE_IP:
		if (__predict_false(m->m_len < hoff + sizeof(struct ip)))
			protocol = IPPROTO_DONE;
		else {
			struct ip *ip = (struct ip *)(m->m_data + hoff);
			protocol = ip->ip_p;
		}
		break;
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		/* Walk any extension headers to the final protocol. */
		if (__predict_false(m->m_len < hoff + sizeof(struct ip6_hdr))
		    || ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0)
			protocol = IPPROTO_DONE;
		break;
#endif
	default:
		protocol = IPPROTO_DONE;
		break;
	}
	switch (protocol) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	default:
		/*
		 * FreeBSD does not support checksum offloading of this
		 * protocol. Let the stack re-verify the checksum later
		 * if the protocol is supported.
		 */
#if 0
		if_printf(sc->vtnet_ifp,
		    "%s: checksum offload of unsupported protocol "
		    "etype=%#x protocol=%d csum_start=%d csum_offset=%d\n",
		    __func__, etype, protocol, hdr->csum_start,
		    hdr->csum_offset);
#endif
		break;
	}
	return (0);
}
/*
 * Dispatch Rx checksum offload processing based on the virtio header
 * flags.  Parses the Ethernet (and one optional 802.1Q) header to find
 * the payload ethertype and its offset, then defers to the NEEDS_CSUM
 * or DATA_VALID handler.  Returns 0 when checksum state was applied.
 */
static int
vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	const struct ether_header *eh;
	int hoff;
	uint16_t etype;
	eh = mtod(m, const struct ether_header *);
	etype = ntohs(eh->ether_type);
	if (etype == ETHERTYPE_VLAN) {
		/* TODO BMV: Handle QinQ. */
		const struct ether_vlan_header *evh =
		    mtod(m, const struct ether_vlan_header *);
		etype = ntohs(evh->evl_proto);
		hoff = sizeof(struct ether_vlan_header);
	} else
		hoff = sizeof(struct ether_header);
	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
		return (vtnet_rxq_csum_needs_csum(rxq, m, etype, hoff, hdr));
	else /* VIRTIO_NET_HDR_F_DATA_VALID */
		return (vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr));
}
/*
 * Discard the trailing (nbufs - 1) buffers of a partially received
 * mergeable frame, requeueing each one onto the Rx virtqueue.
 */
static void
vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs)
{
	struct mbuf *m;
	int i;

	for (i = nbufs - 1; i > 0; i--) {
		m = virtqueue_dequeue(rxq->vtnrx_vq, NULL);
		if (m == NULL)
			break;
		vtnet_rxq_discard_buf(rxq, m);
	}
}
/*
 * Return a just-dequeued mbuf to the Rx virtqueue.  Since a descriptor
 * was freed by the dequeue, the requeue is expected to always succeed.
 */
static void
vtnet_rxq_discard_buf(struct vtnet_rxq *rxq, struct mbuf *m)
{
	int error __diagused;

	error = vtnet_rxq_enqueue_buf(rxq, m);
	KASSERT(error == 0,
	    ("%s: cannot requeue discarded mbuf %d", __func__, error));
}
/*
 * Dequeue the remaining (nbufs - 1) buffers of a mergeable Rx frame and
 * link them onto m_head, replacing each consumed buffer with a fresh
 * one.  On any failure the entire frame is dropped.  Returns 0 on
 * success, 1 on failure (m_head has been freed).
 */
static int
vtnet_rxq_merged_eof(struct vtnet_rxq *rxq, struct mbuf *m_head, int nbufs)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct mbuf *m_tail;
	sc = rxq->vtnrx_sc;
	vq = rxq->vtnrx_vq;
	m_tail = m_head;
	while (--nbufs > 0) {
		struct mbuf *m;
		uint32_t len;
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL) {
			/* Host signaled more buffers than are available. */
			rxq->vtnrx_stats.vrxs_ierrors++;
			goto fail;
		}
		if (vtnet_rxq_new_buf(rxq) != 0) {
			/*
			 * Could not allocate a replacement, so requeue this
			 * buffer and discard the rest of the frame.
			 */
			rxq->vtnrx_stats.vrxs_iqdrops++;
			vtnet_rxq_discard_buf(rxq, m);
			if (nbufs > 1)
				vtnet_rxq_discard_merged_bufs(rxq, nbufs);
			goto fail;
		}
		/* Clamp to the host-reported length and append to the chain. */
		if (m->m_len < len)
			len = m->m_len;
		m->m_len = len;
		m->m_flags &= ~M_PKTHDR;
		m_head->m_pkthdr.len += len;
		m_tail->m_next = m;
		m_tail = m;
	}
	return (0);
fail:
	sc->vtnet_stats.rx_mergeable_failed++;
	m_freem(m_head);
	return (1);
}
#if defined(INET) || defined(INET6)
/*
 * Hand a received mbuf to software LRO: queued (batched) mode when an
 * LRO mbuf queue is configured, otherwise direct tcp_lro_rx().
 */
static int
vtnet_lro_rx(struct vtnet_rxq *rxq, struct mbuf *m)
{
	struct lro_ctrl *lro = &rxq->vtnrx_lro;

	if (lro->lro_mbuf_max == 0)
		return (tcp_lro_rx(lro, m, 0));

	tcp_lro_queue_mbuf(lro, m);
	return (0);
}
#endif
/*
 * Deliver a fully reassembled Rx frame to the network stack: strip any
 * 802.1Q header, apply checksum offload state from the virtio header,
 * update statistics, and pass the mbuf to LRO or if_input().
 */
static void
vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct vtnet_softc *sc;
	if_t ifp;
	sc = rxq->vtnrx_sc;
	ifp = sc->vtnet_ifp;
	if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
		struct ether_header *eh = mtod(m, struct ether_header *);
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			vtnet_vlan_tag_remove(m);
			/*
			 * With the 802.1Q header removed, update the
			 * checksum starting location accordingly.
			 */
			if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
				hdr->csum_start -= ETHER_VLAN_ENCAP_LEN;
		}
	}
	/* Steer by receive queue; the hash type is opaque to the stack. */
	m->m_pkthdr.flowid = rxq->vtnrx_id;
	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
	if (hdr->flags &
	    (VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID)) {
		if (vtnet_rxq_csum(rxq, m, hdr) == 0)
			rxq->vtnrx_stats.vrxs_csum++;
		else
			rxq->vtnrx_stats.vrxs_csum_failed++;
	}
	if (hdr->gso_size != 0) {
		/* Frame was coalesced by the host (receive-side GSO/LRO). */
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
		case VIRTIO_NET_HDR_GSO_TCPV6:
			m->m_pkthdr.lro_nsegs =
			    howmany(m->m_pkthdr.len, hdr->gso_size);
			rxq->vtnrx_stats.vrxs_host_lro++;
			break;
		}
	}
	rxq->vtnrx_stats.vrxs_ipackets++;
	rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len;
#if defined(INET) || defined(INET6)
	if (vtnet_software_lro(sc) && if_getcapenable(ifp) & IFCAP_LRO) {
		if (vtnet_lro_rx(rxq, m) == 0)
			return;
	}
#endif
	if_input(ifp, m);
}
/*
 * Drain completed buffers from the Rx virtqueue, up to the configured
 * process limit.  For each frame: validate the length, determine the
 * virtio header layout, replace the buffer, collect any merged
 * continuation buffers, save an endian-adjusted copy of the header,
 * strip it, run pfil hooks, and deliver the frame.  Returns 0 when the
 * queue was drained within the limit, EAGAIN when more work remains.
 */
static int
vtnet_rxq_eof(struct vtnet_rxq *rxq)
{
	struct virtio_net_hdr lhdr, *hdr;
	struct vtnet_softc *sc;
	if_t ifp;
	struct virtqueue *vq;
	int deq, count;
	sc = rxq->vtnrx_sc;
	vq = rxq->vtnrx_vq;
	ifp = sc->vtnet_ifp;
	deq = 0;
	count = sc->vtnet_rx_process_limit;
	VTNET_RXQ_LOCK_ASSERT(rxq);
	CURVNET_SET(if_getvnet(ifp));
	while (count-- > 0) {
		struct mbuf *m;
		uint32_t len, nbufs, adjsz;
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL)
			break;
		deq++;
		/* Runt frame: too short to hold the header plus Ethernet. */
		if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
			rxq->vtnrx_stats.vrxs_ierrors++;
			vtnet_rxq_discard_buf(rxq, m);
			continue;
		}
		/* Determine the header layout and total buffer count. */
		if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) {
			struct virtio_net_hdr_mrg_rxbuf *mhdr =
			    mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
			kmsan_mark(mhdr, sizeof(*mhdr), KMSAN_STATE_INITED);
			nbufs = vtnet_htog16(sc, mhdr->num_buffers);
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
		} else if (vtnet_modern(sc)) {
			nbufs = 1; /* num_buffers is always 1 */
			adjsz = sizeof(struct virtio_net_hdr_v1);
		} else {
			nbufs = 1;
			adjsz = sizeof(struct vtnet_rx_header);
			/*
			 * Account for our gap between the header and start of
			 * data to keep the segments separated.
			 */
			len += VTNET_RX_HEADER_PAD;
		}
		if (vtnet_rxq_replace_buf(rxq, m, len) != 0) {
			rxq->vtnrx_stats.vrxs_iqdrops++;
			vtnet_rxq_discard_buf(rxq, m);
			if (nbufs > 1)
				vtnet_rxq_discard_merged_bufs(rxq, nbufs);
			continue;
		}
		m->m_pkthdr.len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.csum_flags = 0;
		if (nbufs > 1) {
			/* Dequeue the rest of chain. */
			if (vtnet_rxq_merged_eof(rxq, m, nbufs) != 0)
				continue;
		}
		kmsan_mark_mbuf(m, KMSAN_STATE_INITED);
		/*
		 * Save an endian swapped version of the header prior to it
		 * being stripped. The header is always at the start of the
		 * mbuf data. num_buffers was already saved (and not needed)
		 * so use the standard header.
		 */
		hdr = mtod(m, struct virtio_net_hdr *);
		lhdr.flags = hdr->flags;
		lhdr.gso_type = hdr->gso_type;
		lhdr.hdr_len = vtnet_htog16(sc, hdr->hdr_len);
		lhdr.gso_size = vtnet_htog16(sc, hdr->gso_size);
		lhdr.csum_start = vtnet_htog16(sc, hdr->csum_start);
		lhdr.csum_offset = vtnet_htog16(sc, hdr->csum_offset);
		m_adj(m, adjsz);
		/* Run inbound packet filter hooks, which may consume m. */
		if (PFIL_HOOKED_IN(sc->vtnet_pfil)) {
			pfil_return_t pfil;
			pfil = pfil_mbuf_in(sc->vtnet_pfil, &m, ifp, NULL);
			switch (pfil) {
			case PFIL_DROPPED:
			case PFIL_CONSUMED:
				continue;
			default:
				KASSERT(pfil == PFIL_PASS,
				    ("Filter returned %d!", pfil));
			}
		}
		vtnet_rxq_input(rxq, m, &lhdr);
	}
	if (deq > 0) {
#if defined(INET) || defined(INET6)
		if (vtnet_software_lro(sc))
			tcp_lro_flush_all(&rxq->vtnrx_lro);
#endif
		/* Tell the host about the buffers we reposted. */
		virtqueue_notify(vq);
	}
	CURVNET_RESTORE();
	return (count > 0 ? 0 : EAGAIN);
}
/*
 * Common Rx processing path used by both the interrupt handler and the
 * taskqueue.  Drains the queue, re-enables the Rx interrupt, and retries
 * up to 'tries' times on races before rescheduling to the taskqueue.
 */
static void
vtnet_rx_vq_process(struct vtnet_rxq *rxq, int tries)
{
	struct vtnet_softc *sc;
	if_t ifp;
	u_int more;
#ifdef DEV_NETMAP
	int nmirq;
#endif /* DEV_NETMAP */
	sc = rxq->vtnrx_sc;
	ifp = sc->vtnet_ifp;
	if (__predict_false(rxq->vtnrx_id >= sc->vtnet_act_vq_pairs)) {
		/*
		 * Ignore this interrupt. Either this is a spurious interrupt
		 * or multiqueue without per-VQ MSIX so every queue needs to
		 * be polled (a brain dead configuration we could try harder
		 * to avoid).
		 */
		vtnet_rxq_disable_intr(rxq);
		return;
	}
	VTNET_RXQ_LOCK(rxq);
#ifdef DEV_NETMAP
	/*
	 * We call netmap_rx_irq() under lock to prevent concurrent calls.
	 * This is not necessary to serialize the access to the RX vq, but
	 * rather to avoid races that may happen if this interface is
	 * attached to a VALE switch, which would cause received packets
	 * to stall in the RX queue (nm_kr_tryget() could find the kring
	 * busy when called from netmap_bwrap_intr_notify()).
	 */
	nmirq = netmap_rx_irq(ifp, rxq->vtnrx_id, &more);
	if (nmirq != NM_IRQ_PASS) {
		VTNET_RXQ_UNLOCK(rxq);
		if (nmirq == NM_IRQ_RESCHED) {
			taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
		}
		return;
	}
#endif /* DEV_NETMAP */
again:
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		VTNET_RXQ_UNLOCK(rxq);
		return;
	}
	more = vtnet_rxq_eof(rxq);
	if (more || vtnet_rxq_enable_intr(rxq) != 0) {
		if (!more)
			vtnet_rxq_disable_intr(rxq);
		/*
		 * This is an occasional condition or race (when !more),
		 * so retry a few times before scheduling the taskqueue.
		 */
		if (tries-- > 0)
			goto again;
		rxq->vtnrx_stats.vrxs_rescheduled++;
		VTNET_RXQ_UNLOCK(rxq);
		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
	} else
		VTNET_RXQ_UNLOCK(rxq);
}
/*
 * Rx virtqueue interrupt handler: process with the standard retry budget.
 */
static void
vtnet_rx_vq_intr(void *xrxq)
{
	struct vtnet_rxq *rxq = xrxq;

	vtnet_rx_vq_process(rxq, VTNET_INTR_DISABLE_RETRIES);
}
/*
 * Rx taskqueue handler: already in task context, so process without the
 * interrupt-retry budget.
 */
static void
vtnet_rxq_tq_intr(void *xrxq, int pending __unused)
{
	struct vtnet_rxq *rxq = xrxq;

	vtnet_rx_vq_process(rxq, 0);
}
/*
 * Compute the free-descriptor count below which the Tx interrupt gets
 * re-enabled.  Completed frames are normally reclaimed while
 * transmitting and from the watchdog callout, so deferring the
 * interrupt greatly reduces its frequency at the cost of holding
 * transmitted mbufs slightly longer.
 */
static int
vtnet_txq_intr_threshold(struct vtnet_txq *txq)
{
	struct vtnet_softc *sc = txq->vtntx_sc;
	int thresh;

	/* One quarter of the virtqueue. */
	thresh = virtqueue_size(txq->vtntx_vq) / 4;

	/*
	 * Without indirect descriptors, keep at least enough descriptors
	 * free for a maximally fragmented frame.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 &&
	    thresh < sc->vtnet_tx_nsegs)
		thresh = sc->vtnet_tx_nsegs;

	return (thresh);
}
/*
 * True when the queue's free-descriptor count is at or below the
 * interrupt threshold.
 */
static int
vtnet_txq_below_threshold(struct vtnet_txq *txq)
{

	return (virtqueue_nfree(txq->vtntx_vq) <= txq->vtntx_intr_threshold);
}
/*
 * Kick the host after transmitting and arm the Tx completion interrupt.
 * Returns nonzero when newly completed frames were reclaimed and the
 * queue is above the threshold, meaning the caller should continue
 * transmitting (the interrupt is left disabled in that case).
 */
static int
vtnet_txq_notify(struct vtnet_txq *txq)
{
	struct virtqueue *vq;
	vq = txq->vtntx_vq;
	/* Re-arm the watchdog since frames are now in flight. */
	txq->vtntx_watchdog = VTNET_TX_TIMEOUT;
	virtqueue_notify(vq);
	if (vtnet_txq_enable_intr(txq) == 0)
		return (0);
	/*
	 * Drain frames that were completed since last checked. If this
	 * causes the queue to go above the threshold, the caller should
	 * continue transmitting.
	 */
	if (vtnet_txq_eof(txq) != 0 && vtnet_txq_below_threshold(txq) == 0) {
		virtqueue_disable_intr(vq);
		return (1);
	}
	return (0);
}
/*
 * Drain every pending descriptor from the Tx virtqueue and release the
 * associated mbufs and headers.  When netmap owns the ring (kring is
 * non-NULL) the buffers belong to netmap and are not freed here.
 */
static void
vtnet_txq_free_mbufs(struct vtnet_txq *txq)
{
	struct virtqueue *vq;
	struct vtnet_tx_header *txhdr;
	int last;
#ifdef DEV_NETMAP
	struct netmap_kring *kring = netmap_kring_on(NA(txq->vtntx_sc->vtnet_ifp),
	    txq->vtntx_id, NR_TX);
#else  /* !DEV_NETMAP */
	void *kring = NULL;
#endif /* !DEV_NETMAP */
	vq = txq->vtntx_vq;
	last = 0;
	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
		if (kring == NULL) {
			m_freem(txhdr->vth_mbuf);
			uma_zfree(vtnet_tx_header_zone, txhdr);
		}
	}
	KASSERT(virtqueue_empty(vq),
	    ("%s: mbufs remaining in tx queue %p", __func__, txq));
}
/*
 * BMV: This can go away once we finally have offsets in the mbuf header.
 */
/*
 * Parse an outbound frame's Ethernet and L3 headers to recover the
 * payload ethertype (*etype), the transport protocol (*proto), and the
 * byte offset of the transport header (*start).  Returns 0 on success
 * or EINVAL for an unrecognized ethertype.
 */
static int
vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m, int *etype,
    int *proto, int *start)
{
	struct vtnet_softc *sc;
	struct ether_vlan_header *evh;
#if defined(INET) || defined(INET6)
	int offset;
#endif
	sc = txq->vtntx_sc;
	evh = mtod(m, struct ether_vlan_header *);
	if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		/* BMV: We should handle nested VLAN tags too. */
		*etype = ntohs(evh->evl_proto);
#if defined(INET) || defined(INET6)
		offset = sizeof(struct ether_vlan_header);
#endif
	} else {
		*etype = ntohs(evh->evl_encap_proto);
#if defined(INET) || defined(INET6)
		offset = sizeof(struct ether_header);
#endif
	}
	switch (*etype) {
#if defined(INET)
	case ETHERTYPE_IP: {
		struct ip *ip, iphdr;
		/* Copy out the IP header if it is not contiguous. */
		if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
			m_copydata(m, offset, sizeof(struct ip),
			    (caddr_t) &iphdr);
			ip = &iphdr;
		} else
			ip = (struct ip *)(m->m_data + offset);
		*proto = ip->ip_p;
		*start = offset + (ip->ip_hl << 2);
		break;
	}
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		*proto = -1;
		*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
		/* Assert the network stack sent us a valid packet. */
		KASSERT(*start > offset,
		    ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
		    *start, offset, *proto));
		break;
#endif
	default:
		sc->vtnet_stats.tx_csum_unknown_ethtype++;
		return (EINVAL);
	}
	return (0);
}
/*
 * Fill in the virtio header's TSO fields (hdr_len, gso_size, gso_type)
 * for an outbound TSO frame.  'offset' is the byte offset of the TCP
 * header.  Returns 0 on success or ENOTSUP when ECN is requested but
 * was not negotiated with the host.
 */
static int
vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type,
    int offset, struct virtio_net_hdr *hdr)
{
	static struct timeval lastecn;
	static int curecn;
	struct vtnet_softc *sc;
	struct tcphdr *tcp, tcphdr;
	sc = txq->vtntx_sc;
	/* Copy out the TCP header if it is not contiguous. */
	if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) {
		m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr);
		tcp = &tcphdr;
	} else
		tcp = (struct tcphdr *)(m->m_data + offset);
	hdr->hdr_len = vtnet_gtoh16(sc, offset + (tcp->th_off << 2));
	hdr->gso_size = vtnet_gtoh16(sc, m->m_pkthdr.tso_segsz);
	hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
	    VIRTIO_NET_HDR_GSO_TCPV6;
	if (__predict_false(tcp->th_flags & TH_CWR)) {
		/*
		 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In
		 * FreeBSD, ECN support is not on a per-interface basis,
		 * but globally via the net.inet.tcp.ecn.enable sysctl
		 * knob. The default is off.
		 */
		if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
			/* Rate-limit the warning to once per second. */
			if (ppsratecheck(&lastecn, &curecn, 1))
				if_printf(sc->vtnet_ifp,
				    "TSO with ECN not negotiated with host\n");
			return (ENOTSUP);
		}
		hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	}
	txq->vtntx_stats.vtxs_tso++;
	return (0);
}
/*
 * Translate the mbuf's checksum/TSO offload requests into the outbound
 * virtio header.  Returns the mbuf on success, or NULL (mbuf freed)
 * when the offload flags are inconsistent with the parsed headers.
 */
static struct mbuf *
vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct vtnet_softc *sc;
	int flags, etype, csum_start, proto, error;
	sc = txq->vtntx_sc;
	flags = m->m_pkthdr.csum_flags;
	error = vtnet_txq_offload_ctx(txq, m, &etype, &proto, &csum_start);
	if (error)
		goto drop;
	if (flags & (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6)) {
		/* Sanity check the parsed mbuf matches the offload flags. */
		if (__predict_false((flags & VTNET_CSUM_OFFLOAD &&
		    etype != ETHERTYPE_IP) || (flags & VTNET_CSUM_OFFLOAD_IPV6
		    && etype != ETHERTYPE_IPV6))) {
			sc->vtnet_stats.tx_csum_proto_mismatch++;
			goto drop;
		}
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = vtnet_gtoh16(sc, csum_start);
		hdr->csum_offset = vtnet_gtoh16(sc, m->m_pkthdr.csum_data);
		txq->vtntx_stats.vtxs_csum++;
	}
	if (flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) {
		/*
		 * Sanity check the parsed mbuf IP protocol is TCP, and
		 * VirtIO TSO requires the checksum offloading above.
		 */
		if (__predict_false(proto != IPPROTO_TCP)) {
			sc->vtnet_stats.tx_tso_not_tcp++;
			goto drop;
		} else if (__predict_false((hdr->flags &
		    VIRTIO_NET_HDR_F_NEEDS_CSUM) == 0)) {
			sc->vtnet_stats.tx_tso_without_csum++;
			goto drop;
		}
		error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr);
		if (error)
			goto drop;
	}
	return (m);
drop:
	m_freem(m);
	return (NULL);
}
/*
 * Enqueue an outbound frame: build the scatter/gather list covering the
 * virtio header and the mbuf chain, defragmenting once if the chain has
 * too many segments.  On failure the mbuf is freed and *m_head cleared.
 */
static int
vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
    struct vtnet_tx_header *txhdr)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct sglist *sg;
	struct mbuf *m;
	int error;
	sc = txq->vtntx_sc;
	vq = txq->vtntx_vq;
	sg = txq->vtntx_sg;
	m = *m_head;
	sglist_reset(sg);
	/* The virtio header must occupy exactly one segment. */
	error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
	if (error != 0 || sg->sg_nseg != 1) {
		KASSERT(0, ("%s: cannot add header to sglist error %d nseg %d",
		    __func__, error, sg->sg_nseg));
		goto fail;
	}
	error = sglist_append_mbuf(sg, m);
	if (error) {
		/* Too many segments: collapse the chain and retry once. */
		m = m_defrag(m, M_NOWAIT);
		if (m == NULL)
			goto fail;
		*m_head = m;
		sc->vtnet_stats.tx_defragged++;
		error = sglist_append_mbuf(sg, m);
		if (error)
			goto fail;
	}
	txhdr->vth_mbuf = m;
	/* All segments are device-readable (0 writable segments). */
	error = virtqueue_enqueue(vq, txhdr, sg, sg->sg_nseg, 0);
	return (error);
fail:
	sc->vtnet_stats.tx_defrag_failed++;
	m_freem(*m_head);
	*m_head = NULL;
	return (ENOBUFS);
}
/*
 * Encapsulate one frame for transmission: allocate a Tx header,
 * software-encapsulate any VLAN tag, apply checksum/TSO offload, and
 * enqueue.  On failure the mbuf is freed and *m_head cleared.
 */
static int
vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head, int flags)
{
	struct vtnet_tx_header *txhdr;
	struct virtio_net_hdr *hdr;
	struct mbuf *m;
	int error;
	m = *m_head;
	M_ASSERTPKTHDR(m);
	txhdr = uma_zalloc(vtnet_tx_header_zone, flags | M_ZERO);
	if (txhdr == NULL) {
		m_freem(m);
		*m_head = NULL;
		return (ENOMEM);
	}
	/*
	 * Always use the non-mergeable header, regardless if mergable headers
	 * were negotiated, because for transmit num_buffers is always zero.
	 * The vtnet_hdr_size is used to enqueue the right header size segment.
	 */
	hdr = &txhdr->vth_uhdr.hdr;
	if (m->m_flags & M_VLANTAG) {
		/* Insert the 802.1Q tag into the frame in software. */
		m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
		if ((*m_head = m) == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		m->m_flags &= ~M_VLANTAG;
	}
	if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) {
		m = vtnet_txq_offload(txq, m, hdr);
		if ((*m_head = m) == NULL) {
			error = ENOBUFS;
			goto fail;
		}
	}
	error = vtnet_txq_enqueue_buf(txq, m_head, txhdr);
fail:
	/* The enqueue path frees the mbuf itself on error. */
	if (error)
		uma_zfree(vtnet_tx_header_zone, txhdr);
	return (error);
}
#ifdef VTNET_LEGACY_TX
/*
 * Legacy single-queue transmit: drain the ifnet send queue into the Tx
 * virtqueue while descriptors are available, notify the host, and retry
 * a few times before rescheduling to the taskqueue.  Queue lock held.
 */
static void
vtnet_start_locked(struct vtnet_txq *txq, if_t ifp)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct mbuf *m0;
	int tries, enq;
	sc = txq->vtntx_sc;
	vq = txq->vtntx_vq;
	tries = 0;
	VTNET_TXQ_LOCK_ASSERT(txq);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
	    sc->vtnet_link_active == 0)
		return;
	/* Reclaim completed frames to maximize available descriptors. */
	vtnet_txq_eof(txq);
again:
	enq = 0;
	while (!if_sendq_empty(ifp)) {
		if (virtqueue_full(vq))
			break;
		m0 = if_dequeue(ifp);
		if (m0 == NULL)
			break;
		if (vtnet_txq_encap(txq, &m0, M_NOWAIT) != 0) {
			/* Encap may leave m0 intact; put it back if so. */
			if (m0 != NULL)
				if_sendq_prepend(ifp, m0);
			break;
		}
		enq++;
		ETHER_BPF_MTAP(ifp, m0);
	}
	if (enq > 0 && vtnet_txq_notify(txq) != 0) {
		if (tries++ < VTNET_NOTIFY_RETRIES)
			goto again;
		txq->vtntx_stats.vtxs_rescheduled++;
		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
	}
}
/*
 * if_start method for the legacy single-queue transmit path.
 */
static void
vtnet_start(if_t ifp)
{
	struct vtnet_softc *sc = if_getsoftc(ifp);
	struct vtnet_txq *txq = &sc->vtnet_txqs[0];

	VTNET_TXQ_LOCK(txq);
	vtnet_start_locked(txq, ifp);
	VTNET_TXQ_UNLOCK(txq);
}
#else /* !VTNET_LEGACY_TX */
/*
 * Multiqueue transmit with the queue lock held: optionally enqueue m
 * on the buf_ring, then drain the ring into the Tx virtqueue using the
 * peek/advance/putback protocol so an un-consumed mbuf stays queued.
 * Returns 0 or the drbr_enqueue() error.
 */
static int
vtnet_txq_mq_start_locked(struct vtnet_txq *txq, struct mbuf *m)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct buf_ring *br;
	if_t ifp;
	int enq, tries, error;
	sc = txq->vtntx_sc;
	vq = txq->vtntx_vq;
	br = txq->vtntx_br;
	ifp = sc->vtnet_ifp;
	tries = 0;
	error = 0;
	VTNET_TXQ_LOCK_ASSERT(txq);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
	    sc->vtnet_link_active == 0) {
		/* Not running: just queue the mbuf for later. */
		if (m != NULL)
			error = drbr_enqueue(ifp, br, m);
		return (error);
	}
	if (m != NULL) {
		error = drbr_enqueue(ifp, br, m);
		if (error)
			return (error);
	}
	/* Reclaim completed frames to maximize available descriptors. */
	vtnet_txq_eof(txq);
again:
	enq = 0;
	while ((m = drbr_peek(ifp, br)) != NULL) {
		if (virtqueue_full(vq)) {
			drbr_putback(ifp, br, m);
			break;
		}
		if (vtnet_txq_encap(txq, &m, M_NOWAIT) != 0) {
			/* Encap freed the mbuf unless it left m non-NULL. */
			if (m != NULL)
				drbr_putback(ifp, br, m);
			else
				drbr_advance(ifp, br);
			break;
		}
		drbr_advance(ifp, br);
		enq++;
		ETHER_BPF_MTAP(ifp, m);
	}
	if (enq > 0 && vtnet_txq_notify(txq) != 0) {
		if (tries++ < VTNET_NOTIFY_RETRIES)
			goto again;
		txq->vtntx_stats.vtxs_rescheduled++;
		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
	}
	return (0);
}
/*
 * if_transmit method (multiqueue): select a queue from the mbuf's
 * flowid (or the current CPU when there is no hash), and transmit
 * directly when the queue lock is uncontended; otherwise queue the
 * mbuf on the buf_ring and defer to the taskqueue.
 */
static int
vtnet_txq_mq_start(if_t ifp, struct mbuf *m)
{
	struct vtnet_softc *sc;
	struct vtnet_txq *txq;
	int i, npairs, error;
	sc = if_getsoftc(ifp);
	npairs = sc->vtnet_act_vq_pairs;
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		i = m->m_pkthdr.flowid % npairs;
	else
		i = curcpu % npairs;
	txq = &sc->vtnet_txqs[i];
	if (VTNET_TXQ_TRYLOCK(txq) != 0) {
		error = vtnet_txq_mq_start_locked(txq, m);
		VTNET_TXQ_UNLOCK(txq);
	} else {
		error = drbr_enqueue(ifp, txq->vtntx_br, m);
		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_defrtask);
	}
	return (error);
}
/*
 * Deferred transmit task: runs when vtnet_txq_mq_start() failed to take
 * the queue lock and left the mbuf on the buf_ring instead.
 */
static void
vtnet_txq_tq_deferred(void *xtxq, int pending __unused)
{
	struct vtnet_txq *txq = xtxq;
	struct vtnet_softc *sc = txq->vtntx_sc;

	VTNET_TXQ_LOCK(txq);
	if (!drbr_empty(sc->vtnet_ifp, txq->vtntx_br))
		vtnet_txq_mq_start_locked(txq, NULL);
	VTNET_TXQ_UNLOCK(txq);
}
#endif /* VTNET_LEGACY_TX */
/*
 * Restart transmission on a queue if any packets are pending, using
 * whichever transmit path (legacy or multiqueue) was compiled in.
 * Called with the queue lock held.
 */
static void
vtnet_txq_start(struct vtnet_txq *txq)
{
	struct vtnet_softc *sc = txq->vtntx_sc;
	if_t ifp = sc->vtnet_ifp;

#ifdef VTNET_LEGACY_TX
	if (!if_sendq_empty(ifp))
		vtnet_start_locked(txq, ifp);
#else
	if (!drbr_empty(ifp, txq->vtntx_br))
		vtnet_txq_mq_start_locked(txq, NULL);
#endif
}
/*
 * Transmit interrupt task: reclaim completed frames and resume
 * transmission if the interface is still running.
 */
static void
vtnet_txq_tq_intr(void *xtxq, int pending __unused)
{
	struct vtnet_txq *txq = xtxq;
	struct vtnet_softc *sc = txq->vtntx_sc;
	if_t ifp = sc->vtnet_ifp;

	VTNET_TXQ_LOCK(txq);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		VTNET_TXQ_UNLOCK(txq);
		return;
	}

	vtnet_txq_eof(txq);
	vtnet_txq_start(txq);
	VTNET_TXQ_UNLOCK(txq);
}
/*
 * Reclaim completed Tx descriptors: free the mbuf and header of each,
 * update statistics, and clear the watchdog once the queue is empty.
 * Returns the number of frames reclaimed.
 */
static int
vtnet_txq_eof(struct vtnet_txq *txq)
{
	struct virtqueue *vq = txq->vtntx_vq;
	struct vtnet_tx_header *txhdr;
	struct mbuf *m;
	int ndeq = 0;

	VTNET_TXQ_LOCK_ASSERT(txq);

	for (;;) {
		txhdr = virtqueue_dequeue(vq, NULL);
		if (txhdr == NULL)
			break;
		m = txhdr->vth_mbuf;
		ndeq++;

		txq->vtntx_stats.vtxs_opackets++;
		txq->vtntx_stats.vtxs_obytes += m->m_pkthdr.len;
		if (m->m_flags & M_MCAST)
			txq->vtntx_stats.vtxs_omcasts++;

		m_freem(m);
		uma_zfree(vtnet_tx_header_zone, txhdr);
	}

	if (virtqueue_empty(vq))
		txq->vtntx_watchdog = 0;

	return (ndeq);
}
/*
 * Tx virtqueue interrupt handler: reclaim completed frames and restart
 * transmission.  Spurious interrupts for inactive queues simply disable
 * that queue's interrupt.
 */
static void
vtnet_tx_vq_intr(void *xtxq)
{
	struct vtnet_softc *sc;
	struct vtnet_txq *txq;
	if_t ifp;
	txq = xtxq;
	sc = txq->vtntx_sc;
	ifp = sc->vtnet_ifp;
	if (__predict_false(txq->vtntx_id >= sc->vtnet_act_vq_pairs)) {
		/*
		 * Ignore this interrupt. Either this is a spurious interrupt
		 * or multiqueue without per-VQ MSIX so every queue needs to
		 * be polled (a brain dead configuration we could try harder
		 * to avoid).
		 */
		vtnet_txq_disable_intr(txq);
		return;
	}
#ifdef DEV_NETMAP
	/* Netmap consumes the interrupt when it owns the ring. */
	if (netmap_tx_irq(ifp, txq->vtntx_id) != NM_IRQ_PASS)
		return;
#endif /* DEV_NETMAP */
	VTNET_TXQ_LOCK(txq);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		VTNET_TXQ_UNLOCK(txq);
		return;
	}
	vtnet_txq_eof(txq);
	vtnet_txq_start(txq);
	VTNET_TXQ_UNLOCK(txq);
}
/*
 * Kick transmission on every active transmit queue.  Called with the
 * core lock held.
 */
static void
vtnet_tx_start_all(struct vtnet_softc *sc)
{
	int i;

	VTNET_CORE_LOCK_ASSERT(sc);

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];

		VTNET_TXQ_LOCK(txq);
		vtnet_txq_start(txq);
		VTNET_TXQ_UNLOCK(txq);
	}
}
#ifndef VTNET_LEGACY_TX
/*
 * if_qflush method: discard every packet queued on each active transmit
 * buf_ring, then flush the ifnet-layer queue.
 */
static void
vtnet_qflush(if_t ifp)
{
	struct vtnet_softc *sc = if_getsoftc(ifp);
	struct mbuf *m;
	int i;

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];

		VTNET_TXQ_LOCK(txq);
		for (;;) {
			m = buf_ring_dequeue_sc(txq->vtntx_br);
			if (m == NULL)
				break;
			m_freem(m);
		}
		VTNET_TXQ_UNLOCK(txq);
	}

	if_qflush(ifp);
}
#endif
/*
 * Per-queue Tx watchdog, run from vtnet_tick() once per second.
 * Decrements the queue's watchdog counter and returns 1 when it
 * expires (the caller then reinitializes the interface), 0 otherwise.
 */
static int
vtnet_watchdog(struct vtnet_txq *txq)
{
	if_t ifp;
	ifp = txq->vtntx_sc->vtnet_ifp;
	VTNET_TXQ_LOCK(txq);
	if (txq->vtntx_watchdog == 1) {
		/*
		 * Only drain completed frames if the watchdog is about to
		 * expire. If any frames were drained, there may be enough
		 * free descriptors now available to transmit queued frames.
		 * In that case, the timer will immediately be decremented
		 * below, but the timeout is generous enough that should not
		 * be a problem.
		 */
		if (vtnet_txq_eof(txq) != 0)
			vtnet_txq_start(txq);
	}
	/* Idle (0) or still counting down: no timeout. */
	if (txq->vtntx_watchdog == 0 || --txq->vtntx_watchdog) {
		VTNET_TXQ_UNLOCK(txq);
		return (0);
	}
	VTNET_TXQ_UNLOCK(txq);
	if_printf(ifp, "watchdog timeout on queue %d\n", txq->vtntx_id);
	return (1);
}
/*
 * Sum the per-queue Rx and Tx statistics across every queue pair into
 * the caller-provided accumulators, which are zeroed first.  Used by
 * vtnet_get_counter() to service ifnet counter queries.
 */
static void
vtnet_accum_stats(struct vtnet_softc *sc, struct vtnet_rxq_stats *rxacc,
    struct vtnet_txq_stats *txacc)
{
	bzero(rxacc, sizeof(struct vtnet_rxq_stats));
	bzero(txacc, sizeof(struct vtnet_txq_stats));
	for (int i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		struct vtnet_rxq_stats *rxst;
		struct vtnet_txq_stats *txst;
		rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
		rxacc->vrxs_ipackets += rxst->vrxs_ipackets;
		rxacc->vrxs_ibytes += rxst->vrxs_ibytes;
		rxacc->vrxs_iqdrops += rxst->vrxs_iqdrops;
		/*
		 * Fix: vrxs_ierrors was never accumulated, so the
		 * IFCOUNTER_IERRORS case in vtnet_get_counter() always
		 * reported 0 despite the per-queue counters incrementing.
		 */
		rxacc->vrxs_ierrors += rxst->vrxs_ierrors;
		rxacc->vrxs_csum += rxst->vrxs_csum;
		rxacc->vrxs_csum_failed += rxst->vrxs_csum_failed;
		rxacc->vrxs_rescheduled += rxst->vrxs_rescheduled;
		txst = &sc->vtnet_txqs[i].vtntx_stats;
		txacc->vtxs_opackets += txst->vtxs_opackets;
		txacc->vtxs_obytes += txst->vtxs_obytes;
		txacc->vtxs_csum += txst->vtxs_csum;
		txacc->vtxs_tso += txst->vtxs_tso;
		/* Fix: likewise accumulate omcasts for IFCOUNTER_OMCASTS. */
		txacc->vtxs_omcasts += txst->vtxs_omcasts;
		txacc->vtxs_rescheduled += txst->vtxs_rescheduled;
	}
}
/*
 * ifnet get_counter method: answer counter queries from the accumulated
 * per-queue statistics, falling back to the default ifnet counters for
 * anything the driver does not track itself.
 */
static uint64_t
vtnet_get_counter(if_t ifp, ift_counter cnt)
{
	struct vtnet_softc *sc = if_getsoftc(ifp);
	struct vtnet_rxq_stats rx_total;
	struct vtnet_txq_stats tx_total;

	vtnet_accum_stats(sc, &rx_total, &tx_total);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (rx_total.vrxs_ipackets);
	case IFCOUNTER_IQDROPS:
		return (rx_total.vrxs_iqdrops);
	case IFCOUNTER_IERRORS:
		return (rx_total.vrxs_ierrors);
	case IFCOUNTER_OPACKETS:
		return (tx_total.vtxs_opackets);
#ifndef VTNET_LEGACY_TX
	case IFCOUNTER_OBYTES:
		return (tx_total.vtxs_obytes);
	case IFCOUNTER_OMCASTS:
		return (tx_total.vtxs_omcasts);
#endif
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
/*
 * Periodic (hz) callout: run every active queue's Tx watchdog.  If any
 * queue timed out, reinitialize the interface; otherwise reschedule.
 */
static void
vtnet_tick(void *xsc)
{
	struct vtnet_softc *sc = xsc;
	if_t ifp = sc->vtnet_ifp;
	int i, expired;

	VTNET_CORE_LOCK_ASSERT(sc);

	expired = 0;
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
		expired |= vtnet_watchdog(&sc->vtnet_txqs[i]);

	if (expired != 0) {
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		vtnet_init_locked(sc, 0);
	} else
		callout_schedule(&sc->vtnet_tick_ch, hz);
}
/*
 * Start the per-queue Rx and Tx taskqueue threads.  Failures are only
 * logged: aborting here could hang at boot (see comment below), and
 * taskqueue_start_threads() only fails on ENOMEM.
 */
static void
vtnet_start_taskqueues(struct vtnet_softc *sc)
{
	device_t dev;
	struct vtnet_rxq *rxq;
	struct vtnet_txq *txq;
	int i, error;
	dev = sc->vtnet_dev;
	/*
	 * Errors here are very difficult to recover from - we cannot
	 * easily fail because, if this is during boot, we will hang
	 * when freeing any successfully started taskqueues because
	 * the scheduler isn't up yet.
	 *
	 * Most drivers just ignore the return value - it only fails
	 * with ENOMEM so an error is not likely.
	 */
	for (i = 0; i < sc->vtnet_req_vq_pairs; i++) {
		rxq = &sc->vtnet_rxqs[i];
		error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET,
		    "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id);
		if (error) {
			device_printf(dev, "failed to start rx taskq %d\n",
			    rxq->vtnrx_id);
		}
		txq = &sc->vtnet_txqs[i];
		error = taskqueue_start_threads(&txq->vtntx_tq, 1, PI_NET,
		    "%s txq %d", device_get_nameunit(dev), txq->vtntx_id);
		if (error) {
			device_printf(dev, "failed to start tx taskq %d\n",
			    txq->vtntx_id);
		}
	}
}
/*
 * Free every per-queue Rx and Tx taskqueue, clearing the pointers so a
 * repeated call is harmless.
 */
static void
vtnet_free_taskqueues(struct vtnet_softc *sc)
{
	int i;

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];

		if (rxq->vtnrx_tq != NULL) {
			taskqueue_free(rxq->vtnrx_tq);
			rxq->vtnrx_tq = NULL;
		}
		if (txq->vtntx_tq != NULL) {
			taskqueue_free(txq->vtntx_tq);
			txq->vtntx_tq = NULL;
		}
	}
}
/*
 * Wait for all pending per-queue tasks (interrupt and, for multiqueue
 * transmit, deferred-start) to finish.
 */
static void
vtnet_drain_taskqueues(struct vtnet_softc *sc)
{
	int i;

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];

		if (rxq->vtnrx_tq != NULL)
			taskqueue_drain(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
		if (txq->vtntx_tq != NULL) {
			taskqueue_drain(txq->vtntx_tq, &txq->vtntx_intrtask);
#ifndef VTNET_LEGACY_TX
			taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask);
#endif
		}
	}
}
/*
 * Release every mbuf still held by the Rx and Tx virtqueues of each
 * queue pair.
 */
static void
vtnet_drain_rxtx_queues(struct vtnet_softc *sc)
{
	int i;

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		vtnet_rxq_free_mbufs(&sc->vtnet_rxqs[i]);
		vtnet_txq_free_mbufs(&sc->vtnet_txqs[i]);
	}
}
/*
 * Rendezvous with any queue processing still in flight after stop:
 * acquiring and releasing each per-queue mutex guarantees that threads
 * which held the lock have observed the stopped state.  Touching every
 * queue rather than only the active ones costs little and is always
 * safe.
 */
static void
vtnet_stop_rendezvous(struct vtnet_softc *sc)
{
	int i;

	VTNET_CORE_LOCK_ASSERT(sc);

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];

		VTNET_RXQ_LOCK(rxq);
		VTNET_RXQ_UNLOCK(rxq);
		VTNET_TXQ_LOCK(txq);
		VTNET_TXQ_UNLOCK(txq);
	}
}
/*
 * Stop the interface: mark it down, cancel the tick callout, quiesce
 * interrupts and netmap, reset the device, rendezvous with in-flight
 * queue processing, and free all queued mbufs.  Core lock held.
 */
static void
vtnet_stop(struct vtnet_softc *sc)
{
	device_t dev;
	if_t ifp;
	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;
	VTNET_CORE_LOCK_ASSERT(sc);
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	sc->vtnet_link_active = 0;
	callout_stop(&sc->vtnet_tick_ch);
	/* Only advisory. */
	vtnet_disable_interrupts(sc);
#ifdef DEV_NETMAP
	/* Stop any pending txsync/rxsync and disable them. */
	netmap_disable_all_rings(ifp);
#endif /* DEV_NETMAP */
	/*
	 * Stop the host adapter. This resets it to the pre-initialized
	 * state. It will not generate any interrupts until after it is
	 * reinitialized.
	 */
	virtio_stop(dev);
	vtnet_stop_rendezvous(sc);
	vtnet_drain_rxtx_queues(sc);
	/* Back to the default single queue pair until reinitialized. */
	sc->vtnet_act_vq_pairs = 1;
}
/*
 * Renegotiate features with the host and complete device reinitialization.
 * Starts from the originally negotiated feature set and strips receive-side
 * features that have been disabled through if_capenable.  Returns 0 on
 * success or the error from virtio_reinit().
 */
static int
vtnet_virtio_reinit(struct vtnet_softc *sc)
{
	device_t dev;
	if_t ifp;
	uint64_t features;
	int error;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;
	features = sc->vtnet_negotiated_features;

	/*
	 * Re-negotiate with the host, removing any disabled receive
	 * features. Transmit features are disabled only on our side
	 * via if_capenable and if_hwassist.
	 */
	if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) == 0)
		/* LRO requires guest checksum offload, so drop it too. */
		features &= ~(VIRTIO_NET_F_GUEST_CSUM | VTNET_LRO_FEATURES);

	if ((if_getcapenable(ifp) & IFCAP_LRO) == 0)
		features &= ~VTNET_LRO_FEATURES;

	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
		features &= ~VIRTIO_NET_F_CTRL_VLAN;

	error = virtio_reinit(dev, features);
	if (error) {
		device_printf(dev, "virtio reinit error %d\n", error);
		return (error);
	}

	/* Record the (possibly reduced) feature set before going live. */
	sc->vtnet_features = features;
	virtio_reinit_complete(dev);

	return (0);
}
/*
 * Reprogram the host's receive filters (promiscuous/allmulti state, MAC
 * table, and VLAN table) after a reinit.
 */
static void
vtnet_init_rx_filters(struct vtnet_softc *sc)
{
	if_t ifp = sc->vtnet_ifp;

	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) != 0) {
		vtnet_rx_filter(sc);
		vtnet_rx_filter_mac(sc);
	}

	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) != 0)
		vtnet_rx_filter_vlan(sc);
}
/*
 * Size the receive buffers for the current MTU and fill every active Rx
 * virtqueue with mbufs.  Returns 0 on success or the error from
 * vtnet_rxq_populate().
 */
static int
vtnet_init_rx_queues(struct vtnet_softc *sc)
{
	device_t dev;
	if_t ifp;
	struct vtnet_rxq *rxq;
	int i, clustersz, error;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	clustersz = vtnet_rx_cluster_size(sc, if_getmtu(ifp));
	sc->vtnet_rx_clustersz = clustersz;

	if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) {
		/*
		 * Without mergeable buffers, a full-sized LRO frame must fit
		 * in a single mbuf chain, so compute how many clusters that
		 * takes (including the receive header).
		 */
		sc->vtnet_rx_nmbufs = howmany(sizeof(struct vtnet_rx_header) +
		    VTNET_MAX_RX_SIZE, clustersz);
		KASSERT(sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs,
		    ("%s: too many rx mbufs %d for %d segments", __func__,
		    sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs));
	} else
		sc->vtnet_rx_nmbufs = 1;

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
		rxq = &sc->vtnet_rxqs[i];

		/* Hold the lock to satisfy asserts. */
		VTNET_RXQ_LOCK(rxq);
		error = vtnet_rxq_populate(rxq);
		VTNET_RXQ_UNLOCK(rxq);

		if (error) {
			device_printf(dev, "cannot populate Rx queue %d\n", i);
			return (error);
		}
	}

	return (0);
}
/*
 * Reset per-queue transmit state (watchdog, interrupt threshold) for every
 * active Tx queue.  Always succeeds; returns 0 for symmetry with the Rx
 * initialization path.
 */
static int
vtnet_init_tx_queues(struct vtnet_softc *sc)
{
	int i;

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];

		txq->vtntx_watchdog = 0;
		txq->vtntx_intr_threshold = vtnet_txq_intr_threshold(txq);
#ifdef DEV_NETMAP
		netmap_reset(NA(sc->vtnet_ifp), NR_TX, i, 0);
#endif /* DEV_NETMAP */
	}

	return (0);
}
/* Initialize the Rx queues followed by the Tx queues. */
static int
vtnet_init_rxtx_queues(struct vtnet_softc *sc)
{
	int error;

	if ((error = vtnet_init_rx_queues(sc)) != 0)
		return (error);

	return (vtnet_init_tx_queues(sc));
}
/*
 * Tell the host how many virtqueue pairs to use.  Without multiqueue
 * support a single pair is always active; otherwise request the configured
 * number and fall back to one pair if the control command fails.
 */
static void
vtnet_set_active_vq_pairs(struct vtnet_softc *sc)
{
	device_t dev = sc->vtnet_dev;
	int npairs;

	if ((sc->vtnet_flags & VTNET_FLAG_MQ) == 0) {
		sc->vtnet_act_vq_pairs = 1;
		return;
	}

	npairs = sc->vtnet_req_vq_pairs;
	if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) {
		device_printf(dev, "cannot set active queue pairs to %d, "
		    "falling back to 1 queue pair\n", npairs);
		npairs = 1;
	}

	sc->vtnet_act_vq_pairs = npairs;
}
/*
 * Push the current receive-offload capabilities (checksum, LRO) to the host
 * via the guest-offloads control command.  On failure, fall back to a full
 * reinit so driver and host state stay consistent.  Core lock held.
 */
static void
vtnet_update_rx_offloads(struct vtnet_softc *sc)
{
	if_t ifp;
	uint64_t features;
	int error;

	ifp = sc->vtnet_ifp;
	features = sc->vtnet_features;

	VTNET_CORE_LOCK_ASSERT(sc);

	if (if_getcapabilities(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
		if (if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
			features |= VIRTIO_NET_F_GUEST_CSUM;
		else
			features &= ~VIRTIO_NET_F_GUEST_CSUM;
	}

	/* Host LRO only; software LRO is handled entirely in the guest. */
	if (if_getcapabilities(ifp) & IFCAP_LRO && !vtnet_software_lro(sc)) {
		if (if_getcapenable(ifp) & IFCAP_LRO)
			features |= VTNET_LRO_FEATURES;
		else
			features &= ~VTNET_LRO_FEATURES;
	}

	error = vtnet_ctrl_guest_offloads(sc,
	    features & (VIRTIO_NET_F_GUEST_CSUM | VIRTIO_NET_F_GUEST_TSO4 |
			VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN |
			VIRTIO_NET_F_GUEST_UFO));

	if (error) {
		device_printf(sc->vtnet_dev,
		    "%s: cannot update Rx features\n", __func__);
		/* Recover by reinitializing the interface if it was running. */
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			vtnet_init_locked(sc, 0);
		}
	} else
		sc->vtnet_features = features;
}
/*
 * Reinitialize device and driver state after a stop: renegotiate features,
 * reprogram the MAC address and active queue pairs, restore receive
 * filters, rebuild the hardware-assist flags from if_capenable, and refill
 * the queues.  Returns 0 on success.
 */
static int
vtnet_reinit(struct vtnet_softc *sc)
{
	if_t ifp;
	int error;

	ifp = sc->vtnet_ifp;

	/* The interface's MAC address may have been changed via ioctl. */
	bcopy(if_getlladdr(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);

	error = vtnet_virtio_reinit(sc);
	if (error)
		return (error);

	vtnet_set_macaddr(sc);
	vtnet_set_active_vq_pairs(sc);

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
		vtnet_init_rx_filters(sc);

	/* Rebuild if_hwassist from the enabled capabilities. */
	if_sethwassist(ifp, 0);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
		if_sethwassistbits(ifp, VTNET_CSUM_OFFLOAD, 0);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
		if_sethwassistbits(ifp, VTNET_CSUM_OFFLOAD_IPV6, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO4)
		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO6)
		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);

	error = vtnet_init_rxtx_queues(sc);
	if (error)
		return (error);

	return (0);
}
/*
 * Locked body of the if_init handler.  Stops the device, optionally flips
 * the netmap native mode (init_mode selects enter/exit/neither), then runs
 * the full reinit sequence and marks the interface running.  A reinit
 * failure leaves the device stopped.  Core lock held.
 */
static void
vtnet_init_locked(struct vtnet_softc *sc, int init_mode)
{
	if_t ifp;

	ifp = sc->vtnet_ifp;

	VTNET_CORE_LOCK_ASSERT(sc);

	/* Nothing to do if the interface is already up. */
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	vtnet_stop(sc);

#ifdef DEV_NETMAP
	/* Once stopped we can update the netmap flags, if necessary. */
	switch (init_mode) {
	case VTNET_INIT_NETMAP_ENTER:
		nm_set_native_flags(NA(ifp));
		break;
	case VTNET_INIT_NETMAP_EXIT:
		nm_clear_native_flags(NA(ifp));
		break;
	}
#endif /* DEV_NETMAP */

	if (vtnet_reinit(sc) != 0) {
		vtnet_stop(sc);
		return;
	}

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	vtnet_update_link_status(sc);
	vtnet_enable_interrupts(sc);
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);

#ifdef DEV_NETMAP
	/* Re-enable txsync/rxsync. */
	netmap_enable_all_rings(ifp);
#endif /* DEV_NETMAP */
}
/* if_init entry point: run the locked initializer under the core lock. */
static void
vtnet_init(void *xsc)
{
	struct vtnet_softc *sc = xsc;

	VTNET_CORE_LOCK(sc);
	vtnet_init_locked(sc, 0);
	VTNET_CORE_UNLOCK(sc);
}
/*
 * Sanity check before tearing down the control virtqueue.  Control commands
 * are always polled to completion, so the queue must be empty by now.
 */
static void
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
{
	KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
	    ("%s: ctrl vq %p not empty", __func__, sc->vtnet_ctrl_vq));
}
/*
 * Enqueue a control-virtqueue command described by the scatter/gather list
 * and busy-poll until the host completes it.  Note that if the queue is
 * unexpectedly non-empty the command is silently dropped; callers detect
 * failure through the (host-writable) ack byte pointed at by cookie.
 * Core lock held.
 */
static void
vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct virtqueue *vq;

	vq = sc->vtnet_ctrl_vq;

	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ);
	VTNET_CORE_LOCK_ASSERT(sc);

	if (!virtqueue_empty(vq))
		return;

	/*
	 * Poll for the response, but the command is likely completed before
	 * returning from the notify.
	 */
	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) == 0) {
		virtqueue_notify(vq);
		virtqueue_poll(vq, NULL);
	}
}
/*
 * Program a new primary MAC address through the control virtqueue
 * (VIRTIO_NET_CTRL_MAC_ADDR_SET).  Returns 0 on success, EIO if the host
 * rejects the command or it could not be issued.
 */
static int
vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
{
	struct sglist_seg segs[3];
	struct sglist sg;
	/* On-stack command layout: header / address / ack, padded apart. */
	struct {
		struct virtio_net_ctrl_hdr hdr __aligned(2);
		uint8_t pad1;
		uint8_t addr[ETHER_ADDR_LEN] __aligned(8);
		uint8_t pad2;
		uint8_t ack;
	} s;
	int error;

	error = 0;
	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_MAC);

	s.hdr.class = VIRTIO_NET_CTRL_MAC;
	s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
	bcopy(hwaddr, &s.addr[0], ETHER_ADDR_LEN);
	/* Preset to error; the host overwrites the ack on success. */
	s.ack = VIRTIO_NET_ERR;

	/* Header and address are host-readable; the ack is host-writable. */
	sglist_init(&sg, nitems(segs), segs);
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &s.addr[0], ETHER_ADDR_LEN);
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));

	if (error == 0)
		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}
/*
 * Issue VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET to change the set of receive
 * offloads the host may use, converting the offload mask to the device's
 * byte order.  Returns 0 on success, EIO on host rejection.
 */
static int
vtnet_ctrl_guest_offloads(struct vtnet_softc *sc, uint64_t offloads)
{
	struct sglist_seg segs[3];
	struct sglist sg;
	/* On-stack command layout: header / offload mask / ack. */
	struct {
		struct virtio_net_ctrl_hdr hdr __aligned(2);
		uint8_t pad1;
		uint64_t offloads __aligned(8);
		uint8_t pad2;
		uint8_t ack;
	} s;
	int error;

	error = 0;
	MPASS(sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);

	s.hdr.class = VIRTIO_NET_CTRL_GUEST_OFFLOADS;
	s.hdr.cmd = VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET;
	s.offloads = vtnet_gtoh64(sc, offloads);
	/* Preset to error; the host overwrites the ack on success. */
	s.ack = VIRTIO_NET_ERR;

	sglist_init(&sg, nitems(segs), segs);
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &s.offloads, sizeof(uint64_t));
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));

	if (error == 0)
		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}
/*
 * Issue VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET to request the given number of
 * active virtqueue pairs.  Returns 0 on success, EIO on host rejection.
 */
static int
vtnet_ctrl_mq_cmd(struct vtnet_softc *sc, uint16_t npairs)
{
	struct sglist_seg segs[3];
	struct sglist sg;
	/* On-stack command layout: header / mq request / ack. */
	struct {
		struct virtio_net_ctrl_hdr hdr __aligned(2);
		uint8_t pad1;
		struct virtio_net_ctrl_mq mq __aligned(2);
		uint8_t pad2;
		uint8_t ack;
	} s;
	int error;

	error = 0;
	MPASS(sc->vtnet_flags & VTNET_FLAG_MQ);

	s.hdr.class = VIRTIO_NET_CTRL_MQ;
	s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
	s.mq.virtqueue_pairs = vtnet_gtoh16(sc, npairs);
	/* Preset to error; the host overwrites the ack on success. */
	s.ack = VIRTIO_NET_ERR;

	sglist_init(&sg, nitems(segs), segs);
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq));
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));

	if (error == 0)
		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}
/*
 * Issue a VIRTIO_NET_CTRL_RX mode command (e.g. promiscuous or
 * all-multicast) turning the mode on or off.  Returns 0 on success, EIO on
 * host rejection.
 */
static int
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, uint8_t cmd, bool on)
{
	struct sglist_seg segs[3];
	struct sglist sg;
	/* On-stack command layout: header / on-off byte / ack. */
	struct {
		struct virtio_net_ctrl_hdr hdr __aligned(2);
		uint8_t pad1;
		uint8_t onoff;
		uint8_t pad2;
		uint8_t ack;
	} s;
	int error;

	error = 0;
	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX);

	s.hdr.class = VIRTIO_NET_CTRL_RX;
	s.hdr.cmd = cmd;
	s.onoff = on;
	/* Preset to error; the host overwrites the ack on success. */
	s.ack = VIRTIO_NET_ERR;

	sglist_init(&sg, nitems(segs), segs);
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));

	if (error == 0)
		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}
/* Toggle promiscuous mode on the host. */
static int
vtnet_set_promisc(struct vtnet_softc *sc, bool on)
{
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
}
/* Toggle all-multicast mode on the host. */
static int
vtnet_set_allmulti(struct vtnet_softc *sc, bool on)
{
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
}
/*
 * Synchronize the host's promiscuous and all-multicast modes with the
 * interface flags.  Core lock held.
 */
static void
vtnet_rx_filter(struct vtnet_softc *sc)
{
	device_t dev = sc->vtnet_dev;
	if_t ifp = sc->vtnet_ifp;

	VTNET_CORE_LOCK_ASSERT(sc);

	if (vtnet_set_promisc(sc, (if_getflags(ifp) & IFF_PROMISC) != 0) != 0) {
		device_printf(dev, "cannot %s promiscuous mode\n",
		    if_getflags(ifp) & IFF_PROMISC ? "enable" : "disable");
	}

	if (vtnet_set_allmulti(sc, (if_getflags(ifp) & IFF_ALLMULTI) != 0) != 0) {
		device_printf(dev, "cannot %s all-multicast mode\n",
		    if_getflags(ifp) & IFF_ALLMULTI ? "enable" : "disable");
	}
}
/*
 * if_foreach_lladdr() callback: collect unicast addresses into the MAC
 * filter table, skipping the primary address, and count every candidate
 * even once the table is full (so the caller can detect overflow).
 */
static u_int
vtnet_copy_ifaddr(void *arg, struct sockaddr_dl *sdl, u_int ucnt)
{
	struct vtnet_softc *sc = arg;

	/* The primary address is programmed separately. */
	if (memcmp(LLADDR(sdl), sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
		return (0);

	if (ucnt < VTNET_MAX_MAC_ENTRIES) {
		memcpy(&sc->vtnet_mac_filter->vmf_unicast.macs[ucnt],
		    LLADDR(sdl), ETHER_ADDR_LEN);
	}

	return (1);
}
/*
 * if_foreach_llmaddr() callback: collect multicast addresses into the MAC
 * filter table, counting every candidate even once the table is full.
 */
static u_int
vtnet_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
{
	struct vtnet_mac_filter *filter = arg;

	if (mcnt < VTNET_MAX_MAC_ENTRIES) {
		memcpy(&filter->vmf_multicast.macs[mcnt], LLADDR(sdl),
		    ETHER_ADDR_LEN);
	}

	return (1);
}
/*
 * Program the host's unicast/multicast MAC filter table.  If either list
 * overflows the table, fall back to promiscuous and/or all-multicast mode
 * instead; if both overflow, skip the table command entirely.  Core lock
 * held.
 */
static void
vtnet_rx_filter_mac(struct vtnet_softc *sc)
{
	struct virtio_net_ctrl_hdr hdr __aligned(2);
	struct vtnet_mac_filter *filter;
	struct sglist_seg segs[4];
	struct sglist sg;
	if_t ifp;
	bool promisc, allmulti;
	u_int ucnt, mcnt;
	int error;
	uint8_t ack;

	ifp = sc->vtnet_ifp;
	filter = sc->vtnet_mac_filter;
	error = 0;

	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX);
	VTNET_CORE_LOCK_ASSERT(sc);

	/* Unicast MAC addresses: */
	ucnt = if_foreach_lladdr(ifp, vtnet_copy_ifaddr, sc);
	promisc = (ucnt > VTNET_MAX_MAC_ENTRIES);

	if (promisc) {
		/* Send an empty unicast table; promisc covers everything. */
		ucnt = 0;
		if_printf(ifp, "more than %d MAC addresses assigned, "
		    "falling back to promiscuous mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	}

	/* Multicast MAC addresses: */
	mcnt = if_foreach_llmaddr(ifp, vtnet_copy_maddr, filter);
	allmulti = (mcnt > VTNET_MAX_MAC_ENTRIES);

	if (allmulti) {
		/* Send an empty multicast table; allmulti covers everything. */
		mcnt = 0;
		if_printf(ifp, "more than %d multicast MAC addresses "
		    "assigned, falling back to all-multicast mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	}

	if (promisc && allmulti)
		goto out;

	filter->vmf_unicast.nentries = vtnet_gtoh32(sc, ucnt);
	filter->vmf_multicast.nentries = vtnet_gtoh32(sc, mcnt);

	hdr.class = VIRTIO_NET_CTRL_MAC;
	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
	/* Preset to error; the host overwrites the ack on success. */
	ack = VIRTIO_NET_ERR;

	/* Both tables are sized to only the entries actually used. */
	sglist_init(&sg, nitems(segs), segs);
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &filter->vmf_unicast,
	    sizeof(uint32_t) + ucnt * ETHER_ADDR_LEN);
	error |= sglist_append(&sg, &filter->vmf_multicast,
	    sizeof(uint32_t) + mcnt * ETHER_ADDR_LEN);
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));

	if (error == 0)
		vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
	if (ack != VIRTIO_NET_OK)
		if_printf(ifp, "error setting host MAC filter table\n");

out:
	if (promisc != 0 && vtnet_set_promisc(sc, true) != 0)
		if_printf(ifp, "cannot enable promiscuous mode\n");
	if (allmulti != 0 && vtnet_set_allmulti(sc, true) != 0)
		if_printf(ifp, "cannot enable all-multicast mode\n");
}
/*
 * Add (add != 0) or remove a VLAN tag in the host's VLAN filter via the
 * control virtqueue.  Returns 0 on success, EIO on host rejection.
 */
static int
vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
	struct sglist_seg segs[3];
	struct sglist sg;
	/* On-stack command layout: header / tag / ack. */
	struct {
		struct virtio_net_ctrl_hdr hdr __aligned(2);
		uint8_t pad1;
		uint16_t tag __aligned(2);
		uint8_t pad2;
		uint8_t ack;
	} s;
	int error;

	error = 0;
	MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER);

	s.hdr.class = VIRTIO_NET_CTRL_VLAN;
	s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
	s.tag = vtnet_gtoh16(sc, tag);
	/* Preset to error; the host overwrites the ack on success. */
	s.ack = VIRTIO_NET_ERR;

	sglist_init(&sg, nitems(segs), segs);
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));

	if (error == 0)
		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}
/*
 * Replay the driver's VLAN filter bitmap into the host: enable the host
 * filter for every VLAN tag currently configured.  Core lock held.
 */
static void
vtnet_rx_filter_vlan(struct vtnet_softc *sc)
{
	uint32_t w;
	uint16_t tag;
	int i, bit;

	MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER);
	VTNET_CORE_LOCK_ASSERT(sc);

	/* Enable the filter for each configured VLAN. */
	for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) {
		w = sc->vtnet_vlan_filter[i];

		while (w != 0) {
			/* Pop the lowest set bit and map it to a tag. */
			bit = ffs(w) - 1;
			w &= ~(1 << bit);
			tag = sizeof(w) * CHAR_BIT * i + bit;

			if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
				device_printf(sc->vtnet_dev,
				    "cannot enable VLAN %d filter\n", tag);
			}
		}
	}
}
/*
 * Record a VLAN tag add/remove in the driver's filter bitmap and, when the
 * interface is running with hardware VLAN filtering enabled, mirror the
 * change into the host filter table.
 */
static void
vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
	if_t ifp;
	int idx, bit;

	ifp = sc->vtnet_ifp;
	/* Bitmap position: 32 tags per word. */
	idx = (tag >> 5) & 0x7F;
	bit = tag & 0x1F;

	/* Tag 0 is reserved and 12-bit tags cannot exceed 4095. */
	if (tag == 0 || tag > 4095)
		return;

	VTNET_CORE_LOCK(sc);

	/* Update the VLAN filter. */
	if (add)
		sc->vtnet_vlan_filter[idx] |= (1 << bit);
	else
		sc->vtnet_vlan_filter[idx] &= ~(1 << bit);

	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER &&
	    if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
	    vtnet_exec_vlan_filter(sc, add, tag) != 0) {
		device_printf(sc->vtnet_dev,
		    "cannot %s VLAN %d %s the host filter table\n",
		    add ? "add" : "remove", tag, add ? "to" : "from");
	}

	VTNET_CORE_UNLOCK(sc);
}
/* vlan_config event handler: add a VLAN tag for our interface. */
static void
vtnet_register_vlan(void *arg, if_t ifp, uint16_t tag)
{
	/* Ignore events for other interfaces. */
	if (if_getsoftc(ifp) != arg)
		return;

	vtnet_update_vlan_filter(arg, 1, tag);
}
/* vlan_unconfig event handler: remove a VLAN tag for our interface. */
static void
vtnet_unregister_vlan(void *arg, if_t ifp, uint16_t tag)
{
	/* Ignore events for other interfaces. */
	if (if_getsoftc(ifp) != arg)
		return;

	vtnet_update_vlan_filter(arg, 0, tag);
}
/*
 * Refresh the interface baudrate from the device's advertised speed, when
 * the host supports the speed/duplex config fields.
 */
static void
vtnet_update_speed_duplex(struct vtnet_softc *sc)
{
	if_t ifp = sc->vtnet_ifp;
	uint32_t speed;

	if ((sc->vtnet_features & VIRTIO_NET_F_SPEED_DUPLEX) == 0)
		return;

	/* BMV: Ignore duplex. */
	speed = virtio_read_dev_config_4(sc->vtnet_dev,
	    offsetof(struct virtio_net_config, speed));
	/* UINT32_MAX means the speed is unknown. */
	if (speed != UINT32_MAX)
		if_setbaudrate(ifp, IF_Mbps(speed));
}
/*
 * Report whether the host considers the link up.  Without the STATUS
 * feature, the link is assumed to always be up.
 */
static int
vtnet_is_link_up(struct vtnet_softc *sc)
{
	uint16_t status;

	if ((sc->vtnet_features & VIRTIO_NET_F_STATUS) == 0)
		return (1);

	status = virtio_read_dev_config_2(sc->vtnet_dev,
	    offsetof(struct virtio_net_config, status));

	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
}
/*
 * Compare the host-reported link state with our cached state and notify
 * the stack of any transition.  Core lock held.
 */
static void
vtnet_update_link_status(struct vtnet_softc *sc)
{
	if_t ifp = sc->vtnet_ifp;
	int link;

	VTNET_CORE_LOCK_ASSERT(sc);
	link = vtnet_is_link_up(sc);

	if (link != 0 && sc->vtnet_link_active == 0) {
		/* Link came up. */
		vtnet_update_speed_duplex(sc);
		sc->vtnet_link_active = 1;
		if_link_state_change(ifp, LINK_STATE_UP);
	} else if (link == 0 && sc->vtnet_link_active != 0) {
		/* Link went down. */
		sc->vtnet_link_active = 0;
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}
}
/* Media changes are not supported on a virtual link. */
static int
vtnet_ifmedia_upd(if_t ifp __unused)
{
	return (EOPNOTSUPP);
}
/*
 * ifmedia status handler: report a fixed 10G full-duplex medium when the
 * link is up, IFM_NONE otherwise.
 */
static void
vtnet_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct vtnet_softc *sc = if_getsoftc(ifp);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	VTNET_CORE_LOCK(sc);
	if (vtnet_is_link_up(sc) == 0) {
		ifmr->ifm_active |= IFM_NONE;
	} else {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
	}
	VTNET_CORE_UNLOCK(sc);
}
/*
 * Obtain the hardware address: read it from the device configuration when
 * the MAC feature was negotiated, otherwise synthesize one.
 */
static void
vtnet_get_macaddr(struct vtnet_softc *sc)
{
	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
		/* Generate a random locally administered unicast address. */
		sc->vtnet_hwaddr[0] = 0xB2;
		arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);
		return;
	}

	virtio_read_device_config_array(sc->vtnet_dev,
	    offsetof(struct virtio_net_config, mac),
	    &sc->vtnet_hwaddr[0], sizeof(uint8_t), ETHER_ADDR_LEN);
}
/*
 * Push the driver's hardware address to the host: preferably via the
 * CTRL_MAC control command, otherwise by writing the legacy config space
 * byte-by-byte (not permitted on modern VirtIO, where the MAC config field
 * is read-only).
 */
static void
vtnet_set_macaddr(struct vtnet_softc *sc)
{
	device_t dev;
	int error;

	dev = sc->vtnet_dev;

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) {
		error = vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr);
		if (error)
			device_printf(dev, "unable to set MAC address\n");
		return;
	}

	/* MAC in config is read-only in modern VirtIO. */
	if (!vtnet_modern(sc) && sc->vtnet_flags & VTNET_FLAG_MAC) {
		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
			virtio_write_dev_config_1(dev,
			    offsetof(struct virtio_net_config, mac) + i,
			    sc->vtnet_hwaddr[i]);
		}
	}
}
/*
 * Called at attach time: push the hardware address to the host only if it
 * was locally generated (i.e. the host did not supply one).
 */
static void
vtnet_attached_set_macaddr(struct vtnet_softc *sc)
{
	/* Assign MAC address if it was generated. */
	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0)
		vtnet_set_macaddr(sc);
}
/*
 * Extract the 802.1Q tag from a received frame into the mbuf packet header
 * and strip the 4-byte encapsulation from the payload.  The bcopy shifts
 * the Ethernet addresses forward over the VLAN header (overlapping copy;
 * bcopy handles overlap) before m_adj() trims the front of the mbuf.
 */
static void
vtnet_vlan_tag_remove(struct mbuf *m)
{
	struct ether_vlan_header *evh;

	evh = mtod(m, struct ether_vlan_header *);
	m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
	m->m_flags |= M_VLANTAG;

	/* Strip the 802.1Q header. */
	bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
	    ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}
/*
 * Load the per-device Rx processing limit tunable; a negative value means
 * "unlimited", stored as INT_MAX.
 */
static void
vtnet_set_rx_process_limit(struct vtnet_softc *sc)
{
	int limit;

	limit = vtnet_tunable_int(sc, "rx_process_limit",
	    vtnet_rx_process_limit);
	sc->vtnet_rx_process_limit = (limit < 0) ? INT_MAX : limit;
}
/*
 * Create the per-receive-queue sysctl node ("rxqN") and attach its
 * statistics counters under it.
 */
static void
vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
{
	struct sysctl_oid *node;
	struct sysctl_oid_list *list;
	struct vtnet_rxq_stats *stats;
	char namebuf[16];

	snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vtnrx_id);
	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Receive Queue");
	list = SYSCTL_CHILDREN(node);

	stats = &rxq->vtnrx_stats;
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
	    &stats->vrxs_ipackets, "Receive packets");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
	    &stats->vrxs_ibytes, "Receive bytes");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
	    &stats->vrxs_iqdrops, "Receive drops");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
	    &stats->vrxs_ierrors, "Receive errors");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
	    &stats->vrxs_csum, "Receive checksum offloaded");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
	    &stats->vrxs_csum_failed, "Receive checksum offload failed");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro", CTLFLAG_RD,
	    &stats->vrxs_host_lro, "Receive host segmentation offloaded");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
	    &stats->vrxs_rescheduled,
	    "Receive interrupt handler rescheduled");
}
/*
 * Create the per-transmit-queue sysctl node ("txqN") and attach its
 * statistics counters under it.
 */
static void
vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child, struct vtnet_txq *txq)
{
	struct sysctl_oid *node;
	struct sysctl_oid_list *list;
	struct vtnet_txq_stats *stats;
	char namebuf[16];

	snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vtntx_id);
	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Transmit Queue");
	list = SYSCTL_CHILDREN(node);

	stats = &txq->vtntx_stats;
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
	    &stats->vtxs_opackets, "Transmit packets");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
	    &stats->vtxs_obytes, "Transmit bytes");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
	    &stats->vtxs_omcasts, "Transmit multicasts");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
	    &stats->vtxs_csum, "Transmit checksum offloaded");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
	    &stats->vtxs_tso, "Transmit TCP segmentation offloaded");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
	    &stats->vtxs_rescheduled,
	    "Transmit interrupt handler rescheduled");
}
/* Create per-queue sysctl nodes for every requested virtqueue pair. */
static void
vtnet_setup_queue_sysctl(struct vtnet_softc *sc)
{
	device_t dev = sc->vtnet_dev;
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	int i;

	for (i = 0; i < sc->vtnet_req_vq_pairs; i++) {
		vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]);
		vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]);
	}
}
/*
 * Attach the driver-wide statistics sysctls.  The aggregate fields that
 * mirror per-queue counters are snapshotted from the queues first so the
 * exported values start out consistent.
 */
static void
vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child, struct vtnet_softc *sc)
{
	struct vtnet_statistics *stats;
	struct vtnet_rxq_stats rxaccum;
	struct vtnet_txq_stats txaccum;

	vtnet_accum_stats(sc, &rxaccum, &txaccum);

	stats = &sc->vtnet_stats;
	stats->rx_csum_offloaded = rxaccum.vrxs_csum;
	stats->rx_csum_failed = rxaccum.vrxs_csum_failed;
	stats->rx_task_rescheduled = rxaccum.vrxs_rescheduled;
	stats->tx_csum_offloaded = txaccum.vtxs_csum;
	stats->tx_tso_offloaded = txaccum.vtxs_tso;
	stats->tx_task_rescheduled = txaccum.vtxs_rescheduled;

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
	    CTLFLAG_RD, &stats->mbuf_alloc_failed,
	    "Mbuf cluster allocation failures");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
	    CTLFLAG_RD, &stats->rx_frame_too_large,
	    "Received frame larger than the mbuf chain");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
	    CTLFLAG_RD, &stats->rx_enq_replacement_failed,
	    "Enqueuing the replacement receive mbuf failed");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
	    CTLFLAG_RD, &stats->rx_mergeable_failed,
	    "Mergeable buffers receive failures");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
	    CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
	    "Received checksum offloaded buffer with unsupported "
	    "Ethernet type");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
	    CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
	    "Received checksum offloaded buffer with incorrect IP protocol");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
	    CTLFLAG_RD, &stats->rx_csum_bad_offset,
	    "Received checksum offloaded buffer with incorrect offset");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto",
	    CTLFLAG_RD, &stats->rx_csum_bad_proto,
	    "Received checksum offloaded buffer with incorrect protocol");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
	    CTLFLAG_RD, &stats->rx_csum_failed,
	    "Received buffer checksum offload failed");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
	    CTLFLAG_RD, &stats->rx_csum_offloaded,
	    "Received buffer checksum offload succeeded");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
	    CTLFLAG_RD, &stats->rx_task_rescheduled,
	    "Times the receive interrupt task rescheduled itself");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_unknown_ethtype",
	    CTLFLAG_RD, &stats->tx_csum_unknown_ethtype,
	    "Aborted transmit of checksum offloaded buffer with unknown "
	    "Ethernet type");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_proto_mismatch",
	    CTLFLAG_RD, &stats->tx_csum_proto_mismatch,
	    "Aborted transmit of checksum offloaded buffer because mismatched "
	    "protocols");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
	    CTLFLAG_RD, &stats->tx_tso_not_tcp,
	    "Aborted transmit of TSO buffer with non TCP protocol");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_without_csum",
	    CTLFLAG_RD, &stats->tx_tso_without_csum,
	    "Aborted transmit of TSO buffer without TCP checksum offload");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
	    CTLFLAG_RD, &stats->tx_defragged,
	    "Transmit mbufs defragged");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
	    CTLFLAG_RD, &stats->tx_defrag_failed,
	    "Aborted transmit of buffer because defrag failed");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
	    CTLFLAG_RD, &stats->tx_csum_offloaded,
	    "Offloaded checksum of transmitted buffer");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
	    CTLFLAG_RD, &stats->tx_tso_offloaded,
	    "Segmentation offload of transmitted buffer");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
	    CTLFLAG_RD, &stats->tx_task_rescheduled,
	    "Times the transmit interrupt task rescheduled itself");
}
/* Attach the device-level sysctl nodes (queue pair counts and stats). */
static void
vtnet_setup_sysctl(struct vtnet_softc *sc)
{
	device_t dev = sc->vtnet_dev;
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
	    CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
	    "Number of maximum supported virtqueue pairs");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "req_vq_pairs",
	    CTLFLAG_RD, &sc->vtnet_req_vq_pairs, 0,
	    "Number of requested virtqueue pairs");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
	    CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
	    "Number of active virtqueue pairs");

	vtnet_setup_stat_sysctl(ctx, child, sc);
}
/* Load per-device LRO tunables, clamping the entry count to the minimum. */
static void
vtnet_load_tunables(struct vtnet_softc *sc)
{
	int count;

	count = vtnet_tunable_int(sc, "lro_entry_count",
	    vtnet_lro_entry_count);
	sc->vtnet_lro_entry_count =
	    (count < TCP_LRO_ENTRIES) ? TCP_LRO_ENTRIES : count;

	sc->vtnet_lro_mbufq_depth = vtnet_tunable_int(sc,
	    "lro_mbufq_depth", vtnet_lro_mbufq_depth);
}
/* Enable the Rx virtqueue interrupt; nonzero means work is pending. */
static int
vtnet_rxq_enable_intr(struct vtnet_rxq *rxq)
{
	return (virtqueue_enable_intr(rxq->vtnrx_vq));
}
/* Disable the Rx virtqueue interrupt. */
static void
vtnet_rxq_disable_intr(struct vtnet_rxq *rxq)
{
	virtqueue_disable_intr(rxq->vtnrx_vq);
}
/*
 * Conditionally re-arm the Tx virtqueue interrupt: only postpone-enable it
 * once the free descriptor count has dropped below the queue's threshold.
 */
static int
vtnet_txq_enable_intr(struct vtnet_txq *txq)
{
	struct virtqueue *vq = txq->vtntx_vq;

	if (vtnet_txq_below_threshold(txq) == 0) {
		/*
		 * The free count is above our threshold. Keep the Tx
		 * interrupt disabled until the queue is fuller.
		 */
		return (0);
	}

	return (virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG));
}
/* Disable the Tx virtqueue interrupt. */
static void
vtnet_txq_disable_intr(struct vtnet_txq *txq)
{
	virtqueue_disable_intr(txq->vtntx_vq);
}
/*
 * Enable Rx interrupts on all active queues; if work was already pending
 * on a queue, kick its interrupt task to process it.
 */
static void
vtnet_enable_rx_interrupts(struct vtnet_softc *sc)
{
	int i;

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];

		if (vtnet_rxq_enable_intr(rxq) != 0)
			taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
	}
}
/* Conditionally arm the Tx interrupt on all active queues. */
static void
vtnet_enable_tx_interrupts(struct vtnet_softc *sc)
{
	int i;

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
		vtnet_txq_enable_intr(&sc->vtnet_txqs[i]);
}
/* Enable both Rx and Tx queue interrupts. */
static void
vtnet_enable_interrupts(struct vtnet_softc *sc)
{
	vtnet_enable_rx_interrupts(sc);
	vtnet_enable_tx_interrupts(sc);
}
/* Disable the Rx interrupt on every queue, active or not. */
static void
vtnet_disable_rx_interrupts(struct vtnet_softc *sc)
{
	int i;

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
		vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]);
}
/* Disable the Tx interrupt on every queue, active or not. */
static void
vtnet_disable_tx_interrupts(struct vtnet_softc *sc)
{
	int i;

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
		vtnet_txq_disable_intr(&sc->vtnet_txqs[i]);
}
/* Disable both Rx and Tx queue interrupts. */
static void
vtnet_disable_interrupts(struct vtnet_softc *sc)
{
	vtnet_disable_rx_interrupts(sc);
	vtnet_disable_tx_interrupts(sc);
}
/*
 * Fetch a per-device integer tunable ("hw.vtnet.<unit>.<knob>"), returning
 * the supplied default when the tunable is not set.
 */
static int
vtnet_tunable_int(struct vtnet_softc *sc, const char *knob, int def)
{
	char path[64];

	snprintf(path, sizeof(path),
	    "hw.vtnet.%d.%s", device_get_unit(sc->vtnet_dev), knob);
	TUNABLE_INT_FETCH(path, &def);

	return (def);
}
#ifdef DEBUGNET
/* debugnet init callback: report queue count and receive buffer sizing. */
static void
vtnet_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
{
	struct vtnet_softc *sc = if_getsoftc(ifp);

	VTNET_CORE_LOCK(sc);
	*nrxr = sc->vtnet_req_vq_pairs;
	*ncl = DEBUGNET_MAX_IN_FLIGHT;
	*clsize = sc->vtnet_rx_clustersz;
	VTNET_CORE_UNLOCK(sc);
}
/*
 * debugnet event callback: temporarily turn off software LRO for the
 * duration of a debugnet session and restore it afterwards.
 *
 * Note: the ifp parameter was previously annotated __unused, but it is in
 * fact dereferenced via if_getsoftc() below, so the annotation was removed.
 */
static void
vtnet_debugnet_event(if_t ifp, enum debugnet_ev event)
{
	struct vtnet_softc *sc;
	/* Remembers whether SW LRO was on when the session started. */
	static bool sw_lro_enabled = false;

	/*
	 * Disable software LRO, since it would require entering the network
	 * epoch when calling vtnet_txq_eof() in vtnet_debugnet_poll().
	 */
	sc = if_getsoftc(ifp);
	switch (event) {
	case DEBUGNET_START:
		sw_lro_enabled = (sc->vtnet_flags & VTNET_FLAG_SW_LRO) != 0;
		if (sw_lro_enabled)
			sc->vtnet_flags &= ~VTNET_FLAG_SW_LRO;
		break;
	case DEBUGNET_END:
		if (sw_lro_enabled)
			sc->vtnet_flags |= VTNET_FLAG_SW_LRO;
		break;
	}
}
/*
 * debugnet transmit callback: encapsulate and enqueue one mbuf on Tx queue
 * 0 without sleeping.  Returns EBUSY when the interface is not running, or
 * the error from vtnet_txq_encap().
 */
static int
vtnet_debugnet_transmit(if_t ifp, struct mbuf *m)
{
	struct vtnet_softc *sc;
	struct vtnet_txq *txq;
	int error;

	sc = if_getsoftc(ifp);
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	/* Debugnet always uses the first transmit queue. */
	txq = &sc->vtnet_txqs[0];
	error = vtnet_txq_encap(txq, &m, M_NOWAIT | M_USE_RESERVE);
	if (error == 0)
		(void)vtnet_txq_notify(txq);

	return (error);
}
/*
 * debugnet poll callback: reap completed transmits on queue 0 and process
 * received frames on every active Rx queue.  Returns EBUSY when the
 * interface is not running.  The count argument is unused here.
 */
static int
vtnet_debugnet_poll(if_t ifp, int count)
{
	struct vtnet_softc *sc;
	int i;

	sc = if_getsoftc(ifp);
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	(void)vtnet_txq_eof(&sc->vtnet_txqs[0]);
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
		(void)vtnet_rxq_eof(&sc->vtnet_rxqs[i]);

	return (0);
}
#endif /* DEBUGNET */
diff --git a/sys/dev/vnic/nicvf_main.c b/sys/dev/vnic/nicvf_main.c
index f5cf99ef0ce4..dd44e420c78f 100644
--- a/sys/dev/vnic/nicvf_main.c
+++ b/sys/dev/vnic/nicvf_main.c
@@ -1,1616 +1,1605 @@
/*
* Copyright (C) 2015 Cavium Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/stdatomic.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp_lro.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <sys/dnv.h>
#include <sys/nv.h>
#include <sys/iov_schema.h>
#include <machine/bus.h>
#include "thunder_bgx.h"
#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
/* Probe description string and the BAR holding VF registers. */
#define VNIC_VF_DEVSTR		"Cavium Thunder NIC Virtual Function Driver"

#define VNIC_VF_REG_RID		PCIR_BAR(PCI_CFG_REG_BAR_NUM)

/* Lock for core interface settings */
#define NICVF_CORE_LOCK_INIT(nic)	\
    sx_init(&(nic)->core_sx, device_get_nameunit((nic)->dev))

#define NICVF_CORE_LOCK_DESTROY(nic)	\
    sx_destroy(&(nic)->core_sx)

#define NICVF_CORE_LOCK(nic)		sx_xlock(&(nic)->core_sx)
#define NICVF_CORE_UNLOCK(nic)		sx_xunlock(&(nic)->core_sx)

#define NICVF_CORE_LOCK_ASSERT(nic)	sx_assert(&(nic)->core_sx, SA_XLOCKED)

/* Link speeds in Mbps as reported by the PF in link-change messages. */
#define	SPEED_10	10
#define	SPEED_100	100
#define	SPEED_1000	1000
#define	SPEED_10000	10000
#define	SPEED_40000	40000
MALLOC_DEFINE(M_NICVF, "nicvf", "ThunderX VNIC VF dynamic memory");

/* newbus device interface entry points. */
static int nicvf_probe(device_t);
static int nicvf_attach(device_t);
static int nicvf_detach(device_t);

static device_method_t nicvf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, nicvf_probe),
	DEVMETHOD(device_attach, nicvf_attach),
	DEVMETHOD(device_detach, nicvf_detach),
	DEVMETHOD_END,
};

static driver_t nicvf_driver = {
	"vnic",
	nicvf_methods,
	sizeof(struct nicvf),
};

DRIVER_MODULE(vnicvf, pci, nicvf_driver, 0, 0);
MODULE_VERSION(vnicvf, 1);
MODULE_DEPEND(vnicvf, pci, 1, 1, 1);
MODULE_DEPEND(vnicvf, ether, 1, 1, 1);
MODULE_DEPEND(vnicvf, vnicpf, 1, 1, 1);

/* Interrupt allocation/teardown helpers. */
static int nicvf_allocate_misc_interrupt(struct nicvf *);
static int nicvf_enable_misc_interrupt(struct nicvf *);
static int nicvf_allocate_net_interrupts(struct nicvf *);
static void nicvf_release_all_interrupts(struct nicvf *);
/* PF mailbox requests and queue-set setup. */
static int nicvf_update_hw_max_frs(struct nicvf *, int);
static int nicvf_hw_set_mac_addr(struct nicvf *, uint8_t *);
static void nicvf_config_cpi(struct nicvf *);
static int nicvf_rss_init(struct nicvf *);
static int nicvf_init_resources(struct nicvf *);
-static int nicvf_setup_ifnet(struct nicvf *);
+static void nicvf_setup_ifnet(struct nicvf *);
static int nicvf_setup_ifmedia(struct nicvf *);
static void nicvf_hw_addr_random(uint8_t *);
/* ifnet methods and periodic statistics callout. */
static int nicvf_if_ioctl(if_t, u_long, caddr_t);
static void nicvf_if_init(void *);
static void nicvf_if_init_locked(struct nicvf *);
static int nicvf_if_transmit(if_t, struct mbuf *);
static void nicvf_if_qflush(if_t);
static uint64_t nicvf_if_getcounter(if_t, ift_counter);
static int nicvf_stop_locked(struct nicvf *);
static void nicvf_media_status(if_t, struct ifmediareq *);
static int nicvf_media_change(if_t);
static void nicvf_tick_stats(void *);
/*
 * newbus probe method: match Cavium ThunderX NIC VF PCI functions
 * (both pass1 and later silicon device IDs).
 */
static int
nicvf_probe(device_t dev)
{

	if (pci_get_vendor(dev) != PCI_VENDOR_ID_CAVIUM)
		return (ENXIO);

	switch (pci_get_device(dev)) {
	case PCI_DEVICE_ID_THUNDER_NIC_VF:
	case PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF:
		device_set_desc(dev, VNIC_VF_DEVSTR);
		return (BUS_PROBE_DEFAULT);
	default:
		return (ENXIO);
	}
}
/*
 * newbus attach method: map registers, size the queue set, establish
 * PF mailbox communication, allocate data-path interrupts, obtain or
 * randomize the MAC address, configure CPI/RSS, and attach the ifnet.
 */
static int
nicvf_attach(device_t dev)
{
	struct nicvf *nic;
	int rid, qcount;
	int err = 0;
	uint8_t hwaddr[ETHER_ADDR_LEN];
	/* All-zero MAC means the PF did not provide an address. */
	uint8_t zeromac[] = {[0 ... (ETHER_ADDR_LEN - 1)] = 0};

	nic = device_get_softc(dev);
	nic->dev = dev;
	nic->pnicvf = nic;

	NICVF_CORE_LOCK_INIT(nic);
	/* Enable HW TSO on Pass2 */
	if (!pass1_silicon(dev))
		nic->hw_tso = TRUE;

	/* Map the VF register BAR. */
	rid = VNIC_VF_REG_RID;
	nic->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (nic->reg_base == NULL) {
		device_printf(dev, "Could not allocate registers memory\n");
		return (ENXIO);
	}

	qcount = MAX_CMP_QUEUES_PER_QS;
	nic->max_queues = qcount;

	err = nicvf_set_qset_resources(nic);
	if (err != 0)
		goto err_free_res;

	/* Check if PF is alive and get MAC address for this VF */
	err = nicvf_allocate_misc_interrupt(nic);
	if (err != 0)
		goto err_free_res;

	NICVF_CORE_LOCK(nic);
	err = nicvf_enable_misc_interrupt(nic);
	NICVF_CORE_UNLOCK(nic);
	if (err != 0)
		goto err_release_intr;

	err = nicvf_allocate_net_interrupts(nic);
	if (err != 0) {
		device_printf(dev,
		    "Could not allocate network interface interrupts\n");
		goto err_free_ifnet;
	}

	/* If no MAC address was obtained we generate a random one */
	if (memcmp(nic->hwaddr, zeromac, ETHER_ADDR_LEN) == 0) {
		nicvf_hw_addr_random(hwaddr);
		memcpy(nic->hwaddr, hwaddr, ETHER_ADDR_LEN);
		NICVF_CORE_LOCK(nic);
		nicvf_hw_set_mac_addr(nic, hwaddr);
		NICVF_CORE_UNLOCK(nic);
	}

	/* Configure CPI algorithm */
	nic->cpi_alg = CPI_ALG_NONE;
	NICVF_CORE_LOCK(nic);
	nicvf_config_cpi(nic);
	/* Configure receive side scaling (only useful with multiple RQs) */
	if (nic->qs->rq_cnt > 1)
		nicvf_rss_init(nic);
	NICVF_CORE_UNLOCK(nic);

-	err = nicvf_setup_ifnet(nic);
-	if (err != 0) {
-		device_printf(dev, "Could not set-up ifnet\n");
-		goto err_release_intr;
-	}
+	nicvf_setup_ifnet(nic);

	err = nicvf_setup_ifmedia(nic);
	if (err != 0) {
		device_printf(dev, "Could not set-up ifmedia\n");
		goto err_free_ifnet;
	}

	mtx_init(&nic->stats_mtx, "VNIC stats", NULL, MTX_DEF);
	callout_init_mtx(&nic->stats_callout, &nic->stats_mtx, 0);

	ether_ifattach(nic->ifp, nic->hwaddr);

	return (0);

err_free_ifnet:
	if_free(nic->ifp);
err_release_intr:
	nicvf_release_all_interrupts(nic);
err_free_res:
	bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(nic->reg_base),
	    nic->reg_base);

	return (err);
}
/*
 * newbus detach method: stop the port and release every resource
 * acquired during attach.  Always returns 0.
 */
static int
nicvf_detach(device_t dev)
{
	struct nicvf *nic;

	nic = device_get_softc(dev);

	NICVF_CORE_LOCK(nic);
	/* Shut down the port and release ring resources */
	nicvf_stop_locked(nic);
	/* Release stats lock */
	mtx_destroy(&nic->stats_mtx);
	/* Release interrupts */
	nicvf_release_all_interrupts(nic);
	/* Release memory resource */
	if (nic->reg_base != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(nic->reg_base), nic->reg_base);
	}

	/* Remove all ifmedia configurations */
	ifmedia_removeall(&nic->if_media);
	/* Free this ifnet */
	if_free(nic->ifp);
	NICVF_CORE_UNLOCK(nic);
	/* Finally destroy the lock */
	NICVF_CORE_LOCK_DESTROY(nic);

	return (0);
}
/*
 * Create a randomized MAC address: the fixed prefix 'b' 's' 'd'
 * followed by 24 random low-order bits.
 */
static void
nicvf_hw_addr_random(uint8_t *hwaddr)
{
	uint32_t rnd;

	rnd = arc4random() & 0x00ffffff;
	hwaddr[0] = 'b';
	hwaddr[1] = 's';
	hwaddr[2] = 'd';
	hwaddr[3] = (rnd >> 16) & 0xff;
	hwaddr[4] = (rnd >> 8) & 0xff;
	hwaddr[5] = rnd & 0xff;
}
-static int
+static void
/*
 * Allocate and initialize the ifnet: method pointers, MTU, and the
 * capability/hwassist sets (VLAN MTU, jumbo, LRO, checksum offload,
 * plus TSO4 when the silicon supports HW TSO).
 */
nicvf_setup_ifnet(struct nicvf *nic)
{
	if_t ifp;

	ifp = if_alloc(IFT_ETHER);
-	if (ifp == NULL) {
-		device_printf(nic->dev, "Could not allocate ifnet structure\n");
-		return (ENOMEM);
-	}
-
	nic->ifp = ifp;

	if_setsoftc(ifp, nic);
	if_initname(ifp, device_get_name(nic->dev), device_get_unit(nic->dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);

	if_settransmitfn(ifp, nicvf_if_transmit);
	if_setqflushfn(ifp, nicvf_if_qflush);
	if_setioctlfn(ifp, nicvf_if_ioctl);
	if_setinitfn(ifp, nicvf_if_init);
	if_setgetcounterfn(ifp, nicvf_if_getcounter);

	if_setmtu(ifp, ETHERMTU);

	/* Reset caps */
	if_setcapabilities(ifp, 0);

	/* Set the default values */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU, 0);
	if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
	if (nic->hw_tso) {
		/* TSO */
		if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0);
		/* TSO parameters */
		if_sethwtsomax(ifp, NICVF_TSO_MAXSIZE);
		if_sethwtsomaxsegcount(ifp, NICVF_TSO_NSEGS);
		if_sethwtsomaxsegsize(ifp, MCLBYTES);
	}
	/* IP/TCP/UDP HW checksums */
	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
	if_setcapabilitiesbit(ifp, IFCAP_HWSTATS, 0);
	/*
	 * HW offload enable
	 */
	if_clearhwassist(ifp);
	if_sethwassistbits(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP), 0);
	if (nic->hw_tso)
		if_sethwassistbits(ifp, (CSUM_TSO), 0);

	/* Enable every supported capability by default. */
	if_setcapenable(ifp, if_getcapabilities(ifp));
-
-	return (0);
}
static int
nicvf_setup_ifmedia(struct nicvf *nic)
{
ifmedia_init(&nic->if_media, IFM_IMASK, nicvf_media_change,
nicvf_media_status);
/*
* Advertise availability of all possible connection types,
* even though not all are possible at the same time.
*/
ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_10_T | IFM_FDX),
0, NULL);
ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_100_TX | IFM_FDX),
0, NULL);
ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_1000_T | IFM_FDX),
0, NULL);
ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_10G_SR | IFM_FDX),
0, NULL);
ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_40G_CR4 | IFM_FDX),
0, NULL);
ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_AUTO | IFM_FDX),
0, NULL);
ifmedia_set(&nic->if_media, (IFM_ETHER | IFM_AUTO | IFM_FDX));
return (0);
}
/*
 * ifnet ioctl method.  Handles address assignment, MTU changes (which
 * are programmed into HW via the PF mailbox before being accepted),
 * interface flags, multicast (currently stubbed out), media queries
 * and capability toggles; everything else goes to ether_ioctl().
 */
static int
nicvf_if_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct nicvf *nic;
	struct rcv_queue *rq;
	struct ifreq *ifr;
	uint32_t flags;
	int mask, err;
	int rq_idx;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa;
	boolean_t avoid_reset = FALSE;
#endif

	nic = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	ifa = (struct ifaddr *)data;
#endif
	err = 0;
	switch (cmd) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/* Avoid reinitialization unless it's necessary */
		if (avoid_reset) {
			if_setflagbits(ifp, IFF_UP, 0);
			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
				nicvf_if_init(nic);
#ifdef INET
			if (!(if_getflags(ifp) & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
			return (0);
		}
#endif
		err = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFMTU:
		/* Program the new frame size into HW before accepting it. */
		if (ifr->ifr_mtu < NIC_HW_MIN_FRS ||
		    ifr->ifr_mtu > NIC_HW_MAX_FRS) {
			err = EINVAL;
		} else {
			NICVF_CORE_LOCK(nic);
			err = nicvf_update_hw_max_frs(nic, ifr->ifr_mtu);
			if (err == 0)
				if_setmtu(ifp, ifr->ifr_mtu);
			NICVF_CORE_UNLOCK(nic);
		}
		break;
	case SIOCSIFFLAGS:
		NICVF_CORE_LOCK(nic);
		flags = if_getflags(ifp);
		if (flags & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				if ((flags ^ nic->if_flags) & IFF_PROMISC) {
					/* Change promiscous mode */
#if 0 /* XXX */
					nicvf_set_promiscous(nic);
#endif
				}

				if ((flags ^ nic->if_flags) & IFF_ALLMULTI) {
					/* Change multicasting settings */
#if 0 /* XXX */
					nicvf_set_multicast(nic);
#endif
				}
			} else {
				nicvf_if_init_locked(nic);
			}
		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			nicvf_stop_locked(nic);

		nic->if_flags = flags;
		NICVF_CORE_UNLOCK(nic);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
#if 0
			NICVF_CORE_LOCK(nic);
			/* ARM64TODO */
			nicvf_set_multicast(nic);
			NICVF_CORE_UNLOCK(nic);
#endif
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		err = ifmedia_ioctl(ifp, ifr, &nic->if_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			/* No work to do except acknowledge the change took. */
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
		}
		if (mask & IFCAP_TXCSUM)
			if_togglecapenable(ifp, IFCAP_TXCSUM);
		if (mask & IFCAP_RXCSUM)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if ((mask & IFCAP_TSO4) && nic->hw_tso)
			if_togglecapenable(ifp, IFCAP_TSO4);
		if (mask & IFCAP_LRO) {
			/*
			 * Lock the driver for a moment to avoid
			 * mismatch in per-queue settings.
			 */
			NICVF_CORE_LOCK(nic);
			if_togglecapenable(ifp, IFCAP_LRO);
			if ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0) {
				/*
				 * Now disable LRO for subsequent packets.
				 * Atomicity of this change is not necessary
				 * as we don't need precise toggle of this
				 * feature for all threads processing the
				 * completion queue.
				 */
				for (rq_idx = 0;
				    rq_idx < nic->qs->rq_cnt; rq_idx++) {
					rq = &nic->qs->rq[rq_idx];
					rq->lro_enabled = !rq->lro_enabled;
				}
			}
			NICVF_CORE_UNLOCK(nic);
		}
		break;

	default:
		err = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (err);
}
/*
 * Bring the interface up with the core lock held: (re)stop if already
 * running, re-enable the mailbox interrupt, sync the MAC address,
 * initialize queue resources, unmask data-path interrupts, and start
 * the statistics callout.  On queue-init failure the ifnet is left
 * marked OACTIVE and not RUNNING.
 */
static void
nicvf_if_init_locked(struct nicvf *nic)
{
	struct queue_set *qs = nic->qs;
	if_t ifp;
	int qidx;
	int err;
	caddr_t if_addr;

	NICVF_CORE_LOCK_ASSERT(nic);
	ifp = nic->ifp;

	/* Restart cleanly even if we were already running. */
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		nicvf_stop_locked(nic);

	err = nicvf_enable_misc_interrupt(nic);
	if (err != 0) {
		if_printf(ifp, "Could not reenable Mbox interrupt\n");
		return;
	}

	/* Get the latest MAC address */
	if_addr = if_getlladdr(ifp);
	/* Update MAC address if changed */
	if (memcmp(nic->hwaddr, if_addr, ETHER_ADDR_LEN) != 0) {
		memcpy(nic->hwaddr, if_addr, ETHER_ADDR_LEN);
		nicvf_hw_set_mac_addr(nic, if_addr);
	}

	/* Initialize the queues */
	err = nicvf_init_resources(nic);
	if (err != 0)
		goto error;

	/* Make sure queue initialization is written */
	wmb();

	/* Ack any pending interrupt state before unmasking. */
	nicvf_reg_write(nic, NIC_VF_INT, ~0UL);
	/* Enable Qset err interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Enable completion queue interrupt */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Enable RBDR threshold interrupt */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

	nic->drv_stats.txq_stop = 0;
	nic->drv_stats.txq_wake = 0;

	/* Activate network interface */
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	/* Schedule callout to update stats */
	callout_reset(&nic->stats_callout, hz, nicvf_tick_stats, nic);

	return;

error:
	/* Something went very wrong. Disable this ifnet for good */
	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
}
/*
 * ifnet init method: take the core lock and do the real work in
 * nicvf_if_init_locked().
 */
static void
nicvf_if_init(void *if_softc)
{
	struct nicvf *nic;

	nic = if_softc;
	NICVF_CORE_LOCK(nic);
	nicvf_if_init_locked(nic);
	NICVF_CORE_UNLOCK(nic);
}
/*
 * ifnet transmit method.  Picks a send queue (flowid when a hash is
 * present, otherwise current CPU), enqueues the mbuf on the queue's
 * buf_ring, and drains the queue inline when the TX lock is free or
 * defers to the per-queue transmit task otherwise.
 */
static int
nicvf_if_transmit(if_t ifp, struct mbuf *mbuf)
{
	struct nicvf *nic = if_getsoftc(ifp);
	struct queue_set *qs = nic->qs;
	struct snd_queue *sq;
	struct mbuf *mtmp;
	int qidx;
	int err = 0;

	if (__predict_false(qs == NULL)) {
		panic("%s: missing queue set for %s", __func__,
		    device_get_nameunit(nic->dev));
	}

	/* Select queue */
	if (M_HASHTYPE_GET(mbuf) != M_HASHTYPE_NONE)
		qidx = mbuf->m_pkthdr.flowid % qs->sq_cnt;
	else
		qidx = curcpu % qs->sq_cnt;

	sq = &qs->sq[qidx];

	/*
	 * Multi-mbuf chains with checksum offload requested get a
	 * writable copy when needed — presumably so the offload path
	 * can modify headers in place (NOTE(review): confirm against
	 * nicvf_queues.c).
	 */
	if (mbuf->m_next != NULL &&
	    (mbuf->m_pkthdr.csum_flags &
	    (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP)) != 0) {
		if (M_WRITABLE(mbuf) == 0) {
			mtmp = m_dup(mbuf, M_NOWAIT);
			m_freem(mbuf);
			if (mtmp == NULL)
				return (ENOBUFS);
			mbuf = mtmp;
		}
	}

	err = drbr_enqueue(ifp, sq->br, mbuf);
	if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING) || !nic->link_up || (err != 0)) {
		/*
		 * Try to enqueue packet to the ring buffer.
		 * If the driver is not active, link down or enqueue operation
		 * failed, return with the appropriate error code.
		 */
		return (err);
	}

	if (NICVF_TX_TRYLOCK(sq) != 0) {
		err = nicvf_xmit_locked(sq);
		NICVF_TX_UNLOCK(sq);
		return (err);
	} else
		taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);

	return (0);
}
/*
 * ifnet qflush method: drop every mbuf still queued on the per-queue
 * buf_rings, then run the generic if_qflush().
 */
static void
nicvf_if_qflush(if_t ifp)
{
	struct nicvf *nic = if_getsoftc(ifp);
	struct queue_set *qs = nic->qs;
	size_t qidx;

	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		struct snd_queue *sq = &qs->sq[qidx];
		struct mbuf *m;

		NICVF_TX_LOCK(sq);
		while ((m = buf_ring_dequeue_sc(sq->br)) != NULL)
			m_freem(m);
		NICVF_TX_UNLOCK(sq);
	}
	if_qflush(ifp);
}
/*
 * ifnet getcounter method: map generic interface counters onto the
 * driver/hardware statistics kept in the softc.  Unknown counters
 * fall through to if_get_counter_default().
 */
static uint64_t
nicvf_if_getcounter(if_t ifp, ift_counter cnt)
{
	struct nicvf *nic;
	struct nicvf_hw_stats *hw_stats;
	struct nicvf_drv_stats *drv_stats;

	nic = if_getsoftc(ifp);
	hw_stats = &nic->hw_stats;
	drv_stats = &nic->drv_stats;

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (drv_stats->rx_frames_ok);
	case IFCOUNTER_OPACKETS:
		return (drv_stats->tx_frames_ok);
	case IFCOUNTER_IBYTES:
		return (hw_stats->rx_bytes);
	case IFCOUNTER_OBYTES:
		return (hw_stats->tx_bytes_ok);
	case IFCOUNTER_IMCASTS:
		return (hw_stats->rx_mcast_frames);
	case IFCOUNTER_COLLISIONS:
		/* Full-duplex-only device; no collisions counted. */
		return (0);
	case IFCOUNTER_IQDROPS:
		return (drv_stats->rx_drops);
	case IFCOUNTER_OQDROPS:
		return (drv_stats->tx_drops);
	default:
		return (if_get_counter_default(ifp, cnt));
	}

}
/*
 * ifmedia status callback: report link state, speed and duplex as
 * last learned from the PF's link-change mailbox message.
 */
static void
nicvf_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct nicvf *nic = if_getsoftc(ifp);

	NICVF_CORE_LOCK(nic);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (nic->link_up) {
		/* Device attached to working network */
		ifmr->ifm_status |= IFM_ACTIVE;
	}

	/* Translate the Mbps speed into the matching media subtype. */
	switch (nic->speed) {
	case SPEED_10:
		ifmr->ifm_active |= IFM_10_T;
		break;
	case SPEED_100:
		ifmr->ifm_active |= IFM_100_TX;
		break;
	case SPEED_1000:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	case SPEED_10000:
		ifmr->ifm_active |= IFM_10G_SR;
		break;
	case SPEED_40000:
		ifmr->ifm_active |= IFM_40G_CR4;
		break;
	default:
		ifmr->ifm_active |= IFM_AUTO;
		break;
	}

	if (nic->duplex)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;

	NICVF_CORE_UNLOCK(nic);
}
/*
 * ifmedia change callback: a no-op for this VF — link parameters are
 * reported by the PF via the mailbox, not set from here.
 */
static int
nicvf_media_change(if_t ifp __unused)
{

	return (0);
}
/* Register read/write APIs */

/* Write a 64-bit VF register at @offset. */
void
nicvf_reg_write(struct nicvf *nic, bus_space_handle_t offset, uint64_t val)
{

	bus_write_8(nic->reg_base, offset, val);
}

/* Read a 64-bit VF register at @offset. */
uint64_t
nicvf_reg_read(struct nicvf *nic, uint64_t offset)
{

	return (bus_read_8(nic->reg_base, offset));
}

/*
 * Write a queue-indexed 64-bit register: the queue number is encoded
 * into the offset at NIC_Q_NUM_SHIFT.
 */
void
nicvf_queue_reg_write(struct nicvf *nic, bus_space_handle_t offset,
    uint64_t qidx, uint64_t val)
{

	bus_write_8(nic->reg_base, offset + (qidx << NIC_Q_NUM_SHIFT), val);
}

/* Read a queue-indexed 64-bit register (see nicvf_queue_reg_write). */
uint64_t
nicvf_queue_reg_read(struct nicvf *nic, bus_space_handle_t offset,
    uint64_t qidx)
{

	return (bus_read_8(nic->reg_base, offset + (qidx << NIC_Q_NUM_SHIFT)));
}
/* VF -> PF mailbox communication */

/* Copy the 128-bit mailbox message into the two VF->PF mailbox words. */
static void
nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
	uint64_t *msg = (uint64_t *)mbx;

	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}
/*
 * Post a message to the PF and poll (2 ms DELAYs) until the mailbox
 * interrupt handler flags an ACK or NACK.  Must be called with the
 * core lock held.  Returns 0 on ACK, EINVAL on NACK, EBUSY on timeout.
 */
int
nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
	int timeout = NIC_MBOX_MSG_TIMEOUT * 10;
	int sleep = 2;

	NICVF_CORE_LOCK_ASSERT(nic);

	nic->pf_acked = FALSE;
	nic->pf_nacked = FALSE;

	nicvf_write_to_mbx(nic, mbx);

	/* Wait for previous message to be acked, timeout 2sec */
	while (!nic->pf_acked) {
		if (nic->pf_nacked)
			return (EINVAL);
		DELAY(sleep * 1000);
		if (nic->pf_acked)
			break;
		timeout -= sleep;
		if (!timeout) {
			device_printf(nic->dev,
			    "PF didn't ack to mbox msg %d from VF%d\n",
			    (mbx->msg.msg & 0xFF), nic->vf_id);
			return (EBUSY);
		}
	}

	return (0);
}
/*
 * Check whether the VF can communicate with the PF by sending a READY
 * message; the reply also carries the VNIC this VF is associated to.
 * Returns 1 on success, 0 when the PF does not respond.
 */
static int
nicvf_check_pf_ready(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_READY;
	if (nicvf_send_msg_to_pf(nic, &mbx) != 0) {
		device_printf(nic->dev,
		    "PF didn't respond to READY msg\n");
		return (0);
	}

	return (1);
}
/*
 * Record a BGX statistics snapshot received from the PF in the
 * direction-specific (RX or TX) table slot.
 */
static void
nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{

	if (!bgx->rx)
		nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
	else
		nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
}
/*
 * Mailbox interrupt bottom half: read the PF->VF message from the
 * mailbox registers and act on it — VF configuration on READY,
 * ACK/NACK flags for nicvf_send_msg_to_pf(), RSS size, BGX stats,
 * and link-state changes (which are pushed into the ifnet layer).
 */
static void
nicvf_handle_mbx_intr(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	uint64_t *mbx_data;
	uint64_t mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (uint64_t *)&mbx;

	/* Copy the full message out of the mailbox registers. */
	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(uint64_t);
	}

	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->pf_acked = TRUE;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		memcpy(nic->hwaddr, mbx.nic_cfg.mac_addr, ETHER_ADDR_LEN);
		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
		nic->link_up = FALSE;
		nic->duplex = 0;
		nic->speed = 0;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = TRUE;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = TRUE;
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
		nic->pf_acked = TRUE;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
		nic->pf_acked = TRUE;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->pf_acked = TRUE;
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		/* Propagate the new link state to the network stack. */
		if (nic->link_up) {
			if_setbaudrate(nic->ifp, nic->speed * 1000000);
			if_link_state_change(nic->ifp, LINK_STATE_UP);
		} else {
			if_setbaudrate(nic->ifp, 0);
			if_link_state_change(nic->ifp, LINK_STATE_DOWN);
		}
		break;
	default:
		device_printf(nic->dev,
		    "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}
	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}
/*
 * Ask the PF to program @mtu as the maximum receive frame size for
 * this VF.  Must be called with the core lock held (it goes through
 * nicvf_send_msg_to_pf()).  Returns 0 or the mailbox exchange error.
 */
static int
nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
	union nic_mbx mbx = {};

	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
	mbx.frs.max_frs = mtu;
	mbx.frs.vf_id = nic->vf_id;

	/* Parenthesized return value per style(9), matching the file. */
	return (nicvf_send_msg_to_pf(nic, &mbx));
}
/*
 * Ask the PF to program @hwaddr as this VF's MAC address.  Core lock
 * must be held.  Returns the mailbox exchange status.
 */
static int
nicvf_hw_set_mac_addr(struct nicvf *nic, uint8_t *hwaddr)
{
	union nic_mbx mbx = {};

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	memcpy(mbx.mac.mac_addr, hwaddr, ETHER_ADDR_LEN);

	return (nicvf_send_msg_to_pf(nic, &mbx));
}
/*
 * Tell the PF which channel-to-CPI mapping algorithm and how many
 * receive queues this VF uses.  Reply status is ignored.
 */
static void
nicvf_config_cpi(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

	nicvf_send_msg_to_pf(nic, &mbx);
}
/*
 * Ask the PF for the RSS indirection table size; the reply is stored
 * in nic->rss_info by the mailbox interrupt handler.
 */
static void
nicvf_get_rss_size(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.rss_size.vf_id = nic->vf_id;
	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	nicvf_send_msg_to_pf(nic, &mbx);
}
/*
 * Push the RSS indirection table to the PF.  The table may exceed a
 * single mailbox message, so it is sent in chunks of up to
 * RSS_IND_TBL_LEN_PER_MBX_MSG entries; the first chunk uses RSS_CFG,
 * subsequent chunks RSS_CFG_CONT.
 */
static void
nicvf_config_rss(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	struct nicvf_rss_info *rss;
	int ind_tbl_len;
	int i, nextq;

	rss = &nic->rss_info;
	ind_tbl_len = rss->rss_size;
	nextq = 0;

	mbx.rss_cfg.vf_id = nic->vf_id;
	mbx.rss_cfg.hash_bits = rss->hash_bits;
	while (ind_tbl_len != 0) {
		mbx.rss_cfg.tbl_offset = nextq;
		mbx.rss_cfg.tbl_len = MIN(ind_tbl_len,
		    RSS_IND_TBL_LEN_PER_MBX_MSG);
		mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
		    NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;

		for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];

		nicvf_send_msg_to_pf(nic, &mbx);

		ind_tbl_len -= mbx.rss_cfg.tbl_len;
	}
}
/*
 * Program the RSS hash key into the consecutive 64-bit key registers
 * starting at NIC_VNIC_RSS_KEY_0_4.
 */
static void
nicvf_set_rss_key(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		nicvf_reg_write(nic,
		    NIC_VNIC_RSS_KEY_0_4 + idx * sizeof(uint64_t),
		    rss->key[idx]);
	}
}
/*
 * Initialize receive-side scaling: query the indirection table size,
 * program the hash key and hash-type configuration, and distribute
 * table entries round-robin over the RX queues.  RSS is only enabled
 * when no CPI algorithm is configured; otherwise returns ENXIO.
 */
static int
nicvf_rss_init(struct nicvf *nic)
{
	struct nicvf_rss_info *rss;
	int idx;

	nicvf_get_rss_size(nic);

	rss = &nic->rss_info;
	if (nic->cpi_alg != CPI_ALG_NONE) {
		rss->enable = FALSE;
		rss->hash_bits = 0;
		return (ENXIO);
	}

	rss->enable = TRUE;

	/* Using the HW reset value for now */
	rss->key[0] = 0xFEED0BADFEED0BADUL;
	rss->key[1] = 0xFEED0BADFEED0BADUL;
	rss->key[2] = 0xFEED0BADFEED0BADUL;
	rss->key[3] = 0xFEED0BADFEED0BADUL;
	rss->key[4] = 0xFEED0BADFEED0BADUL;

	nicvf_set_rss_key(nic);

	/* Hash over IP, TCP and UDP headers. */
	rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);

	rss->hash_bits = fls(rss->rss_size) - 1;
	/* Spread indirection-table entries across all RX queues. */
	for (idx = 0; idx < rss->rss_size; idx++)
		rss->ind_tbl[idx] = idx % nic->rx_queues;

	nicvf_config_rss(nic);

	return (0);
}
/*
 * Enable the hardware queue set, configure the data-transfer queues,
 * and notify the PF that VF configuration is complete.
 */
static int
nicvf_init_resources(struct nicvf *nic)
{
	int err;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;

	/* Enable Qset */
	nicvf_qset_config(nic, TRUE);

	/* Initialize queues and HW for data transfer */
	err = nicvf_config_data_transfer(nic, TRUE);
	if (err) {
		device_printf(nic->dev,
		    "Failed to alloc/config VF's QSet resources\n");
		return (err);
	}

	/* Send VF config done msg to PF */
	nicvf_write_to_mbx(nic, &mbx);

	return (0);
}
/*
 * Misc (mailbox) interrupt handler: dispatch to the mailbox bottom
 * half only when the mailbox bit is actually pending; anything else
 * is treated as spurious and ignored.
 */
static void
nicvf_misc_intr_handler(void *arg)
{
	struct nicvf *nic = arg;

	if ((nicvf_reg_read(nic, NIC_VF_INT) & NICVF_INTR_MBOX_MASK) != 0)
		nicvf_handle_mbx_intr(nic);
}
/*
 * Completion-queue interrupt filter: mask the CQ interrupt, hand the
 * actual processing to the CQ taskqueue, and ack the interrupt.
 */
static int
nicvf_intr_handler(void *arg)
{
	struct cmp_queue *cq = arg;
	struct nicvf *nic = cq->nic;
	int qidx = cq->idx;

	nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
	taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);
	nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);

	return (FILTER_HANDLED);
}
/*
 * RBDR (receive buffer descriptor ring) interrupt handler: for every
 * RBDR whose interrupt is enabled, mask it, schedule the refill task,
 * and ack the interrupt.
 */
static void
nicvf_rbdr_intr_handler(void *arg)
{
	struct nicvf *nic;
	struct queue_set *qs;
	struct rbdr *rbdr;
	int qidx;

	nic = (struct nicvf *)arg;

	/* Disable RBDR interrupt and schedule softirq */
	for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
			continue;
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		qs = nic->qs;
		rbdr = &qs->rbdr[qidx];
		taskqueue_enqueue(rbdr->rbdr_taskq, &rbdr->rbdr_task_nowait);
		/* Clear interrupt */
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
}
/*
 * Queue-set error interrupt handler: mask the QS error interrupt,
 * defer handling to the QS error task, and ack the interrupt.
 */
static void
nicvf_qs_err_intr_handler(void *arg)
{
	struct nicvf *nic = arg;
	struct queue_set *qs = nic->qs;

	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	taskqueue_enqueue(qs->qs_err_taskq, &qs->qs_err_task);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
}
/*
 * Map the MSI-X table BAR and allocate the full set of
 * NIC_VF_MSIX_VECTORS vectors.  Fails if fewer vectors are granted.
 */
static int
nicvf_enable_msix(struct nicvf *nic)
{
	struct pci_devinfo *dinfo;
	int rid, count;
	int ret;

	/* The MSI-X table BAR rid comes from PCI config space. */
	dinfo = device_get_ivars(nic->dev);
	rid = dinfo->cfg.msix.msix_table_bar;
	nic->msix_table_res =
	    bus_alloc_resource_any(nic->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (nic->msix_table_res == NULL) {
		device_printf(nic->dev,
		    "Could not allocate memory for MSI-X table\n");
		return (ENXIO);
	}

	count = nic->num_vec = NIC_VF_MSIX_VECTORS;

	ret = pci_alloc_msix(nic->dev, &count);
	if ((ret != 0) || (count != nic->num_vec)) {
		device_printf(nic->dev,
		    "Request for #%d msix vectors failed, error: %d\n",
		    nic->num_vec, ret);
		return (ret);
	}

	nic->msix_enabled = 1;
	return (0);
}
/* Release MSI-X vectors if they were allocated; otherwise a no-op. */
static void
nicvf_disable_msix(struct nicvf *nic)
{

	if (!nic->msix_enabled)
		return;

	pci_release_msi(nic->dev);
	nic->msix_enabled = 0;
	nic->num_vec = 0;
}
/*
 * Tear down and release every registered interrupt (handler first,
 * then the IRQ resource), then disable MSI-X.
 */
static void
nicvf_release_all_interrupts(struct nicvf *nic)
{
	struct resource *res;
	int irq;
	int err __diagused;

	/* Free registered interrupts */
	for (irq = 0; irq < nic->num_vec; irq++) {
		res = nic->msix_entries[irq].irq_res;
		if (res == NULL)
			continue;
		/* Teardown interrupt first */
		if (nic->msix_entries[irq].handle != NULL) {
			err = bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
			KASSERT(err == 0,
			    ("ERROR: Unable to teardown interrupt %d", irq));
			nic->msix_entries[irq].handle = NULL;
		}

		bus_release_resource(nic->dev, SYS_RES_IRQ,
		    rman_get_rid(res), nic->msix_entries[irq].irq_res);
		nic->msix_entries[irq].irq_res = NULL;
	}
	/* Disable MSI-X */
	nicvf_disable_msix(nic);
}
/*
 * Initialize MSI-X vectors and register the MISC (mailbox) interrupt,
 * used to send the READY message to the PF and check it is alive.
 * Returns 0 if the interrupt is (already) registered, else an errno.
 */
static int
nicvf_allocate_misc_interrupt(struct nicvf *nic)
{
	struct resource *res;
	int irq, rid;
	int ret = 0;

	/* Return if mailbox interrupt is already registered */
	if (nic->msix_enabled)
		return (0);

	/* Enable MSI-X */
	if (nicvf_enable_msix(nic) != 0)
		return (ENXIO);

	irq = NICVF_INTR_ID_MISC;
	rid = irq + 1;
	nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
	    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
	if (nic->msix_entries[irq].irq_res == NULL) {
		device_printf(nic->dev,
		    "Could not allocate Mbox interrupt for VF%d\n",
		    device_get_unit(nic->dev));
		return (ENXIO);
	}

	ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
	    (INTR_MPSAFE | INTR_TYPE_MISC), NULL, nicvf_misc_intr_handler, nic,
	    &nic->msix_entries[irq].handle);
	if (ret != 0) {
		/* Undo the IRQ allocation on setup failure. */
		res = nic->msix_entries[irq].irq_res;
		bus_release_resource(nic->dev, SYS_RES_IRQ,
		    rman_get_rid(res), res);
		nic->msix_entries[irq].irq_res = NULL;
		return (ret);
	}

	return (0);
}
/*
 * Unmask the mailbox interrupt and verify PF communication; on
 * failure the interrupt is masked again and ENXIO is returned.
 */
static int
nicvf_enable_misc_interrupt(struct nicvf *nic)
{
	int err = 0;

	/* Enable mailbox interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);

	/* Check if VF is able to communicate with PF */
	if (nicvf_check_pf_ready(nic) == 0) {
		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
		err = ENXIO;
	}

	return (err);
}
/*
 * Release all data-path interrupts: the per-CQ vectors, the per-RBDR
 * vectors, and the queue-set error vector.  For each one the handler
 * is torn down before the IRQ resource is released; a teardown
 * failure leaves that resource allocated.
 */
static void
nicvf_release_net_interrupts(struct nicvf *nic)
{
	struct resource *res;
	int irq;
	int err;

	for_each_cq_irq(irq) {
		res = nic->msix_entries[irq].irq_res;
		if (res == NULL)
			continue;
		/* Teardown active interrupts first */
		if (nic->msix_entries[irq].handle != NULL) {
			err = bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
			KASSERT(err == 0,
			    ("ERROR: Unable to teardown CQ interrupt %d",
			    (irq - NICVF_INTR_ID_CQ)));
			if (err != 0)
				continue;
		}

		/* Release resource */
		bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
		    res);
		nic->msix_entries[irq].irq_res = NULL;
	}

	for_each_rbdr_irq(irq) {
		res = nic->msix_entries[irq].irq_res;
		if (res == NULL)
			continue;
		/* Teardown active interrupts first */
		if (nic->msix_entries[irq].handle != NULL) {
			err = bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
			KASSERT(err == 0,
			    ("ERROR: Unable to teardown RDBR interrupt %d",
			    (irq - NICVF_INTR_ID_RBDR)));
			if (err != 0)
				continue;
		}

		/* Release resource */
		bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
		    res);
		nic->msix_entries[irq].irq_res = NULL;
	}

	irq = NICVF_INTR_ID_QS_ERR;
	res = nic->msix_entries[irq].irq_res;
	if (res != NULL) {
		/* Teardown active interrupts first */
		if (nic->msix_entries[irq].handle != NULL) {
			err = bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
			KASSERT(err == 0,
			    ("ERROR: Unable to teardown QS Error interrupt %d",
			    irq));
			if (err != 0)
				return;
		}

		/* Release resource */
		bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
		    res);
		nic->msix_entries[irq].irq_res = NULL;
	}
}
/*
 * Allocate and set up all data-path interrupts: one per completion
 * queue (bound round-robin across CPUs), one per RBDR, and a single
 * queue-set error interrupt.  On any failure every already-registered
 * network interrupt is released.
 *
 * Fixes typos in the user-visible "MSI-X disabled" error message
 * ("alloacte"/"interrups").
 */
static int
nicvf_allocate_net_interrupts(struct nicvf *nic)
{
	u_int cpuid;
	int irq, rid;
	int qidx;
	int ret = 0;

	/* MSI-X must be configured by now */
	if (!nic->msix_enabled) {
		device_printf(nic->dev, "Cannot allocate queue interrupts. "
		    "MSI-X interrupts disabled.\n");
		return (ENXIO);
	}

	/* Register CQ interrupts */
	for_each_cq_irq(irq) {
		if (irq >= (NICVF_INTR_ID_CQ + nic->qs->cq_cnt))
			break;

		qidx = irq - NICVF_INTR_ID_CQ;
		rid = irq + 1;
		nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
		    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
		if (nic->msix_entries[irq].irq_res == NULL) {
			device_printf(nic->dev,
			    "Could not allocate CQ interrupt %d for VF%d\n",
			    (irq - NICVF_INTR_ID_CQ), device_get_unit(nic->dev));
			ret = ENXIO;
			goto error;
		}
		/* CQ handler is a filter; real work runs in a taskqueue. */
		ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
		    (INTR_MPSAFE | INTR_TYPE_NET), nicvf_intr_handler,
		    NULL, &nic->qs->cq[qidx], &nic->msix_entries[irq].handle);
		if (ret != 0) {
			device_printf(nic->dev,
			    "Could not setup CQ interrupt %d for VF%d\n",
			    (irq - NICVF_INTR_ID_CQ), device_get_unit(nic->dev));
			goto error;
		}
		cpuid = (device_get_unit(nic->dev) * CMP_QUEUE_CNT) + qidx;
		cpuid %= mp_ncpus;
		/*
		 * Save CPU ID for later use when system-wide RSS is enabled.
		 * It will be used to pit the CQ task to the same CPU that got
		 * interrupted.
		 */
		nic->qs->cq[qidx].cmp_cpuid = cpuid;
		if (bootverbose) {
			device_printf(nic->dev, "bind CQ%d IRQ to CPU%d\n",
			    qidx, cpuid);
		}
		/* Bind interrupts to the given CPU */
		bus_bind_intr(nic->dev, nic->msix_entries[irq].irq_res, cpuid);
	}

	/* Register RBDR interrupt */
	for_each_rbdr_irq(irq) {
		if (irq >= (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt))
			break;

		rid = irq + 1;
		nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
		    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
		if (nic->msix_entries[irq].irq_res == NULL) {
			device_printf(nic->dev,
			    "Could not allocate RBDR interrupt %d for VF%d\n",
			    (irq - NICVF_INTR_ID_RBDR),
			    device_get_unit(nic->dev));
			ret = ENXIO;
			goto error;
		}
		ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
		    (INTR_MPSAFE | INTR_TYPE_NET), NULL,
		    nicvf_rbdr_intr_handler, nic,
		    &nic->msix_entries[irq].handle);
		if (ret != 0) {
			device_printf(nic->dev,
			    "Could not setup RBDR interrupt %d for VF%d\n",
			    (irq - NICVF_INTR_ID_RBDR),
			    device_get_unit(nic->dev));
			goto error;
		}
	}

	/* Register QS error interrupt */
	irq = NICVF_INTR_ID_QS_ERR;
	rid = irq + 1;
	nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
	    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
	if (nic->msix_entries[irq].irq_res == NULL) {
		device_printf(nic->dev,
		    "Could not allocate QS Error interrupt for VF%d\n",
		    device_get_unit(nic->dev));
		ret = ENXIO;
		goto error;
	}
	ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
	    (INTR_MPSAFE | INTR_TYPE_NET), NULL, nicvf_qs_err_intr_handler,
	    nic, &nic->msix_entries[irq].handle);
	if (ret != 0) {
		device_printf(nic->dev,
		    "Could not setup QS Error interrupt for VF%d\n",
		    device_get_unit(nic->dev));
		goto error;
	}

	return (0);
error:
	nicvf_release_net_interrupts(nic);
	return (ret);
}
/*
 * Stop the VF datapath: drain the statistics callout, notify the PF of
 * shutdown, mask RBDR/QS-error/mailbox interrupts, deactivate the
 * interface and tear down the queue set.  Always returns 0.
 * Must be called with the core (SX) lock held.
 */
static int
nicvf_stop_locked(struct nicvf *nic)
{
	if_t ifp;
	int qidx;
	struct queue_set *qs = nic->qs;
	union nic_mbx mbx = {};

	NICVF_CORE_LOCK_ASSERT(nic);
	/* Stop callout. Can block here since holding SX lock */
	callout_drain(&nic->stats_callout);

	ifp = nic->ifp;

	/* Tell the PF this VF is going down. */
	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Disable RBDR & QS error interrupts */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Deactivate network interface */
	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

	/* Free resources */
	nicvf_config_data_transfer(nic, FALSE);

	/* Disable HW Qset */
	nicvf_qset_config(nic, FALSE);

	/* disable mailbox interrupt */
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

	return (0);
}
/*
 * Refresh the cached hardware statistics from the VNIC Rx/Tx statistics
 * registers, derive the aggregate driver counters, then update the
 * per-RQ and per-SQ statistics.
 */
static void
nicvf_update_stats(struct nicvf *nic)
{
	int qidx;
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
	struct queue_set *qs = nic->qs;

/* Statistics registers are 8 bytes apart, hence the (reg) << 3. */
#define	GET_RX_STATS(reg) \
    nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | ((reg) << 3))
#define	GET_TX_STATS(reg) \
    nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | ((reg) << 3))

	stats->rx_bytes = GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = GET_RX_STATS(RX_RED);
	stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
	stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
	stats->tx_drops = GET_TX_STATS(TX_DROP);

	/* Aggregate driver-level counters from the HW values above. */
	drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
	    stats->tx_bcast_frames_ok + stats->tx_mcast_frames_ok;
	drv_stats->rx_drops = stats->rx_drop_red + stats->rx_drop_overrun;
	drv_stats->tx_drops = stats->tx_drops;

	/* Update RQ and SQ stats */
	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
		nicvf_update_rq_stats(nic, qidx);
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_update_sq_stats(nic, qidx);
}
/*
 * Periodic statistics callout: pull the latest counters from the
 * hardware and re-arm itself to fire again in one second.
 */
static void
nicvf_tick_stats(void *arg)
{
	struct nicvf *nic = arg;

	/* Read the statistics */
	nicvf_update_stats(nic);

	/* Schedule the next tick. */
	callout_reset(&nic->stats_callout, hz, nicvf_tick_stats, nic);
}
diff --git a/sys/dev/vnic/thunder_mdio.c b/sys/dev/vnic/thunder_mdio.c
index 7017d208bc80..4545fe6658f5 100644
--- a/sys/dev/vnic/thunder_mdio.c
+++ b/sys/dev/vnic/thunder_mdio.c
@@ -1,509 +1,505 @@
/*-
* Copyright (c) 2015 The FreeBSD Foundation
*
* This software was developed by Semihalf under
* the sponsorship of the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/resource.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "thunder_mdio_var.h"
#include "lmac_if.h"
#include "miibus_if.h"
#define REG_BASE_RID 0
#define SMI_CMD 0x00
#define SMI_CMD_PHY_REG_ADR_SHIFT (0)
#define SMI_CMD_PHY_REG_ADR_MASK (0x1FUL << SMI_CMD_PHY_REG_ADR_SHIFT)
#define SMI_CMD_PHY_ADR_SHIFT (8)
#define SMI_CMD_PHY_ADR_MASK (0x1FUL << SMI_CMD_PHY_ADR_SHIFT)
#define SMI_CMD_PHY_OP_MASK (0x3UL << 16)
#define SMI_CMD_PHY_OP_C22_READ (0x1UL << 16)
#define SMI_CMD_PHY_OP_C22_WRITE (0x0UL << 16)
#define SMI_CMD_PHY_OP_C45_READ (0x3UL << 16)
#define SMI_CMD_PHY_OP_C45_WRITE (0x1UL << 16)
#define SMI_CMD_PHY_OP_C45_ADDR (0x0UL << 16)
#define SMI_WR_DAT 0x08
#define SMI_WR_DAT_PENDING (1UL << 17)
#define SMI_WR_DAT_VAL (1UL << 16)
#define SMI_WR_DAT_DAT_MASK (0xFFFFUL << 0)
#define SMI_RD_DAT 0x10
#define SMI_RD_DAT_PENDING (1UL << 17)
#define SMI_RD_DAT_VAL (1UL << 16)
#define SMI_RD_DAT_DAT_MASK (0xFFFFUL << 0)
#define SMI_CLK 0x18
#define SMI_CLK_PREAMBLE (1UL << 12)
#define SMI_CLK_MODE (1UL << 24)
#define SMI_EN 0x20
#define SMI_EN_EN (1UL << 0) /* Enable interface */
#define SMI_DRV_CTL 0x28
static int thunder_mdio_detach(device_t);
static int thunder_mdio_read(device_t, int, int);
static int thunder_mdio_write(device_t, int, int, int);
static int thunder_ifmedia_change_stub(if_t);
static void thunder_ifmedia_status_stub(if_t, struct ifmediareq *);
static int thunder_mdio_media_status(device_t, int, int *, int *, int *);
static int thunder_mdio_media_change(device_t, int, int, int, int);
static int thunder_mdio_phy_connect(device_t, int, int);
static int thunder_mdio_phy_disconnect(device_t, int, int);
static device_method_t thunder_mdio_methods[] = {
/* Device interface */
DEVMETHOD(device_detach, thunder_mdio_detach),
/* LMAC interface */
DEVMETHOD(lmac_media_status, thunder_mdio_media_status),
DEVMETHOD(lmac_media_change, thunder_mdio_media_change),
DEVMETHOD(lmac_phy_connect, thunder_mdio_phy_connect),
DEVMETHOD(lmac_phy_disconnect, thunder_mdio_phy_disconnect),
/* MII interface */
DEVMETHOD(miibus_readreg, thunder_mdio_read),
DEVMETHOD(miibus_writereg, thunder_mdio_write),
/* End */
DEVMETHOD_END
};
DEFINE_CLASS_0(thunder_mdio, thunder_mdio_driver, thunder_mdio_methods,
sizeof(struct thunder_mdio_softc));
DRIVER_MODULE(miibus, thunder_mdio, miibus_driver, 0, 0);
MODULE_VERSION(thunder_mdio, 1);
MODULE_DEPEND(thunder_mdio, ether, 1, 1, 1);
MODULE_DEPEND(thunder_mdio, miibus, 1, 1, 1);
MODULE_DEPEND(thunder_mdio, mrmlbus, 1, 1, 1);
MALLOC_DEFINE(M_THUNDER_MDIO, "ThunderX MDIO",
"Cavium ThunderX MDIO dynamic memory");
#define MDIO_LOCK_INIT(sc, name) \
mtx_init(&(sc)->mtx, name, NULL, MTX_DEF)
#define MDIO_LOCK_DESTROY(sc) \
mtx_destroy(&(sc)->mtx)
#define MDIO_LOCK(sc) mtx_lock(&(sc)->mtx)
#define MDIO_UNLOCK(sc) mtx_unlock(&(sc)->mtx)
#define MDIO_LOCK_ASSERT(sc) \
mtx_assert(&(sc)->mtx, MA_OWNED)
#define mdio_reg_read(sc, reg) \
bus_read_8((sc)->reg_base, (reg))
#define mdio_reg_write(sc, reg, val) \
bus_write_8((sc)->reg_base, (reg), (val))
/*
 * Common attach routine: map the SMI/MDIO register window, initialize
 * the PHY descriptor list and its lock, and enable the SMI interface.
 * Returns 0 on success or ENXIO when the register BAR cannot be mapped.
 */
int
thunder_mdio_attach(device_t dev)
{
	struct thunder_mdio_softc *sc = device_get_softc(dev);
	struct resource *res;
	int rid = REG_BASE_RID;

	sc->dev = dev;

	/* Map the device register window. */
	res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (res == NULL) {
		device_printf(dev, "Could not allocate memory\n");
		return (ENXIO);
	}
	sc->reg_base = res;

	TAILQ_INIT(&sc->phy_desc_head);
	MDIO_LOCK_INIT(sc, "ThunderX MDIO lock");

	/* Enable SMI/MDIO interface */
	mdio_reg_write(sc, SMI_EN, SMI_EN_EN);

	return (0);
}
/*
 * Detach: disable the SMI interface, release the register window and
 * tear down the lock created in thunder_mdio_attach().
 *
 * Fix: the mutex initialized in attach was never destroyed, leaking
 * the lock (and, with INVARIANTS, panicking on re-initialization if
 * the driver is reloaded).  The lock and TAILQ are only set up after
 * the register resource allocation succeeds, so key the teardown off
 * sc->reg_base.
 */
static int
thunder_mdio_detach(device_t dev)
{
	struct thunder_mdio_softc *sc;

	sc = device_get_softc(dev);

	if (sc->reg_base != NULL) {
		/* Disable SMI/MDIO interface (mirrors attach). */
		mdio_reg_write(sc, SMI_EN, 0);
		bus_release_resource(dev, SYS_RES_MEMORY, REG_BASE_RID,
		    sc->reg_base);
		sc->reg_base = NULL;
		MDIO_LOCK_DESTROY(sc);
	}

	return (0);
}
/*
 * Switch the SMI controller between IEEE Clause 22 and Clause 45
 * framing.  No-op when the requested mode is already active; the
 * chosen mode is cached in sc->mode.
 */
static __inline void
thunder_mdio_set_mode(struct thunder_mdio_softc *sc,
    enum thunder_mdio_mode mode)
{
	uint64_t smi_clk;

	if (sc->mode == mode)
		return;

	/* Set mode, IEEE Clause 22 or IEEE Clause 45 */
	smi_clk = mdio_reg_read(sc, SMI_CLK);
	if (mode == MODE_IEEE_C22)
		smi_clk &= ~SMI_CLK_MODE;
	else
		smi_clk |= SMI_CLK_MODE;
	/* Enable sending 32 bit preamble on SMI transactions */
	smi_clk |= SMI_CLK_PREAMBLE;
	/* Save settings */
	mdio_reg_write(sc, SMI_CLK, smi_clk);

	sc->mode = mode;
}
/*
 * Clause 45 address cycle: latch the register address into SMI_WR_DAT
 * and issue a command so that the following C45 read/write targets it.
 * Polls for completion up to ~1s.  Returns 0 on success, EIO on
 * timeout.
 *
 * NOTE(review): the opcode used here is SMI_CMD_PHY_OP_C45_WRITE, not
 * SMI_CMD_PHY_OP_C45_ADDR -- confirm this is intended (the C45 path is
 * currently unused; callers force C22).
 */
static int
thunder_mdio_c45_addr(struct thunder_mdio_softc *sc, int phy, int reg)
{
	uint64_t smi_cmd, smi_wr_dat;
	ssize_t timeout;

	thunder_mdio_set_mode(sc, MODE_IEEE_C45);

	/* Prepare data for transmission */
	mdio_reg_write(sc, SMI_WR_DAT, reg & SMI_WR_DAT_DAT_MASK);
	/*
	 * Assemble command
	 */
	smi_cmd = 0;
	/* Set opcode */
	smi_cmd |= SMI_CMD_PHY_OP_C45_WRITE;
	/* Set PHY address */
	smi_cmd |= ((phy << SMI_CMD_PHY_ADR_SHIFT) & SMI_CMD_PHY_ADR_MASK);
	/* Set PHY register offset */
	smi_cmd |= ((reg << SMI_CMD_PHY_REG_ADR_SHIFT) &
	    SMI_CMD_PHY_REG_ADR_MASK);

	mdio_reg_write(sc, SMI_CMD, smi_cmd);
	/* Poll: PENDING clears when the controller completes the cycle. */
	for (timeout = 1000; timeout > 0; timeout--) {
		smi_wr_dat = mdio_reg_read(sc, SMI_WR_DAT);
		if (smi_wr_dat & SMI_WR_DAT_PENDING)
			DELAY(1000);
		else
			break;
	}

	if (timeout <= 0)
		return (EIO);
	else {
		/* Return 0 on success */
		return (0);
	}
}
/*
 * miibus read method.  Builds the SMI command word, kicks off the read
 * and polls for completion for up to ~1s.  Currently always forces
 * IEEE Clause 22 framing (attached PHYs are <= 1Gbps), so the C45
 * branch below is effectively dead.
 * Returns the 16-bit register value, or 0 when the controller did not
 * flag valid data (0-on-error is the miibus convention).
 */
static int
thunder_mdio_read(device_t dev, int phy, int reg)
{
	struct thunder_mdio_softc *sc;
	uint64_t smi_cmd, smi_rd_dat;
	ssize_t timeout;
	int err;

	sc = device_get_softc(dev);

	/* XXX Always C22 - for <= 1Gbps only */
	thunder_mdio_set_mode(sc, MODE_IEEE_C22);

	/*
	 * Assemble command
	 */
	smi_cmd = 0;
	/* Set opcode */
	if (sc->mode == MODE_IEEE_C22)
		smi_cmd |= SMI_CMD_PHY_OP_C22_READ;
	else {
		/* C45: run the address cycle first, then use the devad. */
		smi_cmd |= SMI_CMD_PHY_OP_C45_READ;
		err = thunder_mdio_c45_addr(sc, phy, reg);
		if (err != 0)
			return (err);

		reg = (reg >> 16) & 0x1F;
	}

	/* Set PHY address */
	smi_cmd |= ((phy << SMI_CMD_PHY_ADR_SHIFT) & SMI_CMD_PHY_ADR_MASK);
	/* Set PHY register offset */
	smi_cmd |= ((reg << SMI_CMD_PHY_REG_ADR_SHIFT) &
	    SMI_CMD_PHY_REG_ADR_MASK);

	mdio_reg_write(sc, SMI_CMD, smi_cmd);
	/* Poll: PENDING clears when the read completes. */
	for (timeout = 1000; timeout > 0; timeout--) {
		smi_rd_dat = mdio_reg_read(sc, SMI_RD_DAT);
		if (smi_rd_dat & SMI_RD_DAT_PENDING)
			DELAY(1000);
		else
			break;
	}

	if (smi_rd_dat & SMI_RD_DAT_VAL)
		return (smi_rd_dat & SMI_RD_DAT_DAT_MASK);
	else {
		/* Return 0 on error */
		return (0);
	}
}
/*
 * miibus write method.  Latches the data word, builds the SMI command
 * (opcode, PHY address, register offset) and polls for completion for
 * up to ~1s.  Currently always forces IEEE Clause 22 framing
 * (attached PHYs are <= 1Gbps).
 * Returns 0 on success, EIO on timeout.
 */
static int
thunder_mdio_write(device_t dev, int phy, int reg, int data)
{
	struct thunder_mdio_softc *sc;
	uint64_t cmd, wr_dat;
	ssize_t tries;

	sc = device_get_softc(dev);

	/* XXX Always C22 - for <= 1Gbps only */
	thunder_mdio_set_mode(sc, MODE_IEEE_C22);

	/* Latch the data to be written first. */
	mdio_reg_write(sc, SMI_WR_DAT, data & SMI_WR_DAT_DAT_MASK);

	/* Assemble the command word. */
	cmd = (sc->mode == MODE_IEEE_C22) ?
	    SMI_CMD_PHY_OP_C22_WRITE : SMI_CMD_PHY_OP_C45_WRITE;
	cmd |= ((phy << SMI_CMD_PHY_ADR_SHIFT) & SMI_CMD_PHY_ADR_MASK);
	cmd |= ((reg << SMI_CMD_PHY_REG_ADR_SHIFT) &
	    SMI_CMD_PHY_REG_ADR_MASK);
	mdio_reg_write(sc, SMI_CMD, cmd);

	/* Poll: PENDING clears when the write completes. */
	for (tries = 1000; tries > 0; tries--) {
		wr_dat = mdio_reg_read(sc, SMI_WR_DAT);
		if ((wr_dat & SMI_WR_DAT_PENDING) == 0)
			break;
		DELAY(1000);
	}

	return (tries > 0 ? 0 : EIO);
}
/*
 * if_media "change" callback placeholder for the fake ifnet handed to
 * mii_attach(); media changes go through the LMAC interface instead.
 */
static int
thunder_ifmedia_change_stub(if_t ifp __unused)
{
	/* Will never be called by if_media */
	return (0);
}
/*
 * if_media "status" callback placeholder for the fake ifnet handed to
 * mii_attach(); status queries go through the LMAC interface instead.
 */
static void
thunder_ifmedia_status_stub(if_t ifp __unused, struct ifmediareq
    *ifmr __unused)
{
	/* Will never be called by if_media */
}
/*
 * Look up the PHY descriptor registered for the given LMAC id.
 * Returns NULL when none matches.  The MDIO lock must be held.
 */
static __inline struct phy_desc *
get_phy_desc(struct thunder_mdio_softc *sc, int lmacid)
{
	struct phy_desc *iter;

	MDIO_LOCK_ASSERT(sc);
	TAILQ_FOREACH(iter, &sc->phy_desc_head, phy_desc_list) {
		if (iter->lmacid == lmacid)
			return (iter);
	}

	return (NULL);
}
/*
 * LMAC media-status method: report link state, speed (10/100/1000,
 * 0 for none) and duplex (1 = full) for the PHY bound to the given
 * LMAC.  Drives the MII state machine via mii_tick() before sampling.
 * Returns 0 on success, ENXIO when no PHY descriptor exists for the
 * LMAC (panics instead under INVARIANTS).
 */
static int
thunder_mdio_media_status(device_t dev, int lmacid, int *link, int *duplex,
    int *speed)
{
	struct thunder_mdio_softc *sc;
	struct mii_data *mii_sc;
	struct phy_desc *pd;

	sc = device_get_softc(dev);

	MDIO_LOCK(sc);
	pd = get_phy_desc(sc, lmacid);
	if (pd == NULL) {
		/* Panic when invariants are enabled, fail otherwise. */
		KASSERT(0, ("%s: no PHY descriptor for LMAC%d",
		    __func__, lmacid));
		MDIO_UNLOCK(sc);
		return (ENXIO);
	}
	mii_sc = device_get_softc(pd->miibus);

	/* Refresh the MII state before reading it. */
	mii_tick(mii_sc);
	if ((mii_sc->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		/* Link is up */
		*link = 1;
	} else
		*link = 0;

	switch (IFM_SUBTYPE(mii_sc->mii_media_active)) {
	case IFM_10_T:
		*speed = 10;
		break;
	case IFM_100_TX:
		*speed = 100;
		break;
	case IFM_1000_T:
		*speed = 1000;
		break;
	default:
		/* IFM_NONE */
		*speed = 0;
	}

	if ((IFM_OPTIONS(mii_sc->mii_media_active) & IFM_FDX) != 0)
		*duplex = 1;
	else
		*duplex = 0;

	MDIO_UNLOCK(sc);

	return (0);
}
/*
 * LMAC media-change method: forcing media parameters is not supported
 * by this driver, so always report EIO.
 */
static int
thunder_mdio_media_change(device_t dev, int lmacid, int link, int duplex,
    int speed)
{

	return (EIO);
}
static int
thunder_mdio_phy_connect(device_t dev, int lmacid, int phy)
{
struct thunder_mdio_softc *sc;
struct phy_desc *pd;
int err;
sc = device_get_softc(dev);
MDIO_LOCK(sc);
pd = get_phy_desc(sc, lmacid);
MDIO_UNLOCK(sc);
if (pd == NULL) {
pd = malloc(sizeof(*pd), M_THUNDER_MDIO, (M_NOWAIT | M_ZERO));
if (pd == NULL)
return (ENOMEM);
pd->ifp = if_alloc(IFT_ETHER);
- if (pd->ifp == NULL) {
- free(pd, M_THUNDER_MDIO);
- return (ENOMEM);
- }
pd->lmacid = lmacid;
}
err = mii_attach(dev, &pd->miibus, pd->ifp,
thunder_ifmedia_change_stub, thunder_ifmedia_status_stub,
BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
if (err != 0) {
device_printf(dev, "Could not attach PHY%d\n", phy);
if_free(pd->ifp);
free(pd, M_THUNDER_MDIO);
return (ENXIO);
}
MDIO_LOCK(sc);
TAILQ_INSERT_TAIL(&sc->phy_desc_head, pd, phy_desc_list);
MDIO_UNLOCK(sc);
return (0);
}
/*
 * Disconnect the PHY bound to the given LMAC: unlink its descriptor,
 * detach the miibus child, free the fake ifnet and the descriptor.
 * Returns 0 on success, EINVAL when no descriptor exists for the LMAC.
 *
 * NOTE(review): bus_generic_detach()/device_delete_child() are called
 * while holding the MDIO mutex -- confirm these cannot sleep in this
 * configuration.
 */
static int
thunder_mdio_phy_disconnect(device_t dev, int lmacid, int phy)
{
	struct thunder_mdio_softc *sc;
	struct phy_desc *pd;

	sc = device_get_softc(dev);
	MDIO_LOCK(sc);

	pd = get_phy_desc(sc, lmacid);
	if (pd == NULL) {
		MDIO_UNLOCK(sc);
		return (EINVAL);
	}

	/* Remove this PHY descriptor from the list */
	TAILQ_REMOVE(&sc->phy_desc_head, pd, phy_desc_list);

	/* Detach miibus */
	bus_generic_detach(dev);
	device_delete_child(dev, pd->miibus);
	/* Free fake ifnet */
	if_free(pd->ifp);
	/* Free memory under phy descriptor */
	free(pd, M_THUNDER_MDIO);
	MDIO_UNLOCK(sc);

	return (0);
}
diff --git a/sys/dev/vr/if_vr.c b/sys/dev/vr/if_vr.c
index 2afe16e3c6d2..02b55ef4f304 100644
--- a/sys/dev/vr/if_vr.c
+++ b/sys/dev/vr/if_vr.c
@@ -1,2664 +1,2659 @@
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) 1997, 1998
* Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
/*
* VIA Rhine fast ethernet PCI NIC driver
*
* Supports various network adapters based on the VIA Rhine
* and Rhine II PCI controllers, including the D-Link DFE530TX.
* Datasheets are available at http://www.via.com.tw.
*
* Written by Bill Paul <wpaul@ctr.columbia.edu>
* Electrical Engineering Department
* Columbia University, New York City
*/
/*
* The VIA Rhine controllers are similar in some respects to the
* the DEC tulip chips, except less complicated. The controller
* uses an MII bus and an external physical layer interface. The
* receiver has a one entry perfect filter and a 64-bit hash table
* multicast filter. Transmit and receive descriptors are similar
* to the tulip.
*
* Some Rhine chips has a serious flaw in its transmit DMA mechanism:
* transmit buffers must be longword aligned. Unfortunately,
* FreeBSD doesn't guarantee that mbufs will be filled in starting
* at longword boundaries, so we have to do a buffer copy before
* transmission.
*/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <dev/vr/if_vrreg.h>
/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
MODULE_DEPEND(vr, pci, 1, 1, 1);
MODULE_DEPEND(vr, ether, 1, 1, 1);
MODULE_DEPEND(vr, miibus, 1, 1, 1);
/* Define to show Rx/Tx error status. */
#undef VR_SHOW_ERRORS
#define VR_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
/*
* Various supported device vendors/types, their names & quirks.
*/
#define VR_Q_NEEDALIGN (1<<0)
#define VR_Q_CSUM (1<<1)
#define VR_Q_CAM (1<<2)
static const struct vr_type {
u_int16_t vr_vid;
u_int16_t vr_did;
int vr_quirks;
const char *vr_name;
} vr_devs[] = {
{ VIA_VENDORID, VIA_DEVICEID_RHINE,
VR_Q_NEEDALIGN,
"VIA VT3043 Rhine I 10/100BaseTX" },
{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
VR_Q_NEEDALIGN,
"VIA VT86C100A Rhine II 10/100BaseTX" },
{ VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
0,
"VIA VT6102 Rhine II 10/100BaseTX" },
{ VIA_VENDORID, VIA_DEVICEID_RHINE_III,
0,
"VIA VT6105 Rhine III 10/100BaseTX" },
{ VIA_VENDORID, VIA_DEVICEID_RHINE_III_M,
VR_Q_CSUM,
"VIA VT6105M Rhine III 10/100BaseTX" },
{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
VR_Q_NEEDALIGN,
"Delta Electronics Rhine II 10/100BaseTX" },
{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
VR_Q_NEEDALIGN,
"Addtron Technology Rhine II 10/100BaseTX" },
{ 0, 0, 0, NULL }
};
static int vr_probe(device_t);
static int vr_attach(device_t);
static int vr_detach(device_t);
static int vr_shutdown(device_t);
static int vr_suspend(device_t);
static int vr_resume(device_t);
static void vr_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int vr_dma_alloc(struct vr_softc *);
static void vr_dma_free(struct vr_softc *);
static __inline void vr_discard_rxbuf(struct vr_rxdesc *);
static int vr_newbuf(struct vr_softc *, int);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void vr_fixup_rx(struct mbuf *);
#endif
static int vr_rxeof(struct vr_softc *);
static void vr_txeof(struct vr_softc *);
static void vr_tick(void *);
static int vr_error(struct vr_softc *, uint16_t);
static void vr_tx_underrun(struct vr_softc *);
static int vr_intr(void *);
static void vr_int_task(void *, int);
static void vr_start(if_t);
static void vr_start_locked(if_t);
static int vr_encap(struct vr_softc *, struct mbuf **);
static int vr_ioctl(if_t, u_long, caddr_t);
static void vr_init(void *);
static void vr_init_locked(struct vr_softc *);
static void vr_tx_start(struct vr_softc *);
static void vr_rx_start(struct vr_softc *);
static int vr_tx_stop(struct vr_softc *);
static int vr_rx_stop(struct vr_softc *);
static void vr_stop(struct vr_softc *);
static void vr_watchdog(struct vr_softc *);
static int vr_ifmedia_upd(if_t);
static void vr_ifmedia_sts(if_t, struct ifmediareq *);
static int vr_miibus_readreg(device_t, int, int);
static int vr_miibus_writereg(device_t, int, int, int);
static void vr_miibus_statchg(device_t);
static void vr_cam_mask(struct vr_softc *, uint32_t, int);
static int vr_cam_data(struct vr_softc *, int, int, uint8_t *);
static void vr_set_filter(struct vr_softc *);
static void vr_reset(const struct vr_softc *);
static int vr_tx_ring_init(struct vr_softc *);
static int vr_rx_ring_init(struct vr_softc *);
static void vr_setwol(struct vr_softc *);
static void vr_clrwol(struct vr_softc *);
static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS);
static const struct vr_tx_threshold_table {
int tx_cfg;
int bcr_cfg;
int value;
} vr_tx_threshold_tables[] = {
{ VR_TXTHRESH_64BYTES, VR_BCR1_TXTHRESH64BYTES, 64 },
{ VR_TXTHRESH_128BYTES, VR_BCR1_TXTHRESH128BYTES, 128 },
{ VR_TXTHRESH_256BYTES, VR_BCR1_TXTHRESH256BYTES, 256 },
{ VR_TXTHRESH_512BYTES, VR_BCR1_TXTHRESH512BYTES, 512 },
{ VR_TXTHRESH_1024BYTES, VR_BCR1_TXTHRESH1024BYTES, 1024 },
{ VR_TXTHRESH_STORENFWD, VR_BCR1_TXTHRESHSTORENFWD, 2048 }
};
static device_method_t vr_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, vr_probe),
DEVMETHOD(device_attach, vr_attach),
DEVMETHOD(device_detach, vr_detach),
DEVMETHOD(device_shutdown, vr_shutdown),
DEVMETHOD(device_suspend, vr_suspend),
DEVMETHOD(device_resume, vr_resume),
/* MII interface */
DEVMETHOD(miibus_readreg, vr_miibus_readreg),
DEVMETHOD(miibus_writereg, vr_miibus_writereg),
DEVMETHOD(miibus_statchg, vr_miibus_statchg),
DEVMETHOD_END
};
static driver_t vr_driver = {
"vr",
vr_methods,
sizeof(struct vr_softc)
};
DRIVER_MODULE(vr, pci, vr_driver, 0, 0);
DRIVER_MODULE(miibus, vr, miibus_driver, 0, 0);
/*
 * Read a PHY register over the MII interface.  The READ_ENB bit
 * self-clears when the access completes; poll for that.  On timeout a
 * warning is logged and the (possibly stale) data register contents
 * are still returned.
 */
static int
vr_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vr_softc *sc;
	int tries;

	sc = device_get_softc(dev);

	/* Select the register and start the read cycle. */
	CSR_WRITE_1(sc, VR_MIIADDR, reg);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);
	for (tries = 0; tries < VR_MII_TIMEOUT; tries++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
			break;
	}
	if (tries == VR_MII_TIMEOUT)
		device_printf(sc->vr_dev, "phy read timeout %d:%d\n", phy, reg);

	return (CSR_READ_2(sc, VR_MIIDATA));
}
/*
 * Write a PHY register over the MII interface.  The WRITE_ENB bit
 * self-clears when the access completes; poll for that and log a
 * warning on timeout.  Always returns 0 (miibus convention).
 */
static int
vr_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct vr_softc *sc;
	int tries;

	sc = device_get_softc(dev);

	/* Select the register, latch the data and start the write cycle. */
	CSR_WRITE_1(sc, VR_MIIADDR, reg);
	CSR_WRITE_2(sc, VR_MIIDATA, data);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);
	for (tries = 0; tries < VR_MII_TIMEOUT; tries++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
			break;
	}
	if (tries == VR_MII_TIMEOUT)
		device_printf(sc->vr_dev, "phy write timeout %d:%d\n", phy,
		    reg);

	return (0);
}
/*
 * MII link-state change callback.  In order to fiddle with the
 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
 * first have to put the transmit and/or receive logic in the idle
 * state, so Tx/Rx are stopped, duplex and flow-control are
 * reprogrammed, and Tx/Rx are restarted.  On a Tx/Rx shutdown failure
 * VR_F_RESTART is set so vr_tick() can reset the chip.
 *
 * NOTE(review): the VR_UNLOCK() before the early return below is the
 * only exit path that drops the driver lock -- confirm callers expect
 * the lock to be released there.
 */
static void
vr_miibus_statchg(device_t dev)
{
	struct vr_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	int lfdx, mfdx;
	uint8_t cr0, cr1, fc;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->vr_miibus);
	ifp = sc->vr_ifp;
	if (mii == NULL || ifp == NULL ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	/* Recompute link state; only 10/100 media count as link up. */
	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vr_flags |= VR_F_LINK;
			break;
		default:
			break;
		}
	}

	if ((sc->vr_flags & VR_F_LINK) != 0) {
		cr0 = CSR_READ_1(sc, VR_CR0);
		cr1 = CSR_READ_1(sc, VR_CR1);
		/* MAC duplex (mfdx) vs. link-negotiated duplex (lfdx). */
		mfdx = (cr1 & VR_CR1_FULLDUPLEX) != 0;
		lfdx = (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0;
		if (mfdx != lfdx) {
			/* Tx/Rx must be idle before touching duplex. */
			if ((cr0 & (VR_CR0_TX_ON | VR_CR0_RX_ON)) != 0) {
				if (vr_tx_stop(sc) != 0 ||
				    vr_rx_stop(sc) != 0) {
					device_printf(sc->vr_dev,
					    "%s: Tx/Rx shutdown error -- "
					    "resetting\n", __func__);
					sc->vr_flags |= VR_F_RESTART;
					VR_UNLOCK(sc);
					return;
				}
			}
			if (lfdx)
				cr1 |= VR_CR1_FULLDUPLEX;
			else
				cr1 &= ~VR_CR1_FULLDUPLEX;
			CSR_WRITE_1(sc, VR_CR1, cr1);
		}
		fc = 0;
		/* Configure flow-control. */
		if (sc->vr_revid >= REV_ID_VT6105_A0) {
			/* Rhine III and newer: Rx and Tx pause. */
			fc = CSR_READ_1(sc, VR_FLOWCR1);
			fc &= ~(VR_FLOWCR1_TXPAUSE | VR_FLOWCR1_RXPAUSE);
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_RXPAUSE) != 0)
				fc |= VR_FLOWCR1_RXPAUSE;
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_TXPAUSE) != 0) {
				fc |= VR_FLOWCR1_TXPAUSE;
				sc->vr_flags |= VR_F_TXPAUSE;
			}
			CSR_WRITE_1(sc, VR_FLOWCR1, fc);
		} else if (sc->vr_revid >= REV_ID_VT6102_A) {
			/* No Tx pause capability available for Rhine II. */
			fc = CSR_READ_1(sc, VR_MISC_CR0);
			fc &= ~VR_MISCCR0_RXPAUSE;
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_RXPAUSE) != 0)
				fc |= VR_MISCCR0_RXPAUSE;
			CSR_WRITE_1(sc, VR_MISC_CR0, fc);
		}
		vr_rx_start(sc);
		vr_tx_start(sc);
	} else {
		/* Link lost: idle the MAC; flag a reset on failure. */
		if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) {
			device_printf(sc->vr_dev,
			    "%s: Tx/Rx shutdown error -- resetting\n",
			    __func__);
			sc->vr_flags |= VR_F_RESTART;
		}
	}
}
/*
 * Program the valid-entry mask of either the multicast or the VLAN
 * CAM.  Argument order is (sc, mask, type): 'mask' is the bitmap of
 * valid CAM slots, 'type' selects VR_MCAST_CAM vs. the VLAN CAM.
 */
static void
vr_cam_mask(struct vr_softc *sc, uint32_t mask, int type)
{

	if (type == VR_MCAST_CAM)
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
	else
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);
	CSR_WRITE_4(sc, VR_CAMMASK, mask);
	CSR_WRITE_1(sc, VR_CAMCTL, 0);
}
/*
 * Load one entry into the multicast or VLAN CAM.  For the multicast
 * CAM, 'mac' is a 6-byte Ethernet address and 'idx' must be within
 * [0, VR_CAM_MCAST_CNT); for the VLAN CAM only the first two bytes of
 * 'mac' are used.  Waits for the self-clearing WRITE bit, up to
 * VR_TIMEOUT iterations.
 * Returns 0 on success, EINVAL on bad multicast arguments, ETIMEDOUT
 * when the CAM write did not complete.
 */
static int
vr_cam_data(struct vr_softc *sc, int type, int idx, uint8_t *mac)
{
	int i;

	if (type == VR_MCAST_CAM) {
		if (idx < 0 || idx >= VR_CAM_MCAST_CNT || mac == NULL)
			return (EINVAL);
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
	} else
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);

	/* Set CAM entry address. */
	CSR_WRITE_1(sc, VR_CAMADDR, idx);
	/* Set CAM entry data. */
	if (type == VR_MCAST_CAM) {
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			CSR_WRITE_1(sc, VR_MCAM0 + i, mac[i]);
	} else {
		CSR_WRITE_1(sc, VR_VCAM0, mac[0]);
		CSR_WRITE_1(sc, VR_VCAM1, mac[1]);
	}
	DELAY(10);
	/* Write CAM and wait for self-clear of VR_CAMCTL_WRITE bit. */
	CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_WRITE);
	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_CAMCTL) & VR_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VR_TIMEOUT)
		device_printf(sc->vr_dev, "%s: setting CAM filter timeout!\n",
		    __func__);
	CSR_WRITE_1(sc, VR_CAMCTL, 0);

	return (i == VR_TIMEOUT ? ETIMEDOUT : 0);
}
struct vr_hash_maddr_cam_ctx {
struct vr_softc *sc;
uint32_t mask;
int error;
};
/*
 * if_foreach_llmaddr() callback: program one multicast address into
 * CAM slot 'mcnt' (the running count of addresses seen) and record it
 * in ctx->mask.  After any CAM write fails, the mask is cleared and
 * all remaining addresses are skipped so the caller falls back to
 * hash-table filtering.
 */
static u_int
vr_hash_maddr_cam(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
{
	struct vr_hash_maddr_cam_ctx *ctx = arg;

	if (ctx->error != 0)
		return (0);
	ctx->error = vr_cam_data(ctx->sc, VR_MCAST_CAM, mcnt, LLADDR(sdl));
	if (ctx->error != 0) {
		ctx->mask = 0;
		return (0);
	}
	ctx->mask |= 1 << mcnt;

	return (1);
}
/*
 * if_foreach_llmaddr() callback: fold one multicast address into the
 * 64-bit hash-table filter (two 32-bit words), indexed by the upper
 * 6 bits of the big-endian CRC32 of the address.
 */
static u_int
vr_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *hashes = arg;
	int bit;

	/* bit is in [0, 63]: word = bit / 32, position = bit % 32. */
	bit = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
	hashes[bit / 32] |= 1 << (bit % 32);

	return (1);
}
/*
 * Program the receive filter: handle the promiscuous/allmulti
 * shortcuts, then use the 32-entry multicast CAM on chips that have it
 * (VR_Q_CAM) and fall back to the 64-bit multicast hash table
 * otherwise or when CAM programming fails.
 *
 * Fixes:
 *  - the CAM error reported through ctx.error was never copied into
 *    'error', so the hash-table fallback on CAM failure was dead code;
 *  - vr_cam_mask() takes (sc, mask, type), but was being called with
 *    the mask and type arguments transposed.
 */
static void
vr_set_filter(struct vr_softc *sc)
{
	if_t ifp;
	uint32_t hashes[2] = { 0, 0 };
	uint8_t rxfilt;
	int error, mcnt;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	rxfilt = CSR_READ_1(sc, VR_RXCFG);
	rxfilt &= ~(VR_RXCFG_RX_PROMISC | VR_RXCFG_RX_BROAD |
	    VR_RXCFG_RX_MULTI);
	if (if_getflags(ifp) & IFF_BROADCAST)
		rxfilt |= VR_RXCFG_RX_BROAD;
	if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) {
		/* Accept everything; no per-address filtering needed. */
		rxfilt |= VR_RXCFG_RX_MULTI;
		if (if_getflags(ifp) & IFF_PROMISC)
			rxfilt |= VR_RXCFG_RX_PROMISC;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* Now program new ones. */
	error = 0;
	mcnt = 0;
	if ((sc->vr_quirks & VR_Q_CAM) != 0) {
		struct vr_hash_maddr_cam_ctx ctx;

		/*
		 * For hardwares that have CAM capability, use
		 * 32 entries multicast perfect filter.
		 */
		ctx.sc = sc;
		ctx.mask = 0;
		ctx.error = 0;
		mcnt = if_foreach_llmaddr(ifp, vr_hash_maddr_cam, &ctx);
		vr_cam_mask(sc, ctx.mask, VR_MCAST_CAM);
		/* Propagate any CAM failure to trigger the fallback. */
		error = ctx.error;
	}

	if ((sc->vr_quirks & VR_Q_CAM) == 0 || error != 0) {
		/*
		 * If there are too many multicast addresses or
		 * setting multicast CAM filter failed, use hash
		 * table based filtering.
		 */
		mcnt = if_foreach_llmaddr(ifp, vr_hash_maddr, hashes);
	}

	if (mcnt > 0)
		rxfilt |= VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}
/*
 * Soft-reset the chip and wait for the self-clearing RESET bit.  If
 * the normal reset never completes, newer chips (>= VT6102 rev A) are
 * given a force-reset command instead.
 */
static void
vr_reset(const struct vr_softc *sc)
{
	int i;

	/*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */

	CSR_WRITE_1(sc, VR_CR1, VR_CR1_RESET);
	if (sc->vr_revid < REV_ID_VT6102_A) {
		/* VT86C100A needs more delay after reset. */
		DELAY(100);
	}
	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, VR_CR1) & VR_CR1_RESET))
			break;
	}
	if (i == VR_TIMEOUT) {
		if (sc->vr_revid < REV_ID_VT6102_A)
			device_printf(sc->vr_dev, "reset never completed!\n");
		else {
			/* Use newer force reset command. */
			device_printf(sc->vr_dev,
			    "Using force reset command.\n");
			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
			/*
			 * Wait a little while for the chip to get its brains
			 * in order.
			 */
			DELAY(2000);
		}
	}
}
/*
 * Probe for a VIA Rhine chip.  Check the PCI vendor and device
 * IDs against our list and return the matching table entry or NULL.
 *
 * Fix: the iterator was initialized twice (at declaration and again
 * in the for statement); the dead store is removed.
 */
static const struct vr_type *
vr_match(device_t dev)
{
	const struct vr_type *t;

	for (t = vr_devs; t->vr_name != NULL; t++)
		if ((pci_get_vendor(dev) == t->vr_vid) &&
		    (pci_get_device(dev) == t->vr_did))
			return (t);

	return (NULL);
}
/*
 * Probe for a VIA Rhine chip.  Check the PCI vendor and device
 * IDs against our list and set the device description on a match.
 */
static int
vr_probe(device_t dev)
{
	const struct vr_type *t;

	t = vr_match(dev);
	if (t == NULL)
		return (ENXIO);

	device_set_desc(dev, t->vr_name);
	return (BUS_PROBE_DEFAULT);
}
/*
* Attach the interface. Allocate softc structures, do ifmedia
* setup and ethernet/BPF attach.
*/
/*
 * Device attach: allocate bus resources, discover chip revision and
 * quirks, read the station address, attach the MII bus and the ifnet,
 * and finally hook the interrupt.  Any failure jumps to `fail`, which
 * delegates cleanup to vr_detach().
 */
static int
vr_attach(device_t dev)
{
struct vr_softc *sc;
if_t ifp;
const struct vr_type *t;
uint8_t eaddr[ETHER_ADDR_LEN];
int error, rid;
int i, phy, pmc;
sc = device_get_softc(dev);
sc->vr_dev = dev;
t = vr_match(dev);
KASSERT(t != NULL, ("Lost if_vr device match"));
sc->vr_quirks = t->vr_quirks;
device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks);
/* Mutex and callout must exist before any failure path (vr_detach). */
mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF);
callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0);
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
sc, 0, vr_sysctl_stats, "I", "Statistics");
error = 0;
/*
* Map control/status registers.
*/
pci_enable_busmaster(dev);
sc->vr_revid = pci_get_revid(dev);
device_printf(dev, "Revision: 0x%x\n", sc->vr_revid);
/* Registers are accessed through I/O port BAR 0. */
sc->vr_res_id = PCIR_BAR(0);
sc->vr_res_type = SYS_RES_IOPORT;
sc->vr_res = bus_alloc_resource_any(dev, sc->vr_res_type,
&sc->vr_res_id, RF_ACTIVE);
if (sc->vr_res == NULL) {
device_printf(dev, "couldn't map ports\n");
error = ENXIO;
goto fail;
}
/* Allocate interrupt. */
rid = 0;
sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_SHAREABLE | RF_ACTIVE);
if (sc->vr_irq == NULL) {
device_printf(dev, "couldn't map interrupt\n");
error = ENXIO;
goto fail;
}
/* Allocate ifnet structure. */
ifp = sc->vr_ifp = if_alloc(IFT_ETHER);
/*
 * NOTE(review): the five lines prefixed with "-" below look like
 * leftover diff markup (removed if_alloc() failure handling), not
 * compilable C -- confirm against the upstream revision.
 */
- if (ifp == NULL) {
- device_printf(dev, "couldn't allocate ifnet structure\n");
- error = ENOSPC;
- goto fail;
- }
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setioctlfn(ifp, vr_ioctl);
if_setstartfn(ifp, vr_start);
if_setinitfn(ifp, vr_init);
if_setsendqlen(ifp, VR_TX_RING_CNT - 1);
if_setsendqready(ifp);
/* Deferred interrupt work runs as a net task (see vr_int_task). */
NET_TASK_INIT(&sc->vr_inttask, 0, vr_int_task, sc);
/* Configure Tx FIFO threshold. */
sc->vr_txthresh = VR_TXTHRESH_MIN;
if (sc->vr_revid < REV_ID_VT6105_A0) {
/*
* Use store and forward mode for Rhine I/II.
* Otherwise they produce a lot of Tx underruns and
* it would take a while to get working FIFO threshold
* value.
*/
sc->vr_txthresh = VR_TXTHRESH_MAX;
}
if ((sc->vr_quirks & VR_Q_CSUM) != 0) {
if_sethwassist(ifp, VR_CSUM_FEATURES);
if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
/*
* To update checksum field the hardware may need to
* store entire frames into FIFO before transmitting.
*/
sc->vr_txthresh = VR_TXTHRESH_MAX;
}
/* WOL is only advertised on VT6102 and later with PM capability. */
if (sc->vr_revid >= REV_ID_VT6102_A &&
pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
if_setcapabilitiesbit(ifp, IFCAP_WOL_UCAST | IFCAP_WOL_MAGIC, 0);
/* Rhine supports oversized VLAN frame. */
if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
if_setcapenable(ifp, if_getcapabilities(ifp));
#ifdef DEVICE_POLLING
if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif
/*
* Windows may put the chip in suspend mode when it
* shuts down. Be sure to kick it in the head to wake it
* up again.
*/
if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
/*
* Get station address. The way the Rhine chips work,
* you're not allowed to directly access the EEPROM once
* they've been programmed a special way. Consequently,
* we need to read the node address from the PAR0 and PAR1
* registers.
* Reloading EEPROM also overwrites VR_CFGA, VR_CFGB,
* VR_CFGC and VR_CFGD such that memory mapped IO configured
* by driver is reset to default state.
*/
VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
for (i = VR_TIMEOUT; i > 0; i--) {
DELAY(1);
if ((CSR_READ_1(sc, VR_EECSR) & VR_EECSR_LOAD) == 0)
break;
}
if (i == 0)
device_printf(dev, "Reloading EEPROM timeout!\n");
for (i = 0; i < ETHER_ADDR_LEN; i++)
eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
/* Reset the adapter. */
vr_reset(sc);
/* Ack intr & disable further interrupts. */
CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
CSR_WRITE_2(sc, VR_IMR, 0);
if (sc->vr_revid >= REV_ID_VT6102_A)
CSR_WRITE_2(sc, VR_MII_IMR, 0);
if (sc->vr_revid < REV_ID_VT6102_A) {
pci_write_config(dev, VR_PCI_MODE2,
pci_read_config(dev, VR_PCI_MODE2, 1) |
VR_MODE2_MODE10T, 1);
} else {
/* Report error instead of retrying forever. */
pci_write_config(dev, VR_PCI_MODE2,
pci_read_config(dev, VR_PCI_MODE2, 1) |
VR_MODE2_PCEROPT, 1);
/* Detect MII coding error. */
pci_write_config(dev, VR_PCI_MODE3,
pci_read_config(dev, VR_PCI_MODE3, 1) |
VR_MODE3_MIION, 1);
if (sc->vr_revid >= REV_ID_VT6105_LOM &&
sc->vr_revid < REV_ID_VT6105M_A0)
pci_write_config(dev, VR_PCI_MODE2,
pci_read_config(dev, VR_PCI_MODE2, 1) |
VR_MODE2_MODE10T, 1);
/* Enable Memory-Read-Multiple. */
if (sc->vr_revid >= REV_ID_VT6107_A1 &&
sc->vr_revid < REV_ID_VT6105M_A0)
pci_write_config(dev, VR_PCI_MODE2,
pci_read_config(dev, VR_PCI_MODE2, 1) |
VR_MODE2_MRDPL, 1);
}
/* Disable MII AUTOPOLL. */
VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);
if (vr_dma_alloc(sc) != 0) {
error = ENXIO;
goto fail;
}
/* Do MII setup. */
if (sc->vr_revid >= REV_ID_VT6105_A0)
phy = 1;
else
phy = CSR_READ_1(sc, VR_PHYADDR) & VR_PHYADDR_MASK;
error = mii_attach(dev, &sc->vr_miibus, ifp, vr_ifmedia_upd,
vr_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY,
sc->vr_revid >= REV_ID_VT6102_A ? MIIF_DOPAUSE : 0);
if (error != 0) {
device_printf(dev, "attaching PHYs failed\n");
goto fail;
}
/* Call MI attach routine. */
ether_ifattach(ifp, eaddr);
/*
* Tell the upper layer(s) we support long frames.
* Must appear after the call to ether_ifattach() because
* ether_ifattach() sets ifi_hdrlen to the default value.
*/
if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
/* Hook interrupt last to avoid having to lock softc. */
error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE,
vr_intr, NULL, sc, &sc->vr_intrhand);
if (error) {
device_printf(dev, "couldn't set up irq\n");
ether_ifdetach(ifp);
goto fail;
}
fail:
/* All partial-attach cleanup is centralized in vr_detach(). */
if (error)
vr_detach(dev);
return (error);
}
/*
* Shutdown hardware and free up resources. This can be called any
* time after the mutex has been initialized. It is called in both
* the error case in attach and the normal detach case so it needs
* to be careful about only freeing resources that have actually been
* allocated.
*/
static int
vr_detach(device_t dev)
{
struct vr_softc *sc = device_get_softc(dev);
if_t ifp = sc->vr_ifp;
/* The mutex is created first in attach, so it must exist here. */
KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));
#ifdef DEVICE_POLLING
if (ifp != NULL && if_getcapenable(ifp) & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
/* These should only be active if attach succeeded. */
if (device_is_attached(dev)) {
VR_LOCK(sc);
/* VR_F_DETACHED tells other paths to stop touching hardware. */
sc->vr_flags |= VR_F_DETACHED;
vr_stop(sc);
VR_UNLOCK(sc);
/* Drain callout/task without the lock to avoid deadlock. */
callout_drain(&sc->vr_stat_callout);
taskqueue_drain(taskqueue_fast, &sc->vr_inttask);
ether_ifdetach(ifp);
}
if (sc->vr_miibus)
device_delete_child(dev, sc->vr_miibus);
bus_generic_detach(dev);
/* Tear down the interrupt before releasing bus resources. */
if (sc->vr_intrhand)
bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
if (sc->vr_irq)
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
if (sc->vr_res)
bus_release_resource(dev, sc->vr_res_type, sc->vr_res_id,
sc->vr_res);
if (ifp)
if_free(ifp);
vr_dma_free(sc);
mtx_destroy(&sc->vr_mtx);
return (0);
}
/* Result carrier for vr_dmamap_cb(): bus address of the loaded segment. */
struct vr_dmamap_arg {
bus_addr_t vr_busaddr;
};
/*
 * bus_dmamap_load() callback: stash the (single) segment's bus address
 * into the caller-supplied vr_dmamap_arg.  On error nothing is written,
 * so the caller can detect a failed load by its sentinel value.
 */
static void
vr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct vr_dmamap_arg *ctx = arg;

	if (error != 0)
		return;
	ctx->vr_busaddr = segs[0].ds_addr;
}
/*
 * Create all DMA tags, allocate and load the Tx/Rx descriptor rings,
 * and create per-buffer DMA maps.  Returns 0 on success or a bus_dma
 * errno; on failure the caller (vr_attach) invokes vr_detach(), which
 * calls vr_dma_free() to release whatever was created.
 */
static int
vr_dma_alloc(struct vr_softc *sc)
{
struct vr_dmamap_arg ctx;
struct vr_txdesc *txd;
struct vr_rxdesc *rxd;
bus_size_t tx_alignment;
int error, i;
/* Create parent DMA tag. */
error = bus_dma_tag_create(
bus_get_dma_tag(sc->vr_dev), /* parent */
1, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
0, /* nsegments */
BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->vr_cdata.vr_parent_tag);
if (error != 0) {
device_printf(sc->vr_dev, "failed to create parent DMA tag\n");
goto fail;
}
/* Create tag for Tx ring. */
error = bus_dma_tag_create(
sc->vr_cdata.vr_parent_tag, /* parent */
VR_RING_ALIGN, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
VR_TX_RING_SIZE, /* maxsize */
1, /* nsegments */
VR_TX_RING_SIZE, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->vr_cdata.vr_tx_ring_tag);
if (error != 0) {
device_printf(sc->vr_dev, "failed to create Tx ring DMA tag\n");
goto fail;
}
/* Create tag for Rx ring. */
error = bus_dma_tag_create(
sc->vr_cdata.vr_parent_tag, /* parent */
VR_RING_ALIGN, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
VR_RX_RING_SIZE, /* maxsize */
1, /* nsegments */
VR_RX_RING_SIZE, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->vr_cdata.vr_rx_ring_tag);
if (error != 0) {
device_printf(sc->vr_dev, "failed to create Rx ring DMA tag\n");
goto fail;
}
/* VR_Q_NEEDALIGN chips require longword-aligned Tx buffers. */
if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0)
tx_alignment = sizeof(uint32_t);
else
tx_alignment = 1;
/* Create tag for Tx buffers. */
error = bus_dma_tag_create(
sc->vr_cdata.vr_parent_tag, /* parent */
tx_alignment, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
MCLBYTES * VR_MAXFRAGS, /* maxsize */
VR_MAXFRAGS, /* nsegments */
MCLBYTES, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->vr_cdata.vr_tx_tag);
if (error != 0) {
device_printf(sc->vr_dev, "failed to create Tx DMA tag\n");
goto fail;
}
/* Create tag for Rx buffers. */
error = bus_dma_tag_create(
sc->vr_cdata.vr_parent_tag, /* parent */
VR_RX_ALIGN, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
MCLBYTES, /* maxsize */
1, /* nsegments */
MCLBYTES, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->vr_cdata.vr_rx_tag);
if (error != 0) {
device_printf(sc->vr_dev, "failed to create Rx DMA tag\n");
goto fail;
}
/* Allocate DMA'able memory and load the DMA map for Tx ring. */
error = bus_dmamem_alloc(sc->vr_cdata.vr_tx_ring_tag,
(void **)&sc->vr_rdata.vr_tx_ring, BUS_DMA_WAITOK |
BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_tx_ring_map);
if (error != 0) {
device_printf(sc->vr_dev,
"failed to allocate DMA'able memory for Tx ring\n");
goto fail;
}
/* ctx.vr_busaddr == 0 after the load means the callback never ran. */
ctx.vr_busaddr = 0;
error = bus_dmamap_load(sc->vr_cdata.vr_tx_ring_tag,
sc->vr_cdata.vr_tx_ring_map, sc->vr_rdata.vr_tx_ring,
VR_TX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
if (error != 0 || ctx.vr_busaddr == 0) {
device_printf(sc->vr_dev,
"failed to load DMA'able memory for Tx ring\n");
goto fail;
}
sc->vr_rdata.vr_tx_ring_paddr = ctx.vr_busaddr;
/* Allocate DMA'able memory and load the DMA map for Rx ring. */
error = bus_dmamem_alloc(sc->vr_cdata.vr_rx_ring_tag,
(void **)&sc->vr_rdata.vr_rx_ring, BUS_DMA_WAITOK |
BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_rx_ring_map);
if (error != 0) {
device_printf(sc->vr_dev,
"failed to allocate DMA'able memory for Rx ring\n");
goto fail;
}
ctx.vr_busaddr = 0;
error = bus_dmamap_load(sc->vr_cdata.vr_rx_ring_tag,
sc->vr_cdata.vr_rx_ring_map, sc->vr_rdata.vr_rx_ring,
VR_RX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
if (error != 0 || ctx.vr_busaddr == 0) {
device_printf(sc->vr_dev,
"failed to load DMA'able memory for Rx ring\n");
goto fail;
}
sc->vr_rdata.vr_rx_ring_paddr = ctx.vr_busaddr;
/* Create DMA maps for Tx buffers. */
for (i = 0; i < VR_TX_RING_CNT; i++) {
txd = &sc->vr_cdata.vr_txdesc[i];
txd->tx_m = NULL;
txd->tx_dmamap = NULL;
error = bus_dmamap_create(sc->vr_cdata.vr_tx_tag, 0,
&txd->tx_dmamap);
if (error != 0) {
device_printf(sc->vr_dev,
"failed to create Tx dmamap\n");
goto fail;
}
}
/* Create DMA maps for Rx buffers. */
/* The spare map is swapped with a slot's map in vr_newbuf(). */
if ((error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
&sc->vr_cdata.vr_rx_sparemap)) != 0) {
device_printf(sc->vr_dev,
"failed to create spare Rx dmamap\n");
goto fail;
}
for (i = 0; i < VR_RX_RING_CNT; i++) {
rxd = &sc->vr_cdata.vr_rxdesc[i];
rxd->rx_m = NULL;
rxd->rx_dmamap = NULL;
error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
&rxd->rx_dmamap);
if (error != 0) {
device_printf(sc->vr_dev,
"failed to create Rx dmamap\n");
goto fail;
}
}
fail:
return (error);
}
/*
 * Release everything created by vr_dma_alloc().  Safe to call with
 * partially-initialized state: each tag, map and memory region is
 * NULL-checked before it is unloaded/freed/destroyed, and pointers
 * are cleared afterwards so a second call is harmless.
 */
static void
vr_dma_free(struct vr_softc *sc)
{
struct vr_txdesc *txd;
struct vr_rxdesc *rxd;
int i;
/* Tx ring. */
if (sc->vr_cdata.vr_tx_ring_tag) {
if (sc->vr_rdata.vr_tx_ring_paddr)
bus_dmamap_unload(sc->vr_cdata.vr_tx_ring_tag,
sc->vr_cdata.vr_tx_ring_map);
if (sc->vr_rdata.vr_tx_ring)
bus_dmamem_free(sc->vr_cdata.vr_tx_ring_tag,
sc->vr_rdata.vr_tx_ring,
sc->vr_cdata.vr_tx_ring_map);
sc->vr_rdata.vr_tx_ring = NULL;
sc->vr_rdata.vr_tx_ring_paddr = 0;
bus_dma_tag_destroy(sc->vr_cdata.vr_tx_ring_tag);
sc->vr_cdata.vr_tx_ring_tag = NULL;
}
/* Rx ring. */
if (sc->vr_cdata.vr_rx_ring_tag) {
if (sc->vr_rdata.vr_rx_ring_paddr)
bus_dmamap_unload(sc->vr_cdata.vr_rx_ring_tag,
sc->vr_cdata.vr_rx_ring_map);
if (sc->vr_rdata.vr_rx_ring)
bus_dmamem_free(sc->vr_cdata.vr_rx_ring_tag,
sc->vr_rdata.vr_rx_ring,
sc->vr_cdata.vr_rx_ring_map);
sc->vr_rdata.vr_rx_ring = NULL;
sc->vr_rdata.vr_rx_ring_paddr = 0;
bus_dma_tag_destroy(sc->vr_cdata.vr_rx_ring_tag);
sc->vr_cdata.vr_rx_ring_tag = NULL;
}
/* Tx buffers. */
if (sc->vr_cdata.vr_tx_tag) {
for (i = 0; i < VR_TX_RING_CNT; i++) {
txd = &sc->vr_cdata.vr_txdesc[i];
if (txd->tx_dmamap) {
bus_dmamap_destroy(sc->vr_cdata.vr_tx_tag,
txd->tx_dmamap);
txd->tx_dmamap = NULL;
}
}
bus_dma_tag_destroy(sc->vr_cdata.vr_tx_tag);
sc->vr_cdata.vr_tx_tag = NULL;
}
/* Rx buffers. */
if (sc->vr_cdata.vr_rx_tag) {
for (i = 0; i < VR_RX_RING_CNT; i++) {
rxd = &sc->vr_cdata.vr_rxdesc[i];
if (rxd->rx_dmamap) {
bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
rxd->rx_dmamap);
rxd->rx_dmamap = NULL;
}
}
if (sc->vr_cdata.vr_rx_sparemap) {
bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
sc->vr_cdata.vr_rx_sparemap);
sc->vr_cdata.vr_rx_sparemap = 0;
}
bus_dma_tag_destroy(sc->vr_cdata.vr_rx_tag);
sc->vr_cdata.vr_rx_tag = NULL;
}
/* Parent tag goes last, after all child tags are destroyed. */
if (sc->vr_cdata.vr_parent_tag) {
bus_dma_tag_destroy(sc->vr_cdata.vr_parent_tag);
sc->vr_cdata.vr_parent_tag = NULL;
}
}
/*
* Initialize the transmit descriptors.
*/
/*
 * Reset the Tx ring: clear the software indices, zero every
 * descriptor and chain them into a circular list via vr_nextphys.
 * Always succeeds (returns 0).
 */
static int
vr_tx_ring_init(struct vr_softc *sc)
{
	struct vr_ring_data *rd;
	bus_addr_t next;
	int i;

	sc->vr_cdata.vr_tx_prod = 0;
	sc->vr_cdata.vr_tx_cons = 0;
	sc->vr_cdata.vr_tx_cnt = 0;
	sc->vr_cdata.vr_tx_pkts = 0;

	rd = &sc->vr_rdata;
	bzero(rd->vr_tx_ring, VR_TX_RING_SIZE);
	for (i = 0; i < VR_TX_RING_CNT; i++) {
		/* The last descriptor wraps back to the first. */
		next = VR_TX_RING_ADDR(sc, (i + 1) % VR_TX_RING_CNT);
		rd->vr_tx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(next));
		sc->vr_cdata.vr_txdesc[i].tx_m = NULL;
	}
	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}
/*
* Initialize the RX descriptors and allocate mbufs for them. Note that
* we arrange the descriptors in a closed ring, so that the last descriptor
* points back to the first.
*/
/*
 * Reset the Rx ring: link the descriptors into a circular list and
 * attach a fresh mbuf cluster to each slot via vr_newbuf().  Returns
 * ENOBUFS if any cluster allocation fails, 0 otherwise.
 */
static int
vr_rx_ring_init(struct vr_softc *sc)
{
	struct vr_ring_data *rd;
	struct vr_rxdesc *slot;
	bus_addr_t next;
	int i;

	sc->vr_cdata.vr_rx_cons = 0;

	rd = &sc->vr_rdata;
	bzero(rd->vr_rx_ring, VR_RX_RING_SIZE);
	for (i = 0; i < VR_RX_RING_CNT; i++) {
		slot = &sc->vr_cdata.vr_rxdesc[i];
		slot->rx_m = NULL;
		slot->desc = &rd->vr_rx_ring[i];
		/* The last descriptor wraps back to the first. */
		next = VR_RX_RING_ADDR(sc, (i + 1) % VR_RX_RING_CNT);
		rd->vr_rx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(next));
		if (vr_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}
	bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
	    sc->vr_cdata.vr_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}
/*
 * Recycle an Rx buffer in place after an error: rewrite the control
 * word for a full-size cluster and hand the descriptor back to the
 * chip.  The attached mbuf cluster is left untouched for reuse.
 */
static __inline void
vr_discard_rxbuf(struct vr_rxdesc *rxd)
{
	struct vr_desc *d = rxd->desc;

	/* Usable length is MCLBYTES less the sizeof(uint64_t) head room. */
	d->vr_ctl = htole32(VR_RXCTL | (MCLBYTES - sizeof(uint64_t)));
	/* Hand ownership back to the hardware last. */
	d->vr_status = htole32(VR_RXSTAT_OWN);
}
/*
* Initialize an RX descriptor and attach an MBUF cluster.
* Note: the length fields are only 11 bits wide, which means the
* largest size we can specify is 2047. This is important because
* MCLBYTES is 2048, so we have to subtract one otherwise we'll
* overflow the field and make a mess.
*/
static int
vr_newbuf(struct vr_softc *sc, int idx)
{
struct vr_desc *desc;
struct vr_rxdesc *rxd;
struct mbuf *m;
bus_dma_segment_t segs[1];
bus_dmamap_t map;
int nsegs;
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MCLBYTES;
/* Reserve 8 bytes of head room (also keeps length under 2047). */
m_adj(m, sizeof(uint64_t));
/*
 * Load the new cluster into the spare map first so the old mbuf
 * stays mapped (and usable) if the load fails.
 */
if (bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_rx_tag,
sc->vr_cdata.vr_rx_sparemap, m, segs, &nsegs, 0) != 0) {
m_freem(m);
return (ENOBUFS);
}
KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
rxd = &sc->vr_cdata.vr_rxdesc[idx];
if (rxd->rx_m != NULL) {
bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap);
}
/* Swap the slot's map with the spare; the old map becomes the spare. */
map = rxd->rx_dmamap;
rxd->rx_dmamap = sc->vr_cdata.vr_rx_sparemap;
sc->vr_cdata.vr_rx_sparemap = map;
bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
BUS_DMASYNC_PREREAD);
rxd->rx_m = m;
desc = rxd->desc;
desc->vr_data = htole32(VR_ADDR_LO(segs[0].ds_addr));
desc->vr_ctl = htole32(VR_RXCTL | segs[0].ds_len);
/* OWN is set last: the descriptor must be complete before the chip sees it. */
desc->vr_status = htole32(VR_RXSTAT_OWN);
return (0);
}
#ifndef __NO_STRICT_ALIGNMENT
/*
 * Shift a received frame back by ETHER_ALIGN (2) bytes so the payload
 * after the Ethernet header ends up 32-bit aligned on strict-alignment
 * machines.  The copy runs front-to-back in 16-bit units, which is
 * safe for the overlapping regions because the destination precedes
 * the source.
 */
static __inline void
vr_fixup_rx(struct mbuf *m)
{
	uint16_t *from, *to;
	int n;

	from = mtod(m, uint16_t *);
	to = from - 1;
	for (n = m->m_len / sizeof(uint16_t) + 1; n > 0; n--)
		*to++ = *from++;
	m->m_data -= ETHER_ALIGN;
}
#endif
/*
* A frame has been uploaded: pass the resulting mbuf chain up to
* the higher level protocols.
*/
/*
 * Harvest completed Rx descriptors and pass good frames up the stack.
 * Returns the number of packets input (consumed by the polling path).
 * Called with the softc lock held; the lock is dropped around
 * if_input() and re-taken afterwards.
 */
static int
vr_rxeof(struct vr_softc *sc)
{
struct vr_rxdesc *rxd;
struct mbuf *m;
if_t ifp;
struct vr_desc *cur_rx;
int cons, prog, total_len, rx_npkts;
uint32_t rxstat, rxctl;
VR_LOCK_ASSERT(sc);
ifp = sc->vr_ifp;
cons = sc->vr_cdata.vr_rx_cons;
rx_npkts = 0;
bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
sc->vr_cdata.vr_rx_ring_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
/* Walk at most one full ring; stop at the first chip-owned slot. */
for (prog = 0; prog < VR_RX_RING_CNT; VR_INC(cons, VR_RX_RING_CNT)) {
#ifdef DEVICE_POLLING
if (if_getcapenable(ifp) & IFCAP_POLLING) {
if (sc->rxcycles <= 0)
break;
sc->rxcycles--;
}
#endif
cur_rx = &sc->vr_rdata.vr_rx_ring[cons];
rxstat = le32toh(cur_rx->vr_status);
rxctl = le32toh(cur_rx->vr_ctl);
if ((rxstat & VR_RXSTAT_OWN) == VR_RXSTAT_OWN)
break;
prog++;
rxd = &sc->vr_cdata.vr_rxdesc[cons];
m = rxd->rx_m;
/*
* If an error occurs, update stats, clear the
* status word and leave the mbuf cluster in place:
* it should simply get re-used next time this descriptor
* comes up in the ring.
* We don't support SG in Rx path yet, so discard
* partial frame.
*/
if ((rxstat & VR_RXSTAT_RX_OK) == 0 ||
(rxstat & (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) !=
(VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) {
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
sc->vr_stat.rx_errors++;
if (rxstat & VR_RXSTAT_CRCERR)
sc->vr_stat.rx_crc_errors++;
if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
sc->vr_stat.rx_alignment++;
if (rxstat & VR_RXSTAT_FIFOOFLOW)
sc->vr_stat.rx_fifo_overflows++;
if (rxstat & VR_RXSTAT_GIANT)
sc->vr_stat.rx_giants++;
if (rxstat & VR_RXSTAT_RUNT)
sc->vr_stat.rx_runts++;
if (rxstat & VR_RXSTAT_BUFFERR)
sc->vr_stat.rx_no_buffers++;
#ifdef VR_SHOW_ERRORS
device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
__func__, rxstat & 0xff, VR_RXSTAT_ERR_BITS);
#endif
vr_discard_rxbuf(rxd);
continue;
}
/* No replacement cluster: recycle the old one and drop the frame. */
if (vr_newbuf(sc, cons) != 0) {
if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
sc->vr_stat.rx_errors++;
sc->vr_stat.rx_no_mbufs++;
vr_discard_rxbuf(rxd);
continue;
}
/*
* XXX The VIA Rhine chip includes the CRC with every
* received frame, and there's no way to turn this
* behavior off (at least, I can't find anything in
* the manual that explains how to do it) so we have
* to trim off the CRC manually.
*/
total_len = VR_RXBYTES(rxstat);
total_len -= ETHER_CRC_LEN;
m->m_pkthdr.len = m->m_len = total_len;
#ifndef __NO_STRICT_ALIGNMENT
/*
* RX buffers must be 32-bit aligned.
* Ignore the alignment problems on the non-strict alignment
* platform. The performance hit incurred due to unaligned
* accesses is much smaller than the hit produced by forcing
* buffer copies all the time.
*/
vr_fixup_rx(m);
#endif
m->m_pkthdr.rcvif = ifp;
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
sc->vr_stat.rx_ok++;
if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
(rxstat & VR_RXSTAT_FRAG) == 0 &&
(rxctl & VR_RXCTL_IP) != 0) {
/* Checksum is valid for non-fragmented IP packets. */
m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) {
m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP)) {
m->m_pkthdr.csum_flags |=
CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
if ((rxctl & VR_RXCTL_TCPUDPOK) != 0)
m->m_pkthdr.csum_data = 0xffff;
}
}
}
/* Drop the lock across the network stack input path. */
VR_UNLOCK(sc);
if_input(ifp, m);
VR_LOCK(sc);
rx_npkts++;
}
if (prog > 0) {
/*
* Let controller know how many number of RX buffers
* are posted but avoid expensive register access if
* TX pause capability was not negotiated with link
* partner.
*/
if ((sc->vr_flags & VR_F_TXPAUSE) != 0) {
if (prog >= VR_RX_RING_CNT)
prog = VR_RX_RING_CNT - 1;
CSR_WRITE_1(sc, VR_FLOWCR0, prog);
}
sc->vr_cdata.vr_rx_cons = cons;
bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
sc->vr_cdata.vr_rx_ring_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
return (rx_npkts);
}
/*
* A frame was downloaded to the chip. It's safe for us to clean up
* the list buffers.
*/
/*
 * Reclaim completed Tx descriptors: free transmitted mbufs, account
 * errors/collisions, and drive abort/underrun recovery.  Called with
 * the softc lock held.
 */
static void
vr_txeof(struct vr_softc *sc)
{
struct vr_txdesc *txd;
struct vr_desc *cur_tx;
if_t ifp;
uint32_t txctl, txstat;
int cons, prod;
VR_LOCK_ASSERT(sc);
cons = sc->vr_cdata.vr_tx_cons;
prod = sc->vr_cdata.vr_tx_prod;
if (cons == prod)
return;
bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
sc->vr_cdata.vr_tx_ring_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
ifp = sc->vr_ifp;
/*
* Go through our tx list and free mbufs for those
* frames that have been transmitted.
*/
for (; cons != prod; VR_INC(cons, VR_TX_RING_CNT)) {
cur_tx = &sc->vr_rdata.vr_tx_ring[cons];
txctl = le32toh(cur_tx->vr_ctl);
txstat = le32toh(cur_tx->vr_status);
/* Still owned by the chip: stop reclaiming here. */
if ((txstat & VR_TXSTAT_OWN) == VR_TXSTAT_OWN)
break;
sc->vr_cdata.vr_tx_cnt--;
if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
/* Only the first descriptor in the chain is valid. */
if ((txctl & VR_TXCTL_FIRSTFRAG) == 0)
continue;
txd = &sc->vr_cdata.vr_txdesc[cons];
KASSERT(txd->tx_m != NULL, ("%s: accessing NULL mbuf!\n",
__func__));
if ((txstat & VR_TXSTAT_ERRSUM) != 0) {
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
sc->vr_stat.tx_errors++;
if ((txstat & VR_TXSTAT_ABRT) != 0) {
/* Give up and restart Tx. */
sc->vr_stat.tx_abort++;
bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
txd->tx_dmamap);
m_freem(txd->tx_m);
txd->tx_m = NULL;
VR_INC(cons, VR_TX_RING_CNT);
sc->vr_cdata.vr_tx_cons = cons;
if (vr_tx_stop(sc) != 0) {
device_printf(sc->vr_dev,
"%s: Tx shutdown error -- "
"resetting\n", __func__);
sc->vr_flags |= VR_F_RESTART;
return;
}
vr_tx_start(sc);
break;
}
if ((sc->vr_revid < REV_ID_VT3071_A &&
(txstat & VR_TXSTAT_UNDERRUN)) ||
(txstat & (VR_TXSTAT_UDF | VR_TXSTAT_TBUFF))) {
sc->vr_stat.tx_underrun++;
/* Retry and restart Tx. */
/* Re-own the descriptor so the chip retransmits it. */
sc->vr_cdata.vr_tx_cnt++;
sc->vr_cdata.vr_tx_cons = cons;
cur_tx->vr_status = htole32(VR_TXSTAT_OWN);
bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
sc->vr_cdata.vr_tx_ring_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
vr_tx_underrun(sc);
return;
}
if ((txstat & VR_TXSTAT_DEFER) != 0) {
if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
sc->vr_stat.tx_collisions++;
}
if ((txstat & VR_TXSTAT_LATECOLL) != 0) {
if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
sc->vr_stat.tx_late_collisions++;
}
} else {
sc->vr_stat.tx_ok++;
if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
}
bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
/* Collision count field differs between old and new revisions. */
if (sc->vr_revid < REV_ID_VT3071_A) {
if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
(txstat & VR_TXSTAT_COLLCNT) >> 3);
sc->vr_stat.tx_collisions +=
(txstat & VR_TXSTAT_COLLCNT) >> 3;
} else {
if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & 0x0f));
sc->vr_stat.tx_collisions += (txstat & 0x0f);
}
m_freem(txd->tx_m);
txd->tx_m = NULL;
}
sc->vr_cdata.vr_tx_cons = cons;
/* Ring fully drained: cancel the watchdog. */
if (sc->vr_cdata.vr_tx_cnt == 0)
sc->vr_watchdog_timer = 0;
}
/*
 * Once-per-second callout: restart the chip after a fatal error when
 * VR_F_RESTART is set, drive the MII tick, re-poll link state while
 * the link is down, run the Tx watchdog, and reschedule itself.
 */
static void
vr_tick(void *xsc)
{
	struct vr_softc *sc = xsc;
	struct mii_data *mii;

	VR_LOCK_ASSERT(sc);

	if ((sc->vr_flags & VR_F_RESTART) != 0) {
		device_printf(sc->vr_dev, "restarting\n");
		sc->vr_stat.num_restart++;
		if_setdrvflagbits(sc->vr_ifp, 0, IFF_DRV_RUNNING);
		vr_init_locked(sc);
		sc->vr_flags &= ~VR_F_RESTART;
	}

	mii = device_get_softc(sc->vr_miibus);
	mii_tick(mii);
	if ((sc->vr_flags & VR_F_LINK) == 0)
		vr_miibus_statchg(sc->vr_dev);
	vr_watchdog(sc);

	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}
#ifdef DEVICE_POLLING
static poll_handler_t vr_poll;
static poll_handler_t vr_poll_locked;
/*
 * DEVICE_POLLING entry point: take the softc lock and poll only while
 * the interface is marked running.  Returns packets received.
 */
static int
vr_poll(if_t ifp, enum poll_cmd cmd, int count)
{
struct vr_softc *sc;
int rx_npkts;
sc = if_getsoftc(ifp);
rx_npkts = 0;
VR_LOCK(sc);
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
rx_npkts = vr_poll_locked(ifp, cmd, count);
VR_UNLOCK(sc);
return (rx_npkts);
}
/*
 * Locked poll body: budgeted Rx (via sc->rxcycles), Tx reclaim and
 * queue restart; on POLL_AND_CHECK_STATUS it additionally acks the
 * ISR and runs the same error handling as the interrupt path.
 */
static int
vr_poll_locked(if_t ifp, enum poll_cmd cmd, int count)
{
struct vr_softc *sc;
int rx_npkts;
sc = if_getsoftc(ifp);
VR_LOCK_ASSERT(sc);
sc->rxcycles = count;
rx_npkts = vr_rxeof(sc);
vr_txeof(sc);
if (!if_sendq_empty(ifp))
vr_start_locked(ifp);
if (cmd == POLL_AND_CHECK_STATUS) {
uint16_t status;
/* Also check status register. */
status = CSR_READ_2(sc, VR_ISR);
if (status)
CSR_WRITE_2(sc, VR_ISR, status);
if ((status & VR_INTRS) == 0)
return (rx_npkts);
if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
VR_ISR_STATSOFLOW)) != 0) {
/* vr_error() returning non-zero means a restart is pending. */
if (vr_error(sc, status) != 0)
return (rx_npkts);
}
if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
#ifdef VR_SHOW_ERRORS
device_printf(sc->vr_dev, "%s: receive error : 0x%b\n",
__func__, status, VR_ISR_ERR_BITS);
#endif
vr_rx_start(sc);
}
}
return (rx_npkts);
}
#endif /* DEVICE_POLLING */
/* Back off the transmit threshold. */
/*
 * Tx FIFO underrun recovery: raise the Tx threshold one step (up to
 * store-and-forward), then stop and restart the transmitter.  Sets
 * VR_F_RESTART and returns early if the transmitter will not stop.
 * Note the device_printf()/printf() pair builds a single log line.
 */
static void
vr_tx_underrun(struct vr_softc *sc)
{
int thresh;
device_printf(sc->vr_dev, "Tx underrun -- ");
if (sc->vr_txthresh < VR_TXTHRESH_MAX) {
thresh = sc->vr_txthresh;
sc->vr_txthresh++;
if (sc->vr_txthresh >= VR_TXTHRESH_MAX) {
sc->vr_txthresh = VR_TXTHRESH_MAX;
printf("using store and forward mode\n");
} else
printf("increasing Tx threshold(%d -> %d)\n",
vr_tx_threshold_tables[thresh].value,
vr_tx_threshold_tables[thresh + 1].value);
} else
printf("\n");
sc->vr_stat.tx_underrun++;
if (vr_tx_stop(sc) != 0) {
device_printf(sc->vr_dev, "%s: Tx shutdown error -- "
"resetting\n", __func__);
sc->vr_flags |= VR_F_RESTART;
return;
}
vr_tx_start(sc);
}
/*
 * Interrupt filter (primary interrupt context): claim the interrupt,
 * mask further interrupts, and defer the real work to vr_int_task()
 * on the fast taskqueue.
 */
static int
vr_intr(void *arg)
{
	struct vr_softc *sc = arg;
	uint16_t isr;

	isr = CSR_READ_2(sc, VR_ISR);
	/* 0xffff reads back when the hardware is gone; 0 / no VR_INTRS
	 * bits means the interrupt is not ours. */
	if (isr == 0 || isr == 0xffff || (isr & VR_INTRS) == 0)
		return (FILTER_STRAY);

	/* Disable interrupts; vr_int_task() re-enables them when done. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	taskqueue_enqueue(taskqueue_fast, &sc->vr_inttask);
	return (FILTER_HANDLED);
}
/*
 * Deferred interrupt handler (fast taskqueue, scheduled by vr_intr).
 * Loops while interrupt sources remain asserted, processing errors,
 * Rx, Tx and queue restart, then re-enables the interrupt mask that
 * vr_intr() cleared.
 */
static void
vr_int_task(void *arg, int npending)
{
struct vr_softc *sc;
if_t ifp;
uint16_t status;
sc = (struct vr_softc *)arg;
VR_LOCK(sc);
if ((sc->vr_flags & VR_F_SUSPENDED) != 0)
goto done_locked;
status = CSR_READ_2(sc, VR_ISR);
ifp = sc->vr_ifp;
#ifdef DEVICE_POLLING
if ((if_getcapenable(ifp) & IFCAP_POLLING) != 0)
goto done_locked;
#endif
/* Suppress unwanted interrupts. */
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
(sc->vr_flags & VR_F_RESTART) != 0) {
CSR_WRITE_2(sc, VR_IMR, 0);
CSR_WRITE_2(sc, VR_ISR, status);
goto done_locked;
}
for (; (status & VR_INTRS) != 0;) {
/* Ack the sources we are about to service. */
CSR_WRITE_2(sc, VR_ISR, status);
if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
VR_ISR_STATSOFLOW)) != 0) {
/* Non-zero return: a reset is pending; bail out without
 * re-enabling interrupts. */
if (vr_error(sc, status) != 0) {
VR_UNLOCK(sc);
return;
}
}
vr_rxeof(sc);
if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
#ifdef VR_SHOW_ERRORS
device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
__func__, status, VR_ISR_ERR_BITS);
#endif
/* Restart Rx if RxDMA SM was stopped. */
vr_rx_start(sc);
}
vr_txeof(sc);
if (!if_sendq_empty(ifp))
vr_start_locked(ifp);
status = CSR_READ_2(sc, VR_ISR);
}
/* Re-enable interrupts. */
CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
done_locked:
VR_UNLOCK(sc);
}
/*
 * Handle error interrupt sources (bus error, link state change,
 * statistics overflow).  Returns EAGAIN when a PCI bus error forces a
 * restart -- callers must stop processing -- and 0 otherwise.  Each
 * handled bit is cleared from `status`; anything left over is logged
 * as unhandled.
 */
static int
vr_error(struct vr_softc *sc, uint16_t status)
{
uint16_t pcis;
status &= VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW;
if ((status & VR_ISR_BUSERR) != 0) {
status &= ~VR_ISR_BUSERR;
sc->vr_stat.bus_errors++;
/* Disable further interrupts. */
CSR_WRITE_2(sc, VR_IMR, 0);
pcis = pci_read_config(sc->vr_dev, PCIR_STATUS, 2);
device_printf(sc->vr_dev, "PCI bus error(0x%04x) -- "
"resetting\n", pcis);
/* Writing the status bits back clears them (write-1-to-clear). */
pci_write_config(sc->vr_dev, PCIR_STATUS, pcis, 2);
/* Defer the actual restart to vr_tick(). */
sc->vr_flags |= VR_F_RESTART;
return (EAGAIN);
}
if ((status & VR_ISR_LINKSTAT2) != 0) {
/* Link state change, duplex changes etc. */
status &= ~VR_ISR_LINKSTAT2;
}
if ((status & VR_ISR_STATSOFLOW) != 0) {
status &= ~VR_ISR_STATSOFLOW;
if (sc->vr_revid >= REV_ID_VT6105M_A0) {
/* Update MIB counters. */
}
}
if (status != 0)
device_printf(sc->vr_dev,
"unhandled interrupt, status = 0x%04x\n", status);
return (0);
}
/*
* Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
* pointers to the fragment pointers.
*/
/*
 * Encapsulate the mbuf chain *m_head into the Tx ring.  May
 * defragment, copy or pad the chain (updating *m_head); on
 * unrecoverable failure the chain is freed and *m_head set to NULL.
 * Returns 0 on success or an errno (ENOBUFS, EIO, or a bus_dma error).
 */
static int
vr_encap(struct vr_softc *sc, struct mbuf **m_head)
{
struct vr_txdesc *txd;
struct vr_desc *desc;
struct mbuf *m;
bus_dma_segment_t txsegs[VR_MAXFRAGS];
uint32_t csum_flags, txctl;
int error, i, nsegs, prod, si;
int padlen;
VR_LOCK_ASSERT(sc);
M_ASSERTPKTHDR((*m_head));
/*
* Some VIA Rhine wants packet buffers to be longword
* aligned, but very often our mbufs aren't. Rather than
* waste time trying to decide when to copy and when not
* to copy, just do it all the time.
*/
if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) {
m = m_defrag(*m_head, M_NOWAIT);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;
return (ENOBUFS);
}
*m_head = m;
}
/*
* The Rhine chip doesn't auto-pad, so we have to make
* sure to pad short frames out to the minimum frame length
* ourselves.
*/
if ((*m_head)->m_pkthdr.len < VR_MIN_FRAMELEN) {
m = *m_head;
padlen = VR_MIN_FRAMELEN - m->m_pkthdr.len;
if (M_WRITABLE(m) == 0) {
/* Get a writable copy. */
m = m_dup(*m_head, M_NOWAIT);
m_freem(*m_head);
if (m == NULL) {
*m_head = NULL;
return (ENOBUFS);
}
*m_head = m;
}
/* Padding needs contiguous trailing space in the last mbuf. */
if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) {
m = m_defrag(m, M_NOWAIT);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;
return (ENOBUFS);
}
}
/*
* Manually pad short frames, and zero the pad space
* to avoid leaking data.
*/
bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
m->m_pkthdr.len += padlen;
m->m_len = m->m_pkthdr.len;
*m_head = m;
}
prod = sc->vr_cdata.vr_tx_prod;
txd = &sc->vr_cdata.vr_txdesc[prod];
error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
*m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
/* EFBIG: too many fragments; collapse to VR_MAXFRAGS and retry once. */
if (error == EFBIG) {
m = m_collapse(*m_head, M_NOWAIT, VR_MAXFRAGS);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;
return (ENOBUFS);
}
*m_head = m;
error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag,
txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
if (error != 0) {
m_freem(*m_head);
*m_head = NULL;
return (error);
}
} else if (error != 0)
return (error);
if (nsegs == 0) {
m_freem(*m_head);
*m_head = NULL;
return (EIO);
}
/* Check number of available descriptors. */
if (sc->vr_cdata.vr_tx_cnt + nsegs >= (VR_TX_RING_CNT - 1)) {
bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
return (ENOBUFS);
}
txd->tx_m = *m_head;
bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
BUS_DMASYNC_PREWRITE);
/* Set checksum offload. */
csum_flags = 0;
if (((*m_head)->m_pkthdr.csum_flags & VR_CSUM_FEATURES) != 0) {
if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
csum_flags |= VR_TXCTL_IPCSUM;
if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
csum_flags |= VR_TXCTL_TCPCSUM;
if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
csum_flags |= VR_TXCTL_UDPCSUM;
}
/*
* Quite contrary to datasheet for VIA Rhine, VR_TXCTL_TLINK bit
* is required for all descriptors regardless of single or
* multiple buffers. Also VR_TXSTAT_OWN bit is valid only for
* the first descriptor for a multi-fragmented frames. Without
* that VIA Rhine chip generates Tx underrun interrupts and can't
* send any frames.
*/
si = prod;
for (i = 0; i < nsegs; i++) {
desc = &sc->vr_rdata.vr_tx_ring[prod];
desc->vr_status = 0;
txctl = txsegs[i].ds_len | VR_TXCTL_TLINK | csum_flags;
if (i == 0)
txctl |= VR_TXCTL_FIRSTFRAG;
desc->vr_ctl = htole32(txctl);
desc->vr_data = htole32(VR_ADDR_LO(txsegs[i].ds_addr));
sc->vr_cdata.vr_tx_cnt++;
VR_INC(prod, VR_TX_RING_CNT);
}
/* Update producer index. */
sc->vr_cdata.vr_tx_prod = prod;
/* Step back to the last descriptor written above. */
prod = (prod + VR_TX_RING_CNT - 1) % VR_TX_RING_CNT;
desc = &sc->vr_rdata.vr_tx_ring[prod];
/*
* Set EOP on the last descriptor and request Tx completion
* interrupt for every VR_TX_INTR_THRESH-th frames.
*/
VR_INC(sc->vr_cdata.vr_tx_pkts, VR_TX_INTR_THRESH);
if (sc->vr_cdata.vr_tx_pkts == 0)
desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG | VR_TXCTL_FINT);
else
desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG);
/* Lastly turn the first descriptor ownership to hardware. */
desc = &sc->vr_rdata.vr_tx_ring[si];
desc->vr_status |= htole32(VR_TXSTAT_OWN);
/* Sync descriptors. */
bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
sc->vr_cdata.vr_tx_ring_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
return (0);
}
/*
 * ifnet transmit entry point.  Takes the driver lock and defers all
 * real work to vr_start_locked().
 */
static void
vr_start(if_t ifp)
{
	struct vr_softc *sc = if_getsoftc(ifp);

	VR_LOCK(sc);
	vr_start_locked(ifp);
	VR_UNLOCK(sc);
}
/*
 * Drain the interface send queue into the TX descriptor ring.
 * Must be called with the driver lock held; does nothing unless the
 * interface is running, not output-blocked, and link is up.
 */
static void
vr_start_locked(if_t ifp)
{
	struct vr_softc *sc;
	struct mbuf *m_head;
	int enq;		/* number of frames queued this call */

	sc = if_getsoftc(ifp);
	VR_LOCK_ASSERT(sc);

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->vr_flags & VR_F_LINK) == 0)
		return;

	/* Stop one descriptor short of filling the ring entirely. */
	for (enq = 0; !if_sendq_empty(ifp) &&
	    sc->vr_cdata.vr_tx_cnt < VR_TX_RING_CNT - 2; ) {
		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (vr_encap(sc, &m_head)) {
			/* NULL m_head means vr_encap() freed the mbuf. */
			if (m_head == NULL)
				break;
			if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Tell the chip to start transmitting. */
		VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
		/* Set a timeout in case the chip goes out to lunch. */
		sc->vr_watchdog_timer = 5;
	}
}
/*
 * ifnet init entry point: serialize on the driver lock and let
 * vr_init_locked() program the hardware.
 */
static void
vr_init(void *xsc)
{
	struct vr_softc *sc = xsc;

	VR_LOCK(sc);
	vr_init_locked(sc);
	VR_UNLOCK(sc);
}
/*
 * Bring the interface up: reset the chip, program station address,
 * DMA/threshold configuration, descriptor rings, receive filter and
 * flow control, then enable the RX/TX MACs and interrupts.
 * Called with the driver lock held; a no-op if already running.
 */
static void
vr_init_locked(struct vr_softc *sc)
{
	if_t ifp;
	struct mii_data *mii;
	bus_addr_t addr;
	int i;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	mii = device_get_softc(sc->vr_miibus);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;

	/* Cancel pending I/O and free all RX/TX buffers. */
	vr_stop(sc);
	vr_reset(sc);

	/* Set our station address. */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, if_getlladdr(sc->vr_ifp)[i]);

	/* Set DMA size. */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, vr_tx_threshold_tables[sc->vr_txthresh].bcr_cfg);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, vr_tx_threshold_tables[sc->vr_txthresh].tx_cfg);

	/* Init circular RX list. */
	if (vr_rx_ring_init(sc) != 0) {
		device_printf(sc->vr_dev,
		    "initialization failed: no memory for rx buffers\n");
		vr_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	vr_tx_ring_init(sc);

	if ((sc->vr_quirks & VR_Q_CAM) != 0) {
		uint8_t vcam[2] = { 0, 0 };

		/* Disable VLAN hardware tag insertion/stripping. */
		VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN | VR_TXCFG_RXTAGCTL);
		/* Disable VLAN hardware filtering. */
		VR_CLRBIT(sc, VR_BCR1, VR_BCR1_VLANFILT_ENB);
		/* Disable all CAM entries. */
		vr_cam_mask(sc, VR_MCAST_CAM, 0);
		vr_cam_mask(sc, VR_VLAN_CAM, 0);
		/* Enable the first VLAN CAM. */
		vr_cam_data(sc, VR_VLAN_CAM, 0, vcam);
		vr_cam_mask(sc, VR_VLAN_CAM, 1);
	}

	/*
	 * Set up receive filter.
	 */
	vr_set_filter(sc);

	/*
	 * Load the address of the RX ring.
	 */
	addr = VR_RX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
	/*
	 * Load the address of the TX ring.
	 */
	addr = VR_TX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
	/* Default : full-duplex, no Tx poll. */
	CSR_WRITE_1(sc, VR_CR1, VR_CR1_FULLDUPLEX | VR_CR1_TX_NOPOLL);

	/* Set flow-control parameters for Rhine III. */
	if (sc->vr_revid >= REV_ID_VT6105_A0) {
		/*
		 * Configure Rx buffer count available for incoming
		 * packet.
		 * Even though data sheet says almost nothing about
		 * this register, this register should be updated
		 * whenever driver adds new RX buffers to controller.
		 * Otherwise, XON frame is not sent to link partner
		 * even if controller has enough RX buffers and you
		 * would be isolated from network.
		 * The controller is not smart enough to know number
		 * of available RX buffers so driver have to let
		 * controller know how many RX buffers are posted.
		 * In other words, this register works like a residue
		 * counter for RX buffers and should be initialized
		 * to the number of total RX buffers - 1 before
		 * enabling RX MAC. Note, this register is 8bits so
		 * it effectively limits the maximum number of RX
		 * buffer to be configured by controller is 255.
		 */
		CSR_WRITE_1(sc, VR_FLOWCR0, VR_RX_RING_CNT - 1);
		/*
		 * Tx pause low threshold : 8 free receive buffers
		 * Tx pause XON high threshold : 24 free receive buffers
		 */
		CSR_WRITE_1(sc, VR_FLOWCR1,
		    VR_FLOWCR1_TXLO8 | VR_FLOWCR1_TXHI24 | VR_FLOWCR1_XONXOFF);
		/* Set Tx pause timer. */
		CSR_WRITE_2(sc, VR_PAUSETIMER, 0xffff);
	}

	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, VR_CR0,
	    VR_CR0_START | VR_CR0_TX_ON | VR_CR0_RX_ON | VR_CR0_RX_GO);

	/* Ack any stale interrupt status before unmasking. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		CSR_WRITE_2(sc, VR_IMR, 0);
	else
#endif
	/*
	 * Enable interrupts and disable MII intrs.
	 */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
	if (sc->vr_revid > REV_ID_VT6102_A)
		CSR_WRITE_2(sc, VR_MII_IMR, 0);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	/* Link state is re-learned via mii_mediachg()/vr_tick(). */
	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
	mii_mediachg(mii);

	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}
/*
 * Set media options: reset every attached PHY and kick off a fresh
 * autonegotiation/media change via mii(4).
 */
static int
vr_ifmedia_upd(if_t ifp)
{
	struct vr_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	VR_LOCK(sc);
	mii = device_get_softc(sc->vr_miibus);
	/* Start each PHY from a known state before renegotiating. */
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	/* Link and TX-pause state will be re-resolved by the PHY. */
	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
	error = mii_mediachg(mii);
	VR_UNLOCK(sc);

	return (error);
}
/*
 * Report current media status via mii(4).  Nothing is reported while
 * the interface is administratively down.
 */
static void
vr_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct vr_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii = device_get_softc(sc->vr_miibus);

	VR_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) == 0) {
		VR_UNLOCK(sc);
		return;
	}
	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	VR_UNLOCK(sc);
}
/*
 * ifnet ioctl handler: interface flags, multicast filter updates,
 * media changes and capability (checksum/WOL/polling) toggles.
 * Everything else is passed through to ether_ioctl().
 */
static int
vr_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct vr_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		VR_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/*
				 * Only reprogram the RX filter when the
				 * promisc/allmulti bits actually changed.
				 */
				if ((if_getflags(ifp) ^ sc->vr_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					vr_set_filter(sc);
			} else {
				if ((sc->vr_flags & VR_F_DETACHED) == 0)
					vr_init_locked(sc);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				vr_stop(sc);
		}
		sc->vr_if_flags = if_getflags(ifp);
		VR_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VR_LOCK(sc);
		vr_set_filter(sc);
		VR_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vr_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		/* mask holds the capabilities the caller wants toggled. */
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(vr_poll, ifp);
				if (error != 0)
					break;
				VR_LOCK(sc);
				/* Disable interrupts. */
				CSR_WRITE_2(sc, VR_IMR, 0x0000);
				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
				VR_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				VR_LOCK(sc);
				CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
				VR_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & if_getcapabilities(ifp)) != 0) {
			if_togglecapenable(ifp, IFCAP_TXCSUM);
			/* Keep if_hwassist in sync with TXCSUM state. */
			if ((IFCAP_TXCSUM & if_getcapenable(ifp)) != 0)
				if_sethwassistbits(ifp, VR_CSUM_FEATURES, 0);
			else
				if_sethwassistbits(ifp, 0, VR_CSUM_FEATURES);
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & if_getcapabilities(ifp)) != 0)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if ((mask & IFCAP_WOL_UCAST) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL_UCAST) != 0)
			if_togglecapenable(ifp, IFCAP_WOL_UCAST);
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0)
			if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*
 * Per-tick TX watchdog.  The timer is armed by the transmit path; if
 * it expires with descriptors still outstanding the chip is assumed
 * wedged and the interface is reinitialized.
 */
static void
vr_watchdog(struct vr_softc *sc)
{
	if_t ifp;

	VR_LOCK_ASSERT(sc);

	/* Not armed, or armed but not yet expired. */
	if (sc->vr_watchdog_timer == 0 || --sc->vr_watchdog_timer)
		return;

	ifp = sc->vr_ifp;
	/*
	 * Reclaim first as we don't request interrupt for every packets.
	 */
	vr_txeof(sc);
	if (sc->vr_cdata.vr_tx_cnt == 0)
		return;

	if ((sc->vr_flags & VR_F_LINK) == 0) {
		if (bootverbose)
			if_printf(sc->vr_ifp, "watchdog timeout "
			   "(missed link)\n");
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		/* Clear RUNNING so vr_init_locked() reprograms the chip. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		vr_init_locked(sc);
		return;
	}

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_printf(ifp, "watchdog timeout\n");

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	vr_init_locked(sc);

	if (!if_sendq_empty(ifp))
		vr_start_locked(ifp);
}
/*
 * (Re)start the transmit MAC.  If TX is currently off, point the chip
 * at the current TX consumer descriptor first, then poke TX_GO and
 * re-arm the watchdog when frames are still pending.
 */
static void
vr_tx_start(struct vr_softc *sc)
{
	bus_addr_t ring_addr;
	uint8_t ctl;

	ctl = CSR_READ_1(sc, VR_CR0);
	if ((ctl & VR_CR0_TX_ON) == 0) {
		ring_addr = VR_TX_RING_ADDR(sc, sc->vr_cdata.vr_tx_cons);
		CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(ring_addr));
		CSR_WRITE_1(sc, VR_CR0, ctl | VR_CR0_TX_ON);
	}
	if (sc->vr_cdata.vr_tx_cnt != 0) {
		sc->vr_watchdog_timer = 5;
		VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
	}
}
/*
 * (Re)start the receive MAC.  If RX is currently off, reload the RX
 * descriptor pointer first, then kick RX_GO.
 */
static void
vr_rx_start(struct vr_softc *sc)
{
	bus_addr_t ring_addr;
	uint8_t ctl;

	ctl = CSR_READ_1(sc, VR_CR0);
	if ((ctl & VR_CR0_RX_ON) == 0) {
		ring_addr = VR_RX_RING_ADDR(sc, sc->vr_cdata.vr_rx_cons);
		CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(ring_addr));
		ctl |= VR_CR0_RX_ON;
		CSR_WRITE_1(sc, VR_CR0, ctl);
	}
	CSR_WRITE_1(sc, VR_CR0, ctl | VR_CR0_RX_GO);
}
/*
 * Stop the transmit MAC and poll until the chip acknowledges.
 * Returns 0 on success, ETIMEDOUT if TX never shuts down.
 */
static int
vr_tx_stop(struct vr_softc *sc)
{
	uint8_t ctl;
	int n;

	ctl = CSR_READ_1(sc, VR_CR0);
	if ((ctl & VR_CR0_TX_ON) == 0)
		return (0);

	CSR_WRITE_1(sc, VR_CR0, ctl & ~VR_CR0_TX_ON);
	for (n = 0; n < VR_TIMEOUT; n++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VR_CR0) & VR_CR0_TX_ON) == 0)
			return (0);
	}
	return (ETIMEDOUT);
}
/*
 * Stop the receive MAC and poll until the chip acknowledges.
 * Returns 0 on success, ETIMEDOUT if RX never shuts down.
 */
static int
vr_rx_stop(struct vr_softc *sc)
{
	uint8_t ctl;
	int n;

	ctl = CSR_READ_1(sc, VR_CR0);
	if ((ctl & VR_CR0_RX_ON) == 0)
		return (0);

	CSR_WRITE_1(sc, VR_CR0, ctl & ~VR_CR0_RX_ON);
	for (n = 0; n < VR_TIMEOUT; n++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VR_CR0) & VR_CR0_RX_ON) == 0)
			return (0);
	}
	return (ETIMEDOUT);
}
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vr_stop(struct vr_softc *sc)
{
	struct vr_txdesc *txd;
	struct vr_rxdesc *rxd;
	if_t ifp;
	int i;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	sc->vr_watchdog_timer = 0;

	callout_stop(&sc->vr_stat_callout);
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));

	CSR_WRITE_1(sc, VR_CR0, VR_CR0_STOP);
	if (vr_rx_stop(sc) != 0)
		device_printf(sc->vr_dev, "%s: Rx shutdown error\n", __func__);
	if (vr_tx_stop(sc) != 0)
		device_printf(sc->vr_dev, "%s: Tx shutdown error\n", __func__);
	/* Clear pending interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < VR_RX_RING_CNT; i++) {
		rxd = &sc->vr_cdata.vr_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vr_cdata.vr_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vr_cdata.vr_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VR_TX_RING_CNT; i++) {
		txd = &sc->vr_cdata.vr_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}
/*
 * Stop all chip I/O so that the kernel's probe routines don't get
 * confused by errant DMAs when rebooting.  Shutdown is identical to
 * suspend: quiesce the chip and arm WOL if configured.
 */
static int
vr_shutdown(device_t dev)
{

	return (vr_suspend(dev));
}
/*
 * Suspend handler: quiesce the chip, program wake-on-LAN state and
 * mark the softc suspended.
 */
static int
vr_suspend(device_t dev)
{
	struct vr_softc *sc = device_get_softc(dev);

	VR_LOCK(sc);
	vr_stop(sc);
	vr_setwol(sc);
	sc->vr_flags |= VR_F_SUSPENDED;
	VR_UNLOCK(sc);

	return (0);
}
/*
 * Resume handler: undo WOL configuration, reset the chip and bring
 * the interface back up if it was up before suspend.
 */
static int
vr_resume(device_t dev)
{
	struct vr_softc *sc = device_get_softc(dev);
	if_t ifp;

	VR_LOCK(sc);
	ifp = sc->vr_ifp;
	vr_clrwol(sc);
	vr_reset(sc);
	if (if_getflags(ifp) & IFF_UP)
		vr_init_locked(sc);
	sc->vr_flags &= ~VR_F_SUSPENDED;
	VR_UNLOCK(sc);

	return (0);
}
/*
 * Program wake-on-LAN state before suspend: clear any stale WOL
 * configuration, arm the requested wakeup events, put the chip into
 * its sleep state and request PME from the PCI power-management
 * capability.  Only Rhine II (VT6102 A) and newer support WOL.
 */
static void
vr_setwol(struct vr_softc *sc)
{
	if_t ifp;
	int pmc;
	uint16_t pmstat;
	uint8_t v;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_revid < REV_ID_VT6102_A ||
	    pci_find_cap(sc->vr_dev, PCIY_PMG, &pmc) != 0)
		return;

	ifp = sc->vr_ifp;

	/* Clear WOL configuration. */
	CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
	CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
	if (sc->vr_revid > REV_ID_VT6105_B0) {
		/* Newer Rhine III supports two additional patterns. */
		CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
		CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
		CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
	}
	if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) != 0)
		CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_UCAST);
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
		CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_MAGIC);
	/*
	 * It seems that multicast wakeup frames require programming pattern
	 * registers and valid CRC as well as pattern mask for each pattern.
	 * While it's possible to setup such a pattern it would complicate
	 * WOL configuration so ignore multicast wakeup frames.
	 */
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) {
		CSR_WRITE_1(sc, VR_WOLCFG_SET, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
		v = CSR_READ_1(sc, VR_STICKHW);
		CSR_WRITE_1(sc, VR_STICKHW, v | VR_STICKHW_WOL_ENB);
		CSR_WRITE_1(sc, VR_PWRCFG_SET, VR_PWRCFG_WOLEN);
	}
	/* Put hardware into sleep. */
	v = CSR_READ_1(sc, VR_STICKHW);
	v |= VR_STICKHW_DS0 | VR_STICKHW_DS1;
	CSR_WRITE_1(sc, VR_STICKHW, v);
	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}
/*
 * Undo wake-on-LAN state on resume: take the chip out of sleep and
 * clear all WOL event/pattern configuration so it cannot interfere
 * with normal operation.  Only applies to VT6102 A and newer.
 */
static void
vr_clrwol(struct vr_softc *sc)
{
	uint8_t v;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_revid < REV_ID_VT6102_A)
		return;

	/* Take hardware out of sleep. */
	v = CSR_READ_1(sc, VR_STICKHW);
	v &= ~(VR_STICKHW_DS0 | VR_STICKHW_DS1 | VR_STICKHW_WOL_ENB);
	CSR_WRITE_1(sc, VR_STICKHW, v);

	/* Clear WOL configuration as WOL may interfere normal operation. */
	CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_WOLCFG_CLR,
	    VR_WOLCFG_SAB | VR_WOLCFG_SAM | VR_WOLCFG_PMEOVR);
	CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
	if (sc->vr_revid > REV_ID_VT6105_B0) {
		/* Newer Rhine III supports two additional patterns. */
		CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
		CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
		CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
	}
}
/*
 * Sysctl handler that dumps the driver's software statistics to the
 * console.  Writing 1 to the node triggers the dump; any other value
 * is ignored.
 */
static int
vr_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	struct vr_softc *sc;
	struct vr_statistics *stat;
	int error;
	int result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	/* Read-only access or an error: nothing to do. */
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (result == 1) {
		sc = (struct vr_softc *)arg1;
		stat = &sc->vr_stat;

		printf("%s statistics:\n", device_get_nameunit(sc->vr_dev));
		printf("Outbound good frames : %ju\n",
		    (uintmax_t)stat->tx_ok);
		printf("Inbound good frames : %ju\n",
		    (uintmax_t)stat->rx_ok);
		printf("Outbound errors : %u\n", stat->tx_errors);
		printf("Inbound errors : %u\n", stat->rx_errors);
		printf("Inbound no buffers : %u\n", stat->rx_no_buffers);
		/* Use %u like the other unsigned counters. */
		printf("Inbound no mbuf clusters: %u\n", stat->rx_no_mbufs);
		printf("Inbound FIFO overflows : %u\n",
		    stat->rx_fifo_overflows);
		printf("Inbound CRC errors : %u\n", stat->rx_crc_errors);
		printf("Inbound frame alignment errors : %u\n",
		    stat->rx_alignment);
		printf("Inbound giant frames : %u\n", stat->rx_giants);
		printf("Inbound runt frames : %u\n", stat->rx_runts);
		printf("Outbound aborted with excessive collisions : %u\n",
		    stat->tx_abort);
		printf("Outbound collisions : %u\n", stat->tx_collisions);
		printf("Outbound late collisions : %u\n",
		    stat->tx_late_collisions);
		printf("Outbound underrun : %u\n", stat->tx_underrun);
		printf("PCI bus errors : %u\n", stat->bus_errors);
		printf("driver restarted due to Rx/Tx shutdown failure : %u\n",
		    stat->num_restart);
	}

	return (error);
}
diff --git a/sys/dev/vte/if_vte.c b/sys/dev/vte/if_vte.c
index d4e1553c432d..64da34364341 100644
--- a/sys/dev/vte/if_vte.c
+++ b/sys/dev/vte/if_vte.c
@@ -1,2080 +1,2074 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* Driver for DM&P Electronics, Inc, Vortex86 RDC R6040 FastEthernet. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <dev/vte/if_vtereg.h>
#include <dev/vte/if_vtevar.h>
/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
MODULE_DEPEND(vte, pci, 1, 1, 1);
MODULE_DEPEND(vte, ether, 1, 1, 1);
MODULE_DEPEND(vte, miibus, 1, 1, 1);
/* Tunables. */
/* Non-zero: copy TX mbufs into pre-allocated bounce buffers. */
static int tx_deep_copy = 1;
TUNABLE_INT("hw.vte.tx_deep_copy", &tx_deep_copy);

/*
 * Devices supported by this driver.
 */
/* PCI vendor/device IDs recognized by vte(4); NULL name terminates. */
static const struct vte_ident vte_ident_table[] = {
	{ VENDORID_RDC, DEVICEID_RDC_R6040, "RDC R6040 FastEthernet"},
	{ 0, 0, NULL}
};
static int vte_attach(device_t);
static int vte_detach(device_t);
static int vte_dma_alloc(struct vte_softc *);
static void vte_dma_free(struct vte_softc *);
static void vte_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static struct vte_txdesc *
vte_encap(struct vte_softc *, struct mbuf **);
static const struct vte_ident *
vte_find_ident(device_t);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
vte_fixup_rx(if_t, struct mbuf *);
#endif
static void vte_get_macaddr(struct vte_softc *);
static void vte_init(void *);
static void vte_init_locked(struct vte_softc *);
static int vte_init_rx_ring(struct vte_softc *);
static int vte_init_tx_ring(struct vte_softc *);
static void vte_intr(void *);
static int vte_ioctl(if_t, u_long, caddr_t);
static uint64_t vte_get_counter(if_t, ift_counter);
static void vte_mac_config(struct vte_softc *);
static int vte_miibus_readreg(device_t, int, int);
static void vte_miibus_statchg(device_t);
static int vte_miibus_writereg(device_t, int, int, int);
static int vte_mediachange(if_t);
static int vte_mediachange_locked(if_t);
static void vte_mediastatus(if_t, struct ifmediareq *);
static int vte_newbuf(struct vte_softc *, struct vte_rxdesc *);
static int vte_probe(device_t);
static void vte_reset(struct vte_softc *);
static int vte_resume(device_t);
static void vte_rxeof(struct vte_softc *);
static void vte_rxfilter(struct vte_softc *);
static int vte_shutdown(device_t);
static void vte_start(if_t);
static void vte_start_locked(struct vte_softc *);
static void vte_start_mac(struct vte_softc *);
static void vte_stats_clear(struct vte_softc *);
static void vte_stats_update(struct vte_softc *);
static void vte_stop(struct vte_softc *);
static void vte_stop_mac(struct vte_softc *);
static int vte_suspend(device_t);
static void vte_sysctl_node(struct vte_softc *);
static void vte_tick(void *);
static void vte_txeof(struct vte_softc *);
static void vte_watchdog(struct vte_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS);
/* newbus device and miibus method dispatch table for vte(4). */
static device_method_t vte_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		vte_probe),
	DEVMETHOD(device_attach,	vte_attach),
	DEVMETHOD(device_detach,	vte_detach),
	DEVMETHOD(device_shutdown,	vte_shutdown),
	DEVMETHOD(device_suspend,	vte_suspend),
	DEVMETHOD(device_resume,	vte_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	vte_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vte_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vte_miibus_statchg),

	DEVMETHOD_END
};

static driver_t vte_driver = {
	"vte",
	vte_methods,
	sizeof(struct vte_softc)
};

/* Attach vte to the PCI bus and miibus below vte. */
DRIVER_MODULE(vte, pci, vte_driver, 0, 0);
DRIVER_MODULE(miibus, vte, miibus_driver, 0, 0);
/*
 * Read a PHY register via the MDIO interface.  Polls for completion;
 * returns 0 (and logs) if the controller never clears the READ bit.
 */
static int
vte_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vte_softc *sc = device_get_softc(dev);
	int n;

	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (n = 0; n < VTE_PHY_TIMEOUT; n++) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
			return (CSR_READ_2(sc, VTE_MMRD));
	}

	device_printf(sc->vte_dev, "phy read timeout : %d\n", reg);
	return (0);
}
/*
 * Write a PHY register via the MDIO interface.  Polls for completion
 * and logs on timeout; always returns 0.
 */
static int
vte_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct vte_softc *sc = device_get_softc(dev);
	int n;

	CSR_WRITE_2(sc, VTE_MMWD, val);
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (n = 0; n < VTE_PHY_TIMEOUT; n++) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
			return (0);
	}

	device_printf(sc->vte_dev, "phy write timeout : %d\n", reg);
	return (0);
}
/*
 * miibus link-state change callback: re-resolve link, reprogram the
 * interrupt moderation timers for the negotiated speed, and restart
 * the MACs with the resolved duplex settings.
 */
static void
vte_miibus_statchg(device_t dev)
{
	struct vte_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	uint16_t val;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->vte_miibus);
	ifp = sc->vte_ifp;
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	sc->vte_flags &= ~VTE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vte_flags |= VTE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Program MACs with resolved duplex and flow control. */
	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
		/*
		 * Timer waiting time : (63 + TIMER * 64) MII clock.
		 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps).
		 */
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MRICR, val);

		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MTICR, val);

		vte_mac_config(sc);
		vte_start_mac(sc);
	}
}
/*
 * Report current media status via mii(4).  Nothing is reported while
 * the interface is administratively down.
 */
static void
vte_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
	struct vte_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii;

	VTE_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) == 0) {
		VTE_UNLOCK(sc);
		return;
	}
	mii = device_get_softc(sc->vte_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	VTE_UNLOCK(sc);
}
/*
 * ifmedia change entry point: take the driver lock and defer to
 * vte_mediachange_locked().
 */
static int
vte_mediachange(if_t ifp)
{
	struct vte_softc *sc = if_getsoftc(ifp);
	int rv;

	VTE_LOCK(sc);
	rv = vte_mediachange_locked(ifp);
	VTE_UNLOCK(sc);

	return (rv);
}
/*
 * Apply a media change: reset every attached PHY to a known state,
 * then renegotiate via mii(4).  Caller holds the driver lock.
 */
static int
vte_mediachange_locked(if_t ifp)
{
	struct vte_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii;
	struct mii_softc *miisc;

	mii = device_get_softc(sc->vte_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	return (mii_mediachg(mii));
}
/*
 * Look up the device's PCI vendor/device pair in the support table.
 * Returns the matching entry or NULL when unsupported.
 */
static const struct vte_ident *
vte_find_ident(device_t dev)
{
	const struct vte_ident *entry;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (entry = vte_ident_table; entry->name != NULL; entry++) {
		if (entry->vendorid == vid && entry->deviceid == did)
			return (entry);
	}

	return (NULL);
}
/*
 * newbus probe: claim the device if it appears in the support table.
 */
static int
vte_probe(device_t dev)
{
	const struct vte_ident *id;

	id = vte_find_ident(dev);
	if (id == NULL)
		return (ENXIO);

	device_set_desc(dev, id->name);
	return (BUS_PROBE_DEFAULT);
}
/*
 * Read the station address the BIOS programmed into the MID0
 * registers (low byte first in each 16-bit register).
 */
static void
vte_get_macaddr(struct vte_softc *sc)
{
	uint16_t w;

	/*
	 * It seems there is no way to reload station address and
	 * it is supposed to be set by BIOS.
	 */
	w = CSR_READ_2(sc, VTE_MID0L);
	sc->vte_eaddr[0] = w & 0xFF;
	sc->vte_eaddr[1] = (w >> 8) & 0xFF;
	w = CSR_READ_2(sc, VTE_MID0M);
	sc->vte_eaddr[2] = w & 0xFF;
	sc->vte_eaddr[3] = (w >> 8) & 0xFF;
	w = CSR_READ_2(sc, VTE_MID0H);
	sc->vte_eaddr[4] = w & 0xFF;
	sc->vte_eaddr[5] = (w >> 8) & 0xFF;
}
/*
 * newbus attach: map registers (memory BAR preferred, I/O fallback),
 * allocate IRQ and DMA resources, read the BIOS-set MAC address,
 * create the ifnet, attach the PHY via mii(4) and hook the interrupt.
 * On any failure, vte_detach() is invoked to unwind partial state.
 */
static int
vte_attach(device_t dev)
{
	struct vte_softc *sc;
	if_t ifp;
	uint16_t macid;
	int error, rid;

	error = 0;
	sc = device_get_softc(dev);
	sc->vte_dev = dev;

	mtx_init(&sc->vte_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vte_tick_ch, &sc->vte_mtx, 0);
	sc->vte_ident = vte_find_ident(dev);

	/* Map the device. */
	pci_enable_busmaster(dev);
	sc->vte_res_id = PCIR_BAR(1);
	sc->vte_res_type = SYS_RES_MEMORY;
	sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
	    &sc->vte_res_id, RF_ACTIVE);
	if (sc->vte_res == NULL) {
		/* Fall back to I/O port space. */
		sc->vte_res_id = PCIR_BAR(0);
		sc->vte_res_type = SYS_RES_IOPORT;
		sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
		    &sc->vte_res_id, RF_ACTIVE);
		if (sc->vte_res == NULL) {
			device_printf(dev, "cannot map memory/ports.\n");
			mtx_destroy(&sc->vte_mtx);
			return (ENXIO);
		}
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n",
		    sc->vte_res_type == SYS_RES_MEMORY ? "memory" : "I/O");
		device_printf(dev, "MAC Identifier : 0x%04x\n",
		    CSR_READ_2(sc, VTE_MACID));
		macid = CSR_READ_2(sc, VTE_MACID_REV);
		device_printf(dev, "MAC Id. 0x%02x, Rev. 0x%02x\n",
		    (macid & VTE_MACID_MASK) >> VTE_MACID_SHIFT,
		    (macid & VTE_MACID_REV_MASK) >> VTE_MACID_REV_SHIFT);
	}

	rid = 0;
	sc->vte_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->vte_irq == NULL) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		error = ENXIO;
		goto fail;
	}

	/* Reset the ethernet controller. */
	vte_reset(sc);

	if ((error = vte_dma_alloc(sc)) != 0)
		goto fail;

	/* Create device sysctl node. */
	vte_sysctl_node(sc);

	/* Load station address. */
	vte_get_macaddr(sc);

	ifp = sc->vte_ifp = if_alloc(IFT_ETHER);
-	if (ifp == NULL) {
-		device_printf(dev, "cannot allocate ifnet structure.\n");
-		error = ENXIO;
-		goto fail;
-	}
-
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, vte_ioctl);
	if_setstartfn(ifp, vte_start);
	if_setinitfn(ifp, vte_init);
	if_setgetcounterfn(ifp, vte_get_counter);
	if_setsendqlen(ifp, VTE_TX_RING_CNT - 1);
	if_setsendqready(ifp);

	/*
	 * Set up MII bus.
	 * BIOS would have initialized VTE_MPSCCR to catch PHY
	 * status changes so driver may be able to extract
	 * configured PHY address.  Since it's common to see BIOS
	 * fails to initialize the register(including the sample
	 * board I have), let mii(4) probe it.  This is more
	 * reliable than relying on BIOS's initialization.
	 *
	 * Advertising flow control capability to mii(4) was
	 * intentionally disabled due to severe problems in TX
	 * pause frame generation.  See vte_rxeof() for more
	 * details.
	 */
	error = mii_attach(dev, &sc->vte_miibus, ifp, vte_mediachange,
	    vte_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->vte_eaddr);

	/* VLAN capability setup. */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));
	/* Tell the upper layer we support VLAN over-sized frames. */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	error = bus_setup_intr(dev, sc->vte_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, vte_intr, sc, &sc->vte_intrhand);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	/* Shared unwind path: vte_detach() tolerates partial attach. */
	if (error != 0)
		vte_detach(dev);

	return (error);
}
/*
 * newbus detach: stop the chip, tear down the ifnet, miibus,
 * interrupt, bus resources and DMA state.  Also used by vte_attach()
 * to unwind a partial attach, so every release is NULL-guarded.
 */
static int
vte_detach(device_t dev)
{
	struct vte_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);

	ifp = sc->vte_ifp;
	if (device_is_attached(dev)) {
		VTE_LOCK(sc);
		vte_stop(sc);
		VTE_UNLOCK(sc);
		callout_drain(&sc->vte_tick_ch);
		ether_ifdetach(ifp);
	}

	if (sc->vte_miibus != NULL) {
		device_delete_child(dev, sc->vte_miibus);
		sc->vte_miibus = NULL;
	}
	bus_generic_detach(dev);

	if (sc->vte_intrhand != NULL) {
		bus_teardown_intr(dev, sc->vte_irq, sc->vte_intrhand);
		sc->vte_intrhand = NULL;
	}
	if (sc->vte_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vte_irq);
		sc->vte_irq = NULL;
	}
	if (sc->vte_res != NULL) {
		bus_release_resource(dev, sc->vte_res_type, sc->vte_res_id,
		    sc->vte_res);
		sc->vte_res = NULL;
	}
	if (ifp != NULL) {
		if_free(ifp);
		sc->vte_ifp = NULL;
	}
	vte_dma_free(sc);
	mtx_destroy(&sc->vte_mtx);

	return (0);
}
/* Shorthand: add a read-only 32-bit statistics counter sysctl. */
#define	VTE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
static void
vte_sysctl_node(struct vte_softc *sc)
{
struct sysctl_ctx_list *ctx;
struct sysctl_oid_list *child, *parent;
struct sysctl_oid *tree;
struct vte_hw_stats *stats;
int error;
stats = &sc->vte_stats;
ctx = device_get_sysctl_ctx(sc->vte_dev);
child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vte_dev));
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
&sc->vte_int_rx_mod, 0, sysctl_hw_vte_int_mod, "I",
"vte RX interrupt moderation");
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
&sc->vte_int_tx_mod, 0, sysctl_hw_vte_int_mod, "I",
"vte TX interrupt moderation");
/* Pull in device tunables. */
sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
error = resource_int_value(device_get_name(sc->vte_dev),
device_get_unit(sc->vte_dev), "int_rx_mod", &sc->vte_int_rx_mod);
if (error == 0) {
if (sc->vte_int_rx_mod < VTE_IM_BUNDLE_MIN ||
sc->vte_int_rx_mod > VTE_IM_BUNDLE_MAX) {
device_printf(sc->vte_dev, "int_rx_mod value out of "
"range; using default: %d\n",
VTE_IM_RX_BUNDLE_DEFAULT);
sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
}
}
sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
error = resource_int_value(device_get_name(sc->vte_dev),
device_get_unit(sc->vte_dev), "int_tx_mod", &sc->vte_int_tx_mod);
if (error == 0) {
if (sc->vte_int_tx_mod < VTE_IM_BUNDLE_MIN ||
sc->vte_int_tx_mod > VTE_IM_BUNDLE_MAX) {
device_printf(sc->vte_dev, "int_tx_mod value out of "
"range; using default: %d\n",
VTE_IM_TX_BUNDLE_DEFAULT);
sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
}
}
tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "VTE statistics");
parent = SYSCTL_CHILDREN(tree);
/* RX statistics. */
tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX MAC statistics");
child = SYSCTL_CHILDREN(tree);
VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
&stats->rx_frames, "Good frames");
VTE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
&stats->rx_bcast_frames, "Good broadcast frames");
VTE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
&stats->rx_mcast_frames, "Good multicast frames");
VTE_SYSCTL_STAT_ADD32(ctx, child, "runt",
&stats->rx_runts, "Too short frames");
VTE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
&stats->rx_crcerrs, "CRC errors");
VTE_SYSCTL_STAT_ADD32(ctx, child, "long_frames",
&stats->rx_long_frames,
"Frames that have longer length than maximum packet length");
VTE_SYSCTL_STAT_ADD32(ctx, child, "fifo_full",
&stats->rx_fifo_full, "FIFO full");
VTE_SYSCTL_STAT_ADD32(ctx, child, "desc_unavail",
&stats->rx_desc_unavail, "Descriptor unavailable frames");
VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
&stats->rx_pause_frames, "Pause control frames");
/* TX statistics. */
tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX MAC statistics");
child = SYSCTL_CHILDREN(tree);
VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
&stats->tx_frames, "Good frames");
VTE_SYSCTL_STAT_ADD32(ctx, child, "underruns",
&stats->tx_underruns, "FIFO underruns");
VTE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
&stats->tx_late_colls, "Late collisions");
VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
&stats->tx_pause_frames, "Pause control frames");
}
#undef VTE_SYSCTL_STAT_ADD32
/* Result slot for the single-segment DMA load callback below. */
struct vte_dmamap_arg {
	bus_addr_t vte_busaddr;
};

/*
 * bus_dmamap_load(9) callback: stash the lone segment's bus address
 * into the caller-supplied vte_dmamap_arg.  On load error the argument
 * is left untouched (callers pre-zero it and check for 0).
 */
static void
vte_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct vte_dmamap_arg *dmarg;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	dmarg = arg;
	dmarg->vte_busaddr = segs[0].ds_addr;
}
/*
 * Allocate all DMA resources for the controller:
 *  - a 32-bit parent tag plus tags for the TX and RX descriptor rings,
 *  - coherent, zeroed ring memory, loaded to obtain bus addresses,
 *  - a buffer parent tag plus per-buffer TX/RX tags,
 *  - per-descriptor TX/RX maps and one spare RX map (used by
 *    vte_newbuf() to swap maps without losing the current buffer).
 *
 * Returns 0 on success or a bus_dma(9) error.  On failure, partially
 * created resources are left for vte_dma_free() to reclaim (the attach
 * failure path calls vte_detach(), which does exactly that).
 */
static int
vte_dma_alloc(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	struct vte_dmamap_arg ctx;
	int error, i;

	/* Create parent DMA tag; restricts all children to 32-bit space. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vte_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_parent_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for TX descriptor ring. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_parent_tag, /* parent */
	    VTE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VTE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VTE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create TX ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for RX free descriptor ring. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_parent_tag, /* parent */
	    VTE_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VTE_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VTE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create RX ring DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for TX ring. */
	error = bus_dmamem_alloc(sc->vte_cdata.vte_tx_ring_tag,
	    (void **)&sc->vte_cdata.vte_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vte_cdata.vte_tx_ring_map);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not allocate DMA'able memory for TX ring.\n");
		goto fail;
	}
	/* ctx is pre-zeroed so a 0 busaddr after the load means failure. */
	ctx.vte_busaddr = 0;
	error = bus_dmamap_load(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, sc->vte_cdata.vte_tx_ring,
	    VTE_TX_RING_SZ, vte_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vte_busaddr == 0) {
		device_printf(sc->vte_dev,
		    "could not load DMA'able memory for TX ring.\n");
		goto fail;
	}
	sc->vte_cdata.vte_tx_ring_paddr = ctx.vte_busaddr;

	/* Allocate DMA'able memory and load the DMA map for RX ring. */
	error = bus_dmamem_alloc(sc->vte_cdata.vte_rx_ring_tag,
	    (void **)&sc->vte_cdata.vte_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vte_cdata.vte_rx_ring_map);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not allocate DMA'able memory for RX ring.\n");
		goto fail;
	}
	ctx.vte_busaddr = 0;
	error = bus_dmamap_load(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, sc->vte_cdata.vte_rx_ring,
	    VTE_RX_RING_SZ, vte_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vte_busaddr == 0) {
		device_printf(sc->vte_dev,
		    "could not load DMA'able memory for RX ring.\n");
		goto fail;
	}
	sc->vte_cdata.vte_rx_ring_paddr = ctx.vte_busaddr;

	/* Create TX buffer parent tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vte_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_buffer_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for TX buffers (single segment, one cluster). */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_tx_tag);
	if (error != 0) {
		device_printf(sc->vte_dev, "could not create TX DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for RX buffers (aligned per VTE_RX_BUF_ALIGN). */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_buffer_tag, /* parent */
	    VTE_RX_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_rx_tag);
	if (error != 0) {
		device_printf(sc->vte_dev, "could not create RX DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_cdata.vte_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->vte_dev,
			    "could not create TX dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for RX buffers, plus a spare for map swapping. */
	if ((error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
	    &sc->vte_cdata.vte_rx_sparemap)) != 0) {
		device_printf(sc->vte_dev,
		    "could not create spare RX dmamap.\n");
		goto fail;
	}
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->vte_dev,
			    "could not create RX dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}
/*
 * Release every DMA resource created by vte_dma_alloc().  Safe to call
 * with partially allocated state (all fields are NULL-checked), which
 * is relied upon by the attach failure path via vte_detach().
 * Tear-down order mirrors allocation: buffer maps/tags first, then the
 * descriptor rings, and finally the parent tags.
 */
static void
vte_dma_free(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/* TX buffers. */
	if (sc->vte_cdata.vte_tx_tag != NULL) {
		for (i = 0; i < VTE_TX_RING_CNT; i++) {
			txd = &sc->vte_cdata.vte_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vte_cdata.vte_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->vte_cdata.vte_tx_tag);
		sc->vte_cdata.vte_tx_tag = NULL;
	}
	/* RX buffers */
	if (sc->vte_cdata.vte_rx_tag != NULL) {
		for (i = 0; i < VTE_RX_RING_CNT; i++) {
			rxd = &sc->vte_cdata.vte_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->vte_cdata.vte_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
			    sc->vte_cdata.vte_rx_sparemap);
			sc->vte_cdata.vte_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->vte_cdata.vte_rx_tag);
		sc->vte_cdata.vte_rx_tag = NULL;
	}
	/* TX descriptor ring: unload (if loaded), free memory, kill tag. */
	if (sc->vte_cdata.vte_tx_ring_tag != NULL) {
		if (sc->vte_cdata.vte_tx_ring_paddr != 0)
			bus_dmamap_unload(sc->vte_cdata.vte_tx_ring_tag,
			    sc->vte_cdata.vte_tx_ring_map);
		if (sc->vte_cdata.vte_tx_ring != NULL)
			bus_dmamem_free(sc->vte_cdata.vte_tx_ring_tag,
			    sc->vte_cdata.vte_tx_ring,
			    sc->vte_cdata.vte_tx_ring_map);
		sc->vte_cdata.vte_tx_ring = NULL;
		sc->vte_cdata.vte_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vte_cdata.vte_tx_ring_tag);
		sc->vte_cdata.vte_tx_ring_tag = NULL;
	}
	/* RX ring. */
	if (sc->vte_cdata.vte_rx_ring_tag != NULL) {
		if (sc->vte_cdata.vte_rx_ring_paddr != 0)
			bus_dmamap_unload(sc->vte_cdata.vte_rx_ring_tag,
			    sc->vte_cdata.vte_rx_ring_map);
		if (sc->vte_cdata.vte_rx_ring != NULL)
			bus_dmamem_free(sc->vte_cdata.vte_rx_ring_tag,
			    sc->vte_cdata.vte_rx_ring,
			    sc->vte_cdata.vte_rx_ring_map);
		sc->vte_cdata.vte_rx_ring = NULL;
		sc->vte_cdata.vte_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vte_cdata.vte_rx_ring_tag);
		sc->vte_cdata.vte_rx_ring_tag = NULL;
	}
	/* Parent tags last, after all children are destroyed. */
	if (sc->vte_cdata.vte_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->vte_cdata.vte_buffer_tag);
		sc->vte_cdata.vte_buffer_tag = NULL;
	}
	if (sc->vte_cdata.vte_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->vte_cdata.vte_parent_tag);
		sc->vte_cdata.vte_parent_tag = NULL;
	}
}
/* Shutdown hook: same semantics as suspend — just stop the MAC. */
static int
vte_shutdown(device_t dev)
{
	int error;

	error = vte_suspend(dev);
	return (error);
}
/* Suspend hook: stop the controller if the interface is running. */
static int
vte_suspend(device_t dev)
{
	struct vte_softc *sc = device_get_softc(dev);

	VTE_LOCK(sc);
	if ((if_getdrvflags(sc->vte_ifp) & IFF_DRV_RUNNING) != 0)
		vte_stop(sc);
	VTE_UNLOCK(sc);

	return (0);
}
/* Resume hook: re-initialize the controller if the interface is up. */
static int
vte_resume(device_t dev)
{
	struct vte_softc *sc = device_get_softc(dev);
	if_t ifp;

	VTE_LOCK(sc);
	ifp = sc->vte_ifp;
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		/* Clear RUNNING so vte_init_locked() does a full restart. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		vte_init_locked(sc);
	}
	VTE_UNLOCK(sc);

	return (0);
}
/*
 * Encapsulate one outgoing frame into the next free TX descriptor.
 * Returns the descriptor on success, or NULL when the mbuf chain could
 * not be prepared/loaded (the caller requeues *m_head if non-NULL).
 * The controller requires a single contiguous buffer and does not
 * auto-pad, so short frames are padded to VTE_MIN_FRAMELEN here.
 */
static struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
	struct vte_txdesc *txd;
	struct mbuf *m, *n;
	bus_dma_segment_t txsegs[1];
	int copy, error, nsegs, padlen;

	VTE_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
	m = *m_head;
	/*
	 * Controller doesn't auto-pad, so we have to pad short
	 * frames out to the minimum frame length.
	 */
	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
	else
		padlen = 0;

	/*
	 * Controller does not support multi-fragmented TX buffers.
	 * Controller spends most of its TX processing time in
	 * de-fragmenting TX buffers.  Either faster CPU or more
	 * advanced controller DMA engine is required to speed up
	 * TX path processing.
	 * To mitigate the de-fragmenting issue, perform deep copy
	 * from fragmented mbuf chains to a pre-allocated mbuf
	 * cluster with extra cost of kernel memory.  For frames
	 * that are composed of single TX buffer, the deep copy is
	 * bypassed.
	 */
	if (tx_deep_copy != 0) {
		copy = 0;
		if (m->m_next != NULL)
			copy++;
		if (padlen > 0 && (M_WRITABLE(m) == 0 ||
		    padlen > M_TRAILINGSPACE(m)))
			copy++;
		if (copy != 0) {
			/* Avoid expensive m_defrag(9) and do deep copy. */
			n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
			m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
			n->m_pkthdr.len = m->m_pkthdr.len;
			n->m_len = m->m_pkthdr.len;
			m = n;
			/* VTE_TXMBUF marks the pool mbuf so it is not freed
			 * in vte_txeof(); the original chain is freed by the
			 * caller (vte_start_locked). */
			txd->tx_flags |= VTE_TXMBUF;
		}

		if (padlen > 0) {
			/* Zero out the bytes in the pad area. */
			bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
			m->m_pkthdr.len += padlen;
			m->m_len = m->m_pkthdr.len;
		}
	} else {
		/* No deep copy: make the chain writable, contiguous and
		 * padded via m_dup/m_defrag as needed. */
		if (M_WRITABLE(m) == 0) {
			if (m->m_next != NULL || padlen > 0) {
				/* Get a writable copy. */
				m = m_dup(*m_head, M_NOWAIT);
				/* Release original mbuf chains. */
				m_freem(*m_head);
				if (m == NULL) {
					*m_head = NULL;
					return (NULL);
				}
				*m_head = m;
			}
		}

		if (m->m_next != NULL) {
			m = m_defrag(*m_head, M_NOWAIT);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (NULL);
			}
			*m_head = m;
		}

		if (padlen > 0) {
			if (M_TRAILINGSPACE(m) < padlen) {
				m = m_defrag(*m_head, M_NOWAIT);
				if (m == NULL) {
					m_freem(*m_head);
					*m_head = NULL;
					return (NULL);
				}
				*m_head = m;
			}
			/* Zero out the bytes in the pad area. */
			bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
			m->m_pkthdr.len += padlen;
			m->m_len = m->m_pkthdr.len;
		}
	}

	error = bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_tx_tag,
	    txd->tx_dmamap, m, txsegs, &nsegs, 0);
	if (error != 0) {
		txd->tx_flags &= ~VTE_TXMBUF;
		return (NULL);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	txd->tx_desc->dtlen = htole16(VTE_TX_LEN(txsegs[0].ds_len));
	txd->tx_desc->dtbp = htole32(txsegs[0].ds_addr);
	sc->vte_cdata.vte_tx_cnt++;
	/* Update producer index. */
	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

	/* Finally hand over ownership to controller. */
	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
	txd->tx_m = m;

	return (txd);
}
/* if_start entry point: take the driver lock and run the TX path. */
static void
vte_start(if_t ifp)
{
	struct vte_softc *sc = if_getsoftc(ifp);

	VTE_LOCK(sc);
	vte_start_locked(sc);
	VTE_UNLOCK(sc);
}
/*
 * Drain the interface send queue into the TX descriptor ring.
 * Requires the driver lock; bails out unless the interface is
 * RUNNING, not OACTIVE, and a link has been established.
 * If anything was enqueued, syncs the ring, kicks the TX poll
 * register and arms the watchdog.
 */
static void
vte_start_locked(struct vte_softc *sc)
{
	if_t ifp;
	struct vte_txdesc *txd;
	struct mbuf *m_head;
	int enq;

	ifp = sc->vte_ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->vte_flags & VTE_FLAG_LINK) == 0)
		return;

	for (enq = 0; !if_sendq_empty(ifp); ) {
		/* Reserve one free TX descriptor. */
		if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if ((txd = vte_encap(sc, &m_head)) == NULL) {
			if (m_head != NULL)
				if_sendq_prepend(ifp, m_head);
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
		/* Free consumed TX frame.  When vte_encap() deep-copied
		 * the frame into a pool mbuf (VTE_TXMBUF), the original
		 * chain is no longer needed. */
		if ((txd->tx_flags & VTE_TXMBUF) != 0)
			m_freem(m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
		    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);
		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
		sc->vte_watchdog_timer = VTE_TX_TIMEOUT;
	}
}
/*
 * Per-second TX watchdog, called from vte_tick() with the lock held.
 * Counts the armed timer down; on expiry, logs, records an output
 * error, and restarts the controller.
 */
static void
vte_watchdog(struct vte_softc *sc)
{
	if_t ifp;

	VTE_LOCK_ASSERT(sc);

	/* Timer not armed? */
	if (sc->vte_watchdog_timer == 0)
		return;
	/* Armed but not yet expired? */
	if (--sc->vte_watchdog_timer != 0)
		return;

	ifp = sc->vte_ifp;
	if_printf(ifp, "watchdog timeout -- resetting\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	vte_init_locked(sc);
	if (!if_sendq_empty(ifp))
		vte_start_locked(sc);
}
/*
 * ioctl handler.  Handles interface flag changes, multicast list
 * updates and media requests locally; everything else is passed to
 * ether_ioctl().
 */
static int
vte_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct vte_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		VTE_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			/* Already running and only PROMISC/ALLMULTI
			 * toggled: a filter reprogram suffices;
			 * otherwise do a full (re)init. */
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
			    ((if_getflags(ifp) ^ sc->vte_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				vte_rxfilter(sc);
			else
				vte_init_locked(sc);
		} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			vte_stop(sc);
		/* Remember flags to detect future PROMISC/ALLMULTI flips. */
		sc->vte_if_flags = if_getflags(ifp);
		VTE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VTE_LOCK(sc);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			vte_rxfilter(sc);
		VTE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Media handling is delegated to mii(4). */
		mii = device_get_softc(sc->vte_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
/*
 * Program MCR0 duplex (and, if ever enabled, flow control) to match
 * the duplex mii(4) resolved.  Called after a link state change.
 * Flow control is compiled out (#ifdef notyet) — see vte_rxeof() for
 * why pause frames are problematic on this controller.
 */
static void
vte_mac_config(struct vte_softc *sc)
{
	struct mii_data *mii;
	uint16_t mcr;

	VTE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vte_miibus);
	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
		/*
		 * The data sheet is not clear whether the controller
		 * honors received pause frames or not.  There is no
		 * separate control bit for RX pause frame so just
		 * enable MCR0_FC_ENB bit.
		 */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
#endif
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
}
static void
vte_stats_clear(struct vte_softc *sc)
{
/* Reading counter registers clears its contents. */
CSR_READ_2(sc, VTE_CNT_RX_DONE);
CSR_READ_2(sc, VTE_CNT_MECNT0);
CSR_READ_2(sc, VTE_CNT_MECNT1);
CSR_READ_2(sc, VTE_CNT_MECNT2);
CSR_READ_2(sc, VTE_CNT_MECNT3);
CSR_READ_2(sc, VTE_CNT_TX_DONE);
CSR_READ_2(sc, VTE_CNT_MECNT4);
CSR_READ_2(sc, VTE_CNT_PAUSE);
}
/*
 * Harvest the hardware MAC event counters into the software totals.
 * Counters are clear-on-read, and most 16-bit registers pack two
 * 8-bit counters (high byte / low byte), unpacked below.
 */
static void
vte_stats_update(struct vte_softc *sc)
{
	struct vte_hw_stats *stat;
	uint16_t value;

	VTE_LOCK_ASSERT(sc);

	/* Read (and thereby acknowledge) the counter interrupt status. */
	CSR_READ_2(sc, VTE_MECISR);
	/* RX stats. */
	stat = &sc->vte_stats;
	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
	stat->rx_bcast_frames += (value >> 8);
	stat->rx_mcast_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
	stat->rx_runts += (value >> 8);
	stat->rx_crcerrs += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
	stat->rx_long_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
	stat->rx_fifo_full += (value >> 8);
	stat->rx_desc_unavail += (value & 0xFF);

	/* TX stats. */
	stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
	stat->tx_underruns += (value >> 8);
	stat->tx_late_colls += (value & 0xFF);

	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
	stat->tx_pause_frames += (value >> 8);
	stat->rx_pause_frames += (value & 0xFF);
}
/*
 * if_get_counter handler: report interface counters from the
 * software-accumulated hardware statistics; unknown counters fall
 * through to the stack's defaults.
 */
static uint64_t
vte_get_counter(if_t ifp, ift_counter cnt)
{
	struct vte_softc *sc = if_getsoftc(ifp);
	const struct vte_hw_stats *st = &sc->vte_stats;

	if (cnt == IFCOUNTER_OPACKETS)
		return (st->tx_frames);
	if (cnt == IFCOUNTER_COLLISIONS)
		return (st->tx_late_colls);
	if (cnt == IFCOUNTER_OERRORS)
		return (st->tx_late_colls + st->tx_underruns);
	if (cnt == IFCOUNTER_IPACKETS)
		return (st->rx_frames);
	if (cnt == IFCOUNTER_IERRORS)
		return (st->rx_crcerrs + st->rx_runts + st->rx_long_frames +
		    st->rx_fifo_full);
	return (if_get_counter_default(ifp, cnt));
}
/*
 * Interrupt handler.  Reading VTE_MISR both returns and acknowledges
 * pending causes.  Interrupts are masked for the duration, events are
 * serviced in a bounded loop (at most 8 MISR re-reads) to avoid
 * starving other work, then the mask is restored if still RUNNING.
 */
static void
vte_intr(void *arg)
{
	struct vte_softc *sc;
	if_t ifp;
	uint16_t status;
	int n;

	sc = (struct vte_softc *)arg;
	VTE_LOCK(sc);
	ifp = sc->vte_ifp;
	/* Reading VTE_MISR acknowledges interrupts. */
	status = CSR_READ_2(sc, VTE_MISR);
	if ((status & VTE_INTRS) == 0) {
		/* Not ours. */
		VTE_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	for (n = 8; (status & VTE_INTRS) != 0;) {
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
			break;
		if ((status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
		    MISR_RX_FIFO_FULL)) != 0)
			vte_rxeof(sc);
		if ((status & MISR_TX_DONE) != 0)
			vte_txeof(sc);
		if ((status & MISR_EVENT_CNT_OFLOW) != 0)
			vte_stats_update(sc);
		if (!if_sendq_empty(ifp))
			vte_start_locked(sc);
		if (--n > 0)
			status = CSR_READ_2(sc, VTE_MISR);
		else
			break;
	}

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		/* Re-enable interrupts. */
		CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	}
	VTE_UNLOCK(sc);
}
/*
 * Reclaim completed TX descriptors.  Walks the ring from the consumer
 * index, stopping at the first descriptor still owned by the hardware
 * (VTE_DTST_TX_OWN).  Frees transmitted mbufs — except deep-copy pool
 * mbufs (VTE_TXMBUF), which are reused — clears OACTIVE if anything
 * was reclaimed, and disarms the watchdog once the ring is empty.
 *
 * Fix: 'prog' was incremented both by the for-loop header and by an
 * extra statement inside the body, double-counting reclaimed
 * descriptors.  Harmless while only "prog > 0" is tested, but wrong
 * as a count; the duplicate in-body increment is removed.
 */
static void
vte_txeof(struct vte_softc *sc)
{
	if_t ifp;
	struct vte_txdesc *txd;
	uint16_t status;
	int cons, prog;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	if (sc->vte_cdata.vte_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_tx_cons;
	/*
	 * Go through our TX list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
		txd = &sc->vte_cdata.vte_txdesc[cons];
		status = le16toh(txd->tx_desc->dtst);
		if ((status & VTE_DTST_TX_OWN) != 0)
			break;
		sc->vte_cdata.vte_tx_cnt--;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap);
		/* Pool mbufs used for deep copy are kept for reuse. */
		if ((txd->tx_flags & VTE_TXMBUF) == 0)
			m_freem(txd->tx_m);
		txd->tx_flags &= ~VTE_TXMBUF;
		txd->tx_m = NULL;
		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
	}

	if (prog > 0) {
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		sc->vte_cdata.vte_tx_cons = cons;
		/*
		 * Unarm watchdog timer only when there is no pending
		 * frames in TX queue.
		 */
		if (sc->vte_cdata.vte_tx_cnt == 0)
			sc->vte_watchdog_timer = 0;
	}
}
/*
 * Attach a fresh mbuf cluster to RX descriptor 'rxd'.  Loads the new
 * cluster into the spare DMA map first, so on any failure the old
 * buffer stays intact; on success the maps are swapped and ownership
 * of the descriptor is handed back to the controller.
 * Returns 0 or ENOBUFS.
 */
static int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd)
{
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/* Reserve 4 bytes of headroom (also used by vte_fixup_rx()). */
	m_adj(m, sizeof(uint32_t));

	if (bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_rx_tag,
	    sc->vte_cdata.vte_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the freshly-loaded spare map into the descriptor. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
	sc->vte_cdata.vte_rx_sparemap = map;
	bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rxd->rx_desc->drbp = htole32(segs[0].ds_addr);
	rxd->rx_desc->drlen = htole16(VTE_RX_LEN(segs[0].ds_len));
	/* Give the descriptor back to the controller. */
	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

	return (0);
}
/*
 * It's not supposed to see this controller on strict-alignment
 * architectures but make it work for completeness.
 */
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
vte_fixup_rx(if_t ifp, struct mbuf *m)
{
	uint16_t *src, *dst;
	int i;

	/*
	 * Shift the received frame down by one 16-bit word, then move
	 * m_data back by ETHER_ALIGN, so that the payload past the
	 * 14-byte Ethernet header ends up 4-byte aligned.  The loop
	 * copies one extra word to cover odd-length frames.  Headroom
	 * exists because vte_newbuf() m_adj()'d 4 bytes up front.
	 */
	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;
	m->m_data -= ETHER_ALIGN;
	return (m);
}
#endif
/*
 * RX completion: walk the RX ring from the consumer index, handing
 * good frames to the stack and recycling errored/unreplenishable
 * descriptors back to the hardware.  The lock is dropped around
 * if_input() as required by the network stack.
 */
static void
vte_rxeof(struct vte_softc *sc)
{
	if_t ifp;
	struct vte_rxdesc *rxd;
	struct mbuf *m;
	uint16_t status, total_len;
	int cons, prog;

	bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_rx_cons;
	ifp = sc->vte_ifp;
	for (prog = 0; (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0; prog++,
	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
		rxd = &sc->vte_cdata.vte_rxdesc[cons];
		status = le16toh(rxd->rx_desc->drst);
		/* Descriptor still owned by the controller: stop here. */
		if ((status & VTE_DRST_RX_OWN) != 0)
			break;
		total_len = VTE_RX_LEN(le16toh(rxd->rx_desc->drlen));
		m = rxd->rx_m;
		if ((status & VTE_DRST_RX_OK) == 0) {
			/* Discard errored frame: recycle the descriptor. */
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}
		if (vte_newbuf(sc, rxd) != 0) {
			/* No replacement buffer: drop and recycle. */
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}

		/*
		 * It seems there is no way to strip FCS bytes.
		 */
		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;
#ifndef __NO_STRICT_ALIGNMENT
		vte_fixup_rx(ifp, m);
#endif
		VTE_UNLOCK(sc);
		if_input(ifp, m);
		VTE_LOCK(sc);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->vte_cdata.vte_rx_cons = cons;
		/*
		 * Sync updated RX descriptors such that the controller
		 * sees modified RX buffer addresses.
		 */
		bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
		    sc->vte_cdata.vte_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#ifdef notyet
		/*
		 * Update residue counter.  Controller does not
		 * keep track of number of available RX descriptors
		 * such that driver should have to update VTE_MRDCR
		 * to make controller know how many free RX
		 * descriptors were added to controller.  This is
		 * a similar mechanism used in VIA velocity
		 * controllers and it indicates controller just
		 * polls OWN bit of current RX descriptor pointer.
		 * A couple of severe issues were seen on sample
		 * board where the controller continuously emits TX
		 * pause frames once RX pause threshold crossed.
		 * Once triggered it never recovered from that
		 * state, I couldn't find a way to make it back to
		 * work at least.  This issue effectively
		 * disconnected the system from network.  Also, the
		 * controller used 00:00:00:00:00:00 as source
		 * station address of TX pause frame.  Probably this
		 * is one of reason why vendor recommends not to
		 * enable flow control on R6040 controller.
		 */
		CSR_WRITE_2(sc, VTE_MRDCR, prog |
		    (((VTE_RX_RING_CNT * 2) / 10) <<
		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
	}
}
/*
 * Once-per-second housekeeping callout, invoked with the driver lock
 * held: poll the PHY, harvest the clear-on-read MAC counters, reclaim
 * finished TX descriptors, run the TX watchdog, then re-arm.
 */
static void
vte_tick(void *arg)
{
	struct vte_softc *sc = arg;
	struct mii_data *mii;

	VTE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vte_miibus);
	mii_tick(mii);
	vte_stats_update(sc);
	vte_txeof(sc);
	vte_watchdog(sc);
	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);
}
/*
 * Reset the MAC.  First tries the MCR1_MAC_RESET self-clearing bit
 * (polled up to VTE_RESET_TIMEOUT times), then performs the vendor-
 * recommended manual state-machine reset regardless, since the reset
 * bit alone is known to be unreliable.  Preserves a non-default MDC
 * speed setting across the reset.
 */
static void
vte_reset(struct vte_softc *sc)
{
	uint16_t mcr, mdcsc;
	int i;

	/* Save MDC speed control before the reset clobbers it. */
	mdcsc = CSR_READ_2(sc, VTE_MDCSC);
	mcr = CSR_READ_2(sc, VTE_MCR1);
	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->vte_dev, "reset timeout(0x%04x)!\n", mcr);
	/*
	 * Follow the guide of vendor recommended way to reset MAC.
	 * Vendor confirms relying on MCR1_MAC_RESET of VTE_MCR1 is
	 * not reliable so manually reset internal state machine.
	 */
	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
	CSR_WRITE_2(sc, VTE_MACSM, 0);
	DELAY(5000);

	/*
	 * On some SoCs (like Vortex86DX3) MDC speed control register value
	 * needs to be restored to original value instead of default one,
	 * otherwise some PHY registers may fail to be read.
	 */
	if (mdcsc != MDCSC_DEFAULT)
		CSR_WRITE_2(sc, VTE_MDCSC, mdcsc);
}
/* if_init entry point: run the locked initialization path. */
static void
vte_init(void *xsc)
{
	struct vte_softc *sc = xsc;

	VTE_LOCK(sc);
	vte_init_locked(sc);
	VTE_UNLOCK(sc);
}
/*
 * Bring the controller up: stop/reset, build the TX/RX rings, program
 * the station address, descriptor base addresses and MAC/FIFO
 * configuration registers, enable interrupts, and kick off media
 * selection and the tick callout.  No-op if already RUNNING.
 * Requires the driver lock.
 */
static void
vte_init_locked(struct vte_softc *sc)
{
	if_t ifp;
	bus_addr_t paddr;
	uint8_t *eaddr;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;
	/*
	 * Cancel any pending I/O.
	 */
	vte_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	vte_reset(sc);

	/* Initialize RX descriptors. */
	if (vte_init_rx_ring(sc) != 0) {
		device_printf(sc->vte_dev, "no memory for RX buffers.\n");
		vte_stop(sc);
		return;
	}
	if (vte_init_tx_ring(sc) != 0) {
		device_printf(sc->vte_dev, "no memory for TX buffers.\n");
		vte_stop(sc);
		return;
	}

	/*
	 * Reprogram the station address.  Controller supports up
	 * to 4 different station addresses so driver programs the
	 * first station address as its own ethernet address and
	 * configure the remaining three addresses as perfect
	 * multicast addresses.
	 */
	eaddr = if_getlladdr(sc->vte_ifp);
	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

	/* Set TX descriptor base addresses (split high/low 16 bits). */
	paddr = sc->vte_cdata.vte_tx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
	/* Set RX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_rx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
	/*
	 * Initialize RX descriptor residue counter and set RX
	 * pause threshold to 20% of available RX descriptors.
	 * See comments on vte_rxeof() for details on flow control
	 * issues.
	 */
	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));

	/*
	 * Always use maximum frame size that controller can
	 * support.  Otherwise received frames that have a longer
	 * frame length than vte(4) MTU would be silently dropped
	 * in controller.  This would break path-MTU discovery as
	 * sender wouldn't get any responses from receiver.  The
	 * RX buffer size should be multiple of 4.
	 * Note, jumbo frames are silently ignored by controller
	 * and even MAC counters do not detect them.
	 */
	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

	/* Configure FIFO. */
	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

	/*
	 * Configure TX/RX MACs.  Actual resolved duplex and flow
	 * control configuration is done after detecting a valid
	 * link.  Note, we don't generate early interrupt here
	 * as well since FreeBSD does not have interrupt latency
	 * problems like Windows.
	 */
	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
	/*
	 * We manually keep track of PHY status changes to
	 * configure resolved duplex and flow control since only
	 * duplex configuration can be automatically reflected to
	 * MCR0.
	 */
	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
	    MCR1_EXCESS_COL_RETRY_16);

	/* Initialize RX filter. */
	vte_rxfilter(sc);

	/* Disable TX/RX interrupt moderation control. */
	CSR_WRITE_2(sc, VTE_MRICR, 0);
	CSR_WRITE_2(sc, VTE_MTICR, 0);

	/* Enable MAC event counter interrupts. */
	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
	/* Clear MAC statistics. */
	vte_stats_clear(sc);

	/* Acknowledge all pending interrupts and clear it. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	CSR_WRITE_2(sc, VTE_MISR, 0);

	sc->vte_flags &= ~VTE_FLAG_LINK;
	/* Switch to the current media. */
	vte_mediachange_locked(ifp);

	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
}
/*
 * Stop the controller: mark the interface down, cancel the tick
 * callout and watchdog, take a final statistics snapshot, mask and
 * drain interrupts, halt the RX/TX MACs, and release every mbuf still
 * held by the rings and the deep-copy pool.  Requires the driver lock.
 */
static void
vte_stop(struct vte_softc *sc)
{
	if_t ifp;
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	VTE_LOCK_ASSERT(sc);
	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->vte_ifp;
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->vte_flags &= ~VTE_FLAG_LINK;
	callout_stop(&sc->vte_tick_ch);
	sc->vte_watchdog_timer = 0;
	/* Harvest counters one last time before the MAC is stopped. */
	vte_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	CSR_WRITE_2(sc, VTE_MECIER, 0);
	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Clear interrupts (MISR is clear-on-read). */
	CSR_READ_2(sc, VTE_MISR);
	/*
	 * Free TX/RX mbufs still in the queues.
	 */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vte_cdata.vte_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vte_cdata.vte_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vte_cdata.vte_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vte_cdata.vte_tx_tag,
			    txd->tx_dmamap);
			/* Pool mbufs (VTE_TXMBUF) are freed below instead. */
			if ((txd->tx_flags & VTE_TXMBUF) == 0)
				m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_flags &= ~VTE_TXMBUF;
		}
	}
	/* Free TX mbuf pools used for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
		}
	}
}
/*
 * Enable the RX and TX MACs and busy-wait until the hardware confirms
 * both enable bits, logging a diagnostic if it never does.  Called
 * with the softc lock held.
 */
static void
vte_start_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int n;

	VTE_LOCK_ASSERT(sc);

	/* Nothing to do if both MACs are already enabled. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
	    (MCR0_RX_ENB | MCR0_TX_ENB))
		return;

	CSR_WRITE_2(sc, VTE_MCR0, mcr | MCR0_RX_ENB | MCR0_TX_ENB);
	/* Poll until the hardware reflects both enable bits. */
	for (n = VTE_TIMEOUT; n > 0; n--) {
		mcr = CSR_READ_2(sc, VTE_MCR0);
		if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
		    (MCR0_RX_ENB | MCR0_TX_ENB))
			break;
		DELAY(10);
	}
	if (n == 0)
		device_printf(sc->vte_dev,
		    "could not enable RX/TX MAC(0x%04x)!\n", mcr);
}
/*
 * Disable the RX and TX MACs and busy-wait until the hardware clears
 * both enable bits, logging a diagnostic on timeout.  Called with the
 * softc lock held.
 */
static void
vte_stop_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	VTE_LOCK_ASSERT(sc);
	/* Disable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		/* Poll until the hardware reflects the cleared bits. */
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
				break;
			DELAY(10);
		}
		if (i == 0)
			device_printf(sc->vte_dev,
			    "could not disable RX/TX MAC(0x%04x)!\n", mcr);
	}
}
/*
 * Initialize the TX descriptor ring: reset producer/consumer indices,
 * optionally pre-allocate a cluster mbuf per slot for deep-copy TX,
 * and chain each descriptor's next-pointer to the following slot (the
 * last descriptor wraps to slot 0, forming a circular ring).
 *
 * Returns 0 on success or ENOBUFS if an mbuf allocation fails.
 * Called with the softc lock held.
 */
static int
vte_init_tx_ring(struct vte_softc *sc)
{
	struct vte_tx_desc *desc;
	struct vte_txdesc *txd;
	bus_addr_t addr;
	int i;

	VTE_LOCK_ASSERT(sc);
	sc->vte_cdata.vte_tx_prod = 0;
	sc->vte_cdata.vte_tx_cons = 0;
	sc->vte_cdata.vte_tx_cnt = 0;
	/* Pre-allocate TX mbufs for deep copy. */
	if (tx_deep_copy != 0) {
		for (i = 0; i < VTE_TX_RING_CNT; i++) {
			sc->vte_cdata.vte_txmbufs[i] = m_getcl(M_NOWAIT,
			    MT_DATA, M_PKTHDR);
			if (sc->vte_cdata.vte_txmbufs[i] == NULL)
				return (ENOBUFS);
			sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
			sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
		}
	}
	desc = sc->vte_cdata.vte_tx_ring;
	bzero(desc, VTE_TX_RING_SZ);
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		/* Link to the next descriptor; last slot wraps to 0. */
		if (i != VTE_TX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * 0;
		desc = &sc->vte_cdata.vte_tx_ring[i];
		desc->dtnp = htole32(addr);
		txd->tx_desc = desc;
	}
	bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	return (0);
}
/*
 * Initialize the RX descriptor ring: reset the consumer index, chain
 * the descriptors into a circular list (the last slot wraps to 0),
 * and attach a fresh RX mbuf to every slot via vte_newbuf().
 *
 * Returns 0 on success or ENOBUFS if buffer allocation fails.
 * Called with the softc lock held.
 */
static int
vte_init_rx_ring(struct vte_softc *sc)
{
	struct vte_rx_desc *desc;
	struct vte_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	VTE_LOCK_ASSERT(sc);
	sc->vte_cdata.vte_rx_cons = 0;
	desc = sc->vte_cdata.vte_rx_ring;
	bzero(desc, VTE_RX_RING_SZ);
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		/* Link to the next descriptor; last slot wraps to 0. */
		if (i != VTE_RX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * 0;
		desc = &sc->vte_cdata.vte_rx_ring[i];
		desc->drnp = htole32(addr);
		rxd->rx_desc = desc;
		if (vte_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}
	bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	return (0);
}
/*
 * Scratch state threaded through if_foreach_llmaddr() while building
 * the RX filter: up to VTE_RXFILT_PERFECT_CNT perfect-match entries
 * (three 16-bit words each) plus a 4x16-bit multicast hash table and
 * a count of perfect-filter slots used so far.
 */
struct vte_maddr_ctx {
	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
	uint16_t mchash[4];
	u_int nperf;
};
/*
 * if_foreach_llmaddr() callback: place each multicast address either
 * into a free perfect-filter slot (first VTE_RXFILT_PERFECT_CNT
 * addresses) or, once those are exhausted, into the multicast hash
 * table.  Always returns 1 so every address is counted.
 */
static u_int
vte_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct vte_maddr_ctx *ctx = arg;
	uint8_t *mac;
	uint32_t hash;
	int w;

	mac = (uint8_t *)LLADDR(sdl);
	if (ctx->nperf < VTE_RXFILT_PERFECT_CNT) {
		/* Pack the address little-endian into three 16-bit words. */
		for (w = 0; w < 3; w++)
			ctx->rxfilt_perf[ctx->nperf][w] =
			    mac[2 * w + 1] << 8 | mac[2 * w];
		ctx->nperf++;
	} else {
		/* Hash: top 2 CRC bits pick the word, next 4 the bit. */
		hash = ether_crc32_be(mac, ETHER_ADDR_LEN);
		ctx->mchash[hash >> 30] |= 1 << ((hash >> 26) & 0x0F);
	}
	return (1);
}
/*
 * Reprogram the chip's receive filter from the interface state:
 * broadcast acceptance, promiscuous/allmulti shortcuts, the perfect
 * match table, and the multicast hash.  Called with the softc lock
 * held.
 */
static void
vte_rxfilter(struct vte_softc *sc)
{
	if_t ifp;
	struct vte_maddr_ctx ctx;
	uint16_t mcr;
	int i;

	VTE_LOCK_ASSERT(sc);
	ifp = sc->vte_ifp;
	/* Start from an empty hash and all-ones (unused) perfect slots. */
	bzero(ctx.mchash, sizeof(ctx.mchash));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		ctx.rxfilt_perf[i][0] = 0xFFFF;
		ctx.rxfilt_perf[i][1] = 0xFFFF;
		ctx.rxfilt_perf[i][2] = 0xFFFF;
	}
	ctx.nperf = 0;
	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_PROMISC | MCR0_MULTICAST);
	mcr |= MCR0_BROADCAST_DIS;
	if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
		mcr &= ~MCR0_BROADCAST_DIS;
	/* Promiscuous/allmulti: open the hash completely and skip the walk. */
	if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((if_getflags(ifp) & IFF_PROMISC) != 0)
			mcr |= MCR0_PROMISC;
		if ((if_getflags(ifp) & IFF_ALLMULTI) != 0)
			mcr |= MCR0_MULTICAST;
		ctx.mchash[0] = 0xFFFF;
		ctx.mchash[1] = 0xFFFF;
		ctx.mchash[2] = 0xFFFF;
		ctx.mchash[3] = 0xFFFF;
		goto chipit;
	}
	if_foreach_llmaddr(ifp, vte_hash_maddr, &ctx);
	/* Enable multicast matching only if any hash bit is set. */
	if (ctx.mchash[0] != 0 || ctx.mchash[1] != 0 ||
	    ctx.mchash[2] != 0 || ctx.mchash[3] != 0)
		mcr |= MCR0_MULTICAST;
chipit:
	/* Program multicast hash table. */
	CSR_WRITE_2(sc, VTE_MAR0, ctx.mchash[0]);
	CSR_WRITE_2(sc, VTE_MAR1, ctx.mchash[1]);
	CSR_WRITE_2(sc, VTE_MAR2, ctx.mchash[2]);
	CSR_WRITE_2(sc, VTE_MAR3, ctx.mchash[3]);
	/* Program perfect filter table. */
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
		    ctx.rxfilt_perf[i][0]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
		    ctx.rxfilt_perf[i][1]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
		    ctx.rxfilt_perf[i][2]);
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
	/* Read back to flush the write. */
	CSR_READ_2(sc, VTE_MCR0);
}
/*
 * Generic sysctl handler for an integer knob constrained to the
 * inclusive range [low, high].  Rejects out-of-range updates with
 * EINVAL and stores accepted values back through arg1.
 */
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int err, val;

	if (arg1 == NULL)
		return (EINVAL);
	val = *(int *)arg1;
	err = sysctl_handle_int(oidp, &val, 0, req);
	/* Read-only access or handler failure: nothing to store. */
	if (err != 0 || req->newptr == NULL)
		return (err);
	if (val < low || val > high)
		return (EINVAL);
	*(int *)arg1 = val;
	return (0);
}
/*
 * Sysctl handler for the interrupt moderation knob; delegates to
 * sysctl_int_range() with the VTE_IM_BUNDLE_{MIN,MAX} bounds.
 */
static int
sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    VTE_IM_BUNDLE_MIN, VTE_IM_BUNDLE_MAX));
}
diff --git a/sys/dev/xilinx/if_xae.c b/sys/dev/xilinx/if_xae.c
index 722be6a21cbd..26f1bf805ffa 100644
--- a/sys/dev/xilinx/if_xae.c
+++ b/sys/dev/xilinx/if_xae.c
@@ -1,1149 +1,1144 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2019 Ruslan Bukin <br@bsdpad.com>
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory (Department of Computer Science and
* Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
* DARPA SSITH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <machine/bus.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/tiphy.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/xilinx/if_xaereg.h>
#include <dev/xilinx/if_xaevar.h>
#include <dev/xilinx/axidma.h>
#include "miibus_if.h"
#define READ4(_sc, _reg) \
bus_read_4((_sc)->res[0], _reg)
#define WRITE4(_sc, _reg, _val) \
bus_write_4((_sc)->res[0], _reg, _val)
#define READ8(_sc, _reg) \
bus_read_8((_sc)->res[0], _reg)
#define WRITE8(_sc, _reg, _val) \
bus_write_8((_sc)->res[0], _reg, _val)
#define XAE_LOCK(sc) mtx_lock(&(sc)->mtx)
#define XAE_UNLOCK(sc) mtx_unlock(&(sc)->mtx)
#define XAE_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED)
#define XAE_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED)
#define XAE_DEBUG
#undef XAE_DEBUG
#ifdef XAE_DEBUG
#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
#define dprintf(fmt, ...)
#endif
#define RX_QUEUE_SIZE 64
#define TX_QUEUE_SIZE 64
#define NUM_RX_MBUF 16
#define BUFRING_SIZE 8192
#define MDIO_CLK_DIV_DEFAULT 29
#define PHY1_RD(sc, _r) \
xae_miibus_read_reg(sc->dev, 1, _r)
#define PHY1_WR(sc, _r, _v) \
xae_miibus_write_reg(sc->dev, 1, _r, _v)
#define PHY_RD(sc, _r) \
xae_miibus_read_reg(sc->dev, sc->phy_addr, _r)
#define PHY_WR(sc, _r, _v) \
xae_miibus_write_reg(sc->dev, sc->phy_addr, _r, _v)
/* Use this macro to access regs > 0x1f */
#define WRITE_TI_EREG(sc, reg, data) { \
PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK); \
PHY_WR(sc, MII_MMDAADR, reg); \
PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK | MMDACR_FN_DATANPI); \
PHY_WR(sc, MII_MMDAADR, data); \
}
/* Not documented, Xilinx VCU118 workaround */
#define CFG4_SGMII_TMR 0x160 /* bits 8:7 MUST be '10' */
#define DP83867_SGMIICTL1 0xD3 /* not documented register */
#define SGMIICTL1_SGMII_6W (1 << 14) /* no idea what it is */
static struct resource_spec xae_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE },
{ SYS_RES_IRQ, 0, RF_ACTIVE },
{ -1, 0 }
};
static void xae_stop_locked(struct xae_softc *sc);
static void xae_setup_rxfilter(struct xae_softc *sc);
/*
 * Hand 'n' freshly allocated cluster mbufs to the RX xDMA channel.
 * Each mbuf exposes its full cluster size to the DMA engine.
 * Returns 0 on success or -1 if an mbuf allocation fails (mbufs
 * already enqueued remain with the channel).
 */
static int
xae_rx_enqueue(struct xae_softc *sc, uint32_t n)
{
	struct mbuf *m;
	int i;

	for (i = 0; i < n; i++) {
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			device_printf(sc->dev,
			    "%s: Can't alloc rx mbuf\n", __func__);
			return (-1);
		}
		/* Let the device fill the entire cluster. */
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
		xdma_enqueue_mbuf(sc->xchan_rx, &m, 0, 4, 4, XDMA_DEV_TO_MEM);
	}
	return (0);
}
/*
 * Resolve the PHY address for this MAC from the device tree: follow
 * the "phy-handle" xref to the PHY node and read its "reg" property.
 * Returns 0 on success with *phy_addr filled in, ENXIO otherwise.
 */
static int
xae_get_phyaddr(phandle_t node, int *phy_addr)
{
	pcell_t xref, reg;
	phandle_t pnode;

	if (OF_getencprop(node, "phy-handle", (void *)&xref,
	    sizeof(xref)) <= 0)
		return (ENXIO);

	pnode = OF_node_from_xref(xref);
	if (OF_getencprop(pnode, "reg", (void *)&reg, sizeof(reg)) <= 0)
		return (ENXIO);

	*phy_addr = reg;
	return (0);
}
/*
 * TX completion callback from the xDMA layer: reap all finished TX
 * mbufs, count per-transfer errors as output errors, free the mbufs,
 * and clear OACTIVE so transmission can resume.  Always returns 0.
 */
static int
xae_xdma_tx_intr(void *arg, xdma_transfer_status_t *status)
{
	xdma_transfer_status_t st;
	struct xae_softc *sc;
	if_t ifp;
	struct mbuf *m;
	int err;

	sc = arg;
	XAE_LOCK(sc);
	ifp = sc->ifp;
	/* Drain every completed transfer from the TX channel. */
	for (;;) {
		err = xdma_dequeue_mbuf(sc->xchan_tx, &m, &st);
		if (err != 0) {
			break;
		}
		if (st.error != 0) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		}
		m_freem(m);
	}
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	XAE_UNLOCK(sc);
	return (0);
}
/*
 * RX completion callback from the xDMA layer: dequeue every completed
 * receive, pass good frames up the stack, count errored ones, and
 * replenish the RX channel with as many fresh mbufs as were consumed.
 * Always returns 0.
 */
static int
xae_xdma_rx_intr(void *arg, xdma_transfer_status_t *status)
{
	xdma_transfer_status_t st;
	struct xae_softc *sc;
	if_t ifp;
	struct mbuf *m;
	int err;
	uint32_t cnt_processed;

	sc = arg;
	dprintf("%s\n", __func__);
	XAE_LOCK(sc);
	ifp = sc->ifp;
	cnt_processed = 0;
	for (;;) {
		err = xdma_dequeue_mbuf(sc->xchan_rx, &m, &st);
		if (err != 0) {
			break;
		}
		cnt_processed++;
		if (st.error != 0) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			m_freem(m);
			continue;
		}
		m->m_pkthdr.len = m->m_len = st.transferred;
		m->m_pkthdr.rcvif = ifp;
		/* Drop the lock around the stack input call. */
		XAE_UNLOCK(sc);
		if_input(ifp, m);
		XAE_LOCK(sc);
	}
	/* Refill one buffer per dequeued transfer. */
	xae_rx_enqueue(sc, cnt_processed);
	XAE_UNLOCK(sc);
	return (0);
}
/*
 * if_qflush handler; intentionally empty.
 * NOTE(review): the driver's buf_ring (sc->br) is not drained here —
 * confirm that is intentional.
 */
static void
xae_qflush(if_t ifp)
{
}
/*
 * Move as many packets as possible from the buf_ring to the TX xDMA
 * channel; packets that do not fit are put back for a later attempt.
 * Submits the channel once if anything was enqueued.  Caller holds
 * the softc lock.  Always returns 0.
 */
static int
xae_transmit_locked(if_t ifp)
{
	struct xae_softc *sc;
	struct mbuf *m;
	struct buf_ring *br;
	int error;
	int enq;

	dprintf("%s\n", __func__);

	sc = if_getsoftc(ifp);
	br = sc->br;
	enq = 0;
	while ((m = drbr_peek(ifp, br)) != NULL) {
		error = xdma_enqueue_mbuf(sc->xchan_tx,
		    &m, 0, 4, 4, XDMA_MEM_TO_DEV);
		if (error != 0) {
			/* No space in request queue available yet. */
			drbr_putback(ifp, br, m);
			break;
		}
		drbr_advance(ifp, br);
		enq++;
		/* If anyone is interested give them a copy. */
		ETHER_BPF_MTAP(ifp, m);
	}
	if (enq > 0)
		xdma_queue_submit(sc->xchan_tx);
	return (0);
}
/*
 * if_transmit handler: queue the mbuf on the buf_ring and, when the
 * interface is running, not output-blocked, and the link is up, kick
 * the locked transmit path.  Returns the enqueue error, if any.
 */
static int
xae_transmit(if_t ifp, struct mbuf *m)
{
	struct xae_softc *sc;
	int error;

	dprintf("%s\n", __func__);

	sc = if_getsoftc(ifp);

	XAE_LOCK(sc);
	error = drbr_enqueue(ifp, sc->br, m);
	if (error == 0 &&
	    (if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) ==
	    IFF_DRV_RUNNING && sc->link_is_up)
		error = xae_transmit_locked(ifp);
	XAE_UNLOCK(sc);

	return (error);
}
/*
 * Stop the interface: clear the running flags, cancel the tick
 * callout, and disable the MAC transmitter and receiver.  Caller
 * holds the softc lock (asserted).
 */
static void
xae_stop_locked(struct xae_softc *sc)
{
	if_t ifp;
	uint32_t reg;

	XAE_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	callout_stop(&sc->xae_callout);
	/* Stop the transmitter */
	reg = READ4(sc, XAE_TC);
	reg &= ~TC_TX;
	WRITE4(sc, XAE_TC, reg);
	/* Stop the receiver. */
	reg = READ4(sc, XAE_RCW1);
	reg &= ~RCW1_RX;
	WRITE4(sc, XAE_RCW1, reg);
}
/*
 * Read the free-running 64-bit hardware statistic 'counter_id' and
 * return its delta since the previous call, caching the new raw value
 * in sc->counters[].  Handles the value wrapping past UINT64_MAX.
 */
static uint64_t
xae_stat(struct xae_softc *sc, int counter_id)
{
	uint64_t cur, prev, delta;

	KASSERT(counter_id < XAE_MAX_COUNTERS,
	    ("counter %d is out of range", counter_id));

	cur = READ8(sc, XAE_STATCNT(counter_id));
	prev = sc->counters[counter_id];

	delta = (cur >= prev) ? cur - prev : UINT64_MAX - prev + cur;

	sc->counters[counter_id] = cur;
	return (delta);
}
/*
 * Fold the hardware statistic deltas into the interface counters.
 * NOTE(review): RX byte and error subcategories beyond the three
 * listed are not harvested, and no IFCOUNTER_IBYTES is updated —
 * confirm whether that is intentional.
 */
static void
xae_harvest_stats(struct xae_softc *sc)
{
	if_t ifp;

	ifp = sc->ifp;
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, xae_stat(sc, RX_GOOD_FRAMES));
	if_inc_counter(ifp, IFCOUNTER_IMCASTS, xae_stat(sc, RX_GOOD_MCASTS));
	if_inc_counter(ifp, IFCOUNTER_IERRORS,
	    xae_stat(sc, RX_FRAME_CHECK_SEQ_ERROR) +
	    xae_stat(sc, RX_LEN_OUT_OF_RANGE) +
	    xae_stat(sc, RX_ALIGNMENT_ERRORS));
	if_inc_counter(ifp, IFCOUNTER_OBYTES, xae_stat(sc, TX_BYTES));
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, xae_stat(sc, TX_GOOD_FRAMES));
	if_inc_counter(ifp, IFCOUNTER_OMCASTS, xae_stat(sc, TX_GOOD_MCASTS));
	if_inc_counter(ifp, IFCOUNTER_OERRORS,
	    xae_stat(sc, TX_GOOD_UNDERRUN_ERRORS));
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
	    xae_stat(sc, TX_SINGLE_COLLISION_FRAMES) +
	    xae_stat(sc, TX_MULTI_COLLISION_FRAMES) +
	    xae_stat(sc, TX_LATE_COLLISIONS) +
	    xae_stat(sc, TX_EXCESS_COLLISIONS));
}
/*
 * Once-per-second callout (runs with the softc lock held via
 * callout_init_mtx): harvest hardware stats, poll the PHY, and if the
 * link just came up, restart transmission.  Reschedules itself while
 * the interface remains running.
 */
static void
xae_tick(void *arg)
{
	struct xae_softc *sc;
	if_t ifp;
	int link_was_up;

	sc = arg;

	XAE_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
		return;

	/* Gather stats from hardware counters. */
	xae_harvest_stats(sc);

	/* Check the media status. */
	link_was_up = sc->link_is_up;
	mii_tick(sc->mii_softc);
	/* Kick TX on a down->up link transition. */
	if (sc->link_is_up && !link_was_up)
		xae_transmit_locked(sc->ifp);

	/* Schedule another check one second from now. */
	callout_reset(&sc->xae_callout, hz, xae_tick, sc);
}
/*
 * Bring the interface up: mark it running, program the RX filter,
 * enable the MAC transmitter/receiver, renegotiate media, and start
 * the periodic tick.  No-op if already running.  Caller holds the
 * softc lock (asserted).
 */
static void
xae_init_locked(struct xae_softc *sc)
{
	if_t ifp;

	XAE_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);

	xae_setup_rxfilter(sc);

	/* Enable the transmitter */
	WRITE4(sc, XAE_TC, TC_TX);

	/* Enable the receiver. */
	WRITE4(sc, XAE_RCW1, RCW1_RX);

	/*
	 * Call mii_mediachg() which will call back into xae_miibus_statchg()
	 * to set up the remaining config registers based on current media.
	 */
	mii_mediachg(sc->mii_softc);

	callout_reset(&sc->xae_callout, hz, xae_tick, sc);
}
/* if_init handler: take the softc lock and run the locked init path. */
static void
xae_init(void *arg)
{
	struct xae_softc *sc;

	sc = arg;

	XAE_LOCK(sc);
	xae_init_locked(sc);
	XAE_UNLOCK(sc);
}
/*
 * ifmedia status handler: poll the PHY under the softc lock and copy
 * the active media and status into the caller's ifmediareq.
 */
static void
xae_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct xae_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = sc->mii_softc;

	XAE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	XAE_UNLOCK(sc);
}
/* Apply the currently selected media via the MII layer (lock held). */
static int
xae_media_change_locked(struct xae_softc *sc)
{
	return (mii_mediachg(sc->mii_softc));
}
/*
 * ifmedia change handler: serialize with the softc lock and delegate
 * to the locked media-change path, propagating its result.
 */
static int
xae_media_change(if_t ifp)
{
	struct xae_softc *sc = if_getsoftc(ifp);
	int rv;

	XAE_LOCK(sc);
	rv = xae_media_change_locked(sc);
	XAE_UNLOCK(sc);

	return (rv);
}
/*
 * if_foreach_llmaddr() callback: program multicast address number
 * 'cnt' into the hardware filter table, stopping silently once the
 * table is full.  Always returns 1 so addresses keep being counted.
 */
static u_int
xae_write_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct xae_softc *sc = arg;
	uint32_t val;
	uint8_t *ma;

	/* Table exhausted: ignore the remaining addresses. */
	if (cnt >= XAE_MULTICAST_TABLE_SIZE)
		return (1);

	ma = LLADDR(sdl);

	/* Select filter entry 'cnt' in the low byte of FFC. */
	val = READ4(sc, XAE_FFC) & 0xffffff00;
	WRITE4(sc, XAE_FFC, val | cnt);

	/* Low four bytes of the address, packed little-endian. */
	val = ma[0] | (ma[1] << 8) | (ma[2] << 16) | (ma[3] << 24);
	WRITE4(sc, XAE_FFV(0), val);

	/* High two bytes. */
	val = ma[4] | (ma[5] << 8);
	WRITE4(sc, XAE_FFV(1), val);

	return (1);
}
/*
 * Program the receive filter: promiscuous mode when PROMISC/ALLMULTI
 * is requested, otherwise the per-address multicast filter table,
 * then (re)write the unicast station address.  Caller holds the
 * softc lock (asserted).
 */
static void
xae_setup_rxfilter(struct xae_softc *sc)
{
	if_t ifp;
	uint32_t reg;

	XAE_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	/*
	 * Set the multicast (group) filter hash.
	 */
	if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		reg = READ4(sc, XAE_FFC);
		reg |= FFC_PM;
		WRITE4(sc, XAE_FFC, reg);
	} else {
		reg = READ4(sc, XAE_FFC);
		reg &= ~FFC_PM;
		WRITE4(sc, XAE_FFC, reg);
		if_foreach_llmaddr(ifp, xae_write_maddr, sc);
	}

	/*
	 * Set the primary address.
	 */
	reg = sc->macaddr[0];
	reg |= (sc->macaddr[1] << 8);
	reg |= (sc->macaddr[2] << 16);
	reg |= (sc->macaddr[3] << 24);
	WRITE4(sc, XAE_UAW0, reg);
	reg = sc->macaddr[4];
	reg |= (sc->macaddr[5] << 8);
	WRITE4(sc, XAE_UAW1, reg);
}
/*
 * Interface ioctl handler: flag changes (up/down, promisc/allmulti),
 * multicast list updates, media selection, and capability toggles;
 * everything else is delegated to ether_ioctl().
 */
static int
xae_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct xae_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int mask, error;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;

	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		XAE_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/* Reprogram only on relevant flag changes. */
				if ((if_getflags(ifp) ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					xae_setup_rxfilter(sc);
			} else {
				/* Don't re-init while detach is in progress. */
				if (!sc->is_detaching)
					xae_init_locked(sc);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				xae_stop_locked(sc);
		}
		sc->if_flags = if_getflags(ifp);
		XAE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			XAE_LOCK(sc);
			xae_setup_rxfilter(sc);
			XAE_UNLOCK(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = sc->mii_softc;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			/* No work to do except acknowledge the change took */
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
/*
 * Line interrupt handler; intentionally empty — RX/TX completion
 * appears to be delivered through the xDMA callbacks installed in
 * setup_xdma() (xae_xdma_rx_intr/xae_xdma_tx_intr).
 */
static void
xae_intr(void *arg)
{
}
/*
 * Fetch the station MAC address from the device tree's
 * "local-mac-address" property into hwaddr (ETHER_ADDR_LEN bytes).
 * Returns 0 on success, EINVAL if the property is absent or not
 * exactly one Ethernet address long.
 */
static int
xae_get_hwaddr(struct xae_softc *sc, uint8_t *hwaddr)
{
	phandle_t node;
	int len;

	node = ofw_bus_get_node(sc->dev);

	/* Property must exist and be exactly ETHER_ADDR_LEN bytes. */
	len = OF_getproplen(node, "local-mac-address");
	if (len != ETHER_ADDR_LEN)
		return (EINVAL);

	OF_getprop(node, "local-mac-address", hwaddr, ETHER_ADDR_LEN);

	return (0);
}
/*
 * Busy-wait (up to ~200us) for the MDIO controller to report ready.
 * Returns 0 when MDIO_CTRL_READY is observed, 1 on timeout.
 */
static int
mdio_wait(struct xae_softc *sc)
{
	uint32_t reg;
	int timeout;

	/*
	 * Return success as soon as readiness is seen.  The previous
	 * do/while with a trailing "timeout <= 0" test had an off-by-one:
	 * readiness detected on the very last iteration (timeout == 0)
	 * was still reported as a failure.
	 */
	for (timeout = 200; timeout > 0; timeout--) {
		reg = READ4(sc, XAE_MDIO_CTRL);
		if (reg & MDIO_CTRL_READY)
			return (0);
		DELAY(1);
	}

	printf("Failed to get MDIO ready\n");

	return (1);
}
/*
 * MII bus read: issue an MDIO read of 'reg' on 'phy' and return the
 * 16-bit result from XAE_MDIO_READ.  Returns 0 if the MDIO engine
 * never becomes ready (indistinguishable from a register value of 0).
 */
static int
xae_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct xae_softc *sc;
	uint32_t mii;
	int rv;

	sc = device_get_softc(dev);

	if (mdio_wait(sc))
		return (0);

	mii = MDIO_CTRL_TX_OP_READ | MDIO_CTRL_INITIATE;
	mii |= (reg << MDIO_TX_REGAD_S);
	mii |= (phy << MDIO_TX_PHYAD_S);

	WRITE4(sc, XAE_MDIO_CTRL, mii);

	/* Wait for the read transaction to complete. */
	if (mdio_wait(sc))
		return (0);

	rv = READ4(sc, XAE_MDIO_READ);

	return (rv);
}
/*
 * MII bus write: stage 'val' in XAE_MDIO_WRITE, then issue the MDIO
 * write transaction for 'reg' on 'phy'.  Returns 0 on success, 1 if
 * the MDIO engine is not ready before or after the transaction.
 */
static int
xae_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct xae_softc *sc;
	uint32_t mii;

	sc = device_get_softc(dev);

	if (mdio_wait(sc))
		return (1);

	mii = MDIO_CTRL_TX_OP_WRITE | MDIO_CTRL_INITIATE;
	mii |= (reg << MDIO_TX_REGAD_S);
	mii |= (phy << MDIO_TX_PHYAD_S);

	/* Data must be staged before the control write starts the op. */
	WRITE4(sc, XAE_MDIO_WRITE, val);
	WRITE4(sc, XAE_MDIO_CTRL, mii);

	if (mdio_wait(sc))
		return (1);

	return (0);
}
/*
 * Board-specific workaround for the TI DP83867 PHY on the Xilinx
 * VCU118: enable SGMII mode, program speed-optimization settings,
 * apply the undocumented SGMII 6-wire/timer tweaks, then reset and
 * restart autonegotiation until PHY address 1 responds and reports
 * autonegotiation complete.  Both loops spin indefinitely if the PHY
 * never comes up.
 */
static void
xae_phy_fixup(struct xae_softc *sc)
{
	uint32_t reg;

	do {
		WRITE_TI_EREG(sc, DP83867_SGMIICTL1, SGMIICTL1_SGMII_6W);
		PHY_WR(sc, DP83867_PHYCR, PHYCR_SGMII_EN);

		reg = PHY_RD(sc, DP83867_CFG2);
		reg &= ~CFG2_SPEED_OPT_ATTEMPT_CNT_M;
		reg |= (CFG2_SPEED_OPT_ATTEMPT_CNT_4);
		reg |= CFG2_INTERRUPT_POLARITY;
		reg |= CFG2_SPEED_OPT_ENHANCED_EN;
		reg |= CFG2_SPEED_OPT_10M_EN;
		PHY_WR(sc, DP83867_CFG2, reg);

		WRITE_TI_EREG(sc, DP83867_CFG4, CFG4_SGMII_TMR);

		PHY_WR(sc, MII_BMCR,
		    BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_RESET);
	} while (PHY1_RD(sc, MII_BMCR) == 0x0ffff);

	/* Restart autonegotiation until it completes. */
	do {
		PHY1_WR(sc, MII_BMCR,
		    BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_STARTNEG);
		DELAY(40000);
	} while ((PHY1_RD(sc, MII_BMSR) & BMSR_ACOMP) == 0);
}
/*
 * Acquire the TX and RX xDMA controllers via the standard OFW "tx"
 * and "rx" dma names.  On partial failure the TX controller is
 * released again.  Returns 0 on success, ENXIO otherwise.
 */
static int
get_xdma_std(struct xae_softc *sc)
{

	sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
	if (sc->xdma_tx == NULL)
		return (ENXIO);

	sc->xdma_rx = xdma_ofw_get(sc->dev, "rx");
	if (sc->xdma_rx == NULL) {
		xdma_put(sc->xdma_tx);
		return (ENXIO);
	}

	return (0);
}
/*
 * Fallback xDMA acquisition: resolve the AXI DMA engine through the
 * "axistream-connected" device-tree property, then take one TX and
 * one RX controller handle from it, tagging each with its channel id.
 * Returns 0 on success, ENXIO on any lookup failure.
 * NOTE(review): a TX handle acquired before an RX failure is not
 * released here — confirm whether a leak is possible on this path.
 */
static int
get_xdma_axistream(struct xae_softc *sc)
{
	struct axidma_fdt_data *data;
	device_t dma_dev;
	phandle_t node;
	pcell_t prop;
	size_t len;

	node = ofw_bus_get_node(sc->dev);
	len = OF_getencprop(node, "axistream-connected", &prop, sizeof(prop));
	if (len != sizeof(prop)) {
		device_printf(sc->dev,
		    "%s: Couldn't get axistream-connected prop.\n", __func__);
		return (ENXIO);
	}
	dma_dev = OF_device_from_xref(prop);
	if (dma_dev == NULL) {
		device_printf(sc->dev, "Could not get DMA device by xref.\n");
		return (ENXIO);
	}

	sc->xdma_tx = xdma_get(sc->dev, dma_dev);
	if (sc->xdma_tx == NULL) {
		device_printf(sc->dev, "Could not find DMA controller.\n");
		return (ENXIO);
	}
	data = malloc(sizeof(struct axidma_fdt_data),
	    M_DEVBUF, (M_WAITOK | M_ZERO));
	data->id = AXIDMA_TX_CHAN;
	sc->xdma_tx->data = data;

	sc->xdma_rx = xdma_get(sc->dev, dma_dev);
	if (sc->xdma_rx == NULL) {
		device_printf(sc->dev, "Could not find DMA controller.\n");
		return (ENXIO);
	}
	data = malloc(sizeof(struct axidma_fdt_data),
	    M_DEVBUF, (M_WAITOK | M_ZERO));
	data->id = AXIDMA_RX_CHAN;
	sc->xdma_rx->data = data;

	return (0);
}
/*
 * Set up the xDMA transport: locate the TX/RX controllers (standard
 * OFW lookup, falling back to the "axistream-connected" property),
 * allocate one virtual channel per direction, install the completion
 * callbacks, attach an optional bounce-buffer vmem, and configure the
 * scatter-gather parameters of both channels.
 * Returns 0 on success, ENXIO on any failure.
 */
static int
setup_xdma(struct xae_softc *sc)
{
	device_t dev;
	vmem_t *vmem;
	int error;

	dev = sc->dev;

	/* Get xDMA controller */
	error = get_xdma_std(sc);

	if (error) {
		device_printf(sc->dev,
		    "Fallback to axistream-connected property\n");
		error = get_xdma_axistream(sc);
	}

	if (error) {
		device_printf(dev, "Could not find xDMA controllers.\n");
		return (ENXIO);
	}

	/* Alloc xDMA TX virtual channel. */
	sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, 0);
	if (sc->xchan_tx == NULL) {
		device_printf(dev, "Can't alloc virtual DMA TX channel.\n");
		return (ENXIO);
	}

	/* Setup interrupt handler. */
	error = xdma_setup_intr(sc->xchan_tx, 0,
	    xae_xdma_tx_intr, sc, &sc->ih_tx);
	if (error) {
		device_printf(sc->dev,
		    "Can't setup xDMA TX interrupt handler.\n");
		return (ENXIO);
	}

	/* Alloc xDMA RX virtual channel. */
	sc->xchan_rx = xdma_channel_alloc(sc->xdma_rx, 0);
	if (sc->xchan_rx == NULL) {
		device_printf(dev, "Can't alloc virtual DMA RX channel.\n");
		return (ENXIO);
	}

	/* Setup interrupt handler. */
	error = xdma_setup_intr(sc->xchan_rx, XDMA_INTR_NET,
	    xae_xdma_rx_intr, sc, &sc->ih_rx);
	if (error) {
		device_printf(sc->dev,
		    "Can't setup xDMA RX interrupt handler.\n");
		return (ENXIO);
	}

	/* Setup bounce buffer */
	vmem = xdma_get_memory(dev);
	if (vmem) {
		xchan_set_memory(sc->xchan_tx, vmem);
		xchan_set_memory(sc->xchan_rx, vmem);
	}

	xdma_prep_sg(sc->xchan_tx,
	    TX_QUEUE_SIZE,	/* xchan requests queue size */
	    MCLBYTES,	/* maxsegsize */
	    8,		/* maxnsegs */
	    16,		/* alignment */
	    0,		/* boundary */
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR);

	xdma_prep_sg(sc->xchan_rx,
	    RX_QUEUE_SIZE,	/* xchan requests queue size */
	    MCLBYTES,	/* maxsegsize */
	    1,		/* maxnsegs */
	    16,		/* alignment */
	    0,		/* boundary */
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR);

	return (0);
}
/*
 * Device probe: match enabled OFW nodes compatible with
 * "xlnx,axi-ethernet-1.00.a".
 */
static int
xae_probe(device_t dev)
{

	/* Only claim enabled, compatible nodes. */
	if (!ofw_bus_status_okay(dev) ||
	    !ofw_bus_is_compatible(dev, "xlnx,axi-ethernet-1.00.a"))
		return (ENXIO);

	device_set_desc(dev, "Xilinx AXI Ethernet");

	return (BUS_PROBE_DEFAULT);
}
/*
 * Device attach: set up xDMA, the softc mutex and buf_ring, bus
 * resources, the MAC address, the MDIO clock, the (unused) line
 * interrupt, the ifnet, and the MII/PHY; finally attach the Ethernet
 * interface and prime the RX channel.  Returns 0 on success, or an
 * errno on failure (earlier-acquired resources are not rolled back
 * on late failures — NOTE(review): confirm intentional).
 */
static int
xae_attach(device_t dev)
{
	struct xae_softc *sc;
	if_t ifp;
	phandle_t node;
	uint32_t reg;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;
	node = ofw_bus_get_node(dev);
	if (setup_xdma(sc) != 0) {
		device_printf(dev, "Could not setup xDMA.\n");
		return (ENXIO);
	}
	mtx_init(&sc->mtx, device_get_nameunit(sc->dev),
	    MTX_NETWORK_LOCK, MTX_DEF);
	sc->br = buf_ring_alloc(BUFRING_SIZE, M_DEVBUF,
	    M_NOWAIT, &sc->mtx);
	if (sc->br == NULL)
		return (ENOMEM);
	if (bus_alloc_resources(dev, xae_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}
	/* Memory interface */
	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);
	device_printf(sc->dev, "Identification: %x\n",
	    READ4(sc, XAE_IDENT));
	/* Get MAC addr */
	if (xae_get_hwaddr(sc, sc->macaddr)) {
		device_printf(sc->dev, "can't get mac\n");
		return (ENXIO);
	}
	/* Enable MII clock */
	reg = (MDIO_CLK_DIV_DEFAULT << MDIO_SETUP_CLK_DIV_S);
	reg |= MDIO_SETUP_ENABLE;
	WRITE4(sc, XAE_MDIO_SETUP, reg);
	if (mdio_wait(sc))
		return (ENXIO);
	callout_init_mtx(&sc->xae_callout, &sc->mtx, 0);
	/* Setup interrupt handler. */
	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, xae_intr, sc, &sc->intr_cookie);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler.\n");
		return (ENXIO);
	}
	/* Set up the ethernet interface. */
	sc->ifp = ifp = if_alloc(IFT_ETHER);
-	if (ifp == NULL) {
-		device_printf(dev, "could not allocate ifp.\n");
-		return (ENXIO);
-	}
-
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setcapabilities(ifp, IFCAP_VLAN_MTU);
	if_setcapenable(ifp, if_getcapabilities(ifp));
	if_settransmitfn(ifp, xae_transmit);
	if_setqflushfn(ifp, xae_qflush);
	if_setioctlfn(ifp, xae_ioctl);
	if_setinitfn(ifp, xae_init);
	if_setsendqlen(ifp, TX_DESC_COUNT - 1);
	if_setsendqready(ifp);
	if (xae_get_phyaddr(node, &sc->phy_addr) != 0)
		return (ENXIO);
	/* Attach the mii driver. */
	error = mii_attach(dev, &sc->miibus, ifp, xae_media_change,
	    xae_media_status, BMSR_DEFCAPMASK, sc->phy_addr,
	    MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "PHY attach failed\n");
		return (ENXIO);
	}
	sc->mii_softc = device_get_softc(sc->miibus);
	/* Apply vcu118 workaround. */
	if (OF_getproplen(node, "xlnx,vcu118") >= 0)
		xae_phy_fixup(sc);
	/* All ready to run, attach the ethernet interface. */
	ether_ifattach(ifp, sc->macaddr);
	sc->is_attached = true;
	/* Prime the RX channel with initial buffers and kick it off. */
	xae_rx_enqueue(sc, NUM_RX_MBUF);
	xdma_queue_submit(sc->xchan_rx);
	return (0);
}
/*
 * Device detach: stop the interface and detach the ifnet if attach
 * completed, then tear down the miibus child, ifnet, mutex, interrupt,
 * bus resources, and xDMA channels/controllers.  Always returns 0.
 */
static int
xae_detach(device_t dev)
{
	struct xae_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);

	KASSERT(mtx_initialized(&sc->mtx), ("%s: mutex not initialized",
	    device_get_nameunit(dev)));

	ifp = sc->ifp;

	/* Only cleanup if attach succeeded. */
	if (device_is_attached(dev)) {
		XAE_LOCK(sc);
		xae_stop_locked(sc);
		XAE_UNLOCK(sc);
		callout_drain(&sc->xae_callout);
		ether_ifdetach(ifp);
	}

	if (sc->miibus != NULL)
		device_delete_child(dev, sc->miibus);

	if (ifp != NULL)
		if_free(ifp);

	mtx_destroy(&sc->mtx);

	bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);

	bus_release_resources(dev, xae_spec, sc->res);

	xdma_channel_free(sc->xchan_tx);
	xdma_channel_free(sc->xchan_rx);
	xdma_put(sc->xdma_tx);
	xdma_put(sc->xdma_rx);

	return (0);
}
/*
 * MII status-change callback: track link state from the PHY's media
 * status and program the MAC speed register to match the negotiated
 * media (10/100/1000).  Unsupported or inactive media leaves the
 * speed register untouched and marks the link down.
 */
static void
xae_miibus_statchg(device_t dev)
{
	struct xae_softc *sc;
	struct mii_data *mii;
	uint32_t reg;

	/*
	 * Called by the MII bus driver when the PHY establishes
	 * link to set the MAC interface registers.
	 */

	sc = device_get_softc(dev);

	XAE_ASSERT_LOCKED(sc);

	mii = sc->mii_softc;

	if (mii->mii_media_status & IFM_ACTIVE)
		sc->link_is_up = true;
	else
		sc->link_is_up = false;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		reg = SPEED_1000;
		break;
	case IFM_100_TX:
		reg = SPEED_100;
		break;
	case IFM_10_T:
		reg = SPEED_10;
		break;
	case IFM_NONE:
		sc->link_is_up = false;
		return;
	default:
		sc->link_is_up = false;
		device_printf(dev, "Unsupported media %u\n",
		    IFM_SUBTYPE(mii->mii_media_active));
		return;
	}

	WRITE4(sc, XAE_SPEED, reg);
}
/* newbus method table wiring probe/attach/detach and the MII bus ops. */
static device_method_t xae_methods[] = {
	DEVMETHOD(device_probe,		xae_probe),
	DEVMETHOD(device_attach,	xae_attach),
	DEVMETHOD(device_detach,	xae_detach),

	/* MII Interface */
	DEVMETHOD(miibus_readreg,	xae_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	xae_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	xae_miibus_statchg),
	{ 0, 0 }
};

driver_t xae_driver = {
	"xae",
	xae_methods,
	sizeof(struct xae_softc),
};

/* Register the driver on simplebus and attach a miibus child. */
DRIVER_MODULE(xae, simplebus, xae_driver, 0, 0);
DRIVER_MODULE(miibus, xae, miibus_driver, 0, 0);
MODULE_DEPEND(xae, ether, 1, 1, 1);
MODULE_DEPEND(xae, miibus, 1, 1, 1);
diff --git a/sys/dev/xl/if_xl.c b/sys/dev/xl/if_xl.c
index 573bc6581577..32f95549b774 100644
--- a/sys/dev/xl/if_xl.c
+++ b/sys/dev/xl/if_xl.c
@@ -1,3291 +1,3286 @@
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) 1997, 1998, 1999
* Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
/*
* 3Com 3c90x Etherlink XL PCI NIC driver
*
* Supports the 3Com "boomerang", "cyclone" and "hurricane" PCI
* bus-master chips (3c90x cards and embedded controllers) including
* the following:
*
* 3Com 3c900-TPO 10Mbps/RJ-45
* 3Com 3c900-COMBO 10Mbps/RJ-45,AUI,BNC
* 3Com 3c905-TX 10/100Mbps/RJ-45
* 3Com 3c905-T4 10/100Mbps/RJ-45
* 3Com 3c900B-TPO 10Mbps/RJ-45
* 3Com 3c900B-COMBO 10Mbps/RJ-45,AUI,BNC
* 3Com 3c900B-TPC 10Mbps/RJ-45,BNC
* 3Com 3c900B-FL 10Mbps/Fiber-optic
* 3Com 3c905B-COMBO 10/100Mbps/RJ-45,AUI,BNC
* 3Com 3c905B-TX 10/100Mbps/RJ-45
* 3Com 3c905B-FL/FX 10/100Mbps/Fiber-optic
* 3Com 3c905C-TX 10/100Mbps/RJ-45 (Tornado ASIC)
* 3Com 3c980-TX 10/100Mbps server adapter (Hurricane ASIC)
* 3Com 3c980C-TX 10/100Mbps server adapter (Tornado ASIC)
* 3Com 3cSOHO100-TX 10/100Mbps/RJ-45 (Hurricane ASIC)
* 3Com 3c450-TX 10/100Mbps/RJ-45 (Tornado ASIC)
* 3Com 3c555 10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
* 3Com 3c556 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
* 3Com 3c556B 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
* 3Com 3c575TX 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
* 3Com 3c575B 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
* 3Com 3c575C 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
* 3Com 3cxfem656 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
* 3Com 3cxfem656b 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
* 3Com 3cxfem656c 10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
* Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
* Dell on-board 3c920 10/100Mbps/RJ-45
* Dell Precision on-board 3c905B 10/100Mbps/RJ-45
* Dell Latitude laptop docking station embedded 3c905-TX
*
* Written by Bill Paul <wpaul@ctr.columbia.edu>
* Electrical Engineering Department
* Columbia University, New York City
*/
/*
* The 3c90x series chips use a bus-master DMA interface for transferring
* packets to and from the controller chip. Some of the "vortex" cards
* (3c59x) also supported a bus master mode, however for those chips
* you could only DMA packets to/from a contiguous memory buffer. For
* transmission this would mean copying the contents of the queued mbuf
* chain into an mbuf cluster and then DMAing the cluster. This extra
* copy would sort of defeat the purpose of the bus master support for
* any packet that doesn't fit into a single mbuf.
*
* By contrast, the 3c90x cards support a fragment-based bus master
* mode where mbuf chains can be encapsulated using TX descriptors.
* This is similar to other PCI chips such as the Texas Instruments
* ThunderLAN and the Intel 82557/82558.
*
* The "vortex" driver (if_vx.c) happens to work for the "boomerang"
* bus master chips because they maintain the old PIO interface for
* backwards compatibility, but starting with the 3c905B and the
* "cyclone" chips, the compatibility interface has been dropped.
* Since using bus master DMA is a big win, we use this driver to
* support the PCI "boomerang" chips even though they work with the
* "vortex" driver in order to obtain better performance.
*/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
MODULE_DEPEND(xl, pci, 1, 1, 1);
MODULE_DEPEND(xl, ether, 1, 1, 1);
MODULE_DEPEND(xl, miibus, 1, 1, 1);
/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
#include <dev/xl/if_xlreg.h>
/*
* TX Checksumming is disabled by default for two reasons:
* - TX Checksumming will occasionally produce corrupt packets
* - TX Checksumming seems to reduce performance
*
* Only 905B/C cards were reported to have this problem, it is possible
* that later chips _may_ be immune.
*/
#define XL905B_TXCSUM_BROKEN 1
#ifdef XL905B_TXCSUM_BROKEN
#define XL905B_CSUM_FEATURES 0
#else
#define XL905B_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
#endif
/*
* Various supported device vendors/types and their names.
*/
/*
 * PCI vendor/device ID to description table, consumed by xl_probe() and
 * exported for PNP matching via MODULE_PNP_INFO. Terminated by the
 * all-zero sentinel entry.
 */
static const struct xl_type xl_devs[] = {
	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT,
		"3Com 3c900-TPO Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO,
		"3Com 3c900-COMBO Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_10_100BT,
		"3Com 3c905-TX Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_100BT4,
		"3Com 3c905-T4 Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT,
		"3Com 3c900B-TPO Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_COMBO,
		"3Com 3c900B-COMBO Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_TPC,
		"3Com 3c900B-TPC Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10FL,
		"3Com 3c900B-FL Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT,
		"3Com 3c905B-TX Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT4,
		"3Com 3c905B-T4 Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10_100FX,
		"3Com 3c905B-FX/SC Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10_100_COMBO,
		"3Com 3c905B-COMBO Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT,
		"3Com 3c905C-TX Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B,
		"3Com 3c920B-EMB Integrated Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B_WNM,
		"3Com 3c920B-EMB-WNM Integrated Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT_SERV,
		"3Com 3c980 Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_SERV,
		"3Com 3c980C Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_SOHO100TX,
		"3Com 3cSOHO100-TX OfficeConnect" },
	{ TC_VENDORID, TC_DEVICEID_TORNADO_HOMECONNECT,
		"3Com 3c450-TX HomeConnect" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_555,
		"3Com 3c555 Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_556,
		"3Com 3c556 Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_556B,
		"3Com 3c556B Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_575A,
		"3Com 3c575TX Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_575B,
		"3Com 3c575B Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_575C,
		"3Com 3c575C Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_656,
		"3Com 3c656 Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_HURRICANE_656B,
		"3Com 3c656B Fast Etherlink XL" },
	{ TC_VENDORID, TC_DEVICEID_TORNADO_656C,
		"3Com 3c656C Fast Etherlink XL" },
	/* Sentinel entry; keep last. */
	{ 0, 0, NULL }
};
static int xl_probe(device_t);
static int xl_attach(device_t);
static int xl_detach(device_t);
static int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *);
static void xl_tick(void *);
static void xl_stats_update(struct xl_softc *);
static int xl_encap(struct xl_softc *, struct xl_chain *, struct mbuf **);
static int xl_rxeof(struct xl_softc *);
static void xl_rxeof_task(void *, int);
static int xl_rx_resync(struct xl_softc *);
static void xl_txeof(struct xl_softc *);
static void xl_txeof_90xB(struct xl_softc *);
static void xl_txeoc(struct xl_softc *);
static void xl_intr(void *);
static void xl_start(if_t);
static void xl_start_locked(if_t);
static void xl_start_90xB_locked(if_t);
static int xl_ioctl(if_t, u_long, caddr_t);
static void xl_init(void *);
static void xl_init_locked(struct xl_softc *);
static void xl_stop(struct xl_softc *);
static int xl_watchdog(struct xl_softc *);
static int xl_shutdown(device_t);
static int xl_suspend(device_t);
static int xl_resume(device_t);
static void xl_setwol(struct xl_softc *);
#ifdef DEVICE_POLLING
static int xl_poll(if_t ifp, enum poll_cmd cmd, int count);
static int xl_poll_locked(if_t ifp, enum poll_cmd cmd, int count);
#endif
static int xl_ifmedia_upd(if_t);
static void xl_ifmedia_sts(if_t, struct ifmediareq *);
static int xl_eeprom_wait(struct xl_softc *);
static int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int);
static void xl_rxfilter(struct xl_softc *);
static void xl_rxfilter_90x(struct xl_softc *);
static void xl_rxfilter_90xB(struct xl_softc *);
static void xl_setcfg(struct xl_softc *);
static void xl_setmode(struct xl_softc *, int);
static void xl_reset(struct xl_softc *);
static int xl_list_rx_init(struct xl_softc *);
static int xl_list_tx_init(struct xl_softc *);
static int xl_list_tx_init_90xB(struct xl_softc *);
static void xl_wait(struct xl_softc *);
static void xl_mediacheck(struct xl_softc *);
static void xl_choose_media(struct xl_softc *sc, int *media);
static void xl_choose_xcvr(struct xl_softc *, int);
static void xl_dma_map_addr(void *, bus_dma_segment_t *, int, int);
#ifdef notdef
static void xl_testpacket(struct xl_softc *);
#endif
static int xl_miibus_readreg(device_t, int, int);
static int xl_miibus_writereg(device_t, int, int, int);
static void xl_miibus_statchg(device_t);
static void xl_miibus_mediainit(device_t);
/*
* MII bit-bang glue
*/
static uint32_t xl_mii_bitbang_read(device_t);
static void xl_mii_bitbang_write(device_t, uint32_t);
/*
 * Operations/pin-mapping table handed to the generic mii_bitbang(4)
 * code. Note the same bit (XL_MII_DATA) serves as both MDO and MDI:
 * the data line is bidirectional and XL_MII_DIR selects its direction.
 */
static const struct mii_bitbang_ops xl_mii_bitbang_ops = {
	xl_mii_bitbang_read,
	xl_mii_bitbang_write,
	{
		XL_MII_DATA,	/* MII_BIT_MDO */
		XL_MII_DATA,	/* MII_BIT_MDI */
		XL_MII_CLK,	/* MII_BIT_MDC */
		XL_MII_DIR,	/* MII_BIT_DIR_HOST_PHY */
		0,		/* MII_BIT_DIR_PHY_HOST */
	}
};
/* newbus method table: device lifecycle plus the MII bus glue. */
static device_method_t xl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		xl_probe),
	DEVMETHOD(device_attach,	xl_attach),
	DEVMETHOD(device_detach,	xl_detach),
	DEVMETHOD(device_shutdown,	xl_shutdown),
	DEVMETHOD(device_suspend,	xl_suspend),
	DEVMETHOD(device_resume,	xl_resume),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	xl_miibus_readreg),
	DEVMETHOD(miibus_writereg,	xl_miibus_writereg),
	DEVMETHOD(miibus_statchg,	xl_miibus_statchg),
	DEVMETHOD(miibus_mediainit,	xl_miibus_mediainit),
	DEVMETHOD_END
};
/* Driver declaration: name, methods, and per-device softc size. */
static driver_t xl_driver = {
	"xl",
	xl_methods,
	sizeof(struct xl_softc)
};
DRIVER_MODULE_ORDERED(xl, pci, xl_driver, NULL, NULL, SI_ORDER_ANY);
DRIVER_MODULE(miibus, xl, miibus_driver, NULL, NULL);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, xl, xl_devs,
nitems(xl_devs) - 1);
/*
 * Callback for bus_dmamap_load(): store the bus address of the single
 * DMA segment in the caller-supplied u_int32_t pointed to by 'arg'.
 *
 * Per bus_dma(9), 'segs' is only valid when 'error' is zero, so bail
 * out without touching anything if the load failed; the caller's
 * address variable keeps whatever value it had.
 */
static void
xl_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *paddr;

	if (error != 0)
		return;

	paddr = arg;
	*paddr = segs->ds_addr;
}
/*
* Murphy's law says that it's possible the chip can wedge and
* the 'command in progress' bit may never clear. Hence, we wait
* only a finite amount of time to avoid getting caught in an
* infinite loop. Normally this delay routine would be a macro,
* but it isn't called during normal operation so we can afford
* to make it a function. Suppress warning when card gone.
*/
/*
 * Spin until the chip's 'command in progress' bit clears or we give
 * up after XL_TIMEOUT polls. Complain only if the card is still
 * physically present (suppresses noise on surprise removal).
 */
static void
xl_wait(struct xl_softc *sc)
{
	int spins;

	for (spins = XL_TIMEOUT; spins > 0; spins--) {
		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
			return;
	}

	if (bus_child_present(sc->xl_dev))
		device_printf(sc->xl_dev, "command never completed!\n");
}
/*
* MII access routines are provided for adapters with external
* PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
* autoneg logic that's faked up to look like a PHY (3c905B-TX).
* Note: if you don't perform the MDIO operations just right,
* it's possible to end up with code that works correctly with
* some chips/CPUs/processor speeds/bus speeds/etc but not
* with others.
*/
/*
* Read the MII serial port for the MII bit-bang module.
*/
/*
 * Read the PHY management register for the MII bit-bang module.
 * The caller (xl_miibus_readreg) has already selected window 4.
 */
static uint32_t
xl_mii_bitbang_read(device_t dev)
{
	struct xl_softc *sc = device_get_softc(dev);
	uint32_t val;

	val = CSR_READ_2(sc, XL_W4_PHY_MGMT);
	/* Keep MDIO accesses ordered with respect to each other. */
	CSR_BARRIER(sc, XL_W4_PHY_MGMT, 2,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (val);
}
/*
* Write the MII serial port for the MII bit-bang module.
*/
/*
 * Write the PHY management register for the MII bit-bang module.
 * The caller (xl_miibus_writereg) has already selected window 4.
 */
static void
xl_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct xl_softc *sc = device_get_softc(dev);

	CSR_WRITE_2(sc, XL_W4_PHY_MGMT, val);
	/* Keep MDIO accesses ordered with respect to each other. */
	CSR_BARRIER(sc, XL_W4_PHY_MGMT, 2,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
/*
 * miibus read method: select the window holding the PHY management
 * register, then let the generic bit-bang code clock the frame out.
 * Note: 'sc' looks unused but is referenced by the XL_SEL_WIN() macro.
 */
static int
xl_miibus_readreg(device_t dev, int phy, int reg)
{
	struct xl_softc		*sc;

	sc = device_get_softc(dev);

	/* Select the window 4. */
	XL_SEL_WIN(4);

	return (mii_bitbang_readreg(dev, &xl_mii_bitbang_ops, phy, reg));
}
/*
 * miibus write method: counterpart of xl_miibus_readreg(). Always
 * returns 0; the bit-bang write itself cannot fail.
 * Note: 'sc' looks unused but is referenced by the XL_SEL_WIN() macro.
 */
static int
xl_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct xl_softc		*sc;

	sc = device_get_softc(dev);

	/* Select the window 4. */
	XL_SEL_WIN(4);

	mii_bitbang_writereg(dev, &xl_mii_bitbang_ops, phy, reg, data);

	return (0);
}
/*
 * miibus status-change method: re-program the internal config for the
 * active transceiver, then mirror the PHY's resolved duplex (and, on
 * 905B-class chips, RX flow control) into the MAC control register.
 */
static void
xl_miibus_statchg(device_t dev)
{
	struct xl_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->xl_miibus);
	uint8_t macctl;
	int fdx;

	xl_setcfg(sc);

	XL_SEL_WIN(3);
	macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);

	fdx = (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0;
	if (fdx)
		macctl |= XL_MACCTRL_DUPLEX;
	else
		macctl &= ~XL_MACCTRL_DUPLEX;

	/* Only the 905B has a flow control enable bit. */
	if (sc->xl_type == XL_TYPE_905B) {
		if (fdx && (IFM_OPTIONS(mii->mii_media_active) &
		    IFM_ETH_RXPAUSE) != 0)
			macctl |= XL_MACCTRL_FLOW_CONTROL_ENB;
		else
			macctl &= ~XL_MACCTRL_FLOW_CONTROL_ENB;
	}

	CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
}
/*
* Special support for the 3c905B-COMBO. This card has 10/100 support
* plus BNC and AUI ports. This means we will have both an miibus attached
* plus some non-MII media settings. In order to allow this, we have to
* add the extra media to the miibus's ifmedia struct, but we can't do
* that during xl_attach() because the miibus hasn't been attached yet.
* So instead, we wait until the miibus probe/attach is done, at which
* point we will get a callback telling is that it's safe to add our
* extra media.
*/
/*
 * miibus mediainit callback: runs after the miibus attach completes,
 * which is the earliest safe point to graft the non-MII media (AUI,
 * 10baseFL, BNC) of COMBO-style cards onto the miibus ifmedia list.
 */
static void
xl_miibus_mediainit(device_t dev)
{
	struct xl_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->xl_miibus);
	struct ifmedia *ifm = &mii->mii_media;

	if ((sc->xl_media & (XL_MEDIAOPT_AUI | XL_MEDIAOPT_10FL)) != 0) {
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			/* A 10baseFL board in disguise. */
			if (bootverbose)
				device_printf(sc->xl_dev, "found 10baseFL\n");
			ifmedia_add(ifm, IFM_ETHER | IFM_10_FL, 0, NULL);
			ifmedia_add(ifm, IFM_ETHER | IFM_10_FL | IFM_HDX, 0,
			    NULL);
			if ((sc->xl_caps & XL_CAPS_FULL_DUPLEX) != 0)
				ifmedia_add(ifm,
				    IFM_ETHER | IFM_10_FL | IFM_FDX, 0, NULL);
		} else {
			if (bootverbose)
				device_printf(sc->xl_dev, "found AUI\n");
			ifmedia_add(ifm, IFM_ETHER | IFM_10_5, 0, NULL);
		}
	}

	if ((sc->xl_media & XL_MEDIAOPT_BNC) != 0) {
		if (bootverbose)
			device_printf(sc->xl_dev, "found BNC\n");
		ifmedia_add(ifm, IFM_ETHER | IFM_10_2, 0, NULL);
	}
}
/*
* The EEPROM is slow: give it time to come ready after issuing
* it a command.
*/
/*
 * Wait for the EEPROM to finish the last command issued to it.
 * Returns 0 once the busy bit clears, 1 if it never does within
 * 100 polls of ~162us each.
 */
static int
xl_eeprom_wait(struct xl_softc *sc)
{
	int tries;

	for (tries = 0; tries < 100; tries++) {
		if ((CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY) == 0)
			return (0);
		DELAY(162);
	}

	device_printf(sc->xl_dev, "eeprom failed to come ready\n");
	return (1);
}
/*
 * Read a sequence of words from the EEPROM. Note that ethernet address
 * data is stored in the EEPROM in network byte order.
 *
 * 'off' is a word offset, 'cnt' a word count; each word is stored at
 * dest + i*2, byte-swapped from network order when 'swap' is set.
 * Returns 0 on success, 1 if the EEPROM never came ready.
 */
static int
xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int			err = 0, i;
	u_int16_t		word = 0, *ptr;

/* 5-bit offsets are split across the command word; 8-bit are not. */
#define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
#define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F)
	/*
	 * XXX: WARNING! DANGER!
	 * It's easy to accidentally overwrite the rom content!
	 * Note: the 3c575 uses 8bit EEPROM offsets.
	 */
	XL_SEL_WIN(0);

	if (xl_eeprom_wait(sc))
		return (1);

	/* Some variants place the data 0x30 words into the EEPROM. */
	if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
		off += 0x30;

	for (i = 0; i < cnt; i++) {
		/* Issue the read command for this word, then poll. */
		if (sc->xl_flags & XL_FLAG_8BITROM)
			CSR_WRITE_2(sc, XL_W0_EE_CMD,
			    XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
		else
			CSR_WRITE_2(sc, XL_W0_EE_CMD,
			    XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
		err = xl_eeprom_wait(sc);
		if (err)
			break;
		word = CSR_READ_2(sc, XL_W0_EE_DATA);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return (err ? 1 : 0);
}
/*
 * Program the RX filter, dispatching to the variant matching this
 * chip generation (the 905B has a hash filter, older chips do not).
 */
static void
xl_rxfilter(struct xl_softc *sc)
{
	if (sc->xl_type != XL_TYPE_905B)
		xl_rxfilter_90x(sc);
	else
		xl_rxfilter_90xB(sc);
}
/*
* NICs older than the 3c905B have only one multicast option, which
* is to enable reception of all multicast frames.
*/
/*
 * if_foreach_llmaddr() callback for pre-905B chips: any multicast
 * membership at all forces the all-multicast filter bit, since these
 * chips have no hash filter. Always counts the address (returns 1).
 */
static u_int
xl_check_maddr_90x(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint8_t *filter = arg;

	*filter |= XL_RXFILTER_ALLMULTI;
	return (1);
}
/*
 * Program the RX filter on pre-905B chips. These only offer an
 * all-or-nothing multicast option, so any joined group enables
 * reception of all multicast frames.
 */
static void
xl_rxfilter_90x(struct xl_softc *sc)
{
	if_t ifp;
	int flags;
	u_int8_t rxfilt;

	XL_LOCK_ASSERT(sc);

	ifp = sc->xl_ifp;
	flags = if_getflags(ifp);

	XL_SEL_WIN(5);
	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
	rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
	    XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL);

	/* Always receive frames destined for this station. */
	rxfilt |= XL_RXFILTER_INDIVIDUAL;

	/* Accept broadcasts when the interface wants them. */
	if (flags & IFF_BROADCAST)
		rxfilt |= XL_RXFILTER_BROADCAST;

	if ((flags & (IFF_PROMISC | IFF_ALLMULTI)) == 0) {
		/* Enable all-multicast only if some group is joined. */
		if_foreach_llmaddr(sc->xl_ifp, xl_check_maddr_90x, &rxfilt);
	} else {
		if (flags & IFF_PROMISC)
			rxfilt |= XL_RXFILTER_ALLFRAMES;
		if (flags & IFF_ALLMULTI)
			rxfilt |= XL_RXFILTER_ALLMULTI;
	}

	CSR_WRITE_2(sc, XL_COMMAND, rxfilt | XL_CMD_RX_SET_FILT);
	XL_SEL_WIN(7);
}
/*
* 3c905B adapters have a hash filter that we can program.
* Note: the 3c905B currently only supports a 64-bit
* hash table, which means we really only need 6 bits,
* but the manual indicates that future chip revisions
* will have a 256-bit hash table, hence the routine
* is set up to calculate 8 bits of position info in
* case we need it some day.
* Note II, The Sequel: _CURRENT_ versions of the
* 3c905B have a 256 bit hash table. This means we have
* to use all 8 bits regardless. On older cards, the
* upper 2 bits will be ignored. Grrrr....
*/
/*
 * if_foreach_llmaddr() callback for 905B chips: set the hash-table
 * bit selected by the low 8 bits of the big-endian CRC of the
 * multicast address. Always counts the address (returns 1).
 */
static u_int
xl_check_maddr_90xB(void *arg, struct sockaddr_dl *sdl, u_int count)
{
	struct xl_softc *sc = arg;
	uint16_t bit;

	bit = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) & 0xFF;
	CSR_WRITE_2(sc, XL_COMMAND, bit | XL_CMD_RX_SET_HASH | XL_HASH_SET);
	return (1);
}
/*
 * Program the RX filter on 905B-class chips, which have a real
 * multicast hash filter (see the table-size notes above).
 */
static void
xl_rxfilter_90xB(struct xl_softc *sc)
{
	if_t			ifp;
	int			i;
	u_int8_t		rxfilt;

	XL_LOCK_ASSERT(sc);

	ifp = sc->xl_ifp;

	XL_SEL_WIN(5);
	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
	rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
	    XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL |
	    XL_RXFILTER_MULTIHASH);

	/* Set the individual bit to receive frames for this host only. */
	rxfilt |= XL_RXFILTER_INDIVIDUAL;

	/* Set capture broadcast bit to capture broadcast frames. */
	if (if_getflags(ifp) & IFF_BROADCAST)
		rxfilt |= XL_RXFILTER_BROADCAST;

	/* If we want promiscuous mode, set the allframes bit. */
	if (if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) {
		if (if_getflags(ifp) & IFF_PROMISC)
			rxfilt |= XL_RXFILTER_ALLFRAMES;
		if (if_getflags(ifp) & IFF_ALLMULTI)
			rxfilt |= XL_RXFILTER_ALLMULTI;
	} else {
		/* First, zot all the existing hash bits. */
		for (i = 0; i < XL_HASHFILT_SIZE; i++)
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH | i);

		/* Now program new ones. */
		if (if_foreach_llmaddr(sc->xl_ifp, xl_check_maddr_90xB, sc) > 0)
			rxfilt |= XL_RXFILTER_MULTIHASH;
	}

	CSR_WRITE_2(sc, XL_COMMAND, rxfilt | XL_CMD_RX_SET_FILT);
	XL_SEL_WIN(7);
}
/*
 * Point the internal-config connector field at the MII/autoneg
 * transceiver appropriate for the card's media options, and make
 * sure the coax (BNC) transceiver is shut off.
 */
static void
xl_setcfg(struct xl_softc *sc)
{
	u_int32_t cfg;

	/*XL_LOCK_ASSERT(sc);*/
	XL_SEL_WIN(3);
	cfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
	cfg &= ~XL_ICFG_CONNECTOR_MASK;

	if ((sc->xl_media & (XL_MEDIAOPT_MII | XL_MEDIAOPT_BT4)) != 0)
		cfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
	if ((sc->xl_media & XL_MEDIAOPT_BTX) != 0)
		cfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);

	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, cfg);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
}
/*
 * Force the chip onto a specific non-autonegotiated media: pick the
 * transceiver in the internal config register, adjust the window-4
 * media status bits (link beat, jabber guard, SQE), set MAC duplex,
 * and start/stop the coax transceiver as needed.
 */
static void
xl_setmode(struct xl_softc *sc, int media)
{
	u_int32_t		icfg;
	u_int16_t		mediastat;
	char			*pmsg = "", *dmsg = "";

	XL_LOCK_ASSERT(sc);

	XL_SEL_WIN(4);
	mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
	XL_SEL_WIN(3);
	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);

	if (sc->xl_media & XL_MEDIAOPT_BT) {
		if (IFM_SUBTYPE(media) == IFM_10_T) {
			pmsg = "10baseT transceiver";
			sc->xl_xcvr = XL_XCVR_10BT;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
			mediastat |= XL_MEDIASTAT_LINKBEAT |
			    XL_MEDIASTAT_JABGUARD;
			mediastat &= ~XL_MEDIASTAT_SQEENB;
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BFX) {
		if (IFM_SUBTYPE(media) == IFM_100_FX) {
			pmsg = "100baseFX port";
			sc->xl_xcvr = XL_XCVR_100BFX;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
			mediastat |= XL_MEDIASTAT_LINKBEAT;
			mediastat &= ~XL_MEDIASTAT_SQEENB;
		}
	}

	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
		if (IFM_SUBTYPE(media) == IFM_10_5) {
			pmsg = "AUI port";
			sc->xl_xcvr = XL_XCVR_AUI;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
			mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
			    XL_MEDIASTAT_JABGUARD);
			/*
			 * NOTE(review): '|= ~XL_MEDIASTAT_SQEENB' sets every
			 * bit EXCEPT SQEENB (re-setting the bits cleared just
			 * above); all other branches use '&= ~' or a plain
			 * '|='. Possibly intended as
			 * 'mediastat |= XL_MEDIASTAT_SQEENB' (SQE test is
			 * conventional on AUI) — confirm against the 3c90x
			 * documentation before changing.
			 */
			mediastat |= ~XL_MEDIASTAT_SQEENB;
		}
		if (IFM_SUBTYPE(media) == IFM_10_FL) {
			pmsg = "10baseFL transceiver";
			sc->xl_xcvr = XL_XCVR_AUI;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
			mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
			    XL_MEDIASTAT_JABGUARD);
			/* NOTE(review): same suspicious '|= ~' as above. */
			mediastat |= ~XL_MEDIASTAT_SQEENB;
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BNC) {
		if (IFM_SUBTYPE(media) == IFM_10_2) {
			pmsg = "AUI port";
			sc->xl_xcvr = XL_XCVR_COAX;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
			mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
			    XL_MEDIASTAT_JABGUARD | XL_MEDIASTAT_SQEENB);
		}
	}

	/* 100baseFX is always full duplex. */
	if ((media & IFM_GMASK) == IFM_FDX ||
	    IFM_SUBTYPE(media) == IFM_100_FX) {
		dmsg = "full";
		XL_SEL_WIN(3);
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
	} else {
		dmsg = "half";
		XL_SEL_WIN(3);
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
		    (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
	}

	/* The coax transceiver runs only for BNC media. */
	if (IFM_SUBTYPE(media) == IFM_10_2)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
	else
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);

	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);

	XL_SEL_WIN(4);
	CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);

	DELAY(800);
	XL_SEL_WIN(7);

	device_printf(sc->xl_dev, "selecting %s, %s duplex\n", pmsg, dmsg);
}
/*
 * Hard-reset the chip, then the RX and TX engines, re-applying the
 * LED/MII power-inversion quirks that a reset clears. The delays are
 * load-bearing; see the inline comments.
 */
static void
xl_reset(struct xl_softc *sc)
{
	int			i;

	XL_LOCK_ASSERT(sc);

	XL_SEL_WIN(0);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
	    ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
	     XL_RESETOPT_DISADVFD:0));

	/*
	 * If we're using memory mapped register mode, pause briefly
	 * after issuing the reset command before trying to access any
	 * other registers. With my 3c575C CardBus card, failing to do
	 * this results in the system locking up while trying to poll
	 * the command busy bit in the status register.
	 */
	if (sc->xl_flags & XL_FLAG_USE_MMIO)
		DELAY(100000);

	/* Poll for reset completion; give up after XL_TIMEOUT tries. */
	for (i = 0; i < XL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
			break;
	}

	if (i == XL_TIMEOUT)
		device_printf(sc->xl_dev, "reset didn't complete\n");

	/* Reset TX and RX. */
	/* Note: the RX reset takes an absurd amount of time
	 * on newer versions of the Tornado chips such as those
	 * on the 3c905CX and newer 3c908C cards. We wait an
	 * extra amount of time so that xl_wait() doesn't complain
	 * and annoy the users.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	DELAY(100000);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);

	/* Reset clears the inversion quirk bits; put them back. */
	if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
	    sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
		XL_SEL_WIN(2);
		CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS,
		    CSR_READ_2(sc, XL_W2_RESET_OPTIONS) |
		    ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR) ?
		     XL_RESETOPT_INVERT_LED : 0) |
		    ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR) ?
		     XL_RESETOPT_INVERT_MII : 0));
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(100000);
}
/*
* Probe for a 3Com Etherlink XL chip. Check the PCI vendor and device
* IDs against our list and return a device name if we find a match.
*/
/*
 * Probe for a 3Com Etherlink XL chip: match the PCI vendor/device
 * pair against xl_devs[] and set the device description on a hit.
 */
static int
xl_probe(device_t dev)
{
	const struct xl_type *t;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (t = xl_devs; t->xl_name != NULL; t++) {
		if (vid == t->xl_vid && did == t->xl_did) {
			device_set_desc(dev, t->xl_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
/*
* This routine is a kludge to work around possible hardware faults
* or manufacturing defects that can cause the media options register
* (or reset options register, as it's called for the first generation
* 3c90x adapters) to return an incorrect result. I have encountered
* one Dell Latitude laptop docking station with an integrated 3c905-TX
* which doesn't have any of the 'mediaopt' bits set. This screws up
* the attach routine pretty badly because it doesn't know what media
* to look for. If we find ourselves in this predicament, this routine
* will try to guess the media options values and warn the user of a
* possible manufacturing defect with his adapter/system/whatever.
*/
/* See the long comment above for why this sanity check exists. */
static void
xl_mediacheck(struct xl_softc *sc)
{
	/*
	 * If some of the media options bits are set, assume they are
	 * correct. If not, try to figure it out down below.
	 * XXX I should check for 10baseFL, but I don't have an adapter
	 * to test with.
	 */
	if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
		/*
		 * Check the XCVR value. If it's not in the normal range
		 * of values, we need to fake it up here.
		 */
		if (sc->xl_xcvr <= XL_XCVR_AUTO)
			return;
		else {
			device_printf(sc->xl_dev,
			    "bogus xcvr value in EEPROM (%x)\n", sc->xl_xcvr);
			device_printf(sc->xl_dev,
			    "choosing new default based on card type\n");
		}
	} else {
		/* 10baseFL boards legitimately report only the 10FL bit. */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media & XL_MEDIAOPT_10FL)
			return;
		device_printf(sc->xl_dev,
"WARNING: no media options bits set in the media options register!!\n");
		device_printf(sc->xl_dev,
"this could be a manufacturing defect in your adapter or system\n");
		device_printf(sc->xl_dev,
"attempting to guess media type; you should probably consult your vendor\n");
	}

	/* Fall back to guessing from the PCI device ID. */
	xl_choose_xcvr(sc, 1);
}
/*
 * Guess the media options and default transceiver from the product ID
 * stored in the EEPROM (the same value loaded into the PCI device ID
 * register). Used when the media options register looks bogus; see
 * xl_mediacheck(). 'verbose' controls the per-guess console messages.
 */
static void
xl_choose_xcvr(struct xl_softc *sc, int verbose)
{
	u_int16_t		devid;

	/*
	 * Read the device ID from the EEPROM.
	 * This is what's loaded into the PCI device ID register, so it has
	 * to be correct otherwise we wouldn't have gotten this far.
	 */
	xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);

	switch (devid) {
	case TC_DEVICEID_BOOMERANG_10BT:	/* 3c900-TPO */
	case TC_DEVICEID_KRAKATOA_10BT:		/* 3c900B-TPO */
		sc->xl_media = XL_MEDIAOPT_BT;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			device_printf(sc->xl_dev,
			    "guessing 10BaseT transceiver\n");
		break;
	case TC_DEVICEID_BOOMERANG_10BT_COMBO:	/* 3c900-COMBO */
	case TC_DEVICEID_KRAKATOA_10BT_COMBO:	/* 3c900B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			device_printf(sc->xl_dev,
			    "guessing COMBO (AUI/BNC/TP)\n");
		break;
	case TC_DEVICEID_KRAKATOA_10BT_TPC:	/* 3c900B-TPC */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			device_printf(sc->xl_dev, "guessing TPC (BNC/TP)\n");
		break;
	case TC_DEVICEID_CYCLONE_10FL:		/* 3c900B-FL */
		sc->xl_media = XL_MEDIAOPT_10FL;
		sc->xl_xcvr = XL_XCVR_AUI;
		if (verbose)
			device_printf(sc->xl_dev, "guessing 10baseFL\n");
		break;
	case TC_DEVICEID_BOOMERANG_10_100BT:	/* 3c905-TX */
	case TC_DEVICEID_HURRICANE_555:		/* 3c555 */
	case TC_DEVICEID_HURRICANE_556:		/* 3c556 */
	case TC_DEVICEID_HURRICANE_556B:	/* 3c556B */
	case TC_DEVICEID_HURRICANE_575A:	/* 3c575TX */
	case TC_DEVICEID_HURRICANE_575B:	/* 3c575B */
	case TC_DEVICEID_HURRICANE_575C:	/* 3c575C */
	case TC_DEVICEID_HURRICANE_656:		/* 3c656 */
	case TC_DEVICEID_HURRICANE_656B:	/* 3c656B */
	case TC_DEVICEID_TORNADO_656C:		/* 3c656C */
	case TC_DEVICEID_TORNADO_10_100BT_920B:	/* 3c920B-EMB */
	case TC_DEVICEID_TORNADO_10_100BT_920B_WNM:	/* 3c920B-EMB-WNM */
		sc->xl_media = XL_MEDIAOPT_MII;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			device_printf(sc->xl_dev, "guessing MII\n");
		break;
	case TC_DEVICEID_BOOMERANG_100BT4:	/* 3c905-T4 */
	case TC_DEVICEID_CYCLONE_10_100BT4:	/* 3c905B-T4 */
		sc->xl_media = XL_MEDIAOPT_BT4;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			device_printf(sc->xl_dev, "guessing 100baseT4/MII\n");
		break;
	case TC_DEVICEID_HURRICANE_10_100BT:	/* 3c905B-TX */
	case TC_DEVICEID_HURRICANE_10_100BT_SERV:/*3c980-TX */
	case TC_DEVICEID_TORNADO_10_100BT_SERV:	/* 3c980C-TX */
	case TC_DEVICEID_HURRICANE_SOHO100TX:	/* 3cSOHO100-TX */
	case TC_DEVICEID_TORNADO_10_100BT:	/* 3c905C-TX */
	case TC_DEVICEID_TORNADO_HOMECONNECT:	/* 3c450-TX */
		sc->xl_media = XL_MEDIAOPT_BTX;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			device_printf(sc->xl_dev, "guessing 10/100 internal\n");
		break;
	case TC_DEVICEID_CYCLONE_10_100_COMBO:	/* 3c905B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			device_printf(sc->xl_dev,
			    "guessing 10/100 plus BNC/AUI\n");
		break;
	default:
		device_printf(sc->xl_dev,
		    "unknown device ID: %x -- defaulting to 10baseT\n", devid);
		sc->xl_media = XL_MEDIAOPT_BT;
		break;
	}
}
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 *
 * Returns 0 on success or a standard errno.  On any failure after the
 * mutex is initialized, jumps to "fail" which calls xl_detach() to undo
 * whatever was allocated so far (xl_detach tolerates partial attach).
 */
static int
xl_attach(device_t dev)
{
	u_char eaddr[ETHER_ADDR_LEN];
	u_int16_t sinfo2, xcvr[2];
	struct xl_softc *sc;
	if_t ifp;
	int media, pmcap;
	int error = 0, phy, rid, res;
	uint16_t did;

	sc = device_get_softc(dev);
	sc->xl_dev = dev;

	mtx_init(&sc->xl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);

	did = pci_get_device(dev);

	/*
	 * Per-variant quirk flags, keyed off the PCI device ID.  The
	 * CardBus/miniPCI parts (555/556/575/656 families) have various
	 * oddities: different EEPROM offsets, inverted LED/MII power
	 * bits, a separate function-register BAR, etc.
	 */
	sc->xl_flags = 0;
	if (did == TC_DEVICEID_HURRICANE_555)
		sc->xl_flags |= XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_PHYOK;
	if (did == TC_DEVICEID_HURRICANE_556 ||
	    did == TC_DEVICEID_HURRICANE_556B)
		sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
		    XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_WEIRDRESET |
		    XL_FLAG_INVERT_LED_PWR | XL_FLAG_INVERT_MII_PWR;
	if (did == TC_DEVICEID_HURRICANE_555 ||
	    did == TC_DEVICEID_HURRICANE_556)
		sc->xl_flags |= XL_FLAG_8BITROM;
	if (did == TC_DEVICEID_HURRICANE_556B)
		sc->xl_flags |= XL_FLAG_NO_XCVR_PWR;
	if (did == TC_DEVICEID_HURRICANE_575B ||
	    did == TC_DEVICEID_HURRICANE_575C ||
	    did == TC_DEVICEID_HURRICANE_656B ||
	    did == TC_DEVICEID_TORNADO_656C)
		sc->xl_flags |= XL_FLAG_FUNCREG;
	if (did == TC_DEVICEID_HURRICANE_575A ||
	    did == TC_DEVICEID_HURRICANE_575B ||
	    did == TC_DEVICEID_HURRICANE_575C ||
	    did == TC_DEVICEID_HURRICANE_656B ||
	    did == TC_DEVICEID_TORNADO_656C)
		sc->xl_flags |= XL_FLAG_PHYOK | XL_FLAG_EEPROM_OFFSET_30 |
		    XL_FLAG_8BITROM;
	if (did == TC_DEVICEID_HURRICANE_656)
		sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK;
	if (did == TC_DEVICEID_HURRICANE_575B)
		sc->xl_flags |= XL_FLAG_INVERT_LED_PWR;
	if (did == TC_DEVICEID_HURRICANE_575C)
		sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
	if (did == TC_DEVICEID_TORNADO_656C)
		sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
	if (did == TC_DEVICEID_HURRICANE_656 ||
	    did == TC_DEVICEID_HURRICANE_656B)
		sc->xl_flags |= XL_FLAG_INVERT_MII_PWR |
		    XL_FLAG_INVERT_LED_PWR;
	if (did == TC_DEVICEID_TORNADO_10_100BT_920B ||
	    did == TC_DEVICEID_TORNADO_10_100BT_920B_WNM)
		sc->xl_flags |= XL_FLAG_PHYOK;

	/* Parts whose memory-mapped register BAR is known broken. */
	switch (did) {
	case TC_DEVICEID_BOOMERANG_10_100BT:	/* 3c905-TX */
	case TC_DEVICEID_HURRICANE_575A:
	case TC_DEVICEID_HURRICANE_575B:
	case TC_DEVICEID_HURRICANE_575C:
		sc->xl_flags |= XL_FLAG_NO_MMIO;
		break;
	default:
		break;
	}

	/*
	 * Map control/status registers.  Prefer MMIO when the part
	 * supports it; fall back to port I/O otherwise.
	 */
	pci_enable_busmaster(dev);
	if ((sc->xl_flags & XL_FLAG_NO_MMIO) == 0) {
		rid = XL_PCI_LOMEM;
		res = SYS_RES_MEMORY;
		sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
	}
	if (sc->xl_res != NULL) {
		sc->xl_flags |= XL_FLAG_USE_MMIO;
		if (bootverbose)
			device_printf(dev, "using memory mapped I/O\n");
	} else {
		rid = XL_PCI_LOIO;
		res = SYS_RES_IOPORT;
		sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
		if (sc->xl_res == NULL) {
			device_printf(dev, "couldn't map ports/memory\n");
			error = ENXIO;
			goto fail;
		}
		if (bootverbose)
			device_printf(dev, "using port I/O\n");
	}

	sc->xl_btag = rman_get_bustag(sc->xl_res);
	sc->xl_bhandle = rman_get_bushandle(sc->xl_res);

	/* CardBus variants expose a second BAR with function registers. */
	if (sc->xl_flags & XL_FLAG_FUNCREG) {
		rid = XL_PCI_FUNCMEM;
		sc->xl_fres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
		    RF_ACTIVE);
		if (sc->xl_fres == NULL) {
			device_printf(dev, "couldn't map funcreg memory\n");
			error = ENXIO;
			goto fail;
		}
		sc->xl_ftag = rman_get_bustag(sc->xl_fres);
		sc->xl_fhandle = rman_get_bushandle(sc->xl_fres);
	}

	/* Allocate interrupt */
	rid = 0;
	sc->xl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->xl_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Initialize interface name. */
	ifp = sc->xl_ifp = if_alloc(IFT_ETHER);
	/*
	 * NOTE(review): the lines below are removed by this diff; modern
	 * if_alloc(9) cannot return NULL, so the check is dead code.
	 */
- if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
- error = ENOSPC;
- goto fail;
- }
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/* Reset the adapter. */
	XL_LOCK(sc);
	xl_reset(sc);
	XL_UNLOCK(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1)) {
		device_printf(dev, "failed to read station address\n");
		error = ENXIO;
		goto fail;
	}

	callout_init_mtx(&sc->xl_tick_callout, &sc->xl_mtx, 0);
	NET_TASK_INIT(&sc->xl_task, 0, xl_rxeof_task, sc);

	/*
	 * Now allocate a tag for the DMA descriptor lists and a chunk
	 * of DMA-able memory based on the tag.  Also obtain the DMA
	 * addresses of the RX and TX ring, which we'll need later.
	 * All of our lists are allocated as a contiguous block
	 * of memory.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    XL_RX_LIST_SZ, 1, XL_RX_LIST_SZ, 0, NULL, NULL,
	    &sc->xl_ldata.xl_rx_tag);
	if (error) {
		device_printf(dev, "failed to allocate rx dma tag\n");
		goto fail;
	}
	error = bus_dmamem_alloc(sc->xl_ldata.xl_rx_tag,
	    (void **)&sc->xl_ldata.xl_rx_list, BUS_DMA_NOWAIT |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->xl_ldata.xl_rx_dmamap);
	if (error) {
		device_printf(dev, "no memory for rx list buffers!\n");
		/*
		 * Null out the tag after destroying it so that the
		 * cleanup in xl_detach() does not free it twice.
		 */
		bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
		sc->xl_ldata.xl_rx_tag = NULL;
		goto fail;
	}
	error = bus_dmamap_load(sc->xl_ldata.xl_rx_tag,
	    sc->xl_ldata.xl_rx_dmamap, sc->xl_ldata.xl_rx_list,
	    XL_RX_LIST_SZ, xl_dma_map_addr,
	    &sc->xl_ldata.xl_rx_dmaaddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(dev, "cannot get dma address of the rx ring!\n");
		bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
		    sc->xl_ldata.xl_rx_dmamap);
		bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
		sc->xl_ldata.xl_rx_tag = NULL;
		goto fail;
	}
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    XL_TX_LIST_SZ, 1, XL_TX_LIST_SZ, 0, NULL, NULL,
	    &sc->xl_ldata.xl_tx_tag);
	if (error) {
		device_printf(dev, "failed to allocate tx dma tag\n");
		goto fail;
	}
	error = bus_dmamem_alloc(sc->xl_ldata.xl_tx_tag,
	    (void **)&sc->xl_ldata.xl_tx_list, BUS_DMA_NOWAIT |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->xl_ldata.xl_tx_dmamap);
	if (error) {
		device_printf(dev, "no memory for list buffers!\n");
		bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
		sc->xl_ldata.xl_tx_tag = NULL;
		goto fail;
	}
	error = bus_dmamap_load(sc->xl_ldata.xl_tx_tag,
	    sc->xl_ldata.xl_tx_dmamap, sc->xl_ldata.xl_tx_list,
	    XL_TX_LIST_SZ, xl_dma_map_addr,
	    &sc->xl_ldata.xl_tx_dmaaddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(dev, "cannot get dma address of the tx ring!\n");
		bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
		    sc->xl_ldata.xl_tx_dmamap);
		bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
		sc->xl_ldata.xl_tx_tag = NULL;
		goto fail;
	}

	/*
	 * Allocate a DMA tag for the mapping of mbufs.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * XL_MAXFRAGS, XL_MAXFRAGS, MCLBYTES, 0, NULL,
	    NULL, &sc->xl_mtag);
	if (error) {
		device_printf(dev, "failed to allocate mbuf dma tag\n");
		goto fail;
	}

	/* We need a spare DMA map for the RX ring. */
	error = bus_dmamap_create(sc->xl_mtag, 0, &sc->xl_tmpmap);
	if (error)
		goto fail;

	/*
	 * Figure out the card type. 3c905B adapters have the
	 * 'supportsNoTxLength' bit set in the capabilities
	 * word in the EEPROM.
	 * Note: my 3c575C CardBus card lies. It returns a value
	 * of 0x1578 for its capabilities word, which is somewhat
	 * nonsensical. Another way to distinguish a 3c90x chip
	 * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
	 * bit. This will only be set for 3c90x boomerage chips.
	 */
	xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
	if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
	    !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
		sc->xl_type = XL_TYPE_905B;
	else
		sc->xl_type = XL_TYPE_90X;

	/* Check availability of WOL. */
	if ((sc->xl_caps & XL_CAPS_PWRMGMT) != 0 &&
	    pci_find_cap(dev, PCIY_PMG, &pmcap) == 0) {
		sc->xl_pmcap = pmcap;
		sc->xl_flags |= XL_FLAG_WOL;
		sinfo2 = 0;
		xl_read_eeprom(sc, (caddr_t)&sinfo2, XL_EE_SOFTINFO2, 1, 0);
		if ((sinfo2 & XL_SINFO2_AUX_WOL_CON) == 0 && bootverbose)
			device_printf(dev,
			    "No auxiliary remote wakeup connector!\n");
	}

	/* Set the TX start threshold for best performance. */
	sc->xl_tx_thresh = XL_MIN_FRAMELEN;

	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, xl_ioctl);
	if_setcapabilities(ifp, IFCAP_VLAN_MTU);
	if (sc->xl_type == XL_TYPE_905B) {
		if_sethwassist(ifp, XL905B_CSUM_FEATURES);
#ifdef XL905B_TXCSUM_BROKEN
		if_setcapabilitiesbit(ifp, IFCAP_RXCSUM, 0);
#else
		if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
#endif
	}
	if ((sc->xl_flags & XL_FLAG_WOL) != 0)
		if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif
	if_setstartfn(ifp, xl_start);
	if_setinitfn(ifp, xl_init);
	if_setsendqlen(ifp, XL_TX_LIST_CNT - 1);
	if_setsendqready(ifp);

	/*
	 * Now we have to see what sort of media we have.
	 * This includes probing for an MII interace and a
	 * possible PHY.
	 */
	XL_SEL_WIN(3);
	sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
	if (bootverbose)
		device_printf(dev, "media options word: %x\n", sc->xl_media);

	xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
	sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
	sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
	sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;

	xl_mediacheck(sc);

	if (sc->xl_media & XL_MEDIAOPT_MII ||
	    sc->xl_media & XL_MEDIAOPT_BTX ||
	    sc->xl_media & XL_MEDIAOPT_BT4) {
		if (bootverbose)
			device_printf(dev, "found MII/AUTO\n");
		xl_setcfg(sc);
		/*
		 * Attach PHYs only at MII address 24 if !XL_FLAG_PHYOK.
		 * This is to guard against problems with certain 3Com ASIC
		 * revisions that incorrectly map the internal transceiver
		 * control registers at all MII addresses.
		 */
		phy = MII_PHY_ANY;
		if ((sc->xl_flags & XL_FLAG_PHYOK) == 0)
			phy = 24;
		error = mii_attach(dev, &sc->xl_miibus, ifp, xl_ifmedia_upd,
		    xl_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY,
		    sc->xl_type == XL_TYPE_905B ? MIIF_DOPAUSE : 0);
		if (error != 0) {
			device_printf(dev, "attaching PHYs failed\n");
			goto fail;
		}
		/* miibus handles media; skip the manual ifmedia setup. */
		goto done;
	}

	/*
	 * Sanity check. If the user has selected "auto" and this isn't
	 * a 10/100 card of some kind, we need to force the transceiver
	 * type to something sane.
	 */
	if (sc->xl_xcvr == XL_XCVR_AUTO)
		xl_choose_xcvr(sc, bootverbose);

	/*
	 * Do ifmedia setup.
	 */
	if (sc->xl_media & XL_MEDIAOPT_BT) {
		if (bootverbose)
			device_printf(dev, "found 10baseT\n");
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
		if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
			ifmedia_add(&sc->ifmedia,
			    IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	}

	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
		/*
		 * Check for a 10baseFL board in disguise.
		 */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			if (bootverbose)
				device_printf(dev, "found 10baseFL\n");
			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL, 0, NULL);
			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_HDX,
			    0, NULL);
			if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
				ifmedia_add(&sc->ifmedia,
				    IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
		} else {
			if (bootverbose)
				device_printf(dev, "found AUI\n");
			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BNC) {
		if (bootverbose)
			device_printf(dev, "found BNC\n");
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
	}

	if (sc->xl_media & XL_MEDIAOPT_BFX) {
		if (bootverbose)
			device_printf(dev, "found 100baseFX\n");
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_FX, 0, NULL);
	}

	media = IFM_ETHER|IFM_100_TX|IFM_FDX;
	xl_choose_media(sc, &media);

	if (sc->xl_miibus == NULL)
		ifmedia_set(&sc->ifmedia, media);

done:
	if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
		XL_SEL_WIN(0);
		CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->xl_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, xl_intr, sc, &sc->xl_intrhand);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		xl_detach(dev);

	return (error);
}
/*
 * Pick a default media for the hard-wired transceiver types and program
 * the chip accordingly; MII-class transceivers are left to miibus.
 * XXX This is a leaf function only called by xl_attach() and
 * acquires/releases the non-recursible driver mutex to
 * satisfy lock assertions.
 */
static void
xl_choose_media(struct xl_softc *sc, int *media)
{
	int program;

	XL_LOCK(sc);

	/* Assume we will push the chosen media into the chip. */
	program = 1;
	switch (sc->xl_xcvr) {
	case XL_XCVR_10BT:
		*media = IFM_ETHER|IFM_10_T;
		break;
	case XL_XCVR_AUI:
		/* A 905B reporting only 10FL is really a fiber board. */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL)
			*media = IFM_ETHER|IFM_10_FL;
		else
			*media = IFM_ETHER|IFM_10_5;
		break;
	case XL_XCVR_COAX:
		*media = IFM_ETHER|IFM_10_2;
		break;
	case XL_XCVR_AUTO:
	case XL_XCVR_100BTX:
	case XL_XCVR_MII:
		/* Chosen by miibus; nothing to do here. */
		program = 0;
		break;
	case XL_XCVR_100BFX:
		*media = IFM_ETHER|IFM_100_FX;
		program = 0;
		break;
	default:
		device_printf(sc->xl_dev, "unknown XCVR type: %d\n",
		    sc->xl_xcvr);
		/*
		 * This will probably be wrong, but it prevents
		 * the ifmedia code from panicking.
		 */
		*media = IFM_ETHER|IFM_10_T;
		program = 0;
		break;
	}
	if (program)
		xl_setmode(sc, *media);

	XL_UNLOCK(sc);
}
/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
xl_detach(device_t dev)
{
	struct xl_softc *sc;
	if_t ifp;
	int rid, res;

	sc = device_get_softc(dev);
	ifp = sc->xl_ifp;

	KASSERT(mtx_initialized(&sc->xl_mtx), ("xl mutex not initialized"));

#ifdef DEVICE_POLLING
	if (ifp && if_getcapenable(ifp) & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/*
	 * Reconstruct which BAR xl_attach() ended up using so the
	 * matching bus_release_resource() arguments can be passed below.
	 */
	if (sc->xl_flags & XL_FLAG_USE_MMIO) {
		rid = XL_PCI_LOMEM;
		res = SYS_RES_MEMORY;
	} else {
		rid = XL_PCI_LOIO;
		res = SYS_RES_IOPORT;
	}

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		XL_LOCK(sc);
		xl_stop(sc);
		XL_UNLOCK(sc);
		/* Drain deferred work before tearing the ifnet down. */
		taskqueue_drain(taskqueue_swi, &sc->xl_task);
		callout_drain(&sc->xl_tick_callout);
		ether_ifdetach(ifp);
	}
	if (sc->xl_miibus)
		device_delete_child(dev, sc->xl_miibus);
	bus_generic_detach(dev);
	ifmedia_removeall(&sc->ifmedia);

	if (sc->xl_intrhand)
		bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand);
	if (sc->xl_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq);
	if (sc->xl_fres != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    XL_PCI_FUNCMEM, sc->xl_fres);
	if (sc->xl_res)
		bus_release_resource(dev, res, rid, sc->xl_res);

	if (ifp)
		if_free(ifp);

	if (sc->xl_mtag) {
		bus_dmamap_destroy(sc->xl_mtag, sc->xl_tmpmap);
		bus_dma_tag_destroy(sc->xl_mtag);
	}
	/*
	 * The ring tags are NULL unless xl_attach() got all the way
	 * through dmamem_alloc + dmamap_load for that ring, so a full
	 * unload/free/destroy sequence is always safe here.
	 */
	if (sc->xl_ldata.xl_rx_tag) {
		bus_dmamap_unload(sc->xl_ldata.xl_rx_tag,
		    sc->xl_ldata.xl_rx_dmamap);
		bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
		    sc->xl_ldata.xl_rx_dmamap);
		bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
	}
	if (sc->xl_ldata.xl_tx_tag) {
		bus_dmamap_unload(sc->xl_ldata.xl_tx_tag,
		    sc->xl_ldata.xl_tx_dmamap);
		bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
		    sc->xl_ldata.xl_tx_dmamap);
		bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
	}

	mtx_destroy(&sc->xl_mtx);

	return (0);
}
/*
* Initialize the transmit descriptors.
*/
static int
xl_list_tx_init(struct xl_softc *sc)
{
struct xl_chain_data *cd;
struct xl_list_data *ld;
int error, i;
XL_LOCK_ASSERT(sc);
cd = &sc->xl_cdata;
ld = &sc->xl_ldata;
for (i = 0; i < XL_TX_LIST_CNT; i++) {
cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
error = bus_dmamap_create(sc->xl_mtag, 0,
&cd->xl_tx_chain[i].xl_map);
if (error)
return (error);
cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
i * sizeof(struct xl_list);
if (i == (XL_TX_LIST_CNT - 1))
cd->xl_tx_chain[i].xl_next = NULL;
else
cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
}
cd->xl_tx_free = &cd->xl_tx_chain[0];
cd->xl_tx_tail = cd->xl_tx_head = NULL;
bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
return (0);
}
/*
* Initialize the transmit descriptors.
*/
static int
xl_list_tx_init_90xB(struct xl_softc *sc)
{
struct xl_chain_data *cd;
struct xl_list_data *ld;
int error, i;
XL_LOCK_ASSERT(sc);
cd = &sc->xl_cdata;
ld = &sc->xl_ldata;
for (i = 0; i < XL_TX_LIST_CNT; i++) {
cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
error = bus_dmamap_create(sc->xl_mtag, 0,
&cd->xl_tx_chain[i].xl_map);
if (error)
return (error);
cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
i * sizeof(struct xl_list);
if (i == (XL_TX_LIST_CNT - 1))
cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0];
else
cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
if (i == 0)
cd->xl_tx_chain[i].xl_prev =
&cd->xl_tx_chain[XL_TX_LIST_CNT - 1];
else
cd->xl_tx_chain[i].xl_prev =
&cd->xl_tx_chain[i - 1];
}
bzero(ld->xl_tx_list, XL_TX_LIST_SZ);
ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);
cd->xl_tx_prod = 1;
cd->xl_tx_cons = 1;
cd->xl_tx_cnt = 0;
bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
return (0);
}
/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
xl_list_rx_init(struct xl_softc *sc)
{
	struct xl_chain_data *cdata;
	struct xl_list_data *ldata;
	u_int32_t nextaddr;
	int err, idx, nxt;

	XL_LOCK_ASSERT(sc);

	cdata = &sc->xl_cdata;
	ldata = &sc->xl_ldata;

	for (idx = 0; idx < XL_RX_LIST_CNT; idx++) {
		cdata->xl_rx_chain[idx].xl_ptr = &ldata->xl_rx_list[idx];
		err = bus_dmamap_create(sc->xl_mtag, 0,
		    &cdata->xl_rx_chain[idx].xl_map);
		if (err != 0)
			return (err);
		/* Hang an mbuf cluster off this descriptor. */
		err = xl_newbuf(sc, &cdata->xl_rx_chain[idx]);
		if (err != 0)
			return (err);
		/* Link to the following slot, wrapping at the end. */
		nxt = (idx + 1) % XL_RX_LIST_CNT;
		nextaddr = ldata->xl_rx_dmaaddr +
		    nxt * sizeof(struct xl_list_onefrag);
		cdata->xl_rx_chain[idx].xl_next = &cdata->xl_rx_chain[nxt];
		ldata->xl_rx_list[idx].xl_next = htole32(nextaddr);
	}

	bus_dmamap_sync(ldata->xl_rx_tag, ldata->xl_rx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	cdata->xl_rx_head = &cdata->xl_rx_chain[0];
	return (0);
}
/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * If we fail to do so, we need to leave the old mbuf and
 * the old DMA map untouched so that it can be reused.
 *
 * On success the descriptor's old DMA map is swapped with the driver's
 * spare map (sc->xl_tmpmap), so the new mbuf is already loaded and the
 * freed-up map becomes the next spare.  Returns 0 or an errno.
 */
static int
xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c)
{
	struct mbuf *m_new = NULL;
	bus_dmamap_t map;
	bus_dma_segment_t segs[1];
	int error, nseg;

	XL_LOCK_ASSERT(sc);

	m_new = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return (ENOBUFS);

	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	/* Force longword alignment for packet payload. */
	m_adj(m_new, ETHER_ALIGN);

	/*
	 * Load into the spare map first: if the load fails, the
	 * descriptor's own map (and mbuf) remain valid and reusable.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, sc->xl_tmpmap, m_new,
	    segs, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		device_printf(sc->xl_dev, "can't map mbuf (error %d)\n",
		    error);
		return (error);
	}
	KASSERT(nseg == 1,
	    ("%s: too many DMA segments (%d)", __func__, nseg));

	/* Success: unload the old buffer and swap maps with the spare. */
	bus_dmamap_unload(sc->xl_mtag, c->xl_map);
	map = c->xl_map;
	c->xl_map = sc->xl_tmpmap;
	sc->xl_tmpmap = map;
	c->xl_mbuf = m_new;
	c->xl_ptr->xl_frag.xl_len = htole32(m_new->m_len | XL_LAST_FRAG);
	c->xl_ptr->xl_frag.xl_addr = htole32(segs->ds_addr);
	/* status == 0 hands the descriptor back to the chip. */
	c->xl_ptr->xl_status = 0;
	bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREREAD);
	return (0);
}
/*
 * Walk the RX ring looking for a descriptor the chip has completed
 * (non-zero status).  Returns EAGAIN after repositioning the ring head
 * on the first such descriptor, or 0 if the whole ring is still empty.
 */
static int
xl_rx_resync(struct xl_softc *sc)
{
	struct xl_chain_onefrag *cur;
	int remaining;

	XL_LOCK_ASSERT(sc);

	cur = sc->xl_cdata.xl_rx_head;
	for (remaining = XL_RX_LIST_CNT; remaining > 0; remaining--) {
		if (cur->xl_ptr->xl_status != 0) {
			sc->xl_cdata.xl_rx_head = cur;
			return (EAGAIN);
		}
		cur = cur->xl_next;
	}

	/* Nothing pending anywhere in the ring. */
	return (0);
}
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * Returns the number of packets processed (used by DEVICE_POLLING).
 * Drops and re-acquires the driver lock around if_input(), so callers
 * must tolerate the ring state changing across this call.
 */
static int
xl_rxeof(struct xl_softc *sc)
{
	struct mbuf *m;
	if_t ifp = sc->xl_ifp;
	struct xl_chain_onefrag *cur_rx;
	int total_len;
	int rx_npkts = 0;
	u_int32_t rxstat;

	XL_LOCK_ASSERT(sc);
again:
	bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap,
	    BUS_DMASYNC_POSTREAD);
	/* A non-zero status word means the chip finished this descriptor. */
	while ((rxstat = le32toh(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))) {
#ifdef DEVICE_POLLING
		if (if_getcapenable(ifp) & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		cur_rx = sc->xl_cdata.xl_rx_head;
		sc->xl_cdata.xl_rx_head = cur_rx->xl_next;
		total_len = rxstat & XL_RXSTAT_LENMASK;
		rx_npkts++;

		/*
		 * Since we have told the chip to allow large frames,
		 * we need to trap giant frame errors in software. We allow
		 * a little more than the normal frame size to account for
		 * frames with VLAN tags.
		 */
		if (total_len > XL_MAX_FRAMELEN)
			rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & XL_RXSTAT_UP_ERROR) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			cur_rx->xl_ptr->xl_status = 0;
			bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
			    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
			continue;
		}

		/*
		 * If the error bit was not set, the upload complete
		 * bit should be set which means we have a valid packet.
		 * If not, something truly strange has happened.
		 */
		if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
			device_printf(sc->xl_dev,
			    "bad receive status -- packet dropped\n");
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			cur_rx->xl_ptr->xl_status = 0;
			bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
			    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
			continue;
		}

		/* No errors; receive the packet. */
		bus_dmamap_sync(sc->xl_mtag, cur_rx->xl_map,
		    BUS_DMASYNC_POSTREAD);
		m = cur_rx->xl_mbuf;

		/*
		 * Try to conjure up a new mbuf cluster. If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue. This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		if (xl_newbuf(sc, cur_rx)) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			cur_rx->xl_ptr->xl_status = 0;
			bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
			    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
			continue;
		}
		bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
		    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);

		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;

		/* Translate the chip's checksum bits into mbuf flags. */
		if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
			/* Do IP checksum checking. */
			if (rxstat & XL_RXSTAT_IPCKOK)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (!(rxstat & XL_RXSTAT_IPCKERR))
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			if ((rxstat & XL_RXSTAT_TCPCOK &&
			    !(rxstat & XL_RXSTAT_TCPCKERR)) ||
			    (rxstat & XL_RXSTAT_UDPCKOK &&
			    !(rxstat & XL_RXSTAT_UDPCKERR))) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Hand the packet up the stack without holding our lock. */
		XL_UNLOCK(sc);
		if_input(ifp, m);
		XL_LOCK(sc);

		/*
		 * If we are running from the taskqueue, the interface
		 * might have been stopped while we were passing the last
		 * packet up the network stack.
		 */
		if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
			return (rx_npkts);
	}

	/*
	 * Handle the 'end of channel' condition. When the upload
	 * engine hits the end of the RX ring, it will stall. This
	 * is our cue to flush the RX ring, reload the uplist pointer
	 * register and unstall the engine.
	 * XXX This is actually a little goofy. With the ThunderLAN
	 * chip, you get an interrupt when the receiver hits the end
	 * of the receive ring, which tells you exactly when you
	 * you need to reload the ring pointer. Here we have to
	 * fake it. I'm mad at myself for not being clever enough
	 * to avoid the use of a goto here.
	 */
	if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
	    CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
		xl_wait(sc);
		CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
		sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
		goto again;
	}
	return (rx_npkts);
}
/*
 * Taskqueue wrapper for xl_rxeof(): take the driver lock and run the
 * receive handler, but only while the interface is still up.
 */
static void
xl_rxeof_task(void *arg, int pending)
{
	struct xl_softc *sc;

	sc = arg;
	XL_LOCK(sc);
	if ((if_getdrvflags(sc->xl_ifp) & IFF_DRV_RUNNING) != 0)
		xl_rxeof(sc);
	XL_UNLOCK(sc);
}
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 *
 * 3c90x (non-B) variant: completion is inferred from the download-list
 * pointer register rather than a per-descriptor status bit.
 */
static void
xl_txeof(struct xl_softc *sc)
{
	struct xl_chain *cur_tx;
	if_t ifp = sc->xl_ifp;

	XL_LOCK_ASSERT(sc);

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded. Note: the 3c905B
	 * sets a special bit in the status word to let us
	 * know that a frame has been downloaded, but the
	 * original 3c900/3c905 adapters don't do that.
	 * Consequently, we have to use a different test if
	 * xl_type != XL_TYPE_905B.
	 */
	while (sc->xl_cdata.xl_tx_head != NULL) {
		cur_tx = sc->xl_cdata.xl_tx_head;
		/* Non-zero DOWNLIST_PTR: chip is still working on the list. */
		if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
			break;
		sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
		bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
		m_freem(cur_tx->xl_mbuf);
		cur_tx->xl_mbuf = NULL;
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

		/* Return the chain entry to the free list. */
		cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
		sc->xl_cdata.xl_tx_free = cur_tx;
	}

	if (sc->xl_cdata.xl_tx_head == NULL) {
		/* All descriptors reclaimed; disarm the watchdog. */
		sc->xl_wdog_timer = 0;
		sc->xl_cdata.xl_tx_tail = NULL;
	} else {
		/* Work remains: kick the download engine if it stalled. */
		if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
		    !CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
			CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
			    sc->xl_cdata.xl_tx_head->xl_phys);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
	}
}
/*
 * TX completion for 3c90xB parts: walk the ring from the consumer index
 * toward the producer, reclaiming descriptors whose DL_COMPLETE status
 * bit the chip has set.
 */
static void
xl_txeof_90xB(struct xl_softc *sc)
{
	struct xl_chain *cur_tx = NULL;
	if_t ifp = sc->xl_ifp;
	int idx;

	XL_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
	    BUS_DMASYNC_POSTREAD);
	idx = sc->xl_cdata.xl_tx_cons;
	while (idx != sc->xl_cdata.xl_tx_prod) {
		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Stop at the first descriptor the chip hasn't finished. */
		if (!(le32toh(cur_tx->xl_ptr->xl_status) &
		    XL_TXSTAT_DL_COMPLETE))
			break;

		if (cur_tx->xl_mbuf != NULL) {
			bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
			m_freem(cur_tx->xl_mbuf);
			cur_tx->xl_mbuf = NULL;
		}

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);

		sc->xl_cdata.xl_tx_cnt--;
		XL_INC(idx, XL_TX_LIST_CNT);
	}

	if (sc->xl_cdata.xl_tx_cnt == 0)
		sc->xl_wdog_timer = 0;
	sc->xl_cdata.xl_tx_cons = idx;

	/* cur_tx != NULL means the loop body ran at least once. */
	if (cur_tx != NULL)
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
}
/*
 * TX 'end of channel' interrupt handler. Actually, we should
 * only get a 'TX complete' interrupt if there's a transmit error,
 * so this is really TX error handler.
 *
 * Drains every pending status byte from XL_TX_STATUS, resetting and
 * restarting the transmitter for serious errors (underrun, jabber,
 * reclaim) and simply re-enabling it otherwise.
 */
static void
xl_txeoc(struct xl_softc *sc)
{
	u_int8_t txstat;

	XL_LOCK_ASSERT(sc);

	while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
		if (txstat & XL_TXSTATUS_UNDERRUN ||
		    txstat & XL_TXSTATUS_JABBER ||
		    txstat & XL_TXSTATUS_RECLAIM) {
			device_printf(sc->xl_dev,
			    "transmission error: 0x%02x\n", txstat);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
			xl_wait(sc);
			/*
			 * Repoint the chip at the first unfinished
			 * descriptor (ring index for 905B, head of the
			 * list for 90x) and re-arm the watchdog.
			 */
			if (sc->xl_type == XL_TYPE_905B) {
				if (sc->xl_cdata.xl_tx_cnt) {
					int i;
					struct xl_chain *c;

					i = sc->xl_cdata.xl_tx_cons;
					c = &sc->xl_cdata.xl_tx_chain[i];
					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
					    c->xl_phys);
					CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
					sc->xl_wdog_timer = 5;
				}
			} else {
				if (sc->xl_cdata.xl_tx_head != NULL) {
					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
					    sc->xl_cdata.xl_tx_head->xl_phys);
					sc->xl_wdog_timer = 5;
				}
			}
			/*
			 * Remember to set this for the
			 * first generation 3c90X chips.
			 */
			CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
			/* Back off on underruns by raising the threshold. */
			if (txstat & XL_TXSTATUS_UNDERRUN &&
			    sc->xl_tx_thresh < XL_PACKET_SIZE) {
				sc->xl_tx_thresh += XL_MIN_FRAMELEN;
				device_printf(sc->xl_dev,
"tx underrun, increasing tx start threshold to %d bytes\n", sc->xl_tx_thresh);
			}
			CSR_WRITE_2(sc, XL_COMMAND,
			    XL_CMD_TX_SET_START|sc->xl_tx_thresh);
			if (sc->xl_type == XL_TYPE_905B) {
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
			}
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		} else {
			/* Benign status: just restart the transmitter. */
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
		/*
		 * Write an arbitrary byte to the TX_STATUS register
		 * to clear this interrupt/error and advance to the next.
		 */
		CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
	}
}
/*
 * Interrupt handler: acknowledge and service every pending interrupt
 * source, then restart transmission if the send queue is non-empty.
 */
static void
xl_intr(void *arg)
{
	struct xl_softc *sc = arg;
	if_t ifp = sc->xl_ifp;
	u_int16_t status;

	XL_LOCK(sc);

#ifdef DEVICE_POLLING
	/* Polling mode owns the chip; the hard interrupt does nothing. */
	if (if_getcapenable(ifp) & IFCAP_POLLING) {
		XL_UNLOCK(sc);
		return;
	}
#endif

	/* 0xFFFF indicates a removed (e.g. ejected CardBus) device. */
	for (;;) {
		status = CSR_READ_2(sc, XL_STATUS);
		if ((status & XL_INTRS) == 0 || status == 0xFFFF)
			break;
		/* Ack before handling so new events re-latch. */
		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_INTR_ACK|(status & XL_INTRS));
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
			break;

		if (status & XL_STAT_UP_COMPLETE) {
			if (xl_rxeof(sc) == 0) {
				/*
				 * Chip reported RX work but the head
				 * descriptor was empty: resync and retry.
				 */
				while (xl_rx_resync(sc))
					xl_rxeof(sc);
			}
		}

		if (status & XL_STAT_DOWN_COMPLETE) {
			if (sc->xl_type == XL_TYPE_905B)
				xl_txeof_90xB(sc);
			else
				xl_txeof(sc);
		}

		if (status & XL_STAT_TX_COMPLETE) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			xl_txeoc(sc);
		}

		if (status & XL_STAT_ADFAIL) {
			/* Adapter failure: full reinit is the only cure. */
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			xl_init_locked(sc);
			break;
		}

		if (status & XL_STAT_STATSOFLOW)
			xl_stats_update(sc);
	}

	if (!if_sendq_empty(ifp) &&
	    if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
		if (sc->xl_type == XL_TYPE_905B)
			xl_start_90xB_locked(ifp);
		else
			xl_start_locked(ifp);
	}

	XL_UNLOCK(sc);
}
#ifdef DEVICE_POLLING
/*
 * DEVICE_POLLING entry point: run the locked poll routine while the
 * interface is up.  Returns the number of packets received.
 */
static int
xl_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct xl_softc *sc;
	int npkts;

	sc = if_getsoftc(ifp);
	npkts = 0;
	XL_LOCK(sc);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		npkts = xl_poll_locked(ifp, cmd, count);
	XL_UNLOCK(sc);
	return (npkts);
}
/*
 * Locked body of the polling handler: process up to 'count' RX packets,
 * reclaim TX descriptors, restart transmission, and — for
 * POLL_AND_CHECK_STATUS — service the rarer interrupt sources that
 * xl_intr() would normally handle.
 */
static int
xl_poll_locked(if_t ifp, enum poll_cmd cmd, int count)
{
	struct xl_softc *sc = if_getsoftc(ifp);
	int rx_npkts;

	XL_LOCK_ASSERT(sc);

	/* rxcycles is the RX budget consumed inside xl_rxeof(). */
	sc->rxcycles = count;
	rx_npkts = xl_rxeof(sc);
	if (sc->xl_type == XL_TYPE_905B)
		xl_txeof_90xB(sc);
	else
		xl_txeof(sc);

	if (!if_sendq_empty(ifp)) {
		if (sc->xl_type == XL_TYPE_905B)
			xl_start_90xB_locked(ifp);
		else
			xl_start_locked(ifp);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		u_int16_t status;

		/* 0xFFFF indicates a removed device; don't ack that. */
		status = CSR_READ_2(sc, XL_STATUS);
		if (status & XL_INTRS && status != 0xFFFF) {
			CSR_WRITE_2(sc, XL_COMMAND,
			    XL_CMD_INTR_ACK|(status & XL_INTRS));

			if (status & XL_STAT_TX_COMPLETE) {
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
				xl_txeoc(sc);
			}

			if (status & XL_STAT_ADFAIL) {
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				xl_init_locked(sc);
			}

			if (status & XL_STAT_STATSOFLOW)
				xl_stats_update(sc);
		}
	}
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */
/*
 * Once-a-second callout: drive the MII state machine (if present),
 * harvest the hardware statistics, and run the TX watchdog.  The
 * callout is not rescheduled when the watchdog decided to reinit
 * the interface (EJUSTRETURN).
 */
static void
xl_tick(void *xsc)
{
	struct xl_softc *sc;
	struct mii_data *mii_sc;

	sc = xsc;
	XL_LOCK_ASSERT(sc);

	if (sc->xl_miibus != NULL) {
		mii_sc = device_get_softc(sc->xl_miibus);
		mii_tick(mii_sc);
	}

	xl_stats_update(sc);

	if (xl_watchdog(sc) != EJUSTRETURN)
		callout_reset(&sc->xl_tick_callout, hz, xl_tick, sc);
}
/*
 * Read and clear the hardware statistics registers, folding the
 * interesting counters into the ifnet counters.  Reading the registers
 * clears them, which also quiesces the STATSOFLOW interrupt.
 */
static void
xl_stats_update(struct xl_softc *sc)
{
	if_t ifp = sc->xl_ifp;
	struct xl_stats xl_stats;
	u_int8_t *p;
	int i;

	XL_LOCK_ASSERT(sc);

	bzero((char *)&xl_stats, sizeof(struct xl_stats));

	p = (u_int8_t *)&xl_stats;

	/*
	 * Read all the stats registers: 16 consecutive byte-wide
	 * registers in window 6, copied straight into xl_stats.
	 * NOTE(review): assumes struct xl_stats' leading fields mirror
	 * the window-6 register layout byte-for-byte — defined in the
	 * driver header; confirm there.
	 */
	XL_SEL_WIN(6);
	for (i = 0; i < 16; i++)
		*p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);

	if_inc_counter(ifp, IFCOUNTER_IERRORS, xl_stats.xl_rx_overrun);

	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
	    xl_stats.xl_tx_multi_collision +
	    xl_stats.xl_tx_single_collision +
	    xl_stats.xl_tx_late_collision);

	/*
	 * Boomerang and cyclone chips have an extra stats counter
	 * in window 4 (BadSSD). We have to read this too in order
	 * to clear out all the stats registers and avoid a statsoflow
	 * interrupt.
	 */
	XL_SEL_WIN(4);
	CSR_READ_1(sc, XL_W4_BADSSD);
	/* Leave the register window where the rest of the driver expects. */
	XL_SEL_WIN(7);
}
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 *
 * On failure the mbuf is freed and *m_head set to NULL except for the
 * initial EFBIG case, which is retried after m_collapse().  Returns 0
 * on success or an errno.
 */
static int
xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf **m_head)
{
	struct mbuf *m_new;
	if_t ifp = sc->xl_ifp;
	int error, i, nseg, total_len;
	u_int32_t status;

	XL_LOCK_ASSERT(sc);

	error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, c->xl_map, *m_head,
	    sc->xl_cdata.xl_tx_segs, &nseg, BUS_DMA_NOWAIT);

	/* EFBIG (too many fragments) is recoverable; anything else is not. */
	if (error && error != EFBIG) {
		if_printf(ifp, "can't map mbuf (error %d)\n", error);
		return (error);
	}

	/*
	 * Handle special case: we used up all 63 fragments,
	 * but we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (error) {
		m_new = m_collapse(*m_head, M_NOWAIT, XL_MAXFRAGS);
		if (m_new == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m_new;

		error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, c->xl_map,
		    *m_head, sc->xl_cdata.xl_tx_segs, &nseg, BUS_DMA_NOWAIT);
		if (error) {
			m_freem(*m_head);
			*m_head = NULL;
			if_printf(ifp, "can't map mbuf (error %d)\n", error);
			return (error);
		}
	}

	KASSERT(nseg <= XL_MAXFRAGS,
	    ("%s: too many DMA segments (%d)", __func__, nseg));
	if (nseg == 0) {
		/* Empty chain: nothing to transmit. */
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}
	bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREWRITE);

	/* Copy the DMA segments into the descriptor's fragment array. */
	total_len = 0;
	for (i = 0; i < nseg; i++) {
		KASSERT(sc->xl_cdata.xl_tx_segs[i].ds_len <= MCLBYTES,
		    ("segment size too large"));
		c->xl_ptr->xl_frag[i].xl_addr =
		    htole32(sc->xl_cdata.xl_tx_segs[i].ds_addr);
		c->xl_ptr->xl_frag[i].xl_len =
		    htole32(sc->xl_cdata.xl_tx_segs[i].ds_len);
		total_len += sc->xl_cdata.xl_tx_segs[i].ds_len;
	}
	c->xl_ptr->xl_frag[nseg - 1].xl_len |= htole32(XL_LAST_FRAG);

	if (sc->xl_type == XL_TYPE_905B) {
		/* 905B: request hardware checksum offload as needed. */
		status = XL_TXSTAT_RND_DEFEAT;

#ifndef XL905B_TXCSUM_BROKEN
		if ((*m_head)->m_pkthdr.csum_flags) {
			if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
				status |= XL_TXSTAT_IPCKSUM;
			if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
				status |= XL_TXSTAT_TCPCKSUM;
			if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
				status |= XL_TXSTAT_UDPCKSUM;
		}
#endif
	} else
		/* 90x: the status word carries the total frame length. */
		status = total_len;
	c->xl_ptr->xl_status = htole32(status);
	c->xl_ptr->xl_next = 0;

	c->xl_mbuf = *m_head;
	return (0);
}
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 *
 * This is the unlocked entry point; it takes the softc lock and
 * dispatches to the chip-specific locked start routine.
 */
static void
xl_start(if_t ifp)
{
	struct xl_softc *sc;

	sc = if_getsoftc(ifp);

	XL_LOCK(sc);
	if (sc->xl_type != XL_TYPE_905B)
		xl_start_locked(ifp);
	else
		xl_start_90xB_locked(ifp);
	XL_UNLOCK(sc);
}
/*
 * Locked transmit path for pre-905B ("boomerang") chips: pulls packets
 * off the interface send queue, encapsulates them into free Tx
 * descriptors, links them into a chain, and hands the chain to the
 * download engine.
 */
static void
xl_start_locked(if_t ifp)
{
	struct xl_softc *sc = if_getsoftc(ifp);
	struct mbuf *m_head;
	struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain *prev_tx;
	int error;

	XL_LOCK_ASSERT(sc);

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->xl_cdata.xl_tx_free == NULL) {
		/* Try to reclaim completed descriptors first. */
		xl_txeoc(sc);
		xl_txeof(sc);
		if (sc->xl_cdata.xl_tx_free == NULL) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			return;
		}
	}

	start_tx = sc->xl_cdata.xl_tx_free;

	for (; !if_sendq_empty(ifp) &&
	    sc->xl_cdata.xl_tx_free != NULL;) {
		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		prev_tx = cur_tx;
		cur_tx = sc->xl_cdata.xl_tx_free;

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, &m_head);
		if (error) {
			/* Undo the tentative descriptor selection. */
			cur_tx = prev_tx;
			if (m_head == NULL)
				break;
			/* Mapping failed but the mbuf survived: requeue it. */
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			if_sendq_prepend(ifp, m_head);
			break;
		}

		/* Descriptor is now in use; unlink it from the free list. */
		sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
		cur_tx->xl_next = NULL;

		/* Chain it together. */
		if (prev != NULL) {
			prev->xl_next = cur_tx;
			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
		}
		prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, cur_tx->xl_mbuf);
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);

	/*
	 * Queue the packets. If the TX channel is clear, update
	 * the downlist pointer register.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
	xl_wait(sc);

	if (sc->xl_cdata.xl_tx_head != NULL) {
		/*
		 * Append the new chain to the existing one and clear the
		 * interrupt request bit on the old tail so only the new
		 * tail interrupts.
		 */
		sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
		    htole32(start_tx->xl_phys);
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &=
		    htole32(~XL_TXSTAT_DL_INTR);
		sc->xl_cdata.xl_tx_tail = cur_tx;
	} else {
		sc->xl_cdata.xl_tx_head = start_tx;
		sc->xl_cdata.xl_tx_tail = cur_tx;
	}
	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
		CSR_WRITE_4(sc, XL_DOWNLIST_PTR, start_tx->xl_phys);

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);

	XL_SEL_WIN(7);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	sc->xl_wdog_timer = 5;

	/*
	 * XXX Under certain conditions, usually on slower machines
	 * where interrupts may be dropped, it's possible for the
	 * adapter to chew up all the buffers in the receive ring
	 * and stall, without us being able to do anything about it.
	 * To guard against this, we need to make a pass over the
	 * RX queue to make sure there aren't any packets pending.
	 * Doing it here means we can flush the receive ring at the
	 * same time the chip is DMAing the transmit descriptors we
	 * just gave it.
	 *
	 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
	 * nature of their chips in all their marketing literature;
	 * we may as well take advantage of it. :)
	 */
	taskqueue_enqueue(taskqueue_swi, &sc->xl_task);
}
/*
 * Locked transmit path for 3c905B ("cyclone") chips, which use a fixed
 * ring of Tx descriptors indexed by xl_tx_prod instead of a free list.
 * Hardware polls the download list (see xl_init_locked), so no
 * stall/unstall dance is needed here.
 */
static void
xl_start_90xB_locked(if_t ifp)
{
	struct xl_softc *sc = if_getsoftc(ifp);
	struct mbuf *m_head;
	struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain *prev_tx;
	int error, idx;

	XL_LOCK_ASSERT(sc);

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	idx = sc->xl_cdata.xl_tx_prod;
	start_tx = &sc->xl_cdata.xl_tx_chain[idx];

	for (; !if_sendq_empty(ifp) &&
	    sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL;) {
		/* Keep a small reserve of free descriptors. */
		if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;

		prev_tx = cur_tx;
		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, &m_head);
		if (error) {
			/* Roll back and requeue the survivor, if any. */
			cur_tx = prev_tx;
			if (m_head == NULL)
				break;
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			if_sendq_prepend(ifp, m_head);
			break;
		}

		/* Chain it together. */
		if (prev != NULL)
			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
		prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, cur_tx->xl_mbuf);

		XL_INC(idx, XL_TX_LIST_CNT);
		sc->xl_cdata.xl_tx_cnt++;
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);

	/* Start transmission */
	sc->xl_cdata.xl_tx_prod = idx;
	/* Link the new chain after the previously queued descriptor. */
	start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);
	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	sc->xl_wdog_timer = 5;
}
/*
 * Ifnet init callback: wraps xl_init_locked() with the softc lock.
 */
static void
xl_init(void *xsc)
{
	struct xl_softc *sc;

	sc = (struct xl_softc *)xsc;
	XL_LOCK(sc);
	xl_init_locked(sc);
	XL_UNLOCK(sc);
}
/*
 * Bring the chip up from a stopped state: reset, program the station
 * address and filters, initialize the Rx/Tx rings, configure
 * thresholds, enable interrupts and the Rx/Tx engines, and arm the
 * tick callout.  The register sequence below is order-sensitive.
 * Caller must hold the softc lock.
 */
static void
xl_init_locked(struct xl_softc *sc)
{
	if_t ifp = sc->xl_ifp;
	int error, i;
	struct mii_data *mii = NULL;

	XL_LOCK_ASSERT(sc);

	/* Already up; nothing to do. */
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	xl_stop(sc);

	/* Reset the chip to a known state. */
	xl_reset(sc);

	if (sc->xl_miibus == NULL) {
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
		xl_wait(sc);
	}
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
	DELAY(10000);

	if (sc->xl_miibus != NULL)
		mii = device_get_softc(sc->xl_miibus);

	/*
	 * Clear WOL status and disable all WOL feature as WOL
	 * would interfere Rx operation under normal environments.
	 */
	if ((sc->xl_flags & XL_FLAG_WOL) != 0) {
		XL_SEL_WIN(7);
		CSR_READ_2(sc, XL_W7_BM_PME);
		CSR_WRITE_2(sc, XL_W7_BM_PME, 0);
	}

	/* Init our MAC address */
	XL_SEL_WIN(2);
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
		    if_getlladdr(sc->xl_ifp)[i]);
	}

	/* Clear the station mask. */
	for (i = 0; i < 3; i++)
		CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
#ifdef notdef
	/* Reset TX and RX. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
#endif
	/* Init circular RX list. */
	error = xl_list_rx_init(sc);
	if (error) {
		device_printf(sc->xl_dev, "initialization of the rx ring failed (%d)\n",
		    error);
		xl_stop(sc);
		return;
	}

	/* Init TX descriptors. */
	if (sc->xl_type == XL_TYPE_905B)
		error = xl_list_tx_init_90xB(sc);
	else
		error = xl_list_tx_init(sc);
	if (error) {
		device_printf(sc->xl_dev, "initialization of the tx ring failed (%d)\n",
		    error);
		xl_stop(sc);
		return;
	}

	/*
	 * Set the TX freethresh value.
	 * Note that this has no effect on 3c905B "cyclone"
	 * cards but is required for 3c900/3c905 "boomerang"
	 * cards in order to enable the download engine.
	 */
	CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);

	/* Set the TX start threshold for best performance. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);

	/*
	 * If this is a 3c905B, also set the tx reclaim threshold.
	 * This helps cut down on the number of tx reclaim errors
	 * that could happen on a busy network. The chip multiplies
	 * the register value by 16 to obtain the actual threshold
	 * in bytes, so we divide by 16 when setting the value here.
	 * The existing threshold value can be examined by reading
	 * the register at offset 9 in window 5.
	 */
	if (sc->xl_type == XL_TYPE_905B) {
		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
	}

	/* Set RX filter bits. */
	xl_rxfilter(sc);

	/*
	 * Load the address of the RX list. We have to
	 * stall the upload engine before we can manipulate
	 * the uplist pointer register, then unstall it when
	 * we're finished. We also have to wait for the
	 * stall command to complete before proceeding.
	 * Note that we have to do this after any RX resets
	 * have completed since the uplist register is cleared
	 * by a reset.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
	xl_wait(sc);
	CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
	xl_wait(sc);

	if (sc->xl_type == XL_TYPE_905B) {
		/* Set polling interval */
		CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
		/* Load the address of the TX list */
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
		xl_wait(sc);
		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
		    sc->xl_cdata.xl_tx_chain[0].xl_phys);
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		xl_wait(sc);
	}

	/*
	 * If the coax transceiver is on, make sure to enable
	 * the DC-DC converter.
	 */
	XL_SEL_WIN(3);
	if (sc->xl_xcvr == XL_XCVR_COAX)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
	else
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);

	/*
	 * increase packet size to allow reception of 802.1q or ISL packets.
	 * For the 3c90x chip, set the 'allow large packets' bit in the MAC
	 * control register. For 3c90xB/C chips, use the RX packet size
	 * register.
	 */
	if (sc->xl_type == XL_TYPE_905B)
		CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
	else {
		u_int8_t macctl;
		macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
		macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
	}

	/* Clear out the stats counters. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
	xl_stats_update(sc);
	XL_SEL_WIN(4);
	CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
	else
#endif
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
	if (sc->xl_flags & XL_FLAG_FUNCREG)
		bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);

	/* Set the RX early threshold */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2));
	CSR_WRITE_4(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
	xl_wait(sc);

	/* XXX Downcall to miibus. */
	if (mii != NULL)
		mii_mediachg(mii);

	/* Select window 7 for normal operations. */
	XL_SEL_WIN(7);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	sc->xl_wdog_timer = 0;
	callout_reset(&sc->xl_tick_callout, hz, xl_tick, sc);
}
/*
 * Set media options.
 *
 * Fixed fiber/AUI/coax media are programmed directly via xl_setmode();
 * PHY-capable media go through a full reinit so the MII layer can
 * renegotiate.  Always returns 0.
 */
static int
xl_ifmedia_upd(if_t ifp)
{
	struct xl_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii;
	struct ifmedia *ifm;
	int subtype;

	XL_LOCK(sc);

	/* Prefer the MII media words when a PHY is attached. */
	mii = (sc->xl_miibus != NULL) ?
	    device_get_softc(sc->xl_miibus) : NULL;
	ifm = (mii != NULL) ? &mii->mii_media : &sc->ifmedia;

	/* Fixed media types bypass the reinit path entirely. */
	subtype = IFM_SUBTYPE(ifm->ifm_media);
	if (subtype == IFM_100_FX || subtype == IFM_10_FL ||
	    subtype == IFM_10_2 || subtype == IFM_10_5) {
		xl_setmode(sc, ifm->ifm_media);
		XL_UNLOCK(sc);
		return (0);
	}

	if ((sc->xl_media & (XL_MEDIAOPT_MII | XL_MEDIAOPT_BTX |
	    XL_MEDIAOPT_BT4)) != 0) {
		/* Force a reinit so the new media takes effect. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		xl_init_locked(sc);
	} else
		xl_setmode(sc, ifm->ifm_media);

	XL_UNLOCK(sc);
	return (0);
}
/*
 * Report current media status.
 *
 * Reads the active connector from the internal config register and
 * translates it into ifmedia words; PHY-backed media defer to the MII
 * layer via mii_pollstat().
 */
static void
xl_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct xl_softc *sc = if_getsoftc(ifp);
	u_int32_t icfg;
	u_int16_t status = 0;
	struct mii_data *mii = NULL;

	XL_LOCK(sc);

	if (sc->xl_miibus != NULL)
		mii = device_get_softc(sc->xl_miibus);

	XL_SEL_WIN(4);
	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);

	/* Extract the currently selected connector type. */
	XL_SEL_WIN(3);
	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
	icfg >>= XL_ICFG_CONNECTOR_BITS;

	ifmr->ifm_active = IFM_ETHER;
	ifmr->ifm_status = IFM_AVALID;

	/* CARRIER bit set means carrier lost (see xl_watchdog). */
	if ((status & XL_MEDIASTAT_CARRIER) == 0)
		ifmr->ifm_status |= IFM_ACTIVE;

	switch (icfg) {
	case XL_XCVR_10BT:
		ifmr->ifm_active = IFM_ETHER|IFM_10_T;
		if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
		break;
	case XL_XCVR_AUI:
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
			if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
				ifmr->ifm_active |= IFM_FDX;
			else
				ifmr->ifm_active |= IFM_HDX;
		} else
			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
		break;
	case XL_XCVR_COAX:
		ifmr->ifm_active = IFM_ETHER|IFM_10_2;
		break;
	/*
	 * XXX MII and BTX/AUTO should be separate cases.
	 */
	case XL_XCVR_100BTX:
	case XL_XCVR_AUTO:
	case XL_XCVR_MII:
		if (mii != NULL) {
			mii_pollstat(mii);
			ifmr->ifm_active = mii->mii_media_active;
			ifmr->ifm_status = mii->mii_media_status;
		}
		break;
	case XL_XCVR_100BFX:
		ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
		break;
	default:
		/* icfg is unsigned (u_int32_t); use %u, not %d. */
		if_printf(ifp, "unknown XCVR type: %u\n", icfg);
		break;
	}

	XL_UNLOCK(sc);
}
/*
 * Ifnet ioctl handler.  Handles interface flag changes, multicast
 * list updates, media selection, and capability toggles (Tx/Rx
 * checksum offload, WOL, and optional DEVICE_POLLING); everything
 * else falls through to ether_ioctl().
 */
static int
xl_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct xl_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0, mask;
	struct mii_data *mii = NULL;

	switch (command) {
	case SIOCSIFFLAGS:
		XL_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			/*
			 * If only PROMISC/ALLMULTI changed while running,
			 * just reprogram the Rx filter; otherwise reinit.
			 */
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
			    (if_getflags(ifp) ^ sc->xl_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI))
				xl_rxfilter(sc);
			else
				xl_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				xl_stop(sc);
		}
		sc->xl_if_flags = if_getflags(ifp);
		XL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX Downcall from if_addmulti() possibly with locks held. */
		XL_LOCK(sc);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			xl_rxfilter(sc);
		XL_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		if (sc->xl_miibus != NULL)
			mii = device_get_softc(sc->xl_miibus);
		if (mii == NULL)
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->ifmedia, command);
		else
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		/* Bits the caller is asking us to change. */
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_POLLING) != 0) {
			if_togglecapenable(ifp, IFCAP_POLLING);
			if ((if_getcapenable(ifp) & IFCAP_POLLING) != 0) {
				error = ether_poll_register(xl_poll, ifp);
				if (error)
					break;
				XL_LOCK(sc);
				/* Disable interrupts */
				CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
				XL_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				XL_LOCK(sc);
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_INTR_ACK | 0xFF);
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_INTR_ENB | XL_INTRS);
				if (sc->xl_flags & XL_FLAG_FUNCREG)
					bus_space_write_4(sc->xl_ftag,
					    sc->xl_fhandle, 4, 0x8000);
				XL_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		XL_LOCK(sc);
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
			if_togglecapenable(ifp, IFCAP_TXCSUM);
			/* Keep the hwassist flags in sync with the toggle. */
			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
				if_sethwassistbits(ifp, XL905B_CSUM_FEATURES, 0);
			else
				if_sethwassistbits(ifp, 0, XL905B_CSUM_FEATURES);
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0)
			if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
		XL_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*
 * Transmit watchdog, called periodically (presumably from the tick
 * callout armed in xl_init_locked — see xl_tick; TODO confirm) with
 * the softc lock held.  Decrements the timer armed by the start
 * routines; on expiry it first tries to reclaim completed Tx
 * descriptors (a merely-missed interrupt), and only if the ring is
 * genuinely stuck does it reinitialize the chip.  Returns EJUSTRETURN
 * after a reinit, 0 otherwise.
 */
static int
xl_watchdog(struct xl_softc *sc)
{
	if_t ifp = sc->xl_ifp;
	u_int16_t status = 0;
	int misintr;

	XL_LOCK_ASSERT(sc);

	/* Timer not armed, or armed but not yet expired. */
	if (sc->xl_wdog_timer == 0 || --sc->xl_wdog_timer != 0)
		return (0);

	xl_rxeof(sc);
	xl_txeoc(sc);
	misintr = 0;
	/* If reclaiming drains the Tx ring, we only missed an interrupt. */
	if (sc->xl_type == XL_TYPE_905B) {
		xl_txeof_90xB(sc);
		if (sc->xl_cdata.xl_tx_cnt == 0)
			misintr++;
	} else {
		xl_txeof(sc);
		if (sc->xl_cdata.xl_tx_head == NULL)
			misintr++;
	}

	if (misintr != 0) {
		device_printf(sc->xl_dev,
		    "watchdog timeout (missed Tx interrupts) -- recovering\n");
		return (0);
	}

	/* Genuine hang: count an output error and reinitialize. */
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	XL_SEL_WIN(4);
	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
	device_printf(sc->xl_dev, "watchdog timeout\n");

	/* CARRIER bit set indicates carrier loss. */
	if (status & XL_MEDIASTAT_CARRIER)
		device_printf(sc->xl_dev,
		    "no carrier - transceiver cable problem?\n");

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	xl_init_locked(sc);

	/* Kick transmission again if packets are still queued. */
	if (!if_sendq_empty(ifp)) {
		if (sc->xl_type == XL_TYPE_905B)
			xl_start_90xB_locked(ifp);
		else
			xl_start_locked(ifp);
	}

	return (EJUSTRETURN);
}
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 *
 * Caller must hold the softc lock.  Disables the Rx/Tx engines and
 * interrupts, stops the tick callout, releases all ring buffers, and
 * clears IFF_DRV_RUNNING/IFF_DRV_OACTIVE.
 */
static void
xl_stop(struct xl_softc *sc)
{
	int i;
	if_t ifp = sc->xl_ifp;

	XL_LOCK_ASSERT(sc);

	/* Disarm the transmit watchdog. */
	sc->xl_wdog_timer = 0;

	/* Disable Rx/Tx and stats collection; discard any pending Rx. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
	DELAY(800);

#ifdef foo
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
#endif

	/* Acknowledge and mask all interrupt sources. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
	if (sc->xl_flags & XL_FLAG_FUNCREG)
		bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);

	/* Stop the stats updater. */
	callout_stop(&sc->xl_tick_callout);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < XL_RX_LIST_CNT; i++) {
		if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
			bus_dmamap_unload(sc->xl_mtag,
			    sc->xl_cdata.xl_rx_chain[i].xl_map);
			bus_dmamap_destroy(sc->xl_mtag,
			    sc->xl_cdata.xl_rx_chain[i].xl_map);
			m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
			sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
		}
	}
	if (sc->xl_ldata.xl_rx_list != NULL)
		bzero(sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ);
	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < XL_TX_LIST_CNT; i++) {
		if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
			bus_dmamap_unload(sc->xl_mtag,
			    sc->xl_cdata.xl_tx_chain[i].xl_map);
			bus_dmamap_destroy(sc->xl_mtag,
			    sc->xl_cdata.xl_tx_chain[i].xl_map);
			m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
			sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
		}
	}
	if (sc->xl_ldata.xl_tx_list != NULL)
		bzero(sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ);

	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
}
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 *
 * Shutdown is identical to suspend: quiesce the chip and arm WOL.
 */
static int
xl_shutdown(device_t dev)
{
	int error;

	error = xl_suspend(dev);
	return (error);
}
/*
 * Suspend method: stop the chip under the softc lock and program the
 * Wake On LAN registers before the system powers down.
 */
static int
xl_suspend(device_t dev)
{
	struct xl_softc *sc = device_get_softc(dev);

	XL_LOCK(sc);
	xl_stop(sc);
	xl_setwol(sc);
	XL_UNLOCK(sc);

	return (0);
}
/*
 * Resume method: if the interface was administratively up, force a
 * full reinit so the chip is reprogrammed after power loss.
 */
static int
xl_resume(device_t dev)
{
	struct xl_softc *sc = device_get_softc(dev);
	if_t ifp = sc->xl_ifp;

	XL_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		/* Clear RUNNING so xl_init_locked() does a real reinit. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		xl_init_locked(sc);
	}
	XL_UNLOCK(sc);

	return (0);
}
/*
 * Program the chip's Wake On LAN state.  Only magic-packet wakeup is
 * supported.  Called after xl_stop() on the suspend/shutdown path;
 * no-op on hardware without WOL support.
 */
static void
xl_setwol(struct xl_softc *sc)
{
	if_t ifp;
	u_int16_t cfg, pmstat;

	if ((sc->xl_flags & XL_FLAG_WOL) == 0)
		return;

	ifp = sc->xl_ifp;
	XL_SEL_WIN(7);
	/* Clear any pending PME events. */
	CSR_READ_2(sc, XL_W7_BM_PME);
	cfg = 0;
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
		cfg |= XL_BM_PME_MAGIC;
	CSR_WRITE_2(sc, XL_W7_BM_PME, cfg);
	/* Enable RX. */
	/* Rx is kept running so the chip can see the magic packet. */
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
	/* Request PME. */
	pmstat = pci_read_config(sc->xl_dev,
	    sc->xl_pmcap + PCIR_POWER_STATUS, 2);
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
		pmstat |= PCIM_PSTAT_PMEENABLE;
	else
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->xl_dev,
	    sc->xl_pmcap + PCIR_POWER_STATUS, pmstat, 2);
}
diff --git a/sys/net/if_bridge.c b/sys/net/if_bridge.c
index 1e6f9b578ee3..723b92d3dcd5 100644
--- a/sys/net/if_bridge.c
+++ b/sys/net/if_bridge.c
@@ -1,3926 +1,3922 @@
/* $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $ */
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright 2001 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Jason R. Thorpe for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
*/
/*
* Network interface bridge support.
*
* TODO:
*
* - Currently only supports Ethernet-like interfaces (Ethernet,
* 802.11, VLANs on Ethernet, etc.) Figure out a nice way
* to bridge other types of interfaces (maybe consider
* heterogeneous bridges).
*/
#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/systm.h>
#include <sys/jail.h>
#include <sys/time.h>
#include <sys/socket.h> /* for net/if.h */
#include <sys/sockio.h>
#include <sys/ctype.h> /* string functions */
#include <sys/kernel.h>
#include <sys/random.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <vm/uma.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/pfil.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_ifattach.h>
#endif
#if defined(INET) || defined(INET6)
#include <netinet/ip_carp.h>
#endif
#include <machine/in_cksum.h>
#include <netinet/if_ether.h>
#include <net/bridgestp.h>
#include <net/if_bridgevar.h>
#include <net/if_llc.h>
#include <net/if_vlan_var.h>
#include <net/route.h>
/*
 * At various points in the code we need to know if we're hooked into the INET
 * and/or INET6 pfil. Define some macros to do that based on which IP versions
 * are enabled in the kernel. This avoids littering the rest of the code with
 * #ifdef INET6 to avoid referencing V_inet6_pfil_head.
 */
#ifdef INET6
#define PFIL_HOOKED_IN_INET6 PFIL_HOOKED_IN(V_inet6_pfil_head)
#define PFIL_HOOKED_OUT_INET6 PFIL_HOOKED_OUT(V_inet6_pfil_head)
#else
#define PFIL_HOOKED_IN_INET6 false
#define PFIL_HOOKED_OUT_INET6 false
#endif

#ifdef INET
#define PFIL_HOOKED_IN_INET PFIL_HOOKED_IN(V_inet_pfil_head)
#define PFIL_HOOKED_OUT_INET PFIL_HOOKED_OUT(V_inet_pfil_head)
#else
#define PFIL_HOOKED_IN_INET false
#define PFIL_HOOKED_OUT_INET false
#endif

/* True when any pfil hook (v4 or v6) is active in the given direction. */
#define PFIL_HOOKED_IN_46 (PFIL_HOOKED_IN_INET6 || PFIL_HOOKED_IN_INET)
#define PFIL_HOOKED_OUT_46 (PFIL_HOOKED_OUT_INET6 || PFIL_HOOKED_OUT_INET)

/*
 * Size of the route hash table. Must be a power of two.
 */
#ifndef BRIDGE_RTHASH_SIZE
#define BRIDGE_RTHASH_SIZE 1024
#endif

#define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1)

/*
 * Default maximum number of addresses to cache.
 */
#ifndef BRIDGE_RTABLE_MAX
#define BRIDGE_RTABLE_MAX 2000
#endif

/*
 * Timeout (in seconds) for entries learned dynamically.
 */
#ifndef BRIDGE_RTABLE_TIMEOUT
#define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
#endif

/*
 * Number of seconds between walks of the route list.
 */
#ifndef BRIDGE_RTABLE_PRUNE_PERIOD
#define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
#endif

/*
 * List of capabilities to possibly mask on the member interface.
 */
#define BRIDGE_IFCAPS_MASK (IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM|\
 IFCAP_TXCSUM_IPV6)

/*
 * List of capabilities to strip
 */
#define BRIDGE_IFCAPS_STRIP IFCAP_LRO

/*
 * Bridge locking
 *
 * The bridge relies heavily on the epoch(9) system to protect its data
 * structures. This means we can safely use CK_LISTs while in NET_EPOCH, but we
 * must ensure there is only one writer at a time.
 *
 * That is: for read accesses we only need to be in NET_EPOCH, but for write
 * accesses we must hold:
 *
 *  - BRIDGE_RT_LOCK, for any change to bridge_rtnodes
 *  - BRIDGE_LOCK, for any other change
 *
 * The BRIDGE_LOCK is a sleepable lock, because it is held across ioctl()
 * calls to bridge member interfaces and these ioctl()s can sleep.
 * The BRIDGE_RT_LOCK is a non-sleepable mutex, because it is sometimes
 * required while we're in NET_EPOCH and then we're not allowed to sleep.
 */
#define BRIDGE_LOCK_INIT(_sc) do { \
 sx_init(&(_sc)->sc_sx, "if_bridge"); \
 mtx_init(&(_sc)->sc_rt_mtx, "if_bridge rt", NULL, MTX_DEF); \
} while (0)
#define BRIDGE_LOCK_DESTROY(_sc) do { \
 sx_destroy(&(_sc)->sc_sx); \
 mtx_destroy(&(_sc)->sc_rt_mtx); \
} while (0)
#define BRIDGE_LOCK(_sc) sx_xlock(&(_sc)->sc_sx)
#define BRIDGE_UNLOCK(_sc) sx_xunlock(&(_sc)->sc_sx)
#define BRIDGE_LOCK_ASSERT(_sc) sx_assert(&(_sc)->sc_sx, SX_XLOCKED)
#define BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(_sc) \
 MPASS(in_epoch(net_epoch_preempt) || sx_xlocked(&(_sc)->sc_sx))
#define BRIDGE_UNLOCK_ASSERT(_sc) sx_assert(&(_sc)->sc_sx, SX_UNLOCKED)
#define BRIDGE_RT_LOCK(_sc) mtx_lock(&(_sc)->sc_rt_mtx)
#define BRIDGE_RT_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_rt_mtx)
#define BRIDGE_RT_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_rt_mtx, MA_OWNED)
#define BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(_sc) \
 MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(_sc)->sc_rt_mtx))

/*
 * Bridge interface list entry.
 */
struct bridge_iflist {
	CK_LIST_ENTRY(bridge_iflist) bif_next;
	struct ifnet *bif_ifp; /* member if */
	struct bstp_port bif_stp; /* STP state */
	uint32_t bif_flags; /* member if flags */
	int bif_savedcaps; /* saved capabilities */
	uint32_t bif_addrmax; /* max # of addresses */
	uint32_t bif_addrcnt; /* cur. # of addresses */
	uint32_t bif_addrexceeded;/* # of address violations */
	struct epoch_context bif_epoch_ctx; /* deferred free (epoch(9)) */
};

/*
 * Bridge route node.
 */
struct bridge_rtnode {
	CK_LIST_ENTRY(bridge_rtnode) brt_hash; /* hash table linkage */
	CK_LIST_ENTRY(bridge_rtnode) brt_list; /* list linkage */
	struct bridge_iflist *brt_dst; /* destination if */
	unsigned long brt_expire; /* expiration time */
	uint8_t brt_flags; /* address flags */
	uint8_t brt_addr[ETHER_ADDR_LEN];
	uint16_t brt_vlan; /* vlan id */
	struct vnet *brt_vnet;
	struct epoch_context brt_epoch_ctx; /* deferred free (epoch(9)) */
};
/* Convenience accessor for a route node's destination ifnet. */
#define brt_ifp brt_dst->bif_ifp

/*
 * Software state for each bridge.
 */
struct bridge_softc {
	struct ifnet *sc_ifp; /* make this an interface */
	LIST_ENTRY(bridge_softc) sc_list;
	struct sx sc_sx; /* BRIDGE_LOCK (see above) */
	struct mtx sc_rt_mtx; /* BRIDGE_RT_LOCK (see above) */
	uint32_t sc_brtmax; /* max # of addresses */
	uint32_t sc_brtcnt; /* cur. # of addresses */
	uint32_t sc_brttimeout; /* rt timeout in seconds */
	struct callout sc_brcallout; /* bridge callout */
	CK_LIST_HEAD(, bridge_iflist) sc_iflist; /* member interface list */
	CK_LIST_HEAD(, bridge_rtnode) *sc_rthash; /* our forwarding table */
	CK_LIST_HEAD(, bridge_rtnode) sc_rtlist; /* list version of above */
	uint32_t sc_rthash_key; /* key for hash */
	CK_LIST_HEAD(, bridge_iflist) sc_spanlist; /* span ports list */
	struct bstp_state sc_stp; /* STP state */
	uint32_t sc_brtexceeded; /* # of cache drops */
	struct ifnet *sc_ifaddr; /* member mac copied from */
	struct ether_addr sc_defaddr; /* Default MAC address */
	if_input_fn_t sc_if_input; /* Saved copy of if_input */
	struct epoch_context sc_epoch_ctx; /* deferred free (epoch(9)) */
};

/* Per-VNET lock protecting the list of bridge softcs. */
VNET_DEFINE_STATIC(struct sx, bridge_list_sx);
#define V_bridge_list_sx VNET(bridge_list_sx)
static eventhandler_tag bridge_detach_cookie;

int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;

/* Per-VNET UMA zone for struct bridge_rtnode allocations. */
VNET_DEFINE_STATIC(uma_zone_t, bridge_rtnode_zone);
#define V_bridge_rtnode_zone VNET(bridge_rtnode_zone)
static int bridge_clone_create(struct if_clone *, char *, size_t,
struct ifc_data *, struct ifnet **);
static int bridge_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);
static int bridge_ioctl(struct ifnet *, u_long, caddr_t);
static void bridge_mutecaps(struct bridge_softc *);
static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
int);
static void bridge_ifdetach(void *arg __unused, struct ifnet *);
static void bridge_init(void *);
static void bridge_dummynet(struct mbuf *, struct ifnet *);
static void bridge_stop(struct ifnet *, int);
static int bridge_transmit(struct ifnet *, struct mbuf *);
#ifdef ALTQ
static void bridge_altq_start(if_t);
static int bridge_altq_transmit(if_t, struct mbuf *);
#endif
static void bridge_qflush(struct ifnet *);
static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
static void bridge_inject(struct ifnet *, struct mbuf *);
static int bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *,
struct rtentry *);
static int bridge_enqueue(struct bridge_softc *, struct ifnet *,
struct mbuf *);
static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
static void bridge_forward(struct bridge_softc *, struct bridge_iflist *,
struct mbuf *m);
static void bridge_timer(void *);
static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
struct mbuf *, int);
static void bridge_span(struct bridge_softc *, struct mbuf *);
static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
uint16_t, struct bridge_iflist *, int, uint8_t);
static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
uint16_t);
static void bridge_rttrim(struct bridge_softc *);
static void bridge_rtage(struct bridge_softc *);
static void bridge_rtflush(struct bridge_softc *, int);
static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
uint16_t);
static void bridge_rtable_init(struct bridge_softc *);
static void bridge_rtable_fini(struct bridge_softc *);
static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
const uint8_t *, uint16_t);
static int bridge_rtnode_insert(struct bridge_softc *,
struct bridge_rtnode *);
static void bridge_rtnode_destroy(struct bridge_softc *,
struct bridge_rtnode *);
static void bridge_rtable_expire(struct ifnet *, int);
static void bridge_state_change(struct ifnet *, int);
static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
const char *name);
static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
struct ifnet *ifp);
static void bridge_delete_member(struct bridge_softc *,
struct bridge_iflist *, int);
static void bridge_delete_span(struct bridge_softc *,
struct bridge_iflist *);
static int bridge_ioctl_add(struct bridge_softc *, void *);
static int bridge_ioctl_del(struct bridge_softc *, void *);
static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
static int bridge_ioctl_scache(struct bridge_softc *, void *);
static int bridge_ioctl_gcache(struct bridge_softc *, void *);
static int bridge_ioctl_gifs(struct bridge_softc *, void *);
static int bridge_ioctl_rts(struct bridge_softc *, void *);
static int bridge_ioctl_saddr(struct bridge_softc *, void *);
static int bridge_ioctl_sto(struct bridge_softc *, void *);
static int bridge_ioctl_gto(struct bridge_softc *, void *);
static int bridge_ioctl_daddr(struct bridge_softc *, void *);
static int bridge_ioctl_flush(struct bridge_softc *, void *);
static int bridge_ioctl_gpri(struct bridge_softc *, void *);
static int bridge_ioctl_spri(struct bridge_softc *, void *);
static int bridge_ioctl_ght(struct bridge_softc *, void *);
static int bridge_ioctl_sht(struct bridge_softc *, void *);
static int bridge_ioctl_gfd(struct bridge_softc *, void *);
static int bridge_ioctl_sfd(struct bridge_softc *, void *);
static int bridge_ioctl_gma(struct bridge_softc *, void *);
static int bridge_ioctl_sma(struct bridge_softc *, void *);
static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
static int bridge_ioctl_addspan(struct bridge_softc *, void *);
static int bridge_ioctl_delspan(struct bridge_softc *, void *);
static int bridge_ioctl_gbparam(struct bridge_softc *, void *);
static int bridge_ioctl_grte(struct bridge_softc *, void *);
static int bridge_ioctl_gifsstp(struct bridge_softc *, void *);
static int bridge_ioctl_sproto(struct bridge_softc *, void *);
static int bridge_ioctl_stxhc(struct bridge_softc *, void *);
static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
int);
#ifdef INET
static int bridge_ip_checkbasic(struct mbuf **mp);
static int bridge_fragment(struct ifnet *, struct mbuf **mp,
struct ether_header *, int, struct llc *);
#endif /* INET */
#ifdef INET6
static int bridge_ip6_checkbasic(struct mbuf **mp);
#endif /* INET6 */
static void bridge_linkstate(struct ifnet *ifp);
static void bridge_linkcheck(struct bridge_softc *sc);
/*
 * Use the "null" value from IEEE 802.1Q-2014 Table 9-2
 * to indicate untagged frames.
 *
 * Parenthesize the argument uses and the entire expansion so the macro
 * composes safely inside larger expressions; the previous form left the
 * ?: operator exposed to surrounding operators of higher precedence.
 */
#define VLANTAGOF(_m) \
    (((_m)->m_flags & M_VLANTAG) ? EVL_VLANOFTAG((_m)->m_pkthdr.ether_vtag) : DOT1Q_VID_NULL)
/*
 * Callbacks handed to the bridgestp(4) code: notification of STP port
 * state changes, and requests to age out addresses learned on a port.
 */
static struct bstp_cb_ops bridge_ops = {
    .bcb_state = bridge_state_change,
    .bcb_rtage = bridge_rtable_expire
};
SYSCTL_DECL(_net_link);
static SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"Bridge");
/* only pass IP[46] packets when pfil is enabled */
VNET_DEFINE_STATIC(int, pfil_onlyip) = 1;
#define V_pfil_onlyip VNET(pfil_onlyip)
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip,
CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_onlyip), 0,
"Only pass IP packets when pfil is enabled");
/* run pfil hooks on the bridge interface */
VNET_DEFINE_STATIC(int, pfil_bridge) = 0;
#define V_pfil_bridge VNET(pfil_bridge)
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge,
CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_bridge), 0,
"Packet filter on the bridge interface");
/* layer2 filter with ipfw */
VNET_DEFINE_STATIC(int, pfil_ipfw);
#define V_pfil_ipfw VNET(pfil_ipfw)
/* layer2 ARP filter with ipfw */
VNET_DEFINE_STATIC(int, pfil_ipfw_arp);
#define V_pfil_ipfw_arp VNET(pfil_ipfw_arp)
SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp,
CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_ipfw_arp), 0,
"Filter ARP packets through IPFW layer2");
/* run pfil hooks on the member interface */
VNET_DEFINE_STATIC(int, pfil_member) = 0;
#define V_pfil_member VNET(pfil_member)
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member,
CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_member), 0,
"Packet filter on the member interface");
/* run pfil hooks on the physical interface for locally destined packets */
VNET_DEFINE_STATIC(int, pfil_local_phys);
#define V_pfil_local_phys VNET(pfil_local_phys)
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys,
CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_local_phys), 0,
"Packet filter on the physical interface for locally destined packets");
/* log STP state changes */
VNET_DEFINE_STATIC(int, log_stp);
#define V_log_stp VNET(log_stp)
SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp,
CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(log_stp), 0,
"Log STP state changes");
/* share MAC with first bridge member */
VNET_DEFINE_STATIC(int, bridge_inherit_mac);
#define V_bridge_inherit_mac VNET(bridge_inherit_mac)
SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac,
CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(bridge_inherit_mac), 0,
"Inherit MAC address from the first bridge member");
VNET_DEFINE_STATIC(int, allow_llz_overlap) = 0;
#define V_allow_llz_overlap VNET(allow_llz_overlap)
SYSCTL_INT(_net_link_bridge, OID_AUTO, allow_llz_overlap,
CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(allow_llz_overlap), 0,
"Allow overlap of link-local scope "
"zones of a bridge interface and the member interfaces");
/* log MAC address port flapping */
VNET_DEFINE_STATIC(bool, log_mac_flap) = true;
#define V_log_mac_flap VNET(log_mac_flap)
SYSCTL_BOOL(_net_link_bridge, OID_AUTO, log_mac_flap,
CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(log_mac_flap), true,
"Log MAC address port flapping");
VNET_DEFINE_STATIC(int, log_interval) = 5;
VNET_DEFINE_STATIC(int, log_count) = 0;
VNET_DEFINE_STATIC(struct timeval, log_last) = { 0 };
#define V_log_interval VNET(log_interval)
#define V_log_count VNET(log_count)
#define V_log_last VNET(log_last)
/*
 * Descriptor for one SIOC[GS]DRVSPEC sub-command: the handler function,
 * the exact size of its argument structure, and flags controlling
 * copyin/copyout behaviour and privilege checking.
 */
struct bridge_control {
    int (*bc_func)(struct bridge_softc *, void *);
    int bc_argsize;
    int bc_flags;
};

#define BC_F_COPYIN 0x01 /* copy arguments in */
#define BC_F_COPYOUT 0x02 /* copy arguments out */
#define BC_F_SUSER 0x04 /* do super-user check */
/*
 * Dispatch table for SIOC[GS]DRVSPEC sub-commands, indexed by ifd_cmd.
 * NOTE(review): entry order appears to be userland ABI (it presumably
 * mirrors the BRDG* command numbering in net/if_bridgevar.h — confirm
 * before reordering or inserting entries anywhere but the end).
 */
static const struct bridge_control bridge_control_table[] = {
    { bridge_ioctl_add, sizeof(struct ifbreq),
        BC_F_COPYIN|BC_F_SUSER },
    { bridge_ioctl_del, sizeof(struct ifbreq),
        BC_F_COPYIN|BC_F_SUSER },

    { bridge_ioctl_gifflags, sizeof(struct ifbreq),
        BC_F_COPYIN|BC_F_COPYOUT },
    { bridge_ioctl_sifflags, sizeof(struct ifbreq),
        BC_F_COPYIN|BC_F_SUSER },

    { bridge_ioctl_scache, sizeof(struct ifbrparam),
        BC_F_COPYIN|BC_F_SUSER },
    { bridge_ioctl_gcache, sizeof(struct ifbrparam),
        BC_F_COPYOUT },

    { bridge_ioctl_gifs, sizeof(struct ifbifconf),
        BC_F_COPYIN|BC_F_COPYOUT },
    { bridge_ioctl_rts, sizeof(struct ifbaconf),
        BC_F_COPYIN|BC_F_COPYOUT },

    { bridge_ioctl_saddr, sizeof(struct ifbareq),
        BC_F_COPYIN|BC_F_SUSER },

    { bridge_ioctl_sto, sizeof(struct ifbrparam),
        BC_F_COPYIN|BC_F_SUSER },
    { bridge_ioctl_gto, sizeof(struct ifbrparam),
        BC_F_COPYOUT },

    { bridge_ioctl_daddr, sizeof(struct ifbareq),
        BC_F_COPYIN|BC_F_SUSER },

    { bridge_ioctl_flush, sizeof(struct ifbreq),
        BC_F_COPYIN|BC_F_SUSER },

    { bridge_ioctl_gpri, sizeof(struct ifbrparam),
        BC_F_COPYOUT },
    { bridge_ioctl_spri, sizeof(struct ifbrparam),
        BC_F_COPYIN|BC_F_SUSER },

    { bridge_ioctl_ght, sizeof(struct ifbrparam),
        BC_F_COPYOUT },
    { bridge_ioctl_sht, sizeof(struct ifbrparam),
        BC_F_COPYIN|BC_F_SUSER },

    { bridge_ioctl_gfd, sizeof(struct ifbrparam),
        BC_F_COPYOUT },
    { bridge_ioctl_sfd, sizeof(struct ifbrparam),
        BC_F_COPYIN|BC_F_SUSER },

    { bridge_ioctl_gma, sizeof(struct ifbrparam),
        BC_F_COPYOUT },
    { bridge_ioctl_sma, sizeof(struct ifbrparam),
        BC_F_COPYIN|BC_F_SUSER },

    { bridge_ioctl_sifprio, sizeof(struct ifbreq),
        BC_F_COPYIN|BC_F_SUSER },

    { bridge_ioctl_sifcost, sizeof(struct ifbreq),
        BC_F_COPYIN|BC_F_SUSER },

    { bridge_ioctl_addspan, sizeof(struct ifbreq),
        BC_F_COPYIN|BC_F_SUSER },
    { bridge_ioctl_delspan, sizeof(struct ifbreq),
        BC_F_COPYIN|BC_F_SUSER },

    { bridge_ioctl_gbparam, sizeof(struct ifbropreq),
        BC_F_COPYOUT },

    { bridge_ioctl_grte, sizeof(struct ifbrparam),
        BC_F_COPYOUT },

    { bridge_ioctl_gifsstp, sizeof(struct ifbpstpconf),
        BC_F_COPYIN|BC_F_COPYOUT },

    { bridge_ioctl_sproto, sizeof(struct ifbrparam),
        BC_F_COPYIN|BC_F_SUSER },

    { bridge_ioctl_stxhc, sizeof(struct ifbrparam),
        BC_F_COPYIN|BC_F_SUSER },

    { bridge_ioctl_sifmaxaddr, sizeof(struct ifbreq),
        BC_F_COPYIN|BC_F_SUSER },
};
/* Number of entries above; used to bounds-check ifd_cmd in bridge_ioctl(). */
static const int bridge_control_table_size = nitems(bridge_control_table);
VNET_DEFINE_STATIC(LIST_HEAD(, bridge_softc), bridge_list);
#define V_bridge_list VNET(bridge_list)
#define BRIDGE_LIST_LOCK_INIT(x) sx_init(&V_bridge_list_sx, \
"if_bridge list")
#define BRIDGE_LIST_LOCK_DESTROY(x) sx_destroy(&V_bridge_list_sx)
#define BRIDGE_LIST_LOCK(x) sx_xlock(&V_bridge_list_sx)
#define BRIDGE_LIST_UNLOCK(x) sx_xunlock(&V_bridge_list_sx)
VNET_DEFINE_STATIC(struct if_clone *, bridge_cloner);
#define V_bridge_cloner VNET(bridge_cloner)
static const char bridge_name[] = "bridge";
/*
 * Per-VNET initialization: create the rtnode UMA zone, the bridge list
 * and its lock, and attach the "bridge" interface cloner.
 */
static void
vnet_bridge_init(const void *unused __unused)
{

    V_bridge_rtnode_zone = uma_zcreate("bridge_rtnode",
        sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL,
        UMA_ALIGN_PTR, 0);
    BRIDGE_LIST_LOCK_INIT();
    LIST_INIT(&V_bridge_list);

    struct if_clone_addreq req = {
        .create_f = bridge_clone_create,
        .destroy_f = bridge_clone_destroy,
        .flags = IFC_F_AUTOUNIT,
    };
    V_bridge_cloner = ifc_attach_cloner(bridge_name, &req);
}
VNET_SYSINIT(vnet_bridge_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    vnet_bridge_init, NULL);
/*
 * Per-VNET teardown: detach the cloner (destroying remaining bridges),
 * then drain pending epoch callbacks before destroying the UMA zone
 * they may still reference.
 */
static void
vnet_bridge_uninit(const void *unused __unused)
{

    ifc_detach_cloner(V_bridge_cloner);
    V_bridge_cloner = NULL;
    BRIDGE_LIST_LOCK_DESTROY();

    /* Callbacks may use the UMA zone. */
    NET_EPOCH_DRAIN_CALLBACKS();

    uma_zdestroy(V_bridge_rtnode_zone);
}
VNET_SYSUNINIT(vnet_bridge_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY,
    vnet_bridge_uninit, NULL);
/*
 * bridge_modevent:
 *
 *	Module load/unload hook.  On load, publish the dummynet
 *	re-injection hook and register for ifnet departure events; on
 *	unload, undo both.
 */
static int
bridge_modevent(module_t mod, int type, void *data)
{
    int error = 0;

    switch (type) {
    case MOD_LOAD:
        bridge_dn_p = bridge_dummynet;
        bridge_detach_cookie = EVENTHANDLER_REGISTER(
            ifnet_departure_event, bridge_ifdetach, NULL,
            EVENTHANDLER_PRI_ANY);
        break;
    case MOD_UNLOAD:
        EVENTHANDLER_DEREGISTER(ifnet_departure_event,
            bridge_detach_cookie);
        bridge_dn_p = NULL;
        break;
    default:
        error = EOPNOTSUPP;
        break;
    }

    return (error);
}
/* Module glue: register if_bridge with the pseudo-device subsystem. */
static moduledata_t bridge_mod = {
    "if_bridge",
    bridge_modevent,
    0
};

DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_bridge, 1);
/* bridgestp(4) provides the bstp_* spanning tree implementation. */
MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1);
/*
* handler for net.link.bridge.ipfw
*/
static int
sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS)
{
int enable = V_pfil_ipfw;
int error;
error = sysctl_handle_int(oidp, &enable, 0, req);
enable &= 1;
if (enable != V_pfil_ipfw) {
V_pfil_ipfw = enable;
/*
* Disable pfil so that ipfw doesnt run twice, if the user
* really wants both then they can re-enable pfil_bridge and/or
* pfil_member. Also allow non-ip packets as ipfw can filter by
* layer2 type.
*/
if (V_pfil_ipfw) {
V_pfil_onlyip = 0;
V_pfil_bridge = 0;
V_pfil_member = 0;
}
}
return (error);
}
SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw,
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET | CTLFLAG_NEEDGIANT,
&VNET_NAME(pfil_ipfw), 0, &sysctl_pfil_ipfw, "I",
"Layer2 filter with IPFW");
#ifdef VIMAGE
/*
 * Move a bridge interface to another vnet.  Member and span interfaces
 * cannot follow the bridge across vnets, so drop them all first, then
 * let ether_reassign() move the bridge ifnet itself.
 */
static void
bridge_reassign(struct ifnet *ifp, struct vnet *newvnet, char *arg)
{
    struct bridge_softc *sc = ifp->if_softc;
    struct bridge_iflist *bif;

    BRIDGE_LOCK(sc);
    while ((bif = CK_LIST_FIRST(&sc->sc_iflist)) != NULL)
        bridge_delete_member(sc, bif, 0);
    while ((bif = CK_LIST_FIRST(&sc->sc_spanlist)) != NULL) {
        bridge_delete_span(sc, bif);
    }
    BRIDGE_UNLOCK(sc);

    ether_reassign(ifp, newvnet, arg);
}
#endif
/*
 * bridge_clone_create:
 *
 *	Create a new bridge instance: allocate and initialize the softc,
 *	set up the routing table, attach as an Ethernet interface, and
 *	link the bridge onto the per-vnet bridge list.
 */
static int
bridge_clone_create(struct if_clone *ifc, char *name, size_t len,
    struct ifc_data *ifd, struct ifnet **ifpp)
{
    struct bridge_softc *sc;
    struct ifnet *ifp;

    sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
    /* if_alloc(9) cannot fail, so no NULL check is required. */
    ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
    BRIDGE_LOCK_INIT(sc);
    sc->sc_brtmax = BRIDGE_RTABLE_MAX;
    sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;

    /* Initialize our routing table. */
    bridge_rtable_init(sc);

    callout_init_mtx(&sc->sc_brcallout, &sc->sc_rt_mtx, 0);

    CK_LIST_INIT(&sc->sc_iflist);
    CK_LIST_INIT(&sc->sc_spanlist);

    ifp->if_softc = sc;
    if_initname(ifp, bridge_name, ifd->unit);
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = bridge_ioctl;
#ifdef ALTQ
    ifp->if_start = bridge_altq_start;
    ifp->if_transmit = bridge_altq_transmit;
    IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
    ifp->if_snd.ifq_drv_maxlen = 0;
    IFQ_SET_READY(&ifp->if_snd);
#else
    ifp->if_transmit = bridge_transmit;
#endif
    ifp->if_qflush = bridge_qflush;
    ifp->if_init = bridge_init;
    ifp->if_type = IFT_BRIDGE;

    /* Generate a stable random default MAC for this bridge. */
    ether_gen_addr(ifp, &sc->sc_defaddr);

    bstp_attach(&sc->sc_stp, &bridge_ops);
    ether_ifattach(ifp, sc->sc_defaddr.octet);
    /* Now undo some of the damage... */
    ifp->if_baudrate = 0;
    /* ether_ifattach() reset if_type; restore it. */
    ifp->if_type = IFT_BRIDGE;
#ifdef VIMAGE
    ifp->if_reassign = bridge_reassign;
#endif
    sc->sc_if_input = ifp->if_input; /* ether_input */
    ifp->if_input = bridge_inject;

    /*
     * Allow BRIDGE_INPUT() to pass in packets originating from the bridge
     * itself via bridge_inject(). This is required for netmap but
     * otherwise has no effect.
     */
    ifp->if_bridge_input = bridge_input;

    BRIDGE_LIST_LOCK();
    LIST_INSERT_HEAD(&V_bridge_list, sc, sc_list);
    BRIDGE_LIST_UNLOCK();
    *ifpp = ifp;

    return (0);
}
/*
 * Deferred (epoch) destructor for a bridge softc: runs once no reader
 * can still hold a reference, then releases the lock and the memory.
 */
static void
bridge_clone_destroy_cb(struct epoch_context *ctx)
{
    struct bridge_softc *softc;

    softc = __containerof(ctx, struct bridge_softc, sc_epoch_ctx);

    BRIDGE_LOCK_DESTROY(softc);
    free(softc, M_DEVBUF);
}
/*
 * bridge_clone_destroy:
 *
 *	Destroy a bridge instance.  Members and span ports are dropped,
 *	the routing table torn down, and the softc itself is freed via an
 *	epoch callback once no network-epoch reader can still see it.
 */
static int
bridge_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
{
    struct bridge_softc *sc = ifp->if_softc;
    struct bridge_iflist *bif;
    struct epoch_tracker et;

    BRIDGE_LOCK(sc);
    bridge_stop(ifp, 1);
    ifp->if_flags &= ~IFF_UP;

    while ((bif = CK_LIST_FIRST(&sc->sc_iflist)) != NULL)
        bridge_delete_member(sc, bif, 0);

    while ((bif = CK_LIST_FIRST(&sc->sc_spanlist)) != NULL) {
        bridge_delete_span(sc, bif);
    }

    /* Tear down the routing table. */
    bridge_rtable_fini(sc);

    BRIDGE_UNLOCK(sc);

    NET_EPOCH_ENTER(et);

    callout_drain(&sc->sc_brcallout);

    BRIDGE_LIST_LOCK();
    LIST_REMOVE(sc, sc_list);
    BRIDGE_LIST_UNLOCK();

    bstp_detach(&sc->sc_stp);
#ifdef ALTQ
    IFQ_PURGE(&ifp->if_snd);
#endif
    NET_EPOCH_EXIT(et);

    ether_ifdetach(ifp);
    if_free(ifp);

    /* Defer the actual free until current epoch readers have drained. */
    NET_EPOCH_CALL(bridge_clone_destroy_cb, &sc->sc_epoch_ctx);

    return (0);
}
/*
 * bridge_ioctl:
 *
 *	Handle a control request from the operator.  Bridge-specific
 *	sub-commands arrive via SIOC[GS]DRVSPEC and are dispatched through
 *	bridge_control_table; everything else falls through to ether_ioctl().
 */
static int
bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
    struct bridge_softc *sc = ifp->if_softc;
    struct ifreq *ifr = (struct ifreq *)data;
    struct bridge_iflist *bif;
    struct thread *td = curthread;
    /* Scratch area large enough for any bridge_control_table argument. */
    union {
        struct ifbreq ifbreq;
        struct ifbifconf ifbifconf;
        struct ifbareq ifbareq;
        struct ifbaconf ifbaconf;
        struct ifbrparam ifbrparam;
        struct ifbropreq ifbropreq;
    } args;
    struct ifdrv *ifd = (struct ifdrv *) data;
    const struct bridge_control *bc;
    int error = 0, oldmtu;

    BRIDGE_LOCK(sc);

    switch (cmd) {
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        break;

    case SIOCGDRVSPEC:
    case SIOCSDRVSPEC:
        /* ifd_cmd indexes bridge_control_table; bounds-check it first. */
        if (ifd->ifd_cmd >= bridge_control_table_size) {
            error = EINVAL;
            break;
        }
        bc = &bridge_control_table[ifd->ifd_cmd];

        /* GET requests must copy out; SET requests must not. */
        if (cmd == SIOCGDRVSPEC &&
            (bc->bc_flags & BC_F_COPYOUT) == 0) {
            error = EINVAL;
            break;
        }
        else if (cmd == SIOCSDRVSPEC &&
            (bc->bc_flags & BC_F_COPYOUT) != 0) {
            error = EINVAL;
            break;
        }

        if (bc->bc_flags & BC_F_SUSER) {
            error = priv_check(td, PRIV_NET_BRIDGE);
            if (error)
                break;
        }

        /* The argument length must match the handler exactly. */
        if (ifd->ifd_len != bc->bc_argsize ||
            ifd->ifd_len > sizeof(args)) {
            error = EINVAL;
            break;
        }

        bzero(&args, sizeof(args));
        if (bc->bc_flags & BC_F_COPYIN) {
            error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
            if (error)
                break;
        }

        oldmtu = ifp->if_mtu;
        error = (*bc->bc_func)(sc, &args);
        if (error)
            break;

        /*
         * Bridge MTU may change during addition of the first port.
         * If it did, do network layer specific procedure.
         */
        if (ifp->if_mtu != oldmtu)
            if_notifymtu(ifp);

        if (bc->bc_flags & BC_F_COPYOUT)
            error = copyout(&args, ifd->ifd_data, ifd->ifd_len);

        break;

    case SIOCSIFFLAGS:
        if (!(ifp->if_flags & IFF_UP) &&
            (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
            /*
             * If interface is marked down and it is running,
             * then stop and disable it.
             */
            bridge_stop(ifp, 1);
        } else if ((ifp->if_flags & IFF_UP) &&
            !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
            /*
             * If interface is marked up and it is stopped, then
             * start it.
             */
            BRIDGE_UNLOCK(sc);
            (*ifp->if_init)(sc);
            BRIDGE_LOCK(sc);
        }
        break;

    case SIOCSIFMTU:
        oldmtu = sc->sc_ifp->if_mtu;

        if (ifr->ifr_mtu < IF_MINMTU) {
            error = EINVAL;
            break;
        }
        if (CK_LIST_EMPTY(&sc->sc_iflist)) {
            sc->sc_ifp->if_mtu = ifr->ifr_mtu;
            break;
        }
        /* Every member must accept the new MTU, or none do. */
        CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
            error = (*bif->bif_ifp->if_ioctl)(bif->bif_ifp,
                SIOCSIFMTU, (caddr_t)ifr);
            if (error != 0) {
                log(LOG_NOTICE, "%s: invalid MTU: %u for"
                    " member %s\n", sc->sc_ifp->if_xname,
                    ifr->ifr_mtu,
                    bif->bif_ifp->if_xname);
                error = EINVAL;
                break;
            }
        }
        if (error) {
            /* Restore the previous MTU on all member interfaces. */
            ifr->ifr_mtu = oldmtu;
            CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
                (*bif->bif_ifp->if_ioctl)(bif->bif_ifp,
                    SIOCSIFMTU, (caddr_t)ifr);
            }
        } else {
            sc->sc_ifp->if_mtu = ifr->ifr_mtu;
        }
        break;
    default:
        /*
         * drop the lock as ether_ioctl() will call bridge_start() and
         * cause the lock to be recursed.
         */
        BRIDGE_UNLOCK(sc);
        error = ether_ioctl(ifp, cmd, data);
        BRIDGE_LOCK(sc);
        break;
    }

    BRIDGE_UNLOCK(sc);

    return (error);
}
/*
* bridge_mutecaps:
*
* Clear or restore unwanted capabilities on the member interface
*/
static void
bridge_mutecaps(struct bridge_softc *sc)
{
struct bridge_iflist *bif;
int enabled, mask;
BRIDGE_LOCK_ASSERT(sc);
/* Initial bitmask of capabilities to test */
mask = BRIDGE_IFCAPS_MASK;
CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
/* Every member must support it or its disabled */
mask &= bif->bif_savedcaps;
}
CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
enabled = bif->bif_ifp->if_capenable;
enabled &= ~BRIDGE_IFCAPS_STRIP;
/* strip off mask bits and enable them again if allowed */
enabled &= ~BRIDGE_IFCAPS_MASK;
enabled |= mask;
bridge_set_ifcap(sc, bif, enabled);
}
}
/*
 * Program a member interface's enabled capabilities to exactly 'set'.
 * Complains (but continues) if the driver rejects the request or leaves
 * capabilities enabled that the bridge wanted cleared.
 */
static void
bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
{
    struct ifnet *ifp = bif->bif_ifp;
    struct ifreq ifr;
    int error, stuck;

    /* Nothing to do if the member already matches the request. */
    if (ifp->if_capenable == set)
        return;

    bzero(&ifr, sizeof(ifr));
    ifr.ifr_reqcap = set;

    error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
    if (error)
        if_printf(sc->sc_ifp,
            "error setting capabilities on %s: %d\n",
            ifp->if_xname, error);

    /* Report any bridge-managed bits the driver failed to clear. */
    stuck = ifp->if_capenable &
        (BRIDGE_IFCAPS_MASK | BRIDGE_IFCAPS_STRIP) & ~set;
    if (stuck != 0)
        if_printf(sc->sc_ifp,
            "can't disable some capabilities on %s: 0x%x\n",
            ifp->if_xname, stuck);
}
/*
* bridge_lookup_member:
*
* Lookup a bridge member interface.
*/
static struct bridge_iflist *
bridge_lookup_member(struct bridge_softc *sc, const char *name)
{
struct bridge_iflist *bif;
struct ifnet *ifp;
BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
ifp = bif->bif_ifp;
if (strcmp(ifp->if_xname, name) == 0)
return (bif);
}
return (NULL);
}
/*
 * bridge_lookup_member_if:
 *
 *	Find the member descriptor for a given ifnet; NULL if the
 *	interface is not a member of this bridge.
 */
static struct bridge_iflist *
bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
{
    struct bridge_iflist *entry;

    BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);

    /* On normal loop exit, CK_LIST_FOREACH leaves 'entry' NULL. */
    CK_LIST_FOREACH(entry, &sc->sc_iflist, bif_next) {
        if (entry->bif_ifp == member_ifp)
            break;
    }

    return (entry);
}
/*
 * Deferred (epoch) destructor for a member descriptor: frees it once no
 * network-epoch reader can still be traversing the member list.
 */
static void
bridge_delete_member_cb(struct epoch_context *ctx)
{
    free(__containerof(ctx, struct bridge_iflist, bif_epoch_ctx),
        M_DEVBUF);
}
/*
 * bridge_delete_member:
 *
 *	Delete the specified member interface.  'gone' is non-zero when the
 *	interface is already departing (e.g. being destroyed), in which case
 *	promiscuous mode and capabilities are not restored on it.
 */
static void
bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
    int gone)
{
    struct ifnet *ifs = bif->bif_ifp;
    struct ifnet *fif = NULL;
    struct bridge_iflist *bifl;

    BRIDGE_LOCK_ASSERT(sc);

    if (bif->bif_flags & IFBIF_STP)
        bstp_disable(&bif->bif_stp);

    ifs->if_bridge = NULL;
    CK_LIST_REMOVE(bif, bif_next);

    /*
     * If removing the interface that gave the bridge its mac address, set
     * the mac address of the bridge to the address of the next member, or
     * to its default address if no members are left.
     */
    if (V_bridge_inherit_mac && sc->sc_ifaddr == ifs) {
        if (CK_LIST_EMPTY(&sc->sc_iflist)) {
            bcopy(&sc->sc_defaddr,
                IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
            sc->sc_ifaddr = NULL;
        } else {
            bifl = CK_LIST_FIRST(&sc->sc_iflist);
            fif = bifl->bif_ifp;
            bcopy(IF_LLADDR(fif),
                IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
            sc->sc_ifaddr = fif;
        }
        EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
    }

    bridge_linkcheck(sc);
    bridge_mutecaps(sc); /* recalculate now this interface is removed */
    BRIDGE_RT_LOCK(sc);
    bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
    BRIDGE_RT_UNLOCK(sc);
    KASSERT(bif->bif_addrcnt == 0,
        ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));

    ifs->if_bridge_output = NULL;
    ifs->if_bridge_input = NULL;
    ifs->if_bridge_linkstate = NULL;
    if (!gone) {
        switch (ifs->if_type) {
        case IFT_ETHER:
        case IFT_L2VLAN:
            /*
             * Take the interface out of promiscuous mode, but only
             * if it was promiscuous in the first place. It might
             * not be if we're in the bridge_ioctl_add() error path.
             */
            if (ifs->if_flags & IFF_PROMISC)
                (void) ifpromisc(ifs, 0);
            break;

        case IFT_GIF:
            break;

        default:
#ifdef DIAGNOSTIC
            panic("bridge_delete_member: impossible");
#endif
            break;
        }
        /* re-enable any interface capabilities */
        bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
    }
    bstp_destroy(&bif->bif_stp); /* prepare to free */

    /* Free deferred: epoch readers may still be walking the list. */
    NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
}
/*
 * bridge_delete_span:
 *
 *	Delete the specified span interface.  Span ports never have
 *	if_bridge set, which the KASSERT below relies on.
 */
static void
bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
{
    BRIDGE_LOCK_ASSERT(sc);

    KASSERT(bif->bif_ifp->if_bridge == NULL,
        ("%s: not a span interface", __func__));

    CK_LIST_REMOVE(bif, bif_next);
    /* Free deferred: epoch readers may still be walking the span list. */
    NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
}
/*
 * Add an interface as a bridge member (BRDGADD).  Validates the candidate,
 * reconciles MTU and IPv6 link-local constraints, inherits the MAC from the
 * first member when configured to, and puts the member into promiscuous mode.
 */
static int
bridge_ioctl_add(struct bridge_softc *sc, void *arg)
{
    struct ifbreq *req = arg;
    struct bridge_iflist *bif = NULL;
    struct ifnet *ifs;
    int error = 0;

    ifs = ifunit(req->ifbr_ifsname);
    if (ifs == NULL)
        return (ENOENT);
    if (ifs->if_ioctl == NULL) /* must be supported */
        return (EINVAL);

    /* If it's in the span list, it can't be a member. */
    CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
        if (ifs == bif->bif_ifp)
            return (EBUSY);

    if (ifs->if_bridge == sc)
        return (EEXIST);

    if (ifs->if_bridge != NULL)
        return (EBUSY);

    switch (ifs->if_type) {
    case IFT_ETHER:
    case IFT_L2VLAN:
    case IFT_GIF:
        /* permitted interface types */
        break;
    default:
        return (EINVAL);
    }

#ifdef INET6
    /*
     * Two valid inet6 addresses with link-local scope must not be
     * on the parent interface and the member interfaces at the
     * same time. This restriction is needed to prevent violation
     * of link-local scope zone. Attempts to add a member
     * interface which has inet6 addresses when the parent has
     * inet6 triggers removal of all inet6 addresses on the member
     * interface.
     */

    /* Check if the parent interface has a link-local scope addr. */
    if (V_allow_llz_overlap == 0 &&
        in6ifa_llaonifp(sc->sc_ifp) != NULL) {
        /*
         * If any, remove all inet6 addresses from the member
         * interfaces.
         */
        CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
            if (in6ifa_llaonifp(bif->bif_ifp)) {
                in6_ifdetach(bif->bif_ifp);
                if_printf(sc->sc_ifp,
                    "IPv6 addresses on %s have been removed "
                    "before adding it as a member to prevent "
                    "IPv6 address scope violation.\n",
                    bif->bif_ifp->if_xname);
            }
        }
        if (in6ifa_llaonifp(ifs)) {
            in6_ifdetach(ifs);
            if_printf(sc->sc_ifp,
                "IPv6 addresses on %s have been removed "
                "before adding it as a member to prevent "
                "IPv6 address scope violation.\n",
                ifs->if_xname);
        }
    }
#endif
    /* Allow the first Ethernet member to define the MTU */
    if (CK_LIST_EMPTY(&sc->sc_iflist))
        sc->sc_ifp->if_mtu = ifs->if_mtu;
    else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
        /* Try to coerce the new member to the bridge's MTU. */
        struct ifreq ifr;

        snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s",
            ifs->if_xname);
        ifr.ifr_mtu = sc->sc_ifp->if_mtu;

        error = (*ifs->if_ioctl)(ifs,
            SIOCSIFMTU, (caddr_t)&ifr);
        if (error != 0) {
            log(LOG_NOTICE, "%s: invalid MTU: %u for"
                " new member %s\n", sc->sc_ifp->if_xname,
                ifr.ifr_mtu,
                ifs->if_xname);
            return (EINVAL);
        }
    }

    bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
    if (bif == NULL)
        return (ENOMEM);

    bif->bif_ifp = ifs;
    bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
    /* Remember the member's capabilities so they can be restored later. */
    bif->bif_savedcaps = ifs->if_capenable;

    /*
     * Assign the interface's MAC address to the bridge if it's the first
     * member and the MAC address of the bridge has not been changed from
     * the default randomly generated one.
     */
    if (V_bridge_inherit_mac && CK_LIST_EMPTY(&sc->sc_iflist) &&
        !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr.octet, ETHER_ADDR_LEN)) {
        bcopy(IF_LLADDR(ifs), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
        sc->sc_ifaddr = ifs;
        EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
    }

    ifs->if_bridge = sc;
    ifs->if_bridge_output = bridge_output;
    ifs->if_bridge_input = bridge_input;
    ifs->if_bridge_linkstate = bridge_linkstate;
    bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
    /*
     * XXX: XLOCK HERE!?!
     *
     * NOTE: insert_***HEAD*** should be safe for the traversals.
     */
    CK_LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);

    /* Set interface capabilities to the intersection set of all members */
    bridge_mutecaps(sc);
    bridge_linkcheck(sc);

    /* Place the interface into promiscuous mode */
    switch (ifs->if_type) {
    case IFT_ETHER:
    case IFT_L2VLAN:
        error = ifpromisc(ifs, 1);
        break;
    }

    /* On failure, back out everything done above. */
    if (error)
        bridge_delete_member(sc, bif, 0);
    return (error);
}
/* Remove a member interface from the bridge (BRDGDEL). */
static int
bridge_ioctl_del(struct bridge_softc *sc, void *arg)
{
    struct ifbreq *req = arg;
    struct bridge_iflist *member;

    member = bridge_lookup_member(sc, req->ifbr_ifsname);
    if (member == NULL)
        return (ENOENT);

    bridge_delete_member(sc, member, 0);

    return (0);
}
/*
 * Report a member's bridge flags and STP port state (BRDGGIFFLGS).
 * Also used by bridge_ioctl_gifs() to fill per-member entries.
 */
static int
bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
{
    struct ifbreq *req = arg;
    struct bridge_iflist *bif;
    struct bstp_port *bp;

    bif = bridge_lookup_member(sc, req->ifbr_ifsname);
    if (bif == NULL)
        return (ENOENT);

    bp = &bif->bif_stp;
    req->ifbr_ifsflags = bif->bif_flags;
    req->ifbr_state = bp->bp_state;
    req->ifbr_priority = bp->bp_priority;
    req->ifbr_path_cost = bp->bp_path_cost;
    /* Port number is the low 12 bits of the interface index. */
    req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
    req->ifbr_proto = bp->bp_protover;
    req->ifbr_role = bp->bp_role;
    req->ifbr_stpflags = bp->bp_flags;
    req->ifbr_addrcnt = bif->bif_addrcnt;
    req->ifbr_addrmax = bif->bif_addrmax;
    req->ifbr_addrexceeded = bif->bif_addrexceeded;

    /* Copy STP state options as flags */
    if (bp->bp_operedge)
        req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
    if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
        req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
    if (bp->bp_ptp_link)
        req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
    if (bp->bp_flags & BSTP_PORT_AUTOPTP)
        req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
    if (bp->bp_flags & BSTP_PORT_ADMEDGE)
        req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
    if (bp->bp_flags & BSTP_PORT_ADMCOST)
        req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
    return (0);
}
/*
 * Set a member's bridge flags (BRDGSIFFLGS): enable/disable STP on the
 * port and pass the edge/ptp options down to bridgestp(4).
 */
static int
bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
{
    struct epoch_tracker et;
    struct ifbreq *req = arg;
    struct bridge_iflist *bif;
    struct bstp_port *bp;
    int error;

    bif = bridge_lookup_member(sc, req->ifbr_ifsname);
    if (bif == NULL)
        return (ENOENT);
    bp = &bif->bif_stp;

    if (req->ifbr_ifsflags & IFBIF_SPAN)
        /* SPAN is readonly */
        return (EINVAL);

    NET_EPOCH_ENTER(et);

    if (req->ifbr_ifsflags & IFBIF_STP) {
        if ((bif->bif_flags & IFBIF_STP) == 0) {
            /* STP was previously off; turn it on for this port. */
            error = bstp_enable(&bif->bif_stp);
            if (error) {
                NET_EPOCH_EXIT(et);
                return (error);
            }
        }
    } else {
        if ((bif->bif_flags & IFBIF_STP) != 0)
            bstp_disable(&bif->bif_stp);
    }

    /* Pass on STP flags */
    bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
    bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
    bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
    bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);

    /* Save the bits relating to the bridge */
    bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK;

    NET_EPOCH_EXIT(et);

    return (0);
}
/* Set the address-cache size limit (BRDGSCACHE) and trim any excess. */
static int
bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
{
    struct ifbrparam *bparam = arg;

    sc->sc_brtmax = bparam->ifbrp_csize;
    bridge_rttrim(sc);

    return (0);
}
/* Report the address-cache size limit (BRDGGCACHE). */
static int
bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
{
    struct ifbrparam *bparam = arg;

    bparam->ifbrp_csize = sc->sc_brtmax;

    return (0);
}
/*
 * List member and span interfaces (BRDGGIFS).  A zero-length request is
 * a size probe: only the required buffer length is returned.  Otherwise
 * as many entries as fit in the caller's buffer are copied out.
 */
static int
bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
{
    struct ifbifconf *bifc = arg;
    struct bridge_iflist *bif;
    struct ifbreq breq;
    char *buf, *outbuf;
    int count, buflen, len, error = 0;

    count = 0;
    CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
        count++;
    CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
        count++;

    buflen = sizeof(breq) * count;
    if (bifc->ifbic_len == 0) {
        /* Size probe: report how much space the caller needs. */
        bifc->ifbic_len = buflen;
        return (0);
    }
    outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
    if (outbuf == NULL)
        return (ENOMEM);

    count = 0;
    buf = outbuf;
    len = min(bifc->ifbic_len, buflen);
    bzero(&breq, sizeof(breq));
    CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
        if (len < sizeof(breq))
            break;

        strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
            sizeof(breq.ifbr_ifsname));
        /* Fill in the ifbreq structure */
        error = bridge_ioctl_gifflags(sc, &breq);
        if (error)
            break;
        memcpy(buf, &breq, sizeof(breq));
        count++;
        buf += sizeof(breq);
        len -= sizeof(breq);
    }
    /* Span ports have no STP state; only name/flags/portno are set. */
    CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
        if (len < sizeof(breq))
            break;

        strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
            sizeof(breq.ifbr_ifsname));
        breq.ifbr_ifsflags = bif->bif_flags;
        breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff;
        memcpy(buf, &breq, sizeof(breq));
        count++;
        buf += sizeof(breq);
        len -= sizeof(breq);
    }

    bifc->ifbic_len = sizeof(breq) * count;
    error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len);
    free(outbuf, M_TEMP);
    return (error);
}
/*
 * Dump the learned-address table (BRDGRTS).  Copies out up to
 * ifbac_len bytes of ifbareq entries; dynamic entries report their
 * remaining lifetime in seconds.
 */
static int
bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
{
    struct ifbaconf *bac = arg;
    struct bridge_rtnode *brt;
    struct ifbareq bareq;
    char *buf, *outbuf;
    int count, buflen, len, error = 0;

    if (bac->ifbac_len == 0)
        return (0);

    count = 0;
    CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list)
        count++;
    buflen = sizeof(bareq) * count;

    outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
    if (outbuf == NULL)
        return (ENOMEM);

    count = 0;
    buf = outbuf;
    len = min(bac->ifbac_len, buflen);
    bzero(&bareq, sizeof(bareq));
    CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
        if (len < sizeof(bareq))
            goto out;
        strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
            sizeof(bareq.ifba_ifsname));
        memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
        bareq.ifba_vlan = brt->brt_vlan;
        /* Dynamic entries report seconds until expiry; others 0. */
        if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
            time_uptime < brt->brt_expire)
            bareq.ifba_expire = brt->brt_expire - time_uptime;
        else
            bareq.ifba_expire = 0;
        bareq.ifba_flags = brt->brt_flags;

        memcpy(buf, &bareq, sizeof(bareq));
        count++;
        buf += sizeof(bareq);
        len -= sizeof(bareq);
    }
out:
    bac->ifbac_len = sizeof(bareq) * count;
    error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len);
    free(outbuf, M_TEMP);
    return (error);
}
/*
 * bridge_ioctl_saddr:
 *
 *	Add (or update) an address-table entry pointing at the named
 *	member interface.  Returns ENOENT if the interface is not a
 *	member of this bridge.
 */
static int
bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
{
	struct ifbareq *req = arg;
	struct bridge_iflist *bif;
	struct epoch_tracker et;
	int error;

	/* The member lookup must happen inside a net epoch section. */
	NET_EPOCH_ENTER(et);
	bif = bridge_lookup_member(sc, req->ifba_ifsname);
	if (bif == NULL) {
		NET_EPOCH_EXIT(et);
		return (ENOENT);
	}

	/* bridge_rtupdate() may acquire the lock. */
	error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
	    req->ifba_flags);
	NET_EPOCH_EXIT(et);

	return (error);
}
/*
 * bridge_ioctl_sto:
 *
 *	Set the timeout used for dynamic address-table entries.
 */
static int
bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
{

	sc->sc_brttimeout = ((struct ifbrparam *)arg)->ifbrp_ctime;

	return (0);
}
/*
 * bridge_ioctl_gto:
 *
 *	Report the dynamic address-table entry timeout to userland.
 */
static int
bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
{

	((struct ifbrparam *)arg)->ifbrp_ctime = sc->sc_brttimeout;

	return (0);
}
/*
 * bridge_ioctl_daddr:
 *
 *	Delete an address-table entry.  A vlan of 0 from userspace
 *	means "any vlan" and is mapped to DOT1Q_VID_RSVD_IMPL.
 */
static int
bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
{
	struct ifbareq *req = arg;
	int vlan;

	/* Userspace uses '0' to mean 'any vlan'. */
	vlan = (req->ifba_vlan == 0) ? DOT1Q_VID_RSVD_IMPL : req->ifba_vlan;

	return (bridge_rtdaddr(sc, req->ifba_dst, vlan));
}
/*
 * bridge_ioctl_flush:
 *
 *	Flush entries from the address table; ifbr_ifsflags is passed
 *	through to bridge_rtflush() to select whether all entries or
 *	only dynamic ones are removed.
 */
static int
bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;

	/* The route table has its own lock, held across the flush. */
	BRIDGE_RT_LOCK(sc);
	bridge_rtflush(sc, req->ifbr_ifsflags);
	BRIDGE_RT_UNLOCK(sc);

	return (0);
}
/*
 * bridge_ioctl_gpri:
 *
 *	Report the spanning-tree bridge priority to userland.
 */
static int
bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
{

	((struct ifbrparam *)arg)->ifbrp_prio = sc->sc_stp.bs_bridge_priority;

	return (0);
}
static int
bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
{
struct ifbrparam *param = arg;
return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
}
/*
 * bridge_ioctl_ght:
 *
 *	Report the spanning-tree hello time.  The STP code keeps the
 *	value scaled by 256, hence the shift.
 */
static int
bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
{

	((struct ifbrparam *)arg)->ifbrp_hellotime =
	    sc->sc_stp.bs_bridge_htime >> 8;

	return (0);
}
static int
bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
{
struct ifbrparam *param = arg;
return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
}
/*
 * bridge_ioctl_gfd:
 *
 *	Report the spanning-tree forward delay.  The STP code keeps
 *	the value scaled by 256, hence the shift.
 */
static int
bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
{

	((struct ifbrparam *)arg)->ifbrp_fwddelay =
	    sc->sc_stp.bs_bridge_fdelay >> 8;

	return (0);
}
static int
bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
{
struct ifbrparam *param = arg;
return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
}
/*
 * bridge_ioctl_gma:
 *
 *	Report the spanning-tree max age.  The STP code keeps the
 *	value scaled by 256, hence the shift.
 */
static int
bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
{

	((struct ifbrparam *)arg)->ifbrp_maxage =
	    sc->sc_stp.bs_bridge_max_age >> 8;

	return (0);
}
static int
bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
{
struct ifbrparam *param = arg;
return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
}
/*
 * bridge_ioctl_sifprio:
 *
 *	Set the spanning-tree port priority of the named member.
 *	Returns ENOENT if the interface is not a bridge member.
 */
static int
bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *member;

	member = bridge_lookup_member(sc, req->ifbr_ifsname);

	return (member == NULL ? ENOENT :
	    bstp_set_port_priority(&member->bif_stp, req->ifbr_priority));
}
/*
 * bridge_ioctl_sifcost:
 *
 *	Set the spanning-tree path cost of the named member.  Returns
 *	ENOENT if the interface is not a bridge member.
 */
static int
bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *member;

	member = bridge_lookup_member(sc, req->ifbr_ifsname);

	return (member == NULL ? ENOENT :
	    bstp_set_path_cost(&member->bif_stp, req->ifbr_path_cost));
}
/*
 * bridge_ioctl_sifmaxaddr:
 *
 *	Set the per-member limit on learned addresses; a value of 0
 *	disables the limit.  Returns ENOENT if the interface is not a
 *	bridge member.
 */
static int
bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *member;

	member = bridge_lookup_member(sc, req->ifbr_ifsname);
	if (member == NULL)
		return (ENOENT);

	member->bif_addrmax = req->ifbr_addrmax;

	return (0);
}
/*
 * bridge_ioctl_addspan:
 *
 *	Add the named interface as a span port: it receives copies of
 *	bridged traffic but is not a member.  Fails with EBUSY if the
 *	interface is already a span port or a member of some bridge,
 *	and EINVAL for unsupported interface types.
 */
static int
bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif = NULL;
	struct ifnet *ifs;

	ifs = ifunit(req->ifbr_ifsname);
	if (ifs == NULL)
		return (ENOENT);

	/* Reject duplicates. */
	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
		if (ifs == bif->bif_ifp)
			return (EBUSY);

	/* A bridge member cannot also be a span port. */
	if (ifs->if_bridge != NULL)
		return (EBUSY);

	/* Only Ethernet-like interfaces can be span ports. */
	switch (ifs->if_type) {
	case IFT_ETHER:
	case IFT_GIF:
	case IFT_L2VLAN:
		break;
	default:
		return (EINVAL);
	}

	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (bif == NULL)
		return (ENOMEM);

	bif->bif_ifp = ifs;
	bif->bif_flags = IFBIF_SPAN;

	CK_LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);

	return (0);
}
/*
 * bridge_ioctl_delspan:
 *
 *	Remove the named interface from the span-port list.  Returns
 *	ENOENT if the interface does not exist or is not a span port.
 */
static int
bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif;
	struct ifnet *ifs;

	ifs = ifunit(req->ifbr_ifsname);
	if (ifs == NULL)
		return (ENOENT);

	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
		if (ifs == bif->bif_ifp)
			break;

	/* bif is NULL here only if the loop ran off the end of the list. */
	if (bif == NULL)
		return (ENOENT);

	bridge_delete_span(sc, bif);

	return (0);
}
/*
 * bridge_ioctl_gbparam:
 *
 *	Report the bridge-wide spanning-tree operational parameters
 *	(timers, priorities, bridge/root identifiers) to userland.
 */
static int
bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg)
{
	struct ifbropreq *req = arg;
	struct bstp_state *bs = &sc->sc_stp;
	struct bstp_port *root_port;

	/* Timer values are stored scaled by 256; convert back. */
	req->ifbop_maxage = bs->bs_bridge_max_age >> 8;
	req->ifbop_hellotime = bs->bs_bridge_htime >> 8;
	req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;

	/* A root port may not exist (e.g. we are the root bridge). */
	root_port = bs->bs_root_port;
	if (root_port == NULL)
		req->ifbop_root_port = 0;
	else
		req->ifbop_root_port = root_port->bp_ifp->if_index;

	req->ifbop_holdcount = bs->bs_txholdcount;
	req->ifbop_priority = bs->bs_bridge_priority;
	req->ifbop_protocol = bs->bs_protover;
	req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;
	req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id;
	req->ifbop_designated_root = bs->bs_root_pv.pv_root_id;
	req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id;
	req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;
	req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;

	return (0);
}
/*
 * bridge_ioctl_grte:
 *
 *	Report how many address-cache insertions were refused because
 *	the cache was full.
 */
static int
bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
{

	((struct ifbrparam *)arg)->ifbrp_cexceeded = sc->sc_brtexceeded;

	return (0);
}
/*
 * bridge_ioctl_gifsstp:
 *
 *	Report per-port spanning-tree state for every STP-enabled
 *	member.  A caller passing ifbpstp_len == 0 gets back only the
 *	buffer size required.
 */
static int
bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg)
{
	struct ifbpstpconf *bifstp = arg;
	struct bridge_iflist *bif;
	struct bstp_port *bp;
	struct ifbpstpreq bpreq;
	char *buf, *outbuf;
	int count, buflen, len, error = 0;

	/* Only STP-enabled members are reported. */
	count = 0;
	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		if ((bif->bif_flags & IFBIF_STP) != 0)
			count++;
	}

	buflen = sizeof(bpreq) * count;
	if (bifstp->ifbpstp_len == 0) {
		/* Size query only. */
		bifstp->ifbpstp_len = buflen;
		return (0);
	}

	/* Stage everything in a kernel buffer, then copy out once. */
	outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
	if (outbuf == NULL)
		return (ENOMEM);

	count = 0;
	buf = outbuf;
	/* Never write more than the caller's buffer can hold. */
	len = min(bifstp->ifbpstp_len, buflen);
	bzero(&bpreq, sizeof(bpreq));
	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		if (len < sizeof(bpreq))
			break;

		if ((bif->bif_flags & IFBIF_STP) == 0)
			continue;

		bp = &bif->bif_stp;
		bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff;
		bpreq.ifbp_fwd_trans = bp->bp_forward_transitions;
		bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost;
		bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id;
		bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id;
		bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id;

		memcpy(buf, &bpreq, sizeof(bpreq));
		count++;
		buf += sizeof(bpreq);
		len -= sizeof(bpreq);
	}

	/* Report the number of bytes actually staged. */
	bifstp->ifbpstp_len = sizeof(bpreq) * count;
	error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len);
	free(outbuf, M_TEMP);
	return (error);
}
static int
bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
{
struct ifbrparam *param = arg;
return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
}
static int
bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
{
struct ifbrparam *param = arg;
return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
}
/*
 * bridge_ifdetach:
 *
 *	Detach an interface from a bridge.  Called when a member
 *	interface is detaching.
 */
static void
bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct bridge_softc *sc = ifp->if_bridge;
	struct bridge_iflist *bif;

	/* Renames also trigger detach events; they are not real detaches. */
	if (ifp->if_flags & IFF_RENAMING)
		return;

	if (V_bridge_cloner == NULL) {
		/*
		 * This detach handler can be called after
		 * vnet_bridge_uninit().  Just return in that case.
		 */
		return;
	}

	/* Check if the interface is a bridge member */
	if (sc != NULL) {
		BRIDGE_LOCK(sc);

		bif = bridge_lookup_member_if(sc, ifp);
		if (bif != NULL)
			bridge_delete_member(sc, bif, 1);

		BRIDGE_UNLOCK(sc);
		return;
	}

	/*
	 * Check if the interface is a span port; a span port carries no
	 * back-pointer, so every bridge's span list must be scanned.
	 */
	BRIDGE_LIST_LOCK();
	LIST_FOREACH(sc, &V_bridge_list, sc_list) {
		BRIDGE_LOCK(sc);
		CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
			if (ifp == bif->bif_ifp) {
				bridge_delete_span(sc, bif);
				break;
			}
		BRIDGE_UNLOCK(sc);
	}
	BRIDGE_LIST_UNLOCK();
}
/*
 * bridge_init:
 *
 *	Initialize a bridge interface: start the address-aging timer,
 *	mark the interface running and start spanning tree.  A no-op if
 *	the interface is already running.
 */
static void
bridge_init(void *xsc)
{
	struct bridge_softc *sc = (struct bridge_softc *)xsc;
	struct ifnet *ifp = sc->sc_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	BRIDGE_LOCK(sc);
	/* Periodic aging of dynamic address-table entries. */
	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
	    bridge_timer, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	bstp_init(&sc->sc_stp);		/* Initialize Spanning Tree */

	BRIDGE_UNLOCK(sc);
}
/*
 * bridge_stop:
 *
 *	Stop the bridge interface: cancel the aging timer, stop
 *	spanning tree and flush all dynamic address-table entries.
 *	Caller must hold the bridge lock.
 */
static void
bridge_stop(struct ifnet *ifp, int disable)
{
	struct bridge_softc *sc = ifp->if_softc;

	BRIDGE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	BRIDGE_RT_LOCK(sc);
	callout_stop(&sc->sc_brcallout);

	bstp_stop(&sc->sc_stp);

	/* Static entries survive a stop; only dynamic ones are flushed. */
	bridge_rtflush(sc, IFBF_FLUSHDYN);
	BRIDGE_RT_UNLOCK(sc);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
}
/*
 * bridge_enqueue:
 *
 *	Enqueue a packet on a bridge member interface.
 *
 *	The mbuf may be the head of a chain of fragments linked via
 *	m_nextpkt; each is transmitted individually.  On a transmit
 *	failure the remaining fragments are freed and the loop aborts.
 */
static int
bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
{
	int len, err = 0;
	short mflags;
	struct mbuf *m0;

	/* We may be sending a fragment so traverse the mbuf */
	for (; m; m = m0) {
		m0 = m->m_nextpkt;
		m->m_nextpkt = NULL;
		/* Snapshot length/flags before the mbuf may be replaced. */
		len = m->m_pkthdr.len;
		mflags = m->m_flags;

		/*
		 * If underlying interface can not do VLAN tag insertion itself
		 * then attach a packet tag that holds it.
		 */
		if ((m->m_flags & M_VLANTAG) &&
		    (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
			m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
			if (m == NULL) {
				if_printf(dst_ifp,
				    "unable to prepend VLAN header\n");
				if_inc_counter(dst_ifp, IFCOUNTER_OERRORS, 1);
				continue;
			}
			m->m_flags &= ~M_VLANTAG;
		}

		M_ASSERTPKTHDR(m); /* We shouldn't transmit mbuf without pkthdr */
		if ((err = dst_ifp->if_transmit(dst_ifp, m))) {
			/* Free the remaining fragments; n counts them plus
			 * the packet that just failed. */
			int n;

			for (m = m0, n = 1; m != NULL; m = m0, n++) {
				m0 = m->m_nextpkt;
				m_freem(m);
			}
			if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, n);
			break;
		}

		if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, len);
		if (mflags & M_MCAST)
			if_inc_counter(sc->sc_ifp, IFCOUNTER_OMCASTS, 1);
	}

	return (err);
}
/*
 * bridge_dummynet:
 *
 *	Receive a queued packet from dummynet and pass it on to the output
 *	interface.
 *
 *	The mbuf has the Ethernet header already attached.
 */
static void
bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
{
	struct bridge_softc *sc;

	sc = ifp->if_bridge;

	/*
	 * The packet didn't originate from a member interface.  This should
	 * only ever happen if a member interface is removed while packets are
	 * queued for it.
	 */
	if (sc == NULL) {
		m_freem(m);
		return;
	}

	/* Re-run the outbound packet filter; pfil may consume the mbuf. */
	if (PFIL_HOOKED_OUT_46) {
		if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
			return;
		if (m == NULL)
			return;
	}

	bridge_enqueue(sc, ifp, m);
}
/*
 * bridge_output:
 *
 *	Send output from a bridge member interface.  This
 *	performs the bridging function for locally originated
 *	packets.
 *
 *	The mbuf has the Ethernet header already attached.  We must
 *	enqueue or free the mbuf before returning.
 */
static int
bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
    struct rtentry *rt)
{
	struct ether_header *eh;
	struct ifnet *bifp, *dst_if;
	struct bridge_softc *sc;
	uint16_t vlan;

	NET_EPOCH_ASSERT();

	/* The Ethernet header must be contiguous for mtod() below. */
	if (m->m_len < ETHER_HDR_LEN) {
		m = m_pullup(m, ETHER_HDR_LEN);
		if (m == NULL)
			return (0);
	}

	eh = mtod(m, struct ether_header *);
	sc = ifp->if_bridge;
	vlan = VLANTAGOF(m);

	bifp = sc->sc_ifp;

	/*
	 * If bridge is down, but the original output interface is up,
	 * go ahead and send out that interface.  Otherwise, the packet
	 * is dropped below.
	 */
	if ((bifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		dst_if = ifp;
		goto sendunicast;
	}

	/*
	 * If the packet is a multicast, or we don't know a better way to
	 * get there, send to all interfaces.
	 */
	if (ETHER_IS_MULTICAST(eh->ether_dhost))
		dst_if = NULL;
	else
		dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
	/* Tap any traffic not passing back out the originating interface */
	if (dst_if != ifp)
		ETHER_BPF_MTAP(bifp, m);
	if (dst_if == NULL) {
		/* Flood: duplicate to every eligible member. */
		struct bridge_iflist *bif;
		struct mbuf *mc;
		int used = 0;

		bridge_span(sc, m);

		CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
			dst_if = bif->bif_ifp;

			if (dst_if->if_type == IFT_GIF)
				continue;
			if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
				continue;

			/*
			 * If this is not the original output interface,
			 * and the interface is participating in spanning
			 * tree, make sure the port is in a state that
			 * allows forwarding.
			 */
			if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) &&
			    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
				continue;

			/* Hand the last member the original; dup otherwise. */
			if (CK_LIST_NEXT(bif, bif_next) == NULL) {
				used = 1;
				mc = m;
			} else {
				mc = m_dup(m, M_NOWAIT);
				if (mc == NULL) {
					if_inc_counter(bifp, IFCOUNTER_OERRORS, 1);
					continue;
				}
			}

			bridge_enqueue(sc, dst_if, mc);
		}
		if (used == 0)
			m_freem(m);
		return (0);
	}

sendunicast:
	/*
	 * XXX Spanning tree consideration here?
	 */
	bridge_span(sc, m);
	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		m_freem(m);
		return (0);
	}

	bridge_enqueue(sc, dst_if, m);
	return (0);
}
/*
 * bridge_transmit:
 *
 *	Do output on a bridge.  Unicast packets with a known destination
 *	go out one member; everything else is broadcast to all members.
 */
static int
bridge_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct bridge_softc *sc;
	struct ether_header *eh;
	struct ifnet *dst_if;
	int error = 0;

	sc = ifp->if_softc;

	ETHER_BPF_MTAP(ifp, m);

	eh = mtod(m, struct ether_header *);

	/* Unicast with a known destination: send to that member only. */
	if (((m->m_flags & (M_BCAST|M_MCAST)) == 0) &&
	    (dst_if = bridge_rtlookup(sc, eh->ether_dhost, DOT1Q_VID_NULL)) !=
	    NULL) {
		error = bridge_enqueue(sc, dst_if, m);
	} else
		bridge_broadcast(sc, ifp, m, 0);

	return (error);
}
#ifdef ALTQ
/*
 * bridge_altq_start:
 *
 *	Drain the ALTQ send queue, passing each packet to
 *	bridge_transmit().  The queue lock is held across the drain.
 */
static void
bridge_altq_start(if_t ifp)
{
	struct ifaltq *ifq = &ifp->if_snd;
	struct mbuf *m;

	IFQ_LOCK(ifq);
	IFQ_DEQUEUE_NOLOCK(ifq, m);
	while (m != NULL) {
		bridge_transmit(ifp, m);
		IFQ_DEQUEUE_NOLOCK(ifq, m);
	}
	IFQ_UNLOCK(ifq);
}

/*
 * bridge_altq_transmit:
 *
 *	ALTQ-aware transmit entry point: enqueue through ALTQ when it
 *	is enabled, otherwise transmit directly.
 */
static int
bridge_altq_transmit(if_t ifp, struct mbuf *m)
{
	int err;

	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
		IFQ_ENQUEUE(&ifp->if_snd, m, err);
		if (err == 0)
			bridge_altq_start(ifp);
	} else
		err = bridge_transmit(ifp, m);

	return (err);
}
#endif	/* ALTQ */
/*
 * The ifp->if_qflush entry point for if_bridge(4) is no-op.
 * The bridge keeps no software send queue of its own to flush.
 */
static void
bridge_qflush(struct ifnet *ifp __unused)
{
}
/*
 * bridge_forward:
 *
 *	The forwarding function of the bridge.  Consumes the mbuf on
 *	every path (forwarded, broadcast, filtered or dropped).
 *
 *	NOTE: Releases the lock on return.
 */
static void
bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
    struct mbuf *m)
{
	struct bridge_iflist *dbif;
	struct ifnet *src_if, *dst_if, *ifp;
	struct ether_header *eh;
	uint16_t vlan;
	uint8_t *dst;
	int error;

	NET_EPOCH_ASSERT();

	src_if = m->m_pkthdr.rcvif;
	ifp = sc->sc_ifp;

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
	vlan = VLANTAGOF(m);

	/* Source port in STP discarding state: drop. */
	if ((sbif->bif_flags & IFBIF_STP) &&
	    sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
		goto drop;

	eh = mtod(m, struct ether_header *);
	dst = eh->ether_dhost;

	/* If the interface is learning, record the address. */
	if (sbif->bif_flags & IFBIF_LEARNING) {
		error = bridge_rtupdate(sc, eh->ether_shost, vlan,
		    sbif, 0, IFBAF_DYNAMIC);
		/*
		 * If the interface has addresses limits then deny any source
		 * that is not in the cache.
		 */
		if (error && sbif->bif_addrmax)
			goto drop;
	}

	/* Learning state: record (above) but do not forward. */
	if ((sbif->bif_flags & IFBIF_STP) != 0 &&
	    sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
		goto drop;

#ifdef DEV_NETMAP
	/*
	 * Hand the packet to netmap only if it wasn't injected by netmap
	 * itself.
	 */
	if ((m->m_flags & M_BRIDGE_INJECT) == 0 &&
	    (if_getcapenable(ifp) & IFCAP_NETMAP) != 0) {
		ifp->if_input(ifp, m);
		return;
	}

	m->m_flags &= ~M_BRIDGE_INJECT;
#endif

	/*
	 * At this point, the port either doesn't participate
	 * in spanning tree or it is in the forwarding state.
	 */

	/*
	 * If the packet is unicast, destined for someone on
	 * "this" side of the bridge, drop it.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
		dst_if = bridge_rtlookup(sc, dst, vlan);
		if (src_if == dst_if)
			goto drop;
	} else {
		/*
		 * Check if its a reserved multicast address, any address
		 * listed in 802.1D section 7.12.6 may not be forwarded by the
		 * bridge.
		 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F
		 */
		if (dst[0] == 0x01 && dst[1] == 0x80 &&
		    dst[2] == 0xc2 && dst[3] == 0x00 &&
		    dst[4] == 0x00 && dst[5] <= 0x0f)
			goto drop;

		/* ...forward it to all interfaces. */
		if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
		dst_if = NULL;
	}

	/*
	 * If we have a destination interface which is a member of our bridge,
	 * OR this is a unicast packet, push it through the bpf(4) machinery.
	 * For broadcast or multicast packets, don't bother because it will
	 * be reinjected into ether_input. We do this before we pass the packets
	 * through the pfil(9) framework, as it is possible that pfil(9) will
	 * drop the packet, or possibly modify it, making it difficult to debug
	 * firewall issues on the bridge.
	 */
	if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0)
		ETHER_BPF_MTAP(ifp, m);

	/* run the packet filter */
	if (PFIL_HOOKED_IN_46) {
		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
			return;
		if (m == NULL)
			return;
	}

	if (dst_if == NULL) {
		/* Unknown destination or multicast: flood. */
		bridge_broadcast(sc, src_if, m, 1);
		return;
	}

	/*
	 * At this point, we're dealing with a unicast frame
	 * going to a different interface.
	 */
	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto drop;

	dbif = bridge_lookup_member_if(sc, dst_if);
	if (dbif == NULL)
		/* Not a member of the bridge (anymore?) */
		goto drop;

	/* Private segments can not talk to each other */
	if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)
		goto drop;

	if ((dbif->bif_flags & IFBIF_STP) &&
	    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
		goto drop;

	/* Outbound filter on the destination member. */
	if (PFIL_HOOKED_OUT_46) {
		if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
			return;
		if (m == NULL)
			return;
	}

	bridge_enqueue(sc, dst_if, m);
	return;

drop:
	m_freem(m);
}
/*
 * bridge_input:
 *
 *	Receive input from a member interface.  Queue the packet for
 *	bridging if it is not for us.  Returns the mbuf when the caller
 *	should continue local processing, or NULL when it was consumed.
 */
static struct mbuf *
bridge_input(struct ifnet *ifp, struct mbuf *m)
{
	struct bridge_softc *sc;
	struct bridge_iflist *bif, *bif2;
	struct ifnet *bifp;
	struct ether_header *eh;
	struct mbuf *mc, *mc2;
	uint16_t vlan;
	int error;

	NET_EPOCH_ASSERT();

	eh = mtod(m, struct ether_header *);
	vlan = VLANTAGOF(m);

	sc = ifp->if_bridge;
	if (sc == NULL) {
		/*
		 * This packet originated from the bridge itself, so it must
		 * have been transmitted by netmap.  Derive the "source"
		 * interface from the source address and drop the packet if the
		 * source address isn't known.
		 */
		KASSERT((m->m_flags & M_BRIDGE_INJECT) != 0,
		    ("%s: ifnet %p missing a bridge softc", __func__, ifp));
		sc = if_getsoftc(ifp);
		ifp = bridge_rtlookup(sc, eh->ether_shost, vlan);
		if (ifp == NULL) {
			if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
			m_freem(m);
			return (NULL);
		}
		m->m_pkthdr.rcvif = ifp;
	}
	bifp = sc->sc_ifp;
	if ((bifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return (m);

	/*
	 * Implement support for bridge monitoring. If this flag has been
	 * set on this interface, discard the packet once we push it through
	 * the bpf(4) machinery, but before we do, increment the byte and
	 * packet counters associated with this interface.
	 */
	if ((bifp->if_flags & IFF_MONITOR) != 0) {
		m->m_pkthdr.rcvif  = bifp;
		ETHER_BPF_MTAP(bifp, m);
		if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1);
		if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
		m_freem(m);
		return (NULL);
	}
	bif = bridge_lookup_member_if(sc, ifp);
	if (bif == NULL) {
		/* Receiving interface is not a member: leave it alone. */
		return (m);
	}

	bridge_span(sc, m);

	if (m->m_flags & (M_BCAST|M_MCAST)) {
		/* Tap off 802.1D packets; they do not get forwarded. */
		if (memcmp(eh->ether_dhost, bstp_etheraddr,
		    ETHER_ADDR_LEN) == 0) {
			bstp_input(&bif->bif_stp, ifp, m); /* consumes mbuf */
			return (NULL);
		}

		if ((bif->bif_flags & IFBIF_STP) &&
		    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
			return (m);
		}

		/*
		 * Make a deep copy of the packet and enqueue the copy
		 * for bridge processing; return the original packet for
		 * local processing.
		 */
		mc = m_dup(m, M_NOWAIT);
		if (mc == NULL) {
			return (m);
		}

		/* Perform the bridge forwarding function with the copy. */
		bridge_forward(sc, bif, mc);

#ifdef DEV_NETMAP
		/*
		 * If netmap is enabled and has not already seen this packet,
		 * then it will be consumed by bridge_forward().
		 */
		if ((if_getcapenable(bifp) & IFCAP_NETMAP) != 0 &&
		    (m->m_flags & M_BRIDGE_INJECT) == 0) {
			m_freem(m);
			return (NULL);
		}
#endif

		/*
		 * Reinject the mbuf as arriving on the bridge so we have a
		 * chance at claiming multicast packets. We can not loop back
		 * here from ether_input as a bridge is never a member of a
		 * bridge.
		 */
		KASSERT(bifp->if_bridge == NULL,
		    ("loop created in bridge_input"));
		mc2 = m_dup(m, M_NOWAIT);
		if (mc2 != NULL) {
			/* Keep the layer3 header aligned */
			int i = min(mc2->m_pkthdr.len, max_protohdr);
			mc2 = m_copyup(mc2, i, ETHER_ALIGN);
		}
		if (mc2 != NULL) {
			mc2->m_pkthdr.rcvif = bifp;
			mc2->m_flags &= ~M_BRIDGE_INJECT;
			sc->sc_if_input(bifp, mc2);
		}

		/* Return the original packet for local processing. */
		return (m);
	}

	if ((bif->bif_flags & IFBIF_STP) &&
	    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
		return (m);
	}

#if defined(INET) || defined(INET6)
#define	CARP_CHECK_WE_ARE_DST(iface) \
	((iface)->if_carp && (*carp_forus_p)((iface), eh->ether_dhost))
#define	CARP_CHECK_WE_ARE_SRC(iface) \
	((iface)->if_carp && (*carp_forus_p)((iface), eh->ether_shost))
#else
#define	CARP_CHECK_WE_ARE_DST(iface)	false
#define	CARP_CHECK_WE_ARE_SRC(iface)	false
#endif

#ifdef DEV_NETMAP
#define	GRAB_FOR_NETMAP(ifp, m) do {					\
	if ((if_getcapenable(ifp) & IFCAP_NETMAP) != 0 &&		\
	    ((m)->m_flags & M_BRIDGE_INJECT) == 0) {			\
		(ifp)->if_input(ifp, m);				\
		return (NULL);						\
	}								\
} while (0)
#else
#define	GRAB_FOR_NETMAP(ifp, m)
#endif

/*
 * GRAB_OUR_PACKETS(iface) — claim the unicast frame for local delivery
 * when its destination MAC belongs to 'iface' (or CARP says it is ours),
 * or discard it as an echo when the source MAC is our own.  Expands to
 * code that may return from bridge_input(); otherwise falls through.
 */
#define	GRAB_OUR_PACKETS(iface)						\
	if ((iface)->if_type == IFT_GIF)				\
		continue;						\
	/* It is destined for us. */					\
	if (memcmp(IF_LLADDR(iface), eh->ether_dhost, ETHER_ADDR_LEN) == 0 || \
	    CARP_CHECK_WE_ARE_DST(iface)) {				\
		if (bif->bif_flags & IFBIF_LEARNING) {			\
			error = bridge_rtupdate(sc, eh->ether_shost,	\
			    vlan, bif, 0, IFBAF_DYNAMIC);		\
			if (error && bif->bif_addrmax) {		\
				m_freem(m);				\
				return (NULL);				\
			}						\
		}							\
		m->m_pkthdr.rcvif = iface;				\
		if ((iface) == ifp) {					\
			/* Skip bridge processing... src == dest */	\
			return (m);					\
		}							\
		/* It's passing over or to the bridge, locally. */	\
		ETHER_BPF_MTAP(bifp, m);				\
		if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1);		\
		if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);\
		/* Hand the packet over to netmap if necessary. */	\
		GRAB_FOR_NETMAP(bifp, m);				\
		/* Filter on the physical interface. */			\
		if (V_pfil_local_phys && PFIL_HOOKED_IN_46) {		\
			if (bridge_pfil(&m, NULL, ifp,			\
			    PFIL_IN) != 0 || m == NULL) {		\
				return (NULL);				\
			}						\
		}							\
		if ((iface) != bifp)					\
			ETHER_BPF_MTAP(iface, m);			\
		return (m);						\
	}								\
									\
	/* We just received a packet that we sent out. */		\
	if (memcmp(IF_LLADDR(iface), eh->ether_shost, ETHER_ADDR_LEN) == 0 || \
	    CARP_CHECK_WE_ARE_SRC(iface)) {				\
		m_freem(m);						\
		return (NULL);						\
	}

	/*
	 * Unicast.  Make sure it's not for the bridge.
	 */
	do { GRAB_OUR_PACKETS(bifp) } while (0);

	/*
	 * Give a chance for ifp at first priority. This will help when	the
	 * packet comes through the interface like VLAN's with the same MACs
	 * on several interfaces from the same bridge. This also will save
	 * some CPU cycles in case the destination interface and the input
	 * interface (eq ifp) are the same.
	 */
	do { GRAB_OUR_PACKETS(ifp) } while (0);

	/* Now check the all bridge members. */
	CK_LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) {
		GRAB_OUR_PACKETS(bif2->bif_ifp)
	}

#undef CARP_CHECK_WE_ARE_DST
#undef CARP_CHECK_WE_ARE_SRC
#undef GRAB_FOR_NETMAP
#undef GRAB_OUR_PACKETS

	/* Perform the bridge forwarding function. */
	bridge_forward(sc, bif, m);

	return (NULL);
}
/*
 * Inject a packet back into the host ethernet stack. This will generally only
 * be used by netmap when an application writes to the host TX ring. The
 * M_BRIDGE_INJECT flag ensures that the packet is re-routed to the bridge
 * interface after ethernet processing.
 */
static void
bridge_inject(struct ifnet *ifp, struct mbuf *m)
{
	struct bridge_softc *sc;

	/* Only valid for netmap-mode interfaces, and only once per mbuf. */
	KASSERT((if_getcapenable(ifp) & IFCAP_NETMAP) != 0,
	    ("%s: iface %s is not running in netmap mode",
	    __func__, if_name(ifp)));
	KASSERT((m->m_flags & M_BRIDGE_INJECT) == 0,
	    ("%s: mbuf %p has M_BRIDGE_INJECT set", __func__, m));

	m->m_flags |= M_BRIDGE_INJECT;
	sc = if_getsoftc(ifp);
	sc->sc_if_input(ifp, m);
}
/*
 * bridge_broadcast:
 *
 *	Send a frame to all interfaces that are members of
 *	the bridge, except for the one on which the packet
 *	arrived.  The original mbuf is reused for the last eligible
 *	member and duplicated for the others; it is freed if no member
 *	accepts it.
 *
 *	NOTE: Releases the lock on return.
 */
static void
bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
    struct mbuf *m, int runfilt)
{
	struct bridge_iflist *dbif, *sbif;
	struct mbuf *mc;
	struct ifnet *dst_if;
	int used = 0, i;

	NET_EPOCH_ASSERT();

	/* May be NULL if src_if is not (or no longer) a member. */
	sbif = bridge_lookup_member_if(sc, src_if);

	/* Filter on the bridge interface before broadcasting */
	if (runfilt && PFIL_HOOKED_OUT_46) {
		if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
			return;
		if (m == NULL)
			return;
	}

	CK_LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) {
		dst_if = dbif->bif_ifp;
		if (dst_if == src_if)
			continue;

		/* Private segments can not talk to each other */
		if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE))
			continue;

		if ((dbif->bif_flags & IFBIF_STP) &&
		    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
			continue;

		/* Non-discovering ports only get broadcast/multicast. */
		if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 &&
		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
			continue;

		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
			continue;

		/* Hand the last member the original mbuf; dup otherwise. */
		if (CK_LIST_NEXT(dbif, bif_next) == NULL) {
			mc = m;
			used = 1;
		} else {
			mc = m_dup(m, M_NOWAIT);
			if (mc == NULL) {
				if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
				continue;
			}
		}

		/*
		 * Filter on the output interface. Pass a NULL bridge interface
		 * pointer so we do not redundantly filter on the bridge for
		 * each interface we broadcast on.
		 */
		if (runfilt && PFIL_HOOKED_OUT_46) {
			if (used == 0) {
				/* Keep the layer3 header aligned */
				i = min(mc->m_pkthdr.len, max_protohdr);
				mc = m_copyup(mc, i, ETHER_ALIGN);
				if (mc == NULL) {
					if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
					continue;
				}
			}
			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
				continue;
			if (mc == NULL)
				continue;
		}

		bridge_enqueue(sc, dst_if, mc);
	}
	if (used == 0)
		m_freem(m);
}
/*
 * bridge_span:
 *
 *	Duplicate a packet out one or more interfaces that are in span mode,
 *	the original mbuf is unmodified.
 */
static void
bridge_span(struct bridge_softc *sc, struct mbuf *m)
{
	struct bridge_iflist *bif;
	struct ifnet *dst_if;
	struct mbuf *mc;

	NET_EPOCH_ASSERT();

	/* Fast path: most bridges have no span ports. */
	if (CK_LIST_EMPTY(&sc->sc_spanlist))
		return;

	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
		dst_if = bif->bif_ifp;

		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
			continue;

		mc = m_dup(m, M_NOWAIT);
		if (mc == NULL) {
			if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
			continue;
		}

		bridge_enqueue(sc, dst_if, mc);
	}
}
/*
 * bridge_rtupdate:
 *
 *	Add a bridge routing entry, or refresh/move an existing one.
 *	'setflags' forces brt_flags to 'flags' (used by the SADDR
 *	ioctl); otherwise dynamic entries just get their expiry bumped.
 */
static int
bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
    struct bridge_iflist *bif, int setflags, uint8_t flags)
{
	struct bridge_rtnode *brt;
	struct bridge_iflist *obif;
	int error;

	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);

	/* Check the source address is valid and not multicast. */
	if (ETHER_IS_MULTICAST(dst) ||
	    (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
	     dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0)
		return (EINVAL);

	/*
	 * A route for this destination might already exist.  If so,
	 * update it, otherwise create a new one.
	 */
	if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
		BRIDGE_RT_LOCK(sc);

		/* Check again, now that we have the lock. There could have
		 * been a race and we only want to insert this once. */
		if (bridge_rtnode_lookup(sc, dst, vlan) != NULL) {
			BRIDGE_RT_UNLOCK(sc);
			return (0);
		}

		/* Global table-size limit. */
		if (sc->sc_brtcnt >= sc->sc_brtmax) {
			sc->sc_brtexceeded++;
			BRIDGE_RT_UNLOCK(sc);
			return (ENOSPC);
		}
		/* Check per interface address limits (if enabled) */
		if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
			bif->bif_addrexceeded++;
			BRIDGE_RT_UNLOCK(sc);
			return (ENOSPC);
		}

		/*
		 * Allocate a new bridge forwarding node, and
		 * initialize the expiration time and Ethernet
		 * address.
		 */
		brt = uma_zalloc(V_bridge_rtnode_zone, M_NOWAIT | M_ZERO);
		if (brt == NULL) {
			BRIDGE_RT_UNLOCK(sc);
			return (ENOMEM);
		}
		brt->brt_vnet = curvnet;

		/* Sticky ports pin the address to this interface. */
		if (bif->bif_flags & IFBIF_STICKY)
			brt->brt_flags = IFBAF_STICKY;
		else
			brt->brt_flags = IFBAF_DYNAMIC;

		memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
		brt->brt_vlan = vlan;
		brt->brt_dst = bif;
		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
			uma_zfree(V_bridge_rtnode_zone, brt);
			BRIDGE_RT_UNLOCK(sc);
			return (error);
		}
		bif->bif_addrcnt++;

		BRIDGE_RT_UNLOCK(sc);
	}

	/* Dynamic entry seen on a different port: the address moved. */
	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
	    (obif = brt->brt_dst) != bif) {
		MPASS(obif != NULL);

		BRIDGE_RT_LOCK(sc);
		brt->brt_dst->bif_addrcnt--;
		brt->brt_dst = bif;
		brt->brt_dst->bif_addrcnt++;
		BRIDGE_RT_UNLOCK(sc);

		/* Rate-limited MAC-flap logging. */
		if (V_log_mac_flap &&
		    ppsratecheck(&V_log_last, &V_log_count, V_log_interval)) {
			log(LOG_NOTICE,
			    "%s: mac address %6D vlan %d moved from %s to %s\n",
			    sc->sc_ifp->if_xname,
			    &brt->brt_addr[0], ":",
			    brt->brt_vlan,
			    obif->bif_ifp->if_xname,
			    bif->bif_ifp->if_xname);
		}
	}

	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
		brt->brt_expire = time_uptime + sc->sc_brttimeout;
	if (setflags)
		brt->brt_flags = flags;

	return (0);
}
/*
 * bridge_rtlookup:
 *
 *	Look up the destination interface for (addr, vlan) in the
 *	address table; NULL when the address is unknown.  Must run
 *	inside a net epoch section.
 */
static struct ifnet *
bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
{
	struct bridge_rtnode *brt;

	NET_EPOCH_ASSERT();

	brt = bridge_rtnode_lookup(sc, addr, vlan);

	return (brt != NULL ? brt->brt_ifp : NULL);
}
/*
 * bridge_rttrim:
 *
 *	Trim the routing table so that we have a number
 *	of routing entries less than or equal to the
 *	maximum number.
 */
static void
bridge_rttrim(struct bridge_softc *sc)
{
	struct bridge_rtnode *brt, *nbrt;

	NET_EPOCH_ASSERT();
	BRIDGE_RT_LOCK_ASSERT(sc);

	/* Make sure we actually need to do this. */
	if (sc->sc_brtcnt <= sc->sc_brtmax)
		return;

	/* Force an aging cycle; this might trim enough addresses. */
	bridge_rtage(sc);
	if (sc->sc_brtcnt <= sc->sc_brtmax)
		return;

	/* Still over: evict dynamic entries until we fit. */
	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
			bridge_rtnode_destroy(sc, brt);
			if (sc->sc_brtcnt <= sc->sc_brtmax)
				return;
		}
	}
}
/*
 * bridge_timer:
 *
 *	Aging timer for the bridge: expires stale dynamic entries and
 *	re-arms itself while the bridge is running.
 */
static void
bridge_timer(void *arg)
{
	struct bridge_softc *sc = arg;

	BRIDGE_RT_LOCK_ASSERT(sc);

	/* Destruction of rtnodes requires a proper vnet context */
	CURVNET_SET(sc->sc_ifp->if_vnet);
	bridge_rtage(sc);
	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
		callout_reset(&sc->sc_brcallout,
		    bridge_rtable_prune_period * hz, bridge_timer, sc);
	CURVNET_RESTORE();
}
/*
 * bridge_rtage:
 *
 *	Perform an aging cycle: destroy every dynamic entry whose
 *	expiry time has passed.  Static/sticky entries are untouched.
 */
static void
bridge_rtage(struct bridge_softc *sc)
{
	struct bridge_rtnode *brt, *nbrt;

	BRIDGE_RT_LOCK_ASSERT(sc);

	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
			if (time_uptime >= brt->brt_expire)
				bridge_rtnode_destroy(sc, brt);
		}
	}
}
/*
* bridge_rtflush:
*
* Remove all dynamic addresses from the bridge.
*/
static void
bridge_rtflush(struct bridge_softc *sc, int full)
{
	struct bridge_rtnode *node, *next;

	BRIDGE_RT_LOCK_ASSERT(sc);

	/* Remove every entry, or only the dynamic ones unless 'full' is set. */
	CK_LIST_FOREACH_SAFE(node, &sc->sc_rtlist, brt_list, next) {
		if (!full &&
		    (node->brt_flags & IFBAF_TYPEMASK) != IFBAF_DYNAMIC)
			continue;
		bridge_rtnode_destroy(sc, node);
	}
}
/*
* bridge_rtdaddr:
*
* Remove an address from the table.
*/
static int
bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
{
	struct bridge_rtnode *node;
	int removed = 0;

	BRIDGE_RT_LOCK(sc);

	/*
	 * With vlan == DOT1Q_VID_RSVD_IMPL the lookup matches the address on
	 * every vlan, so keep deleting until nothing more is returned.
	 */
	while ((node = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
		bridge_rtnode_destroy(sc, node);
		removed = 1;
	}

	BRIDGE_RT_UNLOCK(sc);

	return (removed ? 0 : ENOENT);
}
/*
 * bridge_rtdelete:
 *
 *	Delete routes to a specific member interface.
 */
static void
bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
{
	struct bridge_rtnode *node, *next;

	BRIDGE_RT_LOCK_ASSERT(sc);

	/* Drop this port's entries; keep static ones unless 'full' is set. */
	CK_LIST_FOREACH_SAFE(node, &sc->sc_rtlist, brt_list, next) {
		if (node->brt_ifp != ifp)
			continue;
		if (full ||
		    (node->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
			bridge_rtnode_destroy(sc, node);
	}
}
/*
* bridge_rtable_init:
*
* Initialize the route table for this bridge.
*/
static void
bridge_rtable_init(struct bridge_softc *sc)
{
	int bucket;

	/* M_WAITOK allocation cannot fail, so no error path is needed. */
	sc->sc_rthash = malloc(BRIDGE_RTHASH_SIZE * sizeof(*sc->sc_rthash),
	    M_DEVBUF, M_WAITOK);
	for (bucket = 0; bucket < BRIDGE_RTHASH_SIZE; bucket++)
		CK_LIST_INIT(&sc->sc_rthash[bucket]);

	/* Random per-bridge salt keeps hash chains unpredictable. */
	sc->sc_rthash_key = arc4random();
	CK_LIST_INIT(&sc->sc_rtlist);
}
/*
* bridge_rtable_fini:
*
* Deconstruct the route table for this bridge.
*/
static void
bridge_rtable_fini(struct bridge_softc *sc)
{
	/* All route entries must already have been destroyed by the caller. */
	KASSERT(sc->sc_brtcnt == 0,
	    ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
	free(sc->sc_rthash, M_DEVBUF);
}
/*
* The following hash function is adapted from "Hash Functions" by Bob Jenkins
* ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
*/
#define mix(a, b, c) \
do { \
a -= b; a -= c; a ^= (c >> 13); \
b -= c; b -= a; b ^= (a << 8); \
c -= a; c -= b; c ^= (b >> 13); \
a -= b; a -= c; a ^= (c >> 12); \
b -= c; b -= a; b ^= (a << 16); \
c -= a; c -= b; c ^= (b >> 5); \
a -= b; a -= c; a ^= (c >> 3); \
b -= c; b -= a; b ^= (a << 10); \
c -= a; c -= b; c ^= (b >> 15); \
} while (/*CONSTCOND*/0)
/*
 * Hash an Ethernet address into a bucket index: the six address bytes are
 * packed into the a/b words (initialized with the golden-ratio constant),
 * salted with the per-bridge random key in c, and run through mix().
 */
static __inline uint32_t
bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
{
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;

	b += addr[5] << 8;
	b += addr[4];
	a += addr[3] << 24;
	a += addr[2] << 16;
	a += addr[1] << 8;
	a += addr[0];

	mix(a, b, c);

	return (c & BRIDGE_RTHASH_MASK);
}
#undef mix
/*
 * Byte-wise comparison of two Ethernet addresses; the result carries the
 * sign of the first differing byte (memcmp-style), 0 when equal.
 */
static int
bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
{
	int idx, diff;

	for (idx = 0; idx < ETHER_ADDR_LEN; idx++) {
		diff = (int)a[idx] - (int)b[idx];
		if (diff != 0)
			return (diff);
	}
	return (0);
}
/*
* bridge_rtnode_lookup:
*
* Look up a bridge route node for the specified destination. Compare the
* vlan id or if zero then just return the first match.
*/
static struct bridge_rtnode *
bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
{
	struct bridge_rtnode *node;
	uint32_t bucket;
	int cmp;

	BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(sc);

	/*
	 * Hash chains are kept sorted by bridge_rtnode_insert(), so the walk
	 * can terminate as soon as the address can no longer appear.
	 */
	bucket = bridge_rthash(sc, addr);
	CK_LIST_FOREACH(node, &sc->sc_rthash[bucket], brt_hash) {
		cmp = bridge_rtnode_addr_cmp(addr, node->brt_addr);
		if (cmp > 0)
			break;
		if (cmp == 0 &&
		    (node->brt_vlan == vlan || vlan == DOT1Q_VID_RSVD_IMPL))
			return (node);
	}

	return (NULL);
}
/*
* bridge_rtnode_insert:
*
* Insert the specified bridge node into the route table. We
* assume the entry is not already in the table.
*/
static int
bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
{
	struct bridge_rtnode *lbrt;
	uint32_t hash;
	int dir;

	BRIDGE_RT_LOCK_ASSERT(sc);

	hash = bridge_rthash(sc, brt->brt_addr);

	lbrt = CK_LIST_FIRST(&sc->sc_rthash[hash]);
	if (lbrt == NULL) {
		/* Empty bucket: the new node starts the chain. */
		CK_LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
		goto out;
	}

	/*
	 * Hash chains are kept ordered by bridge_rtnode_addr_cmp() so that
	 * lookups can terminate early; find the insertion point.
	 */
	do {
		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
		/* Same address and vlan: duplicate entry, reject. */
		if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan)
			return (EEXIST);
		if (dir > 0) {
			CK_LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
			goto out;
		}
		if (CK_LIST_NEXT(lbrt, brt_hash) == NULL) {
			/* Reached the tail: append. */
			CK_LIST_INSERT_AFTER(lbrt, brt, brt_hash);
			goto out;
		}
		lbrt = CK_LIST_NEXT(lbrt, brt_hash);
	} while (lbrt != NULL);

	/* Not reached: each iteration inserts, returns, or advances. */
#ifdef DIAGNOSTIC
	panic("bridge_rtnode_insert: impossible");
#endif

out:
	/* Also link onto the per-bridge list used for aging and flushing. */
	CK_LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
	sc->sc_brtcnt++;
	return (0);
}
/*
 * Epoch callback: free an rtnode once no epoch reader can still see it.
 */
static void
bridge_rtnode_destroy_cb(struct epoch_context *ctx)
{
	struct bridge_rtnode *brt;

	brt = __containerof(ctx, struct bridge_rtnode, brt_epoch_ctx);

	/* The per-vnet UMA zone requires the owning vnet to be current. */
	CURVNET_SET(brt->brt_vnet);
	uma_zfree(V_bridge_rtnode_zone, brt);
	CURVNET_RESTORE();
}
/*
* bridge_rtnode_destroy:
*
* Destroy a bridge rtnode.
*/
static void
bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
{
	BRIDGE_RT_LOCK_ASSERT(sc);

	/* Unlink from both the hash chain and the per-bridge route list. */
	CK_LIST_REMOVE(brt, brt_hash);

	CK_LIST_REMOVE(brt, brt_list);
	sc->sc_brtcnt--;
	brt->brt_dst->bif_addrcnt--;
	/* Defer the actual free until concurrent epoch readers have drained. */
	NET_EPOCH_CALL(bridge_rtnode_destroy_cb, &brt->brt_epoch_ctx);
}
/*
* bridge_rtable_expire:
*
* Set the expiry time for all routes on an interface.
*/
static void
bridge_rtable_expire(struct ifnet *ifp, int age)
{
	struct bridge_softc *sc = ifp->if_bridge;
	struct bridge_rtnode *brt;

	CURVNET_SET(ifp->if_vnet);
	BRIDGE_RT_LOCK(sc);

	/*
	 * If the age is zero then flush, otherwise set all the expiry times to
	 * age for the interface.  Only dynamic entries on the given member
	 * port are affected either way.
	 */
	if (age == 0)
		bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
	else {
		CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
			/* Cap the expiry time to 'age' */
			if (brt->brt_ifp == ifp &&
			    brt->brt_expire > time_uptime + age &&
			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
				brt->brt_expire = time_uptime + age;
		}
	}
	BRIDGE_RT_UNLOCK(sc);
	CURVNET_RESTORE();
}
/*
* bridge_state_change:
*
* Callback from the bridgestp code when a port changes states.
*/
static void
bridge_state_change(struct ifnet *ifp, int state)
{
	struct bridge_softc *sc = ifp->if_bridge;
	static const char *stpstates[] = {
		"disabled",
		"listening",
		"learning",
		"forwarding",
		"blocking",
		"discarding"
	};

	CURVNET_SET(ifp->if_vnet);
	/*
	 * NOTE(review): 'state' indexes stpstates[] without a bounds check;
	 * assumes the bridgestp code only ever reports the six states above —
	 * confirm against bstp_ifstate() callers.
	 */
	if (V_log_stp)
		log(LOG_NOTICE, "%s: state changed to %s on %s\n",
		    sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname);
	CURVNET_RESTORE();
}
/*
* Send bridge packets through pfil if they are one of the types pfil can deal
* with, or if they are ARP or REVARP. (pfil will pass ARP and REVARP without
* question.) If *bifp or *ifp are NULL then packet filtering is skipped for
* that interface.
*/
static int
bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
{
	int snap, error, i;
	struct ether_header *eh1, eh2;
	struct llc llc1;
	u_int16_t ether_type;
	pfil_return_t rv;
#ifdef INET
	struct ip *ip = NULL;
	int hlen = 0;
#endif

	snap = 0;
	error = -1;	/* Default error if not error == 0 */

#if 0
	/* we may return with the IP fields swapped, ensure its not shared */
	KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
#endif

	if (V_pfil_bridge == 0 && V_pfil_member == 0 && V_pfil_ipfw == 0)
		return (0); /* filtering is disabled */

	/* Make the Ethernet plus protocol headers contiguous for inspection. */
	i = min((*mp)->m_pkthdr.len, max_protohdr);
	if ((*mp)->m_len < i) {
		*mp = m_pullup(*mp, i);
		if (*mp == NULL) {
			printf("%s: m_pullup failed\n", __func__);
			return (-1);
		}
	}

	eh1 = mtod(*mp, struct ether_header *);
	ether_type = ntohs(eh1->ether_type);

	/*
	 * Check for SNAP/LLC.  A type field below ETHERMTU is an 802.3
	 * length, so the real protocol type lives in the SNAP header.
	 */
	if (ether_type < ETHERMTU) {
		struct llc *llc2 = (struct llc *)(eh1 + 1);

		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
		    llc2->llc_dsap == LLC_SNAP_LSAP &&
		    llc2->llc_ssap == LLC_SNAP_LSAP &&
		    llc2->llc_control == LLC_UI) {
			ether_type = htons(llc2->llc_un.type_snap.ether_type);
			snap = 1;
		}
	}

	/*
	 * If we're trying to filter bridge traffic, only look at traffic for
	 * protocols available in the kernel (IPv4 and/or IPv6) to avoid
	 * passing traffic for an unsupported protocol to the filter.  This is
	 * lame since if we really wanted, say, an AppleTalk filter, we are
	 * hosed, but of course we don't have an AppleTalk filter to begin
	 * with.  (Note that since pfil doesn't understand ARP it will pass
	 * *ALL* ARP traffic.)
	 */
	switch (ether_type) {
#ifdef INET
	case ETHERTYPE_ARP:
	case ETHERTYPE_REVARP:
		if (V_pfil_ipfw_arp == 0)
			return (0); /* Automatically pass */

		/* FALLTHROUGH */
	case ETHERTYPE_IP:
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
#endif /* INET6 */
		break;

	default:
		/*
		 * We get here if the packet isn't from a supported
		 * protocol.  Check to see if the user wants to pass
		 * non-IP packets, these will not be checked by pfil(9)
		 * and passed unconditionally so the default is to
		 * drop.
		 */
		if (V_pfil_onlyip)
			goto bad;
	}

	/* Run the packet through pfil before stripping link headers */
	if (PFIL_HOOKED_OUT(V_link_pfil_head) && V_pfil_ipfw != 0 &&
	    dir == PFIL_OUT && ifp != NULL) {
		switch (pfil_mbuf_out(V_link_pfil_head, mp, ifp, NULL)) {
		case PFIL_DROPPED:
			return (EACCES);
		case PFIL_CONSUMED:
			return (0);
		}
	}

	/* Strip off the Ethernet header and keep a copy. */
	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
	m_adj(*mp, ETHER_HDR_LEN);

	/* Strip off snap header, if present */
	if (snap) {
		m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
		m_adj(*mp, sizeof(struct llc));
	}

	/*
	 * Check the IP header for alignment and errors
	 */
	if (dir == PFIL_IN) {
		switch (ether_type) {
#ifdef INET
		case ETHERTYPE_IP:
			error = bridge_ip_checkbasic(mp);
			break;
#endif
#ifdef INET6
		case ETHERTYPE_IPV6:
			error = bridge_ip6_checkbasic(mp);
			break;
#endif /* INET6 */
		default:
			error = 0;
		}
		if (error)
			goto bad;
	}

	error = 0;

	/*
	 * Run the packet through pfil
	 */
	rv = PFIL_PASS;
	switch (ether_type) {
#ifdef INET
	case ETHERTYPE_IP:
		/*
		 * Run pfil on the member interface and the bridge, both can
		 * be skipped by clearing pfil_member or pfil_bridge.
		 *
		 * Keep the order:
		 *   in_if -> bridge_if -> out_if
		 */
		if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
		    pfil_mbuf_out(V_inet_pfil_head, mp, bifp, NULL)) !=
		    PFIL_PASS)
			break;

		if (V_pfil_member && ifp != NULL) {
			rv = (dir == PFIL_OUT) ?
			    pfil_mbuf_out(V_inet_pfil_head, mp, ifp, NULL) :
			    pfil_mbuf_in(V_inet_pfil_head, mp, ifp, NULL);
			if (rv != PFIL_PASS)
				break;
		}

		if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
		    pfil_mbuf_in(V_inet_pfil_head, mp, bifp, NULL)) !=
		    PFIL_PASS)
			break;

		/* check if we need to fragment the packet */
		/* bridge_fragment generates a mbuf chain of packets */
		/* that already include eth headers */
		if (V_pfil_member && ifp != NULL && dir == PFIL_OUT) {
			i = (*mp)->m_pkthdr.len;
			if (i > ifp->if_mtu) {
				error = bridge_fragment(ifp, mp, &eh2, snap,
					    &llc1);
				return (error);
			}
		}

		/* Recalculate the ip checksum.  The filter may have
		 * rewritten header fields. */
		ip = mtod(*mp, struct ip *);
		hlen = ip->ip_hl << 2;
		if (hlen < sizeof(struct ip))
			goto bad;
		if (hlen > (*mp)->m_len) {
			if ((*mp = m_pullup(*mp, hlen)) == NULL)
				goto bad;
			ip = mtod(*mp, struct ip *);
			if (ip == NULL)
				goto bad;
		}
		ip->ip_sum = 0;
		if (hlen == sizeof(struct ip))
			ip->ip_sum = in_cksum_hdr(ip);
		else
			ip->ip_sum = in_cksum(*mp, hlen);

		break;
#endif /* INET */
#ifdef INET6
	case ETHERTYPE_IPV6:
		if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
		    pfil_mbuf_out(V_inet6_pfil_head, mp, bifp, NULL)) !=
		    PFIL_PASS)
			break;

		if (V_pfil_member && ifp != NULL) {
			rv = (dir == PFIL_OUT) ?
			    pfil_mbuf_out(V_inet6_pfil_head, mp, ifp, NULL) :
			    pfil_mbuf_in(V_inet6_pfil_head, mp, ifp, NULL);
			if (rv != PFIL_PASS)
				break;
		}

		if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
		    pfil_mbuf_in(V_inet6_pfil_head, mp, bifp, NULL)) !=
		    PFIL_PASS)
			break;
		break;
#endif
	}

	switch (rv) {
	case PFIL_CONSUMED:
		return (0);
	case PFIL_DROPPED:
		return (EACCES);
	default:
		break;
	}

	error = -1;

	/*
	 * Finally, put everything back the way it was and return
	 */
	if (snap) {
		M_PREPEND(*mp, sizeof(struct llc), M_NOWAIT);
		if (*mp == NULL)
			return (error);
		bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
	}

	M_PREPEND(*mp, ETHER_HDR_LEN, M_NOWAIT);
	if (*mp == NULL)
		return (error);
	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);

	return (0);

bad:
	m_freem(*mp);
	*mp = NULL;
	return (error);
}
#ifdef INET
/*
* Perform basic checks on header size since
* pfil assumes ip_input has already processed
* it for it. Cut-and-pasted from ip_input.c.
* Given how simple the IPv6 version is,
* does the IPv4 version really need to be
* this complicated?
*
* XXX Should we update ipstat here, or not?
* XXX Right now we update ipstat but not
* XXX csum_counter.
*/
static int
bridge_ip_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip *ip;
	int len, hlen;
	u_short sum;

	if (*mp == NULL)
		return (-1);

	/* Realign or pull up so the full base IP header is contiguous. */
	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
		if ((m = m_copyup(m, sizeof(struct ip),
			(max_linkhdr + 3) & ~3)) == NULL) {
			/* XXXJRT new stat, please */
			KMOD_IPSTAT_INC(ips_toosmall);
			goto bad;
		}
	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
			KMOD_IPSTAT_INC(ips_toosmall);
			goto bad;
		}
	}
	ip = mtod(m, struct ip *);
	if (ip == NULL) goto bad;

	if (ip->ip_v != IPVERSION) {
		KMOD_IPSTAT_INC(ips_badvers);
		goto bad;
	}
	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) { /* minimum header length */
		KMOD_IPSTAT_INC(ips_badhlen);
		goto bad;
	}
	/* Header with options may extend past the first mbuf: pull it up. */
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == NULL) {
			KMOD_IPSTAT_INC(ips_badhlen);
			goto bad;
		}
		ip = mtod(m, struct ip *);
		if (ip == NULL) goto bad;
	}
	/* Trust a checksum already verified by hardware, else compute it. */
	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		KMOD_IPSTAT_INC(ips_badsum);
		goto bad;
	}

	/* Retrieve the packet length. */
	len = ntohs(ip->ip_len);

	/*
	 * Check for additional length bogosity
	 */
	if (len < hlen) {
		KMOD_IPSTAT_INC(ips_badlen);
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is as at least much as the IP header would have us expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < len) {
		KMOD_IPSTAT_INC(ips_tooshort);
		goto bad;
	}

	/* Checks out, proceed */
	*mp = m;
	return (0);

bad:
	/* m may have been replaced above; hand it back for the caller to free. */
	*mp = m;
	return (-1);
}
#endif /* INET */
#ifdef INET6
/*
* Same as above, but for IPv6.
* Cut-and-pasted from ip6_input.c.
* XXX Should we update ip6stat, or not?
*/
static int
bridge_ip6_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip6_hdr *ip6;

	/*
	 * If the IPv6 header is not aligned, slurp it up into a new
	 * mbuf with space for link headers, in the event we forward
	 * it.  Otherwise, if it is aligned, make sure the entire base
	 * IPv6 header is in the first mbuf of the chain.
	 */
	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
		struct ifnet *inifp = m->m_pkthdr.rcvif;
		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
			    (max_linkhdr + 3) & ~3)) == NULL) {
			/* XXXJRT new stat, please */
			IP6STAT_INC(ip6s_toosmall);
			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
			goto bad;
		}
	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
		struct ifnet *inifp = m->m_pkthdr.rcvif;
		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
			IP6STAT_INC(ip6s_toosmall);
			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
			goto bad;
		}
	}

	/* The version nibble is the only field validated for IPv6. */
	ip6 = mtod(m, struct ip6_hdr *);
	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
		IP6STAT_INC(ip6s_badvers);
		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
		goto bad;
	}

	/* Checks out, proceed */
	*mp = m;
	return (0);

bad:
	/* m may have been replaced above; hand it back for the caller to free. */
	*mp = m;
	return (-1);
}
#endif /* INET6 */
#ifdef INET
/*
* bridge_fragment:
*
* Fragment mbuf chain in multiple packets and prepend ethernet header.
*/
static int
bridge_fragment(struct ifnet *ifp, struct mbuf **mp, struct ether_header *eh,
    int snap, struct llc *llc)
{
	struct mbuf *m = *mp, *nextpkt = NULL, *mprev = NULL, *mcur = NULL;
	struct ip *ip;
	int error = -1;

	if (m->m_len < sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
		goto dropit;
	ip = mtod(m, struct ip *);

	/* Let ip_fragment() (or hardware) fill in the per-fragment checksum. */
	m->m_pkthdr.csum_flags |= CSUM_IP;
	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist);
	if (error)
		goto dropit;

	/*
	 * Walk the chain and re-add the Ethernet header for
	 * each mbuf packet.
	 */
	for (mcur = m; mcur; mcur = mcur->m_nextpkt) {
		nextpkt = mcur->m_nextpkt;
		mcur->m_nextpkt = NULL;
		if (snap) {
			M_PREPEND(mcur, sizeof(struct llc), M_NOWAIT);
			if (mcur == NULL) {
				error = ENOBUFS;
				/* Keep the remainder of the chain reachable. */
				if (mprev != NULL)
					mprev->m_nextpkt = nextpkt;
				goto dropit;
			}
			bcopy(llc, mtod(mcur, caddr_t),sizeof(struct llc));
		}

		M_PREPEND(mcur, ETHER_HDR_LEN, M_NOWAIT);
		if (mcur == NULL) {
			error = ENOBUFS;
			if (mprev != NULL)
				mprev->m_nextpkt = nextpkt;
			goto dropit;
		}
		bcopy(eh, mtod(mcur, caddr_t), ETHER_HDR_LEN);

		/*
		 * The previous two M_PREPEND could have inserted one or two
		 * mbufs in front so we have to update the previous packet's
		 * m_nextpkt.
		 */
		mcur->m_nextpkt = nextpkt;
		if (mprev != NULL)
			mprev->m_nextpkt = mcur;
		else {
			/* The first mbuf in the original chain needs to be
			 * updated. */
			*mp = mcur;
		}
		mprev = mcur;
	}

	KMOD_IPSTAT_INC(ips_fragmented);
	return (error);

dropit:
	for (mcur = *mp; mcur; mcur = m) { /* droping the full packet chain */
		m = mcur->m_nextpkt;
		m_freem(mcur);
	}
	return (error);
}
#endif /* INET */
/*
 * Link-state change handler for a member interface: recompute the bridge's
 * aggregate link state and notify the spanning-tree code for the port.
 */
static void
bridge_linkstate(struct ifnet *ifp)
{
	struct bridge_softc *sc = ifp->if_bridge;
	struct bridge_iflist *member;
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);
	member = bridge_lookup_member_if(sc, ifp);
	if (member != NULL) {
		bridge_linkcheck(sc);
		bstp_linkstate(&member->bif_stp);
	}
	NET_EPOCH_EXIT(et);
}
static void
bridge_linkcheck(struct bridge_softc *sc)
{
	struct bridge_iflist *member;
	int link, ls_capable;

	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);

	link = LINK_STATE_DOWN;
	ls_capable = 0;

	/* The bridge is considered up as soon as one member port is up. */
	CK_LIST_FOREACH(member, &sc->sc_iflist, bif_next) {
		if (member->bif_ifp->if_capabilities & IFCAP_LINKSTATE)
			ls_capable++;
		if (member->bif_ifp->if_link_state == LINK_STATE_UP) {
			link = LINK_STATE_UP;
			break;
		}
	}

	/*
	 * When no member reports link state at all we cannot tell, so
	 * default to up whenever the bridge has any members.
	 */
	if (ls_capable == 0 && !CK_LIST_EMPTY(&sc->sc_iflist))
		link = LINK_STATE_UP;

	if_link_state_change(sc->sc_ifp, link);
}
diff --git a/sys/net/if_disc.c b/sys/net/if_disc.c
index 9740a25f35c6..bafa676c941c 100644
--- a/sys/net/if_disc.c
+++ b/sys/net/if_disc.c
@@ -1,244 +1,239 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 1982, 1986, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Discard interface driver for protocol testing and timing.
* (Based on the loopback.)
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/bpf.h>
#include <net/vnet.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#ifdef TINY_DSMTU
#define DSMTU (1024+512)
#else
#define DSMTU 65532
#endif
struct disc_softc {
struct ifnet *sc_ifp;
};
static int discoutput(struct ifnet *, struct mbuf *,
const struct sockaddr *, struct route *);
static int discioctl(struct ifnet *, u_long, caddr_t);
static int disc_clone_create(struct if_clone *, int, caddr_t);
static void disc_clone_destroy(struct ifnet *);
static const char discname[] = "disc";
static MALLOC_DEFINE(M_DISC, discname, "Discard interface");
VNET_DEFINE_STATIC(struct if_clone *, disc_cloner);
#define V_disc_cloner VNET(disc_cloner)
static int
disc_clone_create(struct if_clone *ifc, int unit, caddr_t params)
{
struct ifnet *ifp;
struct disc_softc *sc;
sc = malloc(sizeof(struct disc_softc), M_DISC, M_WAITOK | M_ZERO);
ifp = sc->sc_ifp = if_alloc(IFT_LOOP);
- if (ifp == NULL) {
- free(sc, M_DISC);
- return (ENOSPC);
- }
-
ifp->if_softc = sc;
if_initname(ifp, discname, unit);
ifp->if_mtu = DSMTU;
/*
* IFF_LOOPBACK should not be removed from disc's flags because
* it controls what PF-specific routes are magically added when
* a network address is assigned to the interface. Things just
* won't work as intended w/o such routes because the output
* interface selection for a packet is totally route-driven.
* A valid alternative to IFF_LOOPBACK can be IFF_BROADCAST or
* IFF_POINTOPOINT, but it would result in different properties
* of the interface.
*/
ifp->if_flags = IFF_LOOPBACK | IFF_MULTICAST;
ifp->if_drv_flags = IFF_DRV_RUNNING;
ifp->if_ioctl = discioctl;
ifp->if_output = discoutput;
ifp->if_hdrlen = 0;
ifp->if_addrlen = 0;
ifp->if_snd.ifq_maxlen = 20;
if_attach(ifp);
bpfattach(ifp, DLT_NULL, sizeof(u_int32_t));
return (0);
}
static void
disc_clone_destroy(struct ifnet *ifp)
{
	struct disc_softc *sc;

	sc = ifp->if_softc;

	/* Tear down in the reverse order of disc_clone_create(). */
	bpfdetach(ifp);
	if_detach(ifp);
	if_free(ifp);

	free(sc, M_DISC);
}
static void
vnet_disc_init(const void *unused __unused)
{
	/* Register the "disc" cloner in this vnet; create no units up front. */
	V_disc_cloner = if_clone_simple(discname, disc_clone_create,
	    disc_clone_destroy, 0);
}
VNET_SYSINIT(vnet_disc_init, SI_SUB_PSEUDO, SI_ORDER_ANY,
vnet_disc_init, NULL);
static void
vnet_disc_uninit(const void *unused __unused)
{
	/* Unregister this vnet's "disc" cloner. */
	if_clone_detach(V_disc_cloner);
}
VNET_SYSUNINIT(vnet_disc_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY,
vnet_disc_uninit, NULL);
/*
 * Module event handler: load and unload are accepted (all real work happens
 * in the VNET_SYSINIT/SYSUNINIT handlers); everything else is unsupported.
 */
static int
disc_modevent(module_t mod, int type, void *data)
{
	if (type == MOD_LOAD || type == MOD_UNLOAD)
		return (0);
	return (EOPNOTSUPP);
}
static moduledata_t disc_mod = {
"if_disc",
disc_modevent,
NULL
};
DECLARE_MODULE(if_disc, disc_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
static int
discoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
	struct route *ro)
{
	u_int32_t af;

	M_ASSERTPKTHDR(m);

	/* BPF writes need to be handled specially. */
	if (dst->sa_family == AF_UNSPEC || dst->sa_family == pseudo_AF_HDRCMPLT)
		bcopy(dst->sa_data, &af, sizeof(af));
	else
		af = RO_GET_FAMILY(ro, dst);

	/* Hand the address-family word plus the packet to any bpf listeners. */
	if (bpf_peers_present(ifp->if_bpf))
		bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m);

	/* Count the packet as transmitted, then simply discard it. */
	m->m_pkthdr.rcvif = ifp;

	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);

	m_freem(m);
	return (0);
}
/*
* Process an ioctl request.
*/
static int
discioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		/*
		 * Everything else is done at a higher level.
		 */
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifr == NULL) {
			error = EAFNOSUPPORT;		/* XXX */
			break;
		}
		/* Accept only address families compiled into the kernel. */
		switch (ifr->ifr_addr.sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
#ifdef INET6
		case AF_INET6:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;
	case SIOCSIFMTU:
		/* Any MTU is acceptable since nothing is actually sent. */
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	default:
		error = EINVAL;
	}
	return (error);
}
diff --git a/sys/net/if_edsc.c b/sys/net/if_edsc.c
index 5a4dfb3dbed2..d5525f279034 100644
--- a/sys/net/if_edsc.c
+++ b/sys/net/if_edsc.c
@@ -1,372 +1,367 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 1982, 1986, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following edsclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following edsclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE EDSCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Discard interface driver for protocol testing and timing.
* Mimics an Ethernet device so that VLANs can be attached to it etc.
*/
#include <sys/param.h> /* types, important constants */
#include <sys/kernel.h> /* SYSINIT for load-time initializations */
#include <sys/malloc.h> /* malloc(9) */
#include <sys/module.h> /* module(9) */
#include <sys/mbuf.h> /* mbuf(9) */
#include <sys/socket.h> /* struct ifreq */
#include <sys/sockio.h> /* socket ioctl's */
/* #include <sys/systm.h> if you need printf(9) or other all-purpose globals */
#include <net/bpf.h> /* bpf(9) */
#include <net/ethernet.h> /* Ethernet related constants and types */
#include <net/if.h>
#include <net/if_var.h> /* basic part of ifnet(9) */
#include <net/if_private.h>
#include <net/if_clone.h> /* network interface cloning */
#include <net/if_types.h> /* IFT_ETHER and friends */
#include <net/vnet.h>
static const char edscname[] = "edsc";
/*
* Software configuration of an interface specific to this device type.
*/
struct edsc_softc {
struct ifnet *sc_ifp; /* ptr to generic interface configuration */
/*
* A non-null driver can keep various things here, for instance,
* the hardware revision, cached values of write-only registers, etc.
*/
};
/*
* Attach to the interface cloning framework.
*/
VNET_DEFINE_STATIC(struct if_clone *, edsc_cloner);
#define V_edsc_cloner VNET(edsc_cloner)
static int edsc_clone_create(struct if_clone *, int, caddr_t);
static void edsc_clone_destroy(struct ifnet *);
/*
* Interface driver methods.
*/
static void edsc_init(void *dummy);
/* static void edsc_input(struct ifnet *ifp, struct mbuf *m); would be here */
static int edsc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static void edsc_start(struct ifnet *ifp);
/*
* We'll allocate softc instances from this.
*/
static MALLOC_DEFINE(M_EDSC, edscname, "Ethernet discard interface");
/*
* Create an interface instance.
*/
static int
edsc_clone_create(struct if_clone *ifc, int unit, caddr_t params)
{
struct edsc_softc *sc;
struct ifnet *ifp;
struct ether_addr eaddr;
/*
* Allocate soft and ifnet structures. Link each to the other.
*/
sc = malloc(sizeof(struct edsc_softc), M_EDSC, M_WAITOK | M_ZERO);
ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- free(sc, M_EDSC);
- return (ENOSPC);
- }
-
ifp->if_softc = sc;
/*
* Get a name for this particular interface in its ifnet structure.
*/
if_initname(ifp, edscname, unit);
/*
* Typical Ethernet interface flags: we can do broadcast and
* multicast but can't hear our own broadcasts or multicasts.
*/
ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
	/*
	 * We can pretend we have the whole set of hardware features
	 * because we just discard all packets we get from the upper layer.
	 * However, the features are disabled initially.  They can be
	 * enabled via edsc_ioctl() when needed.
	 */
ifp->if_capabilities =
IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM |
IFCAP_HWCSUM | IFCAP_TSO |
IFCAP_JUMBO_MTU;
ifp->if_capenable = 0;
/*
* Set the interface driver methods.
*/
ifp->if_init = edsc_init;
/* ifp->if_input = edsc_input; */
ifp->if_ioctl = edsc_ioctl;
ifp->if_start = edsc_start;
/*
* Set the maximum output queue length from the global parameter.
*/
ifp->if_snd.ifq_maxlen = ifqmaxlen;
/*
* Generate an arbitrary MAC address for the cloned interface.
*/
ether_gen_addr(ifp, &eaddr);
/*
* Do ifnet initializations common to all Ethernet drivers
* and attach to the network interface framework.
*/
ether_ifattach(ifp, eaddr.octet);
/*
* Now we can mark the interface as running, i.e., ready
* for operation.
*/
ifp->if_drv_flags |= IFF_DRV_RUNNING;
return (0);
}
/*
* Destroy an interface instance.
*/
static void
edsc_clone_destroy(struct ifnet *ifp)
{
	struct edsc_softc *sc = ifp->if_softc;

	/*
	 * Detach from the network interface framework
	 * (reverse order of edsc_clone_create()).
	 */
	ether_ifdetach(ifp);

	/*
	 * Free memory occupied by ifnet and softc.
	 */
	if_free(ifp);
	free(sc, M_EDSC);
}
/*
* This method is invoked from ether_ioctl() when it's time
* to bring up the hardware.
*/
static void
edsc_init(void *dummy)
{
	/* No hardware to bring up: intentionally a no-op. */
#if 0	/* what a hardware driver would do here... */
	struct edsc_soft *sc = (struct edsc_softc *)dummy;
	struct ifnet *ifp = sc->sc_ifp;

	/* blah-blah-blah */
#endif
}
/*
* Network interfaces are controlled via the ioctl(2) syscall.
*/
static int
edsc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;

	switch (cmd) {
	case SIOCSIFCAP:
#if 1
		/*
		 * Just turn on any capabilities requested.
		 * The generic ifioctl() function has already made sure
		 * that they are supported, i.e., set in if_capabilities.
		 */
		ifp->if_capenable = ifr->ifr_reqcap;
#else
		/*
		 * A h/w driver would need to analyze the requested
		 * bits and program the hardware, e.g.:
		 */
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

			if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
				/* blah-blah-blah */
			else
				/* etc-etc-etc */
		}
#endif
		break;
	default:
		/*
		 * Offload the rest onto the common Ethernet handler.
		 */
		return (ether_ioctl(ifp, cmd, data));
	}

	return (0);
}
/*
* Process the output queue.
*/
/*
 * Drain the output queue, tapping bpf and updating counters, then drop
 * every packet — this is a discard interface.
 */
static void
edsc_start(struct ifnet *ifp)
{
	struct mbuf *m;

	/*
	 * A hardware interface driver can set IFF_DRV_OACTIVE
	 * in ifp->if_drv_flags:
	 *
	 * ifp->if_drv_flags |= IFF_DRV_OACTIVE;
	 *
	 * to prevent if_start from being invoked again while the
	 * transmission is under way.  The flag is to protect the
	 * device's transmitter, not the method itself.  The output
	 * queue is locked and several threads can process it in
	 * parallel safely, so the driver can use other means to
	 * serialize access to the transmitter.
	 *
	 * If using IFF_DRV_OACTIVE, the driver should clear the flag
	 * not earlier than the current transmission is complete, e.g.,
	 * upon an interrupt from the device, not just before returning
	 * from if_start.  This method merely starts the transmission,
	 * which may proceed asynchronously.
	 */

	/*
	 * We loop getting packets from the queue until it's empty.
	 * A h/w driver would loop until the device can accept more
	 * data into its buffer, or while there are free transmit
	 * descriptors, or whatever.
	 */
	for (;;) {
		/*
		 * Try to dequeue one packet.  Stop if the queue is empty.
		 * Use IF_DEQUEUE() here if ALTQ(9) support is unneeded.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * Let bpf(9) at the packet.
		 */
		BPF_MTAP(ifp, m);

		/*
		 * Update the interface counters.
		 */
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);

		/*
		 * Finally, just drop the packet.
		 * TODO: Reply to ARP requests unless IFF_NOARP is set.
		 */
		m_freem(m);
	}

	/*
	 * ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	 * would be here only if the transmission were synchronous.
	 */
}
/*
 * Per-VNET initialization: register the "edsc" cloner with the interface
 * cloning framework.
 */
static void
vnet_edsc_init(const void *unused __unused)
{

	/*
	 * The last argument is the number of units to create from the
	 * outset (and also the minimum number allowed); zero means no
	 * interface exists until one is explicitly requested.
	 */
	V_edsc_cloner = if_clone_simple(edscname, edsc_clone_create,
	    edsc_clone_destroy, 0);
}
VNET_SYSINIT(vnet_edsc_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    vnet_edsc_init, NULL);
/*
 * Per-VNET teardown: disconnect from the cloning framework.  Existing
 * interfaces are disposed of by if_clone_detach() itself.
 */
static void
vnet_edsc_uninit(const void *unused __unused)
{

	if_clone_detach(V_edsc_cloner);
}
VNET_SYSUNINIT(vnet_edsc_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY,
    vnet_edsc_uninit, NULL);
/*
 * Module event handler; only load and unload are of interest and both
 * are no-ops for this driver.  Other event types are rejected; see
 * module(9).
 */
static int
edsc_modevent(module_t mod, int type, void *data)
{
	int error;

	switch (type) {
	case MOD_LOAD:
	case MOD_UNLOAD:
		error = 0;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}
/* Module glue: name, event handler, no extra data. */
static moduledata_t edsc_mod = {
	"if_edsc",		/* name */
	edsc_modevent,		/* event handler */
	NULL			/* additional data */
};
DECLARE_MODULE(if_edsc, edsc_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
diff --git a/sys/net/if_enc.c b/sys/net/if_enc.c
index eaac0a843189..18587dc1cb46 100644
--- a/sys/net/if_enc.c
+++ b/sys/net/if_enc.c
@@ -1,452 +1,448 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2006 The FreeBSD Project.
* Copyright (c) 2015 Andrey V. Elsukov <ae@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/hhook.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_enc.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/pfil.h>
#include <net/route.h>
#include <net/netisr.h>
#include <net/bpf.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#include <netipsec/ipsec.h>
#include <netipsec/xform.h>
#define ENCMTU (1024+512)
/* XXX this define must have the same value as in OpenBSD */
#define M_CONF 0x0400 /* payload was encrypted (ESP-transport) */
#define M_AUTH 0x0800 /* payload was authenticated (AH or ESP auth) */
#define M_AUTH_AH 0x2000 /* header was authenticated (AH) */
struct enchdr {
u_int32_t af;
u_int32_t spi;
u_int32_t flags;
};
struct enc_softc {
struct ifnet *sc_ifp;
};
VNET_DEFINE_STATIC(struct enc_softc *, enc_sc);
#define V_enc_sc VNET(enc_sc)
VNET_DEFINE_STATIC(struct if_clone *, enc_cloner);
#define V_enc_cloner VNET(enc_cloner)
static int enc_ioctl(struct ifnet *, u_long, caddr_t);
static int enc_output(struct ifnet *, struct mbuf *,
const struct sockaddr *, struct route *);
static int enc_clone_create(struct if_clone *, int, caddr_t);
static void enc_clone_destroy(struct ifnet *);
static int enc_add_hhooks(struct enc_softc *);
static void enc_remove_hhooks(struct enc_softc *);
static const char encname[] = "enc";
#define IPSEC_ENC_AFTER_PFIL 0x04
/*
* Before and after are relative to when we are stripping the
* outer IP header.
*
* AFTER_PFIL flag used only for bpf_mask_*. It enables BPF capturing
* after PFIL hook execution. It might be useful when PFIL hook does
* some changes to the packet, e.g. address translation. If PFIL hook
* consumes mbuf, nothing will be captured.
*/
VNET_DEFINE_STATIC(int, filter_mask_in) = IPSEC_ENC_BEFORE;
VNET_DEFINE_STATIC(int, bpf_mask_in) = IPSEC_ENC_BEFORE;
VNET_DEFINE_STATIC(int, filter_mask_out) = IPSEC_ENC_BEFORE;
VNET_DEFINE_STATIC(int, bpf_mask_out) = IPSEC_ENC_BEFORE | IPSEC_ENC_AFTER;
#define V_filter_mask_in VNET(filter_mask_in)
#define V_bpf_mask_in VNET(bpf_mask_in)
#define V_filter_mask_out VNET(filter_mask_out)
#define V_bpf_mask_out VNET(bpf_mask_out)
static SYSCTL_NODE(_net, OID_AUTO, enc, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"enc sysctl");
static SYSCTL_NODE(_net_enc, OID_AUTO, in, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"enc input sysctl");
static SYSCTL_NODE(_net_enc, OID_AUTO, out, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"enc output sysctl");
SYSCTL_INT(_net_enc_in, OID_AUTO, ipsec_filter_mask,
CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(filter_mask_in), 0,
"IPsec input firewall filter mask");
SYSCTL_INT(_net_enc_in, OID_AUTO, ipsec_bpf_mask,
CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(bpf_mask_in), 0,
"IPsec input bpf mask");
SYSCTL_INT(_net_enc_out, OID_AUTO, ipsec_filter_mask,
CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(filter_mask_out), 0,
"IPsec output firewall filter mask");
SYSCTL_INT(_net_enc_out, OID_AUTO, ipsec_bpf_mask,
CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(bpf_mask_out), 0,
"IPsec output bpf mask");
/*
 * Destroy the per-VNET enc(4) singleton: detach from bpf(9) and the
 * network stack, then release the ifnet and the softc.
 */
static void
enc_clone_destroy(struct ifnet *ifp)
{
	struct enc_softc *sc;

	sc = ifp->if_softc;
	/* Only one enc interface exists per VNET. */
	KASSERT(sc == V_enc_sc, ("sc != ifp->if_softc"));

	bpfdetach(ifp);
	if_detach(ifp);
	if_free(ifp);
	free(sc, M_DEVBUF);
	V_enc_sc = NULL;	/* allow a future instance to be created */
}
/*
 * Create the per-VNET enc(4) interface.  Only a single instance is
 * allowed per VNET; a second request fails with EEXIST.
 * NOTE(review): the lines below prefixed with '-' are unified-diff
 * residue from the page this file was extracted from (the removed
 * if_alloc() NULL check); they are preserved verbatim.
 */
static int
enc_clone_create(struct if_clone *ifc, int unit, caddr_t params)
{
	struct ifnet *ifp;
	struct enc_softc *sc;

	sc = malloc(sizeof(struct enc_softc), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	ifp = sc->sc_ifp = if_alloc(IFT_ENC);
-	if (ifp == NULL) {
-		free(sc, M_DEVBUF);
-		return (ENOSPC);
-	}
	if (V_enc_sc != NULL) {
		if_free(ifp);
		free(sc, M_DEVBUF);
		return (EEXIST);
	}
	V_enc_sc = sc;
	if_initname(ifp, encname, unit);
	ifp->if_mtu = ENCMTU;
	ifp->if_ioctl = enc_ioctl;
	ifp->if_output = enc_output;
	ifp->if_softc = sc;
	if_attach(ifp);
	/* DLT_ENC frames carry a struct enchdr pseudo link-layer header. */
	bpfattach(ifp, DLT_ENC, sizeof(struct enchdr));
	return (0);
}
/*
 * enc(4) never transmits: all output handed to the interface is
 * silently discarded.
 */
static int
enc_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    struct route *ro)
{

	m_freem(m);
	return (0);
}
/*
 * Only SIOCSIFFLAGS is supported: mirror the administrative IFF_UP flag
 * into the driver's IFF_DRV_RUNNING state.  Anything else is EINVAL.
 */
static int
enc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{

	if (cmd != SIOCSIFFLAGS)
		return (EINVAL);

	if ((ifp->if_flags & IFF_UP) != 0)
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
	else
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	return (0);
}
/*
 * Hand a copy of the packet to bpf(9) listeners on the enc interface,
 * subject to the per-direction bpf stage mask sysctls.  The DLT_ENC
 * pseudo link-layer header (struct enchdr) carries the address family,
 * the SPI and flags describing which IPsec services were applied.
 */
static void
enc_bpftap(struct ifnet *ifp, struct mbuf *m, const struct secasvar *sav,
    int32_t hhook_type, uint8_t enc, uint8_t af)
{
	struct enchdr hdr;

	/* Honour the per-direction stage masks. */
	if ((hhook_type == HHOOK_TYPE_IPSEC_IN &&
	    (enc & V_bpf_mask_in) == 0) ||
	    (hhook_type == HHOOK_TYPE_IPSEC_OUT &&
	    (enc & V_bpf_mask_out) == 0))
		return;

	/* Nothing to do without listeners. */
	if (!bpf_peers_present(ifp->if_bpf))
		return;

	/* Build the pseudo header describing the applied transforms. */
	hdr.af = af;
	hdr.spi = sav->spi;
	hdr.flags = 0;
	if (sav->alg_enc != SADB_EALG_NONE)
		hdr.flags |= M_CONF;	/* payload was encrypted */
	if (sav->alg_auth != SADB_AALG_NONE)
		hdr.flags |= M_AUTH;	/* payload was authenticated */

	bpf_mtap2(ifp->if_bpf, &hdr, sizeof(hdr), m);
}
/*
 * One helper hook function is used by any hook points.
 * + from hhook_type we can determine the packet direction:
 *   HHOOK_TYPE_IPSEC_IN or HHOOK_TYPE_IPSEC_OUT;
 * + from hhook_id we can determine address family: AF_INET or AF_INET6;
 * + udata contains pointer to enc_softc;
 * + ctx_data contains pointer to struct ipsec_ctx_data.
 *
 * Returns 0 to let IPsec processing continue, EACCES when a pfil filter
 * consumed/blocked the packet, or another errno on a mismatched hook.
 */
static int
enc_hhook(int32_t hhook_type, int32_t hhook_id, void *udata, void *ctx_data,
    void *hdata, struct osd *hosd)
{
	struct ipsec_ctx_data *ctx;
	struct enc_softc *sc;
	struct ifnet *ifp, *rcvif;
	struct pfil_head *ph;
	int pdir, ret;

	sc = (struct enc_softc *)udata;
	ifp = sc->sc_ifp;
	/* Administratively down: do nothing at all. */
	if ((ifp->if_flags & IFF_UP) == 0)
		return (0);

	ctx = (struct ipsec_ctx_data *)ctx_data;
	/* XXX: wrong hook point was used by caller? */
	if (ctx->af != hhook_id)
		return (EPFNOSUPPORT);

	/* First bpf tap, before any pfil processing. */
	enc_bpftap(ifp, *ctx->mp, ctx->sav, hhook_type, ctx->enc, ctx->af);

	switch (hhook_type) {
	case HHOOK_TYPE_IPSEC_IN:
		if (ctx->enc == IPSEC_ENC_BEFORE) {
			/* Do accounting only once */
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
			if_inc_counter(ifp, IFCOUNTER_IBYTES,
			    (*ctx->mp)->m_pkthdr.len);
		}
		if ((ctx->enc & V_filter_mask_in) == 0)
			return (0);	/* skip pfil processing */
		pdir = PFIL_IN;
		break;
	case HHOOK_TYPE_IPSEC_OUT:
		if (ctx->enc == IPSEC_ENC_BEFORE) {
			/* Do accounting only once */
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
			if_inc_counter(ifp, IFCOUNTER_OBYTES,
			    (*ctx->mp)->m_pkthdr.len);
		}
		if ((ctx->enc & V_filter_mask_out) == 0)
			return (0);	/* skip pfil processing */
		pdir = PFIL_OUT;
		break;
	default:
		return (EINVAL);
	}

	/* Select the pfil head matching the address family. */
	switch (hhook_id) {
#ifdef INET
	case AF_INET:
		ph = V_inet_pfil_head;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		ph = V_inet6_pfil_head;
		break;
#endif
	default:
		ph = NULL;
	}
	/* No pfil head, or no hooks registered for this direction: done. */
	if (ph == NULL || (pdir == PFIL_OUT && !PFIL_HOOKED_OUT(ph)) ||
	    (pdir == PFIL_IN && !PFIL_HOOKED_IN(ph)))
		return (0);

	/* Make a packet looks like it was received on enc(4) */
	rcvif = (*ctx->mp)->m_pkthdr.rcvif;
	(*ctx->mp)->m_pkthdr.rcvif = ifp;
	if (pdir == PFIL_IN)
		ret = pfil_mbuf_in(ph, ctx->mp, ifp, ctx->inp);
	else
		ret = pfil_mbuf_out(ph, ctx->mp, ifp, ctx->inp);
	if (ret != PFIL_PASS) {
		*ctx->mp = NULL;	/* consumed by filter */
		return (EACCES);
	}
	/* Restore the original receiving interface. */
	(*ctx->mp)->m_pkthdr.rcvif = rcvif;

	/* Second, post-pfil bpf tap (see IPSEC_ENC_AFTER_PFIL). */
	enc_bpftap(ifp, *ctx->mp, ctx->sav, hhook_type,
	    IPSEC_ENC_AFTER_PFIL, ctx->af);
	return (0);
}
/*
 * Register enc_hhook() at the IPsec input and output helper-hook points
 * for each compiled-in address family.  Returns 0 on success, an errno
 * from hhook_add_hook() on failure, or EPFNOSUPPORT when neither INET
 * nor INET6 is compiled in.
 */
static int
enc_add_hhooks(struct enc_softc *sc)
{
	struct hookinfo hki;
	int error;

	/* Default result if no address family is configured. */
	error = EPFNOSUPPORT;
	hki.hook_func = enc_hhook;
	hki.hook_helper = NULL;
	hki.hook_udata = sc;
#ifdef INET
	hki.hook_id = AF_INET;
	hki.hook_type = HHOOK_TYPE_IPSEC_IN;
	error = hhook_add_hook(V_ipsec_hhh_in[HHOOK_IPSEC_INET],
	    &hki, HHOOK_WAITOK);
	if (error != 0)
		return (error);
	hki.hook_type = HHOOK_TYPE_IPSEC_OUT;
	error = hhook_add_hook(V_ipsec_hhh_out[HHOOK_IPSEC_INET],
	    &hki, HHOOK_WAITOK);
	if (error != 0)
		return (error);
#endif
#ifdef INET6
	hki.hook_id = AF_INET6;
	hki.hook_type = HHOOK_TYPE_IPSEC_IN;
	error = hhook_add_hook(V_ipsec_hhh_in[HHOOK_IPSEC_INET6],
	    &hki, HHOOK_WAITOK);
	if (error != 0)
		return (error);
	hki.hook_type = HHOOK_TYPE_IPSEC_OUT;
	error = hhook_add_hook(V_ipsec_hhh_out[HHOOK_IPSEC_INET6],
	    &hki, HHOOK_WAITOK);
	if (error != 0)
		return (error);
#endif
	return (error);
}
/*
 * Unregister the helper hooks installed by enc_add_hhooks().  The
 * hookinfo must match what was registered for removal to succeed.
 */
static void
enc_remove_hhooks(struct enc_softc *sc)
{
	struct hookinfo hki;

	hki.hook_func = enc_hhook;
	hki.hook_helper = NULL;
	hki.hook_udata = sc;
#ifdef INET
	hki.hook_id = AF_INET;
	hki.hook_type = HHOOK_TYPE_IPSEC_IN;
	hhook_remove_hook(V_ipsec_hhh_in[HHOOK_IPSEC_INET], &hki);
	hki.hook_type = HHOOK_TYPE_IPSEC_OUT;
	hhook_remove_hook(V_ipsec_hhh_out[HHOOK_IPSEC_INET], &hki);
#endif
#ifdef INET6
	hki.hook_id = AF_INET6;
	hki.hook_type = HHOOK_TYPE_IPSEC_IN;
	hhook_remove_hook(V_ipsec_hhh_in[HHOOK_IPSEC_INET6], &hki);
	hki.hook_type = HHOOK_TYPE_IPSEC_OUT;
	hhook_remove_hook(V_ipsec_hhh_out[HHOOK_IPSEC_INET6], &hki);
#endif
}
/*
 * Per-VNET initialization: register the "enc" cloner.  One unit is
 * created immediately (last argument), giving each VNET its enc0.
 */
static void
vnet_enc_init(const void *unused __unused)
{

	V_enc_sc = NULL;
	V_enc_cloner = if_clone_simple(encname, enc_clone_create,
	    enc_clone_destroy, 1);
}
VNET_SYSINIT(vnet_enc_init, SI_SUB_PSEUDO, SI_ORDER_ANY,
    vnet_enc_init, NULL);
/*
 * Second-stage per-VNET initialization, run after the protocol domains
 * are attached: wire the softc into the IPsec helper hooks.  On failure
 * the interface is torn down again.
 */
static void
vnet_enc_init_proto(void *unused __unused)
{
	KASSERT(V_enc_sc != NULL, ("%s: V_enc_sc is %p\n", __func__, V_enc_sc));

	if (enc_add_hhooks(V_enc_sc) != 0)
		enc_clone_destroy(V_enc_sc->sc_ifp);
}
VNET_SYSINIT(vnet_enc_init_proto, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    vnet_enc_init_proto, NULL);
/*
 * Per-VNET teardown: detach the cloner; the existing interface is
 * destroyed by the framework.
 */
static void
vnet_enc_uninit(const void *unused __unused)
{
	KASSERT(V_enc_sc != NULL, ("%s: V_enc_sc is %p\n", __func__, V_enc_sc));

	if_clone_detach(V_enc_cloner);
}
VNET_SYSUNINIT(vnet_enc_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY,
    vnet_enc_uninit, NULL);
/*
 * The hhook consumer needs to go before ip[6]_destroy are called on
 * SI_ORDER_THIRD.
 */
static void
vnet_enc_uninit_hhook(const void *unused __unused)
{
	KASSERT(V_enc_sc != NULL, ("%s: V_enc_sc is %p\n", __func__, V_enc_sc));

	/* Detach from the IPsec helper-hook points for this VNET. */
	enc_remove_hhooks(V_enc_sc);
}
VNET_SYSUNINIT(vnet_enc_uninit_hhook, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH,
    vnet_enc_uninit_hhook, NULL);
/*
 * Module event handler; load and unload are accepted no-ops, anything
 * else is unsupported.  See module(9).
 */
static int
enc_modevent(module_t mod, int type, void *data)
{

	if (type == MOD_LOAD || type == MOD_UNLOAD)
		return (0);
	return (EOPNOTSUPP);
}
/* Module glue: name, event handler, no extra data. */
static moduledata_t enc_mod = {
	"if_enc",
	enc_modevent,
	0
};
DECLARE_MODULE(if_enc, enc_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_enc, 1);
diff --git a/sys/net/if_epair.c b/sys/net/if_epair.c
index 7bc218321f01..7051e31565d4 100644
--- a/sys/net/if_epair.c
+++ b/sys/net/if_epair.c
@@ -1,942 +1,931 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2008 The FreeBSD Foundation
* Copyright (c) 2009-2021 Bjoern A. Zeeb <bz@FreeBSD.org>
*
* This software was developed by CK Software GmbH under sponsorship
* from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* A pair of virtual back-to-back connected ethernet like interfaces
* (``two interfaces with a virtual cross-over cable'').
*
* This is mostly intended to be used to provide connectivity between
* different virtual network stack instances.
*/
#include <sys/cdefs.h>
#include "opt_rss.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/if_types.h>
#include <net/netisr.h>
#ifdef RSS
#include <net/rss_config.h>
#ifdef INET
#include <netinet/in_rss.h>
#endif
#ifdef INET6
#include <netinet6/in6_rss.h>
#endif
#endif
#include <net/vnet.h>
static const char epairname[] = "epair";
#define RXRSIZE 4096 /* Probably overkill by 4-8x. */
static MALLOC_DEFINE(M_EPAIR, epairname,
"Pair of virtual cross-over connected Ethernet-like interfaces");
VNET_DEFINE_STATIC(struct if_clone *, epair_cloner);
#define V_epair_cloner VNET(epair_cloner)
static unsigned int next_index = 0;
#define EPAIR_LOCK_INIT() mtx_init(&epair_n_index_mtx, "epairidx", \
NULL, MTX_DEF)
#define EPAIR_LOCK_DESTROY() mtx_destroy(&epair_n_index_mtx)
#define EPAIR_LOCK() mtx_lock(&epair_n_index_mtx)
#define EPAIR_UNLOCK() mtx_unlock(&epair_n_index_mtx)
struct epair_softc;
struct epair_queue {
struct mtx mtx;
struct mbufq q;
int id;
enum {
EPAIR_QUEUE_IDLE,
EPAIR_QUEUE_WAKING,
EPAIR_QUEUE_RUNNING,
} state;
struct task tx_task;
struct epair_softc *sc;
};
static struct mtx epair_n_index_mtx;
struct epair_softc {
struct ifnet *ifp; /* This ifp. */
struct ifnet *oifp; /* other ifp of pair. */
int num_queues;
struct epair_queue *queues;
struct ifmedia media; /* Media config (fake). */
STAILQ_ENTRY(epair_softc) entry;
};
struct epair_tasks_t {
int tasks;
struct taskqueue *tq[MAXCPU];
};
static struct epair_tasks_t epair_tasks;
/*
 * Strip per-interface state from an mbuf before it is handed to the
 * peer interface: send tags, vlan tagging, and non-persistent mbuf tags.
 */
static void
epair_clear_mbuf(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);

	/* Remove any CSUM_SND_TAG as ether_input will barf. */
	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) {
		m_snd_tag_rele(m->m_pkthdr.snd_tag);
		m->m_pkthdr.snd_tag = NULL;
		m->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
	}

	/* Clear vlan information. */
	m->m_flags &= ~M_VLANTAG;
	m->m_pkthdr.ether_vtag = 0;

	m_tag_delete_nonpersistent(m);
}
/*
 * Deferred taskqueue handler: flush one epair queue and feed every
 * queued packet into the receiving interface's input path.  The queue
 * state machine (IDLE -> WAKING -> RUNNING) lets epair_menq() decide
 * whether a wakeup task needs to be enqueued.
 */
static void
epair_tx_start_deferred(void *arg, int pending)
{
	struct epair_queue *q = (struct epair_queue *)arg;
	if_t ifp;
	struct mbuf *m, *n;
	bool resched;

	ifp = q->sc->ifp;

	/* Keep the ifnet alive and run in its VNET context. */
	if_ref(ifp);
	CURVNET_SET(ifp->if_vnet);

	/* Grab the whole chain in one go and mark ourselves running. */
	mtx_lock(&q->mtx);
	m = mbufq_flush(&q->q);
	q->state = EPAIR_QUEUE_RUNNING;
	mtx_unlock(&q->mtx);

	/* Deliver each packet; unlink before if_input() takes ownership. */
	while (m != NULL) {
		n = STAILQ_NEXT(m, m_stailqpkt);
		m->m_nextpkt = NULL;
		if_input(ifp, m);
		m = n;
	}

	/*
	 * Avoid flushing the queue more than once per task. We can otherwise
	 * end up starving ourselves in a multi-epair routing configuration.
	 */
	mtx_lock(&q->mtx);
	if (!mbufq_empty(&q->q)) {
		resched = true;
		q->state = EPAIR_QUEUE_WAKING;
	} else {
		resched = false;
		q->state = EPAIR_QUEUE_IDLE;
	}
	mtx_unlock(&q->mtx);

	if (resched)
		taskqueue_enqueue(epair_tasks.tq[q->id], &q->tx_task);

	CURVNET_RESTORE();
	if_rele(ifp);
}
/*
 * Pick the queue a packet should be enqueued on.  With RSS the mbuf's
 * flowid (or a software hash of the headers) spreads load across the
 * per-CPU queues; without RSS there is effectively a single queue.
 */
static struct epair_queue *
epair_select_queue(struct epair_softc *sc, struct mbuf *m)
{
	uint32_t bucket;
#ifdef RSS
	struct ether_header *eh;
	int ret;

	ret = rss_m2bucket(m, &bucket);
	if (ret) {
		/* Actually hash the packet. */
		eh = mtod(m, struct ether_header *);
		switch (ntohs(eh->ether_type)) {
#ifdef INET
		case ETHERTYPE_IP:
			rss_soft_m2cpuid_v4(m, 0, &bucket);
			break;
#endif
#ifdef INET6
		case ETHERTYPE_IPV6:
			rss_soft_m2cpuid_v6(m, 0, &bucket);
			break;
#endif
		default:
			/* Unknown ethertype: fall back to queue 0. */
			bucket = 0;
			break;
		}
	}
	bucket %= sc->num_queues;
#else
	bucket = 0;
#endif
	return (&sc->queues[bucket]);
}
/*
 * Re-home an mbuf onto the peer interface before enqueueing: clear
 * stale per-interface state, set the new receive interface and FIB.
 */
static void
epair_prepare_mbuf(struct mbuf *m, struct ifnet *src_ifp)
{

	M_ASSERTPKTHDR(m);
	epair_clear_mbuf(m);
	if_setrcvif(m, src_ifp);
	M_SETFIB(m, src_ifp->if_fib);

	MPASS(m->m_nextpkt == NULL);
	MPASS((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0);
}
/*
 * Enqueue an mbuf for delivery to the peer interface and update the
 * counters on both sides.  Delivery itself happens asynchronously in
 * epair_tx_start_deferred().
 */
static void
epair_menq(struct mbuf *m, struct epair_softc *osc)
{
	struct epair_queue *q;
	struct ifnet *ifp, *oifp;
	int error, len;
	bool mcast;

	/*
	 * I know this looks weird. We pass the "other sc" as we need that one
	 * and can get both ifps from it as well.
	 */
	oifp = osc->ifp;	/* receiving side of the pair */
	ifp = osc->oifp;	/* transmitting side of the pair */

	epair_prepare_mbuf(m, oifp);

	/* Save values as once the mbuf is queued, it's not ours anymore. */
	len = m->m_pkthdr.len;
	mcast = (m->m_flags & (M_BCAST | M_MCAST)) != 0;

	q = epair_select_queue(osc, m);

	mtx_lock(&q->mtx);
	/* Kick the consumer task only when the queue was idle. */
	if (q->state == EPAIR_QUEUE_IDLE) {
		q->state = EPAIR_QUEUE_WAKING;
		taskqueue_enqueue(epair_tasks.tq[q->id], &q->tx_task);
	}
	error = mbufq_enqueue(&q->q, m);
	mtx_unlock(&q->mtx);

	if (error != 0) {
		/* Queue full: the packet is dropped on the TX side. */
		m_freem(m);
		if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
	} else {
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
		if (mcast)
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
		if_inc_counter(oifp, IFCOUNTER_IPACKETS, 1);
	}
}
/*
 * Classic if_start() path (used by the ALTQ branch of epair_transmit()):
 * drain our send queue into the peer interface's receive queue.
 */
static void
epair_start(struct ifnet *ifp)
{
	struct mbuf *m;
	struct epair_softc *sc;
	struct ifnet *oifp;

	/*
	 * We get packets here from ether_output via if_handoff()
	 * and need to put them into the input queue of the oifp
	 * and will put the packet into the receive-queue (rxq) of the
	 * other interface (oifp) of our pair.
	 */
	sc = ifp->if_softc;
	oifp = sc->oifp;
	/* From here on, sc is the *peer's* softc (what epair_menq wants). */
	sc = oifp->if_softc;
	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		M_ASSERTPKTHDR(m);
		BPF_MTAP(ifp, m);

		/* In case either interface is not usable drop the packet. */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
		    (ifp->if_flags & IFF_UP) == 0 ||
		    (oifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
		    (oifp->if_flags & IFF_UP) == 0) {
			m_freem(m);
			continue;
		}

		epair_menq(m, sc);
	}
}
/*
 * if_transmit() entry point: validate the packet and both interfaces,
 * then hand the mbuf to the peer via epair_menq().  With ALTQ enabled
 * on the send queue, fall back to the classic if_start() path instead.
 *
 * Returns 0 on success (including a silent drop when the peer is down),
 * E2BIG/ENXIO/ENETDOWN on the corresponding local failures.
 */
static int
epair_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct epair_softc *sc;
	struct ifnet *oifp;
#ifdef ALTQ
	int len;
	bool mcast;
#endif

	if (m == NULL)
		return (0);
	M_ASSERTPKTHDR(m);

	/*
	 * We could just transmit this, but it makes testing easier if we're a
	 * little bit more like real hardware.
	 * Allow just that little bit extra for ethernet (and vlan) headers.
	 */
	if (m->m_pkthdr.len > (ifp->if_mtu + sizeof(struct ether_vlan_header))) {
		m_freem(m);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		return (E2BIG);
	}

	/*
	 * We are not going to use the interface en/dequeue mechanism
	 * on the TX side. We are called from ether_output_frame()
	 * and will put the packet into the receive-queue (rxq) of the
	 * other interface (oifp) of our pair.
	 */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		m_freem(m);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		return (ENXIO);
	}
	if ((ifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		return (ENETDOWN);
	}

	BPF_MTAP(ifp, m);

	/*
	 * In case the outgoing interface is not usable,
	 * drop the packet.
	 */
	sc = ifp->if_softc;
	oifp = sc->oifp;
	if ((oifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    (oifp->if_flags & IFF_UP) == 0) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (0);
	}

#ifdef ALTQ
	len = m->m_pkthdr.len;
	mcast = (m->m_flags & (M_BCAST | M_MCAST)) != 0;
	int error = 0;

	/* Support ALTQ via the classic if_start() path. */
	IF_LOCK(&ifp->if_snd);
	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
		ALTQ_ENQUEUE(&ifp->if_snd, m, NULL, error);
		if (error)
			if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
		IF_UNLOCK(&ifp->if_snd);
		if (!error) {
			if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
			if (mcast)
				if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
			epair_start(ifp);
		}
		return (error);
	}
	IF_UNLOCK(&ifp->if_snd);
#endif

	epair_menq(m, oifp->if_softc);
	return (0);
}
/* Nothing to flush: epair keeps no driver-owned send queue. */
static void
epair_qflush(struct ifnet *ifp __unused)
{
}
/* The media is fake; accept any change request without doing anything. */
static int
epair_media_change(struct ifnet *ifp __unused)
{

	/* Do nothing. */
	return (0);
}
/* Report a fixed, always-active 10GbE full-duplex link. */
static void
epair_media_status(struct ifnet *ifp __unused, struct ifmediareq *imr)
{

	imr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	imr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX;
}
/*
 * Interface ioctl handler.  Flag and multicast changes are accepted as
 * no-ops, media requests go to the fake ifmedia state, any MTU is
 * allowed, and everything else falls through to ether_ioctl().
 */
static int
epair_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct epair_softc *sc;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Accepted, nothing to do. */
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		sc = ifp->if_softc;
		error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
		break;

	case SIOCSIFMTU:
		/* We basically allow all kinds of MTUs. */
		ifp->if_mtu = ifr->ifr_mtu;
		break;

	default:
		/* Let the common ethernet handler process this. */
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
/* if_init handler: nothing to initialize for a pseudo device. */
static void
epair_init(void *dummy __unused)
{
}
/*
 * Interface cloning functions.
 * We use our private ones so that we can create/destroy our secondary
 * device along with the primary one.
 */

/*
 * Match "epair" or "epair<n>" but never the derived "epair<n>a" /
 * "epair<n>b" names, which are created implicitly.  Returns 1 on match.
 */
static int
epair_clone_match(struct if_clone *ifc, const char *name)
{
	const char *p;

	/* Must start with the base name. */
	if (strncmp(epairname, name, sizeof(epairname) - 1) != 0)
		return (0);

	/* Anything after the base name must be a decimal digit. */
	for (p = name + sizeof(epairname) - 1; *p != '\0'; p++) {
		if (*p < '0' || *p > '9')
			return (0);
	}

	return (1);
}
/*
 * Attach the 'b' interface of a pair: derive its MAC from the 'a' side
 * (last byte 0x0b instead of 0x0a) and register it with the cloner.
 */
static void
epair_clone_add(struct if_clone *ifc, struct epair_softc *scb)
{
	struct ifnet *ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];	/* 00:00:00:00:00:00 */

	ifp = scb->ifp;
	/* Copy epairNa etheraddr and change the last byte. */
	memcpy(eaddr, scb->oifp->if_hw_addr, ETHER_ADDR_LEN);
	eaddr[5] = 0x0b;
	ether_ifattach(ifp, eaddr);

	if_clone_addif(ifc, ifp);
}
/*
 * Allocate and initialize one epair softc: the ifnet, the per-CPU
 * packet queues with their tasks, and the fake media state.
 * NOTE(review): the '-' prefixed lines below are unified-diff residue
 * (the removed if_alloc() NULL check); preserved verbatim.
 */
static struct epair_softc *
epair_alloc_sc(struct if_clone *ifc)
{
	struct epair_softc *sc;

	struct ifnet *ifp = if_alloc(IFT_ETHER);
-	if (ifp == NULL)
-		return (NULL);
-
	sc = malloc(sizeof(struct epair_softc), M_EPAIR, M_WAITOK | M_ZERO);
	sc->ifp = ifp;
	/* One queue per taskqueue created at module load. */
	sc->num_queues = epair_tasks.tasks;
	sc->queues = mallocarray(sc->num_queues, sizeof(struct epair_queue),
	    M_EPAIR, M_WAITOK);
	for (int i = 0; i < sc->num_queues; i++) {
		struct epair_queue *q = &sc->queues[i];
		q->id = i;
		q->state = EPAIR_QUEUE_IDLE;
		mtx_init(&q->mtx, "epairq", NULL, MTX_DEF | MTX_NEW);
		mbufq_init(&q->q, RXRSIZE);
		q->sc = sc;
		NET_TASK_INIT(&q->tx_task, 0, epair_tx_start_deferred, q);
	}

	/* Initialise pseudo media types. */
	ifmedia_init(&sc->media, 0, epair_media_change, epair_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T);

	return (sc);
}
/*
 * Fill in the generic ifnet fields and method table for one side of an
 * epair.  The caller supplies the final interface name and unit.
 */
static void
epair_setup_ifp(struct epair_softc *sc, char *name, int unit)
{
	struct ifnet *ifp = sc->ifp;

	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, name, IFNAMSIZ);
	ifp->if_dname = epairname;
	ifp->if_dunit = unit;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_capenable = IFCAP_VLAN_MTU;
	ifp->if_transmit = epair_transmit;
	ifp->if_qflush = epair_qflush;
	ifp->if_start = epair_start;	/* used by the ALTQ fallback path */
	ifp->if_ioctl = epair_ioctl;
	ifp->if_init = epair_init;
	if_setsendqlen(ifp, ifqmaxlen);
	if_setsendqready(ifp);

	ifp->if_baudrate = IF_Gbps(10);	/* arbitrary maximum */
}
/*
 * Generate a locally-administered MAC address for the 'a' interface of
 * a pair by hashing the hostid together with a monotonic index derived
 * from the interface index.  epair_clone_add() later flips the last
 * byte (0x0a -> 0x0b) for the 'b' side.
 */
static void
epair_generate_mac(struct epair_softc *sc, uint8_t *eaddr)
{
	uint32_t key[3];
	uint32_t hash;
	uint64_t hostid;

	EPAIR_LOCK();
#ifdef SMP
	/* Get an approximate distribution. */
	hash = next_index % mp_ncpus;
#else
	hash = 0;
#endif
	/*
	 * NOTE(review): the value computed above is overwritten by
	 * jenkins_hash32() below before it is ever read; kept for
	 * fidelity with the upstream code.
	 */
	EPAIR_UNLOCK();

	/*
	 * Calculate the etheraddr hashing the hostid and the
	 * interface index. The result would be hopefully unique.
	 * Note that the "a" component of an epair instance may get moved
	 * to a different VNET after creation. In that case its index
	 * will be freed and the index can get reused by new epair instance.
	 * Make sure we do not create same etheraddr again.
	 */
	getcredhostid(curthread->td_ucred, (unsigned long *)&hostid);
	if (hostid == 0)
		arc4rand(&hostid, sizeof(hostid), 0);

	struct ifnet *ifp = sc->ifp;
	EPAIR_LOCK();
	/* Keep next_index strictly increasing across create/destroy. */
	if (ifp->if_index > next_index)
		next_index = ifp->if_index;
	else
		next_index++;
	key[0] = (uint32_t)next_index;
	EPAIR_UNLOCK();
	key[1] = (uint32_t)(hostid & 0xffffffff);
	/*
	 * Fixed: the mask literal was previously written with nine 'f'
	 * digits (0xfffffffff, a 36-bit value).  Behavior was unchanged
	 * only because of the uint32_t cast; use the intended 32-bit mask.
	 */
	key[2] = (uint32_t)((hostid >> 32) & 0xffffffff);
	hash = jenkins_hash32(key, 3, 0);

	eaddr[0] = 0x02;		/* locally administered address */
	memcpy(&eaddr[1], &hash, 4);
	eaddr[5] = 0x0a;		/* 'a' side marker */
}
/*
 * Release all resources held by one epair softc: the ifnet, the fake
 * media state, the per-queue mutexes, the queue array, and the softc.
 * NOTE(review): the '-' prefixed lines below are unified-diff residue
 * (the removed NULL guard); preserved verbatim.
 */
static void
epair_free_sc(struct epair_softc *sc)
{
-	if (sc == NULL)
-		return;

	if_free(sc->ifp);
	ifmedia_removeall(&sc->media);
	for (int i = 0; i < sc->num_queues; i++) {
		struct epair_queue *q = &sc->queues[i];
		mtx_destroy(&q->mtx);
	}
	free(sc->queues, M_EPAIR);
	free(sc, M_EPAIR);
}
/*
 * Mark one interface running (or stopped) and announce the matching
 * link-state transition.  Note the ordering: RUNNING is set before the
 * link goes up, and the link goes down before RUNNING is cleared.
 */
static void
epair_set_state(struct ifnet *ifp, bool running)
{

	if (!running) {
		if_link_state_change(ifp, LINK_STATE_DOWN);
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		return;
	}
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	if_link_state_change(ifp, LINK_STATE_UP);
}
/*
 * Resolve and reserve the cloner unit for a new pair, and rewrite the
 * caller's name buffer to "<base><unit>" while verifying that both the
 * 'a' and 'b' suffixed names are still free.  On success *punit holds
 * the reserved unit; on failure the unit is released and an errno
 * (ENOSPC on name overflow, EEXIST on collision) is returned.
 */
static int
epair_handle_unit(struct if_clone *ifc, char *name, size_t len, int *punit)
{
	int error = 0, unit, wildcard;
	char *dp;

	/* Try to see if a special unit was requested. */
	error = ifc_name2unit(name, &unit);
	if (error != 0)
		return (error);
	wildcard = (unit < 0);

	error = ifc_alloc_unit(ifc, &unit);
	if (error != 0)
		return (error);

	/*
	 * If no unit had been given, we need to adjust the ifName.
	 * Also make sure there is space for our extra [ab] suffix.
	 */
	for (dp = name; *dp != '\0'; dp++);	/* find end of name */
	if (wildcard) {
		int slen = snprintf(dp, len - (dp - name), "%d", unit);

		if (slen > len - (dp - name) - 1) {
			/* ifName too long. */
			error = ENOSPC;
			goto done;
		}
		dp += slen;
	}
	if (len - (dp - name) - 1 < 1) {
		/* No space left for our [ab] suffix. */
		error = ENOSPC;
		goto done;
	}
	*dp = 'b';
	/* Must not change dp so we can replace 'a' by 'b' later. */
	*(dp+1) = '\0';

	/* Check if 'a' and 'b' interfaces already exist. */
	if (ifunit(name) != NULL) {
		error = EEXIST;
		goto done;
	}

	*dp = 'a';
	if (ifunit(name) != NULL) {
		error = EEXIST;
		goto done;
	}
	*punit = unit;
done:
	if (error != 0)
		ifc_free_unit(ifc, unit);

	return (error);
}
/*
 * Create both interfaces of an epair: reserve the unit, allocate and
 * cross-link the two softcs, attach "<n>a" and "<n>b", and bring both
 * links up.  *ifpp receives the 'a' interface for the cloner framework.
 * NOTE(review): the '-' prefixed lines are unified-diff residue (the
 * removed allocation-failure handling); preserved verbatim.
 */
static int
epair_clone_create(struct if_clone *ifc, char *name, size_t len,
    struct ifc_data *ifd, struct ifnet **ifpp)
{
	struct epair_softc *sca, *scb;
	struct ifnet *ifp;
	char *dp;
	int error, unit;
	uint8_t eaddr[ETHER_ADDR_LEN];	/* 00:00:00:00:00:00 */

	error = epair_handle_unit(ifc, name, len, &unit);
	if (error != 0)
		return (error);

	/* Allocate memory for both [ab] interfaces */
	sca = epair_alloc_sc(ifc);
	scb = epair_alloc_sc(ifc);
-	if (sca == NULL || scb == NULL) {
-		epair_free_sc(sca);
-		epair_free_sc(scb);
-		ifc_free_unit(ifc, unit);
-		return (ENOSPC);
-	}

	/*
	 * Cross-reference the interfaces so we will be able to free both.
	 */
	sca->oifp = scb->ifp;
	scb->oifp = sca->ifp;

	/* Finish initialization of interface <n>a. */
	ifp = sca->ifp;
	epair_setup_ifp(sca, name, unit);
	epair_generate_mac(sca, eaddr);

	ether_ifattach(ifp, eaddr);

	/* Swap the name and finish initialization of interface <n>b. */
	dp = name + strlen(name) - 1;
	*dp = 'b';

	epair_setup_ifp(scb, name, unit);

	ifp = scb->ifp;
	/* We need to play some tricks here for the second interface. */
	strlcpy(name, epairname, len);
	/* Correctly set the name for the cloner list. */
	strlcpy(name, scb->ifp->if_xname, len);

	epair_clone_add(ifc, scb);

	/*
	 * Restore name to <n>a as the ifp for this will go into the
	 * cloner list for the initial call.
	 */
	strlcpy(name, sca->ifp->if_xname, len);

	/* Tell the world, that we are ready to rock. */
	epair_set_state(sca->ifp, true);
	epair_set_state(scb->ifp, true);

	*ifpp = sca->ifp;

	return (0);
}
/*
 * Flush and free every packet still sitting in the softc's queues.
 */
static void
epair_drain_rings(struct epair_softc *sc)
{
	struct mbuf *m, *next;
	int i;

	for (i = 0; i < sc->num_queues; i++) {
		struct epair_queue *q = &sc->queues[i];

		/* Detach the whole chain under the queue lock... */
		mtx_lock(&q->mtx);
		m = mbufq_flush(&q->q);
		mtx_unlock(&q->mtx);

		/* ...then free it without holding the lock. */
		while (m != NULL) {
			next = m->m_nextpkt;
			m_freem(m);
			m = next;
		}
	}
}
/*
 * Destroy a pair.  Called once for the interface the user named; it
 * recursively destroys the peer via if_clone_destroyif() (which ends up
 * here again with if_softc already NULLed, hence the early return).
 */
static int
epair_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
{
	struct ifnet *oifp;
	struct epair_softc *sca, *scb;
	int unit, error;

	/*
	 * In case we called into if_clone_destroyif() ourselves
	 * again to remove the second interface, the softc will be
	 * NULL. In that case do not do anything but return success.
	 */
	if (ifp->if_softc == NULL)
		return (0);

	unit = ifp->if_dunit;
	sca = ifp->if_softc;
	oifp = sca->oifp;
	scb = oifp->if_softc;

	/* First get the interfaces down and detached. */
	epair_set_state(ifp, false);
	epair_set_state(oifp, false);

	ether_ifdetach(ifp);
	ether_ifdetach(oifp);

	/* Then free any queued packets and all the resources. */
	CURVNET_SET_QUIET(oifp->if_vnet);
	epair_drain_rings(scb);
	oifp->if_softc = NULL;	/* marker for the recursive call above */
	error = if_clone_destroyif(ifc, oifp);
	if (error)
		panic("%s: if_clone_destroyif() for our 2nd iface failed: %d",
		    __func__, error);
	epair_free_sc(scb);
	CURVNET_RESTORE();
	epair_drain_rings(sca);
	epair_free_sc(sca);

	/* Last free the cloner unit. */
	ifc_free_unit(ifc, unit);

	return (0);
}
/*
 * Per-VNET initialization: register the epair cloner with custom
 * match/create/destroy callbacks (needed to manage both interfaces of
 * a pair together).
 */
static void
vnet_epair_init(const void *unused __unused)
{
	struct if_clone_addreq req = {
		.match_f = epair_clone_match,
		.create_f = epair_clone_create,
		.destroy_f = epair_clone_destroy,
	};

	V_epair_cloner = ifc_attach_cloner(epairname, &req);
}
VNET_SYSINIT(vnet_epair_init, SI_SUB_PSEUDO, SI_ORDER_ANY,
    vnet_epair_init, NULL);
/* Per-VNET detach: unregister the epair cloner for this vnet. */
static void
vnet_epair_uninit(const void *unused __unused)
{
ifc_detach_cloner(V_epair_cloner);
}
VNET_SYSUNINIT(vnet_epair_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY,
vnet_epair_uninit, NULL);
/*
 * Module-load setup: create the taskqueue(s) used for epair packet
 * delivery.  With RSS one taskqueue is created per CPU, with the
 * creating thread temporarily bound to that CPU (for NUMA-friendly
 * allocations) and the queue's thread pinned to it; otherwise a single
 * taskqueue is used.  Always returns 0.
 */
static int
epair_mod_init(void)
{
char name[32];
epair_tasks.tasks = 0;
#ifdef RSS
int cpu;
CPU_FOREACH(cpu) {
cpuset_t cpu_mask;
/* Pin to this CPU so we get appropriate NUMA allocations. */
thread_lock(curthread);
sched_bind(curthread, cpu);
thread_unlock(curthread);
snprintf(name, sizeof(name), "epair_task_%d", cpu);
epair_tasks.tq[cpu] = taskqueue_create(name, M_WAITOK,
taskqueue_thread_enqueue,
&epair_tasks.tq[cpu]);
CPU_SETOF(cpu, &cpu_mask);
taskqueue_start_threads_cpuset(&epair_tasks.tq[cpu], 1, PI_NET,
&cpu_mask, "%s", name);
epair_tasks.tasks++;
}
/* Undo the per-CPU binding from the loop above. */
thread_lock(curthread);
sched_unbind(curthread);
thread_unlock(curthread);
#else
snprintf(name, sizeof(name), "epair_task");
epair_tasks.tq[0] = taskqueue_create(name, M_WAITOK,
taskqueue_thread_enqueue,
&epair_tasks.tq[0]);
taskqueue_start_threads(&epair_tasks.tq[0], 1, PI_NET, "%s", name);
epair_tasks.tasks = 1;
#endif
return (0);
}
/*
 * Module-unload teardown: drain and free every taskqueue created by
 * epair_mod_init().
 */
static void
epair_mod_cleanup(void)
{
	int idx;

	idx = 0;
	while (idx < epair_tasks.tasks) {
		taskqueue_drain_all(epair_tasks.tq[idx]);
		taskqueue_free(epair_tasks.tq[idx]);
		idx++;
	}
}
/*
 * Module event handler: initialize the global lock and taskqueues on
 * load, tear them down on unload.  Other events return EOPNOTSUPP.
 */
static int
epair_modevent(module_t mod, int type, void *data)
{
int ret;
switch (type) {
case MOD_LOAD:
EPAIR_LOCK_INIT();
ret = epair_mod_init();
if (ret != 0)
return (ret);
if (bootverbose)
printf("%s: %s initialized.\n", __func__, epairname);
break;
case MOD_UNLOAD:
epair_mod_cleanup();
EPAIR_LOCK_DESTROY();
if (bootverbose)
printf("%s: %s unloaded.\n", __func__, epairname);
break;
default:
return (EOPNOTSUPP);
}
return (0);
}
/* Module glue: register if_epair with the pseudo-device subsystem. */
static moduledata_t epair_mod = {
"if_epair",
epair_modevent,
0
};
DECLARE_MODULE(if_epair, epair_mod, SI_SUB_PSEUDO, SI_ORDER_MIDDLE);
MODULE_VERSION(if_epair, 3);
diff --git a/sys/net/if_lagg.c b/sys/net/if_lagg.c
index e323bb01b977..2f558fba32de 100644
--- a/sys/net/if_lagg.c
+++ b/sys/net/if_lagg.c
@@ -1,2725 +1,2721 @@
/* $OpenBSD: if_trunk.c,v 1.30 2007/01/31 06:20:19 reyk Exp $ */
/*
* Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
* Copyright (c) 2014, 2016 Marcelo Araujo <araujo@FreeBSD.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_ratelimit.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/eventhandler.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/bpf.h>
#include <net/route.h>
#include <net/vnet.h>
#include <net/infiniband.h>
#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/ip.h>
#endif
#ifdef INET
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#endif
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_var.h>
#include <netinet6/in6_ifattach.h>
#endif
#include <net/if_vlan_var.h>
#include <net/if_lagg.h>
#include <net/ieee8023ad_lacp.h>
#ifdef DEV_NETMAP
MODULE_DEPEND(if_lagg, netmap, 1, 1, 1);
#endif
#define LAGG_SX_INIT(_sc) sx_init(&(_sc)->sc_sx, "if_lagg sx")
#define LAGG_SX_DESTROY(_sc) sx_destroy(&(_sc)->sc_sx)
#define LAGG_XLOCK(_sc) sx_xlock(&(_sc)->sc_sx)
#define LAGG_XUNLOCK(_sc) sx_xunlock(&(_sc)->sc_sx)
#define LAGG_XLOCK_ASSERT(_sc) sx_assert(&(_sc)->sc_sx, SA_XLOCKED)
#define LAGG_SLOCK(_sc) sx_slock(&(_sc)->sc_sx)
#define LAGG_SUNLOCK(_sc) sx_sunlock(&(_sc)->sc_sx)
#define LAGG_SXLOCK_ASSERT(_sc) sx_assert(&(_sc)->sc_sx, SA_LOCKED)
/* Special flags we should propagate to the lagg ports. */
static struct {
int flag;
int (*func)(struct ifnet *, int);
} lagg_pflags[] = {
{IFF_PROMISC, ifpromisc},
{IFF_ALLMULTI, if_allmulti},
{0, NULL}
};
struct lagg_snd_tag {
struct m_snd_tag com;
struct m_snd_tag *tag;
};
VNET_DEFINE_STATIC(SLIST_HEAD(__trhead, lagg_softc), lagg_list); /* list of laggs */
#define V_lagg_list VNET(lagg_list)
VNET_DEFINE_STATIC(struct mtx, lagg_list_mtx);
#define V_lagg_list_mtx VNET(lagg_list_mtx)
#define LAGG_LIST_LOCK_INIT(x) mtx_init(&V_lagg_list_mtx, \
"if_lagg list", NULL, MTX_DEF)
#define LAGG_LIST_LOCK_DESTROY(x) mtx_destroy(&V_lagg_list_mtx)
#define LAGG_LIST_LOCK(x) mtx_lock(&V_lagg_list_mtx)
#define LAGG_LIST_UNLOCK(x) mtx_unlock(&V_lagg_list_mtx)
static eventhandler_tag lagg_detach_cookie = NULL;
static int lagg_clone_create(struct if_clone *, char *, size_t,
struct ifc_data *, struct ifnet **);
static int lagg_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);
VNET_DEFINE_STATIC(struct if_clone *, lagg_cloner);
#define V_lagg_cloner VNET(lagg_cloner)
static const char laggname[] = "lagg";
static MALLOC_DEFINE(M_LAGG, laggname, "802.3AD Link Aggregation Interface");
static void lagg_capabilities(struct lagg_softc *);
static int lagg_port_create(struct lagg_softc *, struct ifnet *);
static int lagg_port_destroy(struct lagg_port *, int);
static struct mbuf *lagg_input_ethernet(struct ifnet *, struct mbuf *);
static struct mbuf *lagg_input_infiniband(struct ifnet *, struct mbuf *);
static void lagg_linkstate(struct lagg_softc *);
static void lagg_port_state(struct ifnet *, int);
static int lagg_port_ioctl(struct ifnet *, u_long, caddr_t);
static int lagg_port_output(struct ifnet *, struct mbuf *,
const struct sockaddr *, struct route *);
static void lagg_port_ifdetach(void *arg __unused, struct ifnet *);
#ifdef LAGG_PORT_STACKING
static int lagg_port_checkstacking(struct lagg_softc *);
#endif
static void lagg_port2req(struct lagg_port *, struct lagg_reqport *);
static void lagg_if_updown(struct lagg_softc *, bool);
static void lagg_init(void *);
static void lagg_stop(struct lagg_softc *);
static int lagg_ioctl(struct ifnet *, u_long, caddr_t);
#if defined(KERN_TLS) || defined(RATELIMIT)
static int lagg_snd_tag_alloc(struct ifnet *,
union if_snd_tag_alloc_params *,
struct m_snd_tag **);
static int lagg_snd_tag_modify(struct m_snd_tag *,
union if_snd_tag_modify_params *);
static int lagg_snd_tag_query(struct m_snd_tag *,
union if_snd_tag_query_params *);
static void lagg_snd_tag_free(struct m_snd_tag *);
static struct m_snd_tag *lagg_next_snd_tag(struct m_snd_tag *);
static void lagg_ratelimit_query(struct ifnet *,
struct if_ratelimit_query_results *);
#endif
static int lagg_setmulti(struct lagg_port *);
static int lagg_clrmulti(struct lagg_port *);
static void lagg_setcaps(struct lagg_port *, int cap, int cap2);
static int lagg_setflag(struct lagg_port *, int, int,
int (*func)(struct ifnet *, int));
static int lagg_setflags(struct lagg_port *, int status);
static uint64_t lagg_get_counter(struct ifnet *ifp, ift_counter cnt);
static int lagg_transmit_ethernet(struct ifnet *, struct mbuf *);
static int lagg_transmit_infiniband(struct ifnet *, struct mbuf *);
static void lagg_qflush(struct ifnet *);
static int lagg_media_change(struct ifnet *);
static void lagg_media_status(struct ifnet *, struct ifmediareq *);
static struct lagg_port *lagg_link_active(struct lagg_softc *,
struct lagg_port *);
/* Simple round robin */
static void lagg_rr_attach(struct lagg_softc *);
static int lagg_rr_start(struct lagg_softc *, struct mbuf *);
/* Active failover */
static int lagg_fail_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_fail_input(struct lagg_softc *, struct lagg_port *,
struct mbuf *);
/* Loadbalancing */
static void lagg_lb_attach(struct lagg_softc *);
static void lagg_lb_detach(struct lagg_softc *);
static int lagg_lb_port_create(struct lagg_port *);
static void lagg_lb_port_destroy(struct lagg_port *);
static int lagg_lb_start(struct lagg_softc *, struct mbuf *);
static int lagg_lb_porttable(struct lagg_softc *, struct lagg_port *);
/* Broadcast */
static int lagg_bcast_start(struct lagg_softc *, struct mbuf *);
/* 802.3ad LACP */
static void lagg_lacp_attach(struct lagg_softc *);
static void lagg_lacp_detach(struct lagg_softc *);
static int lagg_lacp_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *,
struct mbuf *);
static void lagg_lacp_lladdr(struct lagg_softc *);
/* Default input */
static struct mbuf *lagg_default_input(struct lagg_softc *, struct lagg_port *,
struct mbuf *);
/*
 * lagg protocol table.
 *
 * Entries are indexed directly by protocol number (the pr_num field
 * matches each entry's array position), so the order here must track
 * the lagg_proto enumeration.  pr_start and pr_input are invoked
 * without NULL checks by lagg_proto_start()/lagg_proto_input(); all
 * other hooks are optional and NULL-checked by their wrappers.
 */
static const struct lagg_proto {
lagg_proto pr_num;
void (*pr_attach)(struct lagg_softc *);
void (*pr_detach)(struct lagg_softc *);
int (*pr_start)(struct lagg_softc *, struct mbuf *);
struct mbuf * (*pr_input)(struct lagg_softc *, struct lagg_port *,
struct mbuf *);
int (*pr_addport)(struct lagg_port *);
void (*pr_delport)(struct lagg_port *);
void (*pr_linkstate)(struct lagg_port *);
void (*pr_init)(struct lagg_softc *);
void (*pr_stop)(struct lagg_softc *);
void (*pr_lladdr)(struct lagg_softc *);
void (*pr_request)(struct lagg_softc *, void *);
void (*pr_portreq)(struct lagg_port *, void *);
} lagg_protos[] = {
{
.pr_num = LAGG_PROTO_NONE
},
{
.pr_num = LAGG_PROTO_ROUNDROBIN,
.pr_attach = lagg_rr_attach,
.pr_start = lagg_rr_start,
.pr_input = lagg_default_input,
},
{
.pr_num = LAGG_PROTO_FAILOVER,
.pr_start = lagg_fail_start,
.pr_input = lagg_fail_input,
},
{
.pr_num = LAGG_PROTO_LOADBALANCE,
.pr_attach = lagg_lb_attach,
.pr_detach = lagg_lb_detach,
.pr_start = lagg_lb_start,
.pr_input = lagg_default_input,
.pr_addport = lagg_lb_port_create,
.pr_delport = lagg_lb_port_destroy,
},
{
.pr_num = LAGG_PROTO_LACP,
.pr_attach = lagg_lacp_attach,
.pr_detach = lagg_lacp_detach,
.pr_start = lagg_lacp_start,
.pr_input = lagg_lacp_input,
.pr_addport = lacp_port_create,
.pr_delport = lacp_port_destroy,
.pr_linkstate = lacp_linkstate,
.pr_init = lacp_init,
.pr_stop = lacp_stop,
.pr_lladdr = lagg_lacp_lladdr,
.pr_request = lacp_req,
.pr_portreq = lacp_portreq,
},
{
.pr_num = LAGG_PROTO_BROADCAST,
.pr_start = lagg_bcast_start,
.pr_input = lagg_default_input,
},
};
SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, OID_AUTO, lagg, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"Link Aggregation");
/* Allow input on any failover links */
VNET_DEFINE_STATIC(int, lagg_failover_rx_all);
#define V_lagg_failover_rx_all VNET(lagg_failover_rx_all)
SYSCTL_INT(_net_link_lagg, OID_AUTO, failover_rx_all, CTLFLAG_RW | CTLFLAG_VNET,
&VNET_NAME(lagg_failover_rx_all), 0,
"Accept input from any interface in a failover lagg");
/* Default value for using flowid */
VNET_DEFINE_STATIC(int, def_use_flowid) = 0;
#define V_def_use_flowid VNET(def_use_flowid)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_use_flowid,
CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(def_use_flowid), 0,
"Default setting for using flow id for load sharing");
/* Default value for using numa */
VNET_DEFINE_STATIC(int, def_use_numa) = 1;
#define V_def_use_numa VNET(def_use_numa)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_use_numa,
CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(def_use_numa), 0,
"Use numa to steer flows");
/* Default value for flowid shift */
VNET_DEFINE_STATIC(int, def_flowid_shift) = 16;
#define V_def_flowid_shift VNET(def_flowid_shift)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_flowid_shift,
CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(def_flowid_shift), 0,
"Default setting for flowid shift for load sharing");
/*
 * Per-VNET attach: initialize the global lagg list and its lock, then
 * register the lagg cloner for this network stack instance.
 */
static void
vnet_lagg_init(const void *unused __unused)
{
LAGG_LIST_LOCK_INIT();
SLIST_INIT(&V_lagg_list);
struct if_clone_addreq req = {
.create_f = lagg_clone_create,
.destroy_f = lagg_clone_destroy,
.flags = IFC_F_AUTOUNIT,
};
V_lagg_cloner = ifc_attach_cloner(laggname, &req);
}
VNET_SYSINIT(vnet_lagg_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
vnet_lagg_init, NULL);
/* Per-VNET detach: unregister the cloner and destroy the list lock. */
static void
vnet_lagg_uninit(const void *unused __unused)
{
ifc_detach_cloner(V_lagg_cloner);
LAGG_LIST_LOCK_DESTROY();
}
VNET_SYSUNINIT(vnet_lagg_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY,
vnet_lagg_uninit, NULL);
/*
 * Module event handler.  On load, publish the lagg input/linkstate
 * function pointers consumed by the ethernet/infiniband input paths and
 * register for interface departure events; on unload, undo both.
 */
static int
lagg_modevent(module_t mod, int type, void *data)
{
switch (type) {
case MOD_LOAD:
lagg_input_ethernet_p = lagg_input_ethernet;
lagg_input_infiniband_p = lagg_input_infiniband;
lagg_linkstate_p = lagg_port_state;
lagg_detach_cookie = EVENTHANDLER_REGISTER(
ifnet_departure_event, lagg_port_ifdetach, NULL,
EVENTHANDLER_PRI_ANY);
break;
case MOD_UNLOAD:
EVENTHANDLER_DEREGISTER(ifnet_departure_event,
lagg_detach_cookie);
lagg_input_ethernet_p = NULL;
lagg_input_infiniband_p = NULL;
lagg_linkstate_p = NULL;
break;
default:
return (EOPNOTSUPP);
}
return (0);
}
/* Module glue: register if_lagg with the pseudo-device subsystem. */
static moduledata_t lagg_mod = {
"if_lagg",
lagg_modevent,
0
};
DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_lagg, 1);
MODULE_DEPEND(if_lagg, if_infiniband, 1, 1, 1);
/*
 * Attach protocol @pr to @sc.  The softc must currently have no
 * protocol attached; the optional pr_attach hook runs before sc_proto
 * is set.
 */
static void
lagg_proto_attach(struct lagg_softc *sc, lagg_proto pr)
{
LAGG_XLOCK_ASSERT(sc);
KASSERT(sc->sc_proto == LAGG_PROTO_NONE, ("%s: sc %p has proto",
__func__, sc));
if (sc->sc_ifflags & IFF_DEBUG)
if_printf(sc->sc_ifp, "using proto %u\n", pr);
if (lagg_protos[pr].pr_attach != NULL)
lagg_protos[pr].pr_attach(sc);
sc->sc_proto = pr;
}
/*
 * Detach the current protocol from @sc.  sc_proto is reset to
 * LAGG_PROTO_NONE before the optional pr_detach hook runs.
 */
static void
lagg_proto_detach(struct lagg_softc *sc)
{
lagg_proto pr;
LAGG_XLOCK_ASSERT(sc);
pr = sc->sc_proto;
sc->sc_proto = LAGG_PROTO_NONE;
if (lagg_protos[pr].pr_detach != NULL)
lagg_protos[pr].pr_detach(sc);
}
/*
 * Dispatch an outbound mbuf to the active protocol's start routine.
 * pr_start is a mandatory hook (called without a NULL check).
 */
static inline int
lagg_proto_start(struct lagg_softc *sc, struct mbuf *m)
{
return (lagg_protos[sc->sc_proto].pr_start(sc, m));
}
/*
 * Hand an inbound mbuf received on port @lp to the active protocol's
 * input routine.  pr_input is a mandatory hook (no NULL check).
 */
static inline struct mbuf *
lagg_proto_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
return (lagg_protos[sc->sc_proto].pr_input(sc, lp, m));
}
/*
 * Let the active protocol know that port @lp is being added.  The
 * pr_addport hook is optional; when the protocol does not implement it
 * the addition trivially succeeds.
 */
static int
lagg_proto_addport(struct lagg_softc *sc, struct lagg_port *lp)
{
	int (*addport)(struct lagg_port *);

	addport = lagg_protos[sc->sc_proto].pr_addport;
	if (addport != NULL)
		return (addport(lp));
	return (0);
}
/* Notify the protocol that port @lp is being removed (optional hook). */
static void
lagg_proto_delport(struct lagg_softc *sc, struct lagg_port *lp)
{
if (lagg_protos[sc->sc_proto].pr_delport != NULL)
lagg_protos[sc->sc_proto].pr_delport(lp);
}
/* Report a link state change on port @lp to the protocol (optional hook). */
static void
lagg_proto_linkstate(struct lagg_softc *sc, struct lagg_port *lp)
{
if (lagg_protos[sc->sc_proto].pr_linkstate != NULL)
lagg_protos[sc->sc_proto].pr_linkstate(lp);
}
/* Protocol hook invoked when the lagg interface is initialized (optional). */
static void
lagg_proto_init(struct lagg_softc *sc)
{
if (lagg_protos[sc->sc_proto].pr_init != NULL)
lagg_protos[sc->sc_proto].pr_init(sc);
}
/* Protocol hook invoked when the lagg interface is stopped (optional). */
static void
lagg_proto_stop(struct lagg_softc *sc)
{
if (lagg_protos[sc->sc_proto].pr_stop != NULL)
lagg_protos[sc->sc_proto].pr_stop(sc);
}
/* Notify the protocol that the lagg link-level address changed (optional hook). */
static void
lagg_proto_lladdr(struct lagg_softc *sc)
{
if (lagg_protos[sc->sc_proto].pr_lladdr != NULL)
lagg_protos[sc->sc_proto].pr_lladdr(sc);
}
/* Pass a protocol-specific request blob @v to the protocol (optional hook). */
static void
lagg_proto_request(struct lagg_softc *sc, void *v)
{
if (lagg_protos[sc->sc_proto].pr_request != NULL)
lagg_protos[sc->sc_proto].pr_request(sc, v);
}
/* Pass a per-port protocol-specific request blob @v to the protocol (optional hook). */
static void
lagg_proto_portreq(struct lagg_softc *sc, struct lagg_port *lp, void *v)
{
if (lagg_protos[sc->sc_proto].pr_portreq != NULL)
lagg_protos[sc->sc_proto].pr_portreq(lp, v);
}
/*
 * This routine is run via a vlan
 * config EVENT: propagate the vlan configuration to every lagg port.
 */
static void
lagg_register_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
struct lagg_softc *sc = ifp->if_softc;
struct lagg_port *lp;
if (ifp->if_softc != arg) /* Not our event */
return;
LAGG_XLOCK(sc);
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
EVENTHANDLER_INVOKE(vlan_config, lp->lp_ifp, vtag);
LAGG_XUNLOCK(sc);
}
/*
 * This routine is run via a vlan
 * unconfig EVENT: propagate the vlan removal to every lagg port.
 */
static void
lagg_unregister_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
struct lagg_softc *sc = ifp->if_softc;
struct lagg_port *lp;
if (ifp->if_softc != arg) /* Not our event */
return;
LAGG_XLOCK(sc);
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
EVENTHANDLER_INVOKE(vlan_unconfig, lp->lp_ifp, vtag);
LAGG_XUNLOCK(sc);
}
/*
 * Create a new lagg interface.  ifd->params may carry a struct
 * iflaggparam selecting the lagg type (ethernet or infiniband; default
 * is ethernet).  Sets up locks, default options, the default protocol,
 * media (ethernet only), ifnet methods, vlan event handlers, and links
 * the softc into the per-VNET lagg list.
 */
static int
lagg_clone_create(struct if_clone *ifc, char *name, size_t len,
struct ifc_data *ifd, struct ifnet **ifpp)
{
struct iflaggparam iflp;
struct lagg_softc *sc;
struct ifnet *ifp;
int if_type;
int error;
static const uint8_t eaddr[LAGG_ADDR_LEN];
if (ifd->params != NULL) {
error = ifc_copyin(ifd, &iflp, sizeof(iflp));
if (error)
return (error);
switch (iflp.lagg_type) {
case LAGG_TYPE_ETHERNET:
if_type = IFT_ETHER;
break;
case LAGG_TYPE_INFINIBAND:
if_type = IFT_INFINIBAND;
break;
default:
return (EINVAL);
}
} else {
if_type = IFT_ETHER;
}
sc = malloc(sizeof(*sc), M_LAGG, M_WAITOK | M_ZERO);
ifp = sc->sc_ifp = if_alloc(if_type);
- if (ifp == NULL) {
- free(sc, M_LAGG);
- return (ENOSPC);
- }
LAGG_SX_INIT(sc);
mtx_init(&sc->sc_mtx, "lagg-mtx", NULL, MTX_DEF);
callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
LAGG_XLOCK(sc);
/* Apply the per-VNET tunable defaults to this softc. */
if (V_def_use_flowid)
sc->sc_opts |= LAGG_OPT_USE_FLOWID;
if (V_def_use_numa)
sc->sc_opts |= LAGG_OPT_USE_NUMA;
sc->flowid_shift = V_def_flowid_shift;
/* Hash all layers by default */
sc->sc_flags = MBUF_HASHFLAG_L2 | MBUF_HASHFLAG_L3 | MBUF_HASHFLAG_L4;
lagg_proto_attach(sc, LAGG_PROTO_DEFAULT);
CK_SLIST_INIT(&sc->sc_ports);
switch (if_type) {
case IFT_ETHER:
/* Initialise pseudo media types */
ifmedia_init(&sc->sc_media, 0, lagg_media_change,
lagg_media_status);
ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
if_initname(ifp, laggname, ifd->unit);
ifp->if_transmit = lagg_transmit_ethernet;
break;
case IFT_INFINIBAND:
if_initname(ifp, laggname, ifd->unit);
ifp->if_transmit = lagg_transmit_infiniband;
break;
default:
break;
}
ifp->if_softc = sc;
ifp->if_qflush = lagg_qflush;
ifp->if_init = lagg_init;
ifp->if_ioctl = lagg_ioctl;
ifp->if_get_counter = lagg_get_counter;
ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
#if defined(KERN_TLS) || defined(RATELIMIT)
ifp->if_snd_tag_alloc = lagg_snd_tag_alloc;
ifp->if_ratelimit_query = lagg_ratelimit_query;
#endif
ifp->if_capenable = ifp->if_capabilities = IFCAP_HWSTATS;
/*
* Attach as an ordinary ethernet device, children will be attached
* as special device IFT_IEEE8023ADLAG or IFT_INFINIBANDLAG.
*/
switch (if_type) {
case IFT_ETHER:
ether_ifattach(ifp, eaddr);
break;
case IFT_INFINIBAND:
infiniband_ifattach(ifp, eaddr, sc->sc_bcast_addr);
break;
default:
break;
}
sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
lagg_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
lagg_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
/* Insert into the global list of laggs */
LAGG_LIST_LOCK();
SLIST_INSERT_HEAD(&V_lagg_list, sc, sc_entries);
LAGG_LIST_UNLOCK();
LAGG_XUNLOCK(sc);
*ifpp = ifp;
return (0);
}
/*
 * Destroy a lagg interface: stop it, destroy every member port, detach
 * the aggregation protocol, detach/free the ifnet and finally free the
 * softc.  Always returns 0.
 */
static int
lagg_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
{
struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
struct lagg_port *lp;
LAGG_XLOCK(sc);
/* Block lagg_port_create() from adding new ports while we tear down. */
sc->sc_destroying = 1;
lagg_stop(sc);
ifp->if_flags &= ~IFF_UP;
EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
/* Shutdown and remove lagg ports */
while ((lp = CK_SLIST_FIRST(&sc->sc_ports)) != NULL)
lagg_port_destroy(lp, 1);
/* Unhook the aggregation protocol */
lagg_proto_detach(sc);
LAGG_XUNLOCK(sc);
switch (ifp->if_type) {
case IFT_ETHER:
ether_ifdetach(ifp);
ifmedia_removeall(&sc->sc_media);
break;
case IFT_INFINIBAND:
infiniband_ifdetach(ifp);
break;
default:
break;
}
if_free(ifp);
LAGG_LIST_LOCK();
SLIST_REMOVE(&V_lagg_list, sc, lagg_softc, sc_entries);
LAGG_LIST_UNLOCK();
mtx_destroy(&sc->sc_mtx);
LAGG_SX_DESTROY(sc);
free(sc, M_LAGG);
return (0);
}
/*
 * Recompute the lagg interface's capabilities, hwassist and TSO limits
 * as the intersection of all member ports', and push the common enabled
 * set back down to the ports.  With no ports everything goes to zero.
 */
static void
lagg_capabilities(struct lagg_softc *sc)
{
struct lagg_port *lp;
int cap, cap2, ena, ena2, pena, pena2;
uint64_t hwa;
struct ifnet_hw_tsomax hw_tsomax;
LAGG_XLOCK_ASSERT(sc);
/* Get common enabled capabilities for the lagg ports */
ena = ena2 = ~0;
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
ena &= lp->lp_ifp->if_capenable;
ena2 &= lp->lp_ifp->if_capenable2;
}
if (CK_SLIST_FIRST(&sc->sc_ports) == NULL)
ena = ena2 = 0;
/*
* Apply common enabled capabilities back to the lagg ports.
* May require several iterations if they are dependent.
*/
do {
pena = ena;
pena2 = ena2;
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
lagg_setcaps(lp, ena, ena2);
ena &= lp->lp_ifp->if_capenable;
ena2 &= lp->lp_ifp->if_capenable2;
}
} while (pena != ena || pena2 != ena2);
/* Get other capabilities from the lagg ports */
cap = cap2 = ~0;
hwa = ~(uint64_t)0;
memset(&hw_tsomax, 0, sizeof(hw_tsomax));
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
cap &= lp->lp_ifp->if_capabilities;
cap2 &= lp->lp_ifp->if_capabilities2;
hwa &= lp->lp_ifp->if_hwassist;
if_hw_tsomax_common(lp->lp_ifp, &hw_tsomax);
}
if (CK_SLIST_FIRST(&sc->sc_ports) == NULL)
cap = cap2 = hwa = 0;
/* Only touch the lagg ifnet when something actually changed. */
if (sc->sc_ifp->if_capabilities != cap ||
sc->sc_ifp->if_capenable != ena ||
sc->sc_ifp->if_capenable2 != ena2 ||
sc->sc_ifp->if_hwassist != hwa ||
if_hw_tsomax_update(sc->sc_ifp, &hw_tsomax) != 0) {
sc->sc_ifp->if_capabilities = cap;
sc->sc_ifp->if_capabilities2 = cap2;
sc->sc_ifp->if_capenable = ena;
sc->sc_ifp->if_capenable2 = ena2;
sc->sc_ifp->if_hwassist = hwa;
getmicrotime(&sc->sc_ifp->if_lastchange);
if (sc->sc_ifflags & IFF_DEBUG)
if_printf(sc->sc_ifp,
"capabilities 0x%08x enabled 0x%08x\n", cap, ena);
}
}
/*
 * Add interface @ifp as a member port of lagg @sc.  Validates the
 * candidate (not the lagg itself, not already in a lagg, type matches,
 * port limit, not a stacked lagg), aligns the MTU, takes over the
 * port's ioctl/output methods, snapshots its counters, inserts it into
 * the sorted port list and runs the protocol's addport hook.  Returns
 * 0 or an errno; on failure the port's original state is restored.
 */
static int
lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
{
struct lagg_softc *sc_ptr;
struct lagg_port *lp, *tlp;
struct ifreq ifr;
int error, i, oldmtu;
int if_type;
uint64_t *pval;
LAGG_XLOCK_ASSERT(sc);
if (sc->sc_ifp == ifp) {
if_printf(sc->sc_ifp,
"cannot add a lagg to itself as a port\n");
return (EINVAL);
}
if (sc->sc_destroying == 1)
return (ENXIO);
/* Limit the maximal number of lagg ports */
if (sc->sc_count >= LAGG_MAX_PORTS)
return (ENOSPC);
/* Check if port has already been associated to a lagg */
if (ifp->if_lagg != NULL) {
/* Port is already in the current lagg? */
lp = (struct lagg_port *)ifp->if_lagg;
if (lp->lp_softc == sc)
return (EEXIST);
return (EBUSY);
}
switch (sc->sc_ifp->if_type) {
case IFT_ETHER:
/* XXX Disallow non-ethernet interfaces (this should be any of 802) */
if (ifp->if_type != IFT_ETHER && ifp->if_type != IFT_L2VLAN)
return (EPROTONOSUPPORT);
if_type = IFT_IEEE8023ADLAG;
break;
case IFT_INFINIBAND:
/* XXX Disallow non-infiniband interfaces */
if (ifp->if_type != IFT_INFINIBAND)
return (EPROTONOSUPPORT);
if_type = IFT_INFINIBANDLAG;
break;
default:
break;
}
/* Allow the first Ethernet member to define the MTU */
oldmtu = -1;
if (CK_SLIST_EMPTY(&sc->sc_ports)) {
sc->sc_ifp->if_mtu = ifp->if_mtu;
} else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
if (ifp->if_ioctl == NULL) {
if_printf(sc->sc_ifp, "cannot change MTU for %s\n",
ifp->if_xname);
return (EINVAL);
}
/* Remember the old MTU so error paths can restore it below. */
oldmtu = ifp->if_mtu;
strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name));
ifr.ifr_mtu = sc->sc_ifp->if_mtu;
error = (*ifp->if_ioctl)(ifp, SIOCSIFMTU, (caddr_t)&ifr);
if (error != 0) {
if_printf(sc->sc_ifp, "invalid MTU for %s\n",
ifp->if_xname);
return (error);
}
ifr.ifr_mtu = oldmtu;
}
lp = malloc(sizeof(struct lagg_port), M_LAGG, M_WAITOK | M_ZERO);
lp->lp_softc = sc;
/* Check if port is a stacked lagg */
LAGG_LIST_LOCK();
SLIST_FOREACH(sc_ptr, &V_lagg_list, sc_entries) {
if (ifp == sc_ptr->sc_ifp) {
LAGG_LIST_UNLOCK();
free(lp, M_LAGG);
if (oldmtu != -1)
(*ifp->if_ioctl)(ifp, SIOCSIFMTU,
(caddr_t)&ifr);
return (EINVAL);
/* XXX disable stacking for the moment, its untested */
#ifdef LAGG_PORT_STACKING
lp->lp_flags |= LAGG_PORT_STACK;
if (lagg_port_checkstacking(sc_ptr) >=
LAGG_MAX_STACKING) {
LAGG_LIST_UNLOCK();
free(lp, M_LAGG);
if (oldmtu != -1)
(*ifp->if_ioctl)(ifp, SIOCSIFMTU,
(caddr_t)&ifr);
return (E2BIG);
}
#endif
}
}
LAGG_LIST_UNLOCK();
if_ref(ifp);
lp->lp_ifp = ifp;
/* Save the port's address and capabilities for restore on destroy. */
bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ifp->if_addrlen);
lp->lp_ifcapenable = ifp->if_capenable;
if (CK_SLIST_EMPTY(&sc->sc_ports)) {
bcopy(IF_LLADDR(ifp), IF_LLADDR(sc->sc_ifp), ifp->if_addrlen);
lagg_proto_lladdr(sc);
EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
} else {
if_setlladdr(ifp, IF_LLADDR(sc->sc_ifp), ifp->if_addrlen);
}
lagg_setflags(lp, 1);
if (CK_SLIST_EMPTY(&sc->sc_ports))
sc->sc_primary = lp;
/* Change the interface type */
lp->lp_iftype = ifp->if_type;
ifp->if_type = if_type;
ifp->if_lagg = lp;
/* Interpose our ioctl/output handlers, saving the originals. */
lp->lp_ioctl = ifp->if_ioctl;
ifp->if_ioctl = lagg_port_ioctl;
lp->lp_output = ifp->if_output;
ifp->if_output = lagg_port_output;
/* Read port counters */
pval = lp->port_counters.val;
for (i = 0; i < IFCOUNTERS; i++, pval++)
*pval = ifp->if_get_counter(ifp, i);
/*
* Insert into the list of ports.
* Keep ports sorted by if_index. It is handy, when configuration
* is predictable and `ifconfig laggN create ...` command
* will lead to the same result each time.
*/
CK_SLIST_FOREACH(tlp, &sc->sc_ports, lp_entries) {
if (tlp->lp_ifp->if_index < ifp->if_index && (
CK_SLIST_NEXT(tlp, lp_entries) == NULL ||
((struct lagg_port*)CK_SLIST_NEXT(tlp, lp_entries))->lp_ifp->if_index >
ifp->if_index))
break;
}
if (tlp != NULL)
CK_SLIST_INSERT_AFTER(tlp, lp, lp_entries);
else
CK_SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
sc->sc_count++;
lagg_setmulti(lp);
if ((error = lagg_proto_addport(sc, lp)) != 0) {
/* Remove the port, without calling pr_delport. */
lagg_port_destroy(lp, 0);
if (oldmtu != -1)
(*ifp->if_ioctl)(ifp, SIOCSIFMTU, (caddr_t)&ifr);
return (error);
}
/* Update lagg capabilities */
lagg_capabilities(sc);
lagg_linkstate(sc);
return (0);
}
#ifdef LAGG_PORT_STACKING
/*
 * Recursively compute the stacking depth of @sc: one plus the maximum
 * depth of any member port that is itself a lagg (LAGG_PORT_STACK).
 */
static int
lagg_port_checkstacking(struct lagg_softc *sc)
{
struct lagg_softc *sc_ptr;
struct lagg_port *lp;
int m = 0;
LAGG_SXLOCK_ASSERT(sc);
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
if (lp->lp_flags & LAGG_PORT_STACK) {
sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
m = MAX(m, lagg_port_checkstacking(sc_ptr));
}
}
return (m + 1);
}
#endif
/*
 * Deferred destructor run as an epoch callback: once the grace period
 * has elapsed, release the ifnet reference and free the port structure.
 */
static void
lagg_port_destroy_cb(epoch_context_t ec)
{
struct lagg_port *lp;
struct ifnet *ifp;
lp = __containerof(ec, struct lagg_port, lp_epoch_ctx);
ifp = lp->lp_ifp;
if_rele(ifp);
free(lp, M_LAGG);
}
/*
 * Remove port @lp from its lagg: restore the member ifnet's original
 * type/methods/lladdr, fold its counters into detached_counters, unlink
 * it, pick a new primary if needed, and schedule the port structure for
 * freeing after an epoch grace period.  @rundelport controls whether
 * the protocol's pr_delport hook is invoked.  Always returns 0.
 */
static int
lagg_port_destroy(struct lagg_port *lp, int rundelport)
{
struct lagg_softc *sc = lp->lp_softc;
struct lagg_port *lp_ptr, *lp0;
struct ifnet *ifp = lp->lp_ifp;
uint64_t *pval, vdiff;
int i;
LAGG_XLOCK_ASSERT(sc);
if (rundelport)
lagg_proto_delport(sc, lp);
if (lp->lp_detaching == 0)
lagg_clrmulti(lp);
/* Restore interface */
ifp->if_type = lp->lp_iftype;
ifp->if_ioctl = lp->lp_ioctl;
ifp->if_output = lp->lp_output;
ifp->if_lagg = NULL;
/* Update detached port counters */
pval = lp->port_counters.val;
for (i = 0; i < IFCOUNTERS; i++, pval++) {
vdiff = ifp->if_get_counter(ifp, i) - *pval;
sc->detached_counters.val[i] += vdiff;
}
/* Finally, remove the port from the lagg */
CK_SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
sc->sc_count--;
/* Update the primary interface */
if (lp == sc->sc_primary) {
uint8_t lladdr[LAGG_ADDR_LEN];
if ((lp0 = CK_SLIST_FIRST(&sc->sc_ports)) == NULL)
bzero(&lladdr, LAGG_ADDR_LEN);
else
bcopy(lp0->lp_lladdr, lladdr, LAGG_ADDR_LEN);
sc->sc_primary = lp0;
if (sc->sc_destroying == 0) {
bcopy(lladdr, IF_LLADDR(sc->sc_ifp), sc->sc_ifp->if_addrlen);
lagg_proto_lladdr(sc);
EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
/*
* Update lladdr for each port (new primary needs update
* as well, to switch from old lladdr to its 'real' one).
* We can skip this if the lagg is being destroyed.
*/
CK_SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
if_setlladdr(lp_ptr->lp_ifp, lladdr,
lp_ptr->lp_ifp->if_addrlen);
}
}
if (lp->lp_ifflags)
if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);
if (lp->lp_detaching == 0) {
lagg_setflags(lp, 0);
lagg_setcaps(lp, lp->lp_ifcapenable, lp->lp_ifcapenable2);
if_setlladdr(ifp, lp->lp_lladdr, ifp->if_addrlen);
}
/*
* free port and release its ifnet reference after a grace period has
* elapsed.
*/
NET_EPOCH_CALL(lagg_port_destroy_cb, &lp->lp_epoch_ctx);
/* Update lagg capabilities */
lagg_capabilities(sc);
lagg_linkstate(sc);
return (0);
}
/*
 * ioctl handler interposed on member ports.  Handles the lagg-specific
 * requests (SIOCGLAGGPORT, capability changes, MTU lockout) and falls
 * back to the port's saved lp_ioctl for everything else.
 */
static int
lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
struct lagg_reqport *rp = (struct lagg_reqport *)data;
struct lagg_softc *sc;
struct lagg_port *lp = NULL;
int error = 0;
/* Should be checked by the caller */
switch (ifp->if_type) {
case IFT_IEEE8023ADLAG:
case IFT_INFINIBANDLAG:
if ((lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL)
goto fallback;
break;
default:
goto fallback;
}
switch (cmd) {
case SIOCGLAGGPORT:
if (rp->rp_portname[0] == '\0' ||
ifunit(rp->rp_portname) != ifp) {
error = EINVAL;
break;
}
LAGG_SLOCK(sc);
if (__predict_true((lp = ifp->if_lagg) != NULL &&
lp->lp_softc == sc))
lagg_port2req(lp, rp);
else
error = ENOENT; /* XXXGL: can happen? */
LAGG_SUNLOCK(sc);
break;
case SIOCSIFCAP:
case SIOCSIFCAPNV:
if (lp->lp_ioctl == NULL) {
error = EINVAL;
break;
}
error = (*lp->lp_ioctl)(ifp, cmd, data);
if (error)
break;
/* Update lagg interface capabilities */
LAGG_XLOCK(sc);
lagg_capabilities(sc);
LAGG_XUNLOCK(sc);
VLAN_CAPABILITIES(sc->sc_ifp);
break;
case SIOCSIFMTU:
/* Do not allow the MTU to be changed once joined */
error = EINVAL;
break;
default:
goto fallback;
}
return (error);
fallback:
if (lp != NULL && lp->lp_ioctl != NULL)
return ((*lp->lp_ioctl)(ifp, cmd, data));
return (EINVAL);
}
/*
 * Requests counter @cnt data.
 *
 * Counter value is calculated the following way:
 * 1) for each port, sum difference between current and "initial" measurements.
 * 2) add lagg logical interface counters.
 * 3) add data from detached_counters array.
 *
 * We also do the following things on ports attach/detach:
 * 1) On port attach we store all counters it has into port_counter array.
 * 2) On port detach we add the difference between "initial" and
 *   current counters data to detached_counters array.
 */
static uint64_t
lagg_get_counter(struct ifnet *ifp, ift_counter cnt)
{
struct epoch_tracker et;
struct lagg_softc *sc;
struct lagg_port *lp;
struct ifnet *lpifp;
uint64_t newval, oldval, vsum;
/* Revise this when we've got non-generic counters. */
KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt));
sc = (struct lagg_softc *)ifp->if_softc;
vsum = 0;
/* Walk the port list under the network epoch. */
NET_EPOCH_ENTER(et);
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
/* Saved attached value */
oldval = lp->port_counters.val[cnt];
/* current value */
lpifp = lp->lp_ifp;
newval = lpifp->if_get_counter(lpifp, cnt);
/* Calculate diff and save new */
vsum += newval - oldval;
}
NET_EPOCH_EXIT(et);
/*
* Add counter data which might be added by upper
* layer protocols operating on logical interface.
*/
vsum += if_get_counter_default(ifp, cnt);
/*
* Add counter data from detached ports counters
*/
vsum += sc->detached_counters.val[cnt];
return (vsum);
}
/*
 * For direct output to child ports.  Only destinations with address
 * family pseudo_AF_HDRCMPLT or AF_UNSPEC are passed through to the
 * port's saved output routine (and only while the port is still
 * attached to a lagg); every other frame is dropped.
 */
static int
lagg_port_output(struct ifnet *ifp, struct mbuf *m,
    const struct sockaddr *dst, struct route *ro)
{
	struct lagg_port *lp = ifp->if_lagg;

	if ((dst->sa_family == pseudo_AF_HDRCMPLT ||
	    dst->sa_family == AF_UNSPEC) && lp != NULL)
		return ((*lp->lp_output)(ifp, m, dst, ro));

	/* drop any other frames */
	m_freem(m);
	return (ENETDOWN);
}
/*
 * ifnet_departure_event handler: when a member interface goes away
 * (and is not merely being renamed), detach it from its lagg.
 */
static void
lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
{
struct lagg_port *lp;
struct lagg_softc *sc;
if ((lp = ifp->if_lagg) == NULL)
return;
/* If the ifnet is just being renamed, don't do anything. */
if (ifp->if_flags & IFF_RENAMING)
return;
sc = lp->lp_softc;
LAGG_XLOCK(sc);
lp->lp_detaching = 1;
lagg_port_destroy(lp, 1);
LAGG_XUNLOCK(sc);
VLAN_CAPABILITIES(sc->sc_ifp);
}
/*
 * Fill a lagg_reqport structure describing port @lp for userland
 * (SIOCGLAGG / SIOCGLAGGPORT).
 */
static void
lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp)
{
	struct lagg_softc *sc = lp->lp_softc;

	strlcpy(rp->rp_ifname, sc->sc_ifname, sizeof(rp->rp_ifname));
	strlcpy(rp->rp_portname, lp->lp_ifp->if_xname, sizeof(rp->rp_portname));
	rp->rp_prio = lp->lp_prio;
	rp->rp_flags = lp->lp_flags;
	/* Let the protocol contribute its own per-port state. */
	lagg_proto_portreq(sc, lp, &rp->rp_psc);

	/* Add protocol specific flags */
	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		if (lp == sc->sc_primary)
			rp->rp_flags |= LAGG_PORT_MASTER;
		if (lp == lagg_link_active(sc, sc->sc_primary))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		break;

	case LAGG_PROTO_ROUNDROBIN:
	case LAGG_PROTO_LOADBALANCE:
	case LAGG_PROTO_BROADCAST:
		if (LAGG_PORTACTIVE(lp))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		break;

	case LAGG_PROTO_LACP:
		/* LACP has a different definition of active */
		if (lacp_isactive(lp))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		if (lacp_iscollecting(lp))
			rp->rp_flags |= LAGG_PORT_COLLECTING;
		if (lacp_isdistributing(lp))
			rp->rp_flags |= LAGG_PORT_DISTRIBUTING;
		break;
	}
}
/*
 * Periodic callout (1 Hz) used only for IFT_INFINIBAND lagg devices:
 * keeps the logical interface's link-level and broadcast addresses in
 * sync with the currently active member port.
 */
static void
lagg_watchdog_infiniband(void *arg)
{
	struct epoch_tracker et;
	struct lagg_softc *sc;
	struct lagg_port *lp;
	struct ifnet *ifp;
	struct ifnet *lp_ifp;

	sc = arg;

	/*
	 * Because infiniband nodes have a fixed MAC address, which is
	 * generated by the so-called GID, we need to regularly update
	 * the link level address of the parent lagg<N> device when
	 * the active port changes. Possibly we could piggy-back on
	 * link up/down events aswell, but using a timer also provides
	 * a guarantee against too frequent events. This operation
	 * does not have to be atomic.
	 */
	NET_EPOCH_ENTER(et);
	lp = lagg_link_active(sc, sc->sc_primary);
	if (lp != NULL) {
		ifp = sc->sc_ifp;
		lp_ifp = lp->lp_ifp;
		/* Only copy and notify when either address actually differs. */
		if (ifp != NULL && lp_ifp != NULL &&
		    (memcmp(IF_LLADDR(ifp), IF_LLADDR(lp_ifp), ifp->if_addrlen) != 0 ||
		     memcmp(sc->sc_bcast_addr, lp_ifp->if_broadcastaddr, ifp->if_addrlen) != 0)) {
			memcpy(IF_LLADDR(ifp), IF_LLADDR(lp_ifp), ifp->if_addrlen);
			memcpy(sc->sc_bcast_addr, lp_ifp->if_broadcastaddr, ifp->if_addrlen);

			/* Notify listeners in the ifnet's vnet context. */
			CURVNET_SET(ifp->if_vnet);
			EVENTHANDLER_INVOKE(iflladdr_event, ifp);
			CURVNET_RESTORE();
		}
	}
	NET_EPOCH_EXIT(et);

	/* Re-arm ourselves for the next second. */
	callout_reset(&sc->sc_watchdog, hz, &lagg_watchdog_infiniband, arg);
}
/*
 * Bring every member port administratively up or down, then poke the
 * port driver with SIOCSIFFLAGS so it re-evaluates its flags.
 */
static void
lagg_if_updown(struct lagg_softc *sc, bool up)
{
	struct lagg_port *port;
	struct ifreq req;

	LAGG_XLOCK_ASSERT(sc);

	memset(&req, 0, sizeof(req));
	CK_SLIST_FOREACH(port, &sc->sc_ports, lp_entries) {
		if (up) {
			if_up(port->lp_ifp);
		} else {
			if_down(port->lp_ifp);
		}

		if (port->lp_ioctl != NULL)
			(*port->lp_ioctl)(port->lp_ifp, SIOCSIFFLAGS,
			    (caddr_t)&req);
	}
}
/*
 * if_init entry point: mark the lagg running, propagate the logical
 * interface's lladdr to all member ports, bring ports up and start
 * the protocol.  Also arms the infiniband watchdog when applicable.
 */
static void
lagg_init(void *xsc)
{
	struct lagg_softc *sc = (struct lagg_softc *)xsc;
	struct ifnet *ifp = sc->sc_ifp;
	struct lagg_port *lp;

	LAGG_XLOCK(sc);
	/* Nothing to do if we are already running. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		LAGG_XUNLOCK(sc);
		return;
	}

	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	/*
	 * Update the port lladdrs if needed.
	 * This might be if_setlladdr() notification
	 * that lladdr has been changed.
	 */
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (memcmp(IF_LLADDR(ifp), IF_LLADDR(lp->lp_ifp),
		    ifp->if_addrlen) != 0)
			if_setlladdr(lp->lp_ifp, IF_LLADDR(ifp), ifp->if_addrlen);
	}

	lagg_if_updown(sc, true);

	lagg_proto_init(sc);

	if (ifp->if_type == IFT_INFINIBAND) {
		/* The watchdog callout is protected by sc_mtx. */
		mtx_lock(&sc->sc_mtx);
		lagg_watchdog_infiniband(sc);
		mtx_unlock(&sc->sc_mtx);
	}

	LAGG_XUNLOCK(sc);
}
/*
 * Stop the lagg: clear the running flag, stop the protocol, cancel the
 * watchdog and bring all member ports down.  The final callout_drain()
 * (after dropping sc_mtx) waits for an in-flight watchdog to finish.
 */
static void
lagg_stop(struct lagg_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	LAGG_XLOCK_ASSERT(sc);

	/* Already stopped? */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	lagg_proto_stop(sc);

	mtx_lock(&sc->sc_mtx);
	callout_stop(&sc->sc_watchdog);
	mtx_unlock(&sc->sc_mtx);

	lagg_if_updown(sc, false);

	callout_drain(&sc->sc_watchdog);
}
/*
 * Ioctl handler for the lagg logical interface.  Handles lagg-specific
 * requests (SIOC[GS]LAGG*, port add/remove, options, hash flags) plus
 * the generic interface requests that must be fanned out to the member
 * ports (flags, multicast, capabilities, MTU).  Unknown requests fall
 * through to ether_ioctl().
 */
static int
lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_reqall *ra = (struct lagg_reqall *)data;
	struct lagg_reqopts *ro = (struct lagg_reqopts *)data;
	struct lagg_reqport *rp = (struct lagg_reqport *)data, rpbuf;
	struct lagg_reqflags *rf = (struct lagg_reqflags *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	struct lagg_port *lp;
	struct ifnet *tpif;
	struct thread *td = curthread;
	char *buf, *outbuf;
	int count, buflen, len, error = 0, oldmtu;

	bzero(&rpbuf, sizeof(rpbuf));

	/* XXX: This can race with lagg_clone_destroy. */

	switch (cmd) {
	case SIOCGLAGG:
		/* Report the protocol and a snapshot of all member ports. */
		LAGG_XLOCK(sc);
		buflen = sc->sc_count * sizeof(struct lagg_reqport);
		outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
		ra->ra_proto = sc->sc_proto;
		lagg_proto_request(sc, &ra->ra_psc);
		count = 0;
		buf = outbuf;
		/* Never copy more than the caller's buffer can hold. */
		len = min(ra->ra_size, buflen);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			if (len < sizeof(rpbuf))
				break;

			lagg_port2req(lp, &rpbuf);
			memcpy(buf, &rpbuf, sizeof(rpbuf));
			count++;
			buf += sizeof(rpbuf);
			len -= sizeof(rpbuf);
		}
		LAGG_XUNLOCK(sc);
		ra->ra_ports = count;
		ra->ra_size = count * sizeof(rpbuf);
		/* Copy out after dropping the lock. */
		error = copyout(outbuf, ra->ra_port, ra->ra_size);
		free(outbuf, M_TEMP);
		break;
	case SIOCSLAGG:
		/* Switch the aggregation protocol (privileged). */
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (ra->ra_proto >= LAGG_PROTO_MAX) {
			error = EPROTONOSUPPORT;
			break;
		}
		/* Infiniband only supports the failover protocol. */
		if (ra->ra_proto != LAGG_PROTO_FAILOVER &&
		    ifp->if_type == IFT_INFINIBAND) {
			error = EPROTONOSUPPORT;
			break;
		}
		LAGG_XLOCK(sc);
		lagg_proto_detach(sc);
		lagg_proto_attach(sc, ra->ra_proto);
		LAGG_XUNLOCK(sc);
		break;
	case SIOCGLAGGOPTS:
		/* Report option flags; LACP state lives in sc_psc. */
		LAGG_XLOCK(sc);
		ro->ro_opts = sc->sc_opts;
		if (sc->sc_proto == LAGG_PROTO_LACP) {
			struct lacp_softc *lsc;

			lsc = (struct lacp_softc *)sc->sc_psc;
			if (lsc->lsc_debug.lsc_tx_test != 0)
				ro->ro_opts |= LAGG_OPT_LACP_TXTEST;
			if (lsc->lsc_debug.lsc_rx_test != 0)
				ro->ro_opts |= LAGG_OPT_LACP_RXTEST;
			if (lsc->lsc_strict_mode != 0)
				ro->ro_opts |= LAGG_OPT_LACP_STRICT;
			if (lsc->lsc_fast_timeout != 0)
				ro->ro_opts |= LAGG_OPT_LACP_FAST_TIMO;

			ro->ro_active = sc->sc_active;
		} else {
			/* For non-LACP the active count is computed here. */
			ro->ro_active = 0;
			CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
				ro->ro_active += LAGG_PORTACTIVE(lp);
		}
		ro->ro_bkt = sc->sc_stride;
		ro->ro_flapping = sc->sc_flapping;
		ro->ro_flowid_shift = sc->flowid_shift;
		LAGG_XUNLOCK(sc);
		break;
	case SIOCSLAGGOPTS:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;

		/*
		 * The stride option was added without defining a corresponding
		 * LAGG_OPT flag, so handle a non-zero value before checking
		 * anything else to preserve compatibility.
		 */
		LAGG_XLOCK(sc);
		if (ro->ro_opts == 0 && ro->ro_bkt != 0) {
			if (sc->sc_proto != LAGG_PROTO_ROUNDROBIN) {
				LAGG_XUNLOCK(sc);
				error = EINVAL;
				break;
			}
			sc->sc_stride = ro->ro_bkt;
		}
		if (ro->ro_opts == 0) {
			LAGG_XUNLOCK(sc);
			break;
		}

		/*
		 * Set options.  LACP options are stored in sc->sc_psc,
		 * not in sc_opts.
		 */
		int valid, lacp;

		/* Negative option values mean "clear this option". */
		switch (ro->ro_opts) {
		case LAGG_OPT_USE_FLOWID:
		case -LAGG_OPT_USE_FLOWID:
		case LAGG_OPT_USE_NUMA:
		case -LAGG_OPT_USE_NUMA:
		case LAGG_OPT_FLOWIDSHIFT:
		case LAGG_OPT_RR_LIMIT:
			valid = 1;
			lacp = 0;
			break;
		case LAGG_OPT_LACP_TXTEST:
		case -LAGG_OPT_LACP_TXTEST:
		case LAGG_OPT_LACP_RXTEST:
		case -LAGG_OPT_LACP_RXTEST:
		case LAGG_OPT_LACP_STRICT:
		case -LAGG_OPT_LACP_STRICT:
		case LAGG_OPT_LACP_FAST_TIMO:
		case -LAGG_OPT_LACP_FAST_TIMO:
			valid = lacp = 1;
			break;
		default:
			valid = lacp = 0;
			break;
		}

		if (valid == 0 ||
		    (lacp == 1 && sc->sc_proto != LAGG_PROTO_LACP)) {
			/* Invalid combination of options specified. */
			error = EINVAL;
			LAGG_XUNLOCK(sc);
			break;	/* Return from SIOCSLAGGOPTS. */
		}

		/*
		 * Store new options into sc->sc_opts except for
		 * FLOWIDSHIFT, RR and LACP options.
		 */
		if (lacp == 0) {
			if (ro->ro_opts == LAGG_OPT_FLOWIDSHIFT)
				sc->flowid_shift = ro->ro_flowid_shift;
			else if (ro->ro_opts == LAGG_OPT_RR_LIMIT) {
				if (sc->sc_proto != LAGG_PROTO_ROUNDROBIN ||
				    ro->ro_bkt == 0) {
					error = EINVAL;
					LAGG_XUNLOCK(sc);
					break;
				}
				sc->sc_stride = ro->ro_bkt;
			} else if (ro->ro_opts > 0)
				sc->sc_opts |= ro->ro_opts;
			else
				sc->sc_opts &= ~ro->ro_opts;
		} else {
			struct lacp_softc *lsc;
			struct lacp_port *lp;

			lsc = (struct lacp_softc *)sc->sc_psc;

			switch (ro->ro_opts) {
			case LAGG_OPT_LACP_TXTEST:
				lsc->lsc_debug.lsc_tx_test = 1;
				break;
			case -LAGG_OPT_LACP_TXTEST:
				lsc->lsc_debug.lsc_tx_test = 0;
				break;
			case LAGG_OPT_LACP_RXTEST:
				lsc->lsc_debug.lsc_rx_test = 1;
				break;
			case -LAGG_OPT_LACP_RXTEST:
				lsc->lsc_debug.lsc_rx_test = 0;
				break;
			case LAGG_OPT_LACP_STRICT:
				lsc->lsc_strict_mode = 1;
				break;
			case -LAGG_OPT_LACP_STRICT:
				lsc->lsc_strict_mode = 0;
				break;
			case LAGG_OPT_LACP_FAST_TIMO:
				/* Flip the timeout bit on every LACP port. */
				LACP_LOCK(lsc);
				LIST_FOREACH(lp, &lsc->lsc_ports, lp_next)
					lp->lp_state |= LACP_STATE_TIMEOUT;
				LACP_UNLOCK(lsc);
				lsc->lsc_fast_timeout = 1;
				break;
			case -LAGG_OPT_LACP_FAST_TIMO:
				LACP_LOCK(lsc);
				LIST_FOREACH(lp, &lsc->lsc_ports, lp_next)
					lp->lp_state &= ~LACP_STATE_TIMEOUT;
				LACP_UNLOCK(lsc);
				lsc->lsc_fast_timeout = 0;
				break;
			}
		}
		LAGG_XUNLOCK(sc);
		break;
	case SIOCGLAGGFLAGS:
		/* Translate internal mbuf hash flags to the uapi LAGG_F_*. */
		rf->rf_flags = 0;
		LAGG_XLOCK(sc);
		if (sc->sc_flags & MBUF_HASHFLAG_L2)
			rf->rf_flags |= LAGG_F_HASHL2;
		if (sc->sc_flags & MBUF_HASHFLAG_L3)
			rf->rf_flags |= LAGG_F_HASHL3;
		if (sc->sc_flags & MBUF_HASHFLAG_L4)
			rf->rf_flags |= LAGG_F_HASHL4;
		LAGG_XUNLOCK(sc);
		break;
	case SIOCSLAGGHASH:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		/* At least one hash layer must be requested. */
		if ((rf->rf_flags & LAGG_F_HASHMASK) == 0) {
			error = EINVAL;
			break;
		}
		LAGG_XLOCK(sc);
		sc->sc_flags = 0;
		if (rf->rf_flags & LAGG_F_HASHL2)
			sc->sc_flags |= MBUF_HASHFLAG_L2;
		if (rf->rf_flags & LAGG_F_HASHL3)
			sc->sc_flags |= MBUF_HASHFLAG_L3;
		if (rf->rf_flags & LAGG_F_HASHL4)
			sc->sc_flags |= MBUF_HASHFLAG_L4;
		LAGG_XUNLOCK(sc);
		break;
	case SIOCGLAGGPORT:
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit_ref(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}

		LAGG_SLOCK(sc);
		/* The named interface must actually be one of our ports. */
		if (__predict_true((lp = tpif->if_lagg) != NULL &&
		    lp->lp_softc == sc))
			lagg_port2req(lp, rp);
		else
			error = ENOENT;	/* XXXGL: can happen? */
		LAGG_SUNLOCK(sc);
		if_rele(tpif);
		break;
	case SIOCSLAGGPORT:
		/* Add a member port (privileged). */
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit_ref(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}
#ifdef INET6
		/*
		 * A laggport interface should not have inet6 address
		 * because two interfaces with a valid link-local
		 * scope zone must not be merged in any form.  This
		 * restriction is needed to prevent violation of
		 * link-local scope zone.  Attempts to add a laggport
		 * interface which has inet6 addresses triggers
		 * removal of all inet6 addresses on the member
		 * interface.
		 */
		if (in6ifa_llaonifp(tpif)) {
			in6_ifdetach(tpif);
				if_printf(sc->sc_ifp,
				    "IPv6 addresses on %s have been removed "
				    "before adding it as a member to prevent "
				    "IPv6 address scope violation.\n",
				    tpif->if_xname);
		}
#endif
		oldmtu = ifp->if_mtu;
		LAGG_XLOCK(sc);
		error = lagg_port_create(sc, tpif);
		LAGG_XUNLOCK(sc);
		if_rele(tpif);

		/*
		 * LAGG MTU may change during addition of the first port.
		 * If it did, do network layer specific procedure.
		 */
		if (ifp->if_mtu != oldmtu)
			if_notifymtu(ifp);

		VLAN_CAPABILITIES(ifp);
		break;
	case SIOCSLAGGDELPORT:
		/* Remove a member port (privileged). */
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit_ref(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}

		LAGG_XLOCK(sc);
		if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
		    lp->lp_softc != sc) {
			error = ENOENT;
			LAGG_XUNLOCK(sc);
			if_rele(tpif);
			break;
		}

		error = lagg_port_destroy(lp, 1);
		LAGG_XUNLOCK(sc);
		if_rele(tpif);
		VLAN_CAPABILITIES(ifp);
		break;
	case SIOCSIFFLAGS:
		/* Set flags on ports too */
		LAGG_XLOCK(sc);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			lagg_setflags(lp, 1);
		}

		if (!(ifp->if_flags & IFF_UP) &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			/*
			 * If interface is marked down and it is running,
			 * then stop and disable it.
			 */
			lagg_stop(sc);
			LAGG_XUNLOCK(sc);
		} else if ((ifp->if_flags & IFF_UP) &&
		    !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			LAGG_XUNLOCK(sc);
			(*ifp->if_init)(sc);
		} else
			LAGG_XUNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Resync the full multicast filter on every port. */
		LAGG_XLOCK(sc);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			lagg_clrmulti(lp);
			lagg_setmulti(lp);
		}
		LAGG_XUNLOCK(sc);
		error = 0;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Infiniband lagg has no ifmedia support. */
		if (ifp->if_type == IFT_INFINIBAND)
			error = EINVAL;
		else
			error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCSIFCAP:
	case SIOCSIFCAPNV:
		/* Propagate capability changes to all ports, best-effort. */
		LAGG_XLOCK(sc);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			if (lp->lp_ioctl != NULL)
				(*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
		}
		lagg_capabilities(sc);
		LAGG_XUNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		error = 0;
		break;

	case SIOCGIFCAPNV:
		error = 0;
		break;

	case SIOCSIFMTU:
		/* Change the MTU on all ports; roll back on any failure. */
		LAGG_XLOCK(sc);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			if (lp->lp_ioctl != NULL)
				error = (*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
			else
				error = EINVAL;
			if (error != 0) {
				if_printf(ifp,
				    "failed to change MTU to %d on port %s, "
				    "reverting all ports to original MTU (%d)\n",
				    ifr->ifr_mtu, lp->lp_ifp->if_xname, ifp->if_mtu);
				break;
			}
		}
		if (error == 0) {
			ifp->if_mtu = ifr->ifr_mtu;
		} else {
			/* set every port back to the original MTU */
			ifr->ifr_mtu = ifp->if_mtu;
			CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
				if (lp->lp_ioctl != NULL)
					(*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
			}
		}
		lagg_capabilities(sc);
		LAGG_XUNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
#if defined(KERN_TLS) || defined(RATELIMIT)
/*
 * Send tag method tables for the lagg pseudo-interface.  Each wraps the
 * real port tag (see struct lagg_snd_tag) and forwards operations to it.
 */
#ifdef RATELIMIT
static const struct if_snd_tag_sw lagg_snd_tag_ul_sw = {
	.snd_tag_modify = lagg_snd_tag_modify,
	.snd_tag_query = lagg_snd_tag_query,
	.snd_tag_free = lagg_snd_tag_free,
	.next_snd_tag = lagg_next_snd_tag,
	.type = IF_SND_TAG_TYPE_UNLIMITED
};

static const struct if_snd_tag_sw lagg_snd_tag_rl_sw = {
	.snd_tag_modify = lagg_snd_tag_modify,
	.snd_tag_query = lagg_snd_tag_query,
	.snd_tag_free = lagg_snd_tag_free,
	.next_snd_tag = lagg_next_snd_tag,
	.type = IF_SND_TAG_TYPE_RATE_LIMIT
};
#endif

#ifdef KERN_TLS
static const struct if_snd_tag_sw lagg_snd_tag_tls_sw = {
	.snd_tag_modify = lagg_snd_tag_modify,
	.snd_tag_query = lagg_snd_tag_query,
	.snd_tag_free = lagg_snd_tag_free,
	.next_snd_tag = lagg_next_snd_tag,
	.type = IF_SND_TAG_TYPE_TLS
};

#ifdef RATELIMIT
static const struct if_snd_tag_sw lagg_snd_tag_tls_rl_sw = {
	.snd_tag_modify = lagg_snd_tag_modify,
	.snd_tag_query = lagg_snd_tag_query,
	.snd_tag_free = lagg_snd_tag_free,
	.next_snd_tag = lagg_next_snd_tag,
	.type = IF_SND_TAG_TYPE_TLS_RATE_LIMIT
};
#endif
#endif

/* Convert an embedded m_snd_tag back to its lagg_snd_tag container. */
static inline struct lagg_snd_tag *
mst_to_lst(struct m_snd_tag *mst)
{
	return (__containerof(mst, struct lagg_snd_tag, com));
}
/*
* Look up the port used by a specific flow. This only works for lagg
* protocols with deterministic port mappings (e.g. not roundrobin).
* In addition protocols which use a hash to map flows to ports must
* be configured to use the mbuf flowid rather than hashing packet
* contents.
*/
static struct lagg_port *
lookup_snd_tag_port(struct ifnet *ifp, uint32_t flowid, uint32_t flowtype,
    uint8_t numa_domain)
{
	struct lagg_softc *sc = ifp->if_softc;
	struct lagg_lb *lb;
	struct lagg_port *lp;
	uint32_t idx;
	int err;

	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		/* Failover always maps every flow to the active primary. */
		return (lagg_link_active(sc, sc->sc_primary));
	case LAGG_PROTO_LOADBALANCE:
	case LAGG_PROTO_LACP:
		/* Both hash protocols need a valid mbuf flowid. */
		if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 ||
		    flowtype == M_HASHTYPE_NONE)
			return (NULL);
		idx = flowid >> sc->flowid_shift;
		if (sc->sc_proto == LAGG_PROTO_LACP)
			return (lacp_select_tx_port_by_hash(sc, idx,
			    numa_domain, &err));
		lb = (struct lagg_lb *)sc->sc_psc;
		lp = lb->lb_ports[idx % sc->sc_count];
		return (lagg_link_active(sc, lp));
	default:
		/* No deterministic flow-to-port mapping for this protocol. */
		return (NULL);
	}
}
/*
 * Allocate a send tag.  A wrapper tag (struct lagg_snd_tag) is created
 * around the real tag from the selected port, except for TLS RX where
 * the port's tag is returned directly.  Returns 0 or an errno.
 */
static int
lagg_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	struct epoch_tracker et;
	const struct if_snd_tag_sw *sw;
	struct lagg_snd_tag *lst;
	struct lagg_port *lp;
	struct ifnet *lp_ifp;
	struct m_snd_tag *mst;
	int error;

	/* Pick the wrapper method table for the requested tag type. */
	switch (params->hdr.type) {
#ifdef RATELIMIT
	case IF_SND_TAG_TYPE_UNLIMITED:
		sw = &lagg_snd_tag_ul_sw;
		break;
	case IF_SND_TAG_TYPE_RATE_LIMIT:
		sw = &lagg_snd_tag_rl_sw;
		break;
#endif
#ifdef KERN_TLS
	case IF_SND_TAG_TYPE_TLS:
		sw = &lagg_snd_tag_tls_sw;
		break;
	case IF_SND_TAG_TYPE_TLS_RX:
		/* Return tag from port interface directly. */
		sw = NULL;
		break;
#ifdef RATELIMIT
	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
		sw = &lagg_snd_tag_tls_rl_sw;
		break;
#endif
#endif
	default:
		return (EOPNOTSUPP);
	}

	/* Resolve the flow's port inside the epoch, then hold the ifnet. */
	NET_EPOCH_ENTER(et);
	lp = lookup_snd_tag_port(ifp, params->hdr.flowid,
	    params->hdr.flowtype, params->hdr.numa_domain);
	if (lp == NULL) {
		NET_EPOCH_EXIT(et);
		return (EOPNOTSUPP);
	}
	if (lp->lp_ifp == NULL) {
		NET_EPOCH_EXIT(et);
		return (EOPNOTSUPP);
	}
	lp_ifp = lp->lp_ifp;
	if_ref(lp_ifp);
	NET_EPOCH_EXIT(et);

	/* Pre-allocate the wrapper before taking the port tag. */
	if (sw != NULL) {
		lst = malloc(sizeof(*lst), M_LAGG, M_NOWAIT);
		if (lst == NULL) {
			if_rele(lp_ifp);
			return (ENOMEM);
		}
	} else
		lst = NULL;

	error = m_snd_tag_alloc(lp_ifp, params, &mst);
	if_rele(lp_ifp);
	if (error) {
		/* free(NULL) is a no-op for the TLS RX case. */
		free(lst, M_LAGG);
		return (error);
	}

	if (sw != NULL) {
		m_snd_tag_init(&lst->com, ifp, sw);
		lst->tag = mst;

		*ppmt = &lst->com;
	} else
		*ppmt = mst;

	return (0);
}
static struct m_snd_tag *
lagg_next_snd_tag(struct m_snd_tag *mst)
{
struct lagg_snd_tag *lst;
lst = mst_to_lst(mst);
return (lst->tag);
}
static int
lagg_snd_tag_modify(struct m_snd_tag *mst,
union if_snd_tag_modify_params *params)
{
struct lagg_snd_tag *lst;
lst = mst_to_lst(mst);
return (lst->tag->sw->snd_tag_modify(lst->tag, params));
}
static int
lagg_snd_tag_query(struct m_snd_tag *mst,
union if_snd_tag_query_params *params)
{
struct lagg_snd_tag *lst;
lst = mst_to_lst(mst);
return (lst->tag->sw->snd_tag_query(lst->tag, params));
}
static void
lagg_snd_tag_free(struct m_snd_tag *mst)
{
	struct lagg_snd_tag *wrapper = mst_to_lst(mst);

	/* Drop our reference on the port tag, then free the wrapper. */
	m_snd_tag_rele(wrapper->tag);
	free(wrapper, M_LAGG);
}
static void
lagg_ratelimit_query(struct ifnet *ifp __unused, struct if_ratelimit_query_results *q)
{
	/*
	 * For lagg, we have an indirect
	 * interface. The caller needs to
	 * get a ratelimit tag on the actual
	 * interface the flow will go on.
	 */
	q->flags = RT_IS_INDIRECT;
	q->rate_table = NULL;
	q->number_of_rates = 0;
	q->max_flows = 0;
}
#endif
/*
 * Program the lagg interface's multicast filter onto member port @lp.
 * Phase 1 (under the address lock): snapshot the lagg's AF_LINK
 * multicast addresses into lp_mc_head.  Phase 2 (lock dropped, because
 * if_addmulti() may sleep): register each snapshot entry with the port.
 * Returns 0 or an errno; on error, entries already on lp_mc_head are
 * cleaned up later by lagg_clrmulti().
 */
static int
lagg_setmulti(struct lagg_port *lp)
{
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *ifp = lp->lp_ifp;
	struct ifnet *scifp = sc->sc_ifp;
	struct lagg_mc *mc;
	struct ifmultiaddr *ifma;
	int error;

	IF_ADDR_WLOCK(scifp);
	CK_STAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* M_NOWAIT because the address write lock is held. */
		mc = malloc(sizeof(struct lagg_mc), M_LAGG, M_NOWAIT);
		if (mc == NULL) {
			IF_ADDR_WUNLOCK(scifp);
			return (ENOMEM);
		}
		bcopy(ifma->ifma_addr, &mc->mc_addr, ifma->ifma_addr->sa_len);
		/* Retarget the copied sockaddr_dl at the member port. */
		mc->mc_addr.sdl_index = ifp->if_index;
		mc->mc_ifma = NULL;
		SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries);
	}
	IF_ADDR_WUNLOCK(scifp);
	SLIST_FOREACH (mc, &lp->lp_mc_head, mc_entries) {
		error = if_addmulti(ifp,
		    (struct sockaddr *)&mc->mc_addr, &mc->mc_ifma);
		if (error)
			return (error);
	}
	return (0);
}
/*
 * Remove every multicast membership this port inherited from the lagg
 * interface and free the bookkeeping entries.  Always returns 0.
 */
static int
lagg_clrmulti(struct lagg_port *lp)
{
	struct lagg_mc *mc;

	LAGG_XLOCK_ASSERT(lp->lp_softc);

	for (;;) {
		mc = SLIST_FIRST(&lp->lp_mc_head);
		if (mc == NULL)
			break;
		SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
		/* Skip if_delmulti_ifma() while the port is detaching. */
		if (mc->mc_ifma != NULL && lp->lp_detaching == 0)
			if_delmulti_ifma(mc->mc_ifma);
		free(mc, M_LAGG);
	}
	return (0);
}
/*
 * Push the lagg's desired capability sets down to port @lp, using the
 * nvlist-style SIOCSIFCAPNV when the port driver supports it and the
 * legacy SIOCSIFCAP otherwise.  No-op if the port already matches or
 * exposes no ioctl handler.
 */
static void
lagg_setcaps(struct lagg_port *lp, int cap, int cap2)
{
	struct ifreq ifr;
	struct siocsifcapnv_driver_data drv_ioctl_data;

	/* Already in sync: nothing to do. */
	if (lp->lp_ifp->if_capenable == cap &&
	    lp->lp_ifp->if_capenable2 == cap2)
		return;
	if (lp->lp_ioctl == NULL)
		return;
	/* XXX */
	if ((lp->lp_ifp->if_capabilities & IFCAP_NV) != 0) {
		drv_ioctl_data.reqcap = cap;
		drv_ioctl_data.reqcap2 = cap2;
		drv_ioctl_data.nvcap = NULL;
		(*lp->lp_ioctl)(lp->lp_ifp, SIOCSIFCAPNV,
		    (caddr_t)&drv_ioctl_data);
	} else {
		/* Legacy path only carries the first capability word. */
		ifr.ifr_reqcap = cap;
		(*lp->lp_ioctl)(lp->lp_ifp, SIOCSIFCAP, (caddr_t)&ifr);
	}
}
/* Handle a ref counted flag that should be set on the lagg port as well */
/* Handle a ref counted flag that should be set on the lagg port as well */
static int
lagg_setflag(struct lagg_port *lp, int flag, int status,
    int (*func)(struct ifnet *, int))
{
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *scifp = sc->sc_ifp;
	struct ifnet *ifp = lp->lp_ifp;
	int error;

	LAGG_XLOCK_ASSERT(sc);

	status = status ? (scifp->if_flags & flag) : 0;
	/* Now "status" contains the flag value or 0 */

	/*
	 * See if recorded ports status is different from what
	 * we want it to be.  If it is, flip it.  We record ports
	 * status in lp_ifflags so that we won't clear ports flag
	 * we haven't set.  In fact, we don't clear or set ports
	 * flags directly, but get or release references to them.
	 * That's why we can be sure that recorded flags still are
	 * in accord with actual ports flags.
	 */
	if (status != (lp->lp_ifflags & flag)) {
		/* func is e.g. ifpromisc()/if_allmulti(): takes a refcount. */
		error = (*func)(ifp, status);
		if (error)
			return (error);
		lp->lp_ifflags &= ~flag;
		lp->lp_ifflags |= status;
	}
	return (0);
}
/*
* Handle IFF_* flags that require certain changes on the lagg port
* if "status" is true, update ports flags respective to the lagg
* if "status" is false, forcedly clear the flags set on port.
*/
static int
lagg_setflags(struct lagg_port *lp, int status)
{
int error, i;
for (i = 0; lagg_pflags[i].flag; i++) {
error = lagg_setflag(lp, lagg_pflags[i].flag,
status, lagg_pflags[i].func);
if (error)
return (error);
}
return (0);
}
/* if_transmit entry point for Ethernet-type lagg interfaces. */
static int
lagg_transmit_ethernet(struct ifnet *ifp, struct mbuf *m)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;

	NET_EPOCH_ASSERT();
#if defined(KERN_TLS) || defined(RATELIMIT)
	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG)
		MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
#endif
	/* We need a Tx algorithm and at least one port */
	if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENXIO);
	}

	ETHER_BPF_MTAP(ifp, m);
	return (lagg_proto_start(sc, m));
}
/* if_transmit entry point for Infiniband-type lagg interfaces. */
static int
lagg_transmit_infiniband(struct ifnet *ifp, struct mbuf *m)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;

	NET_EPOCH_ASSERT();
#if defined(KERN_TLS) || defined(RATELIMIT)
	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG)
		MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
#endif
	/* We need a Tx algorithm and at least one port */
	if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENXIO);
	}

	infiniband_bpf_mtap(ifp, m);
	return (lagg_proto_start(sc, m));
}
/*
 * The ifp->if_qflush entry point for lagg(4) is no-op: lagg keeps no
 * software transmit queue of its own.
 */
static void
lagg_qflush(struct ifnet *ifp __unused)
{
}
/*
 * Receive path for Ethernet member ports.  Returns the mbuf (with
 * rcvif rewritten by the protocol input) for further processing, or
 * NULL when the packet was consumed or dropped.
 */
static struct mbuf *
lagg_input_ethernet(struct ifnet *ifp, struct mbuf *m)
{
	struct lagg_port *lp = ifp->if_lagg;
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *scifp = sc->sc_ifp;

	NET_EPOCH_ASSERT();
	/* Drop traffic while the lagg is down, detaching or protocol-less. */
	if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    lp->lp_detaching != 0 ||
	    sc->sc_proto == LAGG_PROTO_NONE) {
		m_freem(m);
		return (NULL);
	}

	m = lagg_proto_input(sc, lp, m);
	if (m != NULL) {
		ETHER_BPF_MTAP(scifp, m);

		/* In monitor mode we only tap, never deliver. */
		if ((scifp->if_flags & IFF_MONITOR) != 0) {
			m_freem(m);
			m = NULL;
		}
	}

#ifdef DEV_NETMAP
	/* Hand the packet to netmap when the lagg interface is in that mode. */
	if (m != NULL && scifp->if_capenable & IFCAP_NETMAP) {
		scifp->if_input(scifp, m);
		m = NULL;
	}
#endif	/* DEV_NETMAP */

	return (m);
}
/*
 * Receive path for Infiniband member ports.  Returns the mbuf for
 * further processing, or NULL when it was consumed or dropped.
 */
static struct mbuf *
lagg_input_infiniband(struct ifnet *ifp, struct mbuf *m)
{
	struct lagg_port *port = ifp->if_lagg;
	struct lagg_softc *sc = port->lp_softc;
	struct ifnet *laggifp = sc->sc_ifp;

	NET_EPOCH_ASSERT();

	/* Drop traffic while the lagg is down, detaching or protocol-less. */
	if ((laggifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    port->lp_detaching != 0 || sc->sc_proto == LAGG_PROTO_NONE) {
		m_freem(m);
		return (NULL);
	}

	m = lagg_proto_input(sc, port, m);
	if (m == NULL)
		return (NULL);

	infiniband_bpf_mtap(laggifp, m);
	if ((laggifp->if_flags & IFF_MONITOR) != 0) {
		/* In monitor mode we only tap, never deliver. */
		m_freem(m);
		return (NULL);
	}
	return (m);
}
static int
lagg_media_change(struct ifnet *ifp)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;

	if ((sc->sc_ifflags & IFF_DEBUG) != 0)
		printf("%s\n", __func__);

	/* Media changes on the lagg pseudo-interface are ignored. */
	return (0);
}
/*
 * ifmedia status callback: report the lagg as active when at least one
 * member port is active.  Media type is always auto-selected Ethernet.
 */
static void
lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct epoch_tracker et;
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_port *lp;

	imr->ifm_status = IFM_AVALID;
	imr->ifm_active = IFM_ETHER | IFM_AUTO;

	NET_EPOCH_ENTER(et);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (LAGG_PORTACTIVE(lp))
			imr->ifm_status |= IFM_ACTIVE;
	}
	NET_EPOCH_EXIT(et);
}
/*
 * Recompute the logical interface's link state and baudrate from the
 * member ports.  LACP maintains both itself and is skipped here.
 */
static void
lagg_linkstate(struct lagg_softc *sc)
{
	struct epoch_tracker et;
	struct lagg_port *lp;
	int new_link = LINK_STATE_DOWN;
	uint64_t speed;

	LAGG_XLOCK_ASSERT(sc);

	/* LACP handles link state itself */
	if (sc->sc_proto == LAGG_PROTO_LACP)
		return;

	/* Our link is considered up if at least one of our ports is active */
	NET_EPOCH_ENTER(et);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (lp->lp_ifp->if_link_state == LINK_STATE_UP) {
			new_link = LINK_STATE_UP;
			break;
		}
	}
	NET_EPOCH_EXIT(et);
	if_link_state_change(sc->sc_ifp, new_link);

	/* Update if_baudrate to reflect the max possible speed */
	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		/* Failover can only ever run at the primary's speed. */
		sc->sc_ifp->if_baudrate = sc->sc_primary != NULL ?
		    sc->sc_primary->lp_ifp->if_baudrate : 0;
		break;
	case LAGG_PROTO_ROUNDROBIN:
	case LAGG_PROTO_LOADBALANCE:
	case LAGG_PROTO_BROADCAST:
		/* These protocols spread load: sum all port speeds. */
		speed = 0;
		NET_EPOCH_ENTER(et);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
			speed += lp->lp_ifp->if_baudrate;
		NET_EPOCH_EXIT(et);
		sc->sc_ifp->if_baudrate = speed;
		break;
	case LAGG_PROTO_LACP:
		/* LACP updates if_baudrate itself */
		break;
	}
}
/* Link-state change notification from a member port. */
static void
lagg_port_state(struct ifnet *ifp, int state)
{
	struct lagg_port *port = (struct lagg_port *)ifp->if_lagg;
	struct lagg_softc *sc;

	sc = (port != NULL) ? port->lp_softc : NULL;
	if (sc == NULL)
		return;

	/* Recompute the aggregate link state and notify the protocol. */
	LAGG_XLOCK(sc);
	lagg_linkstate(sc);
	lagg_proto_linkstate(sc, port);
	LAGG_XUNLOCK(sc);
}
/*
 * Return an active port, preferring @lp itself, then its list successor,
 * and finally the first active port found by scanning the whole list.
 * Returns NULL when no port is active.
 */
struct lagg_port *
lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
{
	struct lagg_port *lp_next, *rval = NULL;

	/*
	 * Search a port which reports an active link state.
	 */

#ifdef INVARIANTS
	/*
	 * This is called with either in the network epoch
	 * or with LAGG_XLOCK(sc) held.
	 */
	if (!in_epoch(net_epoch_preempt))
		LAGG_XLOCK_ASSERT(sc);
#endif

	if (lp == NULL)
		goto search;
	if (LAGG_PORTACTIVE(lp)) {
		rval = lp;
		goto found;
	}
	if ((lp_next = CK_SLIST_NEXT(lp, lp_entries)) != NULL &&
	    LAGG_PORTACTIVE(lp_next)) {
		rval = lp_next;
		goto found;
	}

	/* Neither hint matched: fall through into the full scan. */
search:
	CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
		if (LAGG_PORTACTIVE(lp_next)) {
			return (lp_next);
		}
	}
found:
	return (rval);
}
/*
 * Hand an mbuf to member port @ifp's transmit routine.  If the mbuf
 * carries a lagg wrapper send tag, it is swapped for the underlying
 * port tag first; a tag bound to a different port fails with EAGAIN
 * so the stack can re-allocate the tag on the right port.
 */
int
lagg_enqueue(struct ifnet *ifp, struct mbuf *m)
{

#if defined(KERN_TLS) || defined(RATELIMIT)
	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) {
		struct lagg_snd_tag *lst;
		struct m_snd_tag *mst;

		mst = m->m_pkthdr.snd_tag;
		lst = mst_to_lst(mst);
		if (lst->tag->ifp != ifp) {
			/* Tag belongs to another port: let the caller retry. */
			m_freem(m);
			return (EAGAIN);
		}
		/* Replace the wrapper tag with a ref on the port tag. */
		m->m_pkthdr.snd_tag = m_snd_tag_ref(lst->tag);
		m_snd_tag_rele(mst);
	}
#endif
	return (ifp->if_transmit)(ifp, m);
}
/*
* Simple round robin aggregation
*/
/* Initialize round-robin state: sequence at zero, stride of one. */
static void
lagg_rr_attach(struct lagg_softc *sc)
{
	sc->sc_stride = 1;
	sc->sc_seq = 0;
}
/*
 * Round-robin transmit: pick port (seq / stride) mod port-count, then
 * fall forward to the next active port if the chosen one is down.
 */
static int
lagg_rr_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;
	uint32_t p;

	/* Atomic fetch-add makes this safe without a transmit lock. */
	p = atomic_fetchadd_32(&sc->sc_seq, 1);
	p /= sc->sc_stride;
	p %= sc->sc_count;
	lp = CK_SLIST_FIRST(&sc->sc_ports);

	while (p--)
		lp = CK_SLIST_NEXT(lp, lp_entries);

	/*
	 * Check the port's link state. This will return the next active
	 * port if the link is down or the port is NULL.
	 */
	if ((lp = lagg_link_active(sc, lp)) == NULL) {
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENETDOWN);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}
/*
* Broadcast mode
*/
/*
 * Broadcast transmit: send a copy of the mbuf out every active port.
 * Copies go to all but the last active port; the original mbuf is sent
 * on the last one, avoiding one m_copym() per packet.
 */
static int
lagg_bcast_start(struct lagg_softc *sc, struct mbuf *m)
{
	int errors = 0;
	int ret;
	struct lagg_port *lp, *last = NULL;
	struct mbuf *m0;

	NET_EPOCH_ASSERT();
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (!LAGG_PORTACTIVE(lp))
			continue;

		if (last != NULL) {
			/* Duplicate the packet for the previous active port. */
			m0 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
			if (m0 == NULL) {
				ret = ENOBUFS;
				errors++;
				break;
			}
			lagg_enqueue(last->lp_ifp, m0);
		}
		last = lp;
	}

	/* No active port at all: drop the packet. */
	if (last == NULL) {
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENOENT);
	}
	if ((last = lagg_link_active(sc, last)) == NULL) {
		errors++;
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, errors);
		m_freem(m);
		return (ENETDOWN);
	}

	/* Original mbuf goes out on the final active port. */
	ret = lagg_enqueue(last->lp_ifp, m);
	if (errors != 0)
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, errors);

	return (ret);
}
/*
* Active failover
*/
/* Failover transmit: always use the primary (or next active) port. */
static int
lagg_fail_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *active;

	/* Use the master port if active or the next available port */
	active = lagg_link_active(sc, sc->sc_primary);
	if (active == NULL) {
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENETDOWN);
	}

	/* Send mbuf */
	return (lagg_enqueue(active->lp_ifp, m));
}
/*
 * Failover receive: normally only accept packets from the primary port
 * (or from any port when net.link.lagg.failover_rx_all is set).  When
 * the primary is down, accept from the currently active backup port.
 */
static struct mbuf *
lagg_fail_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct lagg_port *tmp_tp;

	if (lp == sc->sc_primary || V_lagg_failover_rx_all) {
		m->m_pkthdr.rcvif = ifp;
		return (m);
	}

	if (!LAGG_PORTACTIVE(sc->sc_primary)) {
		tmp_tp = lagg_link_active(sc, sc->sc_primary);
		/*
		 * If tmp_tp is null, we've received a packet when all
		 * our links are down. Weird, but process it anyways.
		 */
		if (tmp_tp == NULL || tmp_tp == lp) {
			m->m_pkthdr.rcvif = ifp;
			return (m);
		}
	}

	/* Packet arrived on a non-active port: drop it. */
	m_freem(m);
	return (NULL);
}
/*
* Loadbalancing
*/
/* Attach the loadbalance protocol: allocate state and build the table. */
static void
lagg_lb_attach(struct lagg_softc *sc)
{
	struct lagg_lb *lb;
	struct lagg_port *port;

	LAGG_XLOCK_ASSERT(sc);

	/* Allocate the protocol state and seed the hash key. */
	lb = malloc(sizeof(*lb), M_LAGG, M_WAITOK | M_ZERO);
	lb->lb_key = m_ether_tcpip_hash_init();
	sc->sc_psc = lb;

	/* Populate the port table from the current member set. */
	CK_SLIST_FOREACH(port, &sc->sc_ports, lp_entries)
		lagg_lb_port_create(port);
}
/*
 * Detach the loadbalance protocol: release its private state.  The
 * NULL guard of the old code was redundant — free(9) is documented to
 * do nothing for a NULL address.  Also clear sc_psc so no stale
 * pointer survives the detach, matching lagg_lacp_detach().
 */
static void
lagg_lb_detach(struct lagg_softc *sc)
{
	struct lagg_lb *lb;

	lb = (struct lagg_lb *)sc->sc_psc;
	sc->sc_psc = NULL;
	free(lb, M_LAGG);
}
/*
 * Rebuild the loadbalance port table from the current member list,
 * excluding @lp (pass NULL to include every port).  Returns 0 or
 * EINVAL if more than LAGG_MAX_PORTS members exist.
 */
static int
lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
{
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	struct lagg_port *lp_next;
	int i = 0, rv;

	rv = 0;
	bzero(&lb->lb_ports, sizeof(lb->lb_ports));
	LAGG_XLOCK_ASSERT(sc);
	CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
		/* Skip the port being removed, if any. */
		if (lp_next == lp)
			continue;
		if (i >= LAGG_MAX_PORTS) {
			rv = EINVAL;
			break;
		}
		if (sc->sc_ifflags & IFF_DEBUG)
			printf("%s: port %s at index %d\n",
			    sc->sc_ifname, lp_next->lp_ifp->if_xname, i);
		lb->lb_ports[i++] = lp_next;
	}

	return (rv);
}
/* New member port: rebuild the full port table (no exclusion). */
static int
lagg_lb_port_create(struct lagg_port *lp)
{
	return (lagg_lb_porttable(lp->lp_softc, NULL));
}
/* Member port going away: rebuild the port table without it. */
static void
lagg_lb_port_destroy(struct lagg_port *lp)
{
	(void)lagg_lb_porttable(lp->lp_softc, lp);
}
/*
 * Loadbalance transmit: hash the flow to a table slot, using the mbuf
 * flowid when configured and available, otherwise hashing the packet
 * contents.  Falls forward to the next active port if needed.
 */
static int
lagg_lb_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	struct lagg_port *lp = NULL;
	uint32_t p = 0;

	if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) &&
	    M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		p = m->m_pkthdr.flowid >> sc->flowid_shift;
	else
		/* No usable flowid: hash the headers ourselves. */
		p = m_ether_tcpip_hash(sc->sc_flags, m, lb->lb_key);
	p %= sc->sc_count;
	lp = lb->lb_ports[p];

	/*
	 * Check the port's link state. This will return the next active
	 * port if the link is down or the port is NULL.
	 */
	if ((lp = lagg_link_active(sc, lp)) == NULL) {
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENETDOWN);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}
/*
* 802.3ad LACP
*/
/* Attach LACP and register every existing member port with it. */
static void
lagg_lacp_attach(struct lagg_softc *sc)
{
	struct lagg_port *port;

	lacp_attach(sc);
	LAGG_XLOCK_ASSERT(sc);
	CK_SLIST_FOREACH(port, &sc->sc_ports, lp_entries)
		lacp_port_create(port);
}
/*
 * Detach LACP: destroy all per-port state first, then clear sc_psc
 * before tearing down the LACP softc so nothing can reach it through
 * the lagg softc during teardown.
 */
static void
lagg_lacp_detach(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	void *psc;

	LAGG_XLOCK_ASSERT(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_destroy(lp);

	psc = sc->sc_psc;
	sc->sc_psc = NULL;
	lacp_detach(psc);
}
/*
 * The actor's link-level address changed: destroy and re-create every
 * LACP port so the protocol state picks up the new address.
 */
static void
lagg_lacp_lladdr(struct lagg_softc *sc)
{
	struct lagg_port *port;

	LAGG_SXLOCK_ASSERT(sc);

	/* purge all the lacp ports */
	CK_SLIST_FOREACH(port, &sc->sc_ports, lp_entries)
		lacp_port_destroy(port);

	/* add them back in */
	CK_SLIST_FOREACH(port, &sc->sc_ports, lp_entries)
		lacp_port_create(port);
}
/* LACP transmit: let the protocol choose the port for this flow. */
static int
lagg_lacp_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *port;
	int err;

	port = lacp_select_tx_port(sc, m, &err);
	if (port == NULL) {
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (err);
	}

	return (lagg_enqueue(port->lp_ifp, m));
}
/*
 * Receive path for LACP mode: divert LACP control frames to the LACP
 * machinery, drop traffic from ports that are not collecting/active,
 * and retag everything else as received on the lagg interface.
 */
static struct mbuf *
lagg_lacp_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ether_header *eh;
	u_short etype;

	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);

	/* Tap off LACP control messages */
	if ((m->m_flags & M_VLANTAG) == 0 && etype == ETHERTYPE_SLOW) {
		m = lacp_input(lp, m);
		if (m == NULL)
			return (NULL);
	}

	/*
	 * If the port is not collecting or not in the active aggregator then
	 * free and return.
	 */
	if (!lacp_iscollecting(lp) || !lacp_isactive(lp)) {
		m_freem(m);
		return (NULL);
	}

	m->m_pkthdr.rcvif = sc->sc_ifp;
	return (m);
}
/* Default input */
/*
 * Default receive path: retag the mbuf as received on the lagg
 * interface and hand it back unchanged.
 */
static struct mbuf *
lagg_default_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{

	/* Just pass in the packet to our lagg device */
	m->m_pkthdr.rcvif = sc->sc_ifp;
	return (m);
}
diff --git a/sys/net/if_loop.c b/sys/net/if_loop.c
index 926d264073ec..3005965a4fcb 100644
--- a/sys/net/if_loop.c
+++ b/sys/net/if_loop.c
@@ -1,449 +1,446 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 1982, 1986, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Loopback interface driver for protocol testing and timing.
*/
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/netisr.h>
#include <net/route.h>
#include <net/bpf.h>
#include <net/vnet.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/in_var.h>
#endif
#ifdef INET6
#ifndef INET
#include <netinet/in.h>
#endif
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#endif
#include <security/mac/mac_framework.h>
#ifdef TINY_LOMTU
#define LOMTU (1024+512)
#elif defined(LARGE_LOMTU)
#define LOMTU 131072
#else
#define LOMTU 16384
#endif
#define LO_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP)
#define LO_CSUM_FEATURES6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_SCTP_IPV6)
#define LO_CSUM_SET (CSUM_DATA_VALID | CSUM_DATA_VALID_IPV6 | \
CSUM_PSEUDO_HDR | \
CSUM_IP_CHECKED | CSUM_IP_VALID | \
CSUM_SCTP_VALID)
static int loioctl(struct ifnet *, u_long, caddr_t);
static int looutput(struct ifnet *ifp, struct mbuf *m,
const struct sockaddr *dst, struct route *ro);
VNET_DEFINE(struct ifnet *, loif); /* Used externally */
VNET_DEFINE_STATIC(struct if_clone *, lo_cloner);
#define V_lo_cloner VNET(lo_cloner)
static const char loname[] = "lo";
/*
 * Destroy a cloned loopback interface.  lo0 (unit 0) may only be
 * destroyed with IFC_F_FORCE.
 */
static int
lo_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
{

	if (ifp->if_dunit == 0) {
		if ((flags & IFC_F_FORCE) == 0)
			return (EINVAL);
	}
#ifndef VIMAGE
	/* XXX: destroying lo0 will lead to panics. */
	KASSERT(V_loif != ifp, ("%s: destroying lo0", __func__));
#endif
	bpfdetach(ifp);
	if_detach(ifp);
	if_free(ifp);
	return (0);
}
/*
 * Create a loopback interface: allocate the ifnet, set capabilities
 * (hardware checksum emulation, link state) and attach it.
 *
 * NOTE(review): the original span contained unified-diff residue lines
 * ("- if (ifp == NULL) ...") removing the if_alloc() NULL check; this
 * body is the clean post-patch code, which assumes if_alloc() does not
 * fail.
 */
static int
lo_clone_create(struct if_clone *ifc, char *name, size_t len,
    struct ifc_data *ifd, struct ifnet **ifpp)
{
	struct ifnet *ifp;

	ifp = if_alloc(IFT_LOOP);
	if_initname(ifp, loname, ifd->unit);
	ifp->if_mtu = LOMTU;
	ifp->if_flags = IFF_LOOPBACK | IFF_MULTICAST;
	ifp->if_ioctl = loioctl;
	ifp->if_output = looutput;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	/* Loopback "hardware" checksums are free: claim full csum offload. */
	ifp->if_capabilities = ifp->if_capenable =
	    IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_LINKSTATE;
	ifp->if_hwassist = LO_CSUM_FEATURES | LO_CSUM_FEATURES6;
	if_attach(ifp);
	bpfattach(ifp, DLT_NULL, sizeof(u_int32_t));
	*ifpp = ifp;

	return (0);
}
/*
 * Per-vnet initialization: register the lo cloner and create lo0.
 */
static void
vnet_loif_init(const void *unused __unused)
{
	struct ifc_data ifd = { .unit = 0 };
	struct if_clone_addreq req = {
		.create_f = lo_clone_create,
		.destroy_f = lo_clone_destroy,
		.flags = IFC_F_AUTOUNIT,
	};

	V_lo_cloner = ifc_attach_cloner(loname, &req);
	ifc_create_ifp(loname, &ifd, &V_loif);
}
VNET_SYSINIT(vnet_loif_init, SI_SUB_PSEUDO, SI_ORDER_ANY,
vnet_loif_init, NULL);
#ifdef VIMAGE
/*
 * Per-vnet teardown: detach the lo cloner and forget this vnet's lo0.
 */
static void
vnet_loif_uninit(const void *unused __unused)
{

	ifc_detach_cloner(V_lo_cloner);
	V_loif = NULL;
}
VNET_SYSUNINIT(vnet_loif_uninit, SI_SUB_INIT_IF, SI_ORDER_SECOND,
vnet_loif_uninit, NULL);
#endif
/*
 * Module event handler.  Loading is a no-op (the work happens in the
 * vnet SYSINIT); unloading is refused for this module type.
 */
static int
loop_modevent(module_t mod, int type, void *data)
{

	if (type == MOD_LOAD)
		return (0);
	if (type == MOD_UNLOAD) {
		printf("loop module unload - not possible for this module type\n");
		return (EINVAL);
	}
	return (EOPNOTSUPP);
}
/* Module glue for if_lo; unloading is rejected by loop_modevent(). */
static moduledata_t loop_mod = {
	"if_lo",
	loop_modevent,
	0
};
DECLARE_MODULE(if_lo, loop_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY);
/*
 * Loopback output: account the packet, honor reject/blackhole routes,
 * fake up receive-side checksum flags for the chosen address family and
 * hand the packet back to the stack via if_simloop().
 */
static int
looutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    struct route *ro)
{
	u_int32_t af;
#ifdef MAC
	int error;
#endif

	M_ASSERTPKTHDR(m); /* check if we have the packet header */

#ifdef MAC
	error = mac_ifnet_check_transmit(ifp, m);
	if (error) {
		m_freem(m);
		return (error);
	}
#endif

	/* Blackhole routes drop silently; reject routes return an error. */
	if (ro != NULL && ro->ro_flags & (RT_REJECT|RT_BLACKHOLE)) {
		m_freem(m);
		return (ro->ro_flags & RT_BLACKHOLE ? 0 : EHOSTUNREACH);
	}

	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);

#ifdef RSS
	M_HASHTYPE_CLEAR(m);
#endif

	/* BPF writes need to be handled specially. */
	if (dst->sa_family == AF_UNSPEC || dst->sa_family == pseudo_AF_HDRCMPLT)
		bcopy(dst->sa_data, &af, sizeof(af));
	else
		af = RO_GET_FAMILY(ro, dst);

#if 1	/* XXX */
	switch (af) {
	case AF_INET:
		/*
		 * Mark the checksum as already verified so the receive
		 * path skips re-checking, and clear the transmit flags.
		 */
		if (ifp->if_capenable & IFCAP_RXCSUM) {
			m->m_pkthdr.csum_data = 0xffff;
			m->m_pkthdr.csum_flags = LO_CSUM_SET;
		}
		m->m_pkthdr.csum_flags &= ~LO_CSUM_FEATURES;
		break;
	case AF_INET6:
#if 0
		/*
		 * XXX-BZ for now always claim the checksum is good despite
		 * any interface flags.   This is a workaround for 9.1-R and
		 * a proper solution ought to be sought later.
		 */
		if (ifp->if_capenable & IFCAP_RXCSUM_IPV6) {
			m->m_pkthdr.csum_data = 0xffff;
			m->m_pkthdr.csum_flags = LO_CSUM_SET;
		}
#else
		m->m_pkthdr.csum_data = 0xffff;
		m->m_pkthdr.csum_flags = LO_CSUM_SET;
#endif
		m->m_pkthdr.csum_flags &= ~LO_CSUM_FEATURES6;
		break;
	default:
		printf("looutput: af=%d unexpected\n", af);
		m_freem(m);
		return (EAFNOSUPPORT);
	}
#endif
	return (if_simloop(ifp, m, af, 0));
}
/*
 * if_simloop()
 *
 * This function is to support software emulation of hardware loopback,
 * i.e., for interfaces with the IFF_SIMPLEX attribute. Since they can't
 * hear their own broadcasts, we create a copy of the packet that we
 * would normally receive via a hardware loopback.
 *
 * This function expects the packet to include the media header of length hlen.
 */
int
if_simloop(struct ifnet *ifp, struct mbuf *m, int af, int hlen)
{
	int isr;

	M_ASSERTPKTHDR(m);
	/* Drop non-persistent tags so the packet looks freshly received. */
	m_tag_delete_nonpersistent(m);
	m->m_pkthdr.rcvif = ifp;

#ifdef MAC
	mac_ifnet_create_mbuf(ifp, m);
#endif

	/*
	 * Let BPF see incoming packet in the following manner:
	 *  - Emulated packet loopback for a simplex interface
	 *    (net/if_ethersubr.c)
	 *	-> passes it to ifp's BPF
	 *  - IPv4/v6 multicast packet loopback (netinet(6)/ip(6)_output.c)
	 *	-> not passes it to any BPF
	 *  - Normal packet loopback from myself to myself (net/if_loop.c)
	 *	-> passes to lo0's BPF (even in case of IPv6, where ifp!=lo0)
	 */
	if (hlen > 0) {
		if (bpf_peers_present(ifp->if_bpf)) {
			bpf_mtap(ifp->if_bpf, m);
		}
	} else {
		if (bpf_peers_present(V_loif->if_bpf)) {
			if ((m->m_flags & M_MCAST) == 0 || V_loif == ifp) {
				/* XXX beware sizeof(af) != 4 */
				u_int32_t af1 = af;

				/*
				 * We need to prepend the address family.
				 */
				bpf_mtap2(V_loif->if_bpf, &af1, sizeof(af1), m);
			}
		}
	}

	/* Strip away media header */
	if (hlen > 0) {
		m_adj(m, hlen);
#ifndef __NO_STRICT_ALIGNMENT
		/*
		 * Some archs do not like unaligned data, so
		 * we move data down in the first mbuf.
		 */
		if (mtod(m, vm_offset_t) & 3) {
			KASSERT(hlen >= 3, ("if_simloop: hlen too small"));
			bcopy(m->m_data,
			    (char *)(mtod(m, vm_offset_t)
				- (mtod(m, vm_offset_t) & 3)),
			    m->m_len);
			m->m_data -= (mtod(m,vm_offset_t) & 3);
		}
#endif
	}

	/* Deliver to upper layer protocol */
	switch (af) {
#ifdef INET
	case AF_INET:
		isr = NETISR_IP;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		m->m_flags |= M_LOOP;
		isr = NETISR_IPV6;
		break;
#endif
	default:
		printf("if_simloop: can't handle af=%d\n", af);
		m_freem(m);
		return (EAFNOSUPPORT);
	}
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
	netisr_queue(isr, m);	/* mbuf is free'd on failure. */
	return (0);
}
/*
 * Process an ioctl request.
 */
static int
loioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFADDR:
		/* Assigning an address implicitly brings the interface up. */
		ifp->if_flags |= IFF_UP;
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		if_link_state_change(ifp, LINK_STATE_UP);
		/*
		 * Everything else is done at a higher level.
		 */
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifr == NULL) {
			error = EAFNOSUPPORT;		/* XXX */
			break;
		}
		/* Only INET/INET6 multicast membership is accepted. */
		switch (ifr->ifr_addr.sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
#ifdef INET6
		case AF_INET6:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFMTU:
		ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		/* Mirror IFF_UP into the reported link state. */
		if_link_state_change(ifp, (ifp->if_flags & IFF_UP) ?
		    LINK_STATE_UP: LINK_STATE_DOWN);
		break;

	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if ((mask & IFCAP_RXCSUM) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if ((mask & IFCAP_TXCSUM) != 0)
			ifp->if_capenable ^= IFCAP_TXCSUM;
		if ((mask & IFCAP_RXCSUM_IPV6) != 0) {
#if 0
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
#else
			/* Toggling IPv6 RX csum is disabled (see #if 0). */
			error = EOPNOTSUPP;
			break;
#endif
		}
		if ((mask & IFCAP_TXCSUM_IPV6) != 0) {
#if 0
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
#else
			/* Toggling IPv6 TX csum is disabled (see #if 0). */
			error = EOPNOTSUPP;
			break;
#endif
		}
		/* Recompute hwassist from the (possibly updated) caps. */
		ifp->if_hwassist = 0;
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = LO_CSUM_FEATURES;
#if 0
		if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
			ifp->if_hwassist |= LO_CSUM_FEATURES6;
#endif
		break;

	default:
		error = EINVAL;
	}
	return (error);
}
diff --git a/sys/net/if_stf.c b/sys/net/if_stf.c
index 9f9e57b84f35..6fa3d03d1cfa 100644
--- a/sys/net/if_stf.c
+++ b/sys/net/if_stf.c
@@ -1,1056 +1,1051 @@
/* $KAME: if_stf.c,v 1.73 2001/12/03 11:08:30 keiichi Exp $ */
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (C) 2000 WIDE Project.
* Copyright (c) 2010 Hiroki Sato <hrs@FreeBSD.org>
* Copyright (c) 2013 Ermal Luci <eri@FreeBSD.org>
* Copyright (c) 2017-2021 Rubicon Communications, LLC (Netgate)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the project nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* 6to4 interface, based on RFC3056.
*
* 6to4 interface is NOT capable of link-layer (I mean, IPv4) multicasting.
* There is no address mapping defined from IPv6 multicast address to IPv4
* address. Therefore, we do not have IFF_MULTICAST on the interface.
*
* Due to the lack of address mapping for link-local addresses, we cannot
* throw packets toward link-local addresses (fe80::x). Also, we cannot throw
* packets to link-local multicast addresses (ff02::x).
*
* Here are interesting symptoms due to the lack of link-local address:
*
* Unicast routing exchange:
* - RIPng: Impossible. Uses link-local multicast packet toward ff02::9,
* and link-local addresses as nexthop.
* - OSPFv6: Impossible. OSPFv6 assumes that there's link-local address
* assigned to the link, and makes use of them. Also, HELLO packets use
* link-local multicast addresses (ff02::5 and ff02::6).
* - BGP4+: Maybe. You can only use global address as nexthop, and global
* address as TCP endpoint address.
*
* Multicast routing protocols:
* - PIM: Hello packet cannot be used to discover adjacent PIM routers.
* Adjacent PIM routers must be configured manually (is it really spec-wise
* correct thing to do?).
*
* ICMPv6:
* - Redirects cannot be used due to the lack of link-local address.
*
* stf interface does not have, and will not need, a link-local address.
* It seems to have no real benefit and does not help the above symptoms much.
* Even if we assign link-locals to interface, we cannot really
* use link-local unicast/multicast on top of 6to4 cloud (since there's no
* encapsulation defined for link-local address), and the above analysis does
* not change. RFC3056 does not mandate the assignment of link-local address
* either.
*
* 6to4 interface has security issues. Refer to
* http://playground.iijlab.net/i-d/draft-itojun-ipv6-transition-abuse-00.txt
* for details. The code tries to filter out some of malicious packets.
* Note that there is no way to be 100% secure.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <machine/cpu.h>
#include <sys/malloc.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/if_clone.h>
#include <net/route.h>
#include <net/route/nhop.h>
#include <net/netisr.h>
#include <net/if_stf.h>
#include <net/if_types.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/in_fib.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_fib.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_var.h>
#include <netinet/ip_ecn.h>
#include <netinet/ip_encap.h>
#include <machine/stdarg.h>
#include <net/bpf.h>
#include <security/mac/mac_framework.h>
SDT_PROVIDER_DEFINE(if_stf);
SDT_PROBE_DEFINE3(if_stf, , encapcheck, in, "struct mbuf *", "int", "int");
SDT_PROBE_DEFINE0(if_stf, , encapcheck, accept);
SDT_PROBE_DEFINE3(if_stf, , getsrcifa6, in, "struct ifnet *",
"struct in6_addr *", "struct in6_addr *");
SDT_PROBE_DEFINE2(if_stf, , getsrcifa6, found, "struct in6_addr *",
"struct in6_addr *");
SDT_PROBE_DEFINE0(if_stf, , getsrcifa6, notfound);
SDT_PROBE_DEFINE4(if_stf, , stf_output, in, "struct ifnet *", "struct mbuf *",
"struct sockaddr *", "struct route *");
SDT_PROBE_DEFINE2(if_stf, , stf_output, error, "int", "int");
SDT_PROBE_DEFINE1(if_stf, , stf_output, out, "int");
SDT_PROBE_DEFINE3(if_stf, , checkaddr6, in, "struct stf_softc *",
"struct in6_addr *", "struct ifnet *");
SDT_PROBE_DEFINE2(if_stf, , checkaddr6, out, "int", "int");
SDT_PROBE_DEFINE3(if_stf, , stf_input, in, "struct mbuf *", "int", "int");
SDT_PROBE_DEFINE2(if_stf, , stf_input, out, "int", "int");
SDT_PROBE_DEFINE3(if_stf, , ioctl, sv4net, "struct in_addr *",
"struct in_addr *", "int");
SDT_PROBE_DEFINE1(if_stf, , ioctl, sdstv4, "struct in_addr *");
SDT_PROBE_DEFINE1(if_stf, , ioctl, ifaddr, "struct ifaddr *");
SDT_PROBE_DEFINE4(if_stf, , getin4addr_in6, out, "struct in6_addr *",
"struct in6_addr *", "struct in6_addr *", "struct sockaddr_in *");
SDT_PROBE_DEFINE2(if_stf, , getin4addr, in, "struct in6_addr *", "struct in6_addr *");
SDT_PROBE_DEFINE1(if_stf, , getin4addr, out, "struct sockaddr_in *");
SYSCTL_DECL(_net_link);
static SYSCTL_NODE(_net_link, IFT_STF, stf, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"6to4 Interface");
static int stf_permit_rfc1918 = 0;
SYSCTL_INT(_net_link_stf, OID_AUTO, permit_rfc1918, CTLFLAG_RWTUN,
&stf_permit_rfc1918, 0, "Permit the use of private IPv4 addresses");
#define STFUNIT 0
#define IN6_IS_ADDR_6TO4(x) (ntohs((x)->s6_addr16[0]) == 0x2002)
/*
* XXX: Return a pointer with 16-bit aligned. Don't cast it to
* struct in_addr *; use bcopy() instead.
*/
#define GET_V4(x) (&(x)->s6_addr16[1])
/* Per-interface softc for an stf(4) 6to4/6rd tunnel. */
struct stf_softc {
	struct ifnet	*sc_ifp;	/* Back pointer to our ifnet */
	in_addr_t	braddr;		/* Border relay IPv4 address */
	in_addr_t	srcv4_addr;	/* Our IPv4 WAN address */
	u_int		v4prefixlen;	/* How much of the v4 address to include in our address. */
	u_int		sc_fibnum;	/* FIB used for route lookups and output */
	const struct encaptab *encap_cookie;	/* ip_encap_attach() handle */
};
#define STF2IFP(sc) ((sc)->sc_ifp)
static const char stfname[] = "stf";
static MALLOC_DEFINE(M_STF, stfname, "6to4 Tunnel Interface");
static const int ip_stf_ttl = 40;
static int in_stf_input(struct mbuf *, int, int, void *);
static char *stfnames[] = {"stf0", "stf", "6to4", NULL};
static int stfmodevent(module_t, int, void *);
static int stf_encapcheck(const struct mbuf *, int, int, void *);
static int stf_getsrcifa6(struct ifnet *, struct in6_addr *, struct in6_addr *);
static int stf_output(struct ifnet *, struct mbuf *, const struct sockaddr *,
struct route *);
static int isrfc1918addr(struct in_addr *);
static int stf_checkaddr4(struct stf_softc *, struct in_addr *,
struct ifnet *);
static int stf_checkaddr6(struct stf_softc *, struct in6_addr *,
struct ifnet *);
static struct sockaddr_in *stf_getin4addr_in6(struct stf_softc *,
struct sockaddr_in *, struct in6_addr, struct in6_addr,
struct in6_addr);
static struct sockaddr_in *stf_getin4addr(struct stf_softc *,
struct sockaddr_in *, struct in6_addr, struct in6_addr);
static int stf_ioctl(struct ifnet *, u_long, caddr_t);
VNET_DEFINE_STATIC(struct if_clone *, stf_cloner);
#define V_stf_cloner VNET(stf_cloner)
static const struct encap_config ipv4_encap_cfg = {
.proto = IPPROTO_IPV6,
.min_length = sizeof(struct ip),
.exact_match = (sizeof(in_addr_t) << 3) + 8,
.check = stf_encapcheck,
.input = in_stf_input
};
/*
 * Cloner match callback: accept any of the interface name aliases
 * listed in stfnames ("stf0", "stf", "6to4").
 */
static int
stf_clone_match(struct if_clone *ifc, const char *name)
{
	int idx;

	for (idx = 0; stfnames[idx] != NULL; idx++) {
		if (strcmp(name, stfnames[idx]) == 0)
			return (1);
	}
	return (0);
}
/*
 * Create the (single) stf interface: allocate the softc and ifnet,
 * register the IPv6-in-IPv4 encapsulation hook and attach the ifnet.
 *
 * NOTE(review): the original span contained unified-diff residue lines
 * ("- if (ifp == NULL) ...") removing the if_alloc() failure path; this
 * body is the clean post-patch code, which assumes if_alloc() does not
 * fail.
 */
static int
stf_clone_create(struct if_clone *ifc, char *name, size_t len,
    struct ifc_data *ifd, struct ifnet **ifpp)
{
	char *dp;
	int err, unit, wildcard;
	struct stf_softc *sc;
	struct ifnet *ifp;

	err = ifc_name2unit(name, &unit);
	if (err != 0)
		return (err);
	wildcard = (unit < 0);

	/*
	 * We can only have one unit, but since unit allocation is
	 * already locked, we use it to keep from allocating extra
	 * interfaces.
	 */
	unit = STFUNIT;
	err = ifc_alloc_unit(ifc, &unit);
	if (err != 0)
		return (err);

	sc = malloc(sizeof(struct stf_softc), M_STF, M_WAITOK | M_ZERO);
	ifp = STF2IFP(sc) = if_alloc(IFT_STF);
	ifp->if_softc = sc;
	sc->sc_fibnum = curthread->td_proc->p_fibnum;

	/*
	 * Set the name manually rather then using if_initname because
	 * we don't conform to the default naming convention for interfaces.
	 * In the wildcard case, we need to update the name.
	 */
	if (wildcard) {
		for (dp = name; *dp != '\0'; dp++);
		if (snprintf(dp, len - (dp-name), "%d", unit) >
		    len - (dp-name) - 1) {
			/*
			 * This can only be a programmer error and
			 * there's no straightforward way to recover if
			 * it happens.
			 */
			panic("if_clone_create(): interface name too long");
		}
	}
	strlcpy(ifp->if_xname, name, IFNAMSIZ);

	ifp->if_dname = stfname;
	ifp->if_dunit = IF_DUNIT_NONE;

	sc->encap_cookie = ip_encap_attach(&ipv4_encap_cfg, sc, M_WAITOK);

	ifp->if_mtu = IPV6_MMTU;
	ifp->if_ioctl = stf_ioctl;
	ifp->if_output = stf_output;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	if_attach(ifp);
	bpfattach(ifp, DLT_NULL, sizeof(u_int32_t));
	*ifpp = ifp;

	return (0);
}
/*
 * Destroy the stf interface: detach the encapsulation hook first so no
 * new packets can reach the softc, then tear down the ifnet and release
 * the softc and the unit number.
 */
static int
stf_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
{
	struct stf_softc *sc = ifp->if_softc;
	int err __unused;

	err = ip_encap_detach(sc->encap_cookie);
	KASSERT(err == 0, ("Unexpected error detaching encap_cookie"));
	bpfdetach(ifp);
	if_detach(ifp);
	if_free(ifp);

	free(sc, M_STF);
	/* Only one unit (STFUNIT) ever exists; give it back. */
	ifc_free_unit(ifc, STFUNIT);

	return (0);
}
/*
 * Per-vnet initialization: register the stf cloner.  Unlike lo(4), no
 * interface is created here; creation happens on demand via the cloner.
 */
static void
vnet_stf_init(const void *unused __unused)
{
	struct if_clone_addreq req = {
		.match_f = stf_clone_match,
		.create_f = stf_clone_create,
		.destroy_f = stf_clone_destroy,
	};

	V_stf_cloner = ifc_attach_cloner(stfname, &req);
}
VNET_SYSINIT(vnet_stf_init, SI_SUB_PSEUDO, SI_ORDER_ANY, vnet_stf_init, NULL);
/* Per-vnet teardown: detach the stf cloner registered at init time. */
static void
vnet_stf_uninit(const void *unused __unused)
{

	if_clone_detach(V_stf_cloner);
	V_stf_cloner = NULL;
}
VNET_SYSUNINIT(vnet_stf_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY, vnet_stf_uninit,
NULL);
/*
 * Module event handler.  Both load and unload are no-ops here: the real
 * work is done in vnet_stf_init()/vnet_stf_uninit().
 */
static int
stfmodevent(module_t mod, int type, void *data)
{

	if (type == MOD_LOAD || type == MOD_UNLOAD)
		return (0);
	return (EOPNOTSUPP);
}
/* Module glue; all real work happens in the vnet SYSINIT/SYSUNINIT. */
static moduledata_t stf_mod = {
	"if_stf",
	stfmodevent,
	0
};
DECLARE_MODULE(if_stf, stf_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_stf, 2);
/*
 * ip_encap(9) check callback: decide whether an incoming IPv6-in-IPv4
 * packet belongs to this stf interface.  Returns 0 to decline or 32
 * (the match weight) to accept.
 */
static int
stf_encapcheck(const struct mbuf *m, int off, int proto, void *arg)
{
	struct ip ip;
	struct stf_softc *sc;
	struct in6_addr addr6, mask6;
	struct sockaddr_in sin4addr, sin4mask;

	SDT_PROBE3(if_stf, , encapcheck, in, m, off, proto);
	sc = (struct stf_softc *)arg;
	if (sc == NULL)
		return (0);

	if ((STF2IFP(sc)->if_flags & IFF_UP) == 0)
		return (0);

	/* IFF_LINK0 means "no decapsulation" */
	if ((STF2IFP(sc)->if_flags & IFF_LINK0) != 0)
		return (0);

	if (proto != IPPROTO_IPV6)
		return (0);

	m_copydata(m, 0, sizeof(ip), (caddr_t)&ip);

	if (ip.ip_v != 4)
		return (0);

	if (stf_getsrcifa6(STF2IFP(sc), &addr6, &mask6) != 0)
		return (0);

	/* Prefer the explicitly configured WAN address when set. */
	if (sc->srcv4_addr != INADDR_ANY) {
		sin4addr.sin_addr.s_addr = sc->srcv4_addr;
		sin4addr.sin_family = AF_INET;
	} else
		if (stf_getin4addr(sc, &sin4addr, addr6, mask6) == NULL)
			return (0);

	/* The outer destination must be our own IPv4 address. */
	if (sin4addr.sin_addr.s_addr != ip.ip_dst.s_addr)
		return (0);

	if (IN6_IS_ADDR_6TO4(&addr6)) {
		/*
		 * 6to4 (RFC 3056).
		 * Check if IPv4 src matches the IPv4 address derived
		 * from the local 6to4 address masked by prefixmask.
		 * success on: src = 10.1.1.1, ia6->ia_addr = 2002:0a00:.../24
		 * fail on: src = 10.1.1.1, ia6->ia_addr = 2002:0b00:.../24
		 */
		memcpy(&sin4mask.sin_addr, GET_V4(&mask6),
		    sizeof(sin4mask.sin_addr));
		if ((sin4addr.sin_addr.s_addr & sin4mask.sin_addr.s_addr) !=
		    (ip.ip_src.s_addr & sin4mask.sin_addr.s_addr))
			return (0);
	} else {
		/* 6rd (RFC 5569) */
		/*
		 * No restriction on the src address in the case of
		 * 6rd because the stf(4) interface always has a
		 * prefix which covers whole of IPv4 src address
		 * range.  So, stf_output() will catch all of
		 * 6rd-capsuled IPv4 traffic with suspicious inner dst
		 * IPv4 address (i.e. the IPv6 destination address is
		 * one the admin does not like to route to outside),
		 * and then it discard them silently.
		 */
	}

	SDT_PROBE0(if_stf, , encapcheck, accept);
	/* stf interface makes single side match only */
	return (32);
}
/*
 * Find an IPv6 address/mask pair on ifp whose corresponding IPv4 source
 * address (either the configured srcv4_addr or the one derived via
 * stf_getin4addr()) is actually assigned to the system.  On success,
 * fill *addr/*mask and return 0; return ENOENT if nothing matches.
 */
static int
stf_getsrcifa6(struct ifnet *ifp, struct in6_addr *addr, struct in6_addr *mask)
{
	struct ifaddr *ia;
	struct in_ifaddr *ia4;
	struct in6_addr addr6, mask6;
	struct sockaddr_in sin4;
	struct stf_softc *sc;
	struct in_addr in;

	NET_EPOCH_ASSERT();

	sc = ifp->if_softc;
	SDT_PROBE3(if_stf, , getsrcifa6, in, ifp, addr, mask);

	CK_STAILQ_FOREACH(ia, &ifp->if_addrhead, ifa_link) {
		if (ia->ifa_addr->sa_family != AF_INET6)
			continue;

		addr6 = *IFA_IN6(ia);
		mask6 = *IFA_MASKIN6(ia);
		if (sc->srcv4_addr != INADDR_ANY)
			bcopy(&sc->srcv4_addr, &in, sizeof(in));
		else {
			if (stf_getin4addr(sc, &sin4, addr6, mask6) == NULL)
				continue;
			bcopy(&sin4.sin_addr, &in, sizeof(in));
		}

		/* Verify the IPv4 address is configured on the system. */
		CK_LIST_FOREACH(ia4, INADDR_HASH(in.s_addr), ia_hash)
			if (ia4->ia_addr.sin_addr.s_addr == in.s_addr)
				break;
		if (ia4 == NULL)
			continue;

		*addr = addr6;
		*mask = mask6;
		SDT_PROBE2(if_stf, , getsrcifa6, found, addr, mask);
		return (0);
	}

	SDT_PROBE0(if_stf, , getsrcifa6, notfound);
	return (ENOENT);
}
/*
 * Encapsulate an outbound IPv6 packet in IPv4 (ip_p = IPPROTO_IPV6) and
 * send it with ip_output().  The outer destination is derived from the
 * inner IPv6 destination, falling back to the configured border relay
 * or the routing destination; the outer source is the configured WAN
 * address or the one derived from our 6to4/6rd prefix.
 */
static int
stf_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    struct route *ro)
{
	struct stf_softc *sc;
	const struct sockaddr_in6 *dst6;
	struct sockaddr_in dst4, src4;
	u_int8_t tos;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct in6_addr addr6, mask6;
	int error;

	SDT_PROBE4(if_stf, , stf_output, in, ifp, m, dst, ro);
#ifdef MAC
	error = mac_ifnet_check_transmit(ifp, m);
	if (error) {
		m_freem(m);
		SDT_PROBE2(if_stf, , stf_output, error, error, __LINE__);
		return (error);
	}
#endif

	sc = ifp->if_softc;
	dst6 = (const struct sockaddr_in6 *)dst;

	/* just in case */
	if ((ifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		SDT_PROBE2(if_stf, , stf_output, error, ENETDOWN, __LINE__);
		return (ENETDOWN);
	}

	/*
	 * If we don't have an ip4 address that match my inner ip6 address,
	 * we shouldn't generate output.  Without this check, we'll end up
	 * using wrong IPv4 source.
	 */
	if (stf_getsrcifa6(ifp, &addr6, &mask6) != 0) {
		m_freem(m);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		SDT_PROBE2(if_stf, , stf_output, error, ENETDOWN, __LINE__);
		return (ENETDOWN);
	}

	/* Make sure the full IPv6 header is contiguous. */
	if (m->m_len < sizeof(*ip6)) {
		m = m_pullup(m, sizeof(*ip6));
		if (!m) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			SDT_PROBE2(if_stf, , stf_output, error, ENOBUFS,
			    __LINE__);
			return (ENOBUFS);
		}
	}
	ip6 = mtod(m, struct ip6_hdr *);
	tos = IPV6_TRAFFIC_CLASS(ip6);

	/*
	 * Pickup the right outer dst addr from the list of candidates.
	 * ip6_dst has priority as it may be able to give us shorter IPv4 hops.
	 */
	if (stf_getin4addr_in6(sc, &dst4, addr6, mask6,
	    ip6->ip6_dst) == NULL) {
		if (sc->braddr != INADDR_ANY)
			dst4.sin_addr.s_addr = sc->braddr;
		else if (stf_getin4addr_in6(sc, &dst4, addr6, mask6,
		    dst6->sin6_addr) == NULL) {
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			SDT_PROBE2(if_stf, , stf_output, error, ENETUNREACH,
			    __LINE__);
			return (ENETUNREACH);
		}
	}

	if (bpf_peers_present(ifp->if_bpf)) {
		/*
		 * We need to prepend the address family as
		 * a four byte field.  Cons up a dummy header
		 * to pacify bpf.  This is safe because bpf
		 * will only read from the mbuf (i.e., it won't
		 * try to free it or keep a pointer a to it).
		 */
		u_int af = AF_INET6;

		bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m);
	}

	M_PREPEND(m, sizeof(struct ip), M_NOWAIT);
	if (m == NULL) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		SDT_PROBE2(if_stf, , stf_output, error, ENOBUFS, __LINE__);
		return (ENOBUFS);
	}
	ip = mtod(m, struct ip *);

	bzero(ip, sizeof(*ip));

	if (sc->srcv4_addr != INADDR_ANY)
		src4.sin_addr.s_addr = sc->srcv4_addr;
	else if (stf_getin4addr(sc, &src4, addr6, mask6) == NULL) {
		m_freem(m);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		SDT_PROBE2(if_stf, , stf_output, error, ENETUNREACH, __LINE__);
		return (ENETUNREACH);
	}
	bcopy(&src4.sin_addr, &ip->ip_src, sizeof(ip->ip_src));
	bcopy(&dst4.sin_addr, &ip->ip_dst, sizeof(ip->ip_dst));

	ip->ip_p = IPPROTO_IPV6;
	ip->ip_ttl = ip_stf_ttl;
	ip->ip_len = htons(m->m_pkthdr.len);
	/* IFF_LINK1 selects ECN-friendly (ALLOWED) TOS propagation. */
	if (ifp->if_flags & IFF_LINK1)
		ip_ecn_ingress(ECN_ALLOWED, &ip->ip_tos, &tos);
	else
		ip_ecn_ingress(ECN_NOCARE, &ip->ip_tos, &tos);

	M_SETFIB(m, sc->sc_fibnum);
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	error = ip_output(m, NULL, NULL, 0, NULL, NULL);

	SDT_PROBE1(if_stf, , stf_output, out, error);
	return (error);
}
/*
 * returns 1 if private address range:
 * 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16
 * Always returns 0 when the stf_permit_rfc1918 sysctl is enabled.
 */
static int
isrfc1918addr(struct in_addr *in)
{
	uint32_t haddr;

	if (stf_permit_rfc1918 != 0)
		return (0);
	haddr = ntohl(in->s_addr);
	if ((haddr & 0xff000000) == 0x0a000000 ||	/* 10/8 */
	    (haddr & 0xfff00000) == 0xac100000 ||	/* 172.16/12 */
	    (haddr & 0xffff0000) == 0xc0a80000)		/* 192.168/16 */
		return (1);
	return (0);
}
/*
 * Sanity-check an outer IPv4 address.  Rejects multicast, 0/8, 127/8,
 * 255/8 and configured broadcast addresses; unless IFF_LINK2 is set,
 * also performs an ingress (reverse-path) check against inifp.
 * Returns 0 if acceptable, -1 to reject.
 */
static int
stf_checkaddr4(struct stf_softc *sc, struct in_addr *in, struct ifnet *inifp)
{
	struct in_ifaddr *ia4;

	/*
	 * reject packets with the following address:
	 * 224.0.0.0/4 0.0.0.0/8 127.0.0.0/8 255.0.0.0/8
	 */
	if (IN_MULTICAST(ntohl(in->s_addr)))
		return (-1);
	switch ((ntohl(in->s_addr) & 0xff000000) >> 24) {
	case 0: case 127: case 255:
		return (-1);
	}

	/*
	 * reject packets with broadcast
	 */
	CK_STAILQ_FOREACH(ia4, &V_in_ifaddrhead, ia_link) {
		if ((ia4->ia_ifa.ifa_ifp->if_flags & IFF_BROADCAST) == 0)
			continue;
		if (in->s_addr == ia4->ia_broadaddr.sin_addr.s_addr) {
			return (-1);
		}
	}

	/*
	 * perform ingress filter
	 */
	if (sc && (STF2IFP(sc)->if_flags & IFF_LINK2) == 0 && inifp) {
		struct nhop_object *nh;

		NET_EPOCH_ASSERT();
		nh = fib4_lookup(sc->sc_fibnum, *in, 0, 0, 0);
		if (nh == NULL)
			return (-1);

		/* The route back to the source must use inifp. */
		if (nh->nh_ifp != inifp)
			return (-1);
	}

	return (0);
}
/*
 * Sanity-check an inner IPv6 address.  6to4 addresses are validated
 * through their embedded IPv4 address; IPv4-compatible and IPv4-mapped
 * addresses are always rejected.  Returns 0 if acceptable, -1 to reject.
 */
static int
stf_checkaddr6(struct stf_softc *sc, struct in6_addr *in6, struct ifnet *inifp)
{
	SDT_PROBE3(if_stf, , checkaddr6, in, sc, in6, inifp);

	/*
	 * check 6to4 addresses
	 */
	if (IN6_IS_ADDR_6TO4(in6)) {
		struct in_addr in4;
		int ret;

		bcopy(GET_V4(in6), &in4, sizeof(in4));
		ret = stf_checkaddr4(sc, &in4, inifp);
		SDT_PROBE2(if_stf, , checkaddr6, out, ret, __LINE__);
		return (ret);
	}

	/*
	 * reject anything that look suspicious.  the test is implemented
	 * in ip6_input too, but we check here as well to
	 * (1) reject bad packets earlier, and
	 * (2) to be safe against future ip6_input change.
	 */
	if (IN6_IS_ADDR_V4COMPAT(in6)) {
		SDT_PROBE2(if_stf, , checkaddr6, out, -1, __LINE__);
		return (-1);
	}
	if (IN6_IS_ADDR_V4MAPPED(in6)) {
		SDT_PROBE2(if_stf, , checkaddr6, out, -1, __LINE__);
		return (-1);
	}

	SDT_PROBE2(if_stf, , checkaddr6, out, 0, __LINE__);
	return (0);
}
/*
 * Protocol input handler for IPv6-in-IPv4 (protocol 41) packets arriving
 * on an stf interface.  Validates both the outer IPv4 and the inner IPv6
 * addresses, enforces the RFC 3056 private-address restrictions,
 * translates ECN marking and dispatches the decapsulated packet to the
 * IPv6 netisr.  The mbuf is always consumed; IPPROTO_DONE is returned.
 */
static int
in_stf_input(struct mbuf *m, int off, int proto, void *arg)
{
	struct stf_softc *sc = arg;
	struct ip ip;
	struct ip6_hdr *ip6;
	u_int8_t otos, itos;
	struct ifnet *ifp;
	struct nhop_object *nh;

	NET_EPOCH_ASSERT();

	SDT_PROBE3(if_stf, , stf_input, in, m, off, proto);

	if (proto != IPPROTO_IPV6) {
		m_freem(m);
		SDT_PROBE2(if_stf, , stf_input, out, IPPROTO_DONE, __LINE__);
		return (IPPROTO_DONE);
	}

	m_copydata(m, 0, sizeof(struct ip), (caddr_t)&ip);

	if (sc == NULL || (STF2IFP(sc)->if_flags & IFF_UP) == 0) {
		m_freem(m);
		SDT_PROBE2(if_stf, , stf_input, out, IPPROTO_DONE, __LINE__);
		return (IPPROTO_DONE);
	}

	ifp = STF2IFP(sc);

#ifdef MAC
	mac_ifnet_create_mbuf(ifp, m);
#endif

	/*
	 * perform sanity check against outer src/dst.
	 * for source, perform ingress filter as well.
	 */
	if (stf_checkaddr4(sc, &ip.ip_dst, NULL) < 0 ||
	    stf_checkaddr4(sc, &ip.ip_src, m->m_pkthdr.rcvif) < 0) {
		m_freem(m);
		SDT_PROBE2(if_stf, , stf_input, out, IPPROTO_DONE, __LINE__);
		return (IPPROTO_DONE);
	}

	/* Save the outer TOS byte for the ECN translation below. */
	otos = ip.ip_tos;

	/* Strip the outer IPv4 header; the inner IPv6 header follows. */
	m_adj(m, off);

	if (m->m_len < sizeof(*ip6)) {
		m = m_pullup(m, sizeof(*ip6));
		if (m == NULL) {
			SDT_PROBE2(if_stf, , stf_input, out, IPPROTO_DONE,
			    __LINE__);
			return (IPPROTO_DONE);
		}
	}
	ip6 = mtod(m, struct ip6_hdr *);

	/*
	 * perform sanity check against inner src/dst.
	 * for source, perform ingress filter as well.
	 */
	if (stf_checkaddr6(sc, &ip6->ip6_dst, NULL) < 0 ||
	    stf_checkaddr6(sc, &ip6->ip6_src, m->m_pkthdr.rcvif) < 0) {
		m_freem(m);
		SDT_PROBE2(if_stf, , stf_input, out, IPPROTO_DONE, __LINE__);
		return (IPPROTO_DONE);
	}

	/*
	 * reject packets with private address range.
	 * (requirement from RFC3056 section 2 1st paragraph)
	 */
	if ((IN6_IS_ADDR_6TO4(&ip6->ip6_src) && isrfc1918addr(&ip.ip_src)) ||
	    (IN6_IS_ADDR_6TO4(&ip6->ip6_dst) && isrfc1918addr(&ip.ip_dst))) {
		m_freem(m);
		SDT_PROBE2(if_stf, , stf_input, out, IPPROTO_DONE, __LINE__);
		return (IPPROTO_DONE);
	}

	/*
	 * Ignore if the destination is the same stf interface because
	 * all of valid IPv6 outgoing traffic should go interfaces
	 * except for it.
	 */
	nh = fib6_lookup(sc->sc_fibnum, &ip6->ip6_dst, 0, 0, 0);
	if (nh == NULL) {
		/*
		 * Use m_freem(), not m_free(): the packet may span an mbuf
		 * chain, and m_free() would release only the first mbuf,
		 * leaking the remainder of the chain.
		 */
		m_freem(m);
		SDT_PROBE2(if_stf, , stf_input, out, IPPROTO_DONE, __LINE__);
		return (IPPROTO_DONE);
	}
	if ((nh->nh_ifp == ifp) &&
	    (!IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &nh->gw6_sa.sin6_addr))) {
		m_freem(m);
		SDT_PROBE2(if_stf, , stf_input, out, IPPROTO_DONE, __LINE__);
		return (IPPROTO_DONE);
	}

	/*
	 * Translate ECN information from the outer IPv4 TOS into the inner
	 * IPv6 traffic class.  IFF_LINK1 selects full ECN passthrough.
	 */
	itos = IPV6_TRAFFIC_CLASS(ip6);
	if ((ifp->if_flags & IFF_LINK1) != 0)
		ip_ecn_egress(ECN_ALLOWED, &otos, &itos);
	else
		ip_ecn_egress(ECN_NOCARE, &otos, &itos);
	ip6->ip6_flow &= ~htonl(0xff << 20);
	ip6->ip6_flow |= htonl((u_int32_t)itos << 20);

	m->m_pkthdr.rcvif = ifp;

	if (bpf_peers_present(ifp->if_bpf)) {
		/*
		 * We need to prepend the address family as
		 * a four byte field.  Cons up a dummy header
		 * to pacify bpf.  This is safe because bpf
		 * will only read from the mbuf (i.e., it won't
		 * try to free it or keep a pointer a to it).
		 */
		u_int32_t af = AF_INET6;
		bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m);
	}

	/*
	 * Put the packet to the network layer input queue according to the
	 * specified address family.
	 * See net/if_gif.c for possible issues with packet processing
	 * reorder due to extra queueing.
	 */
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
	M_SETFIB(m, ifp->if_fib);
	netisr_dispatch(NETISR_IPV6, m);
	SDT_PROBE2(if_stf, , stf_input, out, IPPROTO_DONE, __LINE__);
	return (IPPROTO_DONE);
}
static struct sockaddr_in *
stf_getin4addr_in6(struct stf_softc *sc, struct sockaddr_in *sin,
struct in6_addr addr6, struct in6_addr mask6, struct in6_addr in6)
{
int i;
struct sockaddr_in *out;
/*
* When (src addr & src mask) != (in6 & src mask),
* the dst is not in the 6rd domain. The IPv4 address must
* not be used.
*/
for (i = 0; i < sizeof(addr6); i++) {
if ((((u_char *)&addr6)[i] & ((u_char *)&mask6)[i]) !=
(((u_char *)&in6)[i] & ((u_char *)&mask6)[i])) {
SDT_PROBE4(if_stf, , getin4addr_in6, out, &addr6,
&mask6, &in6, NULL);
return (NULL);
}
}
/* After the mask check, use in6 instead of addr6. */
out = stf_getin4addr(sc, sin, in6, mask6);
SDT_PROBE4(if_stf, , getin4addr_in6, out, &addr6, &mask6, &in6, out);
return (out);
}
/*
 * Extract the IPv4 tunnel address embedded in 'addr6' under prefix mask
 * 'mask6' into 'sin'.  Handles both 6to4 (RFC 3056) and 6rd (RFC 5569)
 * address layouts.  Returns 'sin' on success, NULL when no valid IPv4
 * address can be derived.
 */
static struct sockaddr_in *
stf_getin4addr(struct stf_softc *sc, struct sockaddr_in *sin,
    struct in6_addr addr6, struct in6_addr mask6)
{
	struct in_addr *in;

	SDT_PROBE2(if_stf, , getin4addr, in, &addr6, &mask6);
	memset(sin, 0, sizeof(*sin));
	in = &sin->sin_addr;
	if (IN6_IS_ADDR_6TO4(&addr6)) {
		/* 6to4 (RFC 3056): copy the embedded IPv4 address. */
		bcopy(GET_V4(&addr6), in, sizeof(*in));
		if (isrfc1918addr(in))
			return (NULL);
	} else {
		/* 6rd (RFC 5569) */
		in_addr_t v4prefix;
		uint8_t *v6 = (uint8_t *)&addr6;
		uint64_t v6prefix;
		u_int plen;
		u_int v4suffixlen;

		/*
		 * The leading v4prefixlen bits of the IPv4 address are
		 * shared by the 6rd domain; the remaining v4suffixlen
		 * bits are encoded in the IPv6 address right after the
		 * delegated prefix.
		 */
		v4prefix = 0;
		if (sc->v4prefixlen < 32) {
			v4suffixlen = 32 - sc->v4prefixlen;
			v4prefix = ntohl(sc->srcv4_addr) &
			    (0xffffffffU << v4suffixlen);
		} else {
			MPASS(sc->v4prefixlen == 32);
			v4suffixlen = 32;
		}

		plen = in6_mask2len(&mask6, NULL);
		if (plen > 64)
			return (NULL);
		/*
		 * The embedded IPv4 suffix must fit entirely within the
		 * first 64 bits after the delegated prefix; otherwise the
		 * shift amounts below would be negative or exceed the
		 * width of v6prefix, which is undefined behavior.
		 */
		if (plen + v4suffixlen > 64)
			return (NULL);

		/* To make this simple we do not support prefixes longer than
		 * 64 bits. RFC5969 says "a 6rd delegated prefix SHOULD be /64
		 * or shorter." so this is a moderately safe assumption. */
		v6prefix = be64toh(*(uint64_t *)v6);
		/* Shift away the v6 prefix itself. */
		v6prefix <<= plen;
		v6prefix >>= plen;
		/* Now shift away everything after the v4 address. */
		v6prefix >>= 64 - plen - v4suffixlen;
		sin->sin_addr.s_addr = htonl(v4prefix | (uint32_t)v6prefix);
	}
	SDT_PROBE1(if_stf, , getin4addr, out, sin);
	return (sin);
}
/*
 * Interface ioctl handler for stf(4).  Supports driver-private 6rd
 * configuration (SIOCSDRVSPEC/SIOCGDRVSPEC), IPv6 address assignment,
 * multicast membership (IPv6 only) and MTU changes.
 */
static int
stf_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifaddr *ifa;
	struct ifdrv *ifd;
	struct ifreq *ifr;
	struct sockaddr_in sin4;
	struct stf_softc *sc_cur;
	struct stfv4args args;
	int error, mtu;

	error = 0;
	sc_cur = ifp->if_softc;

	switch (cmd) {
	case SIOCSDRVSPEC:
		/* Driver-private set requests: configure 6rd parameters. */
		ifd = (struct ifdrv *)data;
		error = priv_check(curthread, PRIV_NET_ADDIFADDR);
		if (error)
			break;
		if (ifd->ifd_cmd == STF6RD_SV4NET) {
			/* Set the shared 6rd IPv4 address and prefix len. */
			if (ifd->ifd_len != sizeof(args)) {
				error = EINVAL;
				break;
			}
			bzero(&args, sizeof(args));
			error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
			if (error)
				break;
			/* Prefix length must be in [1, 32]. */
			if (args.v4_prefixlen < 1 || args.v4_prefixlen > 32) {
				error = EINVAL;
				break;
			}
			bcopy(&args.srcv4_addr, &sc_cur->srcv4_addr,
			    sizeof(sc_cur->srcv4_addr));
			sc_cur->v4prefixlen = args.v4_prefixlen;
			SDT_PROBE3(if_stf, , ioctl, sv4net, sc_cur->srcv4_addr,
			    sc_cur->srcv4_addr, sc_cur->v4prefixlen);
		} else if (ifd->ifd_cmd == STF6RD_SBR) {
			/* Set the 6rd border relay IPv4 address. */
			if (ifd->ifd_len != sizeof(args)) {
				error = EINVAL;
				break;
			}
			bzero(&args, sizeof(args));
			error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
			if (error)
				break;
			sc_cur->braddr = args.braddr.s_addr;
			SDT_PROBE1(if_stf, , ioctl, sdstv4,
			    sc_cur->braddr);
		} else
			error = EINVAL;
		break;
	case SIOCGDRVSPEC:
		/* Driver-private get request: report 6rd parameters. */
		ifd = (struct ifdrv *)data;
		if (ifd->ifd_cmd != STF6RD_GV4NET) {
			error = EINVAL;
			break;
		}
		if (ifd->ifd_len != sizeof(args)) {
			error = EINVAL;
			break;
		}
		bzero(&args, sizeof(args));
		args.srcv4_addr.s_addr = sc_cur->srcv4_addr;
		args.braddr.s_addr = sc_cur->braddr;
		args.v4_prefixlen = sc_cur->v4prefixlen;
		error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
		break;
	case SIOCSIFADDR:
		/*
		 * Accept only IPv6 addresses from which a valid tunnel
		 * IPv4 address can be derived (stf_getin4addr); then mark
		 * the interface up and running.
		 */
		ifa = (struct ifaddr *)data;
		SDT_PROBE1(if_stf, , ioctl, ifaddr, ifa);
		if (ifa == NULL || ifa->ifa_addr->sa_family != AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
		if (stf_getin4addr(sc_cur, &sin4,
		    satosin6(ifa->ifa_addr)->sin6_addr,
		    satosin6(ifa->ifa_netmask)->sin6_addr) == NULL) {
			error = EINVAL;
			break;
		}
		ifp->if_flags |= IFF_UP;
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Multicast membership changes are accepted for IPv6 only. */
		ifr = (struct ifreq *)data;
		if (ifr && ifr->ifr_addr.sa_family == AF_INET6)
			;
		else
			error = EAFNOSUPPORT;
		break;
	case SIOCGIFMTU:
		break;
	case SIOCSIFMTU:
		ifr = (struct ifreq *)data;
		mtu = ifr->ifr_mtu;
		/* RFC 4213 3.2 ideal world MTU */
		if (mtu < IPV6_MINMTU || mtu > IF_MAXMTU - 20)
			return (EINVAL);
		ifp->if_mtu = mtu;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
diff --git a/sys/net/if_tuntap.c b/sys/net/if_tuntap.c
index a70efe79cbb5..00ebc9546af6 100644
--- a/sys/net/if_tuntap.c
+++ b/sys/net/if_tuntap.c
@@ -1,2051 +1,2048 @@
/* $NetBSD: if_tun.c,v 1.14 1994/06/29 06:36:25 cgd Exp $ */
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (C) 1999-2000 by Maksim Yevmenkin <m_evmenkin@yahoo.com>
* All rights reserved.
* Copyright (c) 2019 Kyle Evans <kevans@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* BASED ON:
* -------------------------------------------------------------------------
*
* Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
* Nottingham University 1987.
*
* This source may be freely distributed, however I would be interested
* in any changes that are made.
*
* This driver takes packets off the IP i/f and hands them up to a
* user process to have its wicked way with. This driver has it's
* roots in a similar driver written by Phil Cockcroft (formerly) at
* UCL. This driver is based much more on read/write/poll mode of
* operation though.
*/
#include "opt_inet.h"
#include "opt_inet6.h"
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/jail.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/sx.h>
#include <sys/syslog.h>
#include <sys/ttycom.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/signalvar.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/random.h>
#include <sys/ctype.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_private.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/netisr.h>
#include <net/route.h>
#include <net/vnet.h>
#include <netinet/in.h>
#ifdef INET
#include <netinet/ip.h>
#endif
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <net/bpf.h>
#include <net/if_tap.h>
#include <net/if_tun.h>
#include <dev/virtio/network/virtio_net.h>
#include <sys/queue.h>
#include <sys/condvar.h>
#include <security/mac/mac_framework.h>
struct tuntap_driver;
/*
* tun_list is protected by global tunmtx. Other mutable fields are
* protected by tun->tun_mtx, or by their owning subsystem. tun_dev is
* static for the duration of a tunnel interface.
*/
struct tuntap_softc {
	TAILQ_ENTRY(tuntap_softc)	 tun_list;	/* global list, under tunmtx */
	struct cdev			*tun_alias;	/* devfs alias after rename */
	struct cdev			*tun_dev;	/* character device node */
	u_short				 tun_flags;	/* misc flags */
#define	TUN_OPEN	0x0001		/* device has been opened */
#define	TUN_INITED	0x0002		/* ifnet created (see tuncreate) */
#define	TUN_UNUSED1	0x0008
#define	TUN_UNUSED2	0x0010
#define	TUN_LMODE	0x0020
#define	TUN_RWAIT	0x0040		/* a reader is sleeping on tp */
#define	TUN_ASYNC	0x0080		/* post SIGIO to tun_sigio owner */
#define	TUN_IFHEAD	0x0100
#define	TUN_DYING	0x0200		/* being destroyed; refuse busy */
#define	TUN_L2		0x0400		/* layer-2 flavor (tap/vmnet) */
#define	TUN_VMNET	0x0800		/* vmnet flavor */

#define	TUN_DRIVER_IDENT_MASK	(TUN_L2 | TUN_VMNET)
#define	TUN_READY		(TUN_OPEN | TUN_INITED)

	pid_t			 tun_pid;	/* owning pid */
	struct ifnet		*tun_ifp;	/* the interface */
	struct sigio		*tun_sigio;	/* async I/O info */
	struct tuntap_driver	*tun_drv;	/* appropriate driver */
	struct selinfo		 tun_rsel;	/* read select */
	struct mtx		 tun_mtx;	/* softc field mutex */
	struct cv		 tun_cv;	/* for ref'd dev destroy */
	struct ether_addr	 tun_ether;	/* remote address */
	int			 tun_busy;	/* busy count */
	int			 tun_vhdrlen;	/* virtio-net header length */
	struct lro_ctrl		 tun_lro;	/* for TCP LRO */
	bool			 tun_lro_ready;	/* TCP LRO initialized */
};
#define TUN2IFP(sc) ((sc)->tun_ifp)
#define TUNDEBUG if (tundebug) if_printf
#define TUN_LOCK(tp) mtx_lock(&(tp)->tun_mtx)
#define TUN_UNLOCK(tp) mtx_unlock(&(tp)->tun_mtx)
#define TUN_LOCK_ASSERT(tp) mtx_assert(&(tp)->tun_mtx, MA_OWNED);
#define TUN_VMIO_FLAG_MASK 0x0fff
/*
* Interface capabilities of a tap device that supports the virtio-net
* header.
*/
#define TAP_VNET_HDR_CAPS (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 \
| IFCAP_VLAN_HWCSUM \
| IFCAP_TSO | IFCAP_LRO \
| IFCAP_VLAN_HWTSO)
#define TAP_ALL_OFFLOAD (CSUM_TSO | CSUM_TCP | CSUM_UDP |\
CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
/*
* All mutable global variables in if_tun are locked using tunmtx, with
* the exception of tundebug, which is used unlocked, and the drivers' *clones,
* which are static after setup.
*/
static struct mtx tunmtx;
static eventhandler_tag arrival_tag;
static eventhandler_tag clone_tag;
static const char tunname[] = "tun";
static const char tapname[] = "tap";
static const char vmnetname[] = "vmnet";
static MALLOC_DEFINE(M_TUN, tunname, "Tunnel Interface");
static int tundebug = 0;
static int tundclone = 1;
static int tap_allow_uopen = 0; /* allow user devfs cloning */
static int tapuponopen = 0; /* IFF_UP on open() */
static int tapdclone = 1; /* enable devfs cloning */
static TAILQ_HEAD(,tuntap_softc) tunhead = TAILQ_HEAD_INITIALIZER(tunhead);
SYSCTL_INT(_debug, OID_AUTO, if_tun_debug, CTLFLAG_RW, &tundebug, 0, "");
static struct sx tun_ioctl_sx;
SX_SYSINIT(tun_ioctl_sx, &tun_ioctl_sx, "tun_ioctl");
SYSCTL_DECL(_net_link);
/* tun */
static SYSCTL_NODE(_net_link, OID_AUTO, tun, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"IP tunnel software network interface");
SYSCTL_INT(_net_link_tun, OID_AUTO, devfs_cloning, CTLFLAG_RWTUN, &tundclone, 0,
"Enable legacy devfs interface creation");
/* tap */
static SYSCTL_NODE(_net_link, OID_AUTO, tap, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"Ethernet tunnel software network interface");
SYSCTL_INT(_net_link_tap, OID_AUTO, user_open, CTLFLAG_RW, &tap_allow_uopen, 0,
"Enable legacy devfs interface creation for all users");
SYSCTL_INT(_net_link_tap, OID_AUTO, up_on_open, CTLFLAG_RW, &tapuponopen, 0,
"Bring interface up when /dev/tap is opened");
SYSCTL_INT(_net_link_tap, OID_AUTO, devfs_cloning, CTLFLAG_RWTUN, &tapdclone, 0,
"Enable legacy devfs interface creation");
SYSCTL_INT(_net_link_tap, OID_AUTO, debug, CTLFLAG_RW, &tundebug, 0, "");
static int tun_create_device(struct tuntap_driver *drv, int unit,
struct ucred *cr, struct cdev **dev, const char *name);
static int tun_busy_locked(struct tuntap_softc *tp);
static void tun_unbusy_locked(struct tuntap_softc *tp);
static int tun_busy(struct tuntap_softc *tp);
static void tun_unbusy(struct tuntap_softc *tp);
static int tuntap_name2info(const char *name, int *unit, int *flags);
static void tunclone(void *arg, struct ucred *cred, char *name,
int namelen, struct cdev **dev);
static void tuncreate(struct cdev *dev);
static void tundtor(void *data);
static void tunrename(void *arg, struct ifnet *ifp);
static int tunifioctl(struct ifnet *, u_long, caddr_t);
static void tuninit(struct ifnet *);
static void tunifinit(void *xtp);
static int tuntapmodevent(module_t, int, void *);
static int tunoutput(struct ifnet *, struct mbuf *,
const struct sockaddr *, struct route *ro);
static void tunstart(struct ifnet *);
static void tunstart_l2(struct ifnet *);
static int tun_clone_match(struct if_clone *ifc, const char *name);
static int tap_clone_match(struct if_clone *ifc, const char *name);
static int vmnet_clone_match(struct if_clone *ifc, const char *name);
static int tun_clone_create(struct if_clone *, char *, size_t,
struct ifc_data *, struct ifnet **);
static int tun_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);
static void tun_vnethdr_set(struct ifnet *ifp, int vhdrlen);
static d_open_t tunopen;
static d_read_t tunread;
static d_write_t tunwrite;
static d_ioctl_t tunioctl;
static d_poll_t tunpoll;
static d_kqfilter_t tunkqfilter;
static int tunkqread(struct knote *, long);
static int tunkqwrite(struct knote *, long);
static void tunkqdetach(struct knote *);
static struct filterops tun_read_filterops = {
.f_isfd = 1,
.f_attach = NULL,
.f_detach = tunkqdetach,
.f_event = tunkqread,
};
static struct filterops tun_write_filterops = {
.f_isfd = 1,
.f_attach = NULL,
.f_detach = tunkqdetach,
.f_event = tunkqwrite,
};
static struct tuntap_driver {
struct cdevsw cdevsw;
int ident_flags;
struct unrhdr *unrhdr;
struct clonedevs *clones;
ifc_match_f *clone_match_fn;
ifc_create_f *clone_create_fn;
ifc_destroy_f *clone_destroy_fn;
} tuntap_drivers[] = {
{
.ident_flags = 0,
.cdevsw = {
.d_version = D_VERSION,
.d_flags = D_NEEDMINOR,
.d_open = tunopen,
.d_read = tunread,
.d_write = tunwrite,
.d_ioctl = tunioctl,
.d_poll = tunpoll,
.d_kqfilter = tunkqfilter,
.d_name = tunname,
},
.clone_match_fn = tun_clone_match,
.clone_create_fn = tun_clone_create,
.clone_destroy_fn = tun_clone_destroy,
},
{
.ident_flags = TUN_L2,
.cdevsw = {
.d_version = D_VERSION,
.d_flags = D_NEEDMINOR,
.d_open = tunopen,
.d_read = tunread,
.d_write = tunwrite,
.d_ioctl = tunioctl,
.d_poll = tunpoll,
.d_kqfilter = tunkqfilter,
.d_name = tapname,
},
.clone_match_fn = tap_clone_match,
.clone_create_fn = tun_clone_create,
.clone_destroy_fn = tun_clone_destroy,
},
{
.ident_flags = TUN_L2 | TUN_VMNET,
.cdevsw = {
.d_version = D_VERSION,
.d_flags = D_NEEDMINOR,
.d_open = tunopen,
.d_read = tunread,
.d_write = tunwrite,
.d_ioctl = tunioctl,
.d_poll = tunpoll,
.d_kqfilter = tunkqfilter,
.d_name = vmnetname,
},
.clone_match_fn = vmnet_clone_match,
.clone_create_fn = tun_clone_create,
.clone_destroy_fn = tun_clone_destroy,
},
};
#define NDRV nitems(tuntap_drivers)
VNET_DEFINE_STATIC(struct if_clone *, tuntap_driver_cloners[NDRV]);
#define V_tuntap_driver_cloners VNET(tuntap_driver_cloners)
/*
* Mechanism for marking a tunnel device as busy so that we can safely do some
* orthogonal operations (such as operations on devices) without racing against
* tun_destroy. tun_destroy will wait on the condvar if we're at all busy or
* open, to be woken up when the condition is alleviated.
*/
/*
 * Take a busy reference on 'tp'; the softc lock must be held.
 * Fails only with EBUSY when the device is being torn down.
 */
static int
tun_busy_locked(struct tuntap_softc *tp)
{

	TUN_LOCK_ASSERT(tp);

	/*
	 * Perhaps unintuitive, but the device is busy going away.
	 * Other interpretations of EBUSY from tun_busy make little
	 * sense, since making a busy device even more busy doesn't
	 * sound like a problem.
	 */
	if ((tp->tun_flags & TUN_DYING) != 0)
		return (EBUSY);

	tp->tun_busy++;
	return (0);
}
/*
 * Drop a busy reference on 'tp'; the softc lock must be held.
 * Dropping the last reference wakes any destroyer sleeping on tun_cv.
 */
static void
tun_unbusy_locked(struct tuntap_softc *tp)
{

	TUN_LOCK_ASSERT(tp);
	KASSERT(tp->tun_busy != 0, ("tun_unbusy: called for non-busy tunnel"));

	/* Wake up anything that may be waiting on our busy tunnel. */
	if (--tp->tun_busy == 0)
		cv_broadcast(&tp->tun_cv);
}
/* Locked wrapper around tun_busy_locked(). */
static int
tun_busy(struct tuntap_softc *tp)
{
	int error;

	TUN_LOCK(tp);
	error = tun_busy_locked(tp);
	TUN_UNLOCK(tp);

	return (error);
}
/* Locked wrapper around tun_unbusy_locked(). */
static void
tun_unbusy(struct tuntap_softc *tp)
{
	TUN_LOCK(tp);
	tun_unbusy_locked(tp);
	TUN_UNLOCK(tp);
}
/*
* Sets unit and/or flags given the device name. Must be called with correct
* vnet context.
*/
/*
 * Sets unit and/or flags given the device name.  Must be called with
 * correct vnet context.  A bare driver name ("tun", "tap", "vmnet")
 * yields unit -1; "tunN" style names yield the parsed unit.
 */
static int
tuntap_name2info(const char *name, int *outunit, int *outflags)
{
	struct tuntap_driver *drv;
	char *dname;
	int flags, unit;
	bool found;

	if (name == NULL)
		return (EINVAL);

	/*
	 * dev_stdclone() takes a char * even though it never modifies the
	 * name; the second parameter only passes a suffix back, which we
	 * always request as NULL.  Cast the const away.
	 */
	dname = __DECONST(char *, name);
	found = false;
	unit = -1;
	flags = 0;
	for (u_int i = 0; i < NDRV && !found; i++) {
		drv = &tuntap_drivers[i];
		if (strcmp(name, drv->cdevsw.d_name) == 0) {
			/* Bare driver name: no unit requested. */
			unit = -1;
			flags = drv->ident_flags;
			found = true;
		} else if (dev_stdclone(dname, NULL, drv->cdevsw.d_name,
		    &unit) == 1) {
			/* "nameN": dev_stdclone filled in the unit. */
			flags = drv->ident_flags;
			found = true;
		}
	}
	if (!found)
		return (ENXIO);

	if (outunit != NULL)
		*outunit = unit;
	if (outflags != NULL)
		*outflags = flags;
	return (0);
}
/*
* Get driver information from a set of flags specified. Masks the identifying
* part of the flags and compares it against all of the available
* tuntap_drivers.
*/
/*
 * Map the identifying bits of 'tun_flags' back to the matching entry of
 * tuntap_drivers[], or NULL when no driver matches.
 */
static struct tuntap_driver *
tuntap_driver_from_flags(int tun_flags)
{
	int ident;

	ident = tun_flags & TUN_DRIVER_IDENT_MASK;
	for (u_int i = 0; i < NDRV; i++) {
		if (tuntap_drivers[i].ident_flags == ident)
			return (&tuntap_drivers[i]);
	}
	return (NULL);
}
/* Cloner match: accept names resolving to the layer-3 tun driver. */
static int
tun_clone_match(struct if_clone *ifc, const char *name)
{
	int tunflags;

	if (tuntap_name2info(name, NULL, &tunflags) != 0)
		return (0);
	return ((tunflags & TUN_L2) == 0 ? 1 : 0);
}
/* Cloner match: accept names resolving to tap (L2 but not vmnet). */
static int
tap_clone_match(struct if_clone *ifc, const char *name)
{
	int tunflags;

	if (tuntap_name2info(name, NULL, &tunflags) != 0)
		return (0);
	return ((tunflags & (TUN_L2 | TUN_VMNET)) == TUN_L2 ? 1 : 0);
}
/* Cloner match: accept names resolving to the vmnet driver. */
static int
vmnet_clone_match(struct if_clone *ifc, const char *name)
{
	int tunflags;

	if (tuntap_name2info(name, NULL, &tunflags) != 0)
		return (0);
	return ((tunflags & TUN_VMNET) != 0 ? 1 : 0);
}
/*
 * if_clone create handler: create the character device (if it does not
 * already exist) and its ifnet for the requested tun/tap/vmnet name.
 * A name without a unit ("tun") gets the next free unit assigned.
 */
static int
tun_clone_create(struct if_clone *ifc, char *name, size_t len,
    struct ifc_data *ifd, struct ifnet **ifpp)
{
	struct tuntap_driver *drv;
	struct cdev *dev;
	int err, i, tunflags, unit;

	tunflags = 0;
	/* The name here tells us exactly what we're creating */
	err = tuntap_name2info(name, &unit, &tunflags);
	if (err != 0)
		return (err);
	drv = tuntap_driver_from_flags(tunflags);
	if (drv == NULL)
		return (ENXIO);
	if (unit != -1) {
		/* If this unit number is still available that's okay. */
		if (alloc_unr_specific(drv->unrhdr, unit) == -1)
			return (EEXIST);
	} else {
		unit = alloc_unr(drv->unrhdr);
	}

	/* Rewrite the caller's buffer with the fully-qualified name. */
	snprintf(name, IFNAMSIZ, "%s%d", drv->cdevsw.d_name, unit);

	/* find any existing device, or allocate new unit number */
	dev = NULL;
	i = clone_create(&drv->clones, &drv->cdevsw, &unit, &dev, 0);
	/* No preexisting struct cdev *, create one */
	if (i != 0)
		i = tun_create_device(drv, unit, NULL, &dev, name);
	if (i == 0) {
		dev_ref(dev);
		tuncreate(dev);
		struct tuntap_softc *tp = dev->si_drv1;
		*ifpp = tp->tun_ifp;
	}
	/*
	 * NOTE(review): on tun_create_device() failure the unit number
	 * allocated above does not appear to be released here — confirm
	 * whether a free_unr() is needed on this error path.
	 */
	return (i);
}
/*
 * devfs clone event handler: on first open of /dev/<tun|tap|vmnet>[N],
 * create the character device (and, via if_clone_create, the matching
 * interface) when cloning is permitted by privilege and sysctl policy.
 */
static void
tunclone(void *arg, struct ucred *cred, char *name, int namelen,
    struct cdev **dev)
{
	char devname[SPECNAMELEN + 1];
	struct tuntap_driver *drv;
	int append_unit, i, u, tunflags;
	bool mayclone;

	if (*dev != NULL)
		return;

	tunflags = 0;
	CURVNET_SET(CRED_TO_VNET(cred));
	if (tuntap_name2info(name, &u, &tunflags) != 0)
		goto out;	/* Not recognized */

	if (u != -1 && u > IF_MAXUNIT)
		goto out;	/* Unit number too high */

	/* Superuser may always clone; others depend on the sysctls below. */
	mayclone = priv_check_cred(cred, PRIV_NET_IFCREATE) == 0;
	if ((tunflags & TUN_L2) != 0) {
		/* tap/vmnet allow user open with a sysctl */
		mayclone = (mayclone || tap_allow_uopen) && tapdclone;
	} else {
		mayclone = mayclone && tundclone;
	}

	/*
	 * If tun cloning is enabled, only the superuser can create an
	 * interface.
	 */
	if (!mayclone)
		goto out;

	/* A bare driver name needs the chosen unit appended to it. */
	if (u == -1)
		append_unit = 1;
	else
		append_unit = 0;

	drv = tuntap_driver_from_flags(tunflags);
	if (drv == NULL)
		goto out;

	/* find any existing device, or allocate new unit number */
	i = clone_create(&drv->clones, &drv->cdevsw, &u, dev, 0);
	if (i) {
		if (append_unit) {
			namelen = snprintf(devname, sizeof(devname), "%s%d",
			    name, u);
			name = devname;
		}

		i = tun_create_device(drv, u, cred, dev, name);
	}
	if (i == 0) {
		/* Reference for devfs; then create the matching ifnet. */
		dev_ref(*dev);
		if_clone_create(name, namelen, NULL);
	}
out:
	CURVNET_RESTORE();
}
/*
 * Final teardown of a tunnel device: mark it dying, wait for the busy
 * count to drain, then destroy the cdev, detach the ifnet and free the
 * softc.  The ioctl path is fenced off via tun_ioctl_sx before
 * if_softc is cleared so in-flight ioctls cannot touch a freed softc.
 */
static void
tun_destroy(struct tuntap_softc *tp)
{

	TUN_LOCK(tp);
	tp->tun_flags |= TUN_DYING;
	/* Sleep until tun_unbusy_locked() drops the last reference. */
	if (tp->tun_busy != 0)
		cv_wait_unlock(&tp->tun_cv, &tp->tun_mtx);
	else
		TUN_UNLOCK(tp);

	CURVNET_SET(TUN2IFP(tp)->if_vnet);

	/* destroy_dev will take care of any alias. */
	destroy_dev(tp->tun_dev);
	seldrain(&tp->tun_rsel);
	knlist_clear(&tp->tun_rsel.si_note, 0);
	knlist_destroy(&tp->tun_rsel.si_note);
	if ((tp->tun_flags & TUN_L2) != 0) {
		ether_ifdetach(TUN2IFP(tp));
	} else {
		bpfdetach(TUN2IFP(tp));
		if_detach(TUN2IFP(tp));
	}
	/* Block concurrent ioctls while the softc pointer is cleared. */
	sx_xlock(&tun_ioctl_sx);
	TUN2IFP(tp)->if_softc = NULL;
	sx_xunlock(&tun_ioctl_sx);
	free_unr(tp->tun_drv->unrhdr, TUN2IFP(tp)->if_dunit);
	if_free(TUN2IFP(tp));
	mtx_destroy(&tp->tun_mtx);
	cv_destroy(&tp->tun_cv);
	free(tp, M_TUN);
	CURVNET_RESTORE();
}
/*
 * if_clone destroy handler: unlink the softc from the global list and
 * run the full device teardown.
 */
static int
tun_clone_destroy(struct if_clone *ifc __unused, struct ifnet *ifp, uint32_t flags)
{
	struct tuntap_softc *tp;

	tp = ifp->if_softc;

	/* Unlink before tearing down; tun_destroy() may sleep. */
	mtx_lock(&tunmtx);
	TAILQ_REMOVE(&tunhead, tp, tun_list);
	mtx_unlock(&tunmtx);

	tun_destroy(tp);
	return (0);
}
/* Per-vnet init: attach one interface cloner per tuntap driver flavor. */
static void
vnet_tun_init(const void *unused __unused)
{
	u_int i;

	for (i = 0; i < NDRV; i++) {
		struct if_clone_addreq req = {
			.match_f = tuntap_drivers[i].clone_match_fn,
			.create_f = tuntap_drivers[i].clone_create_fn,
			.destroy_f = tuntap_drivers[i].clone_destroy_fn,
		};

		V_tuntap_driver_cloners[i] =
		    ifc_attach_cloner(tuntap_drivers[i].cdevsw.d_name, &req);
	}
}
VNET_SYSINIT(vnet_tun_init, SI_SUB_PROTO_IF, SI_ORDER_ANY,
vnet_tun_init, NULL);
/* Per-vnet uninit: detach the cloners attached in vnet_tun_init(). */
static void
vnet_tun_uninit(const void *unused __unused)
{
	u_int i;

	for (i = 0; i < NDRV; i++) {
		if_clone_detach(V_tuntap_driver_cloners[i]);
	}
}
VNET_SYSUNINIT(vnet_tun_uninit, SI_SUB_PROTO_IF, SI_ORDER_ANY,
vnet_tun_uninit, NULL);
/*
 * Module-wide teardown: deregister the event handlers, destroy every
 * remaining tunnel device, then release the per-driver clone lists,
 * unit allocators and the global mutex.
 */
static void
tun_uninit(const void *unused __unused)
{
	struct tuntap_driver *drv;
	struct tuntap_softc *tp;
	int i;

	EVENTHANDLER_DEREGISTER(ifnet_arrival_event, arrival_tag);
	EVENTHANDLER_DEREGISTER(dev_clone, clone_tag);

	mtx_lock(&tunmtx);
	while ((tp = TAILQ_FIRST(&tunhead)) != NULL) {
		TAILQ_REMOVE(&tunhead, tp, tun_list);
		/* Drop the list lock: tun_destroy() can sleep on tun_cv. */
		mtx_unlock(&tunmtx);
		tun_destroy(tp);
		mtx_lock(&tunmtx);
	}
	mtx_unlock(&tunmtx);
	for (i = 0; i < nitems(tuntap_drivers); ++i) {
		drv = &tuntap_drivers[i];
		delete_unrhdr(drv->unrhdr);
		clone_cleanup(&drv->clones);
	}
	mtx_destroy(&tunmtx);
}
SYSUNINIT(tun_uninit, SI_SUB_PROTO_IF, SI_ORDER_ANY, tun_uninit, NULL);
static struct tuntap_driver *
tuntap_driver_from_ifnet(const struct ifnet *ifp)
{
struct tuntap_driver *drv;
int i;
if (ifp == NULL)
return (NULL);
for (i = 0; i < nitems(tuntap_drivers); ++i) {
drv = &tuntap_drivers[i];
if (strcmp(ifp->if_dname, drv->cdevsw.d_name) == 0)
return (drv);
}
return (NULL);
}
/*
 * Module event handler.  MOD_LOAD sets up the global mutex, per-driver
 * clone lists and unit allocators, and registers the rename and devfs
 * clone event handlers; on failure everything already set up is undone.
 * MOD_UNLOAD is a no-op here — real teardown happens in tun_uninit()
 * so it runs after the vnet_sysuninit().
 */
static int
tuntapmodevent(module_t mod, int type, void *data)
{
	struct tuntap_driver *drv;
	int i;

	switch (type) {
	case MOD_LOAD:
		mtx_init(&tunmtx, "tunmtx", NULL, MTX_DEF);
		for (i = 0; i < nitems(tuntap_drivers); ++i) {
			drv = &tuntap_drivers[i];
			clone_setup(&drv->clones);
			drv->unrhdr = new_unrhdr(0, IF_MAXUNIT, &tunmtx);
		}
		arrival_tag = EVENTHANDLER_REGISTER(ifnet_arrival_event,
		    tunrename, 0, 1000);
		if (arrival_tag == NULL)
			goto fail;
		clone_tag = EVENTHANDLER_REGISTER(dev_clone, tunclone, 0,
		    1000);
		if (clone_tag == NULL) {
			EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
			    arrival_tag);
			goto fail;
		}
		break;
	case MOD_UNLOAD:
		/* See tun_uninit, so it's done after the vnet_sysuninit() */
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
fail:
	/*
	 * Partial load: release the clone lists, unit allocators and the
	 * mutex initialized above so a failed load does not leak them.
	 */
	for (i = 0; i < nitems(tuntap_drivers); ++i) {
		drv = &tuntap_drivers[i];
		delete_unrhdr(drv->unrhdr);
		clone_cleanup(&drv->clones);
	}
	mtx_destroy(&tunmtx);
	return (ENOMEM);
}
static moduledata_t tuntap_mod = {
"if_tuntap",
tuntapmodevent,
0
};
/* We'll only ever have these two, so no need for a macro. */
static moduledata_t tun_mod = { "if_tun", NULL, 0 };
static moduledata_t tap_mod = { "if_tap", NULL, 0 };
DECLARE_MODULE(if_tuntap, tuntap_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_tuntap, 1);
DECLARE_MODULE(if_tun, tun_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_tun, 1);
DECLARE_MODULE(if_tap, tap_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_tap, 1);
/*
 * Allocate a softc and create the character device node for unit 'unit'
 * of driver 'drv'.  On success the new softc is linked onto the global
 * tunhead list and *dev points at the new node; on failure everything
 * allocated here is released and an errno is returned.
 */
static int
tun_create_device(struct tuntap_driver *drv, int unit, struct ucred *cr,
    struct cdev **dev, const char *name)
{
	struct make_dev_args args;
	struct tuntap_softc *tp;
	int error;

	tp = malloc(sizeof(*tp), M_TUN, M_WAITOK | M_ZERO);
	mtx_init(&tp->tun_mtx, "tun_mtx", NULL, MTX_DEF);
	cv_init(&tp->tun_cv, "tun_condvar");
	tp->tun_flags = drv->ident_flags;
	tp->tun_drv = drv;

	make_dev_args_init(&args);
	if (cr != NULL)
		args.mda_flags = MAKEDEV_REF | MAKEDEV_CHECKNAME;
	args.mda_devsw = &drv->cdevsw;
	args.mda_cr = cr;
	args.mda_uid = UID_UUCP;
	args.mda_gid = GID_DIALER;
	args.mda_mode = 0600;
	args.mda_unit = unit;
	args.mda_si_drv1 = tp;
	error = make_dev_s(&args, dev, "%s", name);
	if (error != 0) {
		/*
		 * Destroy the condvar and mutex initialized above before
		 * freeing the softc; freeing them while still initialized
		 * leaks lock state (e.g. WITNESS records).
		 */
		cv_destroy(&tp->tun_cv);
		mtx_destroy(&tp->tun_mtx);
		free(tp, M_TUN);
		return (error);
	}

	KASSERT((*dev)->si_drv1 != NULL,
	    ("Failed to set si_drv1 at %s creation", name));
	tp->tun_dev = *dev;
	knlist_init_mtx(&tp->tun_rsel.si_note, &tp->tun_mtx);
	mtx_lock(&tunmtx);
	TAILQ_INSERT_TAIL(&tunhead, tp, tun_list);
	mtx_unlock(&tunmtx);
	return (0);
}
/*
 * if_start handler for layer-3 tun interfaces.  Nothing is dequeued
 * here; this only notifies any waiting reader (sleep, select/poll,
 * kqueue, or async SIGIO owner) that a packet is available on if_snd.
 */
static void
tunstart(struct ifnet *ifp)
{
	struct tuntap_softc *tp = ifp->if_softc;
	struct mbuf *m;

	TUNDEBUG(ifp, "starting\n");
	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
		/* With ALTQ, peek first and bail if nothing is queued. */
		IFQ_LOCK(&ifp->if_snd);
		IFQ_POLL_NOLOCK(&ifp->if_snd, m);
		if (m == NULL) {
			IFQ_UNLOCK(&ifp->if_snd);
			return;
		}
		IFQ_UNLOCK(&ifp->if_snd);
	}

	TUN_LOCK(tp);
	if (tp->tun_flags & TUN_RWAIT) {
		/* A reader is sleeping on tp; wake it. */
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup(tp);
	}
	selwakeuppri(&tp->tun_rsel, PZERO + 1);
	KNOTE_LOCKED(&tp->tun_rsel.si_note, 0);
	if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio) {
		/* Drop the softc lock before posting the signal. */
		TUN_UNLOCK(tp);
		pgsigio(&tp->tun_sigio, SIGIO, 0);
	} else
		TUN_UNLOCK(tp);
}
/*
* tunstart_l2
*
* queue packets from higher level ready to put out
*/
/*
 * if_start handler for layer-2 (tap/vmnet) interfaces.  If a non-vmnet
 * device is not ready, all queued output is dropped; otherwise waiting
 * readers are notified, as in tunstart().
 */
static void
tunstart_l2(struct ifnet *ifp)
{
	struct tuntap_softc *tp = ifp->if_softc;

	TUNDEBUG(ifp, "starting\n");

	/*
	 * do not junk pending output if we are in VMnet mode.
	 * XXX: can this do any harm because of queue overflow?
	 */

	TUN_LOCK(tp);
	if (((tp->tun_flags & TUN_VMNET) == 0) &&
	    ((tp->tun_flags & TUN_READY) != TUN_READY)) {
		struct mbuf *m;

		/* Unlocked read. */
		TUNDEBUG(ifp, "not ready, tun_flags = 0x%x\n", tp->tun_flags);

		/* Drain and drop everything queued, counting each as an
		 * output error. */
		for (;;) {
			IF_DEQUEUE(&ifp->if_snd, m);
			if (m != NULL) {
				m_freem(m);
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			} else
				break;
		}
		TUN_UNLOCK(tp);

		return;
	}

	ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		if (tp->tun_flags & TUN_RWAIT) {
			/* A reader is sleeping on tp; wake it. */
			tp->tun_flags &= ~TUN_RWAIT;
			wakeup(tp);
		}

		if ((tp->tun_flags & TUN_ASYNC) && (tp->tun_sigio != NULL)) {
			/* Drop the softc lock across the signal delivery. */
			TUN_UNLOCK(tp);
			pgsigio(&tp->tun_sigio, SIGIO, 0);
			TUN_LOCK(tp);
		}

		selwakeuppri(&tp->tun_rsel, PZERO+1);
		KNOTE_LOCKED(&tp->tun_rsel.si_note, 0);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);	/* obytes are counted in ether_output */
	}

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	TUN_UNLOCK(tp);
} /* tunstart_l2 */
/*
 * if_transmit handler for tap/vmnet interfaces: tap the outbound frame
 * for BPF listeners, then hand it off to the interface send queue.
 */
static int
tap_transmit(struct ifnet *ifp, struct mbuf *m)
{
	int error;

	BPF_MTAP(ifp, m);
	IFQ_HANDOFF(ifp, m, error);
	return (error);
}
/* XXX: should return an error code so it can fail. */
/*
 * Create and attach the ifnet for an already-created character device.
 * L2 flavors (tap/vmnet) get an Ethernet interface with checksum/LRO
 * capabilities; the L3 flavor gets a point-to-point interface with a
 * BPF DLT_NULL attachment.
 */
static void
tuncreate(struct cdev *dev)
{
	struct tuntap_driver *drv;
	struct tuntap_softc *tp;
	struct ifnet *ifp;
	struct ether_addr eaddr;
	int iflags;
	u_char type;

	tp = dev->si_drv1;
	KASSERT(tp != NULL,
	    ("si_drv1 should have been initialized at creation"));

	drv = tp->tun_drv;
	iflags = IFF_MULTICAST;
	if ((tp->tun_flags & TUN_L2) != 0) {
		type = IFT_ETHER;
		iflags |= IFF_BROADCAST | IFF_SIMPLEX;
	} else {
		type = IFT_PPP;
		iflags |= IFF_POINTOPOINT;
	}
	ifp = tp->tun_ifp = if_alloc(type);
	ifp->if_softc = tp;
	if_initname(ifp, drv->cdevsw.d_name, dev2unit(dev));
	ifp->if_ioctl = tunifioctl;
	ifp->if_flags = iflags;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_capabilities |= IFCAP_LINKSTATE;
	if ((tp->tun_flags & TUN_L2) != 0)
		ifp->if_capabilities |=
		    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO;
	ifp->if_capenable |= IFCAP_LINKSTATE;

	if ((tp->tun_flags & TUN_L2) != 0) {
		ifp->if_init = tunifinit;
		ifp->if_start = tunstart_l2;
		ifp->if_transmit = tap_transmit;
		ifp->if_qflush = if_qflush;

		/* Generate a stable random MAC and attach as Ethernet. */
		ether_gen_addr(ifp, &eaddr);
		ether_ifattach(ifp, eaddr.octet);
	} else {
		ifp->if_mtu = TUNMTU;
		ifp->if_start = tunstart;
		ifp->if_output = tunoutput;

		ifp->if_snd.ifq_drv_maxlen = 0;
		IFQ_SET_READY(&ifp->if_snd);

		if_attach(ifp);
		bpfattach(ifp, DLT_NULL, sizeof(u_int32_t));
	}

	TUN_LOCK(tp);
	tp->tun_flags |= TUN_INITED;
	TUN_UNLOCK(tp);

	TUNDEBUG(ifp, "interface %s is created, minor = %#x\n",
	    ifp->if_xname, dev2unit(dev));
}
/*
 * ifnet arrival event handler, used here to catch interface renames:
 * keep a /dev alias matching the interface's new name so the node can
 * still be found under the ifname.  Alias creation is best effort.
 */
static void
tunrename(void *arg __unused, struct ifnet *ifp)
{
	struct tuntap_softc *tp;
	int error;

	if ((ifp->if_flags & IFF_RENAMING) == 0)
		return;

	if (tuntap_driver_from_ifnet(ifp) == NULL)
		return;

	/*
	 * We need to grab the ioctl sx long enough to make sure the softc is
	 * still there.  If it is, we can safely try to busy the tun device.
	 * The busy may fail if the device is currently dying, in which case
	 * we do nothing.  If it doesn't fail, the busy count stops the device
	 * from dying until we've created the alias (that will then be
	 * subsequently destroyed).
	 */
	sx_xlock(&tun_ioctl_sx);
	tp = ifp->if_softc;
	if (tp == NULL) {
		sx_xunlock(&tun_ioctl_sx);
		return;
	}
	error = tun_busy(tp);
	sx_xunlock(&tun_ioctl_sx);
	if (error != 0)
		return;
	if (tp->tun_alias != NULL) {
		/* Drop any alias created for a previous name. */
		destroy_dev(tp->tun_alias);
		tp->tun_alias = NULL;
	}

	/* If the new name already matches the real node, no alias needed. */
	if (strcmp(ifp->if_xname, tp->tun_dev->si_name) == 0)
		goto out;

	/*
	 * Failure's ok, aliases are created on a best effort basis.  If a
	 * tun user/consumer decides to rename the interface to conflict with
	 * another device (non-ifnet) on the system, we will assume they know
	 * what they are doing.  make_dev_alias_p won't touch tun_alias on
	 * failure, so we use it but ignore the return value.
	 */
	make_dev_alias_p(MAKEDEV_CHECKNAME, &tp->tun_alias, tp->tun_dev, "%s",
	    ifp->if_xname);
out:
	tun_unbusy(tp);
}
/*
 * tunopen - cdev open.  tun/tap devices are single-open: fails with
 * ENXIO if the ifnet is not yet initialized and EBUSY if the device is
 * already open or dying.  On success the device is busied, the opening
 * process becomes the controller, and link state is raised.
 */
static int
tunopen(struct cdev *dev, int flag, int mode, struct thread *td)
{
	struct ifnet *ifp;
	struct tuntap_softc *tp;
	int error __diagused, tunflags;

	tunflags = 0;
	CURVNET_SET(TD_TO_VNET(td));
	error = tuntap_name2info(dev->si_name, NULL, &tunflags);
	if (error != 0) {
		CURVNET_RESTORE();
		return (error);	/* Shouldn't happen */
	}

	tp = dev->si_drv1;
	KASSERT(tp != NULL,
	    ("si_drv1 should have been initialized at creation"));

	TUN_LOCK(tp);
	if ((tp->tun_flags & TUN_INITED) == 0) {
		TUN_UNLOCK(tp);
		CURVNET_RESTORE();
		return (ENXIO);
	}
	if ((tp->tun_flags & (TUN_OPEN | TUN_DYING)) != 0) {
		TUN_UNLOCK(tp);
		CURVNET_RESTORE();
		return (EBUSY);
	}

	error = tun_busy_locked(tp);
	KASSERT(error == 0, ("Must be able to busy an unopen tunnel"));
	ifp = TUN2IFP(tp);

	if ((tp->tun_flags & TUN_L2) != 0) {
		/* Cache the current MAC so SIOCGIFADDR on the cdev works. */
		bcopy(IF_LLADDR(ifp), tp->tun_ether.octet,
		    sizeof(tp->tun_ether.octet));

		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		if (tapuponopen)
			ifp->if_flags |= IFF_UP;
	}

	/* The opener becomes the controlling process. */
	tp->tun_pid = td->td_proc->p_pid;
	tp->tun_flags |= TUN_OPEN;

	if_link_state_change(ifp, LINK_STATE_UP);
	TUNDEBUG(ifp, "open\n");
	TUN_UNLOCK(tp);

	/*
	 * This can fail with either ENOENT or EBUSY.  This is in the middle of
	 * d_open, so ENOENT should not be possible.  EBUSY is possible, but
	 * the only cdevpriv dtor being set will be tundtor and the softc being
	 * passed is constant for a given cdev.  We ignore the possible error
	 * because of this as either "unlikely" or "not actually a problem."
	 */
	(void)devfs_set_cdevpriv(tp, tundtor);
	CURVNET_RESTORE();
	return (0);
}
/*
 * tundtor - tear down the device - mark i/f down & delete
 * routing info
 *
 * Runs as the cdevpriv destructor on last close: drains queued output,
 * brings the interface down, purges its addresses (unless vmnet-style),
 * wakes any readers/pollers, and drops the busy ref taken at open.
 */
static void
tundtor(void *data)
{
	struct proc *p;
	struct tuntap_softc *tp;
	struct ifnet *ifp;
	bool l2tun;

	tp = data;
	p = curproc;
	ifp = TUN2IFP(tp);

	TUN_LOCK(tp);

	/*
	 * Realistically, we can't be obstinate here.  This only means that the
	 * tuntap device was closed out of order, and the last closer wasn't the
	 * controller.  These are still good to know about, though, as software
	 * should avoid multiple processes with a tuntap device open and
	 * ill-defined transfer of control (e.g., handoff, TUNSIFPID, close in
	 * parent).
	 */
	if (p->p_pid != tp->tun_pid) {
		log(LOG_INFO,
		    "pid %d (%s), %s: tun/tap protocol violation, non-controlling process closed last.\n",
		    p->p_pid, p->p_comm, tp->tun_dev->si_name);
	}

	/*
	 * junk all pending output
	 */
	CURVNET_SET(ifp->if_vnet);

	l2tun = false;
	if ((tp->tun_flags & TUN_L2) != 0) {
		l2tun = true;
		IF_DRAIN(&ifp->if_snd);
	} else {
		IFQ_PURGE(&ifp->if_snd);
	}

	/* For vmnet, we won't do most of the address/route bits */
	if ((tp->tun_flags & TUN_VMNET) != 0 ||
	    (l2tun && (ifp->if_flags & IFF_LINK0) != 0))
		goto out;
#if defined(INET) || defined(INET6)
	if (l2tun && tp->tun_lro_ready) {
		TUNDEBUG (ifp, "LRO disabled\n");
		tcp_lro_free(&tp->tun_lro);
		tp->tun_lro_ready = false;
	}
#endif
	/* if_down() may sleep/acquire other locks, so drop ours around it. */
	if (ifp->if_flags & IFF_UP) {
		TUN_UNLOCK(tp);
		if_down(ifp);
		TUN_LOCK(tp);
	}

	/* Delete all addresses and routes which reference this interface. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		TUN_UNLOCK(tp);
		if_purgeaddrs(ifp);
		TUN_LOCK(tp);
	}

out:
	if_link_state_change(ifp, LINK_STATE_DOWN);
	CURVNET_RESTORE();

	/* Kick out blocked readers, poll(2) and kevent(2) waiters. */
	funsetown(&tp->tun_sigio);
	selwakeuppri(&tp->tun_rsel, PZERO + 1);
	KNOTE_LOCKED(&tp->tun_rsel.si_note, 0);
	TUNDEBUG (ifp, "closed\n");
	tp->tun_flags &= ~TUN_OPEN;
	tp->tun_pid = 0;
	/* Reset the virtio-net header state for the next opener. */
	tun_vnethdr_set(ifp, 0);

	tun_unbusy_locked(tp);
	TUN_UNLOCK(tp);
}
/*
 * Mark the interface running.  For L3 (tun) this also raises IFF_UP;
 * for L2 (tap) it (re)initializes LRO where available, clears OACTIVE
 * and kicks the transmit path.
 */
static void
tuninit(struct ifnet *ifp)
{
	struct tuntap_softc *tp = ifp->if_softc;

	TUNDEBUG(ifp, "tuninit\n");

	TUN_LOCK(tp);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	if ((tp->tun_flags & TUN_L2) == 0) {
		ifp->if_flags |= IFF_UP;
		getmicrotime(&ifp->if_lastchange);
		TUN_UNLOCK(tp);
	} else {
#if defined(INET) || defined(INET6)
		/* LRO init failure is non-fatal; we just run without it. */
		if (tcp_lro_init(&tp->tun_lro) == 0) {
			TUNDEBUG(ifp, "LRO enabled\n");
			tp->tun_lro.ifp = ifp;
			tp->tun_lro_ready = true;
		} else {
			TUNDEBUG(ifp, "Could not enable LRO\n");
			tp->tun_lro_ready = false;
		}
#endif
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		TUN_UNLOCK(tp);
		/* attempt to start output */
		tunstart_l2(ifp);
	}
}
/*
 * if_init method, used only for the L2 (tap) personality; a thin
 * shim that forwards to tuninit() with the softc's ifnet.
 */
static void
tunifinit(void *xtp)
{
	struct tuntap_softc *sc = xtp;

	tuninit(sc->tun_ifp);
}
/*
 * Recompute ifp->if_hwassist from the currently enabled capabilities.
 * Must be called with TUN_LOCK held; invoked whenever if_capenable or
 * the virtio-net header length changes.
 */
static void
tun_caps_changed(struct ifnet *ifp)
{
	uint64_t csum;

	TUN_LOCK_ASSERT((struct tuntap_softc *)ifp->if_softc);

	csum = 0;
	if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
		csum |= CSUM_TCP | CSUM_UDP;
	if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6) != 0)
		csum |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;
	if ((ifp->if_capenable & IFCAP_TSO4) != 0)
		csum |= CSUM_IP_TSO;
	if ((ifp->if_capenable & IFCAP_TSO6) != 0)
		csum |= CSUM_IP6_TSO;
	ifp->if_hwassist = csum;
}
/*
 * Set the virtio-net header length for this interface (0 disables it).
 * Must be called with TUN_LOCK held.  Adjusts if_capabilities to expose
 * (or hide) the offload features the header enables, trims if_capenable
 * accordingly, and refreshes if_hwassist.
 */
static void
tun_vnethdr_set(struct ifnet *ifp, int vhdrlen)
{
	struct tuntap_softc *tp = ifp->if_softc;

	TUN_LOCK_ASSERT(tp);

	/* No change, nothing to do. */
	if (tp->tun_vhdrlen == vhdrlen)
		return;

	/*
	 * A non-zero header length advertises the virtio-net offload
	 * capabilities; zero withdraws them again.
	 */
	if (vhdrlen != 0)
		ifp->if_capabilities |= TAP_VNET_HDR_CAPS;
	else
		ifp->if_capabilities &= ~TAP_VNET_HDR_CAPS;
	/* Drop any enabled capabilities we no longer advertise. */
	ifp->if_capenable &= ifp->if_capabilities;
	tun_caps_changed(ifp);
	tp->tun_vhdrlen = vhdrlen;

	TUNDEBUG(ifp, "vnet_hdr_len=%d, if_capabilities=%x\n",
	    vhdrlen, ifp->if_capabilities);
}
/*
 * Process an ioctl request.
 *
 * if_ioctl method shared by both personalities.  tun_ioctl_sx protects
 * against the softc being torn down (if_softc going NULL) while the
 * ioctl runs; for L2 interfaces unhandled commands fall through to
 * ether_ioctl().
 */
static int
tunifioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct tuntap_softc *tp;
	struct ifstat *ifs;
	struct ifmediareq *ifmr;
	int dummy, error = 0;
	bool l2tun;

	ifmr = NULL;
	sx_xlock(&tun_ioctl_sx);
	tp = ifp->if_softc;
	if (tp == NULL) {
		/* Interface is being destroyed. */
		error = ENXIO;
		goto bad;
	}
	l2tun = (tp->tun_flags & TUN_L2) != 0;
	switch(cmd) {
	case SIOCGIFSTATUS:
		/* Report which process (if any) controls the device. */
		ifs = (struct ifstat *)data;
		TUN_LOCK(tp);
		if (tp->tun_pid)
			snprintf(ifs->ascii, sizeof(ifs->ascii),
			    "\tOpened by PID %d\n", tp->tun_pid);
		else
			ifs->ascii[0] = '\0';
		TUN_UNLOCK(tp);
		break;
	case SIOCSIFADDR:
		if (l2tun)
			error = ether_ioctl(ifp, cmd, data);
		else
			tuninit(ifp);
		if (error == 0)
			TUNDEBUG(ifp, "address set\n");
		break;
	case SIOCSIFMTU:
		ifp->if_mtu = ifr->ifr_mtu;
		TUNDEBUG(ifp, "mtu set\n");
		break;
	case SIOCSIFFLAGS:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;
	case SIOCGIFMEDIA:
		/* Fake gigabit full-duplex media for the tap personality. */
		if (!l2tun) {
			error = EINVAL;
			break;
		}

		ifmr = (struct ifmediareq *)data;
		dummy = ifmr->ifm_count;
		ifmr->ifm_count = 1;
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER | IFM_FDX | IFM_1000_T;
		if (tp->tun_flags & TUN_OPEN)
			ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_current = ifmr->ifm_active;
		/* Only copy the media list out if the caller provided room. */
		if (dummy >= 1) {
			int media = IFM_ETHER;
			error = copyout(&media, ifmr->ifm_ulist, sizeof(int));
		}
		break;
	case SIOCSIFCAP:
		TUN_LOCK(tp);
		ifp->if_capenable = ifr->ifr_reqcap;
		tun_caps_changed(ifp);
		TUN_UNLOCK(tp);
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		if (l2tun) {
			error = ether_ioctl(ifp, cmd, data);
		} else {
			error = EINVAL;
		}
	}
bad:
	sx_xunlock(&tun_ioctl_sx);
	return (error);
}
/*
 * tunoutput - queue packets from higher level ready to put out.
 *
 * if_output method for the L3 (tun) personality.  Depending on the
 * TUN_LMODE/TUN_IFHEAD flags the destination sockaddr or the address
 * family (network order) is prepended so the reader can demultiplex.
 * The mbuf is consumed on every path.
 */
static int
tunoutput(struct ifnet *ifp, struct mbuf *m0, const struct sockaddr *dst,
    struct route *ro)
{
	struct tuntap_softc *tp = ifp->if_softc;
	u_short cached_tun_flags;
	int error;
	u_int32_t af;

	TUNDEBUG (ifp, "tunoutput\n");

#ifdef MAC
	error = mac_ifnet_check_transmit(ifp, m0);
	if (error) {
		m_freem(m0);
		return (error);
	}
#endif

	/* Could be unlocked read? */
	TUN_LOCK(tp);
	cached_tun_flags = tp->tun_flags;
	TUN_UNLOCK(tp);
	if ((cached_tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG (ifp, "not ready 0%o\n", tp->tun_flags);
		m_freem (m0);
		return (EHOSTDOWN);
	}

	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
		m_freem (m0);
		return (EHOSTDOWN);
	}

	/* BPF writes need to be handled specially. */
	if (dst->sa_family == AF_UNSPEC || dst->sa_family == pseudo_AF_HDRCMPLT)
		bcopy(dst->sa_data, &af, sizeof(af));
	else
		af = RO_GET_FAMILY(ro, dst);

	BPF_MTAP2(ifp, &af, sizeof(af), m0);

	/* prepend sockaddr? this may abort if the mbuf allocation fails */
	if (cached_tun_flags & TUN_LMODE) {
		/* allocate space for sockaddr */
		M_PREPEND(m0, dst->sa_len, M_NOWAIT);

		/* if allocation failed drop packet */
		if (m0 == NULL) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			return (ENOBUFS);
		} else {
			bcopy(dst, m0->m_data, dst->sa_len);
		}
	}

	if (cached_tun_flags & TUN_IFHEAD) {
		/* Prepend the address family */
		M_PREPEND(m0, 4, M_NOWAIT);

		/* if allocation failed drop packet */
		if (m0 == NULL) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			return (ENOBUFS);
		} else
			*(u_int32_t *)m0->m_data = htonl(af);
	} else {
#ifdef INET
		/* Without IFHEAD only IPv4 can be demultiplexed by reader. */
		if (af != AF_INET)
#endif
		{
			m_freem(m0);
			return (EAFNOSUPPORT);
		}
	}

	/* if_transmit consumes the mbuf on both success and failure. */
	error = (ifp->if_transmit)(ifp, m0);
	if (error)
		return (ENOBUFS);
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	return (0);
}
/*
 * the cdevsw interface is now pretty minimal.
 *
 * Character-device ioctl handler.  Personality-specific commands (tap:
 * VMware compat, MAC address, virtio-net header; tun: link-mode and
 * if-head framing, TUNSIFMODE/TUNSIFPID) are tried first; anything not
 * handled there falls through to the common command set.
 */
static int
tunioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag,
    struct thread *td)
{
	struct ifreq ifr, *ifrp;
	struct tuntap_softc *tp = dev->si_drv1;
	struct ifnet *ifp = TUN2IFP(tp);
	struct tuninfo *tunp;
	int error, iflags, ival;
	bool l2tun;

	l2tun = (tp->tun_flags & TUN_L2) != 0;
	if (l2tun) {
		/* tap specific ioctls */
		switch(cmd) {
		/* VMware/VMnet port ioctl's */
#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD4)
		case _IO('V', 0):
			ival = IOCPARM_IVAL(data);
			data = (caddr_t)&ival;
			/* FALLTHROUGH */
#endif
		case VMIO_SIOCSIFFLAGS: /* VMware/VMnet SIOCSIFFLAGS */
			iflags = *(int *)data;
			iflags &= TUN_VMIO_FLAG_MASK;
			iflags &= ~IFF_CANTCHANGE;
			iflags |= IFF_UP;

			TUN_LOCK(tp);
			ifp->if_flags = iflags |
			    (ifp->if_flags & IFF_CANTCHANGE);
			TUN_UNLOCK(tp);

			return (0);
		case SIOCGIFADDR:	/* get MAC address of the remote side */
			TUN_LOCK(tp);
			bcopy(&tp->tun_ether.octet, data,
			    sizeof(tp->tun_ether.octet));
			TUN_UNLOCK(tp);

			return (0);
		case SIOCSIFADDR:	/* set MAC address of the remote side */
			TUN_LOCK(tp);
			bcopy(data, &tp->tun_ether.octet,
			    sizeof(tp->tun_ether.octet));
			TUN_UNLOCK(tp);

			return (0);
		case TAPSVNETHDR:
			/* Only the two known virtio header sizes (or 0). */
			ival = *(int *)data;
			if (ival != 0 &&
			    ival != sizeof(struct virtio_net_hdr) &&
			    ival != sizeof(struct virtio_net_hdr_mrg_rxbuf)) {
				return (EINVAL);
			}
			TUN_LOCK(tp);
			tun_vnethdr_set(ifp, ival);
			TUN_UNLOCK(tp);

			return (0);
		case TAPGVNETHDR:
			TUN_LOCK(tp);
			*(int *)data = tp->tun_vhdrlen;
			TUN_UNLOCK(tp);

			return (0);
		}

		/* Fall through to the common ioctls if unhandled */
	} else {
		switch (cmd) {
		case TUNSLMODE:
			/* Link-layer mode and if-head are mutually exclusive. */
			TUN_LOCK(tp);
			if (*(int *)data) {
				tp->tun_flags |= TUN_LMODE;
				tp->tun_flags &= ~TUN_IFHEAD;
			} else
				tp->tun_flags &= ~TUN_LMODE;
			TUN_UNLOCK(tp);

			return (0);
		case TUNSIFHEAD:
			TUN_LOCK(tp);
			if (*(int *)data) {
				tp->tun_flags |= TUN_IFHEAD;
				tp->tun_flags &= ~TUN_LMODE;
			} else
				tp->tun_flags &= ~TUN_IFHEAD;
			TUN_UNLOCK(tp);

			return (0);
		case TUNGIFHEAD:
			TUN_LOCK(tp);
			*(int *)data = (tp->tun_flags & TUN_IFHEAD) ? 1 : 0;
			TUN_UNLOCK(tp);

			return (0);
		case TUNSIFMODE:
			/* deny this if UP */
			if (TUN2IFP(tp)->if_flags & IFF_UP)
				return (EBUSY);

			switch (*(int *)data & ~IFF_MULTICAST) {
			case IFF_POINTOPOINT:
			case IFF_BROADCAST:
				TUN_LOCK(tp);
				TUN2IFP(tp)->if_flags &=
				    ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
				TUN2IFP(tp)->if_flags |= *(int *)data;
				TUN_UNLOCK(tp);

				break;
			default:
				return (EINVAL);
			}

			return (0);
		case TUNSIFPID:
			/* Transfer controller status to the calling process. */
			TUN_LOCK(tp);
			tp->tun_pid = curthread->td_proc->p_pid;
			TUN_UNLOCK(tp);

			return (0);
		}
		/* Fall through to the common ioctls if unhandled */
	}

	switch (cmd) {
	case TUNGIFNAME:
		ifrp = (struct ifreq *)data;
		strlcpy(ifrp->ifr_name, TUN2IFP(tp)->if_xname, IFNAMSIZ);

		return (0);
	case TUNSIFINFO:
		tunp = (struct tuninfo *)data;
		/* The interface type is immutable. */
		if (TUN2IFP(tp)->if_type != tunp->type)
			return (EPROTOTYPE);
		TUN_LOCK(tp);
		if (TUN2IFP(tp)->if_mtu != tunp->mtu) {
			/* Route the MTU change through ifhwioctl so all the
			 * usual side effects (notifications etc.) happen. */
			strlcpy(ifr.ifr_name, if_name(TUN2IFP(tp)), IFNAMSIZ);
			ifr.ifr_mtu = tunp->mtu;
			CURVNET_SET(TUN2IFP(tp)->if_vnet);
			error = ifhwioctl(SIOCSIFMTU, TUN2IFP(tp),
			    (caddr_t)&ifr, td);
			CURVNET_RESTORE();
			if (error) {
				TUN_UNLOCK(tp);
				return (error);
			}
		}
		TUN2IFP(tp)->if_baudrate = tunp->baudrate;
		TUN_UNLOCK(tp);
		break;
	case TUNGIFINFO:
		tunp = (struct tuninfo *)data;
		TUN_LOCK(tp);
		tunp->mtu = TUN2IFP(tp)->if_mtu;
		tunp->type = TUN2IFP(tp)->if_type;
		tunp->baudrate = TUN2IFP(tp)->if_baudrate;
		TUN_UNLOCK(tp);
		break;
	case TUNSDEBUG:
		tundebug = *(int *)data;
		break;
	case TUNGDEBUG:
		*(int *)data = tundebug;
		break;
	case FIONBIO:
		break;
	case FIOASYNC:
		TUN_LOCK(tp);
		if (*(int *)data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		TUN_UNLOCK(tp);
		break;
	case FIONREAD:
		/* Report the byte length of the packet at the queue head. */
		if (!IFQ_IS_EMPTY(&TUN2IFP(tp)->if_snd)) {
			struct mbuf *mb;

			IFQ_LOCK(&TUN2IFP(tp)->if_snd);
			IFQ_POLL_NOLOCK(&TUN2IFP(tp)->if_snd, mb);
			for (*(int *)data = 0; mb != NULL; mb = mb->m_next)
				*(int *)data += mb->m_len;
			IFQ_UNLOCK(&TUN2IFP(tp)->if_snd);
		} else
			*(int *)data = 0;
		break;
	case FIOSETOWN:
		return (fsetown(*(int *)data, &tp->tun_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(&tp->tun_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)data), &tp->tun_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)data = -fgetown(&tp->tun_sigio);
		return (0);

	default:
		return (ENOTTY);
	}
	return (0);
}
/*
 * The cdevsw read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 *
 * Blocks (unless O_NONBLOCK) until a packet is queued on if_snd.  If a
 * virtio-net header is configured, one is synthesized and copied out
 * ahead of the packet data.  Any packet bytes that do not fit in the
 * caller's buffer are dropped.
 */
static int
tunread(struct cdev *dev, struct uio *uio, int flag)
{
	struct tuntap_softc *tp = dev->si_drv1;
	struct ifnet *ifp = TUN2IFP(tp);
	struct mbuf *m;
	size_t len;
	int error = 0;

	TUNDEBUG (ifp, "read\n");
	TUN_LOCK(tp);
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUN_UNLOCK(tp);
		TUNDEBUG (ifp, "not ready 0%o\n", tp->tun_flags);
		return (EHOSTDOWN);
	}

	tp->tun_flags &= ~TUN_RWAIT;

	/* Dequeue a packet, sleeping until one arrives if allowed to. */
	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m != NULL)
			break;
		if (flag & O_NONBLOCK) {
			TUN_UNLOCK(tp);
			return (EWOULDBLOCK);
		}
		tp->tun_flags |= TUN_RWAIT;
		error = mtx_sleep(tp, &tp->tun_mtx, PCATCH | (PZERO + 1),
		    "tunread", 0);
		if (error != 0) {
			TUN_UNLOCK(tp);
			return (error);
		}
	}
	TUN_UNLOCK(tp);

	/*
	 * NOTE(review): tun_vhdrlen is read here without TUN_LOCK;
	 * presumably it is stable while the device is open — confirm.
	 */
	len = min(tp->tun_vhdrlen, uio->uio_resid);
	if (len > 0) {
		struct virtio_net_hdr_mrg_rxbuf vhdr;

		bzero(&vhdr, sizeof(vhdr));
		if (m->m_pkthdr.csum_flags & TAP_ALL_OFFLOAD) {
			m = virtio_net_tx_offload(ifp, m, false, &vhdr.hdr);
		}

		TUNDEBUG(ifp, "txvhdr: f %u, gt %u, hl %u, "
		    "gs %u, cs %u, co %u\n", vhdr.hdr.flags,
		    vhdr.hdr.gso_type, vhdr.hdr.hdr_len,
		    vhdr.hdr.gso_size, vhdr.hdr.csum_start,
		    vhdr.hdr.csum_offset);
		error = uiomove(&vhdr, len, uio);
	}

	/* Copy out as much of the packet as fits, freeing as we go. */
	while (m && uio->uio_resid > 0 && error == 0) {
		len = min(uio->uio_resid, m->m_len);
		if (len != 0)
			error = uiomove(mtod(m, void *), len, uio);
		m = m_free(m);
	}

	/* Whatever did not fit is silently discarded. */
	if (m) {
		TUNDEBUG(ifp, "Dropping mbuf\n");
		m_freem(m);
	}
	return (error);
}
/*
 * Inject a frame written to an L2 (tap) device into the network stack.
 * Frames a real NIC would not have received (wrong unicast destination
 * while not promiscuous) are dropped.  Checksum state is taken from the
 * optional virtio-net header, or faked per RXCSUM capabilities.  The
 * mbuf is consumed on all paths; always returns 0.
 */
static int
tunwrite_l2(struct tuntap_softc *tp, struct mbuf *m,
    struct virtio_net_hdr_mrg_rxbuf *vhdr)
{
	struct epoch_tracker et;
	struct ether_header *eh;
	struct ifnet *ifp;

	ifp = TUN2IFP(tp);

	/*
	 * Only pass a unicast frame to ether_input(), if it would
	 * actually have been received by non-virtual hardware.
	 */
	if (m->m_len < sizeof(struct ether_header)) {
		m_freem(m);
		return (0);
	}

	eh = mtod(m, struct ether_header *);
	if ((ifp->if_flags & IFF_PROMISC) == 0 &&
	    !ETHER_IS_MULTICAST(eh->ether_dhost) &&
	    bcmp(eh->ether_dhost, IF_LLADDR(ifp), ETHER_ADDR_LEN) != 0) {
		m_freem(m);
		return (0);
	}

	if (vhdr != NULL) {
		if (virtio_net_rx_csum(m, &vhdr->hdr)) {
			m_freem(m);
			return (0);
		}
	} else {
		/* No virtio header: mark checksums valid per capabilities. */
		switch (ntohs(eh->ether_type)) {
#ifdef INET
		case ETHERTYPE_IP:
			if (ifp->if_capenable & IFCAP_RXCSUM) {
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID |
				    CSUM_DATA_VALID | CSUM_SCTP_VALID |
				    CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
			break;
#endif
#ifdef INET6
		case ETHERTYPE_IPV6:
			if (ifp->if_capenable & IFCAP_RXCSUM_IPV6) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID_IPV6 | CSUM_SCTP_VALID |
				    CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
			break;
#endif
		}
	}

	/* Pass packet up to parent. */
	CURVNET_SET(ifp->if_vnet);
	NET_EPOCH_ENTER(et);
#if defined(INET) || defined(INET6)
	/* Try LRO first; fall back to plain if_input on failure. */
	if (tp->tun_lro_ready && ifp->if_capenable & IFCAP_LRO &&
	    tcp_lro_rx(&tp->tun_lro, m, 0) == 0)
		tcp_lro_flush_all(&tp->tun_lro);
	else
#endif
		(*ifp->if_input)(ifp, m);
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();
	/* ibytes are counted in parent */
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	return (0);
}
/*
 * Inject a packet written to an L3 (tun) device into the network stack.
 * With TUN_IFHEAD set the first 4 bytes carry the address family in
 * network order; otherwise IPv4 is assumed.  The packet is dispatched
 * through netisr in the interface's vnet and FIB.
 */
static int
tunwrite_l3(struct tuntap_softc *tp, struct mbuf *m)
{
	struct epoch_tracker et;
	struct ifnet *ifp;
	int family, isr;

	ifp = TUN2IFP(tp);
	/* Could be unlocked read? */
	TUN_LOCK(tp);
	if (tp->tun_flags & TUN_IFHEAD) {
		TUN_UNLOCK(tp);
		/* Pull up and strip the leading address-family word. */
		if (m->m_len < sizeof(family) &&
		    (m = m_pullup(m, sizeof(family))) == NULL)
			return (ENOBUFS);
		family = ntohl(*mtod(m, u_int32_t *));
		m_adj(m, sizeof(family));
	} else {
		TUN_UNLOCK(tp);
		family = AF_INET;
	}

	BPF_MTAP2(ifp, &family, sizeof(family), m);

	switch (family) {
#ifdef INET
	case AF_INET:
		isr = NETISR_IP;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		isr = NETISR_IPV6;
		break;
#endif
	default:
		m_freem(m);
		return (EAFNOSUPPORT);
	}
	random_harvest_queue(m, sizeof(*m), RANDOM_NET_TUN);
	if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	CURVNET_SET(ifp->if_vnet);
	M_SETFIB(m, ifp->if_fib);
	NET_EPOCH_ENTER(et);
	netisr_dispatch(isr, m);
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();
	return (0);
}
/*
 * the cdevsw write interface - an atomic write is a packet - or else!
 *
 * Validates the write length against the personality's MRU (adjusted
 * for the configured virtio-net header and, for tun+IFHEAD, the family
 * word), consumes the optional virtio header, wraps the payload in an
 * mbuf chain and hands it to the L2 or L3 injection path.
 */
static int
tunwrite(struct cdev *dev, struct uio *uio, int flag)
{
	struct virtio_net_hdr_mrg_rxbuf vhdr;
	struct tuntap_softc *tp;
	struct ifnet *ifp;
	struct mbuf *m;
	uint32_t mru;
	int align, vhdrlen, error;
	bool l2tun;

	tp = dev->si_drv1;
	ifp = TUN2IFP(tp);
	TUNDEBUG(ifp, "tunwrite\n");
	if ((ifp->if_flags & IFF_UP) != IFF_UP)
		/* ignore silently */
		return (0);

	if (uio->uio_resid == 0)
		return (0);

	l2tun = (tp->tun_flags & TUN_L2) != 0;
	mru = l2tun ? TAPMRU : TUNMRU;
	vhdrlen = tp->tun_vhdrlen;
	align = 0;
	if (l2tun) {
		/* Align the IP header that follows the ethernet header. */
		align = ETHER_ALIGN;
		mru += vhdrlen;
	} else if ((tp->tun_flags & TUN_IFHEAD) != 0)
		mru += sizeof(uint32_t);	/* family */
	if (uio->uio_resid < 0 || uio->uio_resid > mru) {
		TUNDEBUG(ifp, "len=%zd!\n", uio->uio_resid);
		return (EIO);
	}

	if (vhdrlen > 0) {
		error = uiomove(&vhdr, vhdrlen, uio);
		if (error != 0)
			return (error);
		TUNDEBUG(ifp, "txvhdr: f %u, gt %u, hl %u, "
		    "gs %u, cs %u, co %u\n", vhdr.hdr.flags,
		    vhdr.hdr.gso_type, vhdr.hdr.hdr_len,
		    vhdr.hdr.gso_size, vhdr.hdr.csum_start,
		    vhdr.hdr.csum_offset);
	}

	if ((m = m_uiotombuf(uio, M_NOWAIT, 0, align, M_PKTHDR)) == NULL) {
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		return (ENOBUFS);
	}

	m->m_pkthdr.rcvif = ifp;
#ifdef MAC
	mac_ifnet_create_mbuf(ifp, m);
#endif

	if (l2tun)
		return (tunwrite_l2(tp, m, vhdrlen > 0 ? &vhdr : NULL));

	return (tunwrite_l3(tp, m));
}
/*
 * tunpoll - the poll interface, this is only useful on reads
 * really. The write detect always returns true, write never blocks
 * anyway, it either accepts the packet or drops it.
 */
static int
tunpoll(struct cdev *dev, int events, struct thread *td)
{
	struct tuntap_softc *tp = dev->si_drv1;
	struct ifnet *ifp = TUN2IFP(tp);
	int revents = 0;

	TUNDEBUG(ifp, "tunpoll\n");

	if (events & (POLLIN | POLLRDNORM)) {
		IFQ_LOCK(&ifp->if_snd);
		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
			TUNDEBUG(ifp, "tunpoll q=%d\n", ifp->if_snd.ifq_len);
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			/* Nothing queued; arrange to be woken on arrival. */
			TUNDEBUG(ifp, "tunpoll waiting\n");
			selrecord(td, &tp->tun_rsel);
		}
		IFQ_UNLOCK(&ifp->if_snd);
	}
	/* Writes never block, so writability is always reported. */
	revents |= events & (POLLOUT | POLLWRNORM);

	return (revents);
}
/*
 * tunkqfilter - support for the kevent() system call.  Attaches read or
 * write filter ops to the knote and registers it on the device's knlist;
 * any other filter type is rejected.
 */
static int
tunkqfilter(struct cdev *dev, struct knote *kn)
{
	struct tuntap_softc *tp = dev->si_drv1;
	struct ifnet *ifp = TUN2IFP(tp);

	if (kn->kn_filter == EVFILT_READ) {
		TUNDEBUG(ifp, "%s kqfilter: EVFILT_READ, minor = %#x\n",
		    ifp->if_xname, dev2unit(dev));
		kn->kn_fop = &tun_read_filterops;
	} else if (kn->kn_filter == EVFILT_WRITE) {
		TUNDEBUG(ifp, "%s kqfilter: EVFILT_WRITE, minor = %#x\n",
		    ifp->if_xname, dev2unit(dev));
		kn->kn_fop = &tun_write_filterops;
	} else {
		TUNDEBUG(ifp, "%s kqfilter: invalid filter, minor = %#x\n",
		    ifp->if_xname, dev2unit(dev));
		return (EINVAL);
	}

	kn->kn_hook = tp;
	knlist_add(&tp->tun_rsel.si_note, kn, 0);

	return (0);
}
/*
 * EVFILT_READ event handler: report whether there is data in the
 * interface send queue, leaving the queue length in kn_data.
 */
static int
tunkqread(struct knote *kn, long hint)
{
	struct tuntap_softc *tp = kn->kn_hook;
	struct cdev *dev = tp->tun_dev;
	struct ifnet *ifp = TUN2IFP(tp);

	kn->kn_data = ifp->if_snd.ifq_len;
	if (kn->kn_data > 0) {
		TUNDEBUG(ifp,
		    "%s have data in the queue.  Len = %d, minor = %#x\n",
		    ifp->if_xname, ifp->if_snd.ifq_len, dev2unit(dev));
		return (1);
	}

	TUNDEBUG(ifp,
	    "%s waiting for data, minor = %#x\n", ifp->if_xname,
	    dev2unit(dev));
	return (0);
}
/*
* Always can write, always return MTU in kn->data.
*/
static int
tunkqwrite(struct knote *kn, long hint)
{
struct tuntap_softc *tp = kn->kn_hook;
struct ifnet *ifp = TUN2IFP(tp);
kn->kn_data = ifp->if_mtu;
return (1);
}
/*
 * Detach a knote previously registered by tunkqfilter().
 */
static void
tunkqdetach(struct knote *kn)
{
	struct tuntap_softc *sc = kn->kn_hook;

	knlist_remove(&sc->tun_rsel.si_note, kn, 0);
}
diff --git a/sys/net/if_vlan.c b/sys/net/if_vlan.c
index 788fdf787249..e2b32ac2e7de 100644
--- a/sys/net/if_vlan.c
+++ b/sys/net/if_vlan.c
@@ -1,2522 +1,2514 @@
/*-
* Copyright 1998 Massachusetts Institute of Technology
* Copyright 2012 ADARA Networks, Inc.
* Copyright 2017 Dell EMC Isilon
*
* Portions of this software were developed by Robert N. M. Watson under
* contract to ADARA Networks, Inc.
*
* Permission to use, copy, modify, and distribute this software and
* its documentation for any purpose and without fee is hereby
* granted, provided that both the above copyright notice and this
* permission notice appear in all copies, that both the above
* copyright notice and this permission notice appear in all
* supporting documentation, and that the name of M.I.T. not be used
* in advertising or publicity pertaining to distribution of the
* software without specific, written prior permission. M.I.T. makes
* no representations about the suitability of this software for any
* purpose. It is provided "as is" without express or implied
* warranty.
*
* THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
* ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
* SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs.
* This is sort of sneaky in the implementation, since
* we need to pretend to be enough of an Ethernet implementation
* to make arp work. The way we do this is by telling everyone
* that we are an Ethernet, and then catch the packets that
* ether_output() sends to us via if_transmit(), rewrite them for
* use by the real outgoing interface, and ask it to send them.
*/
#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_vlan.h"
#include "opt_ratelimit.h"
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rmlock.h>
#include <sys/priv.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/route.h>
#include <net/vnet.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif
#include <netlink/netlink.h>
#include <netlink/netlink_ctl.h>
#include <netlink/netlink_route.h>
#include <netlink/route/route_var.h>
/* Default log2 width of the per-trunk vlan hash table. */
#define VLAN_DEF_HWIDTH 4
/* Interface flags carried by every vlan ifnet. */
#define VLAN_IFFLAGS (IFF_BROADCAST | IFF_MULTICAST)

/* True when the interface is both administratively and driver up. */
#define UP_AND_RUNNING(ifp) \
    ((ifp)->if_flags & IFF_UP && (ifp)->if_drv_flags & IFF_DRV_RUNNING)

CK_SLIST_HEAD(ifvlanhead, ifvlan);

/*
 * Per-parent trunk state: the set of vlans configured on one parent
 * interface, stored either in a static array (VLAN_ARRAY) or in a
 * resizable hash of singly-linked lists.
 */
struct ifvlantrunk {
	struct ifnet *parent;	/* parent interface of this trunk */
	struct mtx lock;
#ifdef VLAN_ARRAY
#define VLAN_ARRAY_SIZE (EVL_VLID_MASK + 1)
	struct ifvlan *vlans[VLAN_ARRAY_SIZE]; /* static table */
#else
	struct ifvlanhead *hash;	/* dynamic hash-list table */
	uint16_t hmask;			/* bucket index mask */
	uint16_t hwidth;		/* log2 number of buckets */
#endif
	int refcnt;			/* number of vlans on this trunk */
};

#if defined(KERN_TLS) || defined(RATELIMIT)
/*
 * A vlan send tag wraps the parent interface's send tag so KTLS /
 * rate-limited transmission can flow through the vlan pseudo-device.
 */
struct vlan_snd_tag {
	struct m_snd_tag com;
	struct m_snd_tag *tag;
};

/* Recover the containing vlan_snd_tag from its embedded m_snd_tag. */
static inline struct vlan_snd_tag *
mst_to_vst(struct m_snd_tag *mst)
{

	return (__containerof(mst, struct vlan_snd_tag, com));
}
#endif
/*
 * This macro provides a facility to iterate over every vlan on a trunk with
 * the assumption that none will be added/removed during iteration.
 */
#ifdef VLAN_ARRAY
#define VLAN_FOREACH(_ifv, _trunk) \
	size_t _i; \
	for (_i = 0; _i < VLAN_ARRAY_SIZE; _i++) \
		if (((_ifv) = (_trunk)->vlans[_i]) != NULL)
#else /* VLAN_ARRAY */
#define VLAN_FOREACH(_ifv, _trunk) \
	struct ifvlan *_next; \
	size_t _i; \
	for (_i = 0; _i < (1 << (_trunk)->hwidth); _i++) \
		CK_SLIST_FOREACH_SAFE((_ifv), &(_trunk)->hash[_i], ifv_list, _next)
#endif /* VLAN_ARRAY */

/*
 * This macro provides a facility to iterate over every vlan on a trunk while
 * also modifying the number of vlans on the trunk. The iteration continues
 * until some condition is met or there are no more vlans on the trunk.
 */
#ifdef VLAN_ARRAY
/* The VLAN_ARRAY case is simple -- just a for loop using the condition. */
#define VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \
	size_t _i; \
	for (_i = 0; !(_cond) && _i < VLAN_ARRAY_SIZE; _i++) \
		if (((_ifv) = (_trunk)->vlans[_i]))
#else /* VLAN_ARRAY */
/*
 * The hash table case is more complicated. We allow for the hash table to be
 * modified (i.e. vlans removed) while we are iterating over it. To allow for
 * this we must restart the iteration every time we "touch" something during
 * the iteration, since removal will resize the hash table and invalidate our
 * current position. If acting on the touched element causes the trunk to be
 * emptied, then iteration also stops.
 */
#define VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \
	size_t _i; \
	bool _touch = false; \
	for (_i = 0; \
	    !(_cond) && _i < (1 << (_trunk)->hwidth); \
	    _i = (_touch && ((_trunk) != NULL) ? 0 : _i + 1), _touch = false) \
		if (((_ifv) = CK_SLIST_FIRST(&(_trunk)->hash[_i])) != NULL && \
		    (_touch = true))
#endif /* VLAN_ARRAY */

/*
 * One multicast address programmed on the parent on behalf of a vlan;
 * kept on the vlan's vlan_mc_listhead so it can be removed again.
 */
struct vlan_mc_entry {
	struct sockaddr_dl mc_addr;
	CK_SLIST_ENTRY(vlan_mc_entry) mc_entries;
	struct epoch_context mc_epoch_ctx;
};
/*
 * Per-vlan-interface software state.
 */
struct ifvlan {
	struct ifvlantrunk *ifv_trunk;	/* trunk we hang off, if configured */
	struct ifnet *ifv_ifp;		/* our own ifnet */
#define TRUNK(ifv) ((ifv)->ifv_trunk)
#define PARENT(ifv) (TRUNK(ifv)->parent)
	void *ifv_cookie;
	int ifv_pflags;		/* special flags we have set on parent */
	int ifv_capenable;
	int ifv_encaplen;	/* encapsulation length */
	int ifv_mtufudge;	/* MTU fudged by this much */
	int ifv_mintu;		/* min transmission unit */
	struct ether_8021q_tag ifv_qtag;	/* proto/vid/pcp for tagging */
#define ifv_proto ifv_qtag.proto
#define ifv_vid ifv_qtag.vid
#define ifv_pcp ifv_qtag.pcp
	struct task lladdr_task;	/* deferred lladdr sync with parent */
	CK_SLIST_HEAD(, vlan_mc_entry) vlan_mc_listhead;
#ifndef VLAN_ARRAY
	CK_SLIST_ENTRY(ifvlan) ifv_list;	/* trunk hash-bucket linkage */
#endif
};

/* Special flags we should propagate to parent. */
static struct {
	int flag;
	int (*func)(struct ifnet *, int);
} vlan_pflags[] = {
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
	{0, NULL}
};

VNET_DECLARE(int, vlan_mtag_pcp);
#define V_vlan_mtag_pcp VNET(vlan_mtag_pcp)

static const char vlanname[] = "vlan";
static MALLOC_DEFINE(M_VLAN, vlanname, "802.1Q Virtual LAN Interface");

/* Event handler registrations, set up at module load. */
static eventhandler_tag ifdetach_tag;
static eventhandler_tag iflladdr_tag;
static eventhandler_tag ifevent_tag;
/*
 * if_vlan uses two module-level synchronizations primitives to allow concurrent
 * modification of vlan interfaces and (mostly) allow for vlans to be destroyed
 * while they are being used for tx/rx. To accomplish this in a way that has
 * acceptable performance and cooperation with other parts of the network stack
 * there is a non-sleepable epoch(9) and an sx(9).
 *
 * The performance-sensitive paths that warrant using the epoch(9) are
 * vlan_transmit and vlan_input. Both have to check for the vlan interface's
 * existence using if_vlantrunk, and being in the network tx/rx paths the use
 * of an epoch(9) gives a measureable improvement in performance.
 *
 * The reason for having an sx(9) is mostly because there are still areas that
 * must be sleepable and also have safe concurrent access to a vlan interface.
 * Since the sx(9) exists, it is used by default in most paths unless sleeping
 * is not permitted, or if it is not clear whether sleeping is permitted.
 *
 */
#define _VLAN_SX_ID ifv_sx

static struct sx _VLAN_SX_ID;

#define VLAN_LOCKING_INIT() \
	sx_init_flags(&_VLAN_SX_ID, "vlan_sx", SX_RECURSE)

#define VLAN_LOCKING_DESTROY() \
	sx_destroy(&_VLAN_SX_ID)

#define VLAN_SLOCK() sx_slock(&_VLAN_SX_ID)
#define VLAN_SUNLOCK() sx_sunlock(&_VLAN_SX_ID)
#define VLAN_XLOCK() sx_xlock(&_VLAN_SX_ID)
#define VLAN_XUNLOCK() sx_xunlock(&_VLAN_SX_ID)
#define VLAN_SLOCK_ASSERT() sx_assert(&_VLAN_SX_ID, SA_SLOCKED)
#define VLAN_XLOCK_ASSERT() sx_assert(&_VLAN_SX_ID, SA_XLOCKED)
#define VLAN_SXLOCK_ASSERT() sx_assert(&_VLAN_SX_ID, SA_LOCKED)

/*
 * We also have a per-trunk mutex that should be acquired when changing
 * its state.
 */
#define TRUNK_LOCK_INIT(trunk) mtx_init(&(trunk)->lock, vlanname, NULL, MTX_DEF)
#define TRUNK_LOCK_DESTROY(trunk) mtx_destroy(&(trunk)->lock)
#define TRUNK_WLOCK(trunk) mtx_lock(&(trunk)->lock)
#define TRUNK_WUNLOCK(trunk) mtx_unlock(&(trunk)->lock)
/* NOTE(review): the trailing ';' below makes callers' own ';' an empty
 * statement — harmless today, but worth dropping from the macro. */
#define TRUNK_WLOCK_ASSERT(trunk) mtx_assert(&(trunk)->lock, MA_OWNED);
/*
 * The VLAN_ARRAY substitutes the dynamic hash with a static array
 * with 4096 entries. In theory this can give a boost in processing,
 * however in practice it does not. Probably this is because the array
 * is too big to fit into CPU cache.
 */
#ifndef VLAN_ARRAY
/* VID -> softc hash table used when VLAN_ARRAY is not compiled in. */
static	void vlan_inithash(struct ifvlantrunk *trunk);
static	void vlan_freehash(struct ifvlantrunk *trunk);
static	int vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static	int vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static	void vlan_growhash(struct ifvlantrunk *trunk, int howmuch);
static __inline struct ifvlan * vlan_gethash(struct ifvlantrunk *trunk,
	uint16_t vid);
#endif
static	void trunk_destroy(struct ifvlantrunk *trunk);
/* ifnet method implementations and configuration helpers. */
static	void vlan_init(void *foo);
static	void vlan_input(struct ifnet *ifp, struct mbuf *m);
static	int vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr);
#if defined(KERN_TLS) || defined(RATELIMIT)
/* Send-tag (hardware rate limit / TLS offload) passthrough to the parent. */
static	int vlan_snd_tag_alloc(struct ifnet *,
    union if_snd_tag_alloc_params *, struct m_snd_tag **);
static	int vlan_snd_tag_modify(struct m_snd_tag *,
    union if_snd_tag_modify_params *);
static	int vlan_snd_tag_query(struct m_snd_tag *,
    union if_snd_tag_query_params *);
static	void vlan_snd_tag_free(struct m_snd_tag *);
static struct m_snd_tag *vlan_next_snd_tag(struct m_snd_tag *);
static void vlan_ratelimit_query(struct ifnet *,
    struct if_ratelimit_query_results *);
#endif
static	void vlan_qflush(struct ifnet *ifp);
static	int vlan_setflag(struct ifnet *ifp, int flag, int status,
    int (*func)(struct ifnet *, int));
static	int vlan_setflags(struct ifnet *ifp, int status);
static	int vlan_setmulti(struct ifnet *ifp);
static	int vlan_transmit(struct ifnet *ifp, struct mbuf *m);
#ifdef ALTQ
static void vlan_altq_start(struct ifnet *ifp);
static	int vlan_altq_transmit(struct ifnet *ifp, struct mbuf *m);
#endif
static	int vlan_output(struct ifnet *ifp, struct mbuf *m,
    const struct sockaddr *dst, struct route *ro);
static	void vlan_unconfig(struct ifnet *ifp);
static	void vlan_unconfig_locked(struct ifnet *ifp, int departing);
static	int vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag,
	uint16_t proto);
static	void vlan_link_state(struct ifnet *ifp);
static	void vlan_capabilities(struct ifvlan *ifv);
static	void vlan_trunk_capabilities(struct ifnet *ifp);
/* if_clone plumbing, including the netlink (RTM_NEWLINK) entry points. */
static	struct ifnet *vlan_clone_match_ethervid(const char *, int *);
static	int vlan_clone_match(struct if_clone *, const char *);
static	int vlan_clone_create(struct if_clone *, char *, size_t,
    struct ifc_data *, struct ifnet **);
static	int vlan_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);
static	int vlan_clone_create_nl(struct if_clone *ifc, char *name, size_t len,
    struct ifc_data_nl *ifd);
static	int vlan_clone_modify_nl(struct ifnet *ifp, struct ifc_data_nl *ifd);
static	void vlan_clone_dump_nl(struct ifnet *ifp, struct nl_writer *nw);
/* Event handlers tracking parent interface lifecycle changes. */
static	void vlan_ifdetach(void *arg, struct ifnet *ifp);
static  void vlan_iflladdr(void *arg, struct ifnet *ifp);
static  void vlan_ifevent(void *arg, struct ifnet *ifp, int event);

static void vlan_lladdr_fn(void *arg, int pending);

static struct if_clone *vlan_cloner;

#ifdef VIMAGE
/* With VIMAGE each vnet carries its own cloner instance. */
VNET_DEFINE_STATIC(struct if_clone *, vlan_cloner);
#define	V_vlan_cloner	VNET(vlan_cloner)
#endif
#ifdef RATELIMIT
/*
 * Method tables for the send-tag types vlan(4) can proxy to its parent.
 * All variants share the same modify/query/free/next callbacks; only
 * the .type discriminator differs.
 */
static const struct if_snd_tag_sw vlan_snd_tag_ul_sw = {
	.snd_tag_modify = vlan_snd_tag_modify,
	.snd_tag_query = vlan_snd_tag_query,
	.snd_tag_free = vlan_snd_tag_free,
	.next_snd_tag = vlan_next_snd_tag,
	.type = IF_SND_TAG_TYPE_UNLIMITED
};

static const struct if_snd_tag_sw vlan_snd_tag_rl_sw = {
	.snd_tag_modify = vlan_snd_tag_modify,
	.snd_tag_query = vlan_snd_tag_query,
	.snd_tag_free = vlan_snd_tag_free,
	.next_snd_tag = vlan_next_snd_tag,
	.type = IF_SND_TAG_TYPE_RATE_LIMIT
};
#endif

#ifdef KERN_TLS
static const struct if_snd_tag_sw vlan_snd_tag_tls_sw = {
	.snd_tag_modify = vlan_snd_tag_modify,
	.snd_tag_query = vlan_snd_tag_query,
	.snd_tag_free = vlan_snd_tag_free,
	.next_snd_tag = vlan_next_snd_tag,
	.type = IF_SND_TAG_TYPE_TLS
};

#ifdef RATELIMIT
static const struct if_snd_tag_sw vlan_snd_tag_tls_rl_sw = {
	.snd_tag_modify = vlan_snd_tag_modify,
	.snd_tag_query = vlan_snd_tag_query,
	.snd_tag_free = vlan_snd_tag_free,
	.next_snd_tag = vlan_next_snd_tag,
	.type = IF_SND_TAG_TYPE_TLS_RATE_LIMIT
};
#endif
#endif
/*
 * Epoch callback: release a multicast filter entry once no net-epoch
 * reader can still be traversing the list that held it.
 */
static void
vlan_mc_free(struct epoch_context *ctx)
{
	struct vlan_mc_entry *entry;

	entry = __containerof(ctx, struct vlan_mc_entry, mc_epoch_ctx);
	free(entry, M_VLAN);
}
#ifndef VLAN_ARRAY
#define HASH(n, m) ((((n) >> 8) ^ ((n) >> 4) ^ (n)) & (m))

/*
 * Allocate the VID hash table at its default width and initialize
 * every bucket to an empty list.
 */
static void
vlan_inithash(struct ifvlantrunk *trunk)
{
	int bucket, nbuckets;

	/*
	 * The trunk must not be locked here since we call malloc(M_WAITOK).
	 * It is OK in case this function is called before the trunk struct
	 * gets hooked up and becomes visible from other threads.
	 */
	KASSERT(trunk->hwidth == 0 && trunk->hash == NULL,
	    ("%s: hash already initialized", __func__));

	trunk->hwidth = VLAN_DEF_HWIDTH;
	nbuckets = 1 << trunk->hwidth;
	trunk->hmask = nbuckets - 1;
	trunk->hash = malloc(sizeof(struct ifvlanhead) * nbuckets, M_VLAN,
	    M_WAITOK);
	for (bucket = 0; bucket < nbuckets; bucket++)
		CK_SLIST_INIT(&trunk->hash[bucket]);
}
/*
 * Tear down the VID hash table; all vlans must already be removed.
 */
static void
vlan_freehash(struct ifvlantrunk *trunk)
{
#ifdef INVARIANTS
	int bucket;

	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
	for (bucket = 0; bucket < (1 << trunk->hwidth); bucket++)
		KASSERT(CK_SLIST_EMPTY(&trunk->hash[bucket]),
		    ("%s: hash table not empty", __func__));
#endif
	free(trunk->hash, M_VLAN);
	trunk->hash = NULL;
	trunk->hmask = 0;
	trunk->hwidth = 0;
}
static int
vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
int i, b;
struct ifvlan *ifv2;
VLAN_XLOCK_ASSERT();
KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
b = 1 << trunk->hwidth;
i = HASH(ifv->ifv_vid, trunk->hmask);
CK_SLIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
if (ifv->ifv_vid == ifv2->ifv_vid)
return (EEXIST);
/*
* Grow the hash when the number of vlans exceeds half of the number of
* hash buckets squared. This will make the average linked-list length
* buckets/2.
*/
if (trunk->refcnt > (b * b) / 2) {
vlan_growhash(trunk, 1);
i = HASH(ifv->ifv_vid, trunk->hmask);
}
CK_SLIST_INSERT_HEAD(&trunk->hash[i], ifv, ifv_list);
trunk->refcnt++;
return (0);
}
static int
vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
int i, b;
struct ifvlan *ifv2;
VLAN_XLOCK_ASSERT();
KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
b = 1 << (trunk->hwidth - 1);
i = HASH(ifv->ifv_vid, trunk->hmask);
CK_SLIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
if (ifv2 == ifv) {
trunk->refcnt--;
CK_SLIST_REMOVE(&trunk->hash[i], ifv2, ifvlan, ifv_list);
if (trunk->refcnt < (b * b) / 2)
vlan_growhash(trunk, -1);
return (0);
}
panic("%s: vlan not found\n", __func__);
return (ENOENT); /*NOTREACHED*/
}
/*
 * Grow (howmuch == 1) or shrink (howmuch == -1) the hash by one power of
 * two, rehashing every vlan into the new table.  Must be called with the
 * exclusive vlan lock held since it replaces trunk->hash.
 */
static void
vlan_growhash(struct ifvlantrunk *trunk, int howmuch)
{
	struct ifvlan *ifv;
	struct ifvlanhead *hash2;
	int hwidth2, i, j, n, n2;

	VLAN_XLOCK_ASSERT();
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	if (howmuch == 0) {
		/* Harmless yet obvious coding error */
		printf("%s: howmuch is 0\n", __func__);
		return;
	}
	hwidth2 = trunk->hwidth + howmuch;
	n = 1 << trunk->hwidth;
	n2 = 1 << hwidth2;
	/* Do not shrink the table below the default */
	if (hwidth2 < VLAN_DEF_HWIDTH)
		return;

	/*
	 * malloc(9) with M_WAITOK never returns NULL, so the historical
	 * failure branch here was dead code and has been removed.
	 */
	hash2 = malloc(sizeof(struct ifvlanhead) * n2, M_VLAN, M_WAITOK);
	for (j = 0; j < n2; j++)
		CK_SLIST_INIT(&hash2[j]);
	for (i = 0; i < n; i++)
		while ((ifv = CK_SLIST_FIRST(&trunk->hash[i])) != NULL) {
			CK_SLIST_REMOVE(&trunk->hash[i], ifv, ifvlan, ifv_list);
			j = HASH(ifv->ifv_vid, n2 - 1);
			CK_SLIST_INSERT_HEAD(&hash2[j], ifv, ifv_list);
		}
	/* Wait for readers in the net epoch before freeing the old table. */
	NET_EPOCH_WAIT();
	free(trunk->hash, M_VLAN);
	trunk->hash = hash2;
	trunk->hwidth = hwidth2;
	trunk->hmask = n2 - 1;

	if (bootverbose)
		if_printf(trunk->parent,
		    "VLAN hash table resized from %d to %d buckets\n", n, n2);
}
/*
 * Look up the vlan softc for a VID on a trunk; epoch-safe reader path.
 * Returns NULL if the VID is not configured.
 */
static __inline struct ifvlan *
vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid)
{
	struct ifvlan *ifv;

	NET_EPOCH_ASSERT();

	CK_SLIST_FOREACH(ifv, &trunk->hash[HASH(vid, trunk->hmask)],
	    ifv_list) {
		if (ifv->ifv_vid == vid)
			break;
	}
	/* CK_SLIST_FOREACH leaves ifv == NULL when nothing matched. */
	return (ifv);
}
#if 0
/* Debugging code to view the hashtables. */
static void
vlan_dumphash(struct ifvlantrunk *trunk)
{
	int i;
	struct ifvlan *ifv;

	for (i = 0; i < (1 << trunk->hwidth); i++) {
		printf("%d: ", i);
		CK_SLIST_FOREACH(ifv, &trunk->hash[i], ifv_list)
			printf("%s ", ifv->ifv_ifp->if_xname);
		printf("\n");
	}
}
#endif /* 0 */
#else

/*
 * VLAN_ARRAY variants: the "hash" is the static per-VID array described
 * in the comment above, so lookup/insert/remove are direct indexing and
 * the init/free hooks have nothing to do.
 */
static __inline struct ifvlan *
vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid)
{

	return trunk->vlans[vid];
}

static __inline int
vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{

	if (trunk->vlans[ifv->ifv_vid] != NULL)
		return EEXIST;
	trunk->vlans[ifv->ifv_vid] = ifv;
	trunk->refcnt++;

	return (0);
}

static __inline int
vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{

	trunk->vlans[ifv->ifv_vid] = NULL;
	trunk->refcnt--;

	return (0);
}

static __inline void
vlan_freehash(struct ifvlantrunk *trunk)
{
}

static __inline void
vlan_inithash(struct ifvlantrunk *trunk)
{
}

#endif /* !VLAN_ARRAY */
static void
trunk_destroy(struct ifvlantrunk *trunk)
{
VLAN_XLOCK_ASSERT();
vlan_freehash(trunk);
trunk->parent->if_vlantrunk = NULL;
TRUNK_LOCK_DESTROY(trunk);
if_rele(trunk->parent);
free(trunk, M_VLAN);
}
/*
* Program our multicast filter. What we're actually doing is
* programming the multicast filter of the parent. This has the
* side effect of causing the parent interface to receive multicast
* traffic that it doesn't really want, which ends up being discarded
* later by the upper protocol layers. Unfortunately, there's no way
* to avoid this: there really is only one physical interface.
*/
static int
vlan_setmulti(struct ifnet *ifp)
{
	struct ifnet *ifp_p;
	struct ifmultiaddr *ifma;
	struct ifvlan *sc;
	struct vlan_mc_entry *mc;
	int error;

	VLAN_XLOCK_ASSERT();

	/* Find the parent. */
	sc = ifp->if_softc;
	ifp_p = PARENT(sc);

	CURVNET_SET_QUIET(ifp_p->if_vnet);

	/* First, remove any existing filter entries. */
	while ((mc = CK_SLIST_FIRST(&sc->vlan_mc_listhead)) != NULL) {
		CK_SLIST_REMOVE_HEAD(&sc->vlan_mc_listhead, mc_entries);
		(void)if_delmulti(ifp_p, (struct sockaddr *)&mc->mc_addr);
		/* Entry may still be visible to epoch readers; defer free. */
		NET_EPOCH_CALL(vlan_mc_free, &mc->mc_epoch_ctx);
	}

	/* Now program new ones. */
	IF_ADDR_WLOCK(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* M_NOWAIT: the address-list write lock is held here. */
		mc = malloc(sizeof(struct vlan_mc_entry), M_VLAN, M_NOWAIT);
		if (mc == NULL) {
			IF_ADDR_WUNLOCK(ifp);
			CURVNET_RESTORE();
			return (ENOMEM);
		}
		bcopy(ifma->ifma_addr, &mc->mc_addr, ifma->ifma_addr->sa_len);
		mc->mc_addr.sdl_index = ifp_p->if_index;
		CK_SLIST_INSERT_HEAD(&sc->vlan_mc_listhead, mc, mc_entries);
	}
	IF_ADDR_WUNLOCK(ifp);
	/*
	 * Program the parent in a second pass, after dropping the address
	 * list lock — presumably because if_addmulti() may sleep; confirm.
	 * NOTE(review): on error, entries already linked on
	 * vlan_mc_listhead are left in place for the next resync.
	 */
	CK_SLIST_FOREACH (mc, &sc->vlan_mc_listhead, mc_entries) {
		error = if_addmulti(ifp_p, (struct sockaddr *)&mc->mc_addr,
		    NULL);
		if (error) {
			CURVNET_RESTORE();
			return (error);
		}
	}

	CURVNET_RESTORE();
	return (0);
}
/*
* A handler for interface ifnet events.
*/
/*
 * ifnet event handler: propagate parent baudrate changes to every vlan
 * configured on the trunk.
 */
static void
vlan_ifevent(void *arg __unused, struct ifnet *ifp, int event)
{
	struct epoch_tracker et;
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;

	if (event != IFNET_EVENT_UPDATE_BAUDRATE)
		return;

	NET_EPOCH_ENTER(et);
	trunk = ifp->if_vlantrunk;
	if (trunk != NULL) {
		TRUNK_WLOCK(trunk);
		VLAN_FOREACH(ifv, trunk)
			ifv->ifv_ifp->if_baudrate = ifp->if_baudrate;
		TRUNK_WUNLOCK(trunk);
	}
	NET_EPOCH_EXIT(et);
}
/*
* A handler for parent interface link layer address changes.
* If the parent interface link layer address is changed we
* should also change it on all children vlans.
*/
static void
vlan_iflladdr(void *arg __unused, struct ifnet *ifp)
{
	struct epoch_tracker et;
	struct ifvlan *ifv;
	struct ifnet *ifv_ifp;
	struct ifvlantrunk *trunk;
	struct sockaddr_dl *sdl;

	/* Need the epoch since this is run on taskqueue_swi. */
	NET_EPOCH_ENTER(et);
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		/* Not a trunk: nothing to propagate. */
		NET_EPOCH_EXIT(et);
		return;
	}

	/*
	 * OK, it's a trunk.  Loop over and change all vlan's lladdrs on it.
	 * We need an exclusive lock here to prevent concurrent SIOCSIFLLADDR
	 * ioctl calls on the parent garbling the lladdr of the child vlan.
	 */
	TRUNK_WLOCK(trunk);
	VLAN_FOREACH(ifv, trunk) {
		/*
		 * Copy new new lladdr into the ifv_ifp, enqueue a task
		 * to actually call if_setlladdr. if_setlladdr needs to
		 * be deferred to a taskqueue because it will call into
		 * the if_vlan ioctl path and try to acquire the global
		 * lock.
		 */
		ifv_ifp = ifv->ifv_ifp;
		bcopy(IF_LLADDR(ifp), IF_LLADDR(ifv_ifp),
		    ifp->if_addrlen);
		/* Keep the address length in sync with the parent's. */
		sdl = (struct sockaddr_dl *)ifv_ifp->if_addr->ifa_addr;
		sdl->sdl_alen = ifp->if_addrlen;
		taskqueue_enqueue(taskqueue_thread, &ifv->lladdr_task);
	}
	TRUNK_WUNLOCK(trunk);
	NET_EPOCH_EXIT(et);
}
/*
* A handler for network interface departure events.
* Track departure of trunks here so that we don't access invalid
* pointers or whatever if a trunk is ripped from under us, e.g.,
* by ejecting its hot-plug card. However, if an ifnet is simply
* being renamed, then there's no need to tear down the state.
*/
static void
vlan_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct ifvlan *ifv;
	struct ifvlantrunk *trunk;

	/* If the ifnet is just being renamed, don't do anything. */
	if (ifp->if_flags & IFF_RENAMING)
		return;
	VLAN_XLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		/* Departing interface carries no vlans. */
		VLAN_XUNLOCK();
		return;
	}

	/*
	 * OK, it's a trunk.  Loop over and detach all vlan's on it.
	 * Check trunk pointer after each vlan_unconfig() as it will
	 * free it and set to NULL after the last vlan was detached.
	 */
	VLAN_FOREACH_UNTIL_SAFE(ifv, ifp->if_vlantrunk,
	    ifp->if_vlantrunk == NULL)
		/* departing = 1: the parent is going away for good. */
		vlan_unconfig_locked(ifv->ifv_ifp, 1);

	/* Trunk should have been destroyed in vlan_unconfig(). */
	KASSERT(ifp->if_vlantrunk == NULL, ("%s: purge failed", __func__));
	VLAN_XUNLOCK();
}
/*
* Return the trunk device for a virtual interface.
*/
/*
 * Return the trunk device for a virtual interface, or NULL if the
 * interface is not a configured vlan.
 */
static struct ifnet *
vlan_trunkdev(struct ifnet *ifp)
{
	struct ifvlan *ifv;

	NET_EPOCH_ASSERT();

	if (ifp->if_type != IFT_L2VLAN)
		return (NULL);

	ifv = ifp->if_softc;
	return (ifv->ifv_trunk != NULL ? PARENT(ifv) : NULL);
}
/*
* Return the 12-bit VLAN VID for this interface, for use by external
* components such as Infiniband.
*
* XXXRW: Note that the function name here is historical; it should be named
* vlan_vid().
*/
/*
 * Report the interface's 12-bit VLAN VID through *vidp; EINVAL when the
 * interface is not a vlan.  (Historically named "tag" — see above.)
 */
static int
vlan_tag(struct ifnet *ifp, uint16_t *vidp)
{

	if (ifp->if_type != IFT_L2VLAN)
		return (EINVAL);
	*vidp = ((struct ifvlan *)ifp->if_softc)->ifv_vid;
	return (0);
}
/*
 * Report the interface's 802.1p priority code point through *pcpp;
 * EINVAL when the interface is not a vlan.
 */
static int
vlan_pcp(struct ifnet *ifp, uint16_t *pcpp)
{

	if (ifp->if_type != IFT_L2VLAN)
		return (EINVAL);
	*pcpp = ((struct ifvlan *)ifp->if_softc)->ifv_pcp;
	return (0);
}
/*
* Return a driver specific cookie for this interface. Synchronization
* with setcookie must be provided by the driver.
*/
/*
 * Return the driver-private cookie stored in the softc, or NULL when the
 * interface is not a vlan.  Synchronization with vlan_setcookie() is the
 * caller's responsibility.
 */
static void *
vlan_cookie(struct ifnet *ifp)
{

	if (ifp->if_type != IFT_L2VLAN)
		return (NULL);
	return (((struct ifvlan *)ifp->if_softc)->ifv_cookie);
}
/*
* Store a cookie in our softc that drivers can use to store driver
* private per-instance data in.
*/
/*
 * Store a driver-private cookie in the softc; EINVAL when the interface
 * is not a vlan.
 */
static int
vlan_setcookie(struct ifnet *ifp, void *cookie)
{

	if (ifp->if_type != IFT_L2VLAN)
		return (EINVAL);
	((struct ifvlan *)ifp->if_softc)->ifv_cookie = cookie;
	return (0);
}
/*
* Return the vlan device present at the specific VID.
*/
/*
 * Return the vlan ifnet configured at the given VID on a trunk, or NULL
 * when the interface is not a trunk or the VID is unused.
 */
static struct ifnet *
vlan_devat(struct ifnet *ifp, uint16_t vid)
{
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;

	NET_EPOCH_ASSERT();

	trunk = ifp->if_vlantrunk;
	if (trunk == NULL)
		return (NULL);

	ifv = vlan_gethash(trunk, vid);
	return (ifv != NULL ? ifv->ifv_ifp : NULL);
}
/*
* VLAN support can be loaded as a module. The only place in the
* system that's intimately aware of this is ether_input. We hook
* into this code through vlan_input_p which is defined there and
* set here. No one else in the system should be aware of this so
* we use an explicit reference here.
*/
/* Hook set by ether_input() glue; see comment above. */
extern	void (*vlan_input_p)(struct ifnet *, struct mbuf *);

/* For if_link_state_change() eyes only... */
extern	void (*vlan_link_state_p)(struct ifnet *);

/* Cloner registration, including the netlink create/modify/dump hooks. */
static struct if_clone_addreq_v2 vlan_addreq = {
	.version = 2,
	.match_f = vlan_clone_match,
	.create_f = vlan_clone_create,
	.destroy_f = vlan_clone_destroy,
	.create_nl_f = vlan_clone_create_nl,
	.modify_nl_f = vlan_clone_modify_nl,
	.dump_nl_f = vlan_clone_dump_nl,
};
/*
 * Module load/unload handler: registers the ifnet event handlers, the
 * global lock, and the function-pointer hooks that ether_input() and
 * friends use to reach vlan(4).
 */
static int
vlan_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		/*
		 * NOTE(review): if a later EVENTHANDLER_REGISTER() fails,
		 * the earlier handlers are not deregistered before the
		 * error return — confirm whether a partial-load cleanup
		 * path is needed here.
		 */
		ifdetach_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
		    vlan_ifdetach, NULL, EVENTHANDLER_PRI_ANY);
		if (ifdetach_tag == NULL)
			return (ENOMEM);
		iflladdr_tag = EVENTHANDLER_REGISTER(iflladdr_event,
		    vlan_iflladdr, NULL, EVENTHANDLER_PRI_ANY);
		if (iflladdr_tag == NULL)
			return (ENOMEM);
		ifevent_tag = EVENTHANDLER_REGISTER(ifnet_event,
		    vlan_ifevent, NULL, EVENTHANDLER_PRI_ANY);
		if (ifevent_tag == NULL)
			return (ENOMEM);
		VLAN_LOCKING_INIT();
		vlan_input_p = vlan_input;
		vlan_link_state_p = vlan_link_state;
		vlan_trunk_cap_p = vlan_trunk_capabilities;
		vlan_trunkdev_p = vlan_trunkdev;
		vlan_cookie_p = vlan_cookie;
		vlan_setcookie_p = vlan_setcookie;
		vlan_tag_p = vlan_tag;
		vlan_pcp_p = vlan_pcp;
		vlan_devat_p = vlan_devat;
#ifndef VIMAGE
		vlan_cloner = ifc_attach_cloner(vlanname, (struct if_clone_addreq *)&vlan_addreq);
#endif
		if (bootverbose)
			printf("vlan: initialized, using "
#ifdef VLAN_ARRAY
			    "full-size arrays"
#else
			    "hash tables with chaining"
#endif
			    "\n");
		break;
	case MOD_UNLOAD:
#ifndef VIMAGE
		ifc_detach_cloner(vlan_cloner);
#endif
		EVENTHANDLER_DEREGISTER(ifnet_departure_event, ifdetach_tag);
		EVENTHANDLER_DEREGISTER(iflladdr_event, iflladdr_tag);
		EVENTHANDLER_DEREGISTER(ifnet_event, ifevent_tag);
		/* Clear every hook that MOD_LOAD installed. */
		vlan_input_p = NULL;
		vlan_link_state_p = NULL;
		vlan_trunk_cap_p = NULL;
		vlan_trunkdev_p = NULL;
		vlan_tag_p = NULL;
		/* Fix: vlan_pcp_p was set on load but never cleared here. */
		vlan_pcp_p = NULL;
		vlan_cookie_p = NULL;
		vlan_setcookie_p = NULL;
		vlan_devat_p = NULL;
		VLAN_LOCKING_DESTROY();
		if (bootverbose)
			printf("vlan: unloaded\n");
		break;
	default:
		return (EOPNOTSUPP);
	}

	return (0);
}
/* Module glue: run vlan_modevent at pseudo-device initialization time. */
static moduledata_t vlan_mod = {
	"if_vlan",
	vlan_modevent,
	0
};

DECLARE_MODULE(if_vlan, vlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_vlan, 3);
#ifdef VIMAGE
/*
 * Per-VNET cloner attach/detach.  With VIMAGE each vnet gets its own
 * cloner instance; the !VIMAGE case attaches one in vlan_modevent().
 */
static void
vnet_vlan_init(const void *unused __unused)
{
	vlan_cloner = ifc_attach_cloner(vlanname, (struct if_clone_addreq *)&vlan_addreq);
	V_vlan_cloner = vlan_cloner;
}
VNET_SYSINIT(vnet_vlan_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    vnet_vlan_init, NULL);

static void
vnet_vlan_uninit(const void *unused __unused)
{

	ifc_detach_cloner(V_vlan_cloner);
}
VNET_SYSUNINIT(vnet_vlan_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY,
    vnet_vlan_uninit, NULL);
#endif
/*
* Check for <etherif>.<vlan>[.<vlan> ...] style interface names.
*/
static struct ifnet *
vlan_clone_match_ethervid(const char *name, int *vidp)
{
char ifname[IFNAMSIZ];
char *cp;
struct ifnet *ifp;
int vid;
strlcpy(ifname, name, IFNAMSIZ);
if ((cp = strrchr(ifname, '.')) == NULL)
return (NULL);
*cp = '\0';
if ((ifp = ifunit_ref(ifname)) == NULL)
return (NULL);
/* Parse VID. */
if (*++cp == '\0') {
if_rele(ifp);
return (NULL);
}
vid = 0;
for(; *cp >= '0' && *cp <= '9'; cp++)
vid = (vid * 10) + (*cp - '0');
if (*cp != '\0') {
if_rele(ifp);
return (NULL);
}
if (vidp != NULL)
*vidp = vid;
return (ifp);
}
/*
 * Cloner match: accept "<parent>.<vid>" style names, or "vlan" followed
 * only by digits (including bare "vlan" for a wildcard unit).
 */
static int
vlan_clone_match(struct if_clone *ifc, const char *name)
{
	struct ifnet *ifp;
	const char *cp;

	ifp = vlan_clone_match_ethervid(name, NULL);
	if (ifp != NULL) {
		if_rele(ifp);
		return (1);
	}

	if (strncmp(vlanname, name, strlen(vlanname)) != 0)
		return (0);
	/*
	 * Use strlen(vlanname) rather than the magic constant 4 so the
	 * skip count always matches the prefix compared above.
	 */
	for (cp = name + strlen(vlanname); *cp != '\0'; cp++) {
		if (*cp < '0' || *cp > '9')
			return (0);
	}

	return (1);
}
/*
 * Create a vlan interface.  The name may be "vlanN"/"vlan" (unit-based),
 * or "<parent>.<vid>" (subinterface); a struct vlanreq may also be passed
 * via ifd->params.  On success the new ifnet is returned via *ifpp.
 *
 * Note: the unified-diff residue that previously sat here (a NULL check
 * after if_alloc()) has been dropped; if_alloc(9) cannot fail.
 */
static int
vlan_clone_create(struct if_clone *ifc, char *name, size_t len,
    struct ifc_data *ifd, struct ifnet **ifpp)
{
	char *dp;
	bool wildcard = false;
	bool subinterface = false;
	int unit;
	int error;
	int vid = 0;
	uint16_t proto = ETHERTYPE_VLAN;
	struct ifvlan *ifv;
	struct ifnet *ifp;
	struct ifnet *p = NULL;
	struct ifaddr *ifa;
	struct sockaddr_dl *sdl;
	struct vlanreq vlr;
	static const u_char eaddr[ETHER_ADDR_LEN];	/* 00:00:00:00:00:00 */

	/*
	 * There are three ways to specify the cloned device:
	 * o pass a parameter block with the clone request.
	 * o specify parameters in the text of the clone device name
	 * o specify no parameters and get an unattached device that
	 *   must be configured separately.
	 * The first technique is preferred; the latter two are supported
	 * for backwards compatibility.
	 *
	 * XXXRW: Note historic use of the word "tag" here.  New ioctls may be
	 * called for.
	 */
	if (ifd->params != NULL) {
		error = ifc_copyin(ifd, &vlr, sizeof(vlr));
		if (error)
			return (error);
		vid = vlr.vlr_tag;
		proto = vlr.vlr_proto;
		if (proto == 0)
			proto = ETHERTYPE_VLAN;
		/* Holds a reference released below or on the error paths. */
		p = ifunit_ref(vlr.vlr_parent);
		if (p == NULL)
			return (ENXIO);
	}

	if ((error = ifc_name2unit(name, &unit)) == 0) {
		/*
		 * vlanX interface. Set wildcard to true if the unit number
		 * is not fixed (-1)
		 */
		wildcard = (unit < 0);
	} else {
		struct ifnet *p_tmp = vlan_clone_match_ethervid(name, &vid);
		if (p_tmp != NULL) {
			error = 0;
			subinterface = true;
			unit = IF_DUNIT_NONE;
			wildcard = false;
			if (p != NULL) {
				/* Name and params must agree on the parent. */
				if_rele(p_tmp);
				if (p != p_tmp)
					error = EINVAL;
			} else
				p = p_tmp;
		} else
			error = ENXIO;
	}

	if (error != 0) {
		if (p != NULL)
			if_rele(p);
		return (error);
	}

	if (!subinterface) {
		/* vlanX interface, mark X as busy or allocate new unit # */
		error = ifc_alloc_unit(ifc, &unit);
		if (error != 0) {
			if (p != NULL)
				if_rele(p);
			return (error);
		}
	}

	/* In the wildcard case, we need to update the name. */
	if (wildcard) {
		for (dp = name; *dp != '\0'; dp++);
		if (snprintf(dp, len - (dp-name), "%d", unit) >
		    len - (dp-name) - 1) {
			panic("%s: interface name too long", __func__);
		}
	}

	ifv = malloc(sizeof(struct ifvlan), M_VLAN, M_WAITOK | M_ZERO);
	/* if_alloc(9) cannot fail, hence no NULL check. */
	ifp = ifv->ifv_ifp = if_alloc(IFT_ETHER);
	CK_SLIST_INIT(&ifv->vlan_mc_listhead);
	ifp->if_softc = ifv;
	/*
	 * Set the name manually rather than using if_initname because
	 * we don't conform to the default naming convention for interfaces.
	 */
	strlcpy(ifp->if_xname, name, IFNAMSIZ);
	ifp->if_dname = vlanname;
	ifp->if_dunit = unit;

	ifp->if_init = vlan_init;
#ifdef ALTQ
	ifp->if_start = vlan_altq_start;
	ifp->if_transmit = vlan_altq_transmit;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = 0;
	IFQ_SET_READY(&ifp->if_snd);
#else
	ifp->if_transmit = vlan_transmit;
#endif
	ifp->if_qflush = vlan_qflush;
	ifp->if_ioctl = vlan_ioctl;
#if defined(KERN_TLS) || defined(RATELIMIT)
	ifp->if_snd_tag_alloc = vlan_snd_tag_alloc;
	ifp->if_ratelimit_query = vlan_ratelimit_query;
#endif
	ifp->if_flags = VLAN_IFFLAGS;
	ether_ifattach(ifp, eaddr);
	/* Now undo some of the damage... */
	ifp->if_baudrate = 0;
	ifp->if_type = IFT_L2VLAN;
	ifp->if_hdrlen = ETHER_VLAN_ENCAP_LEN;
	ifa = ifp->if_addr;
	sdl = (struct sockaddr_dl *)ifa->ifa_addr;
	sdl->sdl_type = IFT_L2VLAN;

	if (p != NULL) {
		error = vlan_config(ifv, p, vid, proto);
		if_rele(p);
		if (error != 0) {
			/*
			 * Since we've partially failed, we need to back
			 * out all the way, otherwise userland could get
			 * confused.  Thus, we destroy the interface.
			 */
			ether_ifdetach(ifp);
			vlan_unconfig(ifp);
			if_free(ifp);
			if (!subinterface)
				ifc_free_unit(ifc, unit);
			free(ifv, M_VLAN);
			return (error);
		}
	}

	*ifpp = ifp;

	return (0);
}
/*
*
* Parsers of IFLA_INFO_DATA inside IFLA_LINKINFO of RTM_NEWLINK
* {{nla_len=8, nla_type=IFLA_LINK}, 2},
* {{nla_len=12, nla_type=IFLA_IFNAME}, "xvlan22"},
* {{nla_len=24, nla_type=IFLA_LINKINFO},
* [
* {{nla_len=8, nla_type=IFLA_INFO_KIND}, "vlan"...},
* {{nla_len=12, nla_type=IFLA_INFO_DATA}, "\x06\x00\x01\x00\x16\x00\x00\x00"}]}
*/
/* Parsed representation of the IFLA_INFO_DATA attributes of a vlan link. */
struct nl_parsed_vlan {
	uint16_t vlan_id;	/* IFLA_VLAN_ID */
	uint16_t vlan_proto;	/* IFLA_VLAN_PROTOCOL */
	struct ifla_vlan_flags vlan_flags;	/* IFLA_VLAN_FLAGS */
};

#define	_OUT(_field)	offsetof(struct nl_parsed_vlan, _field)
static const struct nlattr_parser nla_p_vlan[] = {
	{ .type = IFLA_VLAN_ID, .off = _OUT(vlan_id), .cb = nlattr_get_uint16 },
	{ .type = IFLA_VLAN_FLAGS, .off = _OUT(vlan_flags), .cb = nlattr_get_nla },
	{ .type = IFLA_VLAN_PROTOCOL, .off = _OUT(vlan_proto), .cb = nlattr_get_uint16 },
};
#undef _OUT
NL_DECLARE_ATTR_PARSER(vlan_parser, nla_p_vlan);
/*
 * Netlink RTM_NEWLINK create path: translate the parsed netlink
 * attributes into a struct vlanreq and hand off to vlan_clone_create().
 */
static int
vlan_clone_create_nl(struct if_clone *ifc, char *name, size_t len,
    struct ifc_data_nl *ifd)
{
	struct epoch_tracker et;
	struct ifnet *ifp_parent;
	struct nl_pstate *npt = ifd->npt;
	struct nl_parsed_link *lattrs = ifd->lattrs;
	int error;

	/*
	 * lattrs.ifla_ifname is the new interface name
	 * lattrs.ifi_index contains parent interface index
	 * lattrs.ifla_idata contains un-parsed vlan data
	 */
	struct nl_parsed_vlan attrs = {
		/* 0xFEFE > 4095: rejected below unless IFLA_VLAN_ID parsed. */
		.vlan_id = 0xFEFE,
		.vlan_proto = ETHERTYPE_VLAN
	};

	if (lattrs->ifla_idata == NULL) {
		nlmsg_report_err_msg(npt, "vlan id is required, guessing not supported");
		return (ENOTSUP);
	}

	error = nl_parse_nested(lattrs->ifla_idata, &vlan_parser, npt, &attrs);
	if (error != 0)
		return (error);
	if (attrs.vlan_id > 4095) {
		nlmsg_report_err_msg(npt, "Invalid VID: %d", attrs.vlan_id);
		return (EINVAL);
	}
	if (attrs.vlan_proto != ETHERTYPE_VLAN && attrs.vlan_proto != ETHERTYPE_QINQ) {
		nlmsg_report_err_msg(npt, "Unsupported ethertype: 0x%04X", attrs.vlan_proto);
		return (ENOTSUP);
	}

	struct vlanreq params = {
		.vlr_tag = attrs.vlan_id,
		.vlr_proto = attrs.vlan_proto,
	};
	struct ifc_data ifd_new = { .flags = IFC_F_SYSSPACE, .unit = ifd->unit, .params = &params };

	/* Resolve the parent's name inside the net epoch, then drop it. */
	NET_EPOCH_ENTER(et);
	ifp_parent = ifnet_byindex(lattrs->ifi_index);
	if (ifp_parent != NULL)
		strlcpy(params.vlr_parent, if_name(ifp_parent), sizeof(params.vlr_parent));
	NET_EPOCH_EXIT(et);

	if (ifp_parent == NULL) {
		nlmsg_report_err_msg(npt, "unable to find parent interface %u", lattrs->ifi_index);
		return (ENOENT);
	}

	error = vlan_clone_create(ifc, name, len, &ifd_new, &ifd->ifp);

	return (error);
}
/*
 * Netlink RTM_NEWLINK modify path: re-parse IFLA_INFO_DATA (if present on
 * a pure-modify request) and reconfigure the vlan onto the parent named
 * by IFLA_LINK, then apply the generic ifnet attribute changes.
 */
static int
vlan_clone_modify_nl(struct ifnet *ifp, struct ifc_data_nl *ifd)
{
	struct nl_parsed_link *lattrs = ifd->lattrs;

	if ((lattrs->ifla_idata != NULL) && ((ifd->flags & IFC_F_CREATE) == 0)) {
		struct epoch_tracker et;
		struct nl_parsed_vlan attrs = {
			.vlan_proto = ETHERTYPE_VLAN,
		};
		int error;

		error = nl_parse_nested(lattrs->ifla_idata, &vlan_parser, ifd->npt, &attrs);
		if (error != 0)
			return (error);

		/* ifnet_byindex_ref() takes a reference released below. */
		NET_EPOCH_ENTER(et);
		struct ifnet *ifp_parent = ifnet_byindex_ref(lattrs->ifla_link);
		NET_EPOCH_EXIT(et);

		if (ifp_parent == NULL) {
			nlmsg_report_err_msg(ifd->npt, "unable to find parent interface %u",
			    lattrs->ifla_link);
			return (ENOENT);
		}

		struct ifvlan *ifv = ifp->if_softc;
		/* vlan_config() validates the VID and proto itself. */
		error = vlan_config(ifv, ifp_parent, attrs.vlan_id, attrs.vlan_proto);
		if_rele(ifp_parent);
		if (error != 0)
			return (error);
	}

	return (nl_modify_ifp_generic(ifp, ifd->lattrs, ifd->bm, ifd->npt));
}
/*
* {{nla_len=24, nla_type=IFLA_LINKINFO},
* [
* {{nla_len=8, nla_type=IFLA_INFO_KIND}, "vlan"...},
* {{nla_len=12, nla_type=IFLA_INFO_DATA}, "\x06\x00\x01\x00\x16\x00\x00\x00"}]}
*/
/*
 * Netlink dump: emit IFLA_LINK (parent index, if configured) and a nested
 * IFLA_LINKINFO carrying the vlan kind, VID, and protocol.
 */
static void
vlan_clone_dump_nl(struct ifnet *ifp, struct nl_writer *nw)
{
	uint32_t parent_index = 0;
	uint16_t vlan_id = 0;
	uint16_t vlan_proto = 0;

	/* Snapshot the state under the shared lock, write it out after. */
	VLAN_SLOCK();
	struct ifvlan *ifv = ifp->if_softc;
	if (TRUNK(ifv) != NULL)
		parent_index = PARENT(ifv)->if_index;
	vlan_id = ifv->ifv_vid;
	vlan_proto = ifv->ifv_proto;
	VLAN_SUNLOCK();

	if (parent_index != 0)
		nlattr_add_u32(nw, IFLA_LINK, parent_index);

	int off = nlattr_add_nested(nw, IFLA_LINKINFO);
	if (off != 0) {
		nlattr_add_string(nw, IFLA_INFO_KIND, "vlan");
		int off2 = nlattr_add_nested(nw, IFLA_INFO_DATA);
		if (off2 != 0) {
			nlattr_add_u16(nw, IFLA_VLAN_ID, vlan_id);
			nlattr_add_u16(nw, IFLA_VLAN_PROTOCOL, vlan_proto);
			nlattr_set_len(nw, off2);
		}
		nlattr_set_len(nw, off);
	}
}
/*
 * Destroy a cloned vlan interface.  Refuses (EBUSY) if the interface is
 * itself a trunk with vlans stacked on it.
 */
static int
vlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
{
	struct ifvlan *ifv = ifp->if_softc;
	int unit = ifp->if_dunit;

	if (ifp->if_vlantrunk)
		return (EBUSY);

#ifdef ALTQ
	IFQ_PURGE(&ifp->if_snd);
#endif
	ether_ifdetach(ifp);	/* first, remove it from system-wide lists */
	vlan_unconfig(ifp);	/* now it can be unconfigured and freed */
	/*
	 * We should have the only reference to the ifv now, so we can now
	 * drain any remaining lladdr task before freeing the ifnet and the
	 * ifvlan.
	 */
	taskqueue_drain(taskqueue_thread, &ifv->lladdr_task);
	/* Let any lingering epoch readers finish before freeing. */
	NET_EPOCH_WAIT();
	if_free(ifp);
	free(ifv, M_VLAN);
	/* IF_DUNIT_NONE means the "<parent>.<vid>" form: no unit to free. */
	if (unit != IF_DUNIT_NONE)
		ifc_free_unit(ifc, unit);

	return (0);
}
/*
* The ifp->if_init entry point for vlan(4) is a no-op.
*/
static void
vlan_init(void *foo __unused)
{
	/* Intentionally empty: if_init is required but vlan(4) needs none. */
}
/*
* The if_transmit method for vlan(4) interface.
*/
static int
vlan_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct ifvlan *ifv;
	struct ifnet *p;
	int error, len, mcast;

	NET_EPOCH_ASSERT();

	ifv = ifp->if_softc;
	if (TRUNK(ifv) == NULL) {
		/* Not configured onto a parent: nowhere to send. */
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENETDOWN);
	}
	p = PARENT(ifv);
	/* Record stats inputs before the mbuf is handed off below. */
	len = m->m_pkthdr.len;
	mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;

	BPF_MTAP(ifp, m);

#if defined(KERN_TLS) || defined(RATELIMIT)
	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) {
		struct vlan_snd_tag *vst;
		struct m_snd_tag *mst;

		MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
		mst = m->m_pkthdr.snd_tag;
		vst = mst_to_vst(mst);
		if (vst->tag->ifp != p) {
			/* Tag was allocated against a different parent. */
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			m_freem(m);
			return (EAGAIN);
		}

		/* Swap the vlan wrapper tag for the parent's tag. */
		m->m_pkthdr.snd_tag = m_snd_tag_ref(vst->tag);
		m_snd_tag_rele(mst);
	}
#endif

	/*
	 * Do not run parent's if_transmit() if the parent is not up,
	 * or parent's driver will cause a system crash.
	 */
	if (!UP_AND_RUNNING(p)) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENETDOWN);
	}

	/* Encapsulate (or hw-tag) the frame; consumes m on failure. */
	if (!ether_8021q_frame(&m, ifp, p, &ifv->ifv_qtag)) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		return (0);
	}

	/*
	 * Send it, precisely as ether_output() would have.
	 */
	error = (p->if_transmit)(p, m);
	if (error == 0) {
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
		if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast);
	} else
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	return (error);
}
/*
 * if_output for vlan(4): walk down stacked vlans to the first non-vlan
 * parent and invoke its if_output with the original vlan ifp.
 */
static int
vlan_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    struct route *ro)
{
	struct ifvlan *ifv;
	struct ifnet *p;

	NET_EPOCH_ASSERT();

	/*
	 * Find the first non-VLAN parent interface.
	 */
	ifv = ifp->if_softc;
	for (;;) {
		if (TRUNK(ifv) == NULL) {
			m_freem(m);
			return (ENETDOWN);
		}
		p = PARENT(ifv);
		if (p->if_type != IFT_L2VLAN)
			break;
		/* Parent is itself a vlan: keep descending. */
		ifv = p->if_softc;
	}

	return (p->if_output(ifp, m, dst, ro));
}
#ifdef ALTQ
/*
 * ALTQ if_start: drain the interface send queue through vlan_transmit(),
 * holding the queue lock across the dequeue loop.
 */
static void
vlan_altq_start(if_t ifp)
{
	struct ifaltq *queue = &ifp->if_snd;
	struct mbuf *m;

	IFQ_LOCK(queue);
	for (;;) {
		IFQ_DEQUEUE_NOLOCK(queue, m);
		if (m == NULL)
			break;
		vlan_transmit(ifp, m);
	}
	IFQ_UNLOCK(queue);
}
/*
 * ALTQ if_transmit: enqueue through ALTQ when it is enabled, otherwise
 * fall straight through to vlan_transmit().
 */
static int
vlan_altq_transmit(if_t ifp, struct mbuf *m)
{
	int error;

	if (!ALTQ_IS_ENABLED(&ifp->if_snd))
		return (vlan_transmit(ifp, m));

	IFQ_ENQUEUE(&ifp->if_snd, m, error);
	if (error == 0)
		vlan_altq_start(ifp);
	return (error);
}
#endif /* ALTQ */
/*
* The ifp->if_qflush entry point for vlan(4) is a no-op.
*/
static void
vlan_qflush(struct ifnet *ifp __unused)
{
	/* Intentionally empty: vlan(4) keeps no queue of its own here. */
}
static void
vlan_input(struct ifnet *ifp, struct mbuf *m)
{
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;
	struct m_tag *mtag;
	uint16_t vid, tag;

	NET_EPOCH_ASSERT();

	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		/* No vlans configured on this interface: drop. */
		m_freem(m);
		return;
	}

	if (m->m_flags & M_VLANTAG) {
		/*
		 * Packet is tagged, but m contains a normal
		 * Ethernet frame; the tag is stored out-of-band.
		 */
		tag = m->m_pkthdr.ether_vtag;
		m->m_flags &= ~M_VLANTAG;
	} else {
		struct ether_vlan_header *evl;

		/*
		 * Packet is tagged in-band as specified by 802.1q.
		 */
		switch (ifp->if_type) {
		case IFT_ETHER:
			if (m->m_len < sizeof(*evl) &&
			    (m = m_pullup(m, sizeof(*evl))) == NULL) {
				if_printf(ifp, "cannot pullup VLAN header\n");
				return;
			}
			evl = mtod(m, struct ether_vlan_header *);
			tag = ntohs(evl->evl_tag);

			/*
			 * Remove the 802.1q header by copying the Ethernet
			 * addresses over it and adjusting the beginning of
			 * the data in the mbuf.  The encapsulated Ethernet
			 * type field is already in place.
			 */
			bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
			      ETHER_HDR_LEN - ETHER_TYPE_LEN);
			m_adj(m, ETHER_VLAN_ENCAP_LEN);
			break;

		default:
#ifdef INVARIANTS
			panic("%s: %s has unsupported if_type %u",
			      __func__, ifp->if_xname, ifp->if_type);
#endif
			if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
			m_freem(m);
			return;
		}
	}

	/* The low 12 bits of the tag select the vlan interface. */
	vid = EVL_VLANOFTAG(tag);

	ifv = vlan_gethash(trunk, vid);
	if (ifv == NULL || !UP_AND_RUNNING(ifv->ifv_ifp)) {
		if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
		m_freem(m);
		return;
	}

	if (V_vlan_mtag_pcp) {
		/*
		 * While uncommon, it is possible that we will find a 802.1q
		 * packet encapsulated inside another packet that also had an
		 * 802.1q header.  For example, ethernet tunneled over IPSEC
		 * arriving over ethernet.  In that case, we replace the
		 * existing 802.1q PCP m_tag value.
		 */
		mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL);
		if (mtag == NULL) {
			mtag = m_tag_alloc(MTAG_8021Q, MTAG_8021Q_PCP_IN,
			    sizeof(uint8_t), M_NOWAIT);
			if (mtag == NULL) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				m_freem(m);
				return;
			}
			m_tag_prepend(m, mtag);
		}
		/* Payload of the tag is the priority bits of the 802.1q tag. */
		*(uint8_t *)(mtag + 1) = EVL_PRIOFTAG(tag);
	}

	m->m_pkthdr.rcvif = ifv->ifv_ifp;
	if_inc_counter(ifv->ifv_ifp, IFCOUNTER_IPACKETS, 1);

	/* Pass it back through the parent's input routine. */
	(*ifv->ifv_ifp->if_input)(ifv->ifv_ifp, m);
}
/*
 * Task callback: push the (already copied) link-layer address of a vlan
 * interface into the ifnet layer, in the interface's home vnet.
 */
static void
vlan_lladdr_fn(void *arg, int pending __unused)
{
	struct ifvlan *sc = arg;
	struct ifnet *vlanifp = sc->ifv_ifp;

	CURVNET_SET(vlanifp->if_vnet);
	/* The lladdr has already been copied into ifv_ifp by our caller. */
	if_setlladdr(vlanifp, IF_LLADDR(vlanifp), vlanifp->if_addrlen);
	CURVNET_RESTORE();
}
/*
 * Attach the vlan interface 'ifv' to parent interface 'p', using VLAN id
 * 'vid' and encapsulation ethertype 'proto'.
 *
 * If 'ifv' is already configured on the same parent, only the vid/proto
 * are updated (re-hashing in the trunk as needed, with rollback if the
 * new vid collides).  Otherwise a trunk structure is created on the
 * parent on first use and the vlan interface inherits MTU, capabilities
 * and the link-layer address from the parent.
 *
 * Returns 0 on success or an errno value (EPROTONOSUPPORT, EINVAL,
 * EBUSY, or a vlan_inshash() error).  Invokes the vlan_config
 * eventhandler on success.
 */
static int
vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t vid,
    uint16_t proto)
{
	struct epoch_tracker et;
	struct ifvlantrunk *trunk;
	struct ifnet *ifp;
	int error = 0;

	/*
	 * We can handle non-ethernet hardware types as long as
	 * they handle the tagging and headers themselves.
	 */
	if (p->if_type != IFT_ETHER &&
	    p->if_type != IFT_L2VLAN &&
	    (p->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
		return (EPROTONOSUPPORT);
	if ((p->if_flags & VLAN_IFFLAGS) != VLAN_IFFLAGS)
		return (EPROTONOSUPPORT);
	/*
	 * Don't let the caller set up a VLAN VID with
	 * anything except VLID bits.
	 * VID numbers 0x0 and 0xFFF are reserved.
	 */
	if (vid == 0 || vid == 0xFFF || (vid & ~EVL_VLID_MASK))
		return (EINVAL);
	if (ifv->ifv_trunk) {
		/* Already configured: only allow changes on the same parent. */
		trunk = ifv->ifv_trunk;
		if (trunk->parent != p)
			return (EBUSY);

		VLAN_XLOCK();

		ifv->ifv_proto = proto;

		if (ifv->ifv_vid != vid) {
			int oldvid = ifv->ifv_vid;

			/* Re-hash */
			vlan_remhash(trunk, ifv);
			ifv->ifv_vid = vid;
			error = vlan_inshash(trunk, ifv);
			if (error) {
				int ret __diagused;

				/* Re-insert back where we found it. */
				ifv->ifv_vid = oldvid;
				ret = vlan_inshash(trunk, ifv);
				MPASS(ret == 0);
			}
		}
		/* Will unlock */
		goto done;
	}

	VLAN_XLOCK();
	if (p->if_vlantrunk == NULL) {
		/* First vlan on this parent: create and publish the trunk. */
		trunk = malloc(sizeof(struct ifvlantrunk),
		    M_VLAN, M_WAITOK | M_ZERO);
		vlan_inithash(trunk);
		TRUNK_LOCK_INIT(trunk);
		TRUNK_WLOCK(trunk);
		p->if_vlantrunk = trunk;
		trunk->parent = p;
		if_ref(trunk->parent);
		TRUNK_WUNLOCK(trunk);
	} else {
		trunk = p->if_vlantrunk;
	}

	ifv->ifv_vid = vid;	/* must set this before vlan_inshash() */
	ifv->ifv_pcp = 0;	/* Default: best effort delivery. */
	error = vlan_inshash(trunk, ifv);
	if (error)
		goto done;
	ifv->ifv_proto = proto;
	ifv->ifv_encaplen = ETHER_VLAN_ENCAP_LEN;
	ifv->ifv_mintu = ETHERMIN;
	ifv->ifv_pflags = 0;
	ifv->ifv_capenable = -1;

	/*
	 * If the parent supports the VLAN_MTU capability,
	 * i.e. can Tx/Rx larger than ETHER_MAX_LEN frames,
	 * use it.
	 */
	if (p->if_capenable & IFCAP_VLAN_MTU) {
		/*
		 * No need to fudge the MTU since the parent can
		 * handle extended frames.
		 */
		ifv->ifv_mtufudge = 0;
	} else {
		/*
		 * Fudge the MTU by the encapsulation size. This
		 * makes us incompatible with strictly compliant
		 * 802.1Q implementations, but allows us to use
		 * the feature with other NetBSD implementations,
		 * which might still be useful.
		 */
		ifv->ifv_mtufudge = ifv->ifv_encaplen;
	}

	ifv->ifv_trunk = trunk;
	ifp = ifv->ifv_ifp;
	/*
	 * Initialize fields from our parent. This duplicates some
	 * work with ether_ifattach() but allows for non-ethernet
	 * interfaces to also work.
	 */
	ifp->if_mtu = p->if_mtu - ifv->ifv_mtufudge;
	ifp->if_baudrate = p->if_baudrate;
	ifp->if_input = p->if_input;
	ifp->if_resolvemulti = p->if_resolvemulti;
	ifp->if_addrlen = p->if_addrlen;
	ifp->if_broadcastaddr = p->if_broadcastaddr;
	ifp->if_pcp = ifv->ifv_pcp;

	/*
	 * We wrap the parent's if_output using vlan_output to ensure that it
	 * can't become stale.
	 */
	ifp->if_output = vlan_output;

	/*
	 * Copy only a selected subset of flags from the parent.
	 * Other flags are none of our business.
	 */
#define VLAN_COPY_FLAGS (IFF_SIMPLEX)
	ifp->if_flags &= ~VLAN_COPY_FLAGS;
	ifp->if_flags |= p->if_flags & VLAN_COPY_FLAGS;
#undef VLAN_COPY_FLAGS

	ifp->if_link_state = p->if_link_state;

	NET_EPOCH_ENTER(et);
	vlan_capabilities(ifv);
	NET_EPOCH_EXIT(et);

	/*
	 * Set up our interface address to reflect the underlying
	 * physical interface's.
	 */
	TASK_INIT(&ifv->lladdr_task, 0, vlan_lladdr_fn, ifv);
	((struct sockaddr_dl *)ifp->if_addr->ifa_addr)->sdl_alen =
	    p->if_addrlen;

	/*
	 * Do not schedule link address update if it was the same
	 * as previous parent's. This helps avoid updating for each
	 * associated llentry.
	 */
	if (memcmp(IF_LLADDR(p), IF_LLADDR(ifp), p->if_addrlen) != 0) {
		bcopy(IF_LLADDR(p), IF_LLADDR(ifp), p->if_addrlen);
		taskqueue_enqueue(taskqueue_thread, &ifv->lladdr_task);
	}

	/* We are ready for operation now. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	/* Update flags on the parent, if necessary. */
	vlan_setflags(ifp, 1);

	/*
	 * Configure multicast addresses that may already be
	 * joined on the vlan device.
	 */
	(void)vlan_setmulti(ifp);

done:
	if (error == 0)
		EVENTHANDLER_INVOKE(vlan_config, p, ifv->ifv_vid);
	VLAN_XUNLOCK();
	return (error);
}
/*
 * Detach 'ifp' (a vlan interface) from its parent, if any.
 * Convenience wrapper that takes the exclusive vlan lock around
 * vlan_unconfig_locked().
 */
static void
vlan_unconfig(struct ifnet *ifp)
{

	VLAN_XLOCK();
	vlan_unconfig_locked(ifp, 0);
	VLAN_XUNLOCK();
}
/*
 * Detach the vlan interface 'ifp' from its parent with the exclusive
 * vlan lock held.  'departing' is non-zero when the parent itself is
 * being detached, in which case the parent's multicast memberships have
 * already been torn down and must not be removed again.
 *
 * Destroys the parent's trunk structure if this was the last vlan on it,
 * and always succeeds (errors from if_delmulti() are only logged).
 */
static void
vlan_unconfig_locked(struct ifnet *ifp, int departing)
{
	struct ifvlantrunk *trunk;
	struct vlan_mc_entry *mc;
	struct ifvlan *ifv;
	struct ifnet *parent;
	int error;

	VLAN_XLOCK_ASSERT();

	ifv = ifp->if_softc;
	trunk = ifv->ifv_trunk;
	parent = NULL;

	if (trunk != NULL) {
		parent = trunk->parent;

		/*
		 * Since the interface is being unconfigured, we need to
		 * empty the list of multicast groups that we may have joined
		 * while we were alive from the parent's list.
		 */
		while ((mc = CK_SLIST_FIRST(&ifv->vlan_mc_listhead)) != NULL) {
			/*
			 * If the parent interface is being detached,
			 * all its multicast addresses have already
			 * been removed.  Warn about errors if
			 * if_delmulti() does fail, but don't abort as
			 * all callers expect vlan destruction to
			 * succeed.
			 */
			if (!departing) {
				error = if_delmulti(parent,
				    (struct sockaddr *)&mc->mc_addr);
				if (error)
					if_printf(ifp,
					    "Failed to delete multicast address from parent: %d\n",
					    error);
			}
			CK_SLIST_REMOVE_HEAD(&ifv->vlan_mc_listhead, mc_entries);
			/* Defer the free until readers are done with the entry. */
			NET_EPOCH_CALL(vlan_mc_free, &mc->mc_epoch_ctx);
		}

		vlan_setflags(ifp, 0); /* clear special flags on parent */

		vlan_remhash(trunk, ifv);
		ifv->ifv_trunk = NULL;

		/*
		 * Check if we were the last.
		 */
		if (trunk->refcnt == 0) {
			parent->if_vlantrunk = NULL;
			/* Wait for concurrent epoch readers before freeing. */
			NET_EPOCH_WAIT();
			trunk_destroy(trunk);
		}
	}

	/* Disconnect from parent. */
	if (ifv->ifv_pflags)
		if_printf(ifp, "%s: ifv_pflags unclean\n", __func__);
	ifp->if_mtu = ETHERMTU;
	ifp->if_link_state = LINK_STATE_UNKNOWN;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	/*
	 * Only dispatch an event if vlan was
	 * attached, otherwise there is nothing
	 * to cleanup anyway.
	 */
	if (parent != NULL)
		EVENTHANDLER_INVOKE(vlan_unconfig, parent, ifv->ifv_vid);
}
/*
 * Handle a reference counted flag that should be set on the parent as well.
 *
 * 'flag' is the IFF_* bit to mirror; 'func' is the parent-side routine that
 * acquires or releases a reference on that flag (e.g. ifpromisc()).  With
 * 'status' non-zero the parent is synced to this interface's if_flags;
 * with 'status' zero the flag is force-released on the parent.
 *
 * Returns 0 on success or the errno from 'func'.
 */
static int
vlan_setflag(struct ifnet *ifp, int flag, int status,
    int (*func)(struct ifnet *, int))
{
	struct ifvlan *ifv;
	int error;

	VLAN_SXLOCK_ASSERT();

	ifv = ifp->if_softc;
	status = status ? (ifp->if_flags & flag) : 0;
	/* Now "status" contains the flag value or 0 */

	/*
	 * See if recorded parent's status is different from what
	 * we want it to be.  If it is, flip it.  We record parent's
	 * status in ifv_pflags so that we won't clear parent's flag
	 * we haven't set.  In fact, we don't clear or set parent's
	 * flags directly, but get or release references to them.
	 * That's why we can be sure that recorded flags still are
	 * in accord with actual parent's flags.
	 */
	if (status != (ifv->ifv_pflags & flag)) {
		error = (*func)(PARENT(ifv), status);
		if (error)
			return (error);
		ifv->ifv_pflags &= ~flag;
		ifv->ifv_pflags |= status;
	}
	return (0);
}
/*
* Handle IFF_* flags that require certain changes on the parent:
* if "status" is true, update parent's flags respective to our if_flags;
* if "status" is false, forcedly clear the flags set on parent.
*/
static int
vlan_setflags(struct ifnet *ifp, int status)
{
int error, i;
for (i = 0; vlan_pflags[i].flag; i++) {
error = vlan_setflag(ifp, vlan_pflags[i].flag,
status, vlan_pflags[i].func);
if (error)
return (error);
}
return (0);
}
/*
 * Inform all vlans that their parent has changed link state.
 * Called with 'ifp' being the parent; copies the parent's baudrate and
 * link state to every vlan on its trunk.  Runs inside the network epoch
 * and takes the trunk write lock while iterating.
 */
static void
vlan_link_state(struct ifnet *ifp)
{
	struct epoch_tracker et;
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;

	NET_EPOCH_ENTER(et);
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		/* Parent has no vlans configured; nothing to propagate. */
		NET_EPOCH_EXIT(et);
		return;
	}

	TRUNK_WLOCK(trunk);
	VLAN_FOREACH(ifv, trunk) {
		ifv->ifv_ifp->if_baudrate = trunk->parent->if_baudrate;
		if_link_state_change(ifv->ifv_ifp,
		    trunk->parent->if_link_state);
	}
	TRUNK_WUNLOCK(trunk);
	NET_EPOCH_EXIT(et);
}
/*
 * Recompute the vlan interface's capabilities (if_capabilities,
 * if_capenable, if_hwassist) from its parent's, honoring the subset the
 * user enabled via ifv_capenable.  Must be called within the network
 * epoch with the vlan sx lock held.
 */
static void
vlan_capabilities(struct ifvlan *ifv)
{
	struct ifnet *p;
	struct ifnet *ifp;
	struct ifnet_hw_tsomax hw_tsomax;
	int cap = 0, ena = 0, mena;
	u_long hwa = 0;

	NET_EPOCH_ASSERT();
	VLAN_SXLOCK_ASSERT();

	p = PARENT(ifv);
	ifp = ifv->ifv_ifp;

	/* Mask parent interface enabled capabilities disabled by user. */
	mena = p->if_capenable & ifv->ifv_capenable;

	/*
	 * If the parent interface can do checksum offloading
	 * on VLANs, then propagate its hardware-assisted
	 * checksumming flags. Also assert that checksum
	 * offloading requires hardware VLAN tagging.
	 */
	if (p->if_capabilities & IFCAP_VLAN_HWCSUM)
		cap |= p->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
	if (p->if_capenable & IFCAP_VLAN_HWCSUM &&
	    p->if_capenable & IFCAP_VLAN_HWTAGGING) {
		ena |= mena & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
		if (ena & IFCAP_TXCSUM)
			hwa |= p->if_hwassist & (CSUM_IP | CSUM_TCP |
			    CSUM_UDP | CSUM_SCTP);
		if (ena & IFCAP_TXCSUM_IPV6)
			hwa |= p->if_hwassist & (CSUM_TCP_IPV6 |
			    CSUM_UDP_IPV6 | CSUM_SCTP_IPV6);
	}

	/*
	 * If the parent interface can do TSO on VLANs then
	 * propagate the hardware-assisted flag. TSO on VLANs
	 * does not necessarily require hardware VLAN tagging.
	 */
	memset(&hw_tsomax, 0, sizeof(hw_tsomax));
	if_hw_tsomax_common(p, &hw_tsomax);
	if_hw_tsomax_update(ifp, &hw_tsomax);
	if (p->if_capabilities & IFCAP_VLAN_HWTSO)
		cap |= p->if_capabilities & IFCAP_TSO;
	if (p->if_capenable & IFCAP_VLAN_HWTSO) {
		ena |= mena & IFCAP_TSO;
		if (ena & IFCAP_TSO)
			hwa |= p->if_hwassist & CSUM_TSO;
	}

	/*
	 * If the parent interface can do LRO and checksum offloading on
	 * VLANs, then guess it may do LRO on VLANs. False positive here
	 * cost nothing, while false negative may lead to some confusions.
	 */
	if (p->if_capabilities & IFCAP_VLAN_HWCSUM)
		cap |= p->if_capabilities & IFCAP_LRO;
	if (p->if_capenable & IFCAP_VLAN_HWCSUM)
		ena |= mena & IFCAP_LRO;

	/*
	 * If the parent interface can offload TCP connections over VLANs then
	 * propagate its TOE capability to the VLAN interface.
	 *
	 * All TOE drivers in the tree today can deal with VLANs.  If this
	 * changes then IFCAP_VLAN_TOE should be promoted to a full capability
	 * with its own bit.
	 */
#define	IFCAP_VLAN_TOE IFCAP_TOE
	if (p->if_capabilities & IFCAP_VLAN_TOE)
		cap |= p->if_capabilities & IFCAP_TOE;
	if (p->if_capenable & IFCAP_VLAN_TOE) {
		SETTOEDEV(ifp, TOEDEV(p));
		ena |= mena & IFCAP_TOE;
	}

	/*
	 * If the parent interface supports dynamic link state, so does the
	 * VLAN interface.
	 */
	cap |= (p->if_capabilities & IFCAP_LINKSTATE);
	ena |= (mena & IFCAP_LINKSTATE);

#ifdef RATELIMIT
	/*
	 * If the parent interface supports ratelimiting, so does the
	 * VLAN interface.
	 */
	cap |= (p->if_capabilities & IFCAP_TXRTLMT);
	ena |= (mena & IFCAP_TXRTLMT);
#endif

	/*
	 * If the parent interface supports unmapped mbufs, so does
	 * the VLAN interface.  Note that this should be fine even for
	 * interfaces that don't support hardware tagging as headers
	 * are prepended in normal mbufs to unmapped mbufs holding
	 * payload data.
	 */
	cap |= (p->if_capabilities & IFCAP_MEXTPG);
	ena |= (mena & IFCAP_MEXTPG);

	/*
	 * If the parent interface can offload encryption and segmentation
	 * of TLS records over TCP, propagate it's capability to the VLAN
	 * interface.
	 *
	 * All TLS drivers in the tree today can deal with VLANs.  If
	 * this ever changes, then a new IFCAP_VLAN_TXTLS can be
	 * defined.
	 */
	if (p->if_capabilities & (IFCAP_TXTLS | IFCAP_TXTLS_RTLMT))
		cap |= p->if_capabilities & (IFCAP_TXTLS | IFCAP_TXTLS_RTLMT);
	if (p->if_capenable & (IFCAP_TXTLS | IFCAP_TXTLS_RTLMT))
		ena |= mena & (IFCAP_TXTLS | IFCAP_TXTLS_RTLMT);

	ifp->if_capabilities = cap;
	ifp->if_capenable = ena;
	ifp->if_hwassist = hwa;
}
/*
 * Re-derive the capabilities of every vlan interface on the trunk of
 * parent interface 'ifp'.  A parent with no trunk is a no-op.
 */
static void
vlan_trunk_capabilities(struct ifnet *ifp)
{
	struct ifvlantrunk *trunk;

	VLAN_SLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk != NULL) {
		struct epoch_tracker et;
		struct ifvlan *ifv;

		NET_EPOCH_ENTER(et);
		VLAN_FOREACH(ifv, trunk)
			vlan_capabilities(ifv);
		NET_EPOCH_EXIT(et);
	}
	VLAN_SUNLOCK();
}
/*
 * ioctl handler for vlan interfaces.  Handles address setup, media and
 * MTU queries, vlan (re)configuration (SIOCSETVLAN/SIOCGETVLAN), flag
 * and multicast propagation to the parent, PCP get/set, and capability
 * changes.  Returns 0 or an errno value.
 */
static int
vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifnet *p;
	struct ifreq *ifr;
#ifdef INET
	struct ifaddr *ifa;
#endif
	struct ifvlan *ifv;
	struct ifvlantrunk *trunk;
	struct vlanreq vlr;
	int error = 0, oldmtu;

	ifr = (struct ifreq *)data;
#ifdef INET
	ifa = (struct ifaddr *) data;
#endif
	ifv = ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(ifp, ifa);
#endif
		break;
	case SIOCGIFADDR:
		bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0],
		    ifp->if_addrlen);
		break;
	case SIOCGIFMEDIA:
		/* Forward the media query to the parent, if we have one. */
		VLAN_SLOCK();
		if (TRUNK(ifv) != NULL) {
			p = PARENT(ifv);
			if_ref(p);
			error = (*p->if_ioctl)(p, SIOCGIFMEDIA, data);
			if_rele(p);
			/* Limit the result to the parent's current config. */
			if (error == 0) {
				struct ifmediareq *ifmr;

				ifmr = (struct ifmediareq *)data;
				if (ifmr->ifm_count >= 1 && ifmr->ifm_ulist) {
					ifmr->ifm_count = 1;
					error = copyout(&ifmr->ifm_current,
					    ifmr->ifm_ulist,
					    sizeof(int));
				}
			}
		} else {
			error = EINVAL;
		}
		VLAN_SUNLOCK();
		break;

	case SIOCSIFMEDIA:
		/* Media is inherited from the parent; not settable here. */
		error = EINVAL;
		break;

	case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 * The MTU must fit within the parent's MTU less the
		 * encapsulation fudge, and be at least the minimum.
		 */
		VLAN_SLOCK();
		trunk = TRUNK(ifv);
		if (trunk != NULL) {
			TRUNK_WLOCK(trunk);
			if (ifr->ifr_mtu >
			    (PARENT(ifv)->if_mtu - ifv->ifv_mtufudge) ||
			    ifr->ifr_mtu <
			    (ifv->ifv_mintu - ifv->ifv_mtufudge))
				error = EINVAL;
			else
				ifp->if_mtu = ifr->ifr_mtu;
			TRUNK_WUNLOCK(trunk);
		} else
			error = EINVAL;
		VLAN_SUNLOCK();
		break;

	case SIOCSETVLAN:
#ifdef VIMAGE
		/*
		 * XXXRW/XXXBZ: The goal in these checks is to allow a VLAN
		 * interface to be delegated to a jail without allowing the
		 * jail to change what underlying interface/VID it is
		 * associated with.  We are not entirely convinced that this
		 * is the right way to accomplish that policy goal.
		 */
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		error = copyin(ifr_data_get_ptr(ifr), &vlr, sizeof(vlr));
		if (error)
			break;
		if (vlr.vlr_parent[0] == '\0') {
			/* An empty parent name means "detach". */
			vlan_unconfig(ifp);
			break;
		}
		p = ifunit_ref(vlr.vlr_parent);
		if (p == NULL) {
			error = ENOENT;
			break;
		}
		if (vlr.vlr_proto == 0)
			vlr.vlr_proto = ETHERTYPE_VLAN;
		oldmtu = ifp->if_mtu;
		error = vlan_config(ifv, p, vlr.vlr_tag, vlr.vlr_proto);
		if_rele(p);

		/*
		 * VLAN MTU may change during addition of the vlandev.
		 * If it did, do network layer specific procedure.
		 */
		if (ifp->if_mtu != oldmtu)
			if_notifymtu(ifp);
		break;

	case SIOCGETVLAN:
#ifdef VIMAGE
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		bzero(&vlr, sizeof(vlr));
		VLAN_SLOCK();
		if (TRUNK(ifv) != NULL) {
			strlcpy(vlr.vlr_parent, PARENT(ifv)->if_xname,
			    sizeof(vlr.vlr_parent));
			vlr.vlr_tag = ifv->ifv_vid;
			vlr.vlr_proto = ifv->ifv_proto;
		}
		VLAN_SUNLOCK();
		error = copyout(&vlr, ifr_data_get_ptr(ifr), sizeof(vlr));
		break;

	case SIOCSIFFLAGS:
		/*
		 * We should propagate selected flags to the parent,
		 * e.g., promiscuous mode.
		 */
		VLAN_SLOCK();
		if (TRUNK(ifv) != NULL)
			error = vlan_setflags(ifp, 1);
		VLAN_SUNLOCK();
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * If we don't have a parent, just remember the membership for
		 * when we do.
		 *
		 * XXX We need the rmlock here to avoid sleeping while
		 * holding in6_multi_mtx.
		 */
		VLAN_XLOCK();
		trunk = TRUNK(ifv);
		if (trunk != NULL)
			error = vlan_setmulti(ifp);
		VLAN_XUNLOCK();
		break;
	case SIOCGVLANPCP:
#ifdef VIMAGE
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		ifr->ifr_vlan_pcp = ifv->ifv_pcp;
		break;

	case SIOCSVLANPCP:
#ifdef VIMAGE
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		/* Setting the 802.1p priority requires privilege. */
		error = priv_check(curthread, PRIV_NET_SETVLANPCP);
		if (error)
			break;
		if (ifr->ifr_vlan_pcp > VLAN_PCP_MAX) {
			error = EINVAL;
			break;
		}
		ifv->ifv_pcp = ifr->ifr_vlan_pcp;
		ifp->if_pcp = ifv->ifv_pcp;
		/* broadcast event about PCP change */
		EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_PCP);
		break;

	case SIOCSIFCAP:
		VLAN_SLOCK();
		ifv->ifv_capenable = ifr->ifr_reqcap;
		trunk = TRUNK(ifv);
		if (trunk != NULL) {
			/* Re-derive capabilities from the parent. */
			struct epoch_tracker et;

			NET_EPOCH_ENTER(et);
			vlan_capabilities(ifv);
			NET_EPOCH_EXIT(et);
		}
		VLAN_SUNLOCK();
		break;

	default:
		error = EINVAL;
		break;
	}

	return (error);
}
#if defined(KERN_TLS) || defined(RATELIMIT)
/*
 * Allocate a send tag for the vlan interface by allocating one on the
 * parent and, for most tag types, wrapping it in a vlan_snd_tag so the
 * parent's tag can later be retrieved via vlan_next_snd_tag().
 * TLS RX tags are not wrapped: the parent's tag is returned directly,
 * with the vlan id recorded in the allocation parameters.
 *
 * Returns 0 on success, EOPNOTSUPP for unsupported tag types or a
 * missing parent, ENOMEM on allocation failure, or the error from
 * m_snd_tag_alloc().
 */
static int
vlan_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	struct epoch_tracker et;
	const struct if_snd_tag_sw *sw;
	struct vlan_snd_tag *vst;
	struct ifvlan *ifv;
	struct ifnet *parent;
	struct m_snd_tag *mst;
	int error;

	NET_EPOCH_ENTER(et);
	ifv = ifp->if_softc;

	/* Pick the wrapper switch for this tag type; NULL means no wrapper. */
	switch (params->hdr.type) {
#ifdef RATELIMIT
	case IF_SND_TAG_TYPE_UNLIMITED:
		sw = &vlan_snd_tag_ul_sw;
		break;
	case IF_SND_TAG_TYPE_RATE_LIMIT:
		sw = &vlan_snd_tag_rl_sw;
		break;
#endif
#ifdef KERN_TLS
	case IF_SND_TAG_TYPE_TLS:
		sw = &vlan_snd_tag_tls_sw;
		break;
	case IF_SND_TAG_TYPE_TLS_RX:
		sw = NULL;
		/* Reject a request that already carries a vlan id. */
		if (params->tls_rx.vlan_id != 0)
			goto failure;
		params->tls_rx.vlan_id = ifv->ifv_vid;
		break;
#ifdef RATELIMIT
	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
		sw = &vlan_snd_tag_tls_rl_sw;
		break;
#endif
#endif
	default:
		goto failure;
	}

	if (ifv->ifv_trunk != NULL)
		parent = PARENT(ifv);
	else
		parent = NULL;
	if (parent == NULL)
		goto failure;
	/* Hold the parent across the (possibly sleeping) allocation below. */
	if_ref(parent);
	NET_EPOCH_EXIT(et);

	if (sw != NULL) {
		vst = malloc(sizeof(*vst), M_VLAN, M_NOWAIT);
		if (vst == NULL) {
			if_rele(parent);
			return (ENOMEM);
		}
	} else
		vst = NULL;

	error = m_snd_tag_alloc(parent, params, &mst);
	if_rele(parent);
	if (error) {
		free(vst, M_VLAN);
		return (error);
	}

	if (sw != NULL) {
		m_snd_tag_init(&vst->com, ifp, sw);
		vst->tag = mst;

		*ppmt = &vst->com;
	} else
		*ppmt = mst;

	return (0);

failure:
	NET_EPOCH_EXIT(et);
	return (EOPNOTSUPP);
}
static struct m_snd_tag *
vlan_next_snd_tag(struct m_snd_tag *mst)
{
struct vlan_snd_tag *vst;
vst = mst_to_vst(mst);
return (vst->tag);
}
static int
vlan_snd_tag_modify(struct m_snd_tag *mst,
union if_snd_tag_modify_params *params)
{
struct vlan_snd_tag *vst;
vst = mst_to_vst(mst);
return (vst->tag->sw->snd_tag_modify(vst->tag, params));
}
static int
vlan_snd_tag_query(struct m_snd_tag *mst,
union if_snd_tag_query_params *params)
{
struct vlan_snd_tag *vst;
vst = mst_to_vst(mst);
return (vst->tag->sw->snd_tag_query(vst->tag, params));
}
/*
 * Release a vlan send tag: drop our reference on the wrapped parent tag
 * and free the wrapper itself.
 */
static void
vlan_snd_tag_free(struct m_snd_tag *mst)
{
	struct vlan_snd_tag *vst = mst_to_vst(mst);

	m_snd_tag_rele(vst->tag);
	free(vst, M_VLAN);
}
/*
 * Rate-limit query handler for vlan interfaces.  A vlan is an indirect
 * interface: rate limit tags must be obtained on the underlying parent
 * interface the flow will actually use, so report no rates of our own.
 */
static void
vlan_ratelimit_query(struct ifnet *ifp __unused, struct if_ratelimit_query_results *q)
{

	q->flags = RT_IS_INDIRECT;
	q->rate_table = NULL;
	q->number_of_rates = 0;
	q->max_flows = 0;
}
#endif
diff --git a/sys/net/if_vxlan.c b/sys/net/if_vxlan.c
index 9153adf64cfc..37f987981a0c 100644
--- a/sys/net/if_vxlan.c
+++ b/sys/net/if_vxlan.c
@@ -1,3722 +1,3717 @@
/*-
* Copyright (c) 2014, Bryan Venteicher <bryanv@FreeBSD.org>
* All rights reserved.
* Copyright (c) 2020, Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "opt_inet.h"
#include "opt_inet6.h"
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/hash.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/refcount.h>
#include <sys/rmlock.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vxlan.h>
#include <net/netisr.h>
#include <net/route.h>
#include <net/route/nhop.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/ip_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet/in_fib.h>
#include <netinet6/in6_fib.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
struct vxlan_softc;
LIST_HEAD(vxlan_softc_head, vxlan_softc);
struct sx vxlan_sx;
SX_SYSINIT(vxlan, &vxlan_sx, "VXLAN global start/stop lock");
/*
 * Bookkeeping for one multicast group membership on a vxlan socket.
 * Slots live in vxlan_socket's vxlso_mc[] array and are shared by
 * reference count (vxlsomc_users).
 */
struct vxlan_socket_mc_info {
	union vxlan_sockaddr		 vxlsomc_saddr;	/* local (source) address */
	union vxlan_sockaddr		 vxlsomc_gaddr;	/* multicast group address */
	int				 vxlsomc_ifidx;	/* interface index of the join -- TODO confirm */
	int				 vxlsomc_users;	/* number of softcs using this slot */
};
/*
* The maximum MTU of encapsulated ethernet frame within IPv4/UDP packet.
*/
#define VXLAN_MAX_MTU (IP_MAXPACKET - \
60 /* Maximum IPv4 header len */ - \
sizeof(struct udphdr) - \
sizeof(struct vxlan_header) - \
ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)
#define VXLAN_BASIC_IFCAPS (IFCAP_LINKSTATE | IFCAP_JUMBO_MTU)
#define VXLAN_SO_MC_MAX_GROUPS 32
#define VXLAN_SO_VNI_HASH_SHIFT 6
#define VXLAN_SO_VNI_HASH_SIZE (1 << VXLAN_SO_VNI_HASH_SHIFT)
#define VXLAN_SO_VNI_HASH(_vni) ((_vni) % VXLAN_SO_VNI_HASH_SIZE)
/*
 * A UDP socket shared by the vxlan interfaces bound to the same local
 * address.  Interfaces are hashed by VNI in vxlso_vni_hash, and up to
 * VXLAN_SO_MC_MAX_GROUPS multicast memberships are tracked in vxlso_mc.
 */
struct vxlan_socket {
	struct socket			*vxlso_sock;	/* underlying UDP socket */
	struct rmlock			 vxlso_lock;	/* NOTE(review): presumably guards the tables below */
	u_int				 vxlso_refcnt;	/* refcount (VXLAN_SO_ACQUIRE/RELEASE) */
	union vxlan_sockaddr		 vxlso_laddr;	/* local address */
	LIST_ENTRY(vxlan_socket)	 vxlso_entry;	/* global socket list linkage */
	struct vxlan_softc_head		 vxlso_vni_hash[VXLAN_SO_VNI_HASH_SIZE]; /* softcs hashed by VNI */
	struct vxlan_socket_mc_info	 vxlso_mc[VXLAN_SO_MC_MAX_GROUPS]; /* joined multicast groups */
};
#define VXLAN_SO_RLOCK(_vso, _p) rm_rlock(&(_vso)->vxlso_lock, (_p))
#define VXLAN_SO_RUNLOCK(_vso, _p) rm_runlock(&(_vso)->vxlso_lock, (_p))
#define VXLAN_SO_WLOCK(_vso) rm_wlock(&(_vso)->vxlso_lock)
#define VXLAN_SO_WUNLOCK(_vso) rm_wunlock(&(_vso)->vxlso_lock)
#define VXLAN_SO_LOCK_ASSERT(_vso) \
rm_assert(&(_vso)->vxlso_lock, RA_LOCKED)
#define VXLAN_SO_LOCK_WASSERT(_vso) \
rm_assert(&(_vso)->vxlso_lock, RA_WLOCKED)
#define VXLAN_SO_ACQUIRE(_vso) refcount_acquire(&(_vso)->vxlso_refcnt)
#define VXLAN_SO_RELEASE(_vso) refcount_release(&(_vso)->vxlso_refcnt)
/*
 * Forwarding-table entry: maps a destination MAC address to the remote
 * tunnel endpoint it should be encapsulated towards.
 */
struct vxlan_ftable_entry {
	LIST_ENTRY(vxlan_ftable_entry)	 vxlfe_hash;	/* hash-bucket linkage */
	uint16_t			 vxlfe_flags;	/* VXLAN_FE_FLAG_* below */
	uint8_t				 vxlfe_mac[ETHER_ADDR_LEN]; /* destination MAC */
	union vxlan_sockaddr		 vxlfe_raddr;	/* remote endpoint address */
	time_t				 vxlfe_expire;	/* expiry time -- presumably for dynamic entries only */
};
#define VXLAN_FE_FLAG_DYNAMIC 0x01
#define VXLAN_FE_FLAG_STATIC 0x02
#define VXLAN_FE_IS_DYNAMIC(_fe) \
((_fe)->vxlfe_flags & VXLAN_FE_FLAG_DYNAMIC)
#define VXLAN_SC_FTABLE_SHIFT 9
#define VXLAN_SC_FTABLE_SIZE (1 << VXLAN_SC_FTABLE_SHIFT)
#define VXLAN_SC_FTABLE_MASK (VXLAN_SC_FTABLE_SIZE - 1)
#define VXLAN_SC_FTABLE_HASH(_sc, _mac) \
(vxlan_mac_hash(_sc, _mac) % VXLAN_SC_FTABLE_SIZE)
LIST_HEAD(vxlan_ftable_head, vxlan_ftable_entry);
/* Per-interface statistics, embedded in the softc. */
struct vxlan_statistics {
	uint32_t	ftable_nospace;			/* ftable full; entry could not be added */
	uint32_t	ftable_lock_upgrade_failed;	/* read->write lock upgrade failures */
	counter_u64_t	txcsum;		/* tx checksum offload events -- TODO confirm */
	counter_u64_t	tso;		/* TSO events -- TODO confirm */
	counter_u64_t	rxcsum;		/* rx checksum offload events -- TODO confirm */
};
/*
 * Per-interface state for a vxlan interface.
 */
struct vxlan_softc {
	struct ifnet			*vxl_ifp;	/* our ifnet */
	int				 vxl_reqcap;	/* user-requested capabilities */
	u_int				 vxl_fibnum;	/* FIB number -- presumably for encap route lookups */
	struct vxlan_socket		*vxl_sock;	/* UDP socket used for encapsulation */
	uint32_t			 vxl_vni;	/* VXLAN network identifier */
	union vxlan_sockaddr		 vxl_src_addr;	/* local tunnel endpoint */
	union vxlan_sockaddr		 vxl_dst_addr;	/* default remote endpoint */
	uint32_t			 vxl_flags;	/* VXLAN_FLAG_* below */
#define VXLAN_FLAG_INIT		0x0001
#define VXLAN_FLAG_TEARDOWN	0x0002
#define VXLAN_FLAG_LEARN	0x0004
#define VXLAN_FLAG_USER_MTU	0x0008

	uint32_t			 vxl_port_hash_key;	/* key for source port selection -- presumably */
	uint16_t			 vxl_min_port;	/* low end of UDP source port range */
	uint16_t			 vxl_max_port;	/* high end of UDP source port range */
	uint8_t				 vxl_ttl;	/* TTL for encapsulated packets */

	/* Lookup table from MAC address to forwarding entry. */
	uint32_t			 vxl_ftable_cnt;	/* current number of entries */
	uint32_t			 vxl_ftable_max;	/* maximum number of entries */
	uint32_t			 vxl_ftable_timeout;	/* entry timeout */
	uint32_t			 vxl_ftable_hash_key;	/* key for vxlan_mac_hash() */
	struct vxlan_ftable_head	*vxl_ftable;		/* hash buckets */

	/* Derived from vxl_dst_addr. */
	struct vxlan_ftable_entry	 vxl_default_fe;

	struct ip_moptions		*vxl_im4o;	/* IPv4 multicast options */
	struct ip6_moptions		*vxl_im6o;	/* IPv6 multicast options */
	struct rmlock			 vxl_lock;	/* softc lock (VXLAN_RLOCK/WLOCK) */
	volatile u_int			 vxl_refcnt;	/* refcount (VXLAN_ACQUIRE/RELEASE) */
	int				 vxl_unit;	/* clone unit number -- TODO confirm */
	int				 vxl_vso_mc_index;	/* index into socket's vxlso_mc[] */
	struct vxlan_statistics		 vxl_stats;
	struct sysctl_oid		*vxl_sysctl_node;
	struct sysctl_ctx_list		 vxl_sysctl_ctx;
	struct callout			 vxl_callout;	/* periodic timer (vxlan_timer) -- presumably ftable expiry */
	struct ether_addr		 vxl_hwaddr;	/* link-layer address */
	int				 vxl_mc_ifindex;	/* multicast interface index */
	struct ifnet			*vxl_mc_ifp;		/* multicast interface */
	struct ifmedia			 vxl_media;
	char				 vxl_mc_ifname[IFNAMSIZ]; /* configured mc interface name */
	LIST_ENTRY(vxlan_softc)		 vxl_entry;		/* socket VNI-hash linkage */
	LIST_ENTRY(vxlan_softc)		 vxl_ifdetach_list;	/* ifdetach teardown list linkage */

	/* For rate limiting errors on the tx fast path. */
	struct timeval err_time;
	int err_pps;
};
#define VXLAN_RLOCK(_sc, _p) rm_rlock(&(_sc)->vxl_lock, (_p))
#define VXLAN_RUNLOCK(_sc, _p) rm_runlock(&(_sc)->vxl_lock, (_p))
#define VXLAN_WLOCK(_sc) rm_wlock(&(_sc)->vxl_lock)
#define VXLAN_WUNLOCK(_sc) rm_wunlock(&(_sc)->vxl_lock)
#define VXLAN_LOCK_WOWNED(_sc) rm_wowned(&(_sc)->vxl_lock)
#define VXLAN_LOCK_ASSERT(_sc) rm_assert(&(_sc)->vxl_lock, RA_LOCKED)
#define VXLAN_LOCK_WASSERT(_sc) rm_assert(&(_sc)->vxl_lock, RA_WLOCKED)
#define VXLAN_UNLOCK(_sc, _p) do { \
if (VXLAN_LOCK_WOWNED(_sc)) \
VXLAN_WUNLOCK(_sc); \
else \
VXLAN_RUNLOCK(_sc, _p); \
} while (0)
#define VXLAN_ACQUIRE(_sc) refcount_acquire(&(_sc)->vxl_refcnt)
#define VXLAN_RELEASE(_sc) refcount_release(&(_sc)->vxl_refcnt)
#define satoconstsin(sa) ((const struct sockaddr_in *)(sa))
#define satoconstsin6(sa) ((const struct sockaddr_in6 *)(sa))
/*
 * Wire format of the encapsulation header: a UDP header immediately
 * followed by the vxlan header.  Packed so no padding is inserted.
 */
struct vxlanudphdr {
	struct udphdr		vxlh_udp;
	struct vxlan_header	vxlh_hdr;
} __packed;
static int vxlan_ftable_addr_cmp(const uint8_t *, const uint8_t *);
static void vxlan_ftable_init(struct vxlan_softc *);
static void vxlan_ftable_fini(struct vxlan_softc *);
static void vxlan_ftable_flush(struct vxlan_softc *, int);
static void vxlan_ftable_expire(struct vxlan_softc *);
static int vxlan_ftable_update_locked(struct vxlan_softc *,
const union vxlan_sockaddr *, const uint8_t *,
struct rm_priotracker *);
static int vxlan_ftable_learn(struct vxlan_softc *,
const struct sockaddr *, const uint8_t *);
static int vxlan_ftable_sysctl_dump(SYSCTL_HANDLER_ARGS);
static struct vxlan_ftable_entry *
vxlan_ftable_entry_alloc(void);
static void vxlan_ftable_entry_free(struct vxlan_ftable_entry *);
static void vxlan_ftable_entry_init(struct vxlan_softc *,
struct vxlan_ftable_entry *, const uint8_t *,
const struct sockaddr *, uint32_t);
static void vxlan_ftable_entry_destroy(struct vxlan_softc *,
struct vxlan_ftable_entry *);
static int vxlan_ftable_entry_insert(struct vxlan_softc *,
struct vxlan_ftable_entry *);
static struct vxlan_ftable_entry *
vxlan_ftable_entry_lookup(struct vxlan_softc *,
const uint8_t *);
static void vxlan_ftable_entry_dump(struct vxlan_ftable_entry *,
struct sbuf *);
static struct vxlan_socket *
vxlan_socket_alloc(const union vxlan_sockaddr *);
static void vxlan_socket_destroy(struct vxlan_socket *);
static void vxlan_socket_release(struct vxlan_socket *);
static struct vxlan_socket *
vxlan_socket_lookup(union vxlan_sockaddr *vxlsa);
static void vxlan_socket_insert(struct vxlan_socket *);
static int vxlan_socket_init(struct vxlan_socket *, struct ifnet *);
static int vxlan_socket_bind(struct vxlan_socket *, struct ifnet *);
static int vxlan_socket_create(struct ifnet *, int,
const union vxlan_sockaddr *, struct vxlan_socket **);
static void vxlan_socket_ifdetach(struct vxlan_socket *,
struct ifnet *, struct vxlan_softc_head *);
static struct vxlan_socket *
vxlan_socket_mc_lookup(const union vxlan_sockaddr *);
static int vxlan_sockaddr_mc_info_match(
const struct vxlan_socket_mc_info *,
const union vxlan_sockaddr *,
const union vxlan_sockaddr *, int);
static int vxlan_socket_mc_join_group(struct vxlan_socket *,
const union vxlan_sockaddr *, const union vxlan_sockaddr *,
int *, union vxlan_sockaddr *);
static int vxlan_socket_mc_leave_group(struct vxlan_socket *,
const union vxlan_sockaddr *,
const union vxlan_sockaddr *, int);
static int vxlan_socket_mc_add_group(struct vxlan_socket *,
const union vxlan_sockaddr *, const union vxlan_sockaddr *,
int, int *);
static void vxlan_socket_mc_release_group_by_idx(struct vxlan_socket *,
int);
static struct vxlan_softc *
vxlan_socket_lookup_softc_locked(struct vxlan_socket *,
uint32_t);
static struct vxlan_softc *
vxlan_socket_lookup_softc(struct vxlan_socket *, uint32_t);
static int vxlan_socket_insert_softc(struct vxlan_socket *,
struct vxlan_softc *);
static void vxlan_socket_remove_softc(struct vxlan_socket *,
struct vxlan_softc *);
static struct ifnet *
vxlan_multicast_if_ref(struct vxlan_softc *, int);
static void vxlan_free_multicast(struct vxlan_softc *);
static int vxlan_setup_multicast_interface(struct vxlan_softc *);
static int vxlan_setup_multicast(struct vxlan_softc *);
static int vxlan_setup_socket(struct vxlan_softc *);
#ifdef INET6
static void vxlan_setup_zero_checksum_port(struct vxlan_softc *);
#endif
static void vxlan_setup_interface_hdrlen(struct vxlan_softc *);
static int vxlan_valid_init_config(struct vxlan_softc *);
static void vxlan_init_wait(struct vxlan_softc *);
static void vxlan_init_complete(struct vxlan_softc *);
static void vxlan_init(void *);
static void vxlan_release(struct vxlan_softc *);
static void vxlan_teardown_wait(struct vxlan_softc *);
static void vxlan_teardown_complete(struct vxlan_softc *);
static void vxlan_teardown_locked(struct vxlan_softc *);
static void vxlan_teardown(struct vxlan_softc *);
static void vxlan_ifdetach(struct vxlan_softc *, struct ifnet *,
struct vxlan_softc_head *);
static void vxlan_timer(void *);
static int vxlan_ctrl_get_config(struct vxlan_softc *, void *);
static int vxlan_ctrl_set_vni(struct vxlan_softc *, void *);
static int vxlan_ctrl_set_local_addr(struct vxlan_softc *, void *);
static int vxlan_ctrl_set_remote_addr(struct vxlan_softc *, void *);
static int vxlan_ctrl_set_local_port(struct vxlan_softc *, void *);
static int vxlan_ctrl_set_remote_port(struct vxlan_softc *, void *);
static int vxlan_ctrl_set_port_range(struct vxlan_softc *, void *);
static int vxlan_ctrl_set_ftable_timeout(struct vxlan_softc *, void *);
static int vxlan_ctrl_set_ftable_max(struct vxlan_softc *, void *);
static int vxlan_ctrl_set_multicast_if(struct vxlan_softc * , void *);
static int vxlan_ctrl_set_ttl(struct vxlan_softc *, void *);
static int vxlan_ctrl_set_learn(struct vxlan_softc *, void *);
static int vxlan_ctrl_ftable_entry_add(struct vxlan_softc *, void *);
static int vxlan_ctrl_ftable_entry_rem(struct vxlan_softc *, void *);
static int vxlan_ctrl_flush(struct vxlan_softc *, void *);
static int vxlan_ioctl_drvspec(struct vxlan_softc *,
struct ifdrv *, int);
static int vxlan_ioctl_ifflags(struct vxlan_softc *);
static int vxlan_ioctl(struct ifnet *, u_long, caddr_t);
#if defined(INET) || defined(INET6)
static uint16_t vxlan_pick_source_port(struct vxlan_softc *, struct mbuf *);
static void vxlan_encap_header(struct vxlan_softc *, struct mbuf *,
int, uint16_t, uint16_t);
#endif
static int vxlan_encap4(struct vxlan_softc *,
const union vxlan_sockaddr *, struct mbuf *);
static int vxlan_encap6(struct vxlan_softc *,
const union vxlan_sockaddr *, struct mbuf *);
static int vxlan_transmit(struct ifnet *, struct mbuf *);
static void vxlan_qflush(struct ifnet *);
static bool vxlan_rcv_udp_packet(struct mbuf *, int, struct inpcb *,
const struct sockaddr *, void *);
static int vxlan_input(struct vxlan_socket *, uint32_t, struct mbuf **,
const struct sockaddr *);
static int vxlan_stats_alloc(struct vxlan_softc *);
static void vxlan_stats_free(struct vxlan_softc *);
static void vxlan_set_default_config(struct vxlan_softc *);
static int vxlan_set_user_config(struct vxlan_softc *,
struct ifvxlanparam *);
static int vxlan_set_reqcap(struct vxlan_softc *, struct ifnet *, int);
static void vxlan_set_hwcaps(struct vxlan_softc *);
static int vxlan_clone_create(struct if_clone *, char *, size_t,
struct ifc_data *, struct ifnet **);
static int vxlan_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);
static uint32_t vxlan_mac_hash(struct vxlan_softc *, const uint8_t *);
static int vxlan_media_change(struct ifnet *);
static void vxlan_media_status(struct ifnet *, struct ifmediareq *);
static int vxlan_sockaddr_cmp(const union vxlan_sockaddr *,
const struct sockaddr *);
static void vxlan_sockaddr_copy(union vxlan_sockaddr *,
const struct sockaddr *);
static int vxlan_sockaddr_in_equal(const union vxlan_sockaddr *,
const struct sockaddr *);
static void vxlan_sockaddr_in_copy(union vxlan_sockaddr *,
const struct sockaddr *);
static int vxlan_sockaddr_supported(const union vxlan_sockaddr *, int);
static int vxlan_sockaddr_in_any(const union vxlan_sockaddr *);
static int vxlan_sockaddr_in_multicast(const union vxlan_sockaddr *);
static int vxlan_sockaddr_in6_embedscope(union vxlan_sockaddr *);
static int vxlan_can_change_config(struct vxlan_softc *);
static int vxlan_check_vni(uint32_t);
static int vxlan_check_ttl(int);
static int vxlan_check_ftable_timeout(uint32_t);
static int vxlan_check_ftable_max(uint32_t);
static void vxlan_sysctl_setup(struct vxlan_softc *);
static void vxlan_sysctl_destroy(struct vxlan_softc *);
static int vxlan_tunable_int(struct vxlan_softc *, const char *, int);
static void vxlan_ifdetach_event(void *, struct ifnet *);
static void vxlan_load(void);
static void vxlan_unload(void);
static int vxlan_modevent(module_t, int, void *);
/* Interface cloner name; also used as the malloc tag description prefix. */
static const char vxlan_name[] = "vxlan";
static MALLOC_DEFINE(M_VXLAN, vxlan_name,
"Virtual eXtensible LAN Interface");
static struct if_clone *vxlan_cloner;

/* Protects vxlan_socket_list below. */
static struct mtx vxlan_list_mtx;
#define VXLAN_LIST_LOCK() mtx_lock(&vxlan_list_mtx)
#define VXLAN_LIST_UNLOCK() mtx_unlock(&vxlan_list_mtx)

/* Global list of shared vxlan UDP sockets, keyed by local address/port. */
static LIST_HEAD(, vxlan_socket) vxlan_socket_list;

static eventhandler_tag vxlan_ifdetach_event_tag;

SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, OID_AUTO, vxlan, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"Virtual eXtensible Local Area Network");

/* When set, use the legacy (pre-IANA) UDP port as the default. */
static int vxlan_legacy_port = 0;
TUNABLE_INT("net.link.vxlan.legacy_port", &vxlan_legacy_port);
/* When set, create sockets with SO_REUSEPORT. */
static int vxlan_reuse_port = 0;
TUNABLE_INT("net.link.vxlan.reuse_port", &vxlan_reuse_port);
/*
* This macro controls the default upper limitation on nesting of vxlan
* tunnels. By default it is 3, as the overhead of IPv6 vxlan tunnel is 70
* bytes, this will create at most 210 bytes overhead and the most inner
* tunnel's MTU will be 1290 which will meet IPv6 minimum MTU size 1280.
* Be careful to configure the tunnels when raising the limit. A large
* number of nested tunnels can introduce system crash.
*/
#ifndef MAX_VXLAN_NEST
#define MAX_VXLAN_NEST 3
#endif
static int max_vxlan_nesting = MAX_VXLAN_NEST;
SYSCTL_INT(_net_link_vxlan, OID_AUTO, max_nesting, CTLFLAG_RW,
&max_vxlan_nesting, 0, "Max nested tunnels");
/* Default maximum number of addresses in the forwarding table. */
#ifndef VXLAN_FTABLE_MAX
#define VXLAN_FTABLE_MAX 2000
#endif
/* Timeout (in seconds) of addresses learned in the forwarding table. */
#ifndef VXLAN_FTABLE_TIMEOUT
#define VXLAN_FTABLE_TIMEOUT (20 * 60)
#endif
/*
* Maximum timeout (in seconds) of addresses learned in the forwarding
* table.
*/
#ifndef VXLAN_FTABLE_MAX_TIMEOUT
#define VXLAN_FTABLE_MAX_TIMEOUT (60 * 60 * 24)
#endif
/* Number of seconds between pruning attempts of the forwarding table. */
#ifndef VXLAN_FTABLE_PRUNE
#define VXLAN_FTABLE_PRUNE (5 * 60)
#endif
static int vxlan_ftable_prune_period = VXLAN_FTABLE_PRUNE;
/*
 * Dispatch descriptor for one vxlan driver-specific ioctl sub-command:
 * the handler, the expected argument size, and flags describing the
 * copyin/copyout and privilege requirements.
 */
struct vxlan_control {
	int (*vxlc_func)(struct vxlan_softc *, void *);
	int vxlc_argsize;
	int vxlc_flags;
#define VXLAN_CTRL_FLAG_COPYIN 0x01	/* copy argument in from userspace */
#define VXLAN_CTRL_FLAG_COPYOUT 0x02	/* copy result back to userspace */
#define VXLAN_CTRL_FLAG_SUSER 0x04	/* caller must be privileged */
};
/*
 * Sub-command dispatch table, indexed by the VXLAN_CMD_* value taken
 * from the ifdrv request.  All SET/ADD/REM/FLUSH commands require
 * privilege; only GET_CONFIG copies data back out.
 */
static const struct vxlan_control vxlan_control_table[] = {
	[VXLAN_CMD_GET_CONFIG] =
	    { vxlan_ctrl_get_config, sizeof(struct ifvxlancfg),
	      VXLAN_CTRL_FLAG_COPYOUT
	    },

	[VXLAN_CMD_SET_VNI] =
	    { vxlan_ctrl_set_vni, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_LOCAL_ADDR] =
	    { vxlan_ctrl_set_local_addr, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_REMOTE_ADDR] =
	    { vxlan_ctrl_set_remote_addr, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_LOCAL_PORT] =
	    { vxlan_ctrl_set_local_port, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_REMOTE_PORT] =
	    { vxlan_ctrl_set_remote_port, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_PORT_RANGE] =
	    { vxlan_ctrl_set_port_range, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_FTABLE_TIMEOUT] =
	    { vxlan_ctrl_set_ftable_timeout, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_FTABLE_MAX] =
	    { vxlan_ctrl_set_ftable_max, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_MULTICAST_IF] =
	    { vxlan_ctrl_set_multicast_if, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_TTL] =
	    { vxlan_ctrl_set_ttl, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_LEARN] =
	    { vxlan_ctrl_set_learn, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_FTABLE_ENTRY_ADD] =
	    { vxlan_ctrl_ftable_entry_add, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_FTABLE_ENTRY_REM] =
	    { vxlan_ctrl_ftable_entry_rem, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_FLUSH] =
	    { vxlan_ctrl_flush, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },
};

static const int vxlan_control_table_size = nitems(vxlan_control_table);
/*
 * Lexicographically compare two Ethernet MAC addresses.  Returns zero
 * when equal, otherwise the (signed) difference of the first byte pair
 * that differs — negative when a < b, positive when a > b.
 */
static int
vxlan_ftable_addr_cmp(const uint8_t *a, const uint8_t *b)
{
	int i;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		if (a[i] != b[i])
			return (((int)a[i]) - ((int)b[i]));
	}

	return (0);
}
/*
 * Allocate and initialize the softc's MAC forwarding table: an array of
 * VXLAN_SC_FTABLE_SIZE empty hash buckets plus a random hash key.
 */
static void
vxlan_ftable_init(struct vxlan_softc *sc)
{
	int bucket;

	sc->vxl_ftable_hash_key = arc4random();
	sc->vxl_ftable = malloc(sizeof(struct vxlan_ftable_head) *
	    VXLAN_SC_FTABLE_SIZE, M_VXLAN, M_ZERO | M_WAITOK);
	for (bucket = VXLAN_SC_FTABLE_SIZE - 1; bucket >= 0; bucket--)
		LIST_INIT(&sc->vxl_ftable[bucket]);
}
/*
 * Release the forwarding table.  Every entry must already have been
 * destroyed (e.g. by a full vxlan_ftable_flush()).
 */
static void
vxlan_ftable_fini(struct vxlan_softc *sc)
{
	int bucket;

	for (bucket = 0; bucket < VXLAN_SC_FTABLE_SIZE; bucket++) {
		KASSERT(LIST_EMPTY(&sc->vxl_ftable[bucket]),
		    ("%s: vxlan %p ftable[%d] not empty", __func__, sc,
		    bucket));
	}
	MPASS(sc->vxl_ftable_cnt == 0);

	free(sc->vxl_ftable, M_VXLAN);
	sc->vxl_ftable = NULL;
}
/*
 * Remove forwarding entries: every entry when 'all' is nonzero,
 * otherwise only the dynamically learned ones (static entries stay).
 */
static void
vxlan_ftable_flush(struct vxlan_softc *sc, int all)
{
	struct vxlan_ftable_entry *fe, *tfe;
	int bucket;

	for (bucket = 0; bucket < VXLAN_SC_FTABLE_SIZE; bucket++) {
		LIST_FOREACH_SAFE(fe, &sc->vxl_ftable[bucket], vxlfe_hash,
		    tfe) {
			if (!all && !VXLAN_FE_IS_DYNAMIC(fe))
				continue;
			vxlan_ftable_entry_destroy(sc, fe);
		}
	}
}
/*
 * Destroy dynamically learned entries whose lifetime has elapsed.
 * Called from the periodic timer with the softc write lock held.
 */
static void
vxlan_ftable_expire(struct vxlan_softc *sc)
{
	struct vxlan_ftable_entry *fe, *tfe;
	int bucket;

	VXLAN_LOCK_WASSERT(sc);

	for (bucket = 0; bucket < VXLAN_SC_FTABLE_SIZE; bucket++) {
		LIST_FOREACH_SAFE(fe, &sc->vxl_ftable[bucket], vxlfe_hash,
		    tfe) {
			if (!VXLAN_FE_IS_DYNAMIC(fe))
				continue;
			if (time_uptime < fe->vxlfe_expire)
				continue;
			vxlan_ftable_entry_destroy(sc, fe);
		}
	}
}
/*
 * Look up or create the forwarding entry for 'mac', pointing it at the
 * tunnel endpoint 'vxlsa'.  Entered with the softc lock held for read
 * or write; because the rmlock cannot be upgraded in place, the read
 * lock is dropped, the write lock taken, and the lookup redone (the
 * 'again' label) whenever the table must be modified.  Returns 0 on
 * success, ENOSPC when the table is full, ENOMEM on allocation failure.
 */
static int
vxlan_ftable_update_locked(struct vxlan_softc *sc,
    const union vxlan_sockaddr *vxlsa, const uint8_t *mac,
    struct rm_priotracker *tracker)
{
	struct vxlan_ftable_entry *fe;
	int error __unused;

	VXLAN_LOCK_ASSERT(sc);

again:
	/*
	 * A forwarding entry for this MAC address might already exist. If
	 * so, update it, otherwise create a new one. We may have to upgrade
	 * the lock if we have to change or create an entry.
	 */
	fe = vxlan_ftable_entry_lookup(sc, mac);
	if (fe != NULL) {
		/* Refresh the expiry on every update. */
		fe->vxlfe_expire = time_uptime + sc->vxl_ftable_timeout;

		/*
		 * Static entries, and dynamic entries whose endpoint is
		 * unchanged, need no further work (expiry update above is
		 * benign under the read lock).
		 */
		if (!VXLAN_FE_IS_DYNAMIC(fe) ||
		    vxlan_sockaddr_in_equal(&fe->vxlfe_raddr, &vxlsa->sa))
			return (0);
		if (!VXLAN_LOCK_WOWNED(sc)) {
			/* Upgrade to the write lock and retry. */
			VXLAN_RUNLOCK(sc, tracker);
			VXLAN_WLOCK(sc);
			sc->vxl_stats.ftable_lock_upgrade_failed++;
			goto again;
		}
		vxlan_sockaddr_in_copy(&fe->vxlfe_raddr, &vxlsa->sa);
		return (0);
	}

	if (!VXLAN_LOCK_WOWNED(sc)) {
		/* Creation also requires the write lock; upgrade and retry. */
		VXLAN_RUNLOCK(sc, tracker);
		VXLAN_WLOCK(sc);
		sc->vxl_stats.ftable_lock_upgrade_failed++;
		goto again;
	}

	if (sc->vxl_ftable_cnt >= sc->vxl_ftable_max) {
		sc->vxl_stats.ftable_nospace++;
		return (ENOSPC);
	}

	fe = vxlan_ftable_entry_alloc();
	if (fe == NULL)
		return (ENOMEM);

	vxlan_ftable_entry_init(sc, fe, mac, &vxlsa->sa, VXLAN_FE_FLAG_DYNAMIC);

	/* The prior lookup failed, so the insert should not. */
	error = vxlan_ftable_entry_insert(sc, fe);
	MPASS(error == 0);

	return (0);
}
static int
vxlan_ftable_learn(struct vxlan_softc *sc, const struct sockaddr *sa,
const uint8_t *mac)
{
struct rm_priotracker tracker;
union vxlan_sockaddr vxlsa;
int error;
/*
* The source port may be randomly selected by the remote host, so
* use the port of the default destination address.
*/
vxlan_sockaddr_copy(&vxlsa, sa);
vxlsa.in4.sin_port = sc->vxl_dst_addr.in4.sin_port;
if (VXLAN_SOCKADDR_IS_IPV6(&vxlsa)) {
error = vxlan_sockaddr_in6_embedscope(&vxlsa);
if (error)
return (error);
}
VXLAN_RLOCK(sc, &tracker);
error = vxlan_ftable_update_locked(sc, &vxlsa, mac, &tracker);
VXLAN_UNLOCK(sc, &tracker);
return (error);
}
/*
 * Sysctl handler that renders the softc's entire forwarding table into
 * a fixed-size sbuf and hands it to userspace as a string.  arg1 is
 * the softc.
 */
static int
vxlan_ftable_sysctl_dump(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sbuf sb;
	struct vxlan_softc *sc;
	struct vxlan_ftable_entry *fe;
	size_t size;
	int i, error;

	/*
	 * This is mostly intended for debugging during development. It is
	 * not practical to dump an entire large table this way.
	 */

	sc = arg1;
	size = PAGE_SIZE;	/* Calculate later. */

	sbuf_new(&sb, NULL, size, SBUF_FIXEDLEN);
	sbuf_putc(&sb, '\n');

	VXLAN_RLOCK(sc, &tracker);
	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
		LIST_FOREACH(fe, &sc->vxl_ftable[i], vxlfe_hash) {
			/* Stop once the fixed-size buffer has overflowed. */
			if (sbuf_error(&sb) != 0)
				break;
			vxlan_ftable_entry_dump(fe, &sb);
		}
	}
	VXLAN_RUNLOCK(sc, &tracker);

	/* Empty table: drop the leading newline so we return "". */
	if (sbuf_len(&sb) == 1)
		sbuf_setpos(&sb, 0);

	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
	sbuf_delete(&sb);

	return (error);
}
/*
 * Allocate a zeroed forwarding-table entry.  Uses M_NOWAIT because the
 * caller may hold the softc write lock; returns NULL on failure.
 */
static struct vxlan_ftable_entry *
vxlan_ftable_entry_alloc(void)
{
	return (malloc(sizeof(struct vxlan_ftable_entry), M_VXLAN,
	    M_ZERO | M_NOWAIT));
}
/* Free a forwarding-table entry previously allocated from M_VXLAN. */
static void
vxlan_ftable_entry_free(struct vxlan_ftable_entry *fe)
{
	free(fe, M_VXLAN);
}
/*
 * Populate a freshly allocated forwarding entry with its MAC address,
 * remote tunnel endpoint, flags, and initial expiry time.
 */
static void
vxlan_ftable_entry_init(struct vxlan_softc *sc, struct vxlan_ftable_entry *fe,
    const uint8_t *mac, const struct sockaddr *sa, uint32_t flags)
{
	memcpy(fe->vxlfe_mac, mac, ETHER_ADDR_LEN);
	vxlan_sockaddr_copy(&fe->vxlfe_raddr, sa);
	fe->vxlfe_flags = flags;
	fe->vxlfe_expire = time_uptime + sc->vxl_ftable_timeout;
}
/* Unlink a forwarding entry from its hash bucket and free it. */
static void
vxlan_ftable_entry_destroy(struct vxlan_softc *sc,
    struct vxlan_ftable_entry *fe)
{
	LIST_REMOVE(fe, vxlfe_hash);
	sc->vxl_ftable_cnt--;
	vxlan_ftable_entry_free(fe);
}
/*
 * Insert 'fe' into its hash bucket, keeping the bucket sorted in
 * descending MAC-address order so lookups can terminate early.
 * Returns EEXIST if an entry with the same MAC is already present.
 * Caller holds the softc write lock.
 */
static int
vxlan_ftable_entry_insert(struct vxlan_softc *sc,
    struct vxlan_ftable_entry *fe)
{
	struct vxlan_ftable_entry *lfe;
	uint32_t hash;
	int dir;

	VXLAN_LOCK_WASSERT(sc);
	hash = VXLAN_SC_FTABLE_HASH(sc, fe->vxlfe_mac);

	lfe = LIST_FIRST(&sc->vxl_ftable[hash]);
	if (lfe == NULL) {
		/* Empty bucket. */
		LIST_INSERT_HEAD(&sc->vxl_ftable[hash], fe, vxlfe_hash);
		goto out;
	}

	do {
		dir = vxlan_ftable_addr_cmp(fe->vxlfe_mac, lfe->vxlfe_mac);
		if (dir == 0)
			return (EEXIST);
		if (dir > 0) {
			/* New MAC sorts before 'lfe' (descending order). */
			LIST_INSERT_BEFORE(lfe, fe, vxlfe_hash);
			goto out;
		} else if (LIST_NEXT(lfe, vxlfe_hash) == NULL) {
			/* Smallest MAC in the bucket; append at the tail. */
			LIST_INSERT_AFTER(lfe, fe, vxlfe_hash);
			goto out;
		} else
			lfe = LIST_NEXT(lfe, vxlfe_hash);
	} while (lfe != NULL);

out:
	sc->vxl_ftable_cnt++;

	return (0);
}
/*
 * Find the forwarding entry for 'mac', or return NULL.  Buckets are
 * kept sorted in descending MAC order, so the scan can stop as soon as
 * the target would have sorted earlier than the current entry.
 */
static struct vxlan_ftable_entry *
vxlan_ftable_entry_lookup(struct vxlan_softc *sc, const uint8_t *mac)
{
	struct vxlan_ftable_entry *fe;
	uint32_t bucket;
	int d;

	VXLAN_LOCK_ASSERT(sc);
	bucket = VXLAN_SC_FTABLE_HASH(sc, mac);

	LIST_FOREACH(fe, &sc->vxl_ftable[bucket], vxlfe_hash) {
		d = vxlan_ftable_addr_cmp(mac, fe->vxlfe_mac);
		if (d == 0)
			return (fe);
		if (d > 0)
			return (NULL);
	}

	return (NULL);
}
/*
 * Append one human-readable line describing 'fe' to the sbuf:
 * static/dynamic marker, flags, MAC, remote address, and expiry.
 * If the sbuf overflows mid-line, rewind to drop the partial line.
 */
static void
vxlan_ftable_entry_dump(struct vxlan_ftable_entry *fe, struct sbuf *sb)
{
	char buf[64];
	const union vxlan_sockaddr *sa;
	const void *addr;
	int i, len, af, width;

	sa = &fe->vxlfe_raddr;
	af = sa->sa.sa_family;
	/* Remember the line start so a truncated line can be undone. */
	len = sbuf_len(sb);

	sbuf_printf(sb, "%c 0x%02X ", VXLAN_FE_IS_DYNAMIC(fe) ? 'D' : 'S',
	    fe->vxlfe_flags);

	for (i = 0; i < ETHER_ADDR_LEN - 1; i++)
		sbuf_printf(sb, "%02X:", fe->vxlfe_mac[i]);
	sbuf_printf(sb, "%02X ", fe->vxlfe_mac[i]);

	if (af == AF_INET) {
		addr = &sa->in4.sin_addr;
		width = INET_ADDRSTRLEN - 1;
	} else {
		addr = &sa->in6.sin6_addr;
		width = INET6_ADDRSTRLEN - 1;
	}
	inet_ntop(af, addr, buf, sizeof(buf));
	sbuf_printf(sb, "%*s ", width, buf);

	sbuf_printf(sb, "%08jd", (intmax_t)fe->vxlfe_expire);

	sbuf_putc(sb, '\n');

	/* Truncate a partial line. */
	if (sbuf_error(sb) != 0)
		sbuf_setpos(sb, len);
}
/*
 * Allocate a vxlan socket wrapper for local address 'sa'.  The refcount
 * starts at zero; the first reference is taken when the socket is
 * inserted into the global list.
 */
static struct vxlan_socket *
vxlan_socket_alloc(const union vxlan_sockaddr *sa)
{
	struct vxlan_socket *vso;
	int bucket;

	vso = malloc(sizeof(*vso), M_VXLAN, M_WAITOK | M_ZERO);
	vso->vxlso_laddr = *sa;
	rm_init(&vso->vxlso_lock, "vxlansorm");
	refcount_init(&vso->vxlso_refcnt, 0);
	for (bucket = 0; bucket < VXLAN_SO_VNI_HASH_SIZE; bucket++)
		LIST_INIT(&vso->vxlso_vni_hash[bucket]);

	return (vso);
}
/*
 * Free a vxlan socket wrapper: close the underlying kernel socket (if
 * any), destroy the lock, and release the memory.  Under INVARIANTS,
 * verify no multicast groups or attached interfaces remain.
 */
static void
vxlan_socket_destroy(struct vxlan_socket *vso)
{
	struct socket *so;
#ifdef INVARIANTS
	int i;
	struct vxlan_socket_mc_info *mc;

	for (i = 0; i < VXLAN_SO_MC_MAX_GROUPS; i++) {
		mc = &vso->vxlso_mc[i];
		KASSERT(mc->vxlsomc_gaddr.sa.sa_family == AF_UNSPEC,
		    ("%s: socket %p mc[%d] still has address",
		     __func__, vso, i));
	}

	for (i = 0; i < VXLAN_SO_VNI_HASH_SIZE; i++) {
		KASSERT(LIST_EMPTY(&vso->vxlso_vni_hash[i]),
		    ("%s: socket %p vni_hash[%d] not empty",
		     __func__, vso, i));
	}
#endif
	so = vso->vxlso_sock;
	if (so != NULL) {
		vso->vxlso_sock = NULL;
		soclose(so);
	}

	rm_destroy(&vso->vxlso_lock);
	free(vso, M_VXLAN);
}
/*
 * Drop a reference on the vxlan socket.  The final reference also
 * removes it from the global list (under the list lock, so lookups
 * cannot resurrect it) and destroys it.
 */
static void
vxlan_socket_release(struct vxlan_socket *vso)
{
	VXLAN_LIST_LOCK();
	if (VXLAN_SO_RELEASE(vso) == 0) {
		VXLAN_LIST_UNLOCK();
		return;
	}
	LIST_REMOVE(vso, vxlso_entry);
	VXLAN_LIST_UNLOCK();

	vxlan_socket_destroy(vso);
}
static struct vxlan_socket *
vxlan_socket_lookup(union vxlan_sockaddr *vxlsa)
{
struct vxlan_socket *vso;
VXLAN_LIST_LOCK();
LIST_FOREACH(vso, &vxlan_socket_list, vxlso_entry) {
if (vxlan_sockaddr_cmp(&vso->vxlso_laddr, &vxlsa->sa) == 0) {
VXLAN_SO_ACQUIRE(vso);
break;
}
}
VXLAN_LIST_UNLOCK();
return (vso);
}
/*
 * Publish a new vxlan socket on the global list; the list itself holds
 * a reference, taken while the list lock is held.
 */
static void
vxlan_socket_insert(struct vxlan_socket *vso)
{
	VXLAN_LIST_LOCK();
	VXLAN_SO_ACQUIRE(vso);
	LIST_INSERT_HEAD(&vxlan_socket_list, vso, vxlso_entry);
	VXLAN_LIST_UNLOCK();
}
/*
 * Create the kernel UDP socket backing this vxlan socket wrapper and
 * install the tunneling decapsulation callback.  When the
 * net.link.vxlan.reuse_port tunable is set, also enable SO_REUSEPORT
 * on the new socket.
 *
 * Returns 0 on success or a socket-layer error.  On failure the caller
 * is expected to dispose of the wrapper (vxlan_socket_destroy() closes
 * any socket that was created).
 */
static int
vxlan_socket_init(struct vxlan_socket *vso, struct ifnet *ifp)
{
	struct thread *td;
	int error;

	td = curthread;

	error = socreate(vso->vxlso_laddr.sa.sa_family, &vso->vxlso_sock,
	    SOCK_DGRAM, IPPROTO_UDP, td->td_ucred, td);
	if (error) {
		if_printf(ifp, "cannot create socket: %d\n", error);
		return (error);
	}

	error = udp_set_kernel_tunneling(vso->vxlso_sock,
	    vxlan_rcv_udp_packet, NULL, vso);
	if (error) {
		if_printf(ifp, "cannot set tunneling function: %d\n", error);
		return (error);
	}

	if (vxlan_reuse_port != 0) {
		struct sockopt sopt;
		int val = 1;

		bzero(&sopt, sizeof(sopt));
		sopt.sopt_dir = SOPT_SET;
		/*
		 * NOTE(review): SO_REUSEPORT is a socket-level option, yet
		 * the level passed to sosetopt() here is IPPROTO_IP rather
		 * than SOL_SOCKET — confirm this is intentional.
		 */
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = SO_REUSEPORT;
		sopt.sopt_val = &val;
		sopt.sopt_valsize = sizeof(val);
		error = sosetopt(vso->vxlso_sock, &sopt);
		if (error) {
			/*
			 * Fixed message: the option being set is
			 * SO_REUSEPORT, not REUSEADDR.
			 */
			if_printf(ifp,
			    "cannot set SO_REUSEPORT socket opt: %d\n",
			    error);
			return (error);
		}
	}

	return (0);
}
/*
 * Bind the wrapper's kernel socket to its configured local address.
 * EADDRINUSE is an expected outcome (the caller then falls back to
 * sharing an existing socket) and is not logged.
 */
static int
vxlan_socket_bind(struct vxlan_socket *vso, struct ifnet *ifp)
{
	union vxlan_sockaddr laddr;
	struct thread *td;
	int error;

	td = curthread;
	/* Bind a copy; the sockaddr should not be modified in place. */
	laddr = vso->vxlso_laddr;

	error = sobind(vso->vxlso_sock, &laddr.sa, td);
	if (error == 0)
		return (0);

	if (error != EADDRINUSE)
		if_printf(ifp, "cannot bind socket: %d\n", error);

	return (error);
}
/*
 * Allocate, initialize, bind, and publish a new vxlan socket for
 * 'saddr'.  Multicast sockets are bound to the wildcard address so
 * they may be shared; only the local port is significant.  On success
 * *vsop receives the new socket (referenced by the global list).
 */
static int
vxlan_socket_create(struct ifnet *ifp, int multicast,
    const union vxlan_sockaddr *saddr, struct vxlan_socket **vsop)
{
	union vxlan_sockaddr laddr;
	struct vxlan_socket *vso;
	int error;

	laddr = *saddr;

	/*
	 * If this socket will be multicast, then only the local port
	 * must be specified when binding.
	 */
	if (multicast != 0) {
		if (VXLAN_SOCKADDR_IS_IPV4(&laddr))
			laddr.in4.sin_addr.s_addr = INADDR_ANY;
#ifdef INET6
		else
			laddr.in6.sin6_addr = in6addr_any;
#endif
	}

	vso = vxlan_socket_alloc(&laddr);
	if (vso == NULL)
		return (ENOMEM);

	error = vxlan_socket_init(vso, ifp);
	if (error != 0)
		goto fail;
	error = vxlan_socket_bind(vso, ifp);
	if (error != 0)
		goto fail;

	/*
	 * There is a small window between the bind completing and
	 * inserting the socket, so that a concurrent create may fail.
	 * Let's not worry about that for now.
	 */
	vxlan_socket_insert(vso);
	*vsop = vso;

	return (0);

fail:
	vxlan_socket_destroy(vso);

	return (error);
}
/*
 * Walk every interface attached to this socket and let each decide
 * whether the departing 'ifp' affects it; affected softcs are queued
 * on 'list' for teardown by the caller.
 */
static void
vxlan_socket_ifdetach(struct vxlan_socket *vso, struct ifnet *ifp,
    struct vxlan_softc_head *list)
{
	struct rm_priotracker tracker;
	struct vxlan_softc *sc;
	int bucket;

	VXLAN_SO_RLOCK(vso, &tracker);
	for (bucket = 0; bucket < VXLAN_SO_VNI_HASH_SIZE; bucket++) {
		LIST_FOREACH(sc, &vso->vxlso_vni_hash[bucket], vxl_entry)
			vxlan_ifdetach(sc, ifp, list);
	}
	VXLAN_SO_RUNLOCK(vso, &tracker);
}
/*
 * Find an existing shareable multicast socket.  Multicast sockets are
 * bound to the wildcard address, so the address portion of 'vxlsa' is
 * cleared before the lookup.
 */
static struct vxlan_socket *
vxlan_socket_mc_lookup(const union vxlan_sockaddr *vxlsa)
{
	union vxlan_sockaddr laddr;

	laddr = *vxlsa;
	if (VXLAN_SOCKADDR_IS_IPV4(&laddr))
		laddr.in4.sin_addr.s_addr = INADDR_ANY;
#ifdef INET6
	else
		laddr.in6.sin6_addr = in6addr_any;
#endif

	return (vxlan_socket_lookup(&laddr));
}
/*
 * Return nonzero when the recorded multicast membership 'mc' matches
 * the requested group/source/interface triple.  A wildcard local
 * address matches any recorded source, and ifidx 0 matches any
 * recorded interface.
 */
static int
vxlan_sockaddr_mc_info_match(const struct vxlan_socket_mc_info *mc,
    const union vxlan_sockaddr *group, const union vxlan_sockaddr *local,
    int ifidx)
{

	if (!vxlan_sockaddr_in_equal(&mc->vxlsomc_gaddr, &group->sa))
		return (0);
	if (ifidx != 0 && ifidx != mc->vxlsomc_ifidx)
		return (0);
	if (!vxlan_sockaddr_in_any(local) &&
	    !vxlan_sockaddr_in_equal(&mc->vxlsomc_saddr, &local->sa))
		return (0);

	return (1);
}
/*
 * Join the socket to the IPv4 or IPv6 multicast 'group', using 'local'
 * (IPv4) or *ifidx (IPv6) to select the interface.  On success,
 * *source records the source address to be used when leaving the
 * group later.
 */
static int
vxlan_socket_mc_join_group(struct vxlan_socket *vso,
    const union vxlan_sockaddr *group, const union vxlan_sockaddr *local,
    int *ifidx, union vxlan_sockaddr *source)
{
	struct sockopt sopt;
	int error;

	*source = *local;

	if (VXLAN_SOCKADDR_IS_IPV4(group)) {
		struct ip_mreq mreq;

		mreq.imr_multiaddr = group->in4.sin_addr;
		mreq.imr_interface = local->in4.sin_addr;

		bzero(&sopt, sizeof(sopt));
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_ADD_MEMBERSHIP;
		sopt.sopt_val = &mreq;
		sopt.sopt_valsize = sizeof(mreq);
		error = sosetopt(vso->vxlso_sock, &sopt);
		if (error)
			return (error);

		/*
		 * BMV: Ideally, there would be a formal way for us to get
		 * the local interface that was selected based on the
		 * imr_interface address. We could then update *ifidx so
		 * vxlan_sockaddr_mc_info_match() would return a match for
		 * later creates that explicitly set the multicast interface.
		 *
		 * If we really need to, we can of course look in the INP's
		 * membership list:
		 *     sotoinpcb(vso->vxlso_sock)->inp_moptions->
		 *         imo_head[]->imf_inm->inm_ifp
		 * similarly to imo_match_group().
		 */
		source->in4.sin_addr = local->in4.sin_addr;

	} else if (VXLAN_SOCKADDR_IS_IPV6(group)) {
		struct ipv6_mreq mreq;

		mreq.ipv6mr_multiaddr = group->in6.sin6_addr;
		mreq.ipv6mr_interface = *ifidx;

		bzero(&sopt, sizeof(sopt));
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_level = IPPROTO_IPV6;
		sopt.sopt_name = IPV6_JOIN_GROUP;
		sopt.sopt_val = &mreq;
		sopt.sopt_valsize = sizeof(mreq);
		error = sosetopt(vso->vxlso_sock, &sopt);
		if (error)
			return (error);

		/*
		 * BMV: As with IPv4, we would really like to know what
		 * interface in6p_lookup_mcast_ifp() selected.
		 */
	} else
		error = EAFNOSUPPORT;

	return (error);
}
/*
 * Drop the socket's membership in 'group', using the source address
 * (IPv4) or interface index (IPv6) recorded when the group was joined.
 */
static int
vxlan_socket_mc_leave_group(struct vxlan_socket *vso,
    const union vxlan_sockaddr *group, const union vxlan_sockaddr *source,
    int ifidx)
{
	struct sockopt sopt;
	int error;

	bzero(&sopt, sizeof(sopt));
	sopt.sopt_dir = SOPT_SET;

	if (VXLAN_SOCKADDR_IS_IPV4(group)) {
		struct ip_mreq mreq;

		mreq.imr_multiaddr = group->in4.sin_addr;
		mreq.imr_interface = source->in4.sin_addr;

		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_DROP_MEMBERSHIP;
		sopt.sopt_val = &mreq;
		sopt.sopt_valsize = sizeof(mreq);
		error = sosetopt(vso->vxlso_sock, &sopt);

	} else if (VXLAN_SOCKADDR_IS_IPV6(group)) {
		struct ipv6_mreq mreq;

		mreq.ipv6mr_multiaddr = group->in6.sin6_addr;
		mreq.ipv6mr_interface = ifidx;

		sopt.sopt_level = IPPROTO_IPV6;
		sopt.sopt_name = IPV6_LEAVE_GROUP;
		sopt.sopt_val = &mreq;
		sopt.sopt_valsize = sizeof(mreq);
		error = sosetopt(vso->vxlso_sock, &sopt);

	} else
		error = EAFNOSUPPORT;

	return (error);
}
/*
 * Register this interface as a user of multicast 'group' on the shared
 * socket.  The socket may only join a group once, so per-group usage
 * counts are kept in vxlso_mc[]; on success *idx is set to the slot
 * used.  The group join itself is performed without the socket lock,
 * so the slot scan is redone afterwards; if the table filled in the
 * meantime the join is undone and ENOSPC returned.
 */
static int
vxlan_socket_mc_add_group(struct vxlan_socket *vso,
    const union vxlan_sockaddr *group, const union vxlan_sockaddr *local,
    int ifidx, int *idx)
{
	union vxlan_sockaddr source;
	struct vxlan_socket_mc_info *mc;
	int i, empty, error;

	/*
	 * Within a socket, the same multicast group may be used by multiple
	 * interfaces, each with a different network identifier. But a socket
	 * may only join a multicast group once, so keep track of the users
	 * here.
	 */

	VXLAN_SO_WLOCK(vso);
	for (empty = 0, i = 0; i < VXLAN_SO_MC_MAX_GROUPS; i++) {
		mc = &vso->vxlso_mc[i];

		if (mc->vxlsomc_gaddr.sa.sa_family == AF_UNSPEC) {
			empty++;
			continue;
		}

		/* Already joined: just take another usage reference. */
		if (vxlan_sockaddr_mc_info_match(mc, group, local, ifidx))
			goto out;
	}
	VXLAN_SO_WUNLOCK(vso);

	if (empty == 0)
		return (ENOSPC);

	/* Join without the lock held; sosetopt() may sleep. */
	error = vxlan_socket_mc_join_group(vso, group, local, &ifidx, &source);
	if (error)
		return (error);

	/* Rescan for a free slot; another thread may have raced us. */
	VXLAN_SO_WLOCK(vso);
	for (i = 0; i < VXLAN_SO_MC_MAX_GROUPS; i++) {
		mc = &vso->vxlso_mc[i];

		if (mc->vxlsomc_gaddr.sa.sa_family == AF_UNSPEC) {
			vxlan_sockaddr_copy(&mc->vxlsomc_gaddr, &group->sa);
			vxlan_sockaddr_copy(&mc->vxlsomc_saddr, &source.sa);
			mc->vxlsomc_ifidx = ifidx;
			goto out;
		}
	}
	VXLAN_SO_WUNLOCK(vso);

	/* Table filled while we were joining; undo the join. */
	error = vxlan_socket_mc_leave_group(vso, group, &source, ifidx);
	MPASS(error == 0);

	return (ENOSPC);

out:
	mc->vxlsomc_users++;
	VXLAN_SO_WUNLOCK(vso);

	*idx = i;

	return (0);
}
/*
 * Drop one usage reference on multicast slot 'idx'; the last user
 * clears the slot and leaves the group.  The leave is done after
 * dropping the socket lock, using a snapshot of the slot taken while
 * it was still held.
 */
static void
vxlan_socket_mc_release_group_by_idx(struct vxlan_socket *vso, int idx)
{
	union vxlan_sockaddr group, source;
	struct vxlan_socket_mc_info *mc;
	int ifidx, leave;

	KASSERT(idx >= 0 && idx < VXLAN_SO_MC_MAX_GROUPS,
	    ("%s: vso %p idx %d out of bounds", __func__, vso, idx));

	leave = 0;
	mc = &vso->vxlso_mc[idx];

	VXLAN_SO_WLOCK(vso);
	mc->vxlsomc_users--;
	if (mc->vxlsomc_users == 0) {
		group = mc->vxlsomc_gaddr;
		source = mc->vxlsomc_saddr;
		ifidx = mc->vxlsomc_ifidx;
		bzero(mc, sizeof(*mc));
		leave = 1;
	}
	VXLAN_SO_WUNLOCK(vso);

	if (leave != 0) {
		/*
		 * Our socket's membership in this group may have already
		 * been removed if we joined through an interface that's
		 * been detached.
		 */
		vxlan_socket_mc_leave_group(vso, &group, &source, ifidx);
	}
}
/*
 * Find the interface attached to this socket with network identifier
 * 'vni', taking a reference on it.  Returns NULL when no interface
 * uses that VNI.  Caller holds the socket lock.
 */
static struct vxlan_softc *
vxlan_socket_lookup_softc_locked(struct vxlan_socket *vso, uint32_t vni)
{
	struct vxlan_softc *sc, *match;
	uint32_t bucket;

	VXLAN_SO_LOCK_ASSERT(vso);
	bucket = VXLAN_SO_VNI_HASH(vni);

	match = NULL;
	LIST_FOREACH(sc, &vso->vxlso_vni_hash[bucket], vxl_entry) {
		if (sc->vxl_vni == vni) {
			VXLAN_ACQUIRE(sc);
			match = sc;
			break;
		}
	}

	return (match);
}
/*
 * Locking wrapper around vxlan_socket_lookup_softc_locked(); returns a
 * referenced softc for 'vni' or NULL.
 */
static struct vxlan_softc *
vxlan_socket_lookup_softc(struct vxlan_socket *vso, uint32_t vni)
{
	struct rm_priotracker tracker;
	struct vxlan_softc *sc;

	VXLAN_SO_RLOCK(vso, &tracker);
	sc = vxlan_socket_lookup_softc_locked(vso, vni);
	VXLAN_SO_RUNLOCK(vso, &tracker);

	return (sc);
}
/*
 * Attach 'sc' to this socket's VNI hash.  Each network identifier may
 * appear at most once per socket; returns EEXIST if the VNI is taken.
 * The hash holds a reference on the softc.
 */
static int
vxlan_socket_insert_softc(struct vxlan_socket *vso, struct vxlan_softc *sc)
{
	struct vxlan_softc *other;
	uint32_t vni, bucket;

	vni = sc->vxl_vni;
	bucket = VXLAN_SO_VNI_HASH(vni);

	VXLAN_SO_WLOCK(vso);
	other = vxlan_socket_lookup_softc_locked(vso, vni);
	if (other != NULL) {
		VXLAN_SO_WUNLOCK(vso);
		/* Drop the reference the lookup took. */
		vxlan_release(other);
		return (EEXIST);
	}

	VXLAN_ACQUIRE(sc);
	LIST_INSERT_HEAD(&vso->vxlso_vni_hash[bucket], sc, vxl_entry);
	VXLAN_SO_WUNLOCK(vso);

	return (0);
}
/*
 * Detach 'sc' from the socket's VNI hash and drop the reference the
 * hash held on it.
 */
static void
vxlan_socket_remove_softc(struct vxlan_socket *vso, struct vxlan_softc *sc)
{
	VXLAN_SO_WLOCK(vso);
	LIST_REMOVE(sc, vxl_entry);
	VXLAN_SO_WUNLOCK(vso);

	vxlan_release(sc);
}
/*
 * Return a referenced pointer to the configured IPv4 or IPv6 multicast
 * output interface, or NULL when none is configured.  The caller must
 * if_rele() the result.
 */
static struct ifnet *
vxlan_multicast_if_ref(struct vxlan_softc *sc, int ipv4)
{
	struct ifnet *ifp;

	VXLAN_LOCK_ASSERT(sc);

	ifp = NULL;
	if (ipv4) {
		if (sc->vxl_im4o != NULL)
			ifp = sc->vxl_im4o->imo_multicast_ifp;
	} else if (sc->vxl_im6o != NULL)
		ifp = sc->vxl_im6o->im6o_multicast_ifp;

	if (ifp != NULL)
		if_ref(ifp);

	return (ifp);
}
/*
 * Release all multicast state hanging off the softc: the interface
 * reference and the IPv4/IPv6 multicast options structures.
 */
static void
vxlan_free_multicast(struct vxlan_softc *sc)
{
	struct ifnet *mcifp;

	mcifp = sc->vxl_mc_ifp;
	if (mcifp != NULL) {
		sc->vxl_mc_ifp = NULL;
		sc->vxl_mc_ifindex = 0;
		if_rele(mcifp);
	}

	if (sc->vxl_im4o != NULL) {
		free(sc->vxl_im4o, M_VXLAN);
		sc->vxl_im4o = NULL;
	}

	if (sc->vxl_im6o != NULL) {
		free(sc->vxl_im6o, M_VXLAN);
		sc->vxl_im6o = NULL;
	}
}
/*
 * Resolve the user-configured multicast interface name to an ifnet,
 * verify it supports multicast, and record a reference plus its index
 * in the softc.
 */
static int
vxlan_setup_multicast_interface(struct vxlan_softc *sc)
{
	struct ifnet *ifp;

	ifp = ifunit_ref(sc->vxl_mc_ifname);
	if (ifp == NULL) {
		if_printf(sc->vxl_ifp,
		    "multicast interface %s does not exist\n",
		    sc->vxl_mc_ifname);
		return (ENOENT);
	}

	if ((ifp->if_flags & IFF_MULTICAST) == 0) {
		if_printf(sc->vxl_ifp,
		    "interface %s does not support multicast\n",
		    sc->vxl_mc_ifname);
		if_rele(ifp);
		return (ENOTSUP);
	}

	sc->vxl_mc_ifp = ifp;
	sc->vxl_mc_ifindex = ifp->if_index;

	return (0);
}
static int
vxlan_setup_multicast(struct vxlan_softc *sc)
{
const union vxlan_sockaddr *group;
int error;
group = &sc->vxl_dst_addr;
error = 0;
if (sc->vxl_mc_ifname[0] != '\0') {
error = vxlan_setup_multicast_interface(sc);
if (error)
return (error);
}
/*
* Initialize an multicast options structure that is sufficiently
* populated for use in the respective IP output routine. This
* structure is typically stored in the socket, but our sockets
* may be shared among multiple interfaces.
*/
if (VXLAN_SOCKADDR_IS_IPV4(group)) {
sc->vxl_im4o = malloc(sizeof(struct ip_moptions), M_VXLAN,
M_ZERO | M_WAITOK);
sc->vxl_im4o->imo_multicast_ifp = sc->vxl_mc_ifp;
sc->vxl_im4o->imo_multicast_ttl = sc->vxl_ttl;
sc->vxl_im4o->imo_multicast_vif = -1;
} else if (VXLAN_SOCKADDR_IS_IPV6(group)) {
sc->vxl_im6o = malloc(sizeof(struct ip6_moptions), M_VXLAN,
M_ZERO | M_WAITOK);
sc->vxl_im6o->im6o_multicast_ifp = sc->vxl_mc_ifp;
sc->vxl_im6o->im6o_multicast_hlim = sc->vxl_ttl;
}
return (error);
}
/*
 * Attach the softc to a vxlan socket, creating one if possible or
 * falling back to an existing socket bound to the same local address.
 * For multicast destinations, also set up the multicast output state
 * and join the group.  On failure, every partially acquired resource
 * (group membership, multicast state, socket reference) is unwound.
 */
static int
vxlan_setup_socket(struct vxlan_softc *sc)
{
	struct vxlan_socket *vso;
	struct ifnet *ifp;
	union vxlan_sockaddr *saddr, *daddr;
	int multicast, error;

	vso = NULL;
	ifp = sc->vxl_ifp;
	saddr = &sc->vxl_src_addr;
	daddr = &sc->vxl_dst_addr;

	multicast = vxlan_sockaddr_in_multicast(daddr);
	MPASS(multicast != -1);
	sc->vxl_vso_mc_index = -1;

	/*
	 * Try to create the socket. If that fails, attempt to use an
	 * existing socket.
	 */
	error = vxlan_socket_create(ifp, multicast, saddr, &vso);
	if (error) {
		if (multicast != 0)
			vso = vxlan_socket_mc_lookup(saddr);
		else
			vso = vxlan_socket_lookup(saddr);

		if (vso == NULL) {
			if_printf(ifp, "cannot create socket (error: %d), "
			    "and no existing socket found\n", error);
			goto out;
		}
	}

	if (multicast != 0) {
		error = vxlan_setup_multicast(sc);
		if (error)
			goto out;

		error = vxlan_socket_mc_add_group(vso, daddr, saddr,
		    sc->vxl_mc_ifindex, &sc->vxl_vso_mc_index);
		if (error)
			goto out;
	}

	sc->vxl_sock = vso;
	error = vxlan_socket_insert_softc(vso, sc);
	if (error) {
		sc->vxl_sock = NULL;
		if_printf(ifp, "network identifier %d already exists in "
		    "this socket\n", sc->vxl_vni);
		goto out;
	}

	return (0);

out:
	if (vso != NULL) {
		/* Undo the group join, multicast state, and reference. */
		if (sc->vxl_vso_mc_index != -1) {
			vxlan_socket_mc_release_group_by_idx(vso,
			    sc->vxl_vso_mc_index);
			sc->vxl_vso_mc_index = -1;
		}
		if (multicast != 0)
			vxlan_free_multicast(sc);
		vxlan_socket_release(vso);
	}

	return (error);
}
#ifdef INET6
/*
 * For IPv6 tunnels, try to register this interface's UDP port as the
 * VNET-wide RFC 6935 zero-checksum port.  Only possible when the local
 * and remote ports agree and no different port has been claimed yet;
 * otherwise a diagnostic is printed and the global is left alone.
 */
static void
vxlan_setup_zero_checksum_port(struct vxlan_softc *sc)
{
	if (!VXLAN_SOCKADDR_IS_IPV6(&sc->vxl_src_addr))
		return;

	MPASS(sc->vxl_src_addr.in6.sin6_port != 0);
	MPASS(sc->vxl_dst_addr.in6.sin6_port != 0);

	if (sc->vxl_src_addr.in6.sin6_port != sc->vxl_dst_addr.in6.sin6_port) {
		if_printf(sc->vxl_ifp, "port %d in src address does not match "
		    "port %d in dst address, rfc6935_port (%d) not updated.\n",
		    ntohs(sc->vxl_src_addr.in6.sin6_port),
		    ntohs(sc->vxl_dst_addr.in6.sin6_port),
		    V_zero_checksum_port);
		return;
	}

	if (V_zero_checksum_port != 0) {
		if (V_zero_checksum_port !=
		    ntohs(sc->vxl_src_addr.in6.sin6_port)) {
			if_printf(sc->vxl_ifp, "rfc6935_port is already set to "
			    "%d, cannot set it to %d.\n", V_zero_checksum_port,
			    ntohs(sc->vxl_src_addr.in6.sin6_port));
		}
		return;
	}

	V_zero_checksum_port = ntohs(sc->vxl_src_addr.in6.sin6_port);
	if_printf(sc->vxl_ifp, "rfc6935_port set to %d\n",
	    V_zero_checksum_port);
}
#endif
/*
 * Recompute the interface header length from the encapsulation (outer
 * Ethernet + IP/IPv6 + UDP + VXLAN headers) and, unless the user set
 * an explicit MTU, derive the MTU from it.
 */
static void
vxlan_setup_interface_hdrlen(struct vxlan_softc *sc)
{
	struct ifnet *ifp;
	int hdrlen;

	VXLAN_LOCK_WASSERT(sc);
	ifp = sc->vxl_ifp;

	hdrlen = ETHER_HDR_LEN + sizeof(struct vxlanudphdr);
	if (VXLAN_SOCKADDR_IS_IPV4(&sc->vxl_dst_addr) != 0)
		hdrlen += sizeof(struct ip);
	else if (VXLAN_SOCKADDR_IS_IPV6(&sc->vxl_dst_addr) != 0)
		hdrlen += sizeof(struct ip6_hdr);
	ifp->if_hdrlen = hdrlen;

	if ((sc->vxl_flags & VXLAN_FLAG_USER_MTU) == 0)
		ifp->if_mtu = ETHERMTU - hdrlen;
}
/*
 * Sanity-check the softc configuration before bringing the interface
 * up.  Returns 0 when usable; otherwise logs the first problem found
 * and returns EINVAL.
 */
static int
vxlan_valid_init_config(struct vxlan_softc *sc)
{
	const char *why;

	if (vxlan_check_vni(sc->vxl_vni) != 0) {
		why = "invalid virtual network identifier specified";
		goto fail;
	}

	if (vxlan_sockaddr_supported(&sc->vxl_src_addr, 1) == 0) {
		why = "source address type is not supported";
		goto fail;
	}

	if (vxlan_sockaddr_supported(&sc->vxl_dst_addr, 0) == 0) {
		why = "destination address type is not supported";
		goto fail;
	}

	if (vxlan_sockaddr_in_any(&sc->vxl_dst_addr) != 0) {
		why = "no valid destination address specified";
		goto fail;
	}

	if (vxlan_sockaddr_in_multicast(&sc->vxl_dst_addr) == 0 &&
	    sc->vxl_mc_ifname[0] != '\0') {
		why = "can only specify interface with a group address";
		goto fail;
	}

	/* Both endpoints must use the same address family. */
	if (vxlan_sockaddr_in_any(&sc->vxl_src_addr) == 0 &&
	    (VXLAN_SOCKADDR_IS_IPV4(&sc->vxl_src_addr) ^
	     VXLAN_SOCKADDR_IS_IPV4(&sc->vxl_dst_addr))) {
		why = "source and destination address must both "
		    "be either IPv4 or IPv6";
		goto fail;
	}

	if (sc->vxl_src_addr.in4.sin_port == 0) {
		why = "local port not specified";
		goto fail;
	}

	if (sc->vxl_dst_addr.in4.sin_port == 0) {
		why = "remote port not specified";
		goto fail;
	}

	return (0);

fail:
	if_printf(sc->vxl_ifp, "cannot initialize interface: %s\n", why);

	return (EINVAL);
}
/*
 * Sleep until a concurrent vxlan_init() clears VXLAN_FLAG_INIT.  The
 * one-second timeout tolerates a missed wakeup.
 */
static void
vxlan_init_wait(struct vxlan_softc *sc)
{
	VXLAN_LOCK_WASSERT(sc);
	while (sc->vxl_flags & VXLAN_FLAG_INIT)
		rm_sleep(sc, &sc->vxl_lock, 0, "vxlint", hz);
}
/* Clear the in-progress init flag and wake anyone in vxlan_init_wait(). */
static void
vxlan_init_complete(struct vxlan_softc *sc)
{
	VXLAN_WLOCK(sc);
	sc->vxl_flags &= ~VXLAN_FLAG_INIT;
	wakeup(sc);
	VXLAN_WUNLOCK(sc);
}
/*
 * Bring the interface up: validate the configuration, attach to (or
 * create) the vxlan socket, set up the default forwarding entry, mark
 * the interface running, and start the pruning timer.  VXLAN_FLAG_INIT
 * serializes against concurrent init attempts; vxlan_sx serializes
 * against teardown.
 */
static void
vxlan_init(void *xsc)
{
	static const uint8_t empty_mac[ETHER_ADDR_LEN];
	struct vxlan_softc *sc;
	struct ifnet *ifp;

	sc = xsc;
	ifp = sc->vxl_ifp;

	sx_xlock(&vxlan_sx);
	VXLAN_WLOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* Already up; nothing to do. */
		VXLAN_WUNLOCK(sc);
		sx_xunlock(&vxlan_sx);
		return;
	}
	sc->vxl_flags |= VXLAN_FLAG_INIT;
	VXLAN_WUNLOCK(sc);

	if (vxlan_valid_init_config(sc) != 0)
		goto out;

	if (vxlan_setup_socket(sc) != 0)
		goto out;

#ifdef INET6
	vxlan_setup_zero_checksum_port(sc);
#endif

	/* Initialize the default forwarding entry. */
	vxlan_ftable_entry_init(sc, &sc->vxl_default_fe, empty_mac,
	    &sc->vxl_dst_addr.sa, VXLAN_FE_FLAG_STATIC);

	VXLAN_WLOCK(sc);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	callout_reset(&sc->vxl_callout, vxlan_ftable_prune_period * hz,
	    vxlan_timer, sc);
	VXLAN_WUNLOCK(sc);

	if_link_state_change(ifp, LINK_STATE_UP);

	EVENTHANDLER_INVOKE(vxlan_start, ifp, sc->vxl_src_addr.in4.sin_family,
	    ntohs(sc->vxl_src_addr.in4.sin_port));
out:
	vxlan_init_complete(sc);
	sx_xunlock(&vxlan_sx);
}
/*
 * Drop a softc reference; the final release wakes a teardown thread
 * waiting in vxlan_teardown_locked() for the refcount to drain.
 */
static void
vxlan_release(struct vxlan_softc *sc)
{
	/*
	 * The softc may be destroyed as soon as we release our reference,
	 * so we cannot serialize the wakeup with the softc lock. We use a
	 * timeout in our sleeps so a missed wakeup is unfortunate but not
	 * fatal.
	 */
	if (VXLAN_RELEASE(sc) != 0)
		wakeup(sc);
}
/*
 * Sleep until a concurrent teardown clears VXLAN_FLAG_TEARDOWN.  The
 * one-second timeout tolerates a missed wakeup.
 */
static void
vxlan_teardown_wait(struct vxlan_softc *sc)
{
	VXLAN_LOCK_WASSERT(sc);
	while (sc->vxl_flags & VXLAN_FLAG_TEARDOWN)
		rm_sleep(sc, &sc->vxl_lock, 0, "vxltrn", hz);
}
/* Clear the teardown flag and wake anyone in vxlan_teardown_wait(). */
static void
vxlan_teardown_complete(struct vxlan_softc *sc)
{
	VXLAN_WLOCK(sc);
	sc->vxl_flags &= ~VXLAN_FLAG_TEARDOWN;
	wakeup(sc);
	VXLAN_WUNLOCK(sc);
}
/*
 * Tear down a running interface: mark it down, stop the prune callout,
 * detach from the vxlan socket, drain outstanding references, and
 * finally wake anyone in vxlan_teardown_wait().  Entered with vxlan_sx
 * and the softc write lock held and VXLAN_FLAG_TEARDOWN already set;
 * the write lock is dropped and re-taken as needed below.
 */
static void
vxlan_teardown_locked(struct vxlan_softc *sc)
{
struct ifnet *ifp;
struct vxlan_socket *vso;
sx_assert(&vxlan_sx, SA_XLOCKED);
VXLAN_LOCK_WASSERT(sc);
MPASS(sc->vxl_flags & VXLAN_FLAG_TEARDOWN);
ifp = sc->vxl_ifp;
ifp->if_flags &= ~IFF_UP;
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
callout_stop(&sc->vxl_callout);
/* Detach the socket under the lock; release it after the drain. */
vso = sc->vxl_sock;
sc->vxl_sock = NULL;
VXLAN_WUNLOCK(sc);
if_link_state_change(ifp, LINK_STATE_DOWN);
EVENTHANDLER_INVOKE(vxlan_stop, ifp, sc->vxl_src_addr.in4.sin_family,
ntohs(sc->vxl_src_addr.in4.sin_port));
if (vso != NULL) {
vxlan_socket_remove_softc(vso, sc);
if (sc->vxl_vso_mc_index != -1) {
vxlan_socket_mc_release_group_by_idx(vso,
sc->vxl_vso_mc_index);
sc->vxl_vso_mc_index = -1;
}
}
/* Wait for in-flight transmit/receive users to drop their refs. */
VXLAN_WLOCK(sc);
while (sc->vxl_refcnt != 0)
rm_sleep(sc, &sc->vxl_lock, 0, "vxldrn", hz);
VXLAN_WUNLOCK(sc);
callout_drain(&sc->vxl_callout);
vxlan_free_multicast(sc);
if (vso != NULL)
vxlan_socket_release(vso);
vxlan_teardown_complete(sc);
}
/*
 * Public teardown entry point.  If another thread is already tearing
 * the interface down, just wait for it to finish; otherwise claim the
 * teardown flag and do the work ourselves.  vxlan_teardown_locked()
 * consumes the write lock and performs the wakeup on completion.
 */
static void
vxlan_teardown(struct vxlan_softc *sc)
{
sx_xlock(&vxlan_sx);
VXLAN_WLOCK(sc);
if (sc->vxl_flags & VXLAN_FLAG_TEARDOWN) {
vxlan_teardown_wait(sc);
VXLAN_WUNLOCK(sc);
sx_xunlock(&vxlan_sx);
return;
}
sc->vxl_flags |= VXLAN_FLAG_TEARDOWN;
vxlan_teardown_locked(sc);
sx_xunlock(&vxlan_sx);
}
/*
 * Departing-ifnet handler helper: if this softc's multicast interface
 * is going away and no teardown is already in flight, claim the
 * teardown flag and queue the softc on the caller's list so it can be
 * torn down after the event handler iteration completes.
 */
static void
vxlan_ifdetach(struct vxlan_softc *sc, struct ifnet *ifp,
    struct vxlan_softc_head *list)
{

	VXLAN_WLOCK(sc);
	if (sc->vxl_mc_ifp == ifp &&
	    (sc->vxl_flags & VXLAN_FLAG_TEARDOWN) == 0) {
		sc->vxl_flags |= VXLAN_FLAG_TEARDOWN;
		LIST_INSERT_HEAD(list, sc, vxl_ifdetach_list);
	}
	VXLAN_WUNLOCK(sc);
}
/*
 * Periodic callout that expires stale learned forwarding entries and
 * reschedules itself.  Runs with the softc lock held (the callout was
 * initialized with callout_init_rw() on vxl_lock).
 */
static void
vxlan_timer(void *xsc)
{
struct vxlan_softc *sc;
sc = xsc;
VXLAN_LOCK_WASSERT(sc);
vxlan_ftable_expire(sc);
callout_schedule(&sc->vxl_callout, vxlan_ftable_prune_period * hz);
}
/*
 * SIOCSIFFLAGS handler: bring the interface up or down so the driver
 * state matches the administrative IFF_UP flag.
 */
static int
vxlan_ioctl_ifflags(struct vxlan_softc *sc)
{
	struct ifnet *ifp = sc->vxl_ifp;
	int running = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;

	if ((ifp->if_flags & IFF_UP) != 0) {
		if (!running)
			vxlan_init(sc);
	} else if (running)
		vxlan_teardown(sc);

	return (0);
}
/*
 * VXLAN_CMD_GET_CONFIG handler: snapshot the current configuration into
 * the caller's ifvxlancfg under the read lock.  The IPv6 scope recovery
 * is done after the lock is dropped since it only touches the copy.
 */
static int
vxlan_ctrl_get_config(struct vxlan_softc *sc, void *arg)
{
struct rm_priotracker tracker;
struct ifvxlancfg *cfg;
cfg = arg;
bzero(cfg, sizeof(*cfg));
VXLAN_RLOCK(sc, &tracker);
cfg->vxlc_vni = sc->vxl_vni;
memcpy(&cfg->vxlc_local_sa, &sc->vxl_src_addr,
sizeof(union vxlan_sockaddr));
memcpy(&cfg->vxlc_remote_sa, &sc->vxl_dst_addr,
sizeof(union vxlan_sockaddr));
cfg->vxlc_mc_ifindex = sc->vxl_mc_ifindex;
cfg->vxlc_ftable_cnt = sc->vxl_ftable_cnt;
cfg->vxlc_ftable_max = sc->vxl_ftable_max;
cfg->vxlc_ftable_timeout = sc->vxl_ftable_timeout;
cfg->vxlc_port_min = sc->vxl_min_port;
cfg->vxlc_port_max = sc->vxl_max_port;
cfg->vxlc_learn = (sc->vxl_flags & VXLAN_FLAG_LEARN) != 0;
cfg->vxlc_ttl = sc->vxl_ttl;
VXLAN_RUNLOCK(sc, &tracker);
#ifdef INET6
/* Restore the embedded scope id to user-visible form in the copies. */
if (VXLAN_SOCKADDR_IS_IPV6(&cfg->vxlc_local_sa))
sa6_recoverscope(&cfg->vxlc_local_sa.in6);
if (VXLAN_SOCKADDR_IS_IPV6(&cfg->vxlc_remote_sa))
sa6_recoverscope(&cfg->vxlc_remote_sa.in6);
#endif
return (0);
}
/*
 * Set the VXLAN network identifier.  The VNI may only change while the
 * interface configuration is mutable (not running / not tearing down).
 */
static int
vxlan_ctrl_set_vni(struct vxlan_softc *sc, void *arg)
{
	struct ifvxlancmd *cmd = arg;
	int error = 0;

	if (vxlan_check_vni(cmd->vxlcmd_vni) != 0)
		return (EINVAL);

	VXLAN_WLOCK(sc);
	if (vxlan_can_change_config(sc))
		sc->vxl_vni = cmd->vxlcmd_vni;
	else
		error = EBUSY;
	VXLAN_WUNLOCK(sc);

	return (error);
}
static int
vxlan_ctrl_set_local_addr(struct vxlan_softc *sc, void *arg)
{
struct ifvxlancmd *cmd;
union vxlan_sockaddr *vxlsa;
int error;
cmd = arg;
vxlsa = &cmd->vxlcmd_sa;
if (!VXLAN_SOCKADDR_IS_IPV46(vxlsa))
return (EINVAL);
if (vxlan_sockaddr_in_multicast(vxlsa) != 0)
return (EINVAL);
if (VXLAN_SOCKADDR_IS_IPV6(vxlsa)) {
error = vxlan_sockaddr_in6_embedscope(vxlsa);
if (error)
return (error);
}
VXLAN_WLOCK(sc);
if (vxlan_can_change_config(sc)) {
vxlan_sockaddr_in_copy(&sc->vxl_src_addr, &vxlsa->sa);
vxlan_set_hwcaps(sc);
error = 0;
} else
error = EBUSY;
VXLAN_WUNLOCK(sc);
return (error);
}
static int
vxlan_ctrl_set_remote_addr(struct vxlan_softc *sc, void *arg)
{
struct ifvxlancmd *cmd;
union vxlan_sockaddr *vxlsa;
int error;
cmd = arg;
vxlsa = &cmd->vxlcmd_sa;
if (!VXLAN_SOCKADDR_IS_IPV46(vxlsa))
return (EINVAL);
if (VXLAN_SOCKADDR_IS_IPV6(vxlsa)) {
error = vxlan_sockaddr_in6_embedscope(vxlsa);
if (error)
return (error);
}
VXLAN_WLOCK(sc);
if (vxlan_can_change_config(sc)) {
vxlan_sockaddr_in_copy(&sc->vxl_dst_addr, &vxlsa->sa);
vxlan_setup_interface_hdrlen(sc);
error = 0;
} else
error = EBUSY;
VXLAN_WUNLOCK(sc);
return (error);
}
/*
 * Set the local UDP port.  Port zero is invalid; the change is only
 * allowed while the configuration is mutable.
 */
static int
vxlan_ctrl_set_local_port(struct vxlan_softc *sc, void *arg)
{
	struct ifvxlancmd *cmd = arg;
	int error = 0;

	if (cmd->vxlcmd_port == 0)
		return (EINVAL);

	VXLAN_WLOCK(sc);
	if (vxlan_can_change_config(sc))
		sc->vxl_src_addr.in4.sin_port = htons(cmd->vxlcmd_port);
	else
		error = EBUSY;
	VXLAN_WUNLOCK(sc);

	return (error);
}
/*
 * Set the remote UDP port.  Port zero is invalid; the change is only
 * allowed while the configuration is mutable.
 */
static int
vxlan_ctrl_set_remote_port(struct vxlan_softc *sc, void *arg)
{
	struct ifvxlancmd *cmd = arg;
	int error = 0;

	if (cmd->vxlcmd_port == 0)
		return (EINVAL);

	VXLAN_WLOCK(sc);
	if (vxlan_can_change_config(sc))
		sc->vxl_dst_addr.in4.sin_port = htons(cmd->vxlcmd_port);
	else
		error = EBUSY;
	VXLAN_WUNLOCK(sc);

	return (error);
}
/*
 * Set the range used when hashing a source port for outbound
 * encapsulated packets.  The range must be non-empty and may only
 * change while the configuration is mutable.
 */
static int
vxlan_ctrl_set_port_range(struct vxlan_softc *sc, void *arg)
{
	struct ifvxlancmd *cmd = arg;
	uint16_t lo, hi;
	int error = 0;

	lo = cmd->vxlcmd_port_min;
	hi = cmd->vxlcmd_port_max;
	if (hi < lo)
		return (EINVAL);

	VXLAN_WLOCK(sc);
	if (vxlan_can_change_config(sc)) {
		sc->vxl_min_port = lo;
		sc->vxl_max_port = hi;
	} else
		error = EBUSY;
	VXLAN_WUNLOCK(sc);

	return (error);
}
static int
vxlan_ctrl_set_ftable_timeout(struct vxlan_softc *sc, void *arg)
{
struct ifvxlancmd *cmd;
int error;
cmd = arg;
VXLAN_WLOCK(sc);
if (vxlan_check_ftable_timeout(cmd->vxlcmd_ftable_timeout) == 0) {
sc->vxl_ftable_timeout = cmd->vxlcmd_ftable_timeout;
error = 0;
} else
error = EINVAL;
VXLAN_WUNLOCK(sc);
return (error);
}
static int
vxlan_ctrl_set_ftable_max(struct vxlan_softc *sc, void *arg)
{
struct ifvxlancmd *cmd;
int error;
cmd = arg;
VXLAN_WLOCK(sc);
if (vxlan_check_ftable_max(cmd->vxlcmd_ftable_max) == 0) {
sc->vxl_ftable_max = cmd->vxlcmd_ftable_max;
error = 0;
} else
error = EINVAL;
VXLAN_WUNLOCK(sc);
return (error);
}
/*
 * Record the interface name to use for multicast tunnel traffic and
 * recompute the inherited hardware capabilities.  Only allowed while
 * the configuration is mutable.
 */
static int
vxlan_ctrl_set_multicast_if(struct vxlan_softc *sc, void *arg)
{
	struct ifvxlancmd *cmd = arg;
	int error = 0;

	VXLAN_WLOCK(sc);
	if (vxlan_can_change_config(sc)) {
		strlcpy(sc->vxl_mc_ifname, cmd->vxlcmd_ifname, IFNAMSIZ);
		vxlan_set_hwcaps(sc);
	} else
		error = EBUSY;
	VXLAN_WUNLOCK(sc);

	return (error);
}
static int
vxlan_ctrl_set_ttl(struct vxlan_softc *sc, void *arg)
{
struct ifvxlancmd *cmd;
int error;
cmd = arg;
VXLAN_WLOCK(sc);
if (vxlan_check_ttl(cmd->vxlcmd_ttl) == 0) {
sc->vxl_ttl = cmd->vxlcmd_ttl;
if (sc->vxl_im4o != NULL)
sc->vxl_im4o->imo_multicast_ttl = sc->vxl_ttl;
if (sc->vxl_im6o != NULL)
sc->vxl_im6o->im6o_multicast_hlim = sc->vxl_ttl;
error = 0;
} else
error = EINVAL;
VXLAN_WUNLOCK(sc);
return (error);
}
/*
 * Enable or disable source-address learning for the forwarding table.
 */
static int
vxlan_ctrl_set_learn(struct vxlan_softc *sc, void *arg)
{
	struct ifvxlancmd *cmd = arg;

	VXLAN_WLOCK(sc);
	if ((cmd->vxlcmd_flags & VXLAN_CMD_FLAG_LEARN) != 0)
		sc->vxl_flags |= VXLAN_FLAG_LEARN;
	else
		sc->vxl_flags &= ~VXLAN_FLAG_LEARN;
	VXLAN_WUNLOCK(sc);

	return (0);
}
/*
 * Add a static forwarding entry mapping a MAC address to a tunnel
 * endpoint.  The entry is allocated and initialized before the write
 * lock is taken, so only the insert runs under the lock; the entry is
 * freed again if the insert fails (e.g. duplicate or table full).
 */
static int
vxlan_ctrl_ftable_entry_add(struct vxlan_softc *sc, void *arg)
{
union vxlan_sockaddr vxlsa;
struct ifvxlancmd *cmd;
struct vxlan_ftable_entry *fe;
int error;
cmd = arg;
vxlsa = cmd->vxlcmd_sa;
if (!VXLAN_SOCKADDR_IS_IPV46(&vxlsa))
return (EINVAL);
if (vxlan_sockaddr_in_any(&vxlsa) != 0)
return (EINVAL);
if (vxlan_sockaddr_in_multicast(&vxlsa) != 0)
return (EINVAL);
/* BMV: We could support both IPv4 and IPv6 later. */
if (vxlsa.sa.sa_family != sc->vxl_dst_addr.sa.sa_family)
return (EAFNOSUPPORT);
if (VXLAN_SOCKADDR_IS_IPV6(&vxlsa)) {
error = vxlan_sockaddr_in6_embedscope(&vxlsa);
if (error)
return (error);
}
fe = vxlan_ftable_entry_alloc();
if (fe == NULL)
return (ENOMEM);
/* Default to the configured destination port when none was given. */
if (vxlsa.in4.sin_port == 0)
vxlsa.in4.sin_port = sc->vxl_dst_addr.in4.sin_port;
vxlan_ftable_entry_init(sc, fe, cmd->vxlcmd_mac, &vxlsa.sa,
VXLAN_FE_FLAG_STATIC);
VXLAN_WLOCK(sc);
error = vxlan_ftable_entry_insert(sc, fe);
VXLAN_WUNLOCK(sc);
if (error)
vxlan_ftable_entry_free(fe);
return (error);
}
static int
vxlan_ctrl_ftable_entry_rem(struct vxlan_softc *sc, void *arg)
{
struct ifvxlancmd *cmd;
struct vxlan_ftable_entry *fe;
int error;
cmd = arg;
VXLAN_WLOCK(sc);
fe = vxlan_ftable_entry_lookup(sc, cmd->vxlcmd_mac);
if (fe != NULL) {
vxlan_ftable_entry_destroy(sc, fe);
error = 0;
} else
error = ENOENT;
VXLAN_WUNLOCK(sc);
return (error);
}
/*
 * Flush the forwarding table: dynamic entries only, or every entry
 * when VXLAN_CMD_FLAG_FLUSH_ALL is set.
 */
static int
vxlan_ctrl_flush(struct vxlan_softc *sc, void *arg)
{
	struct ifvxlancmd *cmd = arg;
	int all = (cmd->vxlcmd_flags & VXLAN_CMD_FLAG_FLUSH_ALL) != 0;

	VXLAN_WLOCK(sc);
	vxlan_ftable_flush(sc, all);
	VXLAN_WUNLOCK(sc);

	return (0);
}
/*
 * Dispatch a SIOC[GS]DRVSPEC request through the vxlan control table.
 * Validates the command index, direction (get vs. set), privilege and
 * argument size before copying the argument in, invoking the handler,
 * and copying the result back out when requested.
 */
static int
vxlan_ioctl_drvspec(struct vxlan_softc *sc, struct ifdrv *ifd, int get)
{
const struct vxlan_control *vc;
union {
struct ifvxlancfg cfg;
struct ifvxlancmd cmd;
} args;
int out, error;
if (ifd->ifd_cmd >= vxlan_control_table_size)
return (EINVAL);
bzero(&args, sizeof(args));
vc = &vxlan_control_table[ifd->ifd_cmd];
/* The request direction must match the command's copyout flag. */
out = (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYOUT) != 0;
if ((get != 0 && out == 0) || (get == 0 && out != 0))
return (EINVAL);
if (vc->vxlc_flags & VXLAN_CTRL_FLAG_SUSER) {
error = priv_check(curthread, PRIV_NET_VXLAN);
if (error)
return (error);
}
if (ifd->ifd_len != vc->vxlc_argsize ||
ifd->ifd_len > sizeof(args))
return (EINVAL);
if (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYIN) {
error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
if (error)
return (error);
}
error = vc->vxlc_func(sc, &args);
if (error)
return (error);
if (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYOUT) {
error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
if (error)
return (error);
}
return (0);
}
/*
 * if_ioctl handler for vxlan interfaces.  Driver-specific requests are
 * routed through vxlan_ioctl_drvspec(); everything unrecognized falls
 * through to ether_ioctl().
 */
static int
vxlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
struct rm_priotracker tracker;
struct vxlan_softc *sc;
struct ifreq *ifr;
struct ifdrv *ifd;
int error;
sc = ifp->if_softc;
ifr = (struct ifreq *) data;
ifd = (struct ifdrv *) data;
error = 0;
switch (cmd) {
case SIOCADDMULTI:
case SIOCDELMULTI:
/* Nothing to program; accept silently. */
break;
case SIOCGDRVSPEC:
case SIOCSDRVSPEC:
error = vxlan_ioctl_drvspec(sc, ifd, cmd == SIOCGDRVSPEC);
break;
case SIOCSIFFLAGS:
error = vxlan_ioctl_ifflags(sc);
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
error = ifmedia_ioctl(ifp, ifr, &sc->vxl_media, cmd);
break;
case SIOCSIFMTU:
if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VXLAN_MAX_MTU) {
error = EINVAL;
} else {
VXLAN_WLOCK(sc);
ifp->if_mtu = ifr->ifr_mtu;
/* Remember the MTU was set explicitly by the user. */
sc->vxl_flags |= VXLAN_FLAG_USER_MTU;
VXLAN_WUNLOCK(sc);
}
break;
case SIOCSIFCAP:
VXLAN_WLOCK(sc);
error = vxlan_set_reqcap(sc, ifp, ifr->ifr_reqcap);
if (error == 0)
vxlan_set_hwcaps(sc);
VXLAN_WUNLOCK(sc);
break;
case SIOCGTUNFIB:
VXLAN_RLOCK(sc, &tracker);
ifr->ifr_fib = sc->vxl_fibnum;
VXLAN_RUNLOCK(sc, &tracker);
break;
case SIOCSTUNFIB:
if ((error = priv_check(curthread, PRIV_NET_VXLAN)) != 0)
break;
if (ifr->ifr_fib >= rt_numfibs)
error = EINVAL;
else {
VXLAN_WLOCK(sc);
sc->vxl_fibnum = ifr->ifr_fib;
VXLAN_WUNLOCK(sc);
}
break;
default:
error = ether_ioctl(ifp, cmd, data);
break;
}
return (error);
}
#if defined(INET) || defined(INET6)
/*
 * Choose the outer UDP source port for an outbound frame within the
 * configured [min_port, max_port] range.  The mbuf's flow id is used
 * when the stack already hashed the flow; otherwise the inner Ethernet
 * header is hashed with the per-interface random key so that a single
 * flow keeps a stable port.
 */
static uint16_t
vxlan_pick_source_port(struct vxlan_softc *sc, struct mbuf *m)
{
	uint32_t h;
	int span;

	span = sc->vxl_max_port - sc->vxl_min_port + 1;
	if (M_HASHTYPE_ISHASH(m))
		h = m->m_pkthdr.flowid;
	else
		h = jenkins_hash(m->m_data, ETHER_HDR_LEN,
		    sc->vxl_port_hash_key);

	return (sc->vxl_min_port + (h % span));
}
/*
 * Fill in the UDP and VXLAN headers at offset 'ipoff' in the mbuf.
 * The UDP checksum is left zero; callers compute or offload it as
 * appropriate for the outer IP version.
 */
static void
vxlan_encap_header(struct vxlan_softc *sc, struct mbuf *m, int ipoff,
    uint16_t srcport, uint16_t dstport)
{
	struct vxlanudphdr *vh;
	int udplen;

	udplen = m->m_pkthdr.len - ipoff;
	MPASS(udplen >= sizeof(struct vxlanudphdr));
	vh = mtodo(m, ipoff);

	vh->vxlh_udp.uh_sport = srcport;
	vh->vxlh_udp.uh_dport = dstport;
	vh->vxlh_udp.uh_ulen = htons(udplen);
	vh->vxlh_udp.uh_sum = 0;

	/* Only the VNI-valid flag bit is set in the VXLAN header. */
	vh->vxlh_hdr.vxlh_flags = htonl(VXLAN_HDR_FLAGS_VALID_VNI);
	vh->vxlh_hdr.vxlh_vni = htonl(sc->vxl_vni << VXLAN_HDR_VNI_SHIFT);
}
#endif
#if defined(INET6) || defined(INET)
/*
* Return the CSUM_INNER_* equivalent of CSUM_* caps.
*/
/*
 * Map outer CSUM_* offload request bits to their CSUM_INNER_*
 * counterparts, OR'ed with the supplied encapsulation flag.
 */
static uint32_t
csum_flags_to_inner_flags(uint32_t csum_flags_in, const uint32_t encap)
{
	uint32_t inner = encap;
	const uint32_t v4 = CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP;

	/*
	 * csum_flags can request either v4 or v6 offload but not both.
	 * tcp_output always sets CSUM_TSO (both CSUM_IP_TSO and
	 * CSUM_IP6_TSO), so those bits cannot identify the IP version;
	 * the other checksum bits, always set alongside CSUM_TSO, can.
	 */
	if ((csum_flags_in & v4) != 0) {
		if ((csum_flags_in & CSUM_IP) != 0)
			inner |= CSUM_INNER_IP;
		if ((csum_flags_in & CSUM_IP_UDP) != 0)
			inner |= CSUM_INNER_IP_UDP;
		if ((csum_flags_in & CSUM_IP_TCP) != 0)
			inner |= CSUM_INNER_IP_TCP;
		if ((csum_flags_in & CSUM_IP_TSO) != 0)
			inner |= CSUM_INNER_IP_TSO;
	} else {
#ifdef INVARIANTS
		const uint32_t v6 = CSUM_IP6_UDP | CSUM_IP6_TCP;

		MPASS((csum_flags_in & v6) != 0);
#endif
		if ((csum_flags_in & CSUM_IP6_UDP) != 0)
			inner |= CSUM_INNER_IP6_UDP;
		if ((csum_flags_in & CSUM_IP6_TCP) != 0)
			inner |= CSUM_INNER_IP6_TCP;
		if ((csum_flags_in & CSUM_IP6_TSO) != 0)
			inner |= CSUM_INNER_IP6_TSO;
	}

	return (inner);
}
#endif
/*
 * Encapsulate an inner Ethernet frame in IPv4+UDP+VXLAN and hand it to
 * ip_output().  Consumes the mbuf on both success and failure.  The
 * caller holds a softc reference (VXLAN_ACQUIRE) but not the softc lock.
 */
static int
vxlan_encap4(struct vxlan_softc *sc, const union vxlan_sockaddr *fvxlsa,
struct mbuf *m)
{
#ifdef INET
struct ifnet *ifp;
struct ip *ip;
struct in_addr srcaddr, dstaddr;
uint16_t srcport, dstport;
int plen, mcast, error;
struct route route, *ro;
struct sockaddr_in *sin;
uint32_t csum_flags;
NET_EPOCH_ASSERT();
ifp = sc->vxl_ifp;
srcaddr = sc->vxl_src_addr.in4.sin_addr;
srcport = vxlan_pick_source_port(sc, m);
dstaddr = fvxlsa->in4.sin_addr;
dstport = fvxlsa->in4.sin_port;
/* Remember the inner frame length for the byte counter below. */
plen = m->m_pkthdr.len;
M_PREPEND(m, sizeof(struct ip) + sizeof(struct vxlanudphdr),
M_NOWAIT);
if (m == NULL) {
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
return (ENOBUFS);
}
ip = mtod(m, struct ip *);
ip->ip_tos = 0;
ip->ip_len = htons(m->m_pkthdr.len);
ip->ip_off = 0;
ip->ip_ttl = sc->vxl_ttl;
ip->ip_p = IPPROTO_UDP;
ip->ip_sum = 0;
ip->ip_src = srcaddr;
ip->ip_dst = dstaddr;
vxlan_encap_header(sc, m, sizeof(struct ip), srcport, dstport);
mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;
m->m_flags &= ~(M_MCAST | M_BCAST);
m->m_pkthdr.csum_flags &= CSUM_FLAGS_TX;
if (m->m_pkthdr.csum_flags != 0) {
/*
* HW checksum (L3 and/or L4) or TSO has been requested. Look
* up the ifnet for the outbound route and verify that the
* outbound ifnet can perform the requested operation on the
* inner frame.
*/
bzero(&route, sizeof(route));
ro = &route;
sin = (struct sockaddr_in *)&ro->ro_dst;
sin->sin_family = AF_INET;
sin->sin_len = sizeof(*sin);
sin->sin_addr = ip->ip_dst;
ro->ro_nh = fib4_lookup(M_GETFIB(m), ip->ip_dst, 0, NHR_NONE,
0);
if (ro->ro_nh == NULL) {
m_freem(m);
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
return (EHOSTUNREACH);
}
csum_flags = csum_flags_to_inner_flags(m->m_pkthdr.csum_flags,
CSUM_ENCAP_VXLAN);
if ((csum_flags & ro->ro_nh->nh_ifp->if_hwassist) !=
csum_flags) {
/* Rate-limit the diagnostic; drop rather than send bad frames. */
if (ppsratecheck(&sc->err_time, &sc->err_pps, 1)) {
const struct ifnet *nh_ifp = ro->ro_nh->nh_ifp;
if_printf(ifp, "interface %s is missing hwcaps "
"0x%08x, csum_flags 0x%08x -> 0x%08x, "
"hwassist 0x%08x\n", nh_ifp->if_xname,
csum_flags & ~(uint32_t)nh_ifp->if_hwassist,
m->m_pkthdr.csum_flags, csum_flags,
(uint32_t)nh_ifp->if_hwassist);
}
m_freem(m);
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
return (ENXIO);
}
m->m_pkthdr.csum_flags = csum_flags;
if (csum_flags &
(CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP6_UDP |
CSUM_INNER_IP_TCP | CSUM_INNER_IP6_TCP)) {
counter_u64_add(sc->vxl_stats.txcsum, 1);
if (csum_flags & CSUM_INNER_TSO)
counter_u64_add(sc->vxl_stats.tso, 1);
}
} else
ro = NULL;
error = ip_output(m, NULL, ro, 0, sc->vxl_im4o, NULL);
if (error == 0) {
if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
if_inc_counter(ifp, IFCOUNTER_OBYTES, plen);
if (mcast != 0)
if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
} else
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
return (error);
#else
m_freem(m);
return (ENOTSUP);
#endif
}
/*
 * Encapsulate an inner Ethernet frame in IPv6+UDP+VXLAN and hand it to
 * ip6_output().  Consumes the mbuf on both success and failure.  The
 * caller holds a softc reference (VXLAN_ACQUIRE) but not the softc lock.
 * Unlike IPv4, a zero UDP checksum is only allowed for destinations on
 * the configured zero-checksum port; otherwise the pseudo-header sum is
 * set up for the stack to complete.
 */
static int
vxlan_encap6(struct vxlan_softc *sc, const union vxlan_sockaddr *fvxlsa,
struct mbuf *m)
{
#ifdef INET6
struct ifnet *ifp;
struct ip6_hdr *ip6;
const struct in6_addr *srcaddr, *dstaddr;
uint16_t srcport, dstport;
int plen, mcast, error;
struct route_in6 route, *ro;
struct sockaddr_in6 *sin6;
uint32_t csum_flags;
NET_EPOCH_ASSERT();
ifp = sc->vxl_ifp;
srcaddr = &sc->vxl_src_addr.in6.sin6_addr;
srcport = vxlan_pick_source_port(sc, m);
dstaddr = &fvxlsa->in6.sin6_addr;
dstport = fvxlsa->in6.sin6_port;
/* Remember the inner frame length for the byte counter below. */
plen = m->m_pkthdr.len;
M_PREPEND(m, sizeof(struct ip6_hdr) + sizeof(struct vxlanudphdr),
M_NOWAIT);
if (m == NULL) {
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
return (ENOBUFS);
}
ip6 = mtod(m, struct ip6_hdr *);
ip6->ip6_flow = 0; /* BMV: Keep in forwarding entry? */
ip6->ip6_vfc = IPV6_VERSION;
ip6->ip6_plen = 0;
ip6->ip6_nxt = IPPROTO_UDP;
ip6->ip6_hlim = sc->vxl_ttl;
ip6->ip6_src = *srcaddr;
ip6->ip6_dst = *dstaddr;
vxlan_encap_header(sc, m, sizeof(struct ip6_hdr), srcport, dstport);
mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;
m->m_flags &= ~(M_MCAST | M_BCAST);
ro = NULL;
m->m_pkthdr.csum_flags &= CSUM_FLAGS_TX;
if (m->m_pkthdr.csum_flags != 0) {
/*
* HW checksum (L3 and/or L4) or TSO has been requested. Look
* up the ifnet for the outbound route and verify that the
* outbound ifnet can perform the requested operation on the
* inner frame.
*/
bzero(&route, sizeof(route));
ro = &route;
sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
sin6->sin6_family = AF_INET6;
sin6->sin6_len = sizeof(*sin6);
sin6->sin6_addr = ip6->ip6_dst;
ro->ro_nh = fib6_lookup(M_GETFIB(m), &ip6->ip6_dst, 0,
NHR_NONE, 0);
if (ro->ro_nh == NULL) {
m_freem(m);
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
return (EHOSTUNREACH);
}
csum_flags = csum_flags_to_inner_flags(m->m_pkthdr.csum_flags,
CSUM_ENCAP_VXLAN);
if ((csum_flags & ro->ro_nh->nh_ifp->if_hwassist) !=
csum_flags) {
/* Rate-limit the diagnostic; drop rather than send bad frames. */
if (ppsratecheck(&sc->err_time, &sc->err_pps, 1)) {
const struct ifnet *nh_ifp = ro->ro_nh->nh_ifp;
if_printf(ifp, "interface %s is missing hwcaps "
"0x%08x, csum_flags 0x%08x -> 0x%08x, "
"hwassist 0x%08x\n", nh_ifp->if_xname,
csum_flags & ~(uint32_t)nh_ifp->if_hwassist,
m->m_pkthdr.csum_flags, csum_flags,
(uint32_t)nh_ifp->if_hwassist);
}
m_freem(m);
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
return (ENXIO);
}
m->m_pkthdr.csum_flags = csum_flags;
if (csum_flags &
(CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP6_UDP |
CSUM_INNER_IP_TCP | CSUM_INNER_IP6_TCP)) {
counter_u64_add(sc->vxl_stats.txcsum, 1);
if (csum_flags & CSUM_INNER_TSO)
counter_u64_add(sc->vxl_stats.tso, 1);
}
} else if (ntohs(dstport) != V_zero_checksum_port) {
struct udphdr *hdr = mtodo(m, sizeof(struct ip6_hdr));
/* Seed the pseudo-header checksum; the stack finishes it. */
hdr->uh_sum = in6_cksum_pseudo(ip6,
m->m_pkthdr.len - sizeof(struct ip6_hdr), IPPROTO_UDP, 0);
m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
}
error = ip6_output(m, NULL, ro, 0, sc->vxl_im6o, NULL, NULL);
if (error == 0) {
if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
if_inc_counter(ifp, IFCOUNTER_OBYTES, plen);
if (mcast != 0)
if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
} else
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
return (error);
#else
m_freem(m);
return (ENOTSUP);
#endif
}
#define MTAG_VXLAN_LOOP 0x7876706c /* vxlp */
/*
 * if_transmit handler: look up the destination endpoint for the inner
 * frame's MAC address (falling back to the default forwarding entry)
 * and encapsulate over IPv4 or IPv6.  A softc reference is taken so
 * the encapsulation can run after the read lock is dropped; nesting is
 * bounded via an mbuf tag to prevent tunnel-in-tunnel loops.
 */
static int
vxlan_transmit(struct ifnet *ifp, struct mbuf *m)
{
struct rm_priotracker tracker;
union vxlan_sockaddr vxlsa;
struct vxlan_softc *sc;
struct vxlan_ftable_entry *fe;
struct ifnet *mcifp;
struct ether_header *eh;
int ipv4, error;
sc = ifp->if_softc;
eh = mtod(m, struct ether_header *);
fe = NULL;
mcifp = NULL;
ETHER_BPF_MTAP(ifp, m);
VXLAN_RLOCK(sc, &tracker);
M_SETFIB(m, sc->vxl_fibnum);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
VXLAN_RUNLOCK(sc, &tracker);
m_freem(m);
return (ENETDOWN);
}
if (__predict_false(if_tunnel_check_nesting(ifp, m, MTAG_VXLAN_LOOP,
max_vxlan_nesting) != 0)) {
VXLAN_RUNLOCK(sc, &tracker);
m_freem(m);
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
return (ELOOP);
}
/* Unicast frames may have a learned entry; everything else floods. */
if ((m->m_flags & (M_BCAST | M_MCAST)) == 0)
fe = vxlan_ftable_entry_lookup(sc, eh->ether_dhost);
if (fe == NULL)
fe = &sc->vxl_default_fe;
/* Copy the endpoint address out before the lock is dropped. */
vxlan_sockaddr_copy(&vxlsa, &fe->vxlfe_raddr.sa);
ipv4 = VXLAN_SOCKADDR_IS_IPV4(&vxlsa) != 0;
if (vxlan_sockaddr_in_multicast(&vxlsa) != 0)
mcifp = vxlan_multicast_if_ref(sc, ipv4);
VXLAN_ACQUIRE(sc);
VXLAN_RUNLOCK(sc, &tracker);
if (ipv4 != 0)
error = vxlan_encap4(sc, &vxlsa, m);
else
error = vxlan_encap6(sc, &vxlsa, m);
vxlan_release(sc);
if (mcifp != NULL)
if_rele(mcifp);
return (error);
}
/*
 * if_qflush handler: nothing to do, since vxlan does not queue packets
 * (if_transmit encapsulates and sends synchronously).
 */
static void
vxlan_qflush(struct ifnet *ifp __unused)
{
}
/*
 * UDP tunneling callback: validate the VXLAN header of a received UDP
 * datagram, strip the encapsulation, and hand the inner frame to
 * vxlan_input().  Always returns true (the packet was consumed); any
 * mbuf still owned on exit is freed.
 */
static bool
vxlan_rcv_udp_packet(struct mbuf *m, int offset, struct inpcb *inpcb,
const struct sockaddr *srcsa, void *xvso)
{
struct vxlan_socket *vso;
struct vxlan_header *vxh, vxlanhdr;
uint32_t vni;
int error __unused;
M_ASSERTPKTHDR(m);
vso = xvso;
offset += sizeof(struct udphdr);
if (m->m_pkthdr.len < offset + sizeof(struct vxlan_header))
goto out;
/* Copy the header out if it is not contiguous in the first mbuf. */
if (__predict_false(m->m_len < offset + sizeof(struct vxlan_header))) {
m_copydata(m, offset, sizeof(struct vxlan_header),
(caddr_t) &vxlanhdr);
vxh = &vxlanhdr;
} else
vxh = mtodo(m, offset);
/*
* Drop if there is a reserved bit set in either the flags or VNI
* fields of the header. This goes against the specification, but
* a bit set may indicate an unsupported new feature. This matches
* the behavior of the Linux implementation.
*/
if (vxh->vxlh_flags != htonl(VXLAN_HDR_FLAGS_VALID_VNI) ||
vxh->vxlh_vni & ~VXLAN_VNI_MASK)
goto out;
vni = ntohl(vxh->vxlh_vni) >> VXLAN_HDR_VNI_SHIFT;
/* Adjust to the start of the inner Ethernet frame. */
m_adj_decap(m, offset + sizeof(struct vxlan_header));
error = vxlan_input(vso, vni, &m, srcsa);
MPASS(error != 0 || m == NULL);
out:
if (m != NULL)
m_freem(m);
return (true);
}
/*
 * Deliver a decapsulated inner Ethernet frame to the vxlan interface
 * bound to the given VNI on this socket.  On success the mbuf is
 * consumed and *m0 is cleared; on failure the caller keeps ownership
 * of *m0 (unless it was freed here, in which case *m0 is NULL).
 * vxlan_socket_lookup_softc() returns the softc with a reference held,
 * released at 'out'.
 */
static int
vxlan_input(struct vxlan_socket *vso, uint32_t vni, struct mbuf **m0,
const struct sockaddr *sa)
{
struct vxlan_softc *sc;
struct ifnet *ifp;
struct mbuf *m;
struct ether_header *eh;
int error;
m = *m0;
if (m->m_pkthdr.len < ETHER_HDR_LEN)
return (EINVAL);
sc = vxlan_socket_lookup_softc(vso, vni);
if (sc == NULL)
return (ENOENT);
ifp = sc->vxl_ifp;
if (m->m_len < ETHER_HDR_LEN &&
(m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
*m0 = NULL;
error = ENOBUFS;
goto out;
}
eh = mtod(m, struct ether_header *);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
error = ENETDOWN;
goto out;
} else if (ifp == m->m_pkthdr.rcvif) {
/* XXX Does not catch more complex loops. */
error = EDEADLK;
goto out;
}
/* Learn the source MAC -> tunnel endpoint mapping if enabled. */
if (sc->vxl_flags & VXLAN_FLAG_LEARN)
vxlan_ftable_learn(sc, sa, eh->ether_shost);
m_clrprotoflags(m);
m->m_pkthdr.rcvif = ifp;
M_SETFIB(m, ifp->if_fib);
/* Translate inner checksum offload results to outer-frame flags. */
if (((ifp->if_capenable & IFCAP_RXCSUM &&
m->m_pkthdr.csum_flags & CSUM_INNER_L3_CALC) ||
(ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
!(m->m_pkthdr.csum_flags & CSUM_INNER_L3_CALC)))) {
uint32_t csum_flags = 0;
if (m->m_pkthdr.csum_flags & CSUM_INNER_L3_CALC)
csum_flags |= CSUM_L3_CALC;
if (m->m_pkthdr.csum_flags & CSUM_INNER_L3_VALID)
csum_flags |= CSUM_L3_VALID;
if (m->m_pkthdr.csum_flags & CSUM_INNER_L4_CALC)
csum_flags |= CSUM_L4_CALC;
if (m->m_pkthdr.csum_flags & CSUM_INNER_L4_VALID)
csum_flags |= CSUM_L4_VALID;
m->m_pkthdr.csum_flags = csum_flags;
counter_u64_add(sc->vxl_stats.rxcsum, 1);
} else {
/* clear everything */
m->m_pkthdr.csum_flags = 0;
m->m_pkthdr.csum_data = 0;
}
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
(*ifp->if_input)(ifp, m);
*m0 = NULL;
error = 0;
out:
vxlan_release(sc);
return (error);
}
/*
 * Allocate the per-interface counter(9) statistics; any counters
 * already allocated are released again on failure.
 */
static int
vxlan_stats_alloc(struct vxlan_softc *sc)
{
	struct vxlan_statistics *st = &sc->vxl_stats;

	if ((st->txcsum = counter_u64_alloc(M_WAITOK)) == NULL)
		goto failed;
	if ((st->tso = counter_u64_alloc(M_WAITOK)) == NULL)
		goto failed;
	if ((st->rxcsum = counter_u64_alloc(M_WAITOK)) == NULL)
		goto failed;

	return (0);
failed:
	vxlan_stats_free(sc);
	return (ENOMEM);
}
static void
vxlan_stats_free(struct vxlan_softc *sc)
{
struct vxlan_statistics *stats = &sc->vxl_stats;
if (stats->txcsum != NULL) {
counter_u64_free(stats->txcsum);
stats->txcsum = NULL;
}
if (stats->tso != NULL) {
counter_u64_free(stats->tso);
stats->tso = NULL;
}
if (stats->rxcsum != NULL) {
counter_u64_free(stats->rxcsum);
stats->rxcsum = NULL;
}
}
static void
vxlan_set_default_config(struct vxlan_softc *sc)
{
sc->vxl_flags |= VXLAN_FLAG_LEARN;
sc->vxl_vni = VXLAN_VNI_MAX;
sc->vxl_ttl = IPDEFTTL;
if (!vxlan_tunable_int(sc, "legacy_port", vxlan_legacy_port)) {
sc->vxl_src_addr.in4.sin_port = htons(VXLAN_PORT);
sc->vxl_dst_addr.in4.sin_port = htons(VXLAN_PORT);
} else {
sc->vxl_src_addr.in4.sin_port = htons(VXLAN_LEGACY_PORT);
sc->vxl_dst_addr.in4.sin_port = htons(VXLAN_LEGACY_PORT);
}
sc->vxl_min_port = V_ipport_firstauto;
sc->vxl_max_port = V_ipport_lastauto;
sc->vxl_ftable_max = VXLAN_FTABLE_MAX;
sc->vxl_ftable_timeout = VXLAN_FTABLE_TIMEOUT;
}
/*
 * Apply creation-time parameters supplied by the user (SIOCIFCREATE2)
 * on top of the defaults.  Parameters for address families the kernel
 * was not built with are rejected; individually invalid values (bad
 * VNI, TTL, table sizes) are silently ignored, keeping the default.
 */
static int
vxlan_set_user_config(struct vxlan_softc *sc, struct ifvxlanparam *vxlp)
{
#ifndef INET
if (vxlp->vxlp_with & (VXLAN_PARAM_WITH_LOCAL_ADDR4 |
VXLAN_PARAM_WITH_REMOTE_ADDR4))
return (EAFNOSUPPORT);
#endif
#ifndef INET6
if (vxlp->vxlp_with & (VXLAN_PARAM_WITH_LOCAL_ADDR6 |
VXLAN_PARAM_WITH_REMOTE_ADDR6))
return (EAFNOSUPPORT);
#else
if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_ADDR6) {
int error = vxlan_sockaddr_in6_embedscope(&vxlp->vxlp_local_sa);
if (error)
return (error);
}
if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_ADDR6) {
int error = vxlan_sockaddr_in6_embedscope(
&vxlp->vxlp_remote_sa);
if (error)
return (error);
}
#endif
if (vxlp->vxlp_with & VXLAN_PARAM_WITH_VNI) {
if (vxlan_check_vni(vxlp->vxlp_vni) == 0)
sc->vxl_vni = vxlp->vxlp_vni;
}
if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_ADDR4) {
sc->vxl_src_addr.in4.sin_len = sizeof(struct sockaddr_in);
sc->vxl_src_addr.in4.sin_family = AF_INET;
sc->vxl_src_addr.in4.sin_addr =
vxlp->vxlp_local_sa.in4.sin_addr;
} else if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_ADDR6) {
sc->vxl_src_addr.in6.sin6_len = sizeof(struct sockaddr_in6);
sc->vxl_src_addr.in6.sin6_family = AF_INET6;
sc->vxl_src_addr.in6.sin6_addr =
vxlp->vxlp_local_sa.in6.sin6_addr;
}
if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_ADDR4) {
sc->vxl_dst_addr.in4.sin_len = sizeof(struct sockaddr_in);
sc->vxl_dst_addr.in4.sin_family = AF_INET;
sc->vxl_dst_addr.in4.sin_addr =
vxlp->vxlp_remote_sa.in4.sin_addr;
} else if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_ADDR6) {
sc->vxl_dst_addr.in6.sin6_len = sizeof(struct sockaddr_in6);
sc->vxl_dst_addr.in6.sin6_family = AF_INET6;
sc->vxl_dst_addr.in6.sin6_addr =
vxlp->vxlp_remote_sa.in6.sin6_addr;
}
if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_PORT)
sc->vxl_src_addr.in4.sin_port = htons(vxlp->vxlp_local_port);
if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_PORT)
sc->vxl_dst_addr.in4.sin_port = htons(vxlp->vxlp_remote_port);
if (vxlp->vxlp_with & VXLAN_PARAM_WITH_PORT_RANGE) {
/* An inverted range is ignored, keeping the defaults. */
if (vxlp->vxlp_min_port <= vxlp->vxlp_max_port) {
sc->vxl_min_port = vxlp->vxlp_min_port;
sc->vxl_max_port = vxlp->vxlp_max_port;
}
}
if (vxlp->vxlp_with & VXLAN_PARAM_WITH_MULTICAST_IF)
strlcpy(sc->vxl_mc_ifname, vxlp->vxlp_mc_ifname, IFNAMSIZ);
if (vxlp->vxlp_with & VXLAN_PARAM_WITH_FTABLE_TIMEOUT) {
if (vxlan_check_ftable_timeout(vxlp->vxlp_ftable_timeout) == 0)
sc->vxl_ftable_timeout = vxlp->vxlp_ftable_timeout;
}
if (vxlp->vxlp_with & VXLAN_PARAM_WITH_FTABLE_MAX) {
if (vxlan_check_ftable_max(vxlp->vxlp_ftable_max) == 0)
sc->vxl_ftable_max = vxlp->vxlp_ftable_max;
}
if (vxlp->vxlp_with & VXLAN_PARAM_WITH_TTL) {
if (vxlan_check_ttl(vxlp->vxlp_ttl) == 0)
sc->vxl_ttl = vxlp->vxlp_ttl;
}
if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LEARN) {
if (vxlp->vxlp_learn == 0)
sc->vxl_flags &= ~VXLAN_FLAG_LEARN;
}
return (0);
}
/*
 * Validate and record the user-requested interface capabilities.
 * TSO depends on the matching transmit checksum offload: TSO is
 * dropped when checksums are being turned off, and refused when the
 * user tries to enable TSO with checksums already off.
 */
static int
vxlan_set_reqcap(struct vxlan_softc *sc, struct ifnet *ifp, int reqcap)
{
	int changed = reqcap ^ ifp->if_capenable;

	/* Disable TSO if tx checksums are disabled. */
	if ((changed & IFCAP_TXCSUM) != 0 && (reqcap & IFCAP_TXCSUM) == 0 &&
	    (reqcap & IFCAP_TSO4) != 0) {
		reqcap &= ~IFCAP_TSO4;
		if_printf(ifp, "tso4 disabled due to -txcsum.\n");
	}
	if ((changed & IFCAP_TXCSUM_IPV6) != 0 &&
	    (reqcap & IFCAP_TXCSUM_IPV6) == 0 &&
	    (reqcap & IFCAP_TSO6) != 0) {
		reqcap &= ~IFCAP_TSO6;
		if_printf(ifp, "tso6 disabled due to -txcsum6.\n");
	}

	/* Do not enable TSO if tx checksums are disabled. */
	if ((changed & IFCAP_TSO4) != 0 && (reqcap & IFCAP_TSO4) != 0 &&
	    (reqcap & IFCAP_TXCSUM) == 0) {
		if_printf(ifp, "enable txcsum first.\n");
		return (EAGAIN);
	}
	if ((changed & IFCAP_TSO6) != 0 && (reqcap & IFCAP_TSO6) != 0 &&
	    (reqcap & IFCAP_TXCSUM_IPV6) == 0) {
		if_printf(ifp, "enable txcsum6 first.\n");
		return (EAGAIN);
	}

	sc->vxl_reqcap = reqcap;
	return (0);
}
/*
* A VXLAN interface inherits the capabilities of the vxlandev or the interface
* hosting the vxlanlocal address.
*/
static void
vxlan_set_hwcaps(struct vxlan_softc *sc)
{
struct epoch_tracker et;
struct ifnet *p;
struct ifaddr *ifa;
u_long hwa;
int cap, ena;
bool rel;
struct ifnet *ifp = sc->vxl_ifp;
/* reset caps */
ifp->if_capabilities &= VXLAN_BASIC_IFCAPS;
ifp->if_capenable &= VXLAN_BASIC_IFCAPS;
ifp->if_hwassist = 0;
NET_EPOCH_ENTER(et);
CURVNET_SET(ifp->if_vnet);
rel = false;
p = NULL;
/*
* Find the host interface: prefer the configured multicast interface
* (ifunit_ref takes a reference we must release), otherwise look up
* the interface owning the configured source address.
*/
if (sc->vxl_mc_ifname[0] != '\0') {
rel = true;
p = ifunit_ref(sc->vxl_mc_ifname);
} else if (vxlan_sockaddr_in_any(&sc->vxl_src_addr) == 0) {
if (sc->vxl_src_addr.sa.sa_family == AF_INET) {
struct sockaddr_in in4 = sc->vxl_src_addr.in4;
in4.sin_port = 0;
ifa = ifa_ifwithaddr((struct sockaddr *)&in4);
if (ifa != NULL)
p = ifa->ifa_ifp;
} else if (sc->vxl_src_addr.sa.sa_family == AF_INET6) {
struct sockaddr_in6 in6 = sc->vxl_src_addr.in6;
in6.sin6_port = 0;
ifa = ifa_ifwithaddr((struct sockaddr *)&in6);
if (ifa != NULL)
p = ifa->ifa_ifp;
}
}
if (p == NULL)
goto done;
cap = ena = hwa = 0;
/* checksum offload */
if (p->if_capabilities & IFCAP_VXLAN_HWCSUM)
cap |= p->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
if (p->if_capenable & IFCAP_VXLAN_HWCSUM) {
ena |= sc->vxl_reqcap & p->if_capenable &
(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
if (ena & IFCAP_TXCSUM) {
if (p->if_hwassist & CSUM_INNER_IP)
hwa |= CSUM_IP;
if (p->if_hwassist & CSUM_INNER_IP_UDP)
hwa |= CSUM_IP_UDP;
if (p->if_hwassist & CSUM_INNER_IP_TCP)
hwa |= CSUM_IP_TCP;
}
if (ena & IFCAP_TXCSUM_IPV6) {
if (p->if_hwassist & CSUM_INNER_IP6_UDP)
hwa |= CSUM_IP6_UDP;
if (p->if_hwassist & CSUM_INNER_IP6_TCP)
hwa |= CSUM_IP6_TCP;
}
}
/* hardware TSO */
if (p->if_capabilities & IFCAP_VXLAN_HWTSO) {
cap |= p->if_capabilities & IFCAP_TSO;
if (p->if_hw_tsomax > IP_MAXPACKET - ifp->if_hdrlen)
ifp->if_hw_tsomax = IP_MAXPACKET - ifp->if_hdrlen;
else
ifp->if_hw_tsomax = p->if_hw_tsomax;
/* XXX: tsomaxsegcount decrement is cxgbe specific */
ifp->if_hw_tsomaxsegcount = p->if_hw_tsomaxsegcount - 1;
ifp->if_hw_tsomaxsegsize = p->if_hw_tsomaxsegsize;
}
if (p->if_capenable & IFCAP_VXLAN_HWTSO) {
ena |= sc->vxl_reqcap & p->if_capenable & IFCAP_TSO;
if (ena & IFCAP_TSO) {
if (p->if_hwassist & CSUM_INNER_IP_TSO)
hwa |= CSUM_IP_TSO;
if (p->if_hwassist & CSUM_INNER_IP6_TSO)
hwa |= CSUM_IP6_TSO;
}
}
ifp->if_capabilities |= cap;
ifp->if_capenable |= ena;
ifp->if_hwassist |= hwa;
if (rel)
if_rele(p);
done:
CURVNET_RESTORE();
NET_EPOCH_EXIT(et);
}
/*
 * Clone creator for vxlan(4) interfaces: allocate and initialize the
 * softc, apply any user-supplied creation parameters, then attach the
 * Ethernet interface.  Returns 0 and the new ifnet via *ifpp, or an
 * errno with all partial state released.
 */
static int
vxlan_clone_create(struct if_clone *ifc, char *name, size_t len,
    struct ifc_data *ifd, struct ifnet **ifpp)
{
	struct vxlan_softc *sc;
	struct ifnet *ifp;
	struct ifvxlanparam vxlp;
	int error;

	sc = malloc(sizeof(struct vxlan_softc), M_VXLAN, M_WAITOK | M_ZERO);
	sc->vxl_unit = ifd->unit;
	sc->vxl_fibnum = curthread->td_proc->p_fibnum;
	vxlan_set_default_config(sc);
	error = vxlan_stats_alloc(sc);
	if (error != 0)
		goto fail;

	if (ifd->params != NULL) {
		error = ifc_copyin(ifd, &vxlp, sizeof(vxlp));
		if (error)
			goto fail;
		error = vxlan_set_user_config(sc, &vxlp);
		if (error)
			goto fail;
	}

	/* if_alloc(9) cannot fail; no NULL check is needed. */
	ifp = if_alloc(IFT_ETHER);
	sc->vxl_ifp = ifp;
	rm_init(&sc->vxl_lock, "vxlanrm");
	callout_init_rw(&sc->vxl_callout, &sc->vxl_lock, 0);
	sc->vxl_port_hash_key = arc4random();
	vxlan_ftable_init(sc);
	vxlan_sysctl_setup(sc);

	ifp->if_softc = sc;
	if_initname(ifp, vxlan_name, ifd->unit);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vxlan_init;
	ifp->if_ioctl = vxlan_ioctl;
	ifp->if_transmit = vxlan_transmit;
	ifp->if_qflush = vxlan_qflush;
	ifp->if_capabilities = VXLAN_BASIC_IFCAPS;
	ifp->if_capenable = VXLAN_BASIC_IFCAPS;
	sc->vxl_reqcap = -1;
	vxlan_set_hwcaps(sc);

	ifmedia_init(&sc->vxl_media, 0, vxlan_media_change, vxlan_media_status);
	ifmedia_add(&sc->vxl_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->vxl_media, IFM_ETHER | IFM_AUTO);

	ether_gen_addr(ifp, &sc->vxl_hwaddr);
	ether_ifattach(ifp, sc->vxl_hwaddr.octet);
	ifp->if_baudrate = 0;

	VXLAN_WLOCK(sc);
	vxlan_setup_interface_hdrlen(sc);
	VXLAN_WUNLOCK(sc);
	*ifpp = ifp;
	return (0);

fail:
	/*
	 * Release any counters allocated before the failure; the
	 * counters are NULL (M_ZERO) when vxlan_stats_alloc() itself
	 * failed, which vxlan_stats_free() handles.
	 */
	vxlan_stats_free(sc);
	free(sc, M_VXLAN);
	return (error);
}
/*
 * if_clone destroy callback: stop the interface, flush the learned
 * forwarding table, detach from the network stack, and release every
 * resource acquired in vxlan_clone_create().  Always succeeds.
 */
static int
vxlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
{
struct vxlan_softc *sc;
sc = ifp->if_softc;
vxlan_teardown(sc);
/* 1 => flush all entries, presumably including static ones — confirm
 * against vxlan_ftable_flush(). */
vxlan_ftable_flush(sc, 1);
ether_ifdetach(ifp);
if_free(ifp);
ifmedia_removeall(&sc->vxl_media);
vxlan_ftable_fini(sc);
vxlan_sysctl_destroy(sc);
rm_destroy(&sc->vxl_lock);
vxlan_stats_free(sc);
free(sc, M_VXLAN);
return (0);
}
/*
 * Hash an Ethernet address into the forwarding table, seeded with the
 * per-softc hash key.  BMV: Taken from if_bridge.
 */
static uint32_t
vxlan_mac_hash(struct vxlan_softc *sc, const uint8_t *addr)
{
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->vxl_ftable_hash_key;

	/*
	 * Pack the six address bytes into the a/b words.  Bug fix: cast
	 * to uint32_t before shifting — "addr[3] << 24" shifts a promoted
	 * (signed) int into the sign bit, which is undefined behavior
	 * whenever addr[3] >= 0x80.
	 */
	b += (uint32_t)addr[5] << 8;
	b += addr[4];

	a += (uint32_t)addr[3] << 24;
	a += (uint32_t)addr[2] << 16;
	a += (uint32_t)addr[1] << 8;
	a += addr[0];

/*
 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
 */
#define	mix(a, b, c)						\
do {								\
	a -= b; a -= c; a ^= (c >> 13);				\
	b -= c; b -= a; b ^= (a << 8);				\
	c -= a; c -= b; c ^= (b >> 13);				\
	a -= b; a -= c; a ^= (c >> 12);				\
	b -= c; b -= a; b ^= (a << 16);				\
	c -= a; c -= b; c ^= (b >> 5);				\
	a -= b; a -= c; a ^= (c >> 3);				\
	b -= c; b -= a; b ^= (a << 10);				\
	c -= a; c -= b; c ^= (b >> 15);				\
} while (0)

	mix(a, b, c);
#undef mix

	return (c);
}
/*
 * ifmedia change callback.  Media cannot be changed on a vxlan
 * interface, so accept any request silently.
 */
static int
vxlan_media_change(struct ifnet *ifp)
{

	(void)ifp;
	return (0);
}
/*
 * ifmedia status callback: a vxlan interface has no physical link to
 * probe, so always report valid/active full-duplex Ethernet.
 */
static void
vxlan_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
ifmr->ifm_active = IFM_ETHER | IFM_FDX;
}
/*
 * Compare a cached vxlan sockaddr against a caller-supplied sockaddr.
 * Compares the first vxladdr->sa.sa_len bytes; returns 0 when equal
 * (bcmp semantics).  Assumes sa is at least sa_len bytes long.
 */
static int
vxlan_sockaddr_cmp(const union vxlan_sockaddr *vxladdr,
const struct sockaddr *sa)
{
return (bcmp(&vxladdr->sa, sa, vxladdr->sa.sa_len));
}
/*
 * Copy an AF_INET or AF_INET6 sockaddr into the vxlan sockaddr union,
 * zeroing the destination first and normalizing the stored length.
 */
static void
vxlan_sockaddr_copy(union vxlan_sockaddr *vxladdr,
const struct sockaddr *sa)
{
MPASS(sa->sa_family == AF_INET || sa->sa_family == AF_INET6);
bzero(vxladdr, sizeof(*vxladdr));
if (sa->sa_family == AF_INET) {
vxladdr->in4 = *satoconstsin(sa);
/* Force a canonical length regardless of the input's sin_len. */
vxladdr->in4.sin_len = sizeof(struct sockaddr_in);
} else if (sa->sa_family == AF_INET6) {
vxladdr->in6 = *satoconstsin6(sa);
vxladdr->in6.sin6_len = sizeof(struct sockaddr_in6);
}
}
/*
 * Address-only comparison: nonzero if sa's IPv4/IPv6 address equals the
 * one cached in vxladdr.  Ports and other fields are ignored; any other
 * address family compares unequal.
 */
static int
vxlan_sockaddr_in_equal(const union vxlan_sockaddr *vxladdr,
const struct sockaddr *sa)
{
int equal;
if (sa->sa_family == AF_INET) {
const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
equal = in4->s_addr == vxladdr->in4.sin_addr.s_addr;
} else if (sa->sa_family == AF_INET6) {
const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
equal = IN6_ARE_ADDR_EQUAL(in6, &vxladdr->in6.sin6_addr);
} else
equal = 0;
return (equal);
}
/*
 * Copy only the address portion of sa into vxladdr, also setting the
 * family and length.  Unlike vxlan_sockaddr_copy() this does not zero
 * the destination first, so fields such as the port are left untouched.
 */
static void
vxlan_sockaddr_in_copy(union vxlan_sockaddr *vxladdr,
const struct sockaddr *sa)
{
MPASS(sa->sa_family == AF_INET || sa->sa_family == AF_INET6);
if (sa->sa_family == AF_INET) {
const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
vxladdr->in4.sin_family = AF_INET;
vxladdr->in4.sin_len = sizeof(struct sockaddr_in);
vxladdr->in4.sin_addr = *in4;
} else if (sa->sa_family == AF_INET6) {
const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
vxladdr->in6.sin6_family = AF_INET6;
vxladdr->in6.sin6_len = sizeof(struct sockaddr_in6);
vxladdr->in6.sin6_addr = *in6;
}
}
/*
 * Report whether the cached sockaddr's family is usable with this
 * kernel: AF_INET/AF_INET6 only when the corresponding protocol was
 * compiled in, and AF_UNSPEC only when the caller allows it.
 */
static int
vxlan_sockaddr_supported(const union vxlan_sockaddr *vxladdr, int unspec)
{

	switch (vxladdr->sa.sa_family) {
	case AF_UNSPEC:
		return (unspec != 0);
	case AF_INET:
#ifdef INET
		return (1);
#else
		return (0);
#endif
	case AF_INET6:
#ifdef INET6
		return (1);
#else
		return (0);
#endif
	default:
		return (0);
	}
}
/*
 * Return 1 if vxladdr holds the IPv4/IPv6 wildcard ("any") address,
 * 0 if it holds a specific address, or -1 when the stored family is
 * neither AF_INET nor AF_INET6.
 */
static int
vxlan_sockaddr_in_any(const union vxlan_sockaddr *vxladdr)
{
const struct sockaddr *sa;
int any;
sa = &vxladdr->sa;
if (sa->sa_family == AF_INET) {
const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
any = in4->s_addr == INADDR_ANY;
} else if (sa->sa_family == AF_INET6) {
const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
any = IN6_IS_ADDR_UNSPECIFIED(in6);
} else
any = -1;
return (any);
}
/*
 * Return 1 if vxladdr holds an IPv4/IPv6 multicast group address,
 * 0 if not, or -1 when the stored family is neither AF_INET nor
 * AF_INET6.
 */
static int
vxlan_sockaddr_in_multicast(const union vxlan_sockaddr *vxladdr)
{
const struct sockaddr *sa;
int mc;
sa = &vxladdr->sa;
if (sa->sa_family == AF_INET) {
const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
mc = IN_MULTICAST(ntohl(in4->s_addr));
} else if (sa->sa_family == AF_INET6) {
const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
mc = IN6_IS_ADDR_MULTICAST(in6);
} else
mc = -1;
return (mc);
}
/*
 * Embed the IPv6 scope zone id into the cached address (kernel-internal
 * KAME form).  Returns 0 or an errno; EAFNOSUPPORT on kernels built
 * without INET6.  Caller must ensure the union holds an IPv6 address.
 */
static int
vxlan_sockaddr_in6_embedscope(union vxlan_sockaddr *vxladdr)
{
int error;
MPASS(VXLAN_SOCKADDR_IS_IPV6(vxladdr));
#ifdef INET6
error = sa6_embedscope(&vxladdr->in6, V_ip6_use_defzone);
#else
error = EAFNOSUPPORT;
#endif
return (error);
}
/*
 * Return nonzero only when the configuration may be changed: the
 * interface must not be running and no init or teardown may be in
 * progress.  The softc lock must be held (asserted below).
 */
static int
vxlan_can_change_config(struct vxlan_softc *sc)
{
struct ifnet *ifp;
ifp = sc->vxl_ifp;
VXLAN_LOCK_ASSERT(sc);
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
return (0);
if (sc->vxl_flags & (VXLAN_FLAG_INIT | VXLAN_FLAG_TEARDOWN))
return (0);
return (1);
}
/*
 * Validate a candidate VNI; returns nonzero when the value is out of
 * range (>= VXLAN_VNI_MAX), zero when acceptable.
 */
static int
vxlan_check_vni(uint32_t vni)
{

	if (vni >= VXLAN_VNI_MAX)
		return (1);
	return (0);
}
/*
 * Validate a candidate TTL; returns nonzero when it exceeds MAXTTL,
 * zero when acceptable.
 */
static int
vxlan_check_ttl(int ttl)
{

	if (ttl > MAXTTL)
		return (1);
	return (0);
}
/*
 * Validate a forwarding-table prune timeout; returns nonzero when it
 * exceeds VXLAN_FTABLE_MAX_TIMEOUT, zero when acceptable.
 */
static int
vxlan_check_ftable_timeout(uint32_t timeout)
{

	if (timeout > VXLAN_FTABLE_MAX_TIMEOUT)
		return (1);
	return (0);
}
/*
 * Validate a forwarding-table size limit; returns nonzero when it
 * exceeds VXLAN_FTABLE_MAX, zero when acceptable.
 */
static int
vxlan_check_ftable_max(uint32_t max)
{

	if (max > VXLAN_FTABLE_MAX)
		return (1);
	return (0);
}
/*
 * Create the per-interface sysctl tree (net.link.vxlan.<unit>) with
 * "ftable" and "stats" subtrees.  Called from vxlan_clone_create();
 * torn down by vxlan_sysctl_destroy().
 */
static void
vxlan_sysctl_setup(struct vxlan_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *node;
	struct vxlan_statistics *stats;
	char namebuf[8];

	ctx = &sc->vxl_sysctl_ctx;
	stats = &sc->vxl_stats;
	snprintf(namebuf, sizeof(namebuf), "%d", sc->vxl_unit);

	sysctl_ctx_init(ctx);
	sc->vxl_sysctl_node = SYSCTL_ADD_NODE(ctx,
	    SYSCTL_STATIC_CHILDREN(_net_link_vxlan), OID_AUTO, namebuf,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");

	node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->vxl_sysctl_node),
	    OID_AUTO, "ftable", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "count",
	    CTLFLAG_RD, &sc->vxl_ftable_cnt, 0,
	    "Number of entries in forwarding table");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "max",
	    CTLFLAG_RD, &sc->vxl_ftable_max, 0,
	    "Maximum number of entries allowed in forwarding table");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "timeout",
	    CTLFLAG_RD, &sc->vxl_ftable_timeout, 0,
	    "Number of seconds between prunes of the forwarding table");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "dump",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
	    sc, 0, vxlan_ftable_sysctl_dump, "A",
	    "Dump the forwarding table entries");

	node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->vxl_sysctl_node),
	    OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
	/* Bug fix: correct the user-visible typo "Fowarding". */
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO,
	    "ftable_nospace", CTLFLAG_RD, &stats->ftable_nospace, 0,
	    "Forwarding table reached maximum entries");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO,
	    "ftable_lock_upgrade_failed", CTLFLAG_RD,
	    &stats->ftable_lock_upgrade_failed, 0,
	    "Forwarding table update required lock upgrade");

	SYSCTL_ADD_COUNTER_U64(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "txcsum",
	    CTLFLAG_RD, &stats->txcsum,
	    "# of times hardware assisted with tx checksum");
	SYSCTL_ADD_COUNTER_U64(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "tso",
	    CTLFLAG_RD, &stats->tso, "# of times hardware assisted with TSO");
	SYSCTL_ADD_COUNTER_U64(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "rxcsum",
	    CTLFLAG_RD, &stats->rxcsum,
	    "# of times hardware assisted with rx checksum");
}
/*
 * Tear down the sysctl tree created by vxlan_sysctl_setup(); freeing
 * the context removes every OID that was added through it.
 */
static void
vxlan_sysctl_destroy(struct vxlan_softc *sc)
{
sysctl_ctx_free(&sc->vxl_sysctl_ctx);
sc->vxl_sysctl_node = NULL;
}
/*
 * Fetch an optional per-unit loader tunable,
 * "net.link.vxlan.<unit>.<knob>", returning 'def' unchanged when the
 * tunable is not set.
 */
static int
vxlan_tunable_int(struct vxlan_softc *sc, const char *knob, int def)
{
char path[64];
snprintf(path, sizeof(path), "net.link.vxlan.%d.%s",
sc->vxl_unit, knob);
TUNABLE_INT_FETCH(path, &def);
return (def);
}
/*
 * ifnet departure eventhandler.  When a multicast-capable interface
 * goes away, collect (under the global list lock) every vxlan softc
 * that was using it, then tear each of those interfaces down.  Renames
 * are ignored since the ifnet is not actually departing.
 */
static void
vxlan_ifdetach_event(void *arg __unused, struct ifnet *ifp)
{
struct vxlan_softc_head list;
struct vxlan_socket *vso;
struct vxlan_softc *sc, *tsc;
LIST_INIT(&list);
if (ifp->if_flags & IFF_RENAMING)
return;
if ((ifp->if_flags & IFF_MULTICAST) == 0)
return;
VXLAN_LIST_LOCK();
LIST_FOREACH(vso, &vxlan_socket_list, vxlso_entry)
vxlan_socket_ifdetach(vso, ifp, &list);
VXLAN_LIST_UNLOCK();
LIST_FOREACH_SAFE(sc, &list, vxl_ifdetach_list, tsc) {
LIST_REMOVE(sc, vxl_ifdetach_list);
sx_xlock(&vxlan_sx);
VXLAN_WLOCK(sc);
/* Let a concurrent vxlan_init() finish before tearing down;
 * vxlan_teardown_locked() presumably drops the softc lock —
 * confirm against its definition. */
if (sc->vxl_flags & VXLAN_FLAG_INIT)
vxlan_init_wait(sc);
vxlan_teardown_locked(sc);
sx_xunlock(&vxlan_sx);
}
}
/*
 * Module load: initialize the global socket list and its mutex,
 * register for ifnet departure events, and attach the "vxlan" cloner.
 */
static void
vxlan_load(void)
{
mtx_init(&vxlan_list_mtx, "vxlan list", NULL, MTX_DEF);
LIST_INIT(&vxlan_socket_list);
vxlan_ifdetach_event_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
vxlan_ifdetach_event, NULL, EVENTHANDLER_PRI_ANY);
struct if_clone_addreq req = {
.create_f = vxlan_clone_create,
.destroy_f = vxlan_clone_destroy,
.flags = IFC_F_AUTOUNIT,
};
vxlan_cloner = ifc_attach_cloner(vxlan_name, &req);
}
/*
 * Module unload: undo vxlan_load().  Detaching the cloner destroys all
 * remaining interfaces, so the socket list must be empty afterwards.
 */
static void
vxlan_unload(void)
{
EVENTHANDLER_DEREGISTER(ifnet_departure_event,
vxlan_ifdetach_event_tag);
ifc_detach_cloner(vxlan_cloner);
mtx_destroy(&vxlan_list_mtx);
MPASS(LIST_EMPTY(&vxlan_socket_list));
}
/*
 * Module event handler: set up global state on MOD_LOAD, tear it down
 * on MOD_UNLOAD; any other event is unsupported.
 */
static int
vxlan_modevent(module_t mod, int type, void *unused)
{

	switch (type) {
	case MOD_LOAD:
		vxlan_load();
		return (0);
	case MOD_UNLOAD:
		vxlan_unload();
		return (0);
	default:
		return (ENOTSUP);
	}
}
static moduledata_t vxlan_mod = {
"if_vxlan",
vxlan_modevent,
0
};
DECLARE_MODULE(if_vxlan, vxlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_vxlan, 1);
diff --git a/sys/net/iflib.c b/sys/net/iflib.c
index 8129fb3f2f0b..c6a01624436c 100644
--- a/sys/net/iflib.c
+++ b/sys/net/iflib.c
@@ -1,7183 +1,7179 @@
/*-
* Copyright (c) 2014-2018, Matthew Macy <mmacy@mattmacy.io>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Neither the name of Matthew Macy nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_acpi.h"
#include "opt_sched.h"
#include <sys/param.h>
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/kobj.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
#include <sys/limits.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/mp_ring.h>
#include <net/debugnet.h>
#include <net/pfil.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_lro.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <dev/led/led.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <net/iflib.h>
#include "ifdi_if.h"
#ifdef PCI_IOV
#include <dev/pci/pci_iov.h>
#endif
#include <sys/bitstring.h>
/*
* enable accounting of every mbuf as it comes in to and goes out of
* iflib's software descriptor references
*/
#define MEMORY_LOGGING 0
/*
* Enable mbuf vectors for compressing long mbuf chains
*/
/*
* NB:
* - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
* we prefetch needs to be determined by the time spent in m_free vis a vis
* the cost of a prefetch. This will of course vary based on the workload:
* - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
* is quite expensive, thus suggesting very little prefetch.
* - small packet forwarding which is just returning a single mbuf to
* UMA will typically be very fast vis a vis the cost of a memory
* access.
*/
/*
* File organization:
* - private structures
* - iflib private utility functions
* - ifnet functions
* - vlan registry and other exported functions
* - iflib public core functions
*
*
*/
static MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");
#define IFLIB_RXEOF_MORE (1U << 0)
#define IFLIB_RXEOF_EMPTY (2U << 0)
struct iflib_txq;
typedef struct iflib_txq *iflib_txq_t;
struct iflib_rxq;
typedef struct iflib_rxq *iflib_rxq_t;
struct iflib_fl;
typedef struct iflib_fl *iflib_fl_t;
struct iflib_ctx;
static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
static void iflib_timer(void *arg);
static void iflib_tqg_detach(if_ctx_t ctx);
typedef struct iflib_filter_info {
driver_filter_t *ifi_filter;
void *ifi_filter_arg;
struct grouptask *ifi_task;
void *ifi_ctx;
} *iflib_filter_info_t;
struct iflib_ctx {
KOBJ_FIELDS;
/*
* Pointer to hardware driver's softc
*/
void *ifc_softc;
device_t ifc_dev;
if_t ifc_ifp;
cpuset_t ifc_cpus;
if_shared_ctx_t ifc_sctx;
struct if_softc_ctx ifc_softc_ctx;
struct sx ifc_ctx_sx;
struct mtx ifc_state_mtx;
iflib_txq_t ifc_txqs;
iflib_rxq_t ifc_rxqs;
uint32_t ifc_if_flags;
uint32_t ifc_flags;
uint32_t ifc_max_fl_buf_size;
uint32_t ifc_rx_mbuf_sz;
int ifc_link_state;
int ifc_watchdog_events;
struct cdev *ifc_led_dev;
struct resource *ifc_msix_mem;
struct if_irq ifc_legacy_irq;
struct grouptask ifc_admin_task;
struct grouptask ifc_vflr_task;
struct iflib_filter_info ifc_filter_info;
struct ifmedia ifc_media;
struct ifmedia *ifc_mediap;
struct sysctl_oid *ifc_sysctl_node;
uint16_t ifc_sysctl_ntxqs;
uint16_t ifc_sysctl_nrxqs;
uint16_t ifc_sysctl_qs_eq_override;
uint16_t ifc_sysctl_rx_budget;
uint16_t ifc_sysctl_tx_abdicate;
uint16_t ifc_sysctl_core_offset;
#define CORE_OFFSET_UNSPECIFIED 0xffff
uint8_t ifc_sysctl_separate_txrx;
uint8_t ifc_sysctl_use_logical_cores;
uint16_t ifc_sysctl_extra_msix_vectors;
bool ifc_cpus_are_physical_cores;
qidx_t ifc_sysctl_ntxds[8];
qidx_t ifc_sysctl_nrxds[8];
struct if_txrx ifc_txrx;
#define isc_txd_encap ifc_txrx.ift_txd_encap
#define isc_txd_flush ifc_txrx.ift_txd_flush
#define isc_txd_credits_update ifc_txrx.ift_txd_credits_update
#define isc_rxd_available ifc_txrx.ift_rxd_available
#define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get
#define isc_rxd_refill ifc_txrx.ift_rxd_refill
#define isc_rxd_flush ifc_txrx.ift_rxd_flush
#define isc_legacy_intr ifc_txrx.ift_legacy_intr
#define isc_txq_select ifc_txrx.ift_txq_select
#define isc_txq_select_v2 ifc_txrx.ift_txq_select_v2
eventhandler_tag ifc_vlan_attach_event;
eventhandler_tag ifc_vlan_detach_event;
struct ether_addr ifc_mac;
};
/* Return the driver's private softc stored in this iflib context. */
void *
iflib_get_softc(if_ctx_t ctx)
{
return (ctx->ifc_softc);
}
/* Return the newbus device backing this iflib context. */
device_t
iflib_get_dev(if_ctx_t ctx)
{
return (ctx->ifc_dev);
}
/* Return the ifnet associated with this iflib context. */
if_t
iflib_get_ifp(if_ctx_t ctx)
{
return (ctx->ifc_ifp);
}
/* Return the active ifmedia pointer (ifc_mediap) for this context. */
struct ifmedia *
iflib_get_media(if_ctx_t ctx)
{
return (ctx->ifc_mediap);
}
/* Store the given 6-byte Ethernet address in the context (ifc_mac). */
void
iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
{
bcopy(mac, ctx->ifc_mac.octet, ETHER_ADDR_LEN);
}
/* Return a pointer to the embedded softc context structure. */
if_softc_ctx_t
iflib_get_softc_ctx(if_ctx_t ctx)
{
return (&ctx->ifc_softc_ctx);
}
/* Return the driver-provided shared context for this iflib context. */
if_shared_ctx_t
iflib_get_sctx(if_ctx_t ctx)
{
return (ctx->ifc_sctx);
}
/* Return the sysctl-configured count of extra MSI-X vectors to request. */
uint16_t
iflib_get_extra_msix_vectors_sysctl(if_ctx_t ctx)
{
return (ctx->ifc_sysctl_extra_msix_vectors);
}
#define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
#define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
#define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & (CACHE_LINE_SIZE-1)))
#define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
#define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)
typedef struct iflib_sw_rx_desc_array {
bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */
struct mbuf **ifsd_m; /* pkthdr mbufs */
caddr_t *ifsd_cl; /* direct cluster pointer for rx */
bus_addr_t *ifsd_ba; /* bus addr of cluster for rx */
} iflib_rxsd_array_t;
typedef struct iflib_sw_tx_desc_array {
bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */
bus_dmamap_t *ifsd_tso_map; /* bus_dma maps for TSO packet */
struct mbuf **ifsd_m; /* pkthdr mbufs */
} if_txsd_vec_t;
/* magic number that should be high enough for any hardware */
#define IFLIB_MAX_TX_SEGS 128
#define IFLIB_RX_COPY_THRESH 128
#define IFLIB_MAX_RX_REFRESH 32
/* The minimum descriptors per second before we start coalescing */
#define IFLIB_MIN_DESC_SEC 16384
#define IFLIB_DEFAULT_TX_UPDATE_FREQ 16
#define IFLIB_QUEUE_IDLE 0
#define IFLIB_QUEUE_HUNG 1
#define IFLIB_QUEUE_WORKING 2
/* maximum number of txqs that can share an rx interrupt */
#define IFLIB_MAX_TX_SHARED_INTR 4
/* this should really scale with ring size - this is a fairly arbitrary value */
#define TX_BATCH_SIZE 32
#define IFLIB_RESTART_BUDGET 8
#define IFC_LEGACY 0x001
#define IFC_QFLUSH 0x002
#define IFC_MULTISEG 0x004
#define IFC_SPARE1 0x008
#define IFC_SC_ALLOCATED 0x010
#define IFC_INIT_DONE 0x020
#define IFC_PREFETCH 0x040
#define IFC_DO_RESET 0x080
#define IFC_DO_WATCHDOG 0x100
#define IFC_SPARE0 0x200
#define IFC_SPARE2 0x400
#define IFC_IN_DETACH 0x800
#define IFC_NETMAP_TX_IRQ 0x80000000
#define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
struct iflib_txq {
qidx_t ift_in_use;
qidx_t ift_cidx;
qidx_t ift_cidx_processed;
qidx_t ift_pidx;
uint8_t ift_gen;
uint8_t ift_br_offset;
uint16_t ift_npending;
uint16_t ift_db_pending;
uint16_t ift_rs_pending;
/* implicit pad */
uint8_t ift_txd_size[8];
uint64_t ift_processed;
uint64_t ift_cleaned;
uint64_t ift_cleaned_prev;
#if MEMORY_LOGGING
uint64_t ift_enqueued;
uint64_t ift_dequeued;
#endif
uint64_t ift_no_tx_dma_setup;
uint64_t ift_no_desc_avail;
uint64_t ift_mbuf_defrag_failed;
uint64_t ift_mbuf_defrag;
uint64_t ift_map_failed;
uint64_t ift_txd_encap_efbig;
uint64_t ift_pullups;
uint64_t ift_last_timer_tick;
struct mtx ift_mtx;
struct mtx ift_db_mtx;
/* constant values */
if_ctx_t ift_ctx;
struct ifmp_ring *ift_br;
struct grouptask ift_task;
qidx_t ift_size;
uint16_t ift_id;
struct callout ift_timer;
#ifdef DEV_NETMAP
struct callout ift_netmap_timer;
#endif /* DEV_NETMAP */
if_txsd_vec_t ift_sds;
uint8_t ift_qstatus;
uint8_t ift_closed;
uint8_t ift_update_freq;
struct iflib_filter_info ift_filter_info;
bus_dma_tag_t ift_buf_tag;
bus_dma_tag_t ift_tso_buf_tag;
iflib_dma_info_t ift_ifdi;
#define MTX_NAME_LEN 32
char ift_mtx_name[MTX_NAME_LEN];
bus_dma_segment_t ift_segs[IFLIB_MAX_TX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
uint64_t ift_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);
struct iflib_fl {
qidx_t ifl_cidx;
qidx_t ifl_pidx;
qidx_t ifl_credits;
uint8_t ifl_gen;
uint8_t ifl_rxd_size;
#if MEMORY_LOGGING
uint64_t ifl_m_enqueued;
uint64_t ifl_m_dequeued;
uint64_t ifl_cl_enqueued;
uint64_t ifl_cl_dequeued;
#endif
/* implicit pad */
bitstr_t *ifl_rx_bitmap;
qidx_t ifl_fragidx;
/* constant */
qidx_t ifl_size;
uint16_t ifl_buf_size;
uint16_t ifl_cltype;
uma_zone_t ifl_zone;
iflib_rxsd_array_t ifl_sds;
iflib_rxq_t ifl_rxq;
uint8_t ifl_id;
bus_dma_tag_t ifl_buf_tag;
iflib_dma_info_t ifl_ifdi;
uint64_t ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
qidx_t ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
} __aligned(CACHE_LINE_SIZE);
/*
 * Return the number of in-use descriptors in a ring of 'size' entries
 * given the consumer (cidx) and producer (pidx) indices.  When the two
 * indices are equal, the generation bit disambiguates empty (gen == 0)
 * from completely full (gen == 1); any other combination is a bug.
 */
static inline qidx_t
get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
{
qidx_t used;
if (pidx > cidx)
used = pidx - cidx;
else if (pidx < cidx)
used = size - cidx + pidx;
else if (gen == 0 && pidx == cidx)
used = 0;
else if (gen == 1 && pidx == cidx)
used = size;
else
panic("bad state");
return (used);
}
#define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))
#define IDXDIFF(head, tail, wrap) \
((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
struct iflib_rxq {
if_ctx_t ifr_ctx;
iflib_fl_t ifr_fl;
uint64_t ifr_rx_irq;
struct pfil_head *pfil;
/*
* If there is a separate completion queue (IFLIB_HAS_RXCQ), this is
* the completion queue consumer index. Otherwise it's unused.
*/
qidx_t ifr_cq_cidx;
uint16_t ifr_id;
uint8_t ifr_nfl;
uint8_t ifr_ntxqirq;
uint8_t ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
uint8_t ifr_fl_offset;
struct lro_ctrl ifr_lc;
struct grouptask ifr_task;
struct callout ifr_watchdog;
struct iflib_filter_info ifr_filter_info;
iflib_dma_info_t ifr_ifdi;
/* dynamically allocate if any drivers need a value substantially larger than this */
struct if_rxd_frag ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
uint64_t ifr_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);
typedef struct if_rxsd {
caddr_t *ifsd_cl;
iflib_fl_t ifsd_fl;
} *if_rxsd_t;
/* multiple of word size */
#ifdef __LP64__
#define PKT_INFO_SIZE 6
#define RXD_INFO_SIZE 5
#define PKT_TYPE uint64_t
#else
#define PKT_INFO_SIZE 11
#define RXD_INFO_SIZE 8
#define PKT_TYPE uint32_t
#endif
#define PKT_LOOP_BOUND ((PKT_INFO_SIZE/3)*3)
#define RXD_LOOP_BOUND ((RXD_INFO_SIZE/4)*4)
typedef struct if_pkt_info_pad {
PKT_TYPE pkt_val[PKT_INFO_SIZE];
} *if_pkt_info_pad_t;
typedef struct if_rxd_info_pad {
PKT_TYPE rxd_val[RXD_INFO_SIZE];
} *if_rxd_info_pad_t;
CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));
/*
 * Zero an if_pkt_info by storing word-sized values through the pad
 * overlay; the CTASSERTs above guarantee the overlay has the same size.
 * Deliberately unrolled (hot path).
 */
static inline void
pkt_info_zero(if_pkt_info_t pi)
{
if_pkt_info_pad_t pi_pad;
pi_pad = (if_pkt_info_pad_t)pi;
pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
#ifndef __LP64__
pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
#endif
}
/*
 * Zero an if_rxd_info via its word-sized pad overlay, four words per
 * loop iteration plus the odd trailing word on 64-bit platforms
 * (RXD_INFO_SIZE is 5 there, so the loop covers only the first 4).
 */
static inline void
rxd_info_zero(if_rxd_info_t ri)
{
if_rxd_info_pad_t ri_pad;
int i;
ri_pad = (if_rxd_info_pad_t)ri;
for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
ri_pad->rxd_val[i] = 0;
ri_pad->rxd_val[i+1] = 0;
ri_pad->rxd_val[i+2] = 0;
ri_pad->rxd_val[i+3] = 0;
}
#ifdef __LP64__
ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
#endif
}
/*
* Only allow a single packet to take up at most 1/nth of the tx ring
*/
#define MAX_SINGLE_PACKET_FRACTION 12
#define IF_BAD_DMA (bus_addr_t)-1
#define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))
#define CTX_LOCK_INIT(_sc) sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock")
#define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_ctx_sx)
#define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_ctx_sx)
#define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_ctx_sx)
#define STATE_LOCK_INIT(_sc, _name) mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF)
#define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx)
#define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx)
#define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx)
#define CALLOUT_LOCK(txq) mtx_lock(&txq->ift_mtx)
#define CALLOUT_UNLOCK(txq) mtx_unlock(&txq->ift_mtx)
/* Our boot-time initialization hook */
static int iflib_module_event_handler(module_t, int, void *);
static moduledata_t iflib_moduledata = {
"iflib",
iflib_module_event_handler,
NULL
};
DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
MODULE_VERSION(iflib, 1);
MODULE_DEPEND(iflib, pci, 1, 1, 1);
MODULE_DEPEND(iflib, ether, 1, 1, 1);
TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
TASKQGROUP_DEFINE(if_config_tqg, 1, 1);
#ifndef IFLIB_DEBUG_COUNTERS
#ifdef INVARIANTS
#define IFLIB_DEBUG_COUNTERS 1
#else
#define IFLIB_DEBUG_COUNTERS 0
#endif /* !INVARIANTS */
#endif
static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"iflib driver parameters");
/*
* XXX need to ensure that this can't accidentally cause the head to be moved backwards
*/
static int iflib_min_tx_latency = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
&iflib_min_tx_latency, 0,
"minimize transmit latency at the possible expense of throughput");
static int iflib_no_tx_batch = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
&iflib_no_tx_batch, 0,
"minimize transmit latency at the possible expense of throughput");
static int iflib_timer_default = 1000;
SYSCTL_INT(_net_iflib, OID_AUTO, timer_default, CTLFLAG_RW,
&iflib_timer_default, 0, "number of ticks between iflib_timer calls");
#if IFLIB_DEBUG_COUNTERS
static int iflib_tx_seen;
static int iflib_tx_sent;
static int iflib_tx_encap;
static int iflib_rx_allocs;
static int iflib_fl_refills;
static int iflib_fl_refills_large;
static int iflib_tx_frees;
SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD, &iflib_tx_seen, 0,
"# TX mbufs seen");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD, &iflib_tx_sent, 0,
"# TX mbufs sent");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD, &iflib_tx_encap, 0,
"# TX mbufs encapped");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD, &iflib_tx_frees, 0,
"# TX frees");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD, &iflib_rx_allocs, 0,
"# RX allocations");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD, &iflib_fl_refills, 0,
"# refills");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
&iflib_fl_refills_large, 0, "# large refills");
static int iflib_txq_drain_flushing;
static int iflib_txq_drain_oactive;
static int iflib_txq_drain_notready;
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
&iflib_txq_drain_flushing, 0, "# drain flushes");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
&iflib_txq_drain_oactive, 0, "# drain oactives");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
&iflib_txq_drain_notready, 0, "# drain notready");
static int iflib_encap_load_mbuf_fail;
static int iflib_encap_pad_mbuf_fail;
static int iflib_encap_txq_avail_fail;
static int iflib_encap_txd_encap_fail;
SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
&iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
&iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
&iflib_encap_txq_avail_fail, 0, "# txq avail failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
&iflib_encap_txd_encap_fail, 0, "# driver encap failures");
static int iflib_task_fn_rxs;
static int iflib_rx_intr_enables;
static int iflib_fast_intrs;
static int iflib_rx_unavail;
static int iflib_rx_ctx_inactive;
static int iflib_rx_if_input;
static int iflib_rxd_flush;
static int iflib_verbose_debug;
SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD, &iflib_task_fn_rxs, 0,
"# task_fn_rx calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
&iflib_rx_intr_enables, 0, "# RX intr enables");
SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD, &iflib_fast_intrs, 0,
"# fast_intr calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD, &iflib_rx_unavail, 0,
"# times rxeof called with no available data");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
&iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD, &iflib_rx_if_input,
0, "# times rxeof called if_input");
SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD, &iflib_rxd_flush, 0,
"# times rxd_flush called");
SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
&iflib_verbose_debug, 0, "enable verbose debugging");
#define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
/*
 * Reset every iflib debug counter to zero.  Only compiled when
 * IFLIB_DEBUG_COUNTERS is nonzero; otherwise a stub is used.
 */
static void
iflib_debug_reset(void)
{
iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
iflib_txq_drain_flushing = iflib_txq_drain_oactive =
iflib_txq_drain_notready =
iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
iflib_rx_unavail =
iflib_rx_ctx_inactive = iflib_rx_if_input =
iflib_rxd_flush = 0;
}
#else
#define DBG_COUNTER_INC(name)
static void iflib_debug_reset(void) {}
#endif
#define IFLIB_DEBUG 0
static void iflib_tx_structures_free(if_ctx_t ctx);
static void iflib_rx_structures_free(if_ctx_t ctx);
static int iflib_queues_alloc(if_ctx_t ctx);
static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
static int iflib_qset_structures_setup(if_ctx_t ctx);
static int iflib_msix_init(if_ctx_t ctx);
static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, const char *str);
static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
#ifdef ALTQ
static void iflib_altq_if_start(if_t ifp);
static int iflib_altq_if_transmit(if_t ifp, struct mbuf *m);
#endif
static int iflib_register(if_ctx_t);
static void iflib_deregister(if_ctx_t);
static void iflib_unregister_vlan_handlers(if_ctx_t ctx);
static uint16_t iflib_get_mbuf_size_for(unsigned int size);
static void iflib_init_locked(if_ctx_t ctx);
static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
static void iflib_add_device_sysctl_post(if_ctx_t ctx);
static void iflib_ifmp_purge(iflib_txq_t txq);
static void _iflib_pre_assert(if_softc_ctx_t scctx);
static void iflib_stop(if_ctx_t ctx);
static void iflib_if_init_locked(if_ctx_t ctx);
static void iflib_free_intr_mem(if_ctx_t ctx);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf * iflib_fixup_rx(struct mbuf *m);
#endif
static SLIST_HEAD(cpu_offset_list, cpu_offset) cpu_offsets =
SLIST_HEAD_INITIALIZER(cpu_offsets);
struct cpu_offset {
SLIST_ENTRY(cpu_offset) entries;
cpuset_t set;
unsigned int refcount;
uint16_t next_cpuid;
};
static struct mtx cpu_offset_mtx;
MTX_SYSINIT(iflib_cpu_offset, &cpu_offset_mtx, "iflib_cpu_offset lock",
MTX_DEF);
DEBUGNET_DEFINE(iflib);
/*
 * Return the RX descriptor count of the first real RX queue.  When the
 * driver uses a separate completion queue (IFLIB_HAS_RXCQ), queue 0 is
 * the completion queue and the first free-list queue is index 1.
 */
static int
iflib_num_rx_descs(if_ctx_t ctx)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	uint16_t first_rxq;

	first_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
	return (ctx->ifc_softc_ctx.isc_nrxd[first_rxq]);
}
/*
 * Return the TX descriptor count of the first real TX queue.  When the
 * driver uses a separate completion queue (IFLIB_HAS_TXCQ), queue 0 is
 * the completion queue and the first real queue is index 1.
 */
static int
iflib_num_tx_descs(if_ctx_t ctx)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	uint16_t first_txq;

	first_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
	return (ctx->ifc_softc_ctx.isc_ntxd[first_txq]);
}
#ifdef DEV_NETMAP
#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
MODULE_DEPEND(iflib, netmap, 1, 1, 1);
static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, bool init);
static void iflib_netmap_timer(void *arg);
/*
* device-specific sysctl variables:
*
* iflib_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
* During regular operations the CRC is stripped, but on some
* hardware reception of frames not multiple of 64 is slower,
* so using crcstrip=0 helps in benchmarks.
*
* iflib_rx_miss, iflib_rx_miss_bufs:
* count packets that might be missed due to lost interrupts.
*/
SYSCTL_DECL(_dev_netmap);
/*
 * The xl driver by default strips CRCs and we do not override it.
 */
int iflib_crcstrip = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
    CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on RX frames");

/* Diagnostic counters updated in iflib_netmap_rxsync(); see comment above. */
int iflib_rx_miss, iflib_rx_miss_bufs;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
    CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed RX intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
    CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed RX intr bufs");
/*
* Register/unregister. We are already under netmap lock.
* Only called on the first register or the last unregister.
*/
/*
 * Enter or leave netmap mode: stop the interface, flip the netmap native
 * flags while it is quiesced, then re-initialize it.  Returns 0 on
 * success, 1 if the interface failed to come back up (IFF_DRV_RUNNING
 * not set), in which case the native flags are rolled back.
 */
static int
iflib_netmap_register(struct netmap_adapter *na, int onoff)
{
	if_t ifp = na->ifp;
	if_ctx_t ctx = if_getsoftc(ifp);
	int status;

	CTX_LOCK(ctx);
	if (!CTX_IS_VF(ctx))
		IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);

	iflib_stop(ctx);

	/*
	 * Enable (or disable) netmap flags, and intercept (or restore)
	 * ifp->if_transmit. This is done once the device has been stopped
	 * to prevent race conditions. Also, this must be done after
	 * calling netmap_disable_all_rings() and before calling
	 * netmap_enable_all_rings(), so that these two functions see the
	 * updated state of the NAF_NETMAP_ON bit.
	 */
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}

	iflib_init_locked(ctx);
	IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ?
	status = if_getdrvflags(ifp) & IFF_DRV_RUNNING ? 0 : 1;
	if (status)
		nm_clear_native_flags(na);
	CTX_UNLOCK(ctx);
	return (status);
}
static int
iflib_netmap_config(struct netmap_adapter *na, struct nm_config_info *info)
{
if_t ifp = na->ifp;
if_ctx_t ctx = if_getsoftc(ifp);
iflib_rxq_t rxq = &ctx->ifc_rxqs[0];
iflib_fl_t fl = &rxq->ifr_fl[0];
info->num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
info->num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
info->num_tx_descs = iflib_num_tx_descs(ctx);
info->num_rx_descs = iflib_num_rx_descs(ctx);
info->rx_buf_maxsize = fl->ifl_buf_size;
nm_prinf("txr %u rxr %u txd %u rxd %u rbufsz %u",
info->num_tx_rings, info->num_rx_rings, info->num_tx_descs,
info->num_rx_descs, info->rx_buf_maxsize);
return 0;
}
/*
 * Hand kernel-owned netmap buffers back to the NIC on free list 0 of rxq,
 * loading/syncing their DMA maps and publishing the new producer index to
 * the hardware.  Called at ring (re)init (init == true) and from rxsync.
 * Returns 0, or reinitializes the ring on a bad buffer.
 */
static int
netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, bool init)
{
	struct netmap_adapter *na = kring->na;
	u_int const lim = kring->nkr_num_slots - 1;
	struct netmap_ring *ring = kring->ring;
	bus_dmamap_t *map;
	struct if_rxd_update iru;
	if_ctx_t ctx = rxq->ifr_ctx;
	iflib_fl_t fl = &rxq->ifr_fl[0];
	u_int nic_i_first, nic_i;
	u_int nm_i;
	int i, n;
#if IFLIB_DEBUG_COUNTERS
	int rf_count = 0;
#endif

	/*
	 * This function is used both at initialization and in rxsync.
	 * At initialization we need to prepare (with isc_rxd_refill())
	 * all the netmap buffers currently owned by the kernel, in
	 * such a way to keep fl->ifl_pidx and kring->nr_hwcur in sync
	 * (except for kring->nkr_hwofs). These may be less than
	 * kring->nkr_num_slots if netmap_reset() was called while
	 * an application using the kring that still owned some
	 * buffers.
	 * At rxsync time, both indexes point to the next buffer to be
	 * refilled.
	 * In any case we publish (with isc_rxd_flush()) up to
	 * (fl->ifl_pidx - 1) % N (included), to avoid the NIC tail/prod
	 * pointer to overrun the head/cons pointer, although this is
	 * not necessary for some NICs (e.g. vmx).
	 */
	if (__predict_false(init)) {
		n = kring->nkr_num_slots - nm_kr_rxspace(kring);
	} else {
		n = kring->rhead - kring->nr_hwcur;
		if (n == 0)
			return (0); /* Nothing to do. */
		if (n < 0)
			n += kring->nkr_num_slots;
	}

	iru_init(&iru, rxq, 0 /* flid */);
	map = fl->ifl_sds.ifsd_map;
	nic_i = fl->ifl_pidx;
	nm_i = netmap_idx_n2k(kring, nic_i);
	if (__predict_false(init)) {
		/*
		 * On init/reset, nic_i must be 0, and we must
		 * start to refill from hwtail (see netmap_reset()).
		 */
		MPASS(nic_i == 0);
		MPASS(nm_i == kring->nr_hwtail);
	} else
		MPASS(nm_i == kring->nr_hwcur);
	DBG_COUNTER_INC(fl_refills);
	/* Refill in batches of at most IFLIB_MAX_RX_REFRESH descriptors. */
	while (n > 0) {
#if IFLIB_DEBUG_COUNTERS
		if (++rf_count == 9)
			DBG_COUNTER_INC(fl_refills_large);
#endif
		nic_i_first = nic_i;
		for (i = 0; n > 0 && i < IFLIB_MAX_RX_REFRESH; n--, i++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);

			MPASS(i < IFLIB_MAX_RX_REFRESH);

			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
				return netmap_ring_reinit(kring);

			fl->ifl_bus_addrs[i] = paddr +
			    nm_get_offset(kring, slot);
			fl->ifl_rxd_idxs[i] = nic_i;

			if (__predict_false(init)) {
				netmap_load_map(na, fl->ifl_buf_tag,
				    map[nic_i], addr);
			} else if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, fl->ifl_buf_tag,
				    map[nic_i], addr);
			}
			bus_dmamap_sync(fl->ifl_buf_tag, map[nic_i],
			    BUS_DMASYNC_PREREAD);
			slot->flags &= ~NS_BUF_CHANGED;
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}

		/* Pass this batch of buffers down to the driver. */
		iru.iru_pidx = nic_i_first;
		iru.iru_count = i;
		ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
	}
	fl->ifl_pidx = nic_i;
	/*
	 * At the end of the loop we must have refilled everything
	 * we could possibly refill.
	 */
	MPASS(nm_i == kring->rhead);
	kring->nr_hwcur = nm_i;

	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id,
	    nm_prev(nic_i, lim));
	DBG_COUNTER_INC(rxd_flush);

	return (0);
}
#define NETMAP_TX_TIMER_US 90
/*
* Reconcile kernel and user view of the transmit ring.
*
* All information is in the kring.
* Userspace wants to send packets up to the one before kring->rhead,
* kernel knows kring->nr_hwcur is the first unsent packet.
*
* Here we push packets out (as many as possible), and possibly
* reclaim buffers from previously completed transmission.
*
* The caller (netmap) guarantees that there is only one instance
* running at any time. Any interference with other driver
* methods should be handled by the individual drivers.
*/
static int
iflib_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	if_t ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap kring */
	u_int nic_i;	/* index into the NIC ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	struct if_pkt_info pi;
	int tx_pkts = 0, tx_bytes = 0;

	/*
	 * interrupts on every tx packet are expensive so request
	 * them every half ring, or where NS_REPORT is set
	 */
	u_int report_frequency = kring->nkr_num_slots >> 1;
	/* device-specific */
	if_ctx_t ctx = if_getsoftc(ifp);
	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];

	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part: process new packets to send.
	 * nm_i is the current index in the netmap kring,
	 * nic_i is the corresponding index in the NIC ring.
	 *
	 * If we have packets to send (nm_i != head)
	 * iterate over the netmap ring, fetch length and update
	 * the corresponding slot in the NIC ring. Some drivers also
	 * need to update the buffer's physical address in the NIC slot
	 * even NS_BUF_CHANGED is not set (PNMB computes the addresses).
	 *
	 * The netmap_reload_map() calls is especially expensive,
	 * even when (as in this case) the tag is 0, so do only
	 * when the buffer has actually changed.
	 *
	 * If possible do not set the report/intr bit on all slots,
	 * but only a few times per ring or when NS_REPORT is set.
	 *
	 * Finally, on 10G and faster drivers, it might be useful
	 * to prefetch the next slot and txr entry.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		uint32_t pkt_len = 0, seg_idx = 0;
		int nic_i_start = -1, flags = 0;
		pkt_info_zero(&pi);
		pi.ipi_segs = txq->ift_segs;
		pi.ipi_qsidx = kring->ring_id;
		nic_i = netmap_idx_k2n(kring, nm_i);

		__builtin_prefetch(&ring->slot[nm_i]);
		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
		__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);

		/* Slots may be multi-fragment (NS_MOREFRAG); a packet is
		 * encapsulated only when its last fragment is seen. */
		while (nm_i != head) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			uint64_t offset = nm_get_offset(kring, slot);
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);

			/* Ask for a TX interrupt periodically or on NS_REPORT. */
			flags |= (slot->flags & NS_REPORT ||
				nic_i == 0 || nic_i == report_frequency) ?
				IPI_TX_INTR : 0;

			/*
			 * If this is the first packet fragment, save the
			 * index of the first NIC slot for later.
			 */
			if (nic_i_start < 0)
				nic_i_start = nic_i;

			pi.ipi_segs[seg_idx].ds_addr = paddr + offset;
			pi.ipi_segs[seg_idx].ds_len = len;
			if (len) {
				pkt_len += len;
				seg_idx++;
			}

			if (!(slot->flags & NS_MOREFRAG)) {
				pi.ipi_len = pkt_len;
				pi.ipi_nsegs = seg_idx;
				pi.ipi_pidx = nic_i_start;
				pi.ipi_ndescs = 0;
				pi.ipi_flags = flags;

				/* Prepare the NIC TX ring. */
				ctx->isc_txd_encap(ctx->ifc_softc, &pi);
				DBG_COUNTER_INC(tx_encap);

				/* Update transmit counters */
				tx_bytes += pi.ipi_len;
				tx_pkts++;

				/* Reinit per-packet info for the next one. */
				flags = seg_idx = pkt_len = 0;
				nic_i_start = -1;
			}

			/* prefetch for next round */
			__builtin_prefetch(&ring->slot[nm_i + 1]);
			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);

			NM_CHECK_ADDR_LEN_OFF(na, len, offset);

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, txq->ift_buf_tag,
				    txq->ift_sds.ifsd_map[nic_i], addr);
			}
			/* make sure changes to the buffer are synced */
			bus_dmamap_sync(txq->ift_buf_tag,
			    txq->ift_sds.ifsd_map[nic_i],
			    BUS_DMASYNC_PREWRITE);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED | NS_MOREFRAG);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = nm_i;

		/* synchronize the NIC ring */
		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* (re)start the tx unit up to slot nic_i (excluded) */
		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 *
	 * If there are unclaimed buffers, attempt to reclaim them.
	 * If we don't manage to reclaim them all, and TX IRQs are not in use,
	 * trigger a per-tx-queue timer to try again later.
	 */
	if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
		if (iflib_tx_credits_update(ctx, txq)) {
			/* some tx completed, increment avail */
			nic_i = txq->ift_cidx_processed;
			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
		}
	}

	if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ))
		if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
			callout_reset_sbt_on(&txq->ift_netmap_timer,
			    NETMAP_TX_TIMER_US * SBT_1US, SBT_1US,
			    iflib_netmap_timer, txq,
			    txq->ift_netmap_timer.c_cpu, 0);
		}

	if_inc_counter(ifp, IFCOUNTER_OBYTES, tx_bytes);
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, tx_pkts);
	return (0);
}
/*
* Reconcile kernel and user view of the receive ring.
* Same as for the txsync, this routine must be efficient.
* The caller guarantees a single invocations, but races against
* the rest of the driver should be handled here.
*
* On call, kring->rhead is the first packet that userspace wants
* to keep, and kring->rcur is the wakeup point.
* The kernel has previously reported packets up to kring->rtail.
*
* If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
* of whether or not we received an interrupt.
*/
static int
iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	if_t ifp = na->ifp;
	uint32_t nm_i;	/* index into the netmap ring */
	uint32_t nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
	int i = 0, rx_bytes = 0, rx_pkts = 0;

	if_ctx_t ctx = if_getsoftc(ifp);
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
	iflib_fl_t fl = &rxq->ifr_fl[0];
	struct if_rxd_info ri;
	qidx_t *cidxp;

	/*
	 * netmap only uses free list 0, to avoid out of order consumption
	 * of receive buffers
	 */

	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part: import newly received packets.
	 *
	 * nm_i is the index of the next free slot in the netmap ring,
	 * nic_i is the index of the next received packet in the NIC ring
	 * (or in the free list 0 if IFLIB_HAS_RXCQ is set), and they may
	 * differ in case if_init() has been called while
	 * in netmap mode. For the receive ring we have
	 *
	 *	nic_i = fl->ifl_cidx;
	 *	nm_i = kring->nr_hwtail (previous)
	 * and
	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 *
	 * fl->ifl_cidx is set to 0 on a ring reinit
	 */
	if (netmap_no_pendintr || force_update) {
		uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim);
		bool have_rxcq = sctx->isc_flags & IFLIB_HAS_RXCQ;
		int crclen = iflib_crcstrip ? 0 : 4;
		int error, avail;

		/*
		 * For the free list consumer index, we use the same
		 * logic as in iflib_rxeof().
		 */
		if (have_rxcq)
			cidxp = &rxq->ifr_cq_cidx;
		else
			cidxp = &fl->ifl_cidx;
		avail = ctx->isc_rxd_available(ctx->ifc_softc,
		    rxq->ifr_id, *cidxp, USHRT_MAX);

		nic_i = fl->ifl_cidx;
		nm_i = netmap_idx_n2k(kring, nic_i);
		MPASS(nm_i == kring->nr_hwtail);
		/* Import packets, stopping before the netmap ring fills up. */
		for (n = 0; avail > 0 && nm_i != hwtail_lim; n++, avail--) {
			rxd_info_zero(&ri);
			ri.iri_frags = rxq->ifr_frags;
			ri.iri_qsidx = kring->ring_id;
			ri.iri_ifp = ctx->ifc_ifp;
			ri.iri_cidx = *cidxp;

			error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
			/* One netmap slot per fragment; on error zero them. */
			for (i = 0; i < ri.iri_nfrags; i++) {
				if (error) {
					ring->slot[nm_i].len = 0;
					ring->slot[nm_i].flags = 0;
				} else {
					ring->slot[nm_i].len = ri.iri_frags[i].irf_len;
					if (i == (ri.iri_nfrags - 1)) {
						/* CRC (if kept) only counts on the last frag. */
						ring->slot[nm_i].len -= crclen;
						ring->slot[nm_i].flags = 0;

						/* Update receive counters */
						rx_bytes += ri.iri_len;
						rx_pkts++;
					} else
						ring->slot[nm_i].flags = NS_MOREFRAG;
				}

				bus_dmamap_sync(fl->ifl_buf_tag,
				    fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
				nm_i = nm_next(nm_i, lim);
				fl->ifl_cidx = nic_i = nm_next(nic_i, lim);
			}

			if (have_rxcq) {
				/* Wrap the completion-queue consumer index. */
				*cidxp = ri.iri_cidx;
				while (*cidxp >= scctx->isc_nrxd[0])
					*cidxp -= scctx->isc_nrxd[0];
			}
		}
		if (n) { /* update the state variables */
			if (netmap_no_pendintr && !force_update) {
				/* diagnostics */
				iflib_rx_miss ++;
				iflib_rx_miss_bufs += n;
			}
			kring->nr_hwtail = nm_i;
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}
	/*
	 * Second part: skip past packets that userspace has released.
	 * (kring->nr_hwcur to head excluded),
	 * and make the buffers available for reception.
	 * As usual nm_i is the index in the netmap ring,
	 * nic_i is the index in the NIC ring, and
	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 */
	netmap_fl_refill(rxq, kring, false);

	if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);
	return (0);
}
/* Enable or disable the device's interrupts under the context lock. */
static void
iflib_netmap_intr(struct netmap_adapter *na, int onoff)
{
	if_ctx_t ctx = if_getsoftc(na->ifp);

	CTX_LOCK(ctx);
	if (onoff)
		IFDI_INTR_ENABLE(ctx);
	else
		IFDI_INTR_DISABLE(ctx);
	CTX_UNLOCK(ctx);
}
/*
 * Describe this interface to netmap (ring/descriptor geometry, capability
 * flags, and sync/config callbacks) and register it.
 */
static int
iflib_netmap_attach(if_ctx_t ctx)
{
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	struct netmap_adapter na;

	MPASS(scctx->isc_ntxqsets);
	MPASS(scctx->isc_nrxqsets);

	bzero(&na, sizeof(na));
	na.ifp = ctx->ifc_ifp;
	na.na_flags = NAF_BDG_MAYSLEEP | NAF_MOREFRAG | NAF_OFFSETS;
	na.num_tx_desc = iflib_num_tx_descs(ctx);
	na.num_rx_desc = iflib_num_rx_descs(ctx);
	na.num_tx_rings = scctx->isc_ntxqsets;
	na.num_rx_rings = scctx->isc_nrxqsets;
	na.nm_txsync = iflib_netmap_txsync;
	na.nm_rxsync = iflib_netmap_rxsync;
	na.nm_register = iflib_netmap_register;
	na.nm_intr = iflib_netmap_intr;
	na.nm_config = iflib_netmap_config;
	return (netmap_attach(&na));
}
/*
 * After a netmap ring reset, attach the netmap TX buffers to the queue's
 * DMA maps.  Returns 1 if the queue is in netmap mode, 0 otherwise
 * (netmap_reset() returned NULL).
 */
static int
iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
{
	struct netmap_adapter *na = NA(ctx->ifc_ifp);
	struct netmap_slot *slot;

	slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
	if (slot == NULL)
		return (0);
	for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {
		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
		 * netmap slot index, si
		 */
		int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
		netmap_load_map(na, txq->ift_buf_tag, txq->ift_sds.ifsd_map[i],
		    NMB(na, slot + si));
	}
	return (1);
}
/*
 * After a netmap ring reset, hand all kernel-owned RX buffers back to the
 * NIC.  Returns 1 if the queue is in netmap mode, 0 otherwise.
 */
static int
iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
{
	struct netmap_adapter *na;
	struct netmap_slot *slot;

	na = NA(ctx->ifc_ifp);
	slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
	if (slot == NULL)
		return (0);

	netmap_fl_refill(rxq, na->rx_rings[rxq->ifr_id], true);
	return (1);
}
/* Per-TX-queue callout handler used when TX IRQs are not available. */
static void
iflib_netmap_timer(void *arg)
{
	iflib_txq_t txq = arg;

	/*
	 * Wake up the netmap application, to give it a chance to
	 * call txsync and reclaim more completed TX buffers.
	 */
	netmap_tx_irq(txq->ift_ctx->ifc_ifp, txq->ift_id);
}
#define iflib_netmap_detach(ifp) netmap_detach(ifp)
#else
#define iflib_netmap_txq_init(ctx, txq) (0)
#define iflib_netmap_rxq_init(ctx, rxq) (0)
#define iflib_netmap_detach(ifp)
#define netmap_enable_all_rings(ifp)
#define netmap_disable_all_rings(ifp)
#define iflib_netmap_attach(ctx) (0)
#define netmap_rx_irq(ifp, qid, budget) (0)
#endif
#if defined(__i386__) || defined(__amd64__)
/* Prefetch the cache line containing *x into all cache levels. */
static __inline void
prefetch(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}

/* Prefetch two consecutive cache lines starting at x (the second line
 * only when cache lines are smaller than 128 bytes). */
static __inline void
prefetch2cachelines(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
#if (CACHE_LINE_SIZE < 128)
	__asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long)))));
#endif
}
#else
/* No-op stubs for architectures without an explicit prefetch hint here. */
static __inline void
prefetch(void *x)
{
}

static __inline void
prefetch2cachelines(void *x)
{
}
#endif
/* Seed an RX descriptor-update request with the free list's fixed fields. */
static void
iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid)
{
	iflib_fl_t fl = &rxq->ifr_fl[flid];

	iru->iru_qsidx = rxq->ifr_id;
	iru->iru_flidx = fl->ifl_id;
	iru->iru_buf_size = fl->ifl_buf_size;
	iru->iru_paddrs = fl->ifl_bus_addrs;
	iru->iru_idxs = fl->ifl_rxd_idxs;
}
/*
 * bus_dmamap_load() callback: record the single segment's bus address.
 * On error the destination is left untouched (callers pre-set IF_BAD_DMA
 * and check it afterwards).
 */
static void
_iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	bus_addr_t *paddr = arg;

	if (err == 0)
		*paddr = segs[0].ds_addr;
}
#define DMA_WIDTH_TO_BUS_LOWADDR(width) \
(((width) == 0) || (width) == flsll(BUS_SPACE_MAXADDR) ? \
BUS_SPACE_MAXADDR : (1ULL << (width)) - 1ULL)
/*
 * Allocate a coherent, zeroed, single-segment DMA memory region of the
 * given size and alignment, bounded by the context's DMA width.  On
 * success fills in dma's tag, map, kernel virtual address and bus address
 * and returns 0; on failure unwinds whatever was created and returns the
 * bus_dma error.
 */
int
iflib_dma_alloc_align(if_ctx_t ctx, int size, int align, iflib_dma_info_t dma, int mapflags)
{
	int err;
	device_t dev = ctx->ifc_dev;
	bus_addr_t lowaddr;

	lowaddr = DMA_WIDTH_TO_BUS_LOWADDR(ctx->ifc_softc_ctx.isc_dma_width);

	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
				align, 0,		/* alignment, bounds */
				lowaddr,		/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				size,			/* maxsize */
				1,			/* nsegments */
				size,			/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockarg */
				&dma->idi_tag);
	if (err) {
		device_printf(dev,
		    "%s: bus_dma_tag_create failed: %d (size=%d, align=%d)\n",
		    __func__, err, size, align);
		goto fail_0;
	}

	err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
	if (err) {
		device_printf(dev,
		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
		    __func__, (uintmax_t)size, err);
		goto fail_1;
	}

	/* IF_BAD_DMA stays in place if the load callback reports an error. */
	dma->idi_paddr = IF_BAD_DMA;
	err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
	    size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
	if (err || dma->idi_paddr == IF_BAD_DMA) {
		device_printf(dev,
		    "%s: bus_dmamap_load failed: %d\n",
		    __func__, err);
		goto fail_2;
	}

	dma->idi_size = size;
	return (0);

	/* Unwind in reverse order of construction. */
fail_2:
	bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
fail_1:
	bus_dma_tag_destroy(dma->idi_tag);
fail_0:
	dma->idi_tag = NULL;

	return (err);
}
/* Allocate DMA memory using the driver's declared queue alignment. */
int
iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;

	KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));

	return (iflib_dma_alloc_align(ctx, size, sctx->isc_q_align, dma,
	    mapflags));
}
/*
 * Allocate DMA resources for each of the count entries in dmalist; on the
 * first failure free everything allocated so far and return the error.
 * Returns 0 on success (including count == 0).
 */
int
iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
{
	int i, err;
	iflib_dma_info_t *dmaiter;

	/*
	 * Initialize err: with count == 0 the loop body never runs and the
	 * old code read err uninitialized (undefined behavior).
	 */
	err = 0;
	dmaiter = dmalist;
	for (i = 0; i < count; i++, dmaiter++) {
		if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
			break;
	}
	if (err)
		iflib_dma_free_multi(dmalist, i);
	return (err);
}
/*
 * Release a region created by iflib_dma_alloc_align(): sync and unload the
 * map, free the memory, and destroy the tag.  Idempotent: safe to call on
 * an already-freed or never-allocated descriptor (idi_tag == NULL).
 */
void
iflib_dma_free(iflib_dma_info_t dma)
{
	if (dma->idi_tag == NULL)
		return;
	if (dma->idi_paddr != IF_BAD_DMA) {
		bus_dmamap_sync(dma->idi_tag, dma->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->idi_tag, dma->idi_map);
		dma->idi_paddr = IF_BAD_DMA;
	}
	if (dma->idi_vaddr != NULL) {
		bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
		dma->idi_vaddr = NULL;
	}
	bus_dma_tag_destroy(dma->idi_tag);
	dma->idi_tag = NULL;
}
/* Free every DMA descriptor in a list of count entries. */
void
iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
{
	int i;

	for (i = 0; i < count; i++)
		iflib_dma_free(dmalist[i]);
}
/*
 * Basic fast interrupt handler: run the driver's filter (if any) and
 * enqueue the associated grouptask when it asks for thread context.
 */
static int
iflib_fast_intr(void *arg)
{
	iflib_filter_info_t info = arg;
	int rc;

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL) {
		rc = info->ifi_filter(info->ifi_filter_arg);
		if ((rc & FILTER_SCHEDULE_THREAD) == 0)
			return (rc);
	}

	GROUPTASK_ENQUEUE(info->ifi_task);
	return (FILTER_HANDLED);
}
/*
 * Fast interrupt for a shared RX/TX queue pair.  For each TX queue paired
 * with this RX queue, credit completed descriptors and schedule its task;
 * then schedule the RX task if descriptors are available.  Queues with no
 * work get their interrupt re-enabled; for legacy INTx the re-enable is
 * coalesced into a single IFDI_INTR_ENABLE() at the end.
 */
static int
iflib_fast_intr_rxtx(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	if_ctx_t ctx;
	iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
	iflib_txq_t txq;
	void *sc;
	int i, cidx, result;
	qidx_t txqid;
	bool intr_enable, intr_legacy;

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL) {
		result = info->ifi_filter(info->ifi_filter_arg);
		if ((result & FILTER_SCHEDULE_THREAD) == 0)
			return (result);
	}

	ctx = rxq->ifr_ctx;
	sc = ctx->ifc_softc;
	intr_enable = false;
	intr_legacy = !!(ctx->ifc_flags & IFC_LEGACY);
	MPASS(rxq->ifr_ntxqirq);
	for (i = 0; i < rxq->ifr_ntxqirq; i++) {
		txqid = rxq->ifr_txqid[i];
		txq = &ctx->ifc_txqs[txqid];
		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
		    BUS_DMASYNC_POSTREAD);
		if (!ctx->isc_txd_credits_update(sc, txqid, false)) {
			/* No TX completions: re-arm this TX queue's interrupt. */
			if (intr_legacy)
				intr_enable = true;
			else
				IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
			continue;
		}
		GROUPTASK_ENQUEUE(&txq->ift_task);
	}
	/* Completion-queue drivers track the RX consumer index separately. */
	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
		cidx = rxq->ifr_cq_cidx;
	else
		cidx = rxq->ifr_fl[0].ifl_cidx;
	if (iflib_rxd_avail(ctx, rxq, cidx, 1))
		GROUPTASK_ENQUEUE(gtask);
	else {
		if (intr_legacy)
			intr_enable = true;
		else
			IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
		DBG_COUNTER_INC(rx_intr_enables);
	}
	if (intr_enable)
		IFDI_INTR_ENABLE(ctx);
	return (FILTER_HANDLED);
}
/*
 * Fast interrupt handler variant that tolerates a not-yet-attached
 * grouptask: only enqueue when the taskqueue has been set up.
 */
static int
iflib_fast_intr_ctx(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask;
	int rc;

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL) {
		rc = info->ifi_filter(info->ifi_filter_arg);
		if ((rc & FILTER_SCHEDULE_THREAD) == 0)
			return (rc);
	}

	gtask = info->ifi_task;
	if (gtask->gt_taskqueue != NULL)
		GROUPTASK_ENQUEUE(gtask);
	return (FILTER_HANDLED);
}
/*
 * Allocate and activate the IRQ resource for rid and install either a
 * filter or a handler on it (at most one may be non-NULL).  On success
 * the resource is stored in irq->ii_res and the interrupt cookie in
 * irq->ii_tag.  NOTE(review): on bus_setup_intr() failure the allocated
 * resource is left in irq->ii_res for the caller to release — verify
 * callers handle that path.
 */
static int
_iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    const char *name)
{
	struct resource *res;
	void *tag = NULL;
	device_t dev = ctx->ifc_dev;
	int flags, i, rc;

	flags = RF_ACTIVE;
	/* INTx lines may be shared with other devices. */
	if (ctx->ifc_flags & IFC_LEGACY)
		flags |= RF_SHAREABLE;
	MPASS(rid < 512);
	i = rid;
	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i, flags);
	if (res == NULL) {
		device_printf(dev,
		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
		return (ENOMEM);
	}
	irq->ii_res = res;
	KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
	rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
	    filter, handler, arg, &tag);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt for rid %d, name %s: %d\n",
		    rid, name ? name : "unknown", rc);
		return (rc);
	} else if (name)
		bus_describe_intr(dev, res, tag, "%s", name);

	irq->ii_tag = tag;
	return (0);
}
/*********************************************************************
*
* Allocate DMA resources for TX buffers as well as memory for the TX
* mbuf map. TX DMA maps (non-TSO/TSO) and TX mbuf map are kept in a
* iflib_sw_tx_desc_array structure, storing all the information that
* is needed to transmit a packet on the wire. This is called only
* once at attach, setup is done every reset.
*
**********************************************************************/
static int
iflib_txsd_alloc(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = ctx->ifc_dev;
	bus_size_t tsomaxsize;
	bus_addr_t lowaddr;
	int err, nsegments, ntsosegments;
	bool tso;

	nsegments = scctx->isc_tx_nsegments;
	ntsosegments = scctx->isc_tx_tso_segments_max;
	tsomaxsize = scctx->isc_tx_tso_size_max;
	/* Allow room for an 802.1Q tag on TSO frames. */
	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU)
		tsomaxsize += sizeof(struct ether_vlan_header);
	MPASS(scctx->isc_ntxd[0] > 0);
	MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
	MPASS(nsegments > 0);
	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) {
		MPASS(ntsosegments > 0);
		MPASS(sctx->isc_tso_maxsize >= tsomaxsize);
	}

	/* Restrict buffer addresses to the device's DMA width. */
	lowaddr = DMA_WIDTH_TO_BUS_LOWADDR(scctx->isc_dma_width);

	/*
	 * Set up DMA tags for TX buffers.
	 */
	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
			       1, 0,			/* alignment, bounds */
			       lowaddr,			/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       sctx->isc_tx_maxsize,		/* maxsize */
			       nsegments,	/* nsegments */
			       sctx->isc_tx_maxsegsize,	/* maxsegsize */
			       0,			/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockfuncarg */
			       &txq->ift_buf_tag))) {
		device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err);
		device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n",
		    (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
		goto fail;
	}
	tso = (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) != 0;
	if (tso && (err = bus_dma_tag_create(bus_get_dma_tag(dev),
			       1, 0,			/* alignment, bounds */
			       lowaddr,			/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       tsomaxsize,		/* maxsize */
			       ntsosegments,	/* nsegments */
			       sctx->isc_tso_maxsegsize,/* maxsegsize */
			       0,			/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockfuncarg */
			       &txq->ift_tso_buf_tag))) {
		device_printf(dev, "Unable to allocate TSO TX DMA tag: %d\n",
		    err);
		goto fail;
	}

	/* Allocate memory for the TX mbuf map. */
	if (!(txq->ift_sds.ifsd_m =
	    (struct mbuf **) malloc(sizeof(struct mbuf *) *
	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate TX mbuf map memory\n");
		err = ENOMEM;
		goto fail;
	}

	/*
	 * Create the DMA maps for TX buffers.
	 */
	if ((txq->ift_sds.ifsd_map = (bus_dmamap_t *)malloc(
	    sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
	    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
		device_printf(dev,
		    "Unable to allocate TX buffer DMA map memory\n");
		err = ENOMEM;
		goto fail;
	}
	/* TSO maps are only needed when the capability is present. */
	if (tso && (txq->ift_sds.ifsd_tso_map = (bus_dmamap_t *)malloc(
	    sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
	    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
		device_printf(dev,
		    "Unable to allocate TSO TX buffer map memory\n");
		err = ENOMEM;
		goto fail;
	}
	for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
		err = bus_dmamap_create(txq->ift_buf_tag, 0,
		    &txq->ift_sds.ifsd_map[i]);
		if (err != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
		if (!tso)
			continue;
		err = bus_dmamap_create(txq->ift_tso_buf_tag, 0,
		    &txq->ift_sds.ifsd_tso_map[i]);
		if (err != 0) {
			device_printf(dev, "Unable to create TSO TX DMA map\n");
			goto fail;
		}
	}
	return (0);
fail:
	/* We free all, it handles case where we are in the middle */
	iflib_tx_structures_free(ctx);
	return (err);
}
/*
 * Destroy the regular and TSO DMA maps for TX slot i, syncing and
 * unloading each before destruction.  NULL map arrays (never allocated
 * or already freed) are skipped.
 */
static void
iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
{
	bus_dmamap_t map;

	if (txq->ift_sds.ifsd_map != NULL) {
		map = txq->ift_sds.ifsd_map[i];
		bus_dmamap_sync(txq->ift_buf_tag, map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txq->ift_buf_tag, map);
		bus_dmamap_destroy(txq->ift_buf_tag, map);
		txq->ift_sds.ifsd_map[i] = NULL;
	}

	if (txq->ift_sds.ifsd_tso_map != NULL) {
		map = txq->ift_sds.ifsd_tso_map[i];
		bus_dmamap_sync(txq->ift_tso_buf_tag, map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txq->ift_tso_buf_tag, map);
		bus_dmamap_destroy(txq->ift_tso_buf_tag, map);
		txq->ift_sds.ifsd_tso_map[i] = NULL;
	}
}
/*
 * Tear down all software resources of a TX queue: per-slot DMA maps, the
 * mp ring, the queue mutex, the slot arrays, the buffer DMA tags, and the
 * descriptor DMA info array.
 */
static void
iflib_txq_destroy(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;

	/* Per-slot maps first, while the map arrays and tags still exist. */
	for (int i = 0; i < txq->ift_size; i++)
		iflib_txsd_destroy(ctx, txq, i);

	if (txq->ift_br != NULL) {
		ifmp_ring_free(txq->ift_br);
		txq->ift_br = NULL;
	}

	mtx_destroy(&txq->ift_mtx);

	if (txq->ift_sds.ifsd_map != NULL) {
		free(txq->ift_sds.ifsd_map, M_IFLIB);
		txq->ift_sds.ifsd_map = NULL;
	}
	if (txq->ift_sds.ifsd_tso_map != NULL) {
		free(txq->ift_sds.ifsd_tso_map, M_IFLIB);
		txq->ift_sds.ifsd_tso_map = NULL;
	}
	if (txq->ift_sds.ifsd_m != NULL) {
		free(txq->ift_sds.ifsd_m, M_IFLIB);
		txq->ift_sds.ifsd_m = NULL;
	}
	if (txq->ift_buf_tag != NULL) {
		bus_dma_tag_destroy(txq->ift_buf_tag);
		txq->ift_buf_tag = NULL;
	}
	if (txq->ift_tso_buf_tag != NULL) {
		bus_dma_tag_destroy(txq->ift_tso_buf_tag);
		txq->ift_tso_buf_tag = NULL;
	}
	if (txq->ift_ifdi != NULL) {
		free(txq->ift_ifdi, M_IFLIB);
	}
}
/*
 * Unload the DMA maps for TX slot i and free its mbuf, if one is
 * attached.  No-op when the slot holds no mbuf.
 */
static void
iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
{
	struct mbuf **mp;

	mp = &txq->ift_sds.ifsd_m[i];
	if (*mp == NULL)
		return;

	if (txq->ift_sds.ifsd_map != NULL) {
		bus_dmamap_sync(txq->ift_buf_tag,
		    txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[i]);
	}
	if (txq->ift_sds.ifsd_tso_map != NULL) {
		bus_dmamap_sync(txq->ift_tso_buf_tag,
		    txq->ift_sds.ifsd_tso_map[i], BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txq->ift_tso_buf_tag,
		    txq->ift_sds.ifsd_tso_map[i]);
	}
	m_freem(*mp);
	DBG_COUNTER_INC(tx_frees);
	*mp = NULL;
}
/*
 * (Re)initialize a TX queue's software state and descriptor rings: reset
 * the indices, zero the descriptor memory, let the driver set up the
 * queue, and sync the descriptor DMA maps.  Always returns 0.
 */
static int
iflib_txq_setup(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	iflib_dma_info_t di;
	int i;

	/* Set number of descriptors available */
	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
	/* XXX make configurable */
	txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;

	/* Reset indices */
	txq->ift_cidx_processed = 0;
	txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
	txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];

	for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
		bzero((void *)di->idi_vaddr, di->idi_size);

	IFDI_TXQ_SETUP(ctx, txq->ift_id);
	for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
		bus_dmamap_sync(di->idi_tag, di->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}
/*********************************************************************
*
* Allocate DMA resources for RX buffers as well as memory for the RX
* mbuf map, direct RX cluster pointer map and RX cluster bus address
* map. RX DMA map, RX mbuf map, direct RX cluster pointer map and
* RX cluster map are kept in a iflib_sw_rx_desc_array structure.
* Since we use use one entry in iflib_sw_rx_desc_array per received
* packet, the maximum number of entries we'll need is equal to the
* number of hardware receive descriptors that we've allocated.
*
**********************************************************************/
/*
 * Allocate, for every free list of @rxq: the RX buffer DMA tag, the
 * software descriptor arrays (mbuf map, cluster pointer map, cluster
 * bus-address map, DMA map array), and one DMA map per descriptor.
 * On any failure all RX structures of the context are torn down via
 * iflib_rx_structures_free() and the error is returned.
 */
static int
iflib_rxsd_alloc(iflib_rxq_t rxq)
{
	if_ctx_t ctx = rxq->ifr_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = ctx->ifc_dev;
	iflib_fl_t fl;
	bus_addr_t lowaddr;
	int err;

	MPASS(scctx->isc_nrxd[0] > 0);
	MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);

	lowaddr = DMA_WIDTH_TO_BUS_LOWADDR(scctx->isc_dma_width);

	fl = rxq->ifr_fl;
	for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
		fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
		/* Set up DMA tag for RX buffers. */
		err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
		    1, 0, /* alignment, bounds */
		    lowaddr, /* lowaddr */
		    BUS_SPACE_MAXADDR, /* highaddr */
		    NULL, NULL, /* filter, filterarg */
		    sctx->isc_rx_maxsize, /* maxsize */
		    sctx->isc_rx_nsegments, /* nsegments */
		    sctx->isc_rx_maxsegsize, /* maxsegsize */
		    0, /* flags */
		    NULL, /* lockfunc */
		    NULL, /* lockarg */
		    &fl->ifl_buf_tag);
		if (err) {
			device_printf(dev,
			    "Unable to allocate RX DMA tag: %d\n", err);
			goto fail;
		}

		/* Allocate memory for the RX mbuf map. */
		if (!(fl->ifl_sds.ifsd_m =
		    (struct mbuf **) malloc(sizeof(struct mbuf *) *
		    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev,
			    "Unable to allocate RX mbuf map memory\n");
			err = ENOMEM;
			goto fail;
		}

		/* Allocate memory for the direct RX cluster pointer map. */
		if (!(fl->ifl_sds.ifsd_cl =
		    (caddr_t *) malloc(sizeof(caddr_t) *
		    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev,
			    "Unable to allocate RX cluster map memory\n");
			err = ENOMEM;
			goto fail;
		}

		/* Allocate memory for the RX cluster bus address map. */
		if (!(fl->ifl_sds.ifsd_ba =
		    (bus_addr_t *) malloc(sizeof(bus_addr_t) *
		    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev,
			    "Unable to allocate RX bus address map memory\n");
			err = ENOMEM;
			goto fail;
		}

		/*
		 * Create the DMA maps for RX buffers.
		 */
		if (!(fl->ifl_sds.ifsd_map =
		    (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev,
			    "Unable to allocate RX buffer DMA map memory\n");
			err = ENOMEM;
			goto fail;
		}
		/* One DMA map per receive descriptor. */
		for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
			err = bus_dmamap_create(fl->ifl_buf_tag, 0,
			    &fl->ifl_sds.ifsd_map[i]);
			if (err != 0) {
				device_printf(dev, "Unable to create RX buffer DMA map\n");
				goto fail;
			}
		}
	}
	return (0);

fail:
	/* Tears down everything allocated so far (whole-context cleanup). */
	iflib_rx_structures_free(ctx);
	return (err);
}
/*
* Internal service routines
*/
/* Result of a synchronous bus_dmamap_load() issued during RX refill. */
struct rxq_refill_cb_arg {
	int error;		/* error reported by the busdma callback */
	bus_dma_segment_t seg;	/* first (and only expected) DMA segment */
	int nseg;		/* number of segments in the mapping */
};
/*
 * bus_dmamap_load() callback used during RX refill: record the mapping
 * outcome (error, segment count, first segment) for the waiting caller.
 */
static void
_rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct rxq_refill_cb_arg *state;

	state = arg;
	state->error = error;
	state->nseg = nseg;
	state->seg = segs[0];
}
/**
* iflib_fl_refill - refill an rxq free-buffer list
* @ctx: the iflib context
* @fl: the free list to refill
* @count: the number of new buffers to allocate
*
* (Re)populate an rxq free-buffer list with up to @count new packet buffers.
* The caller must assure that @count does not exceed the queue's capacity
* minus one (since we always leave a descriptor unavailable).
*/
static uint8_t
iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
{
	struct if_rxd_update iru;
	struct rxq_refill_cb_arg cb_arg;
	struct mbuf *m;
	caddr_t cl, *sd_cl;
	struct mbuf **sd_m;
	bus_dmamap_t *sd_map;
	bus_addr_t bus_addr, *sd_ba;
	int err, frag_idx, i, idx, n, pidx;
	qidx_t credits;

	MPASS(count <= fl->ifl_size - fl->ifl_credits - 1);

	/* Cache the software descriptor arrays and current ring state. */
	sd_m = fl->ifl_sds.ifsd_m;
	sd_map = fl->ifl_sds.ifsd_map;
	sd_cl = fl->ifl_sds.ifsd_cl;
	sd_ba = fl->ifl_sds.ifsd_ba;
	pidx = fl->ifl_pidx;
	idx = pidx;
	frag_idx = fl->ifl_fragidx;
	credits = fl->ifl_credits;

	i = 0;
	n = count;
	MPASS(n > 0);
	MPASS(credits + n <= fl->ifl_size);

	if (pidx < fl->ifl_cidx)
		MPASS(pidx + n <= fl->ifl_cidx);
	if (pidx == fl->ifl_cidx && (credits < fl->ifl_size))
		MPASS(fl->ifl_gen == 0);
	if (pidx > fl->ifl_cidx)
		MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx);

	DBG_COUNTER_INC(fl_refills);
	if (n > 8)
		DBG_COUNTER_INC(fl_refills_large);
	iru_init(&iru, fl->ifl_rxq, fl->ifl_id);
	/* Loop exits early (break) on any allocation/mapping failure. */
	while (n-- > 0) {
		/*
		 * We allocate an uninitialized mbuf + cluster, mbuf is
		 * initialized after rx.
		 *
		 * If the cluster is still set then we know a minimum sized
		 * packet was received
		 */
		/* Find the next free descriptor slot, wrapping if needed. */
		bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size,
		    &frag_idx);
		if (frag_idx < 0)
			bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx);
		MPASS(frag_idx >= 0);
		if ((cl = sd_cl[frag_idx]) == NULL) {
			/* No recycled cluster here; allocate and map one. */
			cl = uma_zalloc(fl->ifl_zone, M_NOWAIT);
			if (__predict_false(cl == NULL))
				break;

			cb_arg.error = 0;
			MPASS(sd_map != NULL);
			err = bus_dmamap_load(fl->ifl_buf_tag, sd_map[frag_idx],
			    cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg,
			    BUS_DMA_NOWAIT);
			if (__predict_false(err != 0 || cb_arg.error)) {
				uma_zfree(fl->ifl_zone, cl);
				break;
			}

			sd_ba[frag_idx] = bus_addr = cb_arg.seg.ds_addr;
			sd_cl[frag_idx] = cl;
#if MEMORY_LOGGING
			fl->ifl_cl_enqueued++;
#endif
		} else {
			/* Recycle the still-mapped cluster left in place. */
			bus_addr = sd_ba[frag_idx];
		}
		bus_dmamap_sync(fl->ifl_buf_tag, sd_map[frag_idx],
		    BUS_DMASYNC_PREREAD);

		/* Pair the cluster with an (uninitialized) mbuf header. */
		if (sd_m[frag_idx] == NULL) {
			m = m_gethdr_raw(M_NOWAIT, 0);
			if (__predict_false(m == NULL))
				break;
			sd_m[frag_idx] = m;
		}
		bit_set(fl->ifl_rx_bitmap, frag_idx);
#if MEMORY_LOGGING
		fl->ifl_m_enqueued++;
#endif

		DBG_COUNTER_INC(rx_allocs);
		fl->ifl_rxd_idxs[i] = frag_idx;
		fl->ifl_bus_addrs[i] = bus_addr;
		credits++;
		i++;
		MPASS(credits <= fl->ifl_size);
		if (++idx == fl->ifl_size) {
#ifdef INVARIANTS
			fl->ifl_gen = 1;
#endif
			idx = 0;
		}
		/* Hand batches of descriptors to the driver. */
		if (n == 0 || i == IFLIB_MAX_RX_REFRESH) {
			iru.iru_pidx = pidx;
			iru.iru_count = i;
			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
			fl->ifl_pidx = idx;
			fl->ifl_credits = credits;
			pidx = idx;
			i = 0;
		}
	}

	/* If at least one descriptor was posted, flush and notify hardware. */
	if (n < count - 1) {
		if (i != 0) {
			iru.iru_pidx = pidx;
			iru.iru_count = i;
			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
			fl->ifl_pidx = idx;
			fl->ifl_credits = credits;
		}
		DBG_COUNTER_INC(rxd_flush);
		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id,
		    fl->ifl_id, fl->ifl_pidx);
		/* Remember where to resume the free-slot search next time. */
		if (__predict_true(bit_test(fl->ifl_rx_bitmap, frag_idx))) {
			fl->ifl_fragidx = frag_idx + 1;
			if (fl->ifl_fragidx == fl->ifl_size)
				fl->ifl_fragidx = 0;
		} else {
			fl->ifl_fragidx = frag_idx;
		}
	}

	/* n == -1 means all @count buffers were posted; otherwise we ran short. */
	return (n == -1 ? 0 : IFLIB_RXEOF_EMPTY);
}
/*
 * Refill @fl up to its capacity minus one descriptor.  Returns the
 * result of iflib_fl_refill() (0 or IFLIB_RXEOF_EMPTY), or 0 when
 * nothing needed refilling.
 */
static inline uint8_t
iflib_fl_refill_all(if_ctx_t ctx, iflib_fl_t fl)
{
	/*
	 * We leave an unused descriptor to avoid pidx catching up with cidx.
	 * This is important as it confuses most NICs. For instance,
	 * Intel NICs have (per receive ring) RDH and RDT registers, where
	 * RDH points to the next receive descriptor to be used by the NIC,
	 * and RDT for the next receive descriptor to be published by the
	 * driver to the NIC (RDT - 1 is thus the last valid one).
	 * The condition RDH == RDT means no descriptors are available to
	 * the NIC, and thus it would be ambiguous if it also meant that
	 * all the descriptors are available to the NIC.
	 */
	int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1;
#ifdef INVARIANTS
	int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1;
#endif

	MPASS(fl->ifl_credits <= fl->ifl_size);
	MPASS(reclaimable == delta);

	if (reclaimable > 0)
		return (iflib_fl_refill(ctx, fl, reclaimable));
	return (0);
}
/*
 * Report whether the context is currently being detached.  The flag is
 * sampled under the state lock so the read is consistent.
 */
uint8_t
iflib_in_detach(if_ctx_t ctx)
{
	bool detaching;

	STATE_LOCK(ctx);
	detaching = ((ctx->ifc_flags & IFC_IN_DETACH) != 0);
	STATE_UNLOCK(ctx);
	return (detaching);
}
/*
 * Release every cluster and mbuf held by a free list, unload their DMA
 * maps, reset the list's indices/credits, and zero the backing hardware
 * descriptor memory.  The DMA maps themselves are kept for reuse.
 */
static void
iflib_fl_bufs_free(iflib_fl_t fl)
{
	iflib_dma_info_t idi = fl->ifl_ifdi;
	bus_dmamap_t sd_map;
	uint32_t i;

	for (i = 0; i < fl->ifl_size; i++) {
		struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i];
		caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i];

		if (*sd_cl != NULL) {
			sd_map = fl->ifl_sds.ifsd_map[i];
			bus_dmamap_sync(fl->ifl_buf_tag, sd_map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(fl->ifl_buf_tag, sd_map);
			uma_zfree(fl->ifl_zone, *sd_cl);
			*sd_cl = NULL;
			if (*sd_m != NULL) {
				/* Re-init before free: mbuf was left raw at refill. */
				m_init(*sd_m, M_NOWAIT, MT_DATA, 0);
				m_free_raw(*sd_m);
				*sd_m = NULL;
			}
		} else {
			MPASS(*sd_m == NULL);
		}
#if MEMORY_LOGGING
		fl->ifl_m_dequeued++;
		fl->ifl_cl_dequeued++;
#endif
	}
#ifdef INVARIANTS
	for (i = 0; i < fl->ifl_size; i++) {
		MPASS(fl->ifl_sds.ifsd_cl[i] == NULL);
		MPASS(fl->ifl_sds.ifsd_m[i] == NULL);
	}
#endif
	/*
	 * Reset free list values
	 */
	fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0;
	bzero(idi->idi_vaddr, idi->idi_size);
}
/*********************************************************************
*
* Initialize a free list and its buffers.
*
**********************************************************************/
/*
 * Initialize a free list for use: drop any stale buffers, select the
 * cluster size/zone for the configured frame size, and pre-populate a
 * modest number of RX buffers.  Returns 0 on success, or ENOBUFS when
 * the initial refill could not fully complete.
 */
static int
iflib_fl_setup(iflib_fl_t fl)
{
	iflib_rxq_t rxq = fl->ifl_rxq;
	if_ctx_t ctx = rxq->ifr_ctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	int qidx, nbufs;

	bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1);
	/*
	 * Free current RX buffer structs and their mbufs.
	 */
	iflib_fl_bufs_free(fl);
	/* Now replenish the mbufs */
	MPASS(fl->ifl_credits == 0);
	/* Prefer a driver-supplied per-queue buffer size when set. */
	qidx = rxq->ifr_fl_offset + fl->ifl_id;
	if (scctx->isc_rxd_buf_size[qidx] != 0)
		fl->ifl_buf_size = scctx->isc_rxd_buf_size[qidx];
	else
		fl->ifl_buf_size = ctx->ifc_rx_mbuf_sz;
	/*
	 * ifl_buf_size may be a driver-supplied value, so pull it up
	 * to the selected mbuf size.
	 */
	fl->ifl_buf_size = iflib_get_mbuf_size_for(fl->ifl_buf_size);
	if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size)
		ctx->ifc_max_fl_buf_size = fl->ifl_buf_size;
	fl->ifl_cltype = m_gettype(fl->ifl_buf_size);
	fl->ifl_zone = m_getzone(fl->ifl_buf_size);

	/*
	 * Avoid pre-allocating zillions of clusters to an idle card
	 * potentially speeding up attach. In any case make sure
	 * to leave a descriptor unavailable. See the comment in
	 * iflib_fl_refill_all().
	 */
	MPASS(fl->ifl_size > 0);
	/*
	 * Compute the initial refill target once instead of evaluating
	 * min(128, fl->ifl_size - 1) twice as the original code did.
	 */
	nbufs = min(128, fl->ifl_size - 1);
	(void)iflib_fl_refill(ctx, fl, nbufs);
	if (nbufs != fl->ifl_credits)
		return (ENOBUFS);
	/*
	 * handle failure
	 */
	MPASS(rxq != NULL);
	MPASS(fl->ifl_ifdi != NULL);
	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}
/*********************************************************************
*
* Free receive ring data structures
*
**********************************************************************/
/*
 * Free all software descriptor state for an RX queue: per-descriptor
 * DMA maps, the buffer DMA tag, the software descriptor arrays, the RX
 * bitmap, the free-list array itself, and the queue's DMA info array.
 * Safe to call on a partially constructed queue (all pointers checked).
 */
static void
iflib_rx_sds_free(iflib_rxq_t rxq)
{
	iflib_fl_t fl;
	int i, j;

	if (rxq->ifr_fl != NULL) {
		for (i = 0; i < rxq->ifr_nfl; i++) {
			fl = &rxq->ifr_fl[i];
			if (fl->ifl_buf_tag != NULL) {
				if (fl->ifl_sds.ifsd_map != NULL) {
					/* Sync/unload before destroying each map. */
					for (j = 0; j < fl->ifl_size; j++) {
						bus_dmamap_sync(
						    fl->ifl_buf_tag,
						    fl->ifl_sds.ifsd_map[j],
						    BUS_DMASYNC_POSTREAD);
						bus_dmamap_unload(
						    fl->ifl_buf_tag,
						    fl->ifl_sds.ifsd_map[j]);
						bus_dmamap_destroy(
						    fl->ifl_buf_tag,
						    fl->ifl_sds.ifsd_map[j]);
					}
				}
				bus_dma_tag_destroy(fl->ifl_buf_tag);
				fl->ifl_buf_tag = NULL;
			}
			free(fl->ifl_sds.ifsd_m, M_IFLIB);
			free(fl->ifl_sds.ifsd_cl, M_IFLIB);
			free(fl->ifl_sds.ifsd_ba, M_IFLIB);
			free(fl->ifl_sds.ifsd_map, M_IFLIB);
			free(fl->ifl_rx_bitmap, M_IFLIB);
			fl->ifl_sds.ifsd_m = NULL;
			fl->ifl_sds.ifsd_cl = NULL;
			fl->ifl_sds.ifsd_ba = NULL;
			fl->ifl_sds.ifsd_map = NULL;
			fl->ifl_rx_bitmap = NULL;
		}
		free(rxq->ifr_fl, M_IFLIB);
		rxq->ifr_fl = NULL;
		free(rxq->ifr_ifdi, M_IFLIB);
		rxq->ifr_ifdi = NULL;
		rxq->ifr_cq_cidx = 0;
	}
}
/*
* Timer routine
*/
/*
 * Timer routine
 */
/*
 * Per-TX-queue watchdog callout.  Periodically checks for queue stall
 * or hang; on a hang it marks the interface inactive, requests a reset
 * via the admin task, and does not rearm itself.  Otherwise it kicks
 * any deferred TX work and reschedules.
 */
static void
iflib_timer(void *arg)
{
	iflib_txq_t txq = arg;
	if_ctx_t ctx = txq->ift_ctx;
	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
	uint64_t this_tick = ticks;

	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
		return;

	/*
	 * Check on the state of the TX queue(s), this
	 * can be done without the lock because its RO
	 * and the HUNG state will be static if set.
	 */
	if (this_tick - txq->ift_last_timer_tick >= iflib_timer_default) {
		txq->ift_last_timer_tick = this_tick;
		IFDI_TIMER(ctx, txq->ift_id);
		/* Hung and still no progress (and no pause frames to excuse it)? */
		if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) &&
		    ((txq->ift_cleaned_prev == txq->ift_cleaned) ||
		     (sctx->isc_pause_frames == 0)))
			goto hung;

		if (txq->ift_qstatus != IFLIB_QUEUE_IDLE &&
		    ifmp_ring_is_stalled(txq->ift_br)) {
			KASSERT(ctx->ifc_link_state == LINK_STATE_UP,
			    ("queue can't be marked as hung if interface is down"));
			txq->ift_qstatus = IFLIB_QUEUE_HUNG;
		}
		txq->ift_cleaned_prev = txq->ift_cleaned;
	}
	/* handle any laggards */
	if (txq->ift_db_pending)
		GROUPTASK_ENQUEUE(&txq->ift_task);

	sctx->isc_pause_frames = 0;
	if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)
		callout_reset_on(&txq->ift_timer, iflib_timer_default, iflib_timer,
		    txq, txq->ift_timer.c_cpu);
	return;

hung:
	device_printf(ctx->ifc_dev,
	    "Watchdog timeout (TX: %d desc avail: %d pidx: %d) -- resetting\n",
	    txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
	STATE_LOCK(ctx);
	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
	ctx->ifc_flags |= (IFC_DO_WATCHDOG|IFC_DO_RESET);
	iflib_admin_intr_deferred(ctx);
	STATE_UNLOCK(ctx);
}
/*
 * Map a requested buffer size to the mbuf cluster size used to back it:
 * a standard cluster when it fits, otherwise a page-sized jumbo cluster.
 */
static uint16_t
iflib_get_mbuf_size_for(unsigned int size)
{

	return (size <= MCLBYTES ? MCLBYTES : MJUMPAGESIZE);
}
/*
 * Select the RX cluster size for this context based on the maximum
 * frame size advertised by the driver; cached for later queries via
 * iflib_get_rx_mbuf_sz().
 */
static void
iflib_calc_rx_mbuf_sz(if_ctx_t ctx)
{
	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;

	/*
	 * XXX don't set the max_frame_size to larger
	 * than the hardware can handle
	 */
	ctx->ifc_rx_mbuf_sz =
	    iflib_get_mbuf_size_for(sctx->isc_max_frame_size);
}
/* Accessor for the RX buffer size chosen by iflib_calc_rx_mbuf_sz(). */
uint32_t
iflib_get_rx_mbuf_sz(if_ctx_t ctx)
{

	return (ctx->ifc_rx_mbuf_sz);
}
/*
 * Bring the interface up with the context lock held: disable interrupts,
 * program checksum/TSO offload from the enabled capabilities, reset all
 * TX/RX queues, run the driver's IFDI_INIT, repopulate the RX free
 * lists, then mark the interface running and re-enable interrupts.
 */
static void
iflib_init_locked(if_ctx_t ctx)
{
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	if_t ifp = ctx->ifc_ifp;
	iflib_fl_t fl;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	int i, j, tx_ip_csum_flags, tx_ip6_csum_flags;

	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
	IFDI_INTR_DISABLE(ctx);

	/*
	 * See iflib_stop(). Useful in case iflib_init_locked() is
	 * called without first calling iflib_stop().
	 */
	netmap_disable_all_rings(ifp);

	tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP);
	tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP);
	/* Set hardware offload abilities */
	if_clearhwassist(ifp);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
		if_sethwassistbits(ifp, tx_ip_csum_flags, 0);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
		if_sethwassistbits(ifp, tx_ip6_csum_flags, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO4)
		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO6)
		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);

	/* Quiesce each TX queue's callouts and reset its state. */
	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
		CALLOUT_LOCK(txq);
		callout_stop(&txq->ift_timer);
#ifdef DEV_NETMAP
		callout_stop(&txq->ift_netmap_timer);
#endif /* DEV_NETMAP */
		CALLOUT_UNLOCK(txq);
		(void)iflib_netmap_txq_init(ctx, txq);
	}

	/*
	 * Calculate a suitable Rx mbuf size prior to calling IFDI_INIT, so
	 * that drivers can use the value when setting up the hardware receive
	 * buffers.
	 */
	iflib_calc_rx_mbuf_sz(ctx);

#ifdef INVARIANTS
	i = if_getdrvflags(ifp);
#endif
	IFDI_INIT(ctx);
	/* IFDI_INIT must not change the interface driver flags. */
	MPASS(if_getdrvflags(ifp) == i);
	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
		if (iflib_netmap_rxq_init(ctx, rxq) > 0) {
			/* This rxq is in netmap mode. Skip normal init. */
			continue;
		}
		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
			if (iflib_fl_setup(fl)) {
				device_printf(ctx->ifc_dev,
				    "setting up free list %d failed - "
				    "check cluster settings\n", j);
				goto done;
			}
		}
	}
done:
	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
	IFDI_INTR_ENABLE(ctx);
	txq = ctx->ifc_txqs;
	for (i = 0; i < scctx->isc_ntxqsets; i++, txq++)
		callout_reset_on(&txq->ift_timer, iflib_timer_default, iflib_timer, txq,
		    txq->ift_timer.c_cpu);

	/* Re-enable txsync/rxsync. */
	netmap_enable_all_rings(ifp);
}
/*
 * ifmedia change handler: ask the driver to switch media and, if that
 * succeeds, re-run interface initialization.  Returns the driver error.
 */
static int
iflib_media_change(if_t ifp)
{
	if_ctx_t ctx = if_getsoftc(ifp);
	int rc;

	CTX_LOCK(ctx);
	rc = IFDI_MEDIA_CHANGE(ctx);
	if (rc == 0)
		iflib_if_init_locked(ctx);
	CTX_UNLOCK(ctx);
	return (rc);
}
/*
 * ifmedia status handler: refresh the admin status, then have the
 * driver fill in the current media word for @ifmr.
 */
static void
iflib_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	if_ctx_t ctx = if_getsoftc(ifp);

	CTX_LOCK(ctx);
	IFDI_UPDATE_ADMIN_STATUS(ctx);
	IFDI_MEDIA_STATUS(ctx, ifmr);
	CTX_UNLOCK(ctx);
}
/*
 * Quiesce the interface: mark it down, stop the hardware, disable
 * netmap rings, then drain and reset every TX and RX queue's software
 * state and free all RX buffers.
 */
void
iflib_stop(if_ctx_t ctx)
{
	iflib_txq_t txq = ctx->ifc_txqs;
	iflib_rxq_t rxq = ctx->ifc_rxqs;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	iflib_dma_info_t di;
	iflib_fl_t fl;
	int i, j;

	/* Tell the stack that the interface is no longer active */
	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

	IFDI_INTR_DISABLE(ctx);
	DELAY(1000);
	IFDI_STOP(ctx);
	DELAY(1000);

	/*
	 * Stop any pending txsync/rxsync and prevent new ones
	 * from starting. Processes blocked in poll() will get
	 * POLLERR.
	 */
	netmap_disable_all_rings(ctx->ifc_ifp);

	iflib_debug_reset();
	/* Wait for current tx queue users to exit to disarm watchdog timer. */
	for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) {
		/* make sure all transmitters have completed before proceeding XXX */

		CALLOUT_LOCK(txq);
		callout_stop(&txq->ift_timer);
#ifdef DEV_NETMAP
		callout_stop(&txq->ift_netmap_timer);
#endif /* DEV_NETMAP */
		CALLOUT_UNLOCK(txq);

		/* clean any enqueued buffers */
		iflib_ifmp_purge(txq);
		/* Free any existing tx buffers. */
		for (j = 0; j < txq->ift_size; j++) {
			iflib_txsd_free(ctx, txq, j);
		}
		/* Reset all TX accounting to a pristine state. */
		txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0;
		txq->ift_in_use = txq->ift_gen = txq->ift_no_desc_avail = 0;
		if (sctx->isc_flags & IFLIB_PRESERVE_TX_INDICES)
			txq->ift_cidx = txq->ift_pidx;
		else
			txq->ift_cidx = txq->ift_pidx = 0;

		txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0;
		txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0;
		txq->ift_pullups = 0;
		ifmp_ring_reset_stats(txq->ift_br);
		for (j = 0, di = txq->ift_ifdi; j < sctx->isc_ntxqs; j++, di++)
			bzero((void *)di->idi_vaddr, di->idi_size);
	}
	for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) {
		/* Make sure no RX task is still running against this queue. */
		if (rxq->ifr_task.gt_taskqueue != NULL)
			gtaskqueue_drain(rxq->ifr_task.gt_taskqueue,
			    &rxq->ifr_task.gt_task);

		rxq->ifr_cq_cidx = 0;
		for (j = 0, di = rxq->ifr_ifdi; j < sctx->isc_nrxqs; j++, di++)
			bzero((void *)di->idi_vaddr, di->idi_size);
		/* also resets the free lists pidx/cidx */
		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
			iflib_fl_bufs_free(fl);
	}
}
/*
 * Return a pointer suitable for prefetching the cache line after the
 * hardware RX descriptor at @cidx, wrapping back to the start of the
 * ring when the computed address would run past its end.
 */
static inline caddr_t
calc_next_rxd(iflib_fl_t fl, int cidx)
{
	caddr_t base, limit, candidate;
	qidx_t desc_sz;
	int ndesc;

	ndesc = fl->ifl_size;
	desc_sz = fl->ifl_rxd_size;
	base = fl->ifl_ifdi->idi_vaddr;

	if (__predict_false(desc_sz == 0))
		return (base);

	limit = base + desc_sz * ndesc;
	candidate = CACHE_PTR_NEXT(base + desc_sz * cidx);
	return (candidate < limit ? candidate : base);
}
/*
 * Warm the caches ahead of RX processing: prefetch the upcoming software
 * descriptor array entries, the next hardware descriptor cache line, and
 * the mbuf/cluster pointers for the next four packets.
 */
static inline void
prefetch_pkts(iflib_fl_t fl, int cidx)
{
	int mask = fl->ifl_size - 1;
	int ahead;
	int k;

	ahead = (cidx + CACHE_PTR_INCREMENT) & mask;
	prefetch(&fl->ifl_sds.ifsd_m[ahead]);
	prefetch(&fl->ifl_sds.ifsd_cl[ahead]);
	prefetch(calc_next_rxd(fl, cidx));
	for (k = 1; k <= 4; k++)
		prefetch(fl->ifl_sds.ifsd_m[(cidx + k) & mask]);
	for (k = 1; k <= 4; k++)
		prefetch(fl->ifl_sds.ifsd_cl[(cidx + k) & mask]);
}
/*
 * Convert one received fragment into its software-descriptor state:
 * returns the mbuf for the fragment (or NULL / a pfil-reallocated mbuf),
 * fills @sd with the free list and cluster slot, runs any pfil input
 * hooks, optionally unloads the DMA map, and advances the free list's
 * consumer index.  *pf_rv (when non-NULL) receives the pfil verdict.
 */
static struct mbuf *
rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, bool unload, if_rxsd_t sd,
    int *pf_rv, if_rxd_info_t ri)
{
	bus_dmamap_t map;
	iflib_fl_t fl;
	caddr_t payload;
	struct mbuf *m;
	int flid, cidx, len, next;

	map = NULL;
	flid = irf->irf_flid;
	cidx = irf->irf_idx;
	fl = &rxq->ifr_fl[flid];
	sd->ifsd_fl = fl;
	sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx];
	fl->ifl_credits--;
#if MEMORY_LOGGING
	fl->ifl_m_dequeued++;
#endif
	if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH)
		prefetch_pkts(fl, cidx);
	next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1);
	prefetch(&fl->ifl_sds.ifsd_map[next]);
	map = fl->ifl_sds.ifsd_map[cidx];

	/* CPU is about to read DMA-written data; sync for it. */
	bus_dmamap_sync(fl->ifl_buf_tag, map, BUS_DMASYNC_POSTREAD);

	if (rxq->pfil != NULL && PFIL_HOOKED_IN(rxq->pfil) && pf_rv != NULL &&
	    irf->irf_len != 0) {
		payload  = *sd->ifsd_cl;
		payload +=  ri->iri_pad;
		len = ri->iri_len - ri->iri_pad;
		*pf_rv = pfil_mem_in(rxq->pfil, payload, len, ri->iri_ifp, &m);
		switch (*pf_rv) {
		case PFIL_DROPPED:
		case PFIL_CONSUMED:
			/*
			 * The filter ate it.  Everything is recycled.
			 */
			m = NULL;
			unload = 0;
			break;
		case PFIL_REALLOCED:
			/*
			 * The filter copied it.  Everything is recycled.
			 * 'm' points at new mbuf.
			 */
			unload = 0;
			break;
		case PFIL_PASS:
			/*
			 * Filter said it was OK, so receive like
			 * normal
			 */
			m = fl->ifl_sds.ifsd_m[cidx];
			fl->ifl_sds.ifsd_m[cidx] = NULL;
			break;
		default:
			MPASS(0);
		}
	} else {
		/* No pfil hooks (or caller doesn't want a verdict). */
		m = fl->ifl_sds.ifsd_m[cidx];
		fl->ifl_sds.ifsd_m[cidx] = NULL;
		if (pf_rv != NULL)
			*pf_rv = PFIL_PASS;
	}

	if (unload && irf->irf_len != 0)
		bus_dmamap_unload(fl->ifl_buf_tag, map);
	fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1);
	if (__predict_false(fl->ifl_cidx == 0))
		fl->ifl_gen = 0;
	bit_clear(fl->ifl_rx_bitmap, cidx);
	return (m);
}
/*
 * Assemble a multi-fragment receive into an mbuf chain.  Zero-length
 * fragments and packets consumed/dropped by pfil are skipped (their
 * clusters are recycled).  Returns the head of the chain, or NULL when
 * the whole packet was consumed.
 */
static struct mbuf *
assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd, int *pf_rv)
{
	struct mbuf *m, *mh, *mt;
	caddr_t cl;
	int *pf_rv_ptr, flags, i, padlen;
	bool consumed;

	i = 0;
	mh = NULL;
	consumed = false;
	*pf_rv = PFIL_PASS;
	pf_rv_ptr = pf_rv;
	do {
		m = rxd_frag_to_sd(rxq, &ri->iri_frags[i], !consumed, sd,
		    pf_rv_ptr, ri);

		MPASS(*sd->ifsd_cl != NULL);

		/*
		 * Exclude zero-length frags & frags from
		 * packets the filter has consumed or dropped
		 */
		if (ri->iri_frags[i].irf_len == 0 || consumed ||
		    *pf_rv == PFIL_CONSUMED || *pf_rv == PFIL_DROPPED) {
			if (mh == NULL) {
				/* everything saved here */
				consumed = true;
				pf_rv_ptr = NULL;
				continue;
			}
			/* XXX we can save the cluster here, but not the mbuf */
			m_init(m, M_NOWAIT, MT_DATA, 0);
			m_free(m);
			continue;
		}
		if (mh == NULL) {
			/* First usable fragment becomes the packet header. */
			flags = M_PKTHDR|M_EXT;
			mh = mt = m;
			padlen = ri->iri_pad;
		} else {
			flags = M_EXT;
			mt->m_next = m;
			mt = m;
			/* assuming padding is only on the first fragment */
			padlen = 0;
		}
		cl = *sd->ifsd_cl;
		*sd->ifsd_cl = NULL;

		/* Can these two be made one ? */
		m_init(m, M_NOWAIT, MT_DATA, flags);
		m_cljset(m, cl, sd->ifsd_fl->ifl_cltype);
		/*
		 * These must follow m_init and m_cljset
		 */
		m->m_data += padlen;
		ri->iri_len -= padlen;
		m->m_len = ri->iri_frags[i].irf_len;
	} while (++i < ri->iri_nfrags);

	return (mh);
}
/*
* Process one software descriptor
*/
/*
 * Process one software descriptor
 */
/*
 * Turn a completed receive described by @ri into a packet mbuf.  Small
 * single-fragment packets are copied into a fresh mbuf (leaving the
 * cluster in the free list); larger ones get the cluster chain directly.
 * Packet-header metadata (len, vtag, flowid, csum) is filled from @ri.
 */
static struct mbuf *
iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
{
	struct if_rxsd sd;
	struct mbuf *m;
	int pf_rv;

	/* should I merge this back in now that the two paths are basically duplicated? */
	if (ri->iri_nfrags == 1 &&
	    ri->iri_frags[0].irf_len != 0 &&
	    ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) {
		/* Copy path: cluster stays in the free list for reuse. */
		m = rxd_frag_to_sd(rxq, &ri->iri_frags[0], false, &sd,
		    &pf_rv, ri);
		if (pf_rv != PFIL_PASS && pf_rv != PFIL_REALLOCED)
			return (m);
		if (pf_rv == PFIL_PASS) {
			m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR);
#ifndef __NO_STRICT_ALIGNMENT
			if (!IP_ALIGNED(m) && ri->iri_pad == 0)
				m->m_data += 2;
#endif
			memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len);
			m->m_len = ri->iri_frags[0].irf_len;
			m->m_data += ri->iri_pad;
			ri->iri_len -= ri->iri_pad;
		}
	} else {
		/* Zero-copy path: hand the cluster(s) to the stack. */
		m = assemble_segments(rxq, ri, &sd, &pf_rv);
		if (m == NULL)
			return (NULL);
		if (pf_rv != PFIL_PASS && pf_rv != PFIL_REALLOCED)
			return (m);
	}
	m->m_pkthdr.len = ri->iri_len;
	m->m_pkthdr.rcvif = ri->iri_ifp;
	m->m_flags |= ri->iri_flags;
	m->m_pkthdr.ether_vtag = ri->iri_vtag;
	m->m_pkthdr.flowid = ri->iri_flowid;
	M_HASHTYPE_SET(m, ri->iri_rsstype);
	m->m_pkthdr.csum_flags = ri->iri_csum_flags;
	m->m_pkthdr.csum_data = ri->iri_csum_data;
	return (m);
}
#if defined(INET6) || defined(INET)
/*
 * Sample the per-vnet IPv4/IPv6 forwarding sysctls for the interface's
 * vnet; the caller uses these to decide whether LRO is permissible
 * (LRO must be avoided when the host is forwarding).
 */
static void
iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6)
{
	CURVNET_SET(if_getvnet(lc->ifp));
#if defined(INET6)
	*v6 = V_ip6_forwarding;
#endif
#if defined(INET)
	*v4 = V_ipforwarding;
#endif
	CURVNET_RESTORE();
}
/*
* Returns true if it's possible this packet could be LROed.
* if it returns false, it is guaranteed that tcp_lro_rx()
* would not return zero.
*/
/*
 * Returns true if it's possible this packet could be LROed -
 * i.e. it is IPv4/IPv6 and the corresponding forwarding sysctl is off.
 * If it returns false, it is guaranteed that tcp_lro_rx() would not
 * return zero.
 */
static bool
iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding)
{
	struct ether_header *eh;

	eh = mtod(m, struct ether_header *);
	switch (eh->ether_type) {
#if defined(INET6)
	case htons(ETHERTYPE_IPV6):
		return (!v6_forwarding);
#endif
#if defined(INET)
	case htons(ETHERTYPE_IP):
		return (!v4_forwarding);
#endif
	}

	/* Non-IP EtherType (or protocol family not compiled in): no LRO. */
	return (false);
}
#else
/* Stub used when neither INET nor INET6 is configured; outputs untouched. */
static void
iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused)
{
}
#endif
/* RX watchdog callout handler: re-kick the queue's RX group task. */
static void
_task_fn_rx_watchdog(void *context)
{
	iflib_rxq_t rxq = context;

	GROUPTASK_ENQUEUE(&rxq->ifr_task);
}
/*
 * Main RX processing loop for one queue.  Pulls up to @budget completed
 * descriptors from the driver, builds packet mbufs, refills the free
 * lists, and hands packets to LRO or directly to if_input().  Returns a
 * bitmask of IFLIB_RXEOF_MORE (more descriptors pending) and/or
 * IFLIB_RXEOF_EMPTY (free list could not be fully refilled); on a
 * driver error it schedules a reset and returns 0.
 */
static uint8_t
iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
{
	if_t ifp;
	if_ctx_t ctx = rxq->ifr_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	int avail, i;
	qidx_t *cidxp;
	struct if_rxd_info ri;
	int err, budget_left, rx_bytes, rx_pkts;
	iflib_fl_t fl;
	int lro_enabled;
	bool v4_forwarding, v6_forwarding, lro_possible;
	uint8_t retval = 0;

	/*
	 * XXX early demux data packets so that if_input processing only handles
	 * acks in interrupt context
	 */
	struct mbuf *m, *mh, *mt, *mf;

	NET_EPOCH_ASSERT();

	lro_possible = v4_forwarding = v6_forwarding = false;
	ifp = ctx->ifc_ifp;
	mh = mt = NULL;
	MPASS(budget > 0);
	rx_pkts	= rx_bytes = 0;
	/* With a completion queue, the CQ index drives consumption. */
	if (sctx->isc_flags & IFLIB_HAS_RXCQ)
		cidxp = &rxq->ifr_cq_cidx;
	else
		cidxp = &rxq->ifr_fl[0].ifl_cidx;
	if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) {
		/* Nothing received; just top up the free lists. */
		for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
			retval |= iflib_fl_refill_all(ctx, fl);
		DBG_COUNTER_INC(rx_unavail);
		return (retval);
	}

	/* pfil needs the vnet to be set */
	CURVNET_SET_QUIET(if_getvnet(ifp));
	for (budget_left = budget; budget_left > 0 && avail > 0;) {
		if (__predict_false(!CTX_ACTIVE(ctx))) {
			DBG_COUNTER_INC(rx_ctx_inactive);
			break;
		}
		/*
		 * Reset client set fields to their default values
		 */
		rxd_info_zero(&ri);
		ri.iri_qsidx = rxq->ifr_id;
		ri.iri_cidx = *cidxp;
		ri.iri_ifp = ifp;
		ri.iri_frags = rxq->ifr_frags;
		err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);

		if (err)
			goto err;
		rx_pkts += 1;
		rx_bytes += ri.iri_len;
		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
			*cidxp = ri.iri_cidx;
			/* Update our consumer index */
			/* XXX NB: shurd - check if this is still safe */
			while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0])
				rxq->ifr_cq_cidx -= scctx->isc_nrxd[0];
			/* was this only a completion queue message? */
			if (__predict_false(ri.iri_nfrags == 0))
				continue;
		}
		MPASS(ri.iri_nfrags != 0);
		MPASS(ri.iri_len != 0);

		/* will advance the cidx on the corresponding free lists */
		m = iflib_rxd_pkt_get(rxq, &ri);
		avail--;
		budget_left--;
		if (avail == 0 && budget_left)
			avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left);

		if (__predict_false(m == NULL))
			continue;

		/* imm_pkt: -- cxgb */
		/* Link the packet onto the pending list (mh head, mt tail). */
		if (mh == NULL)
			mh = mt = m;
		else {
			mt->m_nextpkt = m;
			mt = m;
		}
	}
	CURVNET_RESTORE();
	/* make sure that we can refill faster than drain */
	for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
		retval |= iflib_fl_refill_all(ctx, fl);

	lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
	if (lro_enabled)
		iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding);
	mt = mf = NULL;
	/* Second pass: dispatch each gathered packet to LRO or if_input(). */
	while (mh != NULL) {
		m = mh;
		mh = mh->m_nextpkt;
		m->m_nextpkt = NULL;
#ifndef __NO_STRICT_ALIGNMENT
		if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL)
			continue;
#endif
#if defined(INET6) || defined(INET)
		if (lro_enabled) {
			if (!lro_possible) {
				lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding);
				/* Once LRO applies, flush the non-LRO backlog. */
				if (lro_possible && mf != NULL) {
					if_input(ifp, mf);
					DBG_COUNTER_INC(rx_if_input);
					mt = mf = NULL;
				}
			}
			if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) ==
			    (CSUM_L4_CALC|CSUM_L4_VALID)) {
				if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0)
					continue;
			}
		}
#endif
		if (lro_possible) {
			if_input(ifp, m);
			DBG_COUNTER_INC(rx_if_input);
			continue;
		}

		/* Batch non-LRO packets for a single if_input() call. */
		if (mf == NULL)
			mf = m;
		if (mt != NULL)
			mt->m_nextpkt = m;
		mt = m;
	}
	if (mf != NULL) {
		if_input(ifp, mf);
		DBG_COUNTER_INC(rx_if_input);
	}

	if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);

	/*
	 * Flush any outstanding LRO work
	 */
#if defined(INET6) || defined(INET)
	tcp_lro_flush_all(&rxq->ifr_lc);
#endif
	if (avail != 0 || iflib_rxd_avail(ctx, rxq, *cidxp, 1) != 0)
		retval |= IFLIB_RXEOF_MORE;
	return (retval);
err:
	/* Driver reported an error; request a full reset via the admin task. */
	STATE_LOCK(ctx);
	ctx->ifc_flags |= IFC_DO_RESET;
	iflib_admin_intr_deferred(ctx);
	STATE_UNLOCK(ctx);
	return (0);
}
/* Descriptors between hardware doorbell notifications, less one. */
#define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1)
/*
 * Maximum number of doorbell writes we may defer, scaled by ring
 * occupancy: the fuller the ring, the longer hardware notification can
 * safely wait.
 */
static inline qidx_t
txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use)
{
	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
	qidx_t low = txq->ift_size / 8;

	if (in_use <= low)
		return (0);
	if (in_use <= 2 * low)
		return (notify_count >> 3);
	if (in_use <= 4 * low)
		return (notify_count >> 1);
	return (notify_count);
}
/*
 * Maximum number of report-status requests we may defer, scaled by
 * current ring occupancy (minimum of 2 on a mostly idle ring).
 */
static inline qidx_t
txq_max_rs_deferred(iflib_txq_t txq)
{
	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
	qidx_t low = txq->ift_size / 8;

	if (txq->ift_in_use <= low)
		return (2);
	if (txq->ift_in_use <= 2 * low)
		return (notify_count >> 2);
	if (txq->ift_in_use <= 4 * low)
		return (notify_count >> 1);
	return (notify_count);
}
/* Convenience accessors for TX-path mbuf metadata. */
#define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags)
#define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG)
#define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use))
#define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq)
#define TXQ_MAX_DB_CONSUMED(size) (size >> 4)

/* forward compatibility for cxgb */
#define FIRST_QSET(ctx) 0
#define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets)
#define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets)
/* Map an mbuf's RSS flowid to a TX queue set index. */
#define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
#define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))

/* XXX we should be setting this to something other than zero */
#define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
/* Worst-case descriptors one packet can consume (TSO or plain). */
#define MAX_TX_DESC(ctx) MAX((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \
    (ctx)->ifc_softc_ctx.isc_tx_nsegments)
/*
 * Decide whether to ring the TX doorbell now.  Rings when @ring forces
 * it, when enough descriptors have been deferred, or when the ring is
 * nearly full.  Returns true if the doorbell was written.
 */
static inline bool
iflib_txd_db_check(iflib_txq_t txq, int ring)
{
	if_ctx_t ctx = txq->ift_ctx;
	qidx_t dbval, max;

	max = TXQ_MAX_DB_DEFERRED(txq, txq->ift_in_use);

	/* force || threshold exceeded || at the edge of the ring */
	if (ring || (txq->ift_db_pending >= max) || (TXQ_AVAIL(txq) <= MAX_TX_DESC(ctx) + 2)) {
		/*
		 * 'npending' is used if the card's doorbell is in terms of the number of descriptors
		 * pending flush (BRCM). 'pidx' is used in cases where the card's doorbell uses the
		 * producer index explicitly (INTC).
		 */
		dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx;

		/*
		 * Ensure descriptor writes reach memory before the doorbell.
		 */
		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval);

		/*
		 * Absent bugs there are zero packets pending so reset pending counts to zero.
		 */
		txq->ift_db_pending = txq->ift_npending = 0;
		return (true);
	}
	return (false);
}
#ifdef PKT_DEBUG
/* Dump the fields of an if_pkt_info for TX-path debugging. */
static void
print_pkt(if_pkt_info_t pi)
{
	printf("pi len:  %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
	    pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
	printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n",
	    pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag);
	printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
	    pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
}
#endif
/* Classify the TX offload work requested for a packet by its csum flags. */
#define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO)
#define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO))
#define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO)
#define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO))
/**
* Parses out ethernet header information in the given mbuf.
* Returns in pi: ipi_etype (EtherType) and ipi_ehdrlen (Ethernet header length)
*
* This will account for the VLAN header if present.
*
* XXX: This doesn't handle QinQ, which could prevent TX offloads for those
* types of packets.
*/
/*
 * Extract the EtherType and Ethernet header length from *mp into @pi,
 * pulling up the mbuf if the header is split.  Accounts for a single
 * 802.1Q tag.  Returns 0 on success or ENOMEM if the pullup fails
 * (in which case m_pullup() has freed the chain).
 */
static int
iflib_parse_ether_header(if_pkt_info_t pi, struct mbuf **mp, uint64_t *pullups)
{
	struct ether_vlan_header *evh;
	struct mbuf *m;

	m = *mp;
	if (__predict_false(m->m_len < sizeof(*evh))) {
		(*pullups)++;
		m = m_pullup(m, sizeof(*evh));
		if (__predict_false(m == NULL))
			return (ENOMEM);
	}
	evh = mtod(m, struct ether_vlan_header *);
	if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		/* 802.1Q-tagged frame: real EtherType follows the tag. */
		pi->ipi_etype = ntohs(evh->evl_proto);
		pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		pi->ipi_etype = ntohs(evh->evl_encap_proto);
		pi->ipi_ehdrlen = ETHER_HDR_LEN;
	}
	*mp = m;
	return (0);
}
/**
 * Parse up to the L3 header and extract IPv4/IPv6 header information into pi.
 * Currently this information includes: IP ToS value, IP header version/presence
 *
 * This is missing some checks and doesn't edit the packet content as it goes,
 * unlike iflib_parse_header(), in order to keep the amount of code here minimal.
 *
 * Used by iflib_if_transmit() to give an isc_txq_select_v2 driver method
 * enough information to pick a TX queue.  On return *mp may point to a
 * different mbuf (after m_dup()/m_pullup()); *pullups counts the pullups
 * performed so the caller can credit them to the chosen queue's stats.
 * Returns 0 or ENOMEM.
 */
static int
iflib_parse_header_partial(if_pkt_info_t pi, struct mbuf **mp, uint64_t *pullups)
{
	struct mbuf *m;
	int err;

	*pullups = 0;
	m = *mp;
	/* The parse may rearrange the chain, so work on a writable copy. */
	if (!M_WRITABLE(m)) {
		if ((m = m_dup(m, M_NOWAIT)) == NULL) {
			return (ENOMEM);
		} else {
			m_freem(*mp);
			DBG_COUNTER_INC(tx_frees);
			*mp = m;
		}
	}

	/* Fills out pi->ipi_etype */
	err = iflib_parse_ether_header(pi, mp, pullups);
	if (err)
		return (err);
	m = *mp;

	switch (pi->ipi_etype) {
#ifdef INET
	case ETHERTYPE_IP:
	{
		struct mbuf *n;
		struct ip *ip = NULL;
		int miniplen;

		miniplen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip));
		if (__predict_false(m->m_len < miniplen)) {
			/*
			 * Check for common case where the first mbuf only contains
			 * the Ethernet header
			 */
			if (m->m_len == pi->ipi_ehdrlen) {
				n = m->m_next;
				MPASS(n);
				/* If next mbuf contains at least the minimal IP header, then stop */
				if (n->m_len >= sizeof(*ip)) {
					ip = (struct ip *)n->m_data;
				} else {
					(*pullups)++;
					if (__predict_false((m = m_pullup(m, miniplen)) == NULL))
						return (ENOMEM);
					ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
				}
			} else {
				(*pullups)++;
				if (__predict_false((m = m_pullup(m, miniplen)) == NULL))
					return (ENOMEM);
				ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
			}
		} else {
			/* Header already contiguous in the first mbuf. */
			ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
		}
		/* Have the IPv4 header w/ no options here */
		pi->ipi_ip_hlen = ip->ip_hl << 2;
		pi->ipi_ipproto = ip->ip_p;
		pi->ipi_ip_tos = ip->ip_tos;
		pi->ipi_flags |= IPI_TX_IPV4;

		break;
	}
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
	{
		struct ip6_hdr *ip6;

		if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
			(*pullups)++;
			if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
				return (ENOMEM);
		}
		ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);

		/* Have the IPv6 fixed header here */
		pi->ipi_ip_hlen = sizeof(struct ip6_hdr);
		pi->ipi_ipproto = ip6->ip6_nxt;
		pi->ipi_ip_tos = IPV6_TRAFFIC_CLASS(ip6);
		pi->ipi_flags |= IPI_TX_IPV6;

		break;
	}
#endif
	default:
		/* Unknown L3 protocol: no offloads possible. */
		pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
		pi->ipi_ip_hlen = 0;
		break;
	}
	*mp = m;

	return (0);
}
/*
 * Parse the Ethernet/L3/L4 headers of *mp and fill in the fields of pi
 * needed by the driver's txd_encap method (EtherType, header lengths,
 * protocol, ToS, TCP header details for checksum/TSO offload).  Unlike
 * iflib_parse_header_partial() this may also edit the packet (pseudo-header
 * checksum seeding for TSO, ip_sum/ip_len adjustments per sctx flags).
 *
 * On return *mp may point to a different mbuf; pullups are accounted in
 * txq->ift_pullups.  Returns 0, ENOMEM on mbuf allocation failure, or
 * ENXIO if TSO is requested for a non-TCP packet.
 *
 * Fix vs. previous revision: m_pullup() may replace the chain head and
 * free the old mbufs, so any header pointer derived before a pullup is
 * stale afterwards.  The IPv4 and IPv6 branches now relocate ip/ip6 (and
 * th) after every pullup instead of reusing pre-pullup pointers.
 */
static int
iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
{
	if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx;
	struct mbuf *m;
	int err;

	m = *mp;
	/* Some drivers need a writable copy to scribble on (IFLIB_NEED_SCRATCH). */
	if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) &&
	    M_WRITABLE(m) == 0) {
		if ((m = m_dup(m, M_NOWAIT)) == NULL) {
			return (ENOMEM);
		} else {
			m_freem(*mp);
			DBG_COUNTER_INC(tx_frees);
			*mp = m;
		}
	}

	/* Fills out pi->ipi_etype */
	err = iflib_parse_ether_header(pi, mp, &txq->ift_pullups);
	if (__predict_false(err))
		return (err);
	m = *mp;

	switch (pi->ipi_etype) {
#ifdef INET
	case ETHERTYPE_IP:
	{
		struct mbuf *n;
		struct ip *ip = NULL;
		struct tcphdr *th = NULL;
		int minthlen;

		minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th));
		if (__predict_false(m->m_len < minthlen)) {
			/*
			 * if this code bloat is causing too much of a hit
			 * move it to a separate function and mark it noinline
			 */
			if (m->m_len == pi->ipi_ehdrlen) {
				/* Common case: first mbuf holds only the Ethernet header. */
				n = m->m_next;
				MPASS(n);
				if (n->m_len >= sizeof(*ip)) {
					ip = (struct ip *)n->m_data;
					if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th))
						th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
				} else {
					txq->ift_pullups++;
					if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
						return (ENOMEM);
					ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
				}
			} else {
				txq->ift_pullups++;
				if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
					return (ENOMEM);
				ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
				if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
					th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
			}
		} else {
			ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
			if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
				th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
		}
		pi->ipi_ip_hlen = ip->ip_hl << 2;
		pi->ipi_ipproto = ip->ip_p;
		pi->ipi_ip_tos = ip->ip_tos;
		pi->ipi_flags |= IPI_TX_IPV4;

		/* TCP checksum offload may require TCP header length */
		if (IS_TX_OFFLOAD4(pi)) {
			if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) {
				if (__predict_false(th == NULL)) {
					txq->ift_pullups++;
					if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL))
						return (ENOMEM);
					/*
					 * m_pullup() may have installed a new
					 * chain head (and freed the old one),
					 * so the previously computed ip may
					 * be dangling: relocate it before
					 * deriving the TCP header pointer.
					 */
					ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
					th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen);
				}
				pi->ipi_tcp_hflags = th->th_flags;
				pi->ipi_tcp_hlen = th->th_off << 2;
				pi->ipi_tcp_seq = th->th_seq;
			}
			if (IS_TSO4(pi)) {
				if (__predict_false(ip->ip_p != IPPROTO_TCP))
					return (ENXIO);
				/*
				 * TSO always requires hardware checksum offload.
				 */
				pi->ipi_csum_flags |= (CSUM_IP_TCP | CSUM_IP);
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
				if (sctx->isc_flags & IFLIB_TSO_INIT_IP) {
					ip->ip_sum = 0;
					ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz);
				}
			}
		}
		if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP))
			ip->ip_sum = 0;

		break;
	}
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
	{
		struct ip6_hdr *ip6;
		struct tcphdr *th = NULL;

		pi->ipi_ip_hlen = sizeof(struct ip6_hdr);

		if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
			txq->ift_pullups++;
			if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
				return (ENOMEM);
		}
		/*
		 * Locate the IPv6 header only after the pullup above;
		 * m_pullup() may have replaced the chain head, which would
		 * leave a pre-pullup pointer dangling.
		 */
		ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);

		/* XXX-BZ this will go badly in case of ext hdrs. */
		pi->ipi_ipproto = ip6->ip6_nxt;
		pi->ipi_ip_tos = IPV6_TRAFFIC_CLASS(ip6);
		pi->ipi_flags |= IPI_TX_IPV6;

		/* TCP checksum offload may require TCP header length */
		if (IS_TX_OFFLOAD6(pi)) {
			if (pi->ipi_ipproto == IPPROTO_TCP) {
				if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) {
					txq->ift_pullups++;
					if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL))
						return (ENOMEM);
					/* Chain head may have changed; relocate ip6. */
					ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
				}
				/* Derive th only after any pullup. */
				th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
				pi->ipi_tcp_hflags = th->th_flags;
				pi->ipi_tcp_hlen = th->th_off << 2;
				pi->ipi_tcp_seq = th->th_seq;
			}
			if (IS_TSO6(pi)) {
				if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP))
					return (ENXIO);
				/*
				 * TSO always requires hardware checksum offload.
				 * th was set above: ipi_ipproto == IPPROTO_TCP here.
				 */
				pi->ipi_csum_flags |= CSUM_IP6_TCP;
				th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
			}
		}
		break;
	}
#endif
	default:
		/* Unknown L3 protocol: strip offload requests we can't honor. */
		pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
		pi->ipi_ip_hlen = 0;
		break;
	}
	*mp = m;

	return (0);
}
/*
 * If dodgy hardware rejects the scatter gather chain we've handed it
 * we'll need to remove the mbuf chain from ifsd_m[] before we can add the
 * m_defrag'd mbufs.
 *
 * Detaches and returns the mbuf chain stored at the current producer
 * index, unloading its DMA map(s) so the slot can be reloaded.
 */
static __noinline struct mbuf *
iflib_remove_mbuf(iflib_txq_t txq)
{
	int ntxd, pidx;
	struct mbuf *m, **ifsd_m;

	/* ift_size is a power of two, so masking gives the ring slot. */
	ifsd_m = txq->ift_sds.ifsd_m;
	ntxd = txq->ift_size;
	pidx = txq->ift_pidx & (ntxd - 1);
	m = ifsd_m[pidx];
	ifsd_m[pidx] = NULL;
	bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[pidx]);
	if (txq->ift_sds.ifsd_tso_map != NULL)
		bus_dmamap_unload(txq->ift_tso_buf_tag,
		    txq->ift_sds.ifsd_tso_map[pidx]);
#if MEMORY_LOGGING
	txq->ift_dequeued++;
#endif
	return (m);
}
/*
 * Return the address of the descriptor one cache line past the descriptor
 * at index cidx in TX sub-queue qid, wrapping back to the start of the
 * descriptor array when the candidate would fall off the end.  Used only
 * as a prefetch target.
 */
static inline caddr_t
calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid)
{
	caddr_t base, limit, candidate;
	qidx_t desc_sz;
	int ndesc;

	ndesc = txq->ift_size;
	desc_sz = txq->ift_txd_size[qid];
	base = txq->ift_ifdi[qid].idi_vaddr;
	/* Drivers that don't report a descriptor size get the ring base. */
	if (__predict_false(desc_sz == 0))
		return (base);

	limit = base + desc_sz * ndesc;
	candidate = CACHE_PTR_NEXT(base + desc_sz * cidx);
	if (candidate >= limit)
		return (base);
	return (candidate);
}
/*
 * Pad an mbuf to ensure a minimum ethernet frame size.
 * min_frame_size is the frame size (less CRC) to pad the mbuf to.
 *
 * On failure the mbuf chain is freed and *m_head is left pointing at the
 * freed chain; returns ENOMEM (m_dup failure) or ENOBUFS (m_append
 * failure), 0 on success.
 */
static __noinline int
iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size)
{
	/*
	 * 18 is enough bytes to pad an ARP packet to 46 bytes, and
	 * an ARP message is the smallest common payload I can think of
	 */
	static char pad[18];	/* just zeros */
	int n;
	struct mbuf *new_head;

	/* m_append() writes into the chain, so make sure it is writable. */
	if (!M_WRITABLE(*m_head)) {
		new_head = m_dup(*m_head, M_NOWAIT);
		if (new_head == NULL) {
			m_freem(*m_head);
			device_printf(dev, "cannot pad short frame, m_dup() failed\n");
			DBG_COUNTER_INC(encap_pad_mbuf_fail);
			DBG_COUNTER_INC(tx_frees);
			return (ENOMEM);
		}
		m_freem(*m_head);
		*m_head = new_head;
	}

	/* Append blocks of zeros until the frame reaches the minimum size. */
	for (n = min_frame_size - (*m_head)->m_pkthdr.len;
	     n > 0; n -= sizeof(pad))
		if (!m_append(*m_head, min(n, sizeof(pad)), pad))
			break;

	if (n > 0) {
		/* m_append() could not allocate space; drop the frame. */
		m_freem(*m_head);
		device_printf(dev, "cannot pad short frame\n");
		DBG_COUNTER_INC(encap_pad_mbuf_fail);
		DBG_COUNTER_INC(tx_frees);
		return (ENOBUFS);
	}

	return (0);
}
/*
 * DMA-map one mbuf chain and hand it to the driver's isc_txd_encap method,
 * advancing the software producer index on success.
 *
 * Handles: optional short-frame padding (IFLIB_NEED_ETHER_PAD), header
 * parsing for checksum/VLAN/TSO offload, EFBIG recovery via m_collapse()
 * then m_defrag() (at most one remap attempt of each), and descriptor
 * accounting (ift_in_use / ift_db_pending / ift_npending).
 *
 * Returns 0 on success.  On error the mbuf is freed and *m_headp is set
 * to NULL except for ENOBUFS (ring full) and the plain ENOMEM mapping
 * failure, where the caller retains ownership of the chain.
 */
static int
iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
{
	if_ctx_t ctx;
	if_shared_ctx_t sctx;
	if_softc_ctx_t scctx;
	bus_dma_tag_t buf_tag;
	bus_dma_segment_t *segs;
	struct mbuf *m_head, **ifsd_m;
	void *next_txd;
	bus_dmamap_t map;
	struct if_pkt_info pi;
	int remap = 0;
	int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;

	ctx = txq->ift_ctx;
	sctx = ctx->ifc_sctx;
	scctx = &ctx->ifc_softc_ctx;
	segs = txq->ift_segs;
	ntxd = txq->ift_size;
	m_head = *m_headp;
	map = NULL;

	/*
	 * If we're doing TSO the next descriptor to clean may be quite far ahead
	 */
	cidx = txq->ift_cidx;
	pidx = txq->ift_pidx;
	if (ctx->ifc_flags & IFC_PREFETCH) {
		next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1);
		/*
		 * NOTE(review): IFLIB_HAS_TXCQ is tested against ifc_flags
		 * here; elsewhere sctx->isc_flags carries IFLIB_* flags —
		 * confirm the intended flag namespace.
		 */
		if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) {
			next_txd = calc_next_txd(txq, cidx, 0);
			prefetch(next_txd);
		}

		/* prefetch the next cache line of mbuf pointers and flags */
		prefetch(&txq->ift_sds.ifsd_m[next]);
		prefetch(&txq->ift_sds.ifsd_map[next]);
		next = (cidx + CACHE_LINE_SIZE) & (ntxd-1);
	}
	map = txq->ift_sds.ifsd_map[pidx];
	ifsd_m = txq->ift_sds.ifsd_m;

	/* TSO frames use the dedicated (larger-limit) DMA tag and maps. */
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		buf_tag = txq->ift_tso_buf_tag;
		max_segs = scctx->isc_tx_tso_segments_max;
		map = txq->ift_sds.ifsd_tso_map[pidx];
		MPASS(buf_tag != NULL);
		MPASS(max_segs > 0);
	} else {
		buf_tag = txq->ift_buf_tag;
		max_segs = scctx->isc_tx_nsegments;
		map = txq->ift_sds.ifsd_map[pidx];
	}
	if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) &&
	    __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) {
		err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size);
		if (err) {
			DBG_COUNTER_INC(encap_txd_encap_fail);
			return err;
		}
	}
	m_head = *m_headp;

	/* Build the packet-info descriptor handed to the driver. */
	pkt_info_zero(&pi);
	pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST));
	pi.ipi_pidx = pidx;
	pi.ipi_qsidx = txq->ift_id;
	pi.ipi_len = m_head->m_pkthdr.len;
	pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags;
	pi.ipi_vtag = M_HAS_VLANTAG(m_head) ? m_head->m_pkthdr.ether_vtag : 0;

	/* deliberate bitwise OR to make one condition */
	if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) {
		if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) {
			DBG_COUNTER_INC(encap_txd_encap_fail);
			return (err);
		}
		m_head = *m_headp;
	}

retry:
	err = bus_dmamap_load_mbuf_sg(buf_tag, map, m_head, segs, &nsegs,
	    BUS_DMA_NOWAIT);
defrag:
	if (__predict_false(err)) {
		switch (err) {
		case EFBIG:
			/* try collapse once and defrag once */
			if (remap == 0) {
				m_head = m_collapse(*m_headp, M_NOWAIT, max_segs);
				/* try defrag if collapsing fails */
				if (m_head == NULL)
					remap++;
			}
			if (remap == 1) {
				txq->ift_mbuf_defrag++;
				m_head = m_defrag(*m_headp, M_NOWAIT);
			}
			/*
			 * remap should never be >1 unless bus_dmamap_load_mbuf_sg
			 * failed to map an mbuf that was run through m_defrag
			 */
			MPASS(remap <= 1);
			if (__predict_false(m_head == NULL || remap > 1))
				goto defrag_failed;
			remap++;
			*m_headp = m_head;
			goto retry;
			break;
		case ENOMEM:
			txq->ift_no_tx_dma_setup++;
			break;
		default:
			txq->ift_no_tx_dma_setup++;
			m_freem(*m_headp);
			DBG_COUNTER_INC(tx_frees);
			*m_headp = NULL;
			break;
		}
		txq->ift_map_failed++;
		DBG_COUNTER_INC(encap_load_mbuf_fail);
		DBG_COUNTER_INC(encap_txd_encap_fail);
		return (err);
	}
	ifsd_m[pidx] = m_head;
	/*
	 * XXX assumes a 1 to 1 relationship between segments and
	 *     descriptors - this does not hold true on all drivers, e.g.
	 *     cxgb
	 */
	if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
		/* Ring full: unload, kick the cleaner, and push back. */
		txq->ift_no_desc_avail++;
		bus_dmamap_unload(buf_tag, map);
		DBG_COUNTER_INC(encap_txq_avail_fail);
		DBG_COUNTER_INC(encap_txd_encap_fail);
		if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
			GROUPTASK_ENQUEUE(&txq->ift_task);
		return (ENOBUFS);
	}
	/*
	 * On Intel cards we can greatly reduce the number of TX interrupts
	 * we see by only setting report status on every Nth descriptor.
	 * However, this also means that the driver will need to keep track
	 * of the descriptors that RS was set on to check them for the DD bit.
	 */
	txq->ift_rs_pending += nsegs + 1;
	if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) ||
	     iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) {
		pi.ipi_flags |= IPI_TX_INTR;
		txq->ift_rs_pending = 0;
	}

	pi.ipi_segs = segs;
	pi.ipi_nsegs = nsegs;

	MPASS(pidx >= 0 && pidx < txq->ift_size);
#ifdef PKT_DEBUG
	print_pkt(&pi);
#endif
	if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
		bus_dmamap_sync(buf_tag, map, BUS_DMASYNC_PREWRITE);
		DBG_COUNTER_INC(tx_encap);
		MPASS(pi.ipi_new_pidx < txq->ift_size);

		/* Account for descriptors consumed, handling index wrap. */
		ndesc = pi.ipi_new_pidx - pi.ipi_pidx;
		if (pi.ipi_new_pidx < pi.ipi_pidx) {
			ndesc += txq->ift_size;
			txq->ift_gen = 1;
		}
		/*
		 * drivers can need as many as
		 * two sentinels
		 */
		MPASS(ndesc <= pi.ipi_nsegs + 2);
		MPASS(pi.ipi_new_pidx != pidx);
		MPASS(ndesc > 0);
		txq->ift_in_use += ndesc;
		txq->ift_db_pending += ndesc;

		/*
		 * We update the last software descriptor again here because there may
		 * be a sentinel and/or there may be more mbufs than segments
		 */
		txq->ift_pidx = pi.ipi_new_pidx;
		txq->ift_npending += pi.ipi_ndescs;
	} else {
		/* Driver rejected the chain; reclaim the mbuf for retry. */
		*m_headp = m_head = iflib_remove_mbuf(txq);
		if (err == EFBIG) {
			txq->ift_txd_encap_efbig++;
			if (remap < 2) {
				remap = 1;
				goto defrag;
			}
		}
		goto defrag_failed;
	}
	/*
	 * err can't possibly be non-zero here, so we don't need to test it
	 * to see if we need to DBG_COUNTER_INC(encap_txd_encap_fail).
	 */
	return (err);

defrag_failed:
	txq->ift_mbuf_defrag_failed++;
	txq->ift_map_failed++;
	m_freem(*m_headp);
	DBG_COUNTER_INC(tx_frees);
	*m_headp = NULL;
	DBG_COUNTER_INC(encap_txd_encap_fail);
	return (ENOMEM);
}
/*
 * Release n completed TX slots starting at the consumer index: sync and
 * unload each slot's DMA map (regular or TSO, chosen by the mbuf's
 * CSUM_TSO flag) and free the attached mbuf chain.  Advances ift_cidx
 * and clears ift_gen when the index wraps.
 */
static void
iflib_tx_desc_free(iflib_txq_t txq, int n)
{
	uint32_t qsize, cidx, mask, gen;
	struct mbuf *m, **ifsd_m;
	bool do_prefetch;

	cidx = txq->ift_cidx;
	gen = txq->ift_gen;
	qsize = txq->ift_size;
	mask = qsize-1;
	ifsd_m = txq->ift_sds.ifsd_m;
	do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH);

	while (n-- > 0) {
		if (do_prefetch) {
			/* Warm up the next couple of slots before use. */
			prefetch(ifsd_m[(cidx + 3) & mask]);
			prefetch(ifsd_m[(cidx + 4) & mask]);
		}
		if ((m = ifsd_m[cidx]) != NULL) {
			prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]);
			/* TSO frames were mapped with the TSO tag/map pair. */
			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
				bus_dmamap_sync(txq->ift_tso_buf_tag,
				    txq->ift_sds.ifsd_tso_map[cidx],
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(txq->ift_tso_buf_tag,
				    txq->ift_sds.ifsd_tso_map[cidx]);
			} else {
				bus_dmamap_sync(txq->ift_buf_tag,
				    txq->ift_sds.ifsd_map[cidx],
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(txq->ift_buf_tag,
				    txq->ift_sds.ifsd_map[cidx]);
			}
			/* XXX we don't support any drivers that batch packets yet */
			MPASS(m->m_nextpkt == NULL);
			m_freem(m);
			ifsd_m[cidx] = NULL;
#if MEMORY_LOGGING
			txq->ift_dequeued++;
#endif
			DBG_COUNTER_INC(tx_frees);
		}
		if (__predict_false(++cidx == qsize)) {
			cidx = 0;
			gen = 0;
		}
	}
	txq->ift_cidx = cidx;
	txq->ift_gen = gen;
}
/*
 * Reclaim TX descriptors the hardware has finished with, but only once
 * more than thresh descriptors are reclaimable (so callers on the hot
 * path don't pay the cleanup cost every time).  Returns the number of
 * descriptors reclaimed, or 0 if below the threshold.
 */
static __inline int
iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
{
	if_ctx_t ctx = txq->ift_ctx;
	int reclaimable;

	KASSERT(thresh >= 0, ("invalid threshold to reclaim"));
	MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);

	/*
	 * Need a rate-limiting check so that this isn't called every time
	 */
	iflib_tx_credits_update(ctx, txq);
	reclaimable = DESC_RECLAIMABLE(txq);
	if (reclaimable <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) {
#ifdef INVARIANTS
		if (iflib_verbose_debug) {
			printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__,
			    txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments,
			    reclaimable, thresh);
		}
#endif
		return (0);
	}

	iflib_tx_desc_free(txq, reclaimable);
	txq->ift_cleaned += reclaimable;
	txq->ift_in_use -= reclaimable;

	return (reclaimable);
}
/*
 * Return a pointer to the mp_ring slot at (cidx + offset), prefetching
 * that entry and — when more work remains — the next few entries and the
 * next cache line of slot pointers.  Pure read-side helper for the drain
 * loops; it does not consume anything from the ring.
 */
static struct mbuf **
_ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining)
{
	struct mbuf **slots;
	int mask, nxt;

	mask = r->size - 1;
	nxt = (cidx + CACHE_PTR_INCREMENT) & mask;

	slots = __DEVOLATILE(struct mbuf **, &r->items[0]);
	prefetch(slots[(cidx + offset) & mask]);
	if (remaining > 1) {
		prefetch2cachelines(&slots[nxt]);
		prefetch2cachelines(slots[(cidx + offset + 1) & mask]);
		prefetch2cachelines(slots[(cidx + offset + 2) & mask]);
		prefetch2cachelines(slots[(cidx + offset + 3) & mask]);
	}
	return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & mask]));
}
/*
 * Convenience wrapper: ask the mp_ring layer to drain up to budget
 * packets from this TX queue's ring if there is anything pending.
 */
static void
iflib_txq_check_drain(iflib_txq_t txq, int budget)
{

	ifmp_ring_check_drainage(txq->ift_br, budget);
}
/*
 * mp_ring can_drain callback: report whether the TX queue can accept
 * more work.  Cheap check first (ring space available); otherwise sync
 * the descriptor area and ask the driver whether completions are pending.
 */
static uint32_t
iflib_txq_can_drain(struct ifmp_ring *r)
{
	iflib_txq_t txq = r->cookie;
	if_ctx_t ctx = txq->ift_ctx;

	if (TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2)
		return (1);
	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
	    BUS_DMASYNC_POSTREAD);
	return (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id,
	    false));
}
/*
 * mp_ring drain callback: encapsulate and transmit up to TX_BATCH_SIZE
 * packets from the ring between cidx and pidx.  Also handles the
 * special cases: interface down/link inactive (do nothing), IFC_QFLUSH
 * (free everything pending), and OACTIVE (quiesce the queue).  Returns
 * the number of ring entries consumed (sent + skipped).
 */
static uint32_t
iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
{
	iflib_txq_t txq = r->cookie;
	if_ctx_t ctx = txq->ift_ctx;
	if_t ifp = ctx->ifc_ifp;
	struct mbuf *m, **mp;
	int avail, bytes_sent, skipped, count, err, i;
	int mcast_sent, pkt_sent, reclaimed;
	bool do_prefetch, rang, ring;

	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) ||
	    !LINK_ACTIVE(ctx))) {
		DBG_COUNTER_INC(txq_drain_notready);
		return (0);
	}
	/* Free up ring space first, and ring the doorbell if we reclaimed. */
	reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
	rang = iflib_txd_db_check(txq, reclaimed && txq->ift_db_pending);
	avail = IDXDIFF(pidx, cidx, r->size);

	if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
		/*
		 * The driver is unloading so we need to free all pending packets.
		 */
		DBG_COUNTER_INC(txq_drain_flushing);
		for (i = 0; i < avail; i++) {
			if (__predict_true(r->items[(cidx + i) & (r->size-1)] != (void *)txq))
				m_freem(r->items[(cidx + i) & (r->size-1)]);
			r->items[(cidx + i) & (r->size-1)] = NULL;
		}
		return (avail);
	}

	if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
		/* Queue is administratively inactive: idle it and stop the timer. */
		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
		CALLOUT_LOCK(txq);
		callout_stop(&txq->ift_timer);
		CALLOUT_UNLOCK(txq);
		DBG_COUNTER_INC(txq_drain_oactive);
		return (0);
	}

	/*
	 * If we've reclaimed any packets this queue cannot be hung.
	 */
	if (reclaimed)
		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
	skipped = mcast_sent = bytes_sent = pkt_sent = 0;
	count = MIN(avail, TX_BATCH_SIZE);
#ifdef INVARIANTS
	if (iflib_verbose_debug)
		printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__,
		    avail, ctx->ifc_flags, TXQ_AVAIL(txq));
#endif
	do_prefetch = (ctx->ifc_flags & IFC_PREFETCH);
	err = 0;
	for (i = 0; i < count && TXQ_AVAIL(txq) >= MAX_TX_DESC(ctx) + 2; i++) {
		int rem = do_prefetch ? count - i : 0;

		mp = _ring_peek_one(r, cidx, i, rem);
		MPASS(mp != NULL && *mp != NULL);

		/*
		 * Completion interrupts will use the address of the txq
		 * as a sentinel to enqueue _something_ in order to acquire
		 * the lock on the mp_ring (there's no direct lock call).
		 * We obviously have to check for these sentinel cases
		 * and skip them.
		 */
		if (__predict_false(*mp == (struct mbuf *)txq)) {
			skipped++;
			continue;
		}
		err = iflib_encap(txq, mp);
		if (__predict_false(err)) {
			/* no room - bail out */
			if (err == ENOBUFS)
				break;
			skipped++;
			/* we can't send this packet - skip it */
			continue;
		}
		pkt_sent++;
		m = *mp;
		DBG_COUNTER_INC(tx_sent);
		bytes_sent += m->m_pkthdr.len;
		mcast_sent += !!(m->m_flags & M_MCAST);

		if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)))
			break;
		ETHER_BPF_MTAP(ifp, m);
		rang = iflib_txd_db_check(txq, false);
	}

	/* deliberate use of bitwise or to avoid gratuitous short-circuit */
	ring = rang ? false : (iflib_min_tx_latency | err);
	iflib_txd_db_check(txq, ring);
	if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
	if (mcast_sent)
		if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
#ifdef INVARIANTS
	if (iflib_verbose_debug)
		printf("consumed=%d\n", skipped + pkt_sent);
#endif
	return (skipped + pkt_sent);
}
/*
 * mp_ring can_drain callback used during queue teardown: always report
 * drainable so iflib_ifmp_purge() can flush every pending entry.
 */
static uint32_t
iflib_txq_drain_always(struct ifmp_ring *r)
{

	return (1);
}
/*
 * mp_ring drain callback used during queue teardown: instead of
 * transmitting, free every pending mbuf (skipping txq-address sentinels)
 * and stop the queue timer.  Returns the number of entries consumed.
 */
static uint32_t
iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
{
	int i, avail;
	struct mbuf **mp;
	iflib_txq_t txq;

	txq = r->cookie;

	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
	CALLOUT_LOCK(txq);
	callout_stop(&txq->ift_timer);
	CALLOUT_UNLOCK(txq);

	avail = IDXDIFF(pidx, cidx, r->size);
	for (i = 0; i < avail; i++) {
		mp = _ring_peek_one(r, cidx, i, avail - i);
		/* Sentinel entries (the txq address) carry no mbuf to free. */
		if (__predict_false(*mp == (struct mbuf *)txq))
			continue;
		m_freem(*mp);
		DBG_COUNTER_INC(tx_frees);
	}
	MPASS(ifmp_ring_is_stalled(r) == 0);
	return (avail);
}
/*
 * Flush every entry out of a TX queue's mp_ring by temporarily swapping
 * in the free-everything drain callbacks, running a full drainage pass,
 * then restoring the normal transmit callbacks.
 */
static void
iflib_ifmp_purge(iflib_txq_t txq)
{
	struct ifmp_ring *r;

	r = txq->ift_br;
	r->drain = iflib_txq_drain_free;
	r->can_drain = iflib_txq_drain_always;

	ifmp_ring_check_drainage(r, r->size);

	r->drain = iflib_txq_drain;
	r->can_drain = iflib_txq_can_drain;
}
/*
 * TX grouptask handler: run after a TX completion interrupt (or a
 * deferred enqueue) to push pending packets, then re-enable the queue's
 * interrupt.  Defers to netmap when the queue is in netmap mode and to
 * ALTQ when it is enabled on the interface.
 */
static void
_task_fn_tx(void *context)
{
	iflib_txq_t txq = context;
	if_ctx_t ctx = txq->ift_ctx;
	if_t ifp = ctx->ifc_ifp;
	int abdicate = ctx->ifc_sysctl_tx_abdicate;

#ifdef IFLIB_DIAGNOSTICS
	txq->ift_cpu_exec_count[curcpu]++;
#endif
	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
		return;
#ifdef DEV_NETMAP
	if ((if_getcapenable(ifp) & IFCAP_NETMAP) &&
	    netmap_tx_irq(ifp, txq->ift_id))
		goto skip_ifmp;
#endif
#ifdef ALTQ
	if (if_altq_is_enabled(ifp))
		iflib_altq_if_start(ifp);
#endif
	/*
	 * Enqueue a txq-address sentinel to take the ring lock and trigger
	 * a drain; with nothing pending, just check drainage directly.
	 */
	if (txq->ift_db_pending)
		ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate);
	else if (!abdicate)
		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
	/*
	 * When abdicating, we always need to check drainage, not just when we don't enqueue
	 */
	if (abdicate)
		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
#ifdef DEV_NETMAP
skip_ifmp:
#endif
	/* Legacy (shared) interrupt vs. per-queue interrupt re-enable. */
	if (ctx->ifc_flags & IFC_LEGACY)
		IFDI_INTR_ENABLE(ctx);
	else
		IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
}
/*
 * RX grouptask handler: process received packets for one RX queue.
 * Defers to netmap when active, otherwise runs iflib_rxeof() with the
 * sysctl-configured budget.  Re-enables the interrupt when the ring was
 * fully processed, reschedules itself when more work remains, and arms
 * the refill watchdog when the free list came up empty.
 */
static void
_task_fn_rx(void *context)
{
	iflib_rxq_t rxq = context;
	if_ctx_t ctx = rxq->ifr_ctx;
	uint8_t more;
	uint16_t budget;
#ifdef DEV_NETMAP
	u_int work = 0;
	int nmirq;
#endif

#ifdef IFLIB_DIAGNOSTICS
	rxq->ifr_cpu_exec_count[curcpu]++;
#endif
	DBG_COUNTER_INC(task_fn_rxs);
	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
		return;
#ifdef DEV_NETMAP
	nmirq = netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work);
	if (nmirq != NM_IRQ_PASS) {
		/* Netmap consumed the interrupt; only reschedule if asked. */
		more = (nmirq == NM_IRQ_RESCHED) ? IFLIB_RXEOF_MORE : 0;
		goto skip_rxeof;
	}
#endif
	budget = ctx->ifc_sysctl_rx_budget;
	if (budget == 0)
		budget = 16;	/* XXX */
	more = iflib_rxeof(rxq, budget);
#ifdef DEV_NETMAP
skip_rxeof:
#endif
	if ((more & IFLIB_RXEOF_MORE) == 0) {
		if (ctx->ifc_flags & IFC_LEGACY)
			IFDI_INTR_ENABLE(ctx);
		else
			IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
		DBG_COUNTER_INC(rx_intr_enables);
	}
	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
		return;

	if (more & IFLIB_RXEOF_MORE)
		GROUPTASK_ENQUEUE(&rxq->ifr_task);
	else if (more & IFLIB_RXEOF_EMPTY)
		callout_reset_curcpu(&rxq->ifr_watchdog, 1, &_task_fn_rx_watchdog, rxq);
}
/*
 * Admin grouptask handler: services link-state updates, watchdog resets,
 * and deferred reinitialization requests.  Snapshots and clears the
 * DO_RESET/DO_WATCHDOG flags under the state lock, stops the per-queue
 * timers across the driver callbacks, then restarts them and kicks the
 * TX queues if the link is up.
 */
static void
_task_fn_admin(void *context)
{
	if_ctx_t ctx = context;
	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
	iflib_txq_t txq;
	int i;
	bool oactive, running, do_reset, do_watchdog, in_detach;

	STATE_LOCK(ctx);
	running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING);
	oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE);
	do_reset = (ctx->ifc_flags & IFC_DO_RESET);
	do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG);
	in_detach = (ctx->ifc_flags & IFC_IN_DETACH);
	ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG);
	STATE_UNLOCK(ctx);

	if ((!running && !oactive) && !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
		return;
	if (in_detach)
		return;

	CTX_LOCK(ctx);
	/* Quiesce the TX timers while driver admin callbacks run. */
	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
		CALLOUT_LOCK(txq);
		callout_stop(&txq->ift_timer);
		CALLOUT_UNLOCK(txq);
	}
	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_ADMINCQ)
		IFDI_ADMIN_COMPLETION_HANDLE(ctx);
	if (do_watchdog) {
		ctx->ifc_watchdog_events++;
		IFDI_WATCHDOG_RESET(ctx);
	}
	IFDI_UPDATE_ADMIN_STATUS(ctx);
	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
		callout_reset_on(&txq->ift_timer, iflib_timer_default, iflib_timer, txq,
		    txq->ift_timer.c_cpu);
	}
	IFDI_LINK_INTR_ENABLE(ctx);
	if (do_reset)
		iflib_if_init_locked(ctx);
	CTX_UNLOCK(ctx);

	if (LINK_ACTIVE(ctx) == 0)
		return;
	/* Link is up: restart any stalled TX queues. */
	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
}
/*
 * IOV grouptask handler: forward a VF link reset (VFLR) event to the
 * driver under the context lock.  Skipped when the interface is down,
 * unless the driver asked to always run admin tasks.
 */
static void
_task_fn_iov(void *context)
{
	if_ctx_t ctx = context;

	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) &&
	    !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
		return;

	CTX_LOCK(ctx);
	IFDI_VFLR_HANDLE(ctx);
	CTX_UNLOCK(ctx);
}
/*
 * Sysctl handler for driver interrupt-delay tunables: stash the request
 * and oid in the per-tunable info struct, then let the driver's
 * SYSCTL_INT_DELAY method do the actual read/write under the context
 * lock.  Returns the driver method's error code.
 */
static int
iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	int err;
	if_int_delay_info_t info;
	if_ctx_t ctx;

	info = (if_int_delay_info_t)arg1;
	ctx = info->iidi_ctx;
	info->iidi_req = req;
	info->iidi_oidp = oidp;
	CTX_LOCK(ctx);
	err = IFDI_SYSCTL_INT_DELAY(ctx, info);
	CTX_UNLOCK(ctx);
	return (err);
}
/*********************************************************************
*
* IFNET FUNCTIONS
*
**********************************************************************/
/*
 * Reinitialize the interface: full stop followed by init.  The context
 * lock must already be held.
 */
static void
iflib_if_init_locked(if_ctx_t ctx)
{

	iflib_stop(ctx);
	iflib_init_locked(ctx);
}
/*
 * if_init method: reinitialize the interface, taking the context lock.
 */
static void
iflib_if_init(void *arg)
{
	if_ctx_t ctx = arg;

	CTX_LOCK(ctx);
	iflib_if_init_locked(ctx);
	CTX_UNLOCK(ctx);
}
/*
 * if_transmit method: select a TX queue for the mbuf and enqueue it on
 * that queue's mp_ring.  Queue selection order: driver's v2 selector
 * (which gets partial header info), driver's v1 selector, then iflib's
 * hash-based QIDX() — except ALTQ-enabled interfaces always use queue 0.
 * With tx_abdicate set the TX grouptask is kicked instead of draining
 * inline.  Frees the mbuf and returns an error when the interface is
 * down, header parsing fails, or the ring is full.
 */
static int
iflib_if_transmit(if_t ifp, struct mbuf *m)
{
	if_ctx_t ctx = if_getsoftc(ifp);
	iflib_txq_t txq;
	int err, qidx;
	int abdicate;

	if (__predict_false((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) {
		DBG_COUNTER_INC(tx_frees);
		m_freem(m);
		return (ENETDOWN);
	}

	MPASS(m->m_nextpkt == NULL);
	/* ALTQ-enabled interfaces always use queue 0. */
	qidx = 0;
	/* Use driver-supplied queue selection method if it exists */
	if (ctx->isc_txq_select_v2) {
		struct if_pkt_info pi;
		uint64_t early_pullups = 0;
		pkt_info_zero(&pi);

		err = iflib_parse_header_partial(&pi, &m, &early_pullups);
		if (__predict_false(err != 0)) {
			/* Assign pullups for bad pkts to default queue */
			ctx->ifc_txqs[0].ift_pullups += early_pullups;
			DBG_COUNTER_INC(encap_txd_encap_fail);
			return (err);
		}
		/* Let driver make queueing decision */
		qidx = ctx->isc_txq_select_v2(ctx->ifc_softc, m, &pi);
		ctx->ifc_txqs[qidx].ift_pullups += early_pullups;
	}
	/* Backwards compatibility w/ simpler queue select */
	else if (ctx->isc_txq_select)
		qidx = ctx->isc_txq_select(ctx->ifc_softc, m);
	/* If not, use iflib's standard method */
	else if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m) && !if_altq_is_enabled(ifp))
		qidx = QIDX(ctx, m);

	/* Set TX queue */
	txq = &ctx->ifc_txqs[qidx];

#ifdef DRIVER_BACKPRESSURE
	if (txq->ift_closed) {
		while (m != NULL) {
			next = m->m_nextpkt;
			m->m_nextpkt = NULL;
			m_freem(m);
			DBG_COUNTER_INC(tx_frees);
			m = next;
		}
		return (ENOBUFS);
	}
#endif
#ifdef notyet
	qidx = count = 0;
	mp = marr;
	next = m;
	do {
		count++;
		next = next->m_nextpkt;
	} while (next != NULL);

	if (count > nitems(marr))
		if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) {
			/* XXX check nextpkt */
			m_freem(m);
			/* XXX simplify for now */
			DBG_COUNTER_INC(tx_frees);
			return (ENOBUFS);
		}
	for (next = m, i = 0; next != NULL; i++) {
		mp[i] = next;
		next = next->m_nextpkt;
		mp[i]->m_nextpkt = NULL;
	}
#endif
	DBG_COUNTER_INC(tx_seen);
	abdicate = ctx->ifc_sysctl_tx_abdicate;
	err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate);

	if (abdicate)
		GROUPTASK_ENQUEUE(&txq->ift_task);
	if (err) {
		if (!abdicate)
			GROUPTASK_ENQUEUE(&txq->ift_task);
		/* support forthcoming later */
#ifdef DRIVER_BACKPRESSURE
		txq->ift_closed = TRUE;
#endif
		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
		m_freem(m);
		DBG_COUNTER_INC(tx_frees);
	}

	return (err);
}
#ifdef ALTQ
/*
* The overall approach to integrating iflib with ALTQ is to continue to use
* the iflib mp_ring machinery between the ALTQ queue(s) and the hardware
* ring. Technically, when using ALTQ, queueing to an intermediate mp_ring
* is redundant/unnecessary, but doing so minimizes the amount of
* ALTQ-specific code required in iflib. It is assumed that the overhead of
* redundantly queueing to an intermediate mp_ring is swamped by the
* performance limitations inherent in using ALTQ.
*
* When ALTQ support is compiled in, all iflib drivers will use a transmit
* routine, iflib_altq_if_transmit(), that checks if ALTQ is enabled for the
* given interface. If ALTQ is enabled for an interface, then all
* transmitted packets for that interface will be submitted to the ALTQ
* subsystem via IFQ_ENQUEUE(). We don't use the legacy if_transmit()
* implementation because it uses IFQ_HANDOFF(), which will duplicatively
* update stats that the iflib machinery handles, and which is sensitive to
* the disused IFF_DRV_OACTIVE flag. Additionally, iflib_altq_if_start()
* will be installed as the start routine for use by ALTQ facilities that
* need to trigger queue drains on a scheduled basis.
*
*/
/*
 * ALTQ start routine: dequeue packets from the interface's ALTQ send
 * queue and feed them to iflib_if_transmit() until the queue is empty.
 * The IFQ lock is held across the loop; dequeues use the NOLOCK variant.
 */
static void
iflib_altq_if_start(if_t ifp)
{
	struct ifaltq *ifq = &ifp->if_snd;	/* XXX - DRVAPI */
	struct mbuf *m;

	IFQ_LOCK(ifq);
	IFQ_DEQUEUE_NOLOCK(ifq, m);
	while (m != NULL) {
		iflib_if_transmit(ifp, m);
		IFQ_DEQUEUE_NOLOCK(ifq, m);
	}
	IFQ_UNLOCK(ifq);
}
/*
 * if_transmit method installed when ALTQ support is compiled in: route
 * the packet through the ALTQ subsystem when ALTQ is enabled on the
 * interface (then drain it immediately), otherwise fall through to the
 * normal iflib transmit path.
 */
static int
iflib_altq_if_transmit(if_t ifp, struct mbuf *m)
{
	int err;

	if (if_altq_is_enabled(ifp)) {
		IFQ_ENQUEUE(&ifp->if_snd, m, err);	/* XXX - DRVAPI */
		if (err == 0)
			iflib_altq_if_start(ifp);
	} else
		err = iflib_if_transmit(ifp, m);

	return (err);
}
#endif /* ALTQ */
/*
 * if_qflush method: discard all packets queued for transmission.  Sets
 * IFC_QFLUSH so iflib_txq_drain() frees instead of transmits, spins each
 * TX ring until it is idle or stalled, then clears the flag and lets
 * if_qflush() handle the generic (and ALTQ) queues.
 */
static void
iflib_if_qflush(if_t ifp)
{
	if_ctx_t ctx = if_getsoftc(ifp);
	iflib_txq_t txq = ctx->ifc_txqs;
	int i;

	STATE_LOCK(ctx);
	ctx->ifc_flags |= IFC_QFLUSH;
	STATE_UNLOCK(ctx);
	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
		while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br)))
			iflib_txq_check_drain(txq, 0);
	STATE_LOCK(ctx);
	ctx->ifc_flags &= ~IFC_QFLUSH;
	STATE_UNLOCK(ctx);

	/*
	 * When ALTQ is enabled, this will also take care of purging the
	 * ALTQ queue(s).
	 */
	if_qflush(ifp);
}
#define IFCAP_FLAGS (IFCAP_HWCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
IFCAP_TSO | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \
IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | \
IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM | IFCAP_MEXTPG)
/*
 * if_ioctl method: handle interface ioctls, dispatching driver-specific
 * work through the IFDI_* methods under the context lock and deferring
 * everything else to ether_ioctl().  A deferred reinit (reinit flag) is
 * performed after the switch so it runs without the context lock held.
 */
static int
iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
{
	if_ctx_t ctx = if_getsoftc(ifp);
	struct ifreq	*ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *)data;
#endif
	bool		avoid_reset = false;
	int		err = 0, reinit = 0, bits;

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = true;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = true;
#endif
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			if_setflagbits(ifp, IFF_UP,0);
			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
				reinit = 1;
#ifdef INET
			if (!(if_getflags(ifp) & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			err = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		CTX_LOCK(ctx);
		if (ifr->ifr_mtu == if_getmtu(ifp)) {
			CTX_UNLOCK(ctx);
			break;
		}
		/* Preserve the drv flags across the stop/init cycle. */
		bits = if_getdrvflags(ifp);
		/* stop the driver and free any clusters before proceeding */
		iflib_stop(ctx);

		if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) {
			STATE_LOCK(ctx);
			/* Large MTUs need multi-segment RX buffers. */
			if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size)
				ctx->ifc_flags |= IFC_MULTISEG;
			else
				ctx->ifc_flags &= ~IFC_MULTISEG;
			STATE_UNLOCK(ctx);
			err = if_setmtu(ifp, ifr->ifr_mtu);
		}
		iflib_init_locked(ctx);
		STATE_LOCK(ctx);
		if_setdrvflags(ifp, bits);
		STATE_UNLOCK(ctx);
		CTX_UNLOCK(ctx);
		break;
	case SIOCSIFFLAGS:
		CTX_LOCK(ctx);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/* Only PROMISC/ALLMULTI changes need driver action. */
				if ((if_getflags(ifp) ^ ctx->ifc_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					CTX_UNLOCK(ctx);
					err = IFDI_PROMISC_SET(ctx, if_getflags(ifp));
					CTX_LOCK(ctx);
				}
			} else
				reinit = 1;
		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			iflib_stop(ctx);
		}
		ctx->ifc_if_flags = if_getflags(ifp);
		CTX_UNLOCK(ctx);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			CTX_LOCK(ctx);
			IFDI_INTR_DISABLE(ctx);
			IFDI_MULTI_SET(ctx);
			IFDI_INTR_ENABLE(ctx);
			CTX_UNLOCK(ctx);
		}
		break;
	case SIOCSIFMEDIA:
		CTX_LOCK(ctx);
		IFDI_MEDIA_SET(ctx);
		CTX_UNLOCK(ctx);
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
	case SIOCGIFXMEDIA:
		err = ifmedia_ioctl(ifp, ifr, ctx->ifc_mediap, command);
		break;
	case SIOCGI2C:
	{
		struct ifi2creq i2c;

		err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
		if (err != 0)
			break;
		/* Validate the SFP/QSFP i2c address and length before use. */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			err = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			err = EINVAL;
			break;
		}

		if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0)
			err = copyout(&i2c, ifr_data_get_ptr(ifr),
			    sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
	{
		int mask, setmask, oldmask;

		oldmask = if_getcapenable(ifp);
		mask = ifr->ifr_reqcap ^ oldmask;
		mask &= ctx->ifc_softc_ctx.isc_capabilities | IFCAP_MEXTPG;
		setmask = 0;
#ifdef TCP_OFFLOAD
		setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6);
#endif
		setmask |= (mask & IFCAP_FLAGS);
		setmask |= (mask & IFCAP_WOL);

		/*
		 * If any RX csum has changed, change all the ones that
		 * are supported by the driver.
		 */
		if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
			setmask |= ctx->ifc_softc_ctx.isc_capabilities &
			    (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
		}

		/*
		 * want to ensure that traffic has stopped before we change any of the flags
		 */
		if (setmask) {
			CTX_LOCK(ctx);
			bits = if_getdrvflags(ifp);
			/* WOL-only changes don't require a restart. */
			if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL)
				iflib_stop(ctx);
			STATE_LOCK(ctx);
			if_togglecapenable(ifp, setmask);
			ctx->ifc_softc_ctx.isc_capenable ^= setmask;
			STATE_UNLOCK(ctx);
			if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL)
				iflib_init_locked(ctx);
			STATE_LOCK(ctx);
			if_setdrvflags(ifp, bits);
			STATE_UNLOCK(ctx);
			CTX_UNLOCK(ctx);
		}
		if_vlancap(ifp);
		break;
	}
	case SIOCGPRIVATE_0:
	case SIOCSDRVSPEC:
	case SIOCGDRVSPEC:
		CTX_LOCK(ctx);
		err = IFDI_PRIV_IOCTL(ctx, command, data);
		CTX_UNLOCK(ctx);
		break;
	default:
		err = ether_ioctl(ifp, command, data);
		break;
	}
	if (reinit)
		iflib_if_init(ctx);
	return (err);
}
/*
 * if_get_counter() method: forward interface statistics queries to the
 * driver via the IFDI_GET_COUNTER kobj method.
 */
static uint64_t
iflib_if_get_counter(if_t ifp, ift_counter cnt)
{
	if_ctx_t sc;

	sc = if_getsoftc(ifp);
	return (IFDI_GET_COUNTER(sc, cnt));
}
/*********************************************************************
*
* OTHER FUNCTIONS EXPORTED TO THE STACK
*
**********************************************************************/
/*
 * VLAN attach event handler.  Registers the new VLAN tag with the driver,
 * stopping and restarting the interface around the call when the driver
 * reports (via IFDI_NEEDS_RESTART) that a reset is required to flush
 * untagged traffic.
 */
static void
iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag)
{
	if_ctx_t ctx = if_getsoftc(ifp);

	/* The event is broadcast; only act on our own interface. */
	if ((void *)ctx != arg)
		return;
	/* Reject the reserved tag 0 and out-of-range tags. */
	if (vtag == 0 || vtag > 4095)
		return;
	/* Don't race with teardown. */
	if (iflib_in_detach(ctx))
		return;

	CTX_LOCK(ctx);
	/* Driver may need all untagged packets to be flushed */
	if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG))
		iflib_stop(ctx);
	IFDI_VLAN_REGISTER(ctx, vtag);
	/* Re-init to load the changes, if required */
	if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG))
		iflib_init_locked(ctx);
	CTX_UNLOCK(ctx);
}
/*
 * VLAN detach event handler.  Mirror image of iflib_vlan_register():
 * removes the tag from the driver, restarting the interface when the
 * driver needs a reset to flush tagged traffic.
 */
static void
iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag)
{
	if_ctx_t ctx = if_getsoftc(ifp);

	/* The event is broadcast; only act on our own interface. */
	if ((void *)ctx != arg)
		return;
	/* Reject the reserved tag 0 and out-of-range tags. */
	if (vtag == 0 || vtag > 4095)
		return;

	CTX_LOCK(ctx);
	/* Driver may need all tagged packets to be flushed */
	if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG))
		iflib_stop(ctx);
	IFDI_VLAN_UNREGISTER(ctx, vtag);
	/* Re-init to load the changes, if required */
	if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG))
		iflib_init_locked(ctx);
	CTX_UNLOCK(ctx);
}
static void
iflib_led_func(void *arg, int onoff)
{
if_ctx_t ctx = arg;
CTX_LOCK(ctx);
IFDI_LED_FUNC(ctx, onoff);
CTX_UNLOCK(ctx);
}
/*********************************************************************
*
* BUS FUNCTION DEFINITIONS
*
**********************************************************************/
/*
 * Generic iflib PCI probe routine.  Fetches the driver's shared context
 * from DEVICE_REGISTER() and walks its vendor-info table looking for an
 * entry matching this device's PCI IDs.  A zero subvendor/subdevice/rev
 * field in a table entry acts as a wildcard.
 *
 * Returns BUS_PROBE_DEFAULT on a match, ENOTSUP if the driver did not
 * register properly, and ENXIO when no table entry matches.
 */
int
iflib_device_probe(device_t dev)
{
	const pci_vendor_info_t *ent;
	if_shared_ctx_t sctx;
	uint16_t did, revid, subdid, subvid;
	uint16_t vid;

	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
		return (ENOTSUP);

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	subvid = pci_get_subvendor(dev);
	subdid = pci_get_subdevice(dev);
	revid = pci_get_revid(dev);
	/* Give the driver a chance to rewrite the IDs before matching. */
	if (sctx->isc_parse_devinfo != NULL)
		sctx->isc_parse_devinfo(&did, &subvid, &subdid, &revid);

	for (ent = sctx->isc_vendor_info; ent->pvi_vendor_id != 0; ent++) {
		if (vid != ent->pvi_vendor_id)
			continue;
		if (did != ent->pvi_device_id)
			continue;
		if (ent->pvi_subvendor_id != 0 && subvid != ent->pvi_subvendor_id)
			continue;
		if (ent->pvi_subdevice_id != 0 && subdid != ent->pvi_subdevice_id)
			continue;
		if (ent->pvi_rev_id != 0 && revid != ent->pvi_rev_id)
			continue;

		device_set_desc_copy(dev, ent->pvi_name);
		/* this needs to be changed to zero if the bus probing code
		 * ever stops re-probing on best match because the sctx
		 * may have its values over written by register calls
		 * in subsequent probes
		 */
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}
/*
 * Probe wrapper for vendor-supplied drivers: demote a successful default
 * probe to BUS_PROBE_VENDOR so an in-tree driver wins, passing failures
 * through unchanged.
 */
int
iflib_device_probe_vendor(device_t dev)
{
	int probe;

	probe = iflib_device_probe(dev);
	return (probe == BUS_PROBE_DEFAULT ? BUS_PROBE_VENDOR : probe);
}
/*
 * Establish the working queue-set and descriptor counts in the softc
 * context from, in priority order, the per-device sysctl overrides and
 * the driver's shared-context defaults, then clamp each descriptor count
 * into the driver's [min, max] range and force it to a power of two.
 *
 * Note the ordering: min-clamp, then max-clamp, then the power-of-two
 * check, so a clamped value that is not a power of two still falls back
 * to the driver default.
 */
static void
iflib_reset_qvalues(if_ctx_t ctx)
{
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	device_t dev = ctx->ifc_dev;
	int i;

	/* sysctl-requested queue-set counts override the driver's values */
	if (ctx->ifc_sysctl_ntxqs != 0)
		scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs;
	if (ctx->ifc_sysctl_nrxqs != 0)
		scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs;

	/* per-queue TX descriptor counts: sysctl override or driver default */
	for (i = 0; i < sctx->isc_ntxqs; i++) {
		if (ctx->ifc_sysctl_ntxds[i] != 0)
			scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i];
		else
			scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
	}

	/* per-queue RX descriptor counts: sysctl override or driver default */
	for (i = 0; i < sctx->isc_nrxqs; i++) {
		if (ctx->ifc_sysctl_nrxds[i] != 0)
			scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i];
		else
			scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
	}

	/* sanitize the RX counts against the driver's declared limits */
	for (i = 0; i < sctx->isc_nrxqs; i++) {
		if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
			device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
			    i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
			scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
		}
		if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
			device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
			    i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
			scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
		}
		if (!powerof2(scctx->isc_nrxd[i])) {
			device_printf(dev, "nrxd%d: %d is not a power of 2 - using default value of %d\n",
			    i, scctx->isc_nrxd[i], sctx->isc_nrxd_default[i]);
			scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
		}
	}

	/* sanitize the TX counts the same way */
	for (i = 0; i < sctx->isc_ntxqs; i++) {
		if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
			device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
			    i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
			scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
		}
		if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
			device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
			    i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
			scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
		}
		if (!powerof2(scctx->isc_ntxd[i])) {
			device_printf(dev, "ntxd%d: %d is not a power of 2 - using default value of %d\n",
			    i, scctx->isc_ntxd[i], sctx->isc_ntxd_default[i]);
			scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
		}
	}
}
/*
 * Register a pfil(9) head for inbound Ethernet filtering on this
 * interface and point every RX queue at it.
 */
static void
iflib_add_pfil(if_ctx_t ctx)
{
	struct pfil_head_args args;
	struct pfil_head *head;
	iflib_rxq_t rxq;
	int i;

	args.pa_version = PFIL_VERSION;
	args.pa_flags = PFIL_IN;
	args.pa_type = PFIL_TYPE_ETHERNET;
	args.pa_headname = if_name(ctx->ifc_ifp);
	head = pfil_head_register(&args);

	/* All RX queues share the single per-interface head. */
	rxq = ctx->ifc_rxqs;
	for (i = 0; i < NRXQSETS(ctx); i++, rxq++)
		rxq->pfil = head;
}
/*
 * Detach the shared pfil(9) head from every RX queue and unregister it.
 */
static void
iflib_rem_pfil(if_ctx_t ctx)
{
	struct pfil_head *head;
	iflib_rxq_t rxq;
	int i;

	/* Every queue holds the same head; grab it from the first one. */
	rxq = ctx->ifc_rxqs;
	head = rxq->pfil;
	for (i = 0; i < NRXQSETS(ctx); i++, rxq++)
		rxq->pfil = NULL;
	pfil_head_unregister(head);
}
/*
* Advance forward by n members of the cpuset ctx->ifc_cpus starting from
* cpuid and wrapping as necessary.
*/
/*
 * Step 'n' positions forward through the members of ctx->ifc_cpus,
 * starting at 'cpuid' and wrapping from the highest member back to the
 * lowest.  'cpuid' must itself be a member of the set.
 */
static unsigned int
cpuid_advance(if_ctx_t ctx, unsigned int cpuid, unsigned int n)
{
	unsigned int lo, hi;

	/* cpuid should always be in the valid set */
	MPASS(CPU_ISSET(cpuid, &ctx->ifc_cpus));

	/* valid set should never be empty */
	MPASS(!CPU_EMPTY(&ctx->ifc_cpus));

	lo = CPU_FFS(&ctx->ifc_cpus) - 1;
	hi = CPU_FLS(&ctx->ifc_cpus) - 1;

	/* advancing by the set size is a no-op, so reduce first */
	for (n %= CPU_COUNT(&ctx->ifc_cpus); n > 0; n--) {
		do {
			if (++cpuid > hi)
				cpuid = lo;
		} while (!CPU_ISSET(cpuid, &ctx->ifc_cpus));
	}
	return (cpuid);
}
#if defined(SMP) && defined(SCHED_ULE)
extern struct cpu_group *cpu_top;              /* CPU topology */

/*
 * Return the index of the child group of 'grp' whose CPU mask contains
 * 'cpu', or -1 if 'grp' is a leaf or no child contains the CPU.
 */
static int
find_child_with_core(int cpu, struct cpu_group *grp)
{
	int i;

	if (grp->cg_children == 0)
		return -1;

	MPASS(grp->cg_child);
	for (i = 0; i < grp->cg_children; i++) {
		if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask))
			return i;
	}

	return -1;
}

/*
 * Find an L2 neighbor of the given CPU or return -1 if none found.  This
 * does not distinguish among multiple L2 neighbors if the given CPU has
 * more than one (it will always return the same result in that case).
 */
static int
find_l2_neighbor(int cpu)
{
	struct cpu_group *grp;
	int i;

	grp = cpu_top;
	if (grp == NULL)
		return -1;

	/*
	 * Find the smallest CPU group that contains the given core.
	 */
	i = 0;
	while ((i = find_child_with_core(cpu, grp)) != -1) {
		/*
		 * If the smallest group containing the given CPU has less
		 * than two members, we conclude the given CPU has no
		 * L2 neighbor.
		 */
		if (grp->cg_child[i].cg_count <= 1)
			return (-1);
		grp = &grp->cg_child[i];
	}

	/* Must share L2. */
	if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE)
		return -1;

	/*
	 * Select the first member of the set that isn't the reference
	 * CPU, which at this point is guaranteed to exist.
	 */
	for (i = 0; i < CPU_SETSIZE; i++) {
		if (CPU_ISSET(i, &grp->cg_mask) && i != cpu)
			return (i);
	}

	/* Should never be reached */
	return (-1);
}
#else
/* Without SMP + SCHED_ULE there is no topology to consult. */
static int
find_l2_neighbor(int cpu)
{

	return (-1);
}
#endif
/*
* CPU mapping behaviors
* ---------------------
* 'separate txrx' refers to the separate_txrx sysctl
* 'use logical' refers to the use_logical_cores sysctl
* 'INTR CPUS' indicates whether bus_get_cpus(INTR_CPUS) succeeded
*
* separate use INTR
* txrx logical CPUS result
* ---------- --------- ------ ------------------------------------------------
* - - X RX and TX queues mapped to consecutive physical
* cores with RX/TX pairs on same core and excess
* of either following
* - X X RX and TX queues mapped to consecutive cores
* of any type with RX/TX pairs on same core and
* excess of either following
* X - X RX and TX queues mapped to consecutive physical
* cores; all RX then all TX
* X X X RX queues mapped to consecutive physical cores
* first, then TX queues mapped to L2 neighbor of
* the corresponding RX queue if one exists,
* otherwise to consecutive physical cores
* - n/a - RX and TX queues mapped to consecutive cores of
* any type with RX/TX pairs on same core and excess
* of either following
* X n/a - RX and TX queues mapped to consecutive cores of
* any type; all RX then all TX
*/
/*
 * Map queue 'qid' (TX when 'is_tx', else RX) to a CPU ID, counting
 * forward from 'base_cpuid' through the set of context CPUs.  The exact
 * layout produced for each sysctl combination is described in the
 * "CPU mapping behaviors" table above.
 */
static unsigned int
get_cpuid_for_queue(if_ctx_t ctx, unsigned int base_cpuid, unsigned int qid,
    bool is_tx)
{
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	unsigned int core_index;

	if (ctx->ifc_sysctl_separate_txrx) {
		/*
		 * When using separate CPUs for TX and RX, the assignment
		 * will always be of a consecutive CPU out of the set of
		 * context CPUs, except for the specific case where the
		 * context CPUs are phsyical cores, the use of logical cores
		 * has been enabled, the assignment is for TX, the TX qid
		 * corresponds to an RX qid, and the CPU assigned to the
		 * corresponding RX queue has an L2 neighbor.
		 */
		if (ctx->ifc_sysctl_use_logical_cores &&
		    ctx->ifc_cpus_are_physical_cores &&
		    is_tx && qid < scctx->isc_nrxqsets) {
			int l2_neighbor;
			unsigned int rx_cpuid;

			rx_cpuid = cpuid_advance(ctx, base_cpuid, qid);
			l2_neighbor = find_l2_neighbor(rx_cpuid);
			if (l2_neighbor != -1) {
				return (l2_neighbor);
			}
			/*
			 * ... else fall through to the normal
			 * consecutive-after-RX assignment scheme.
			 *
			 * Note that we are assuming that all RX queue CPUs
			 * have an L2 neighbor, or all do not.  If a mixed
			 * scenario is possible, we will have to keep track
			 * separately of how many queues prior to this one
			 * were not able to be assigned to an L2 neighbor.
			 */
		}
		/* TX queues are laid out after all of the RX queues. */
		if (is_tx)
			core_index = scctx->isc_nrxqsets + qid;
		else
			core_index = qid;
	} else {
		/* RX/TX pairs share a core: both map qid directly. */
		core_index = qid;
	}

	return (cpuid_advance(ctx, base_cpuid, core_index));
}
/*
 * Determine the base CPU ID this context should start allocating queue
 * CPUs from.  If the administrator specified one via the core_offset
 * sysctl, translate it into the context's valid CPU range; otherwise
 * consult (and update) the global cpu_offsets list so that successive
 * devices sharing the same CPU set start at successive offsets.
 */
static uint16_t
get_ctx_core_offset(if_ctx_t ctx)
{
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	struct cpu_offset *op;
	cpuset_t assigned_cpus;
	unsigned int cores_consumed;
	unsigned int base_cpuid = ctx->ifc_sysctl_core_offset;
	unsigned int first_valid;
	unsigned int last_valid;
	unsigned int i;

	first_valid = CPU_FFS(&ctx->ifc_cpus) - 1;
	last_valid = CPU_FLS(&ctx->ifc_cpus) - 1;

	if (base_cpuid != CORE_OFFSET_UNSPECIFIED) {
		/*
		 * Align the user-chosen base CPU ID to the next valid CPU
		 * for this device.  If the chosen base CPU ID is smaller
		 * than the first valid CPU or larger than the last valid
		 * CPU, we assume the user does not know what the valid
		 * range is for this device and is thinking in terms of a
		 * zero-based reference frame, and so we shift the given
		 * value into the valid range (and wrap accordingly) so the
		 * intent is translated to the proper frame of reference.
		 * If the base CPU ID is within the valid first/last, but
		 * does not correspond to a valid CPU, it is advanced to the
		 * next valid CPU (wrapping if necessary).
		 */
		if (base_cpuid < first_valid || base_cpuid > last_valid) {
			/* shift from zero-based to first_valid-based */
			base_cpuid += first_valid;
			/* wrap to range [first_valid, last_valid] */
			base_cpuid = (base_cpuid - first_valid) %
			    (last_valid - first_valid + 1);
		}
		if (!CPU_ISSET(base_cpuid, &ctx->ifc_cpus)) {
			/*
			 * base_cpuid is in [first_valid, last_valid], but
			 * not a member of the valid set.  In this case,
			 * there will always be a member of the valid set
			 * with a CPU ID that is greater than base_cpuid,
			 * and we simply advance to it.
			 */
			while (!CPU_ISSET(base_cpuid, &ctx->ifc_cpus))
				base_cpuid++;
		}
		return (base_cpuid);
	}

	/*
	 * Determine how many cores will be consumed by performing the CPU
	 * assignments and counting how many of the assigned CPUs correspond
	 * to CPUs in the set of context CPUs.  This is done using the CPU
	 * ID first_valid as the base CPU ID, as the base CPU must be within
	 * the set of context CPUs.
	 *
	 * Note not all assigned CPUs will be in the set of context CPUs
	 * when separate CPUs are being allocated to TX and RX queues,
	 * assignment to logical cores has been enabled, the set of context
	 * CPUs contains only physical CPUs, and TX queues are mapped to L2
	 * neighbors of CPUs that RX queues have been mapped to - in this
	 * case we do only want to count how many CPUs in the set of context
	 * CPUs have been consumed, as that determines the next CPU in that
	 * set to start allocating at for the next device for which
	 * core_offset is not set.
	 */
	CPU_ZERO(&assigned_cpus);
	for (i = 0; i < scctx->isc_ntxqsets; i++)
		CPU_SET(get_cpuid_for_queue(ctx, first_valid, i, true),
		    &assigned_cpus);
	for (i = 0; i < scctx->isc_nrxqsets; i++)
		CPU_SET(get_cpuid_for_queue(ctx, first_valid, i, false),
		    &assigned_cpus);
	CPU_AND(&assigned_cpus, &assigned_cpus, &ctx->ifc_cpus);
	cores_consumed = CPU_COUNT(&assigned_cpus);

	mtx_lock(&cpu_offset_mtx);
	/* Reuse the running offset for a previously-seen CPU set, if any. */
	SLIST_FOREACH(op, &cpu_offsets, entries) {
		if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) {
			base_cpuid = op->next_cpuid;
			op->next_cpuid = cpuid_advance(ctx, op->next_cpuid,
			    cores_consumed);
			MPASS(op->refcount < UINT_MAX);
			op->refcount++;
			break;
		}
	}
	if (base_cpuid == CORE_OFFSET_UNSPECIFIED) {
		/* First context with this CPU set: start a new record. */
		base_cpuid = first_valid;
		op = malloc(sizeof(struct cpu_offset), M_IFLIB,
		    M_NOWAIT | M_ZERO);
		if (op == NULL) {
			device_printf(ctx->ifc_dev,
			    "allocation for cpu offset failed.\n");
		} else {
			op->next_cpuid = cpuid_advance(ctx, base_cpuid,
			    cores_consumed);
			op->refcount = 1;
			CPU_COPY(&ctx->ifc_cpus, &op->set);
			SLIST_INSERT_HEAD(&cpu_offsets, op, entries);
		}
	}
	mtx_unlock(&cpu_offset_mtx);

	return (base_cpuid);
}
/*
 * Drop this context's reference on the cpu_offsets record matching its
 * CPU set, freeing the record once the last reference is gone.
 */
static void
unref_ctx_core_offset(if_ctx_t ctx)
{
	struct cpu_offset *cur, *tmp;

	mtx_lock(&cpu_offset_mtx);
	SLIST_FOREACH_SAFE(cur, &cpu_offsets, entries, tmp) {
		if (CPU_CMP(&ctx->ifc_cpus, &cur->set) != 0)
			continue;
		MPASS(cur->refcount > 0);
		if (--cur->refcount == 0) {
			SLIST_REMOVE(&cpu_offsets, cur, cpu_offset, entries);
			free(cur, M_IFLIB);
		}
		break;
	}
	mtx_unlock(&cpu_offset_mtx);
}
/*
 * Core iflib attach routine, called from a driver's attach method (or
 * iflib_device_attach()).  Allocates and initializes the iflib context,
 * runs the driver's ATTACH_PRE/ATTACH_POST methods, sizes the queues,
 * sets up interrupts (MSI-X, MSI, or legacy), and attaches the ifnet.
 * On success *ctxp receives the new context and 0 is returned; on error
 * everything set up so far is torn down via the fail_* labels (note the
 * unwind labels must stay in reverse order of setup).
 */
int
iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp)
{
	if_ctx_t ctx;
	if_t ifp;
	if_softc_ctx_t scctx;
	kobjop_desc_t kobj_desc;
	kobj_method_t *kobj_method;
	int err, msix, rid;
	int num_txd, num_rxd;

	ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO);

	/* Drivers may pass sc == NULL to have iflib own the softc. */
	if (sc == NULL) {
		sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
		device_set_softc(dev, ctx);
		ctx->ifc_flags |= IFC_SC_ALLOCATED;
	}

	ctx->ifc_sctx = sctx;
	ctx->ifc_dev = dev;
	ctx->ifc_softc = sc;

	if ((err = iflib_register(ctx)) != 0) {
		device_printf(dev, "iflib_register failed %d\n", err);
		goto fail_ctx_free;
	}
	iflib_add_device_sysctl_pre(ctx);

	scctx = &ctx->ifc_softc_ctx;
	ifp = ctx->ifc_ifp;

	iflib_reset_qvalues(ctx);
	IFNET_WLOCK();
	CTX_LOCK(ctx);
	/* ATTACH_PRE fills in scctx (capabilities, queue limits, txrx ops). */
	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
		goto fail_unlock;
	}
	_iflib_pre_assert(scctx);
	ctx->ifc_txrx = *scctx->isc_txrx;

	MPASS(scctx->isc_dma_width <= flsll(BUS_SPACE_MAXADDR));

	if (sctx->isc_flags & IFLIB_DRIVER_MEDIA)
		ctx->ifc_mediap = scctx->isc_media;

#ifdef INVARIANTS
	if (scctx->isc_capabilities & IFCAP_TXCSUM)
		MPASS(scctx->isc_tx_csum_flags);
#endif

	if_setcapabilities(ifp,
	    scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_MEXTPG);
	if_setcapenable(ifp,
	    scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_MEXTPG);

	/* Clamp the queue-set counts to the driver-declared maximums. */
	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;

	num_txd = iflib_num_tx_descs(ctx);
	num_rxd = iflib_num_rx_descs(ctx);

	/* XXX change for per-queue sizes */
	device_printf(dev, "Using %d TX descriptors and %d RX descriptors\n",
	    num_txd, num_rxd);

	/* Cap per-packet segment counts so one packet can't hog the ring. */
	if (scctx->isc_tx_nsegments > num_txd / MAX_SINGLE_PACKET_FRACTION)
		scctx->isc_tx_nsegments = max(1, num_txd /
		    MAX_SINGLE_PACKET_FRACTION);
	if (scctx->isc_tx_tso_segments_max > num_txd /
	    MAX_SINGLE_PACKET_FRACTION)
		scctx->isc_tx_tso_segments_max = max(1,
		    num_txd / MAX_SINGLE_PACKET_FRACTION);

	/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
	if (if_getcapabilities(ifp) & IFCAP_TSO) {
		/*
		 * The stack can't handle a TSO size larger than IP_MAXPACKET,
		 * but some MACs do.
		 */
		if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
		    IP_MAXPACKET));
		/*
		 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
		 * into account.  In the worst case, each of these calls will
		 * add another mbuf and, thus, the requirement for another DMA
		 * segment.  So for best performance, it doesn't make sense to
		 * advertize a maximum of TSO segments that typically will
		 * require defragmentation in iflib_encap().
		 */
		if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
		if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
	}
	if (scctx->isc_rss_table_size == 0)
		scctx->isc_rss_table_size = 64;
	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;

	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
	/* XXX format name */
	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx,
	    NULL, NULL, "admin");

	/* Set up cpu set.  If it fails, use the set of all CPUs. */
	if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
		device_printf(dev, "Unable to fetch CPU list\n");
		CPU_COPY(&all_cpus, &ctx->ifc_cpus);
		ctx->ifc_cpus_are_physical_cores = false;
	} else
		ctx->ifc_cpus_are_physical_cores = true;
	MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0);

	/*
	** Now set up MSI or MSI-X, should return us the number of supported
	** vectors (will be 1 for a legacy interrupt and MSI).
	*/
	if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
		msix = scctx->isc_vectors;
	} else if (scctx->isc_msix_bar != 0)
		/*
		 * The simple fact that isc_msix_bar is not 0 does not mean we
		 * we have a good value there that is known to work.
		 */
		msix = iflib_msix_init(ctx);
	else {
		scctx->isc_vectors = 1;
		scctx->isc_ntxqsets = 1;
		scctx->isc_nrxqsets = 1;
		scctx->isc_intr = IFLIB_INTR_LEGACY;
		msix = 0;
	}
	/* Get memory for the station queues */
	if ((err = iflib_queues_alloc(ctx))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		goto fail_intr_free;
	}

	if ((err = iflib_qset_structures_setup(ctx)))
		goto fail_queues;

	/*
	 * Now that we know how many queues there are, get the core offset.
	 */
	ctx->ifc_sysctl_core_offset = get_ctx_core_offset(ctx);

	if (msix > 1) {
		/*
		 * When using MSI-X, ensure that ifdi_{r,t}x_queue_intr_enable
		 * aren't the default NULL implementation.
		 */
		kobj_desc = &ifdi_rx_queue_intr_enable_desc;
		kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL,
		    kobj_desc);
		if (kobj_method == &kobj_desc->deflt) {
			device_printf(dev,
			    "MSI-X requires ifdi_rx_queue_intr_enable method");
			err = EOPNOTSUPP;
			goto fail_queues;
		}
		kobj_desc = &ifdi_tx_queue_intr_enable_desc;
		kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL,
		    kobj_desc);
		if (kobj_method == &kobj_desc->deflt) {
			device_printf(dev,
			    "MSI-X requires ifdi_tx_queue_intr_enable method");
			err = EOPNOTSUPP;
			goto fail_queues;
		}
		/*
		 * Assign the MSI-X vectors.
		 * Note that the default NULL ifdi_msix_intr_assign method will
		 * fail here, too.
		 */
		err = IFDI_MSIX_INTR_ASSIGN(ctx, msix);
		if (err != 0) {
			device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n",
			    err);
			goto fail_queues;
		}
	} else if (scctx->isc_intr != IFLIB_INTR_MSIX) {
		rid = 0;
		if (scctx->isc_intr == IFLIB_INTR_MSI) {
			MPASS(msix == 1);
			rid = 1;
		}
		if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
			device_printf(dev, "iflib_legacy_setup failed %d\n", err);
			goto fail_queues;
		}
	} else {
		/* msix <= 1 but the driver asked for MSI-X: unusable. */
		device_printf(dev,
		    "Cannot use iflib with only 1 MSI-X interrupt!\n");
		err = ENODEV;
		goto fail_queues;
	}

	/*
	 * It prevents a double-locking panic with iflib_media_status when
	 * the driver loads.
	 */
	CTX_UNLOCK(ctx);
	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet);
	CTX_LOCK(ctx);

	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
		goto fail_detach;
	}

	/*
	 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
	 * This must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets if_hdrlen to the default value.
	 */
	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	if ((err = iflib_netmap_attach(ctx))) {
		device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err);
		goto fail_detach;
	}
	*ctxp = ctx;

	DEBUGNET_SET(ctx->ifc_ifp, iflib);

	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
	iflib_add_device_sysctl_post(ctx);
	iflib_add_pfil(ctx);
	ctx->ifc_flags |= IFC_INIT_DONE;
	CTX_UNLOCK(ctx);
	IFNET_WUNLOCK();

	return (0);

	/* Error unwind: labels run in reverse order of the setup above. */
fail_detach:
	ether_ifdetach(ctx->ifc_ifp);
fail_queues:
	iflib_tqg_detach(ctx);
	iflib_tx_structures_free(ctx);
	iflib_rx_structures_free(ctx);
	IFDI_DETACH(ctx);
	IFDI_QUEUES_FREE(ctx);
fail_intr_free:
	iflib_free_intr_mem(ctx);
fail_unlock:
	CTX_UNLOCK(ctx);
	IFNET_WUNLOCK();
	iflib_deregister(ctx);
fail_ctx_free:
	device_set_softc(ctx->ifc_dev, NULL);
	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
		free(ctx->ifc_softc, M_IFLIB);
	free(ctx, M_IFLIB);
	return (err);
}
/*
 * Generic bus attach method for iflib drivers: fetch and validate the
 * driver's shared context, enable PCI bus mastering, and hand off to
 * iflib_device_register() with an iflib-owned softc.
 */
int
iflib_device_attach(device_t dev)
{
	if_shared_ctx_t sctx;
	if_ctx_t ctx;

	sctx = DEVICE_REGISTER(dev);
	if (sctx == NULL || sctx->isc_magic != IFLIB_MAGIC)
		return (ENOTSUP);

	pci_enable_busmaster(dev);
	return (iflib_device_register(dev, NULL, sctx, &ctx));
}
/*
 * Tear down an iflib context: the inverse of iflib_device_register().
 * Refuses to proceed while VLANs (or SR-IOV VFs) still reference the
 * interface.  The teardown order here is deliberate - VLAN handlers,
 * netmap, and the ifnet are detached before the queues are stopped and
 * freed, and the context lock is destroyed last (see comment below).
 * Returns 0 on success or EBUSY if the interface is still in use.
 */
int
iflib_device_deregister(if_ctx_t ctx)
{
	if_t ifp = ctx->ifc_ifp;
	device_t dev = ctx->ifc_dev;

	/* Make sure VLANS are not using driver */
	if (if_vlantrunkinuse(ifp)) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}
#ifdef PCI_IOV
	if (!CTX_IS_VF(ctx) && pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}
#endif

	/* Mark the context so concurrent paths can see the detach. */
	STATE_LOCK(ctx);
	ctx->ifc_flags |= IFC_IN_DETACH;
	STATE_UNLOCK(ctx);

	/* Unregister VLAN handlers before calling iflib_stop() */
	iflib_unregister_vlan_handlers(ctx);

	iflib_netmap_detach(ifp);
	ether_ifdetach(ifp);

	CTX_LOCK(ctx);
	iflib_stop(ctx);
	CTX_UNLOCK(ctx);

	iflib_rem_pfil(ctx);
	if (ctx->ifc_led_dev != NULL)
		led_destroy(ctx->ifc_led_dev);

	iflib_tqg_detach(ctx);
	iflib_tx_structures_free(ctx);
	iflib_rx_structures_free(ctx);

	CTX_LOCK(ctx);
	IFDI_DETACH(ctx);
	IFDI_QUEUES_FREE(ctx);
	CTX_UNLOCK(ctx);

	/* ether_ifdetach calls if_qflush - lock must be destroy afterwards*/
	iflib_free_intr_mem(ctx);

	bus_generic_detach(dev);

	iflib_deregister(ctx);

	device_set_softc(ctx->ifc_dev, NULL);
	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
		free(ctx->ifc_softc, M_IFLIB);
	unref_ctx_core_offset(ctx);
	free(ctx, M_IFLIB);
	return (0);
}
/*
 * Detach all of the context's grouptasks from their task queue groups
 * and drain the per-TX-queue callouts.  A task with a NULL gt_uniq was
 * never attached and is skipped.
 */
static void
iflib_tqg_detach(if_ctx_t ctx)
{
	struct taskqgroup *tqg;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	int idx;

	/* XXX drain any dependent tasks */
	tqg = qgroup_if_io_tqg;
	txq = ctx->ifc_txqs;
	for (idx = 0; idx < NTXQSETS(ctx); idx++, txq++) {
		callout_drain(&txq->ift_timer);
#ifdef DEV_NETMAP
		callout_drain(&txq->ift_netmap_timer);
#endif /* DEV_NETMAP */
		if (txq->ift_task.gt_uniq != NULL)
			taskqgroup_detach(tqg, &txq->ift_task);
	}
	rxq = ctx->ifc_rxqs;
	for (idx = 0; idx < NRXQSETS(ctx); idx++, rxq++) {
		if (rxq->ifr_task.gt_uniq != NULL)
			taskqgroup_detach(tqg, &rxq->ifr_task);
	}

	/* The admin and VFLR tasks live on the config group. */
	tqg = qgroup_if_config_tqg;
	if (ctx->ifc_admin_task.gt_uniq != NULL)
		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
	if (ctx->ifc_vflr_task.gt_uniq != NULL)
		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
}
/*
 * Release interrupt resources: the legacy IRQ (unless pure MSI-X was in
 * use), any MSI/MSI-X vectors, and the MSI-X table BAR mapping.
 */
static void
iflib_free_intr_mem(if_ctx_t ctx)
{
	device_t dev = ctx->ifc_dev;

	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX)
		iflib_irq_free(ctx, &ctx->ifc_legacy_irq);
	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY)
		pci_release_msi(dev);
	if (ctx->ifc_msix_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(ctx->ifc_msix_mem), ctx->ifc_msix_mem);
		ctx->ifc_msix_mem = NULL;
	}
}
/*
 * Generic bus detach method: delegate to iflib_device_deregister() with
 * the context stashed in the device softc.
 */
int
iflib_device_detach(device_t dev)
{

	return (iflib_device_deregister(device_get_softc(dev)));
}
/*
 * Generic bus suspend method: notify the driver under the context lock,
 * then let the bus suspend any children.
 */
int
iflib_device_suspend(device_t dev)
{
	if_ctx_t ctx;

	ctx = device_get_softc(dev);
	CTX_LOCK(ctx);
	IFDI_SUSPEND(ctx);
	CTX_UNLOCK(ctx);

	return (bus_generic_suspend(dev));
}
/*
 * Generic bus shutdown method: notify the driver under the context lock.
 * Like the original, this reuses bus_generic_suspend() for the children.
 */
int
iflib_device_shutdown(device_t dev)
{
	if_ctx_t ctx;

	ctx = device_get_softc(dev);
	CTX_LOCK(ctx);
	IFDI_SHUTDOWN(ctx);
	CTX_UNLOCK(ctx);

	return (bus_generic_suspend(dev));
}
/*
 * Generic bus resume method: let the driver resume, re-initialize the
 * interface, then kick every TX queue so stalled transmits restart.
 */
int
iflib_device_resume(device_t dev)
{
	if_ctx_t ctx = device_get_softc(dev);
	iflib_txq_t txq;
	int i;

	CTX_LOCK(ctx);
	IFDI_RESUME(ctx);
	iflib_if_init_locked(ctx);
	CTX_UNLOCK(ctx);

	txq = ctx->ifc_txqs;
	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);

	return (bus_generic_resume(dev));
}
/*
 * SR-IOV initialization entry point: forward to the driver's IOV_INIT
 * method under the context lock.
 */
int
iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
{
	if_ctx_t ctx = device_get_softc(dev);
	int rc;

	CTX_LOCK(ctx);
	rc = IFDI_IOV_INIT(ctx, num_vfs, params);
	CTX_UNLOCK(ctx);

	return (rc);
}
/*
 * SR-IOV teardown entry point: forward to the driver's IOV_UNINIT
 * method under the context lock.
 */
void
iflib_device_iov_uninit(device_t dev)
{
	if_ctx_t ctx = device_get_softc(dev);

	CTX_LOCK(ctx);
	IFDI_IOV_UNINIT(ctx);
	CTX_UNLOCK(ctx);
}
/*
 * SR-IOV VF creation entry point: forward to the driver's IOV_VF_ADD
 * method under the context lock.
 */
int
iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
	if_ctx_t ctx = device_get_softc(dev);
	int rc;

	CTX_LOCK(ctx);
	rc = IFDI_IOV_VF_ADD(ctx, vfnum, params);
	CTX_UNLOCK(ctx);

	return (rc);
}
/*********************************************************************
*
* MODULE FUNCTION DEFINITIONS
*
**********************************************************************/
/*
* - Start a fast taskqueue thread for each core
* - Start a taskqueue for control operations
*/
/*
 * One-time module initialization: set the default TX-queue timer period
 * to half a second (hz is ticks per second).  Always succeeds.
 */
static int
iflib_module_init(void)
{
	iflib_timer_default = hz / 2;
	return (0);
}
/*
 * Kernel module event handler.  Initializes iflib on load; unloading is
 * refused (EBUSY) because attached drivers depend on it.
 */
static int
iflib_module_event_handler(module_t mod, int what, void *arg)
{
	int rc;

	switch (what) {
	case MOD_LOAD:
		rc = iflib_module_init();
		break;
	case MOD_UNLOAD:
		rc = EBUSY;
		break;
	default:
		rc = EOPNOTSUPP;
		break;
	}

	return (rc);
}
/*********************************************************************
*
* PUBLIC FUNCTION DEFINITIONS
* ordered as in iflib.h
*
**********************************************************************/
/*
 * INVARIANTS-time sanity checks on the driver's shared context: the DMA
 * size limits must be set, the per-set queue counts must be in [1, 8],
 * and every min/max/default descriptor count must be a non-zero power
 * of two.  Compiled away when MPASS is a no-op.
 */
static void
_iflib_assert(if_shared_ctx_t sctx)
{
	int i;

	MPASS(sctx->isc_tx_maxsize);
	MPASS(sctx->isc_tx_maxsegsize);

	MPASS(sctx->isc_rx_maxsize);
	MPASS(sctx->isc_rx_nsegments);
	MPASS(sctx->isc_rx_maxsegsize);

	MPASS(sctx->isc_nrxqs >= 1 && sctx->isc_nrxqs <= 8);
	for (i = 0; i < sctx->isc_nrxqs; i++) {
		MPASS(sctx->isc_nrxd_min[i]);
		MPASS(powerof2(sctx->isc_nrxd_min[i]));
		MPASS(sctx->isc_nrxd_max[i]);
		MPASS(powerof2(sctx->isc_nrxd_max[i]));
		MPASS(sctx->isc_nrxd_default[i]);
		MPASS(powerof2(sctx->isc_nrxd_default[i]));
	}

	MPASS(sctx->isc_ntxqs >= 1 && sctx->isc_ntxqs <= 8);
	for (i = 0; i < sctx->isc_ntxqs; i++) {
		MPASS(sctx->isc_ntxd_min[i]);
		MPASS(powerof2(sctx->isc_ntxd_min[i]));
		MPASS(sctx->isc_ntxd_max[i]);
		MPASS(powerof2(sctx->isc_ntxd_max[i]));
		MPASS(sctx->isc_ntxd_default[i]);
		MPASS(powerof2(sctx->isc_ntxd_default[i]));
	}
}
/*
 * Verify, after IFDI_ATTACH_PRE, that the driver filled in every
 * mandatory txrx method pointer in the softc context.
 */
static void
_iflib_pre_assert(if_softc_ctx_t scctx)
{
	MPASS(scctx->isc_txrx->ift_txd_encap);
	MPASS(scctx->isc_txrx->ift_txd_flush);
	MPASS(scctx->isc_txrx->ift_txd_credits_update);
	MPASS(scctx->isc_txrx->ift_rxd_available);
	MPASS(scctx->isc_txrx->ift_rxd_pkt_get);
	MPASS(scctx->isc_txrx->ift_rxd_refill);
	MPASS(scctx->isc_txrx->ift_rxd_flush);
}
/*
 * First-stage context registration: validate the shared context,
 * initialize the context locks, allocate the ifnet, wire up the kobj
 * method table, install the ifnet method pointers, and register the
 * VLAN config/unconfig event handlers.  Returns 0 (the ENOMEM path for
 * if_alloc() is gone: if_alloc(9) cannot fail).
 *
 * Fix: lines carrying leading '-' diff-residue markers (the removed
 * if_alloc() NULL check) were not valid C and are dropped here, matching
 * the post-change file.
 */
static int
iflib_register(if_ctx_t ctx)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	driver_t *driver = sctx->isc_driver;
	device_t dev = ctx->ifc_dev;
	if_t ifp;

	_iflib_assert(sctx);

	CTX_LOCK_INIT(ctx);
	STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev));
	/* if_alloc(9) cannot fail, so no NULL check is needed. */
	ifp = ctx->ifc_ifp = if_alloc(IFT_ETHER);

	/*
	 * Initialize our context's device specific methods
	 */
	kobj_init((kobj_t) ctx, (kobj_class_t) driver);
	kobj_class_compile((kobj_class_t) driver);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setsoftc(ifp, ctx);
	if_setdev(ifp, dev);
	if_setinitfn(ifp, iflib_if_init);
	if_setioctlfn(ifp, iflib_if_ioctl);
#ifdef ALTQ
	/* ALTQ requires the legacy if_start interface alongside if_transmit. */
	if_setstartfn(ifp, iflib_altq_if_start);
	if_settransmitfn(ifp, iflib_altq_if_transmit);
	if_setsendqready(ifp);
#else
	if_settransmitfn(ifp, iflib_if_transmit);
#endif
	if_setqflushfn(ifp, iflib_if_qflush);
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);

	ctx->ifc_vlan_attach_event =
	    EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
	    EVENTHANDLER_PRI_FIRST);
	ctx->ifc_vlan_detach_event =
	    EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx,
	    EVENTHANDLER_PRI_FIRST);

	/* Provide default media unless the driver manages its own. */
	if ((sctx->isc_flags & IFLIB_DRIVER_MEDIA) == 0) {
		ctx->ifc_mediap = &ctx->ifc_media;
		ifmedia_init(ctx->ifc_mediap, IFM_IMASK,
		    iflib_media_change, iflib_media_status);
	}

	return (0);
}
/*
 * Deregister the VLAN config/unconfig event handlers, if registered,
 * and clear the cached handles so a second call is a no-op.
 */
static void
iflib_unregister_vlan_handlers(if_ctx_t ctx)
{
	/* Unregister VLAN events */
	if (ctx->ifc_vlan_attach_event != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config,
		    ctx->ifc_vlan_attach_event);
		ctx->ifc_vlan_attach_event = NULL;
	}
	if (ctx->ifc_vlan_detach_event != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_unconfig,
		    ctx->ifc_vlan_detach_event);
		ctx->ifc_vlan_detach_event = NULL;
	}
}
/*
 * Final-stage context teardown: the inverse of iflib_register().
 * Removes media entries, makes sure the VLAN event handlers are gone,
 * drops the kobj reference, frees the ifnet, and destroys the locks.
 * The CTX lock is destroyed last (see comment below).
 */
static void
iflib_deregister(if_ctx_t ctx)
{
	if_t ifp = ctx->ifc_ifp;

	/* Remove all media */
	ifmedia_removeall(&ctx->ifc_media);

	/* Ensure that VLAN event handlers are unregistered */
	iflib_unregister_vlan_handlers(ctx);

	/* Release kobject reference */
	kobj_delete((kobj_t) ctx, NULL);

	/* Free the ifnet structure */
	if_free(ifp);

	STATE_LOCK_DESTROY(ctx);

	/* ether_ifdetach calls if_qflush - lock must be destroy afterwards*/
	CTX_LOCK_DESTROY(ctx);
}
/*
 * Allocate the software state and DMA descriptor areas for every TX and
 * RX queue set, then hand the ring addresses to the driver via
 * IFDI_TX_QUEUES_ALLOC / IFDI_RX_QUEUES_ALLOC.
 *
 * Returns 0 on success, otherwise ENOMEM or the driver's error code.
 *
 * NOTE(review): the err_tx_desc/err_rx_desc unwind paths free only the
 * top-level queue arrays; per-queue DMA info, buf_rings and free lists
 * allocated before the failure appear to be leaked (see the XXX
 * comments below) -- confirm against the callers' cleanup.
 */
static int
iflib_queues_alloc(if_ctx_t ctx)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = ctx->ifc_dev;
	int nrxqsets = scctx->isc_nrxqsets;
	int ntxqsets = scctx->isc_ntxqsets;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	iflib_fl_t fl = NULL;
	int i, j, cpu, err, txconf, rxconf;
	iflib_dma_info_t ifdip;
	uint32_t *rxqsizes = scctx->isc_rxqsizes;
	uint32_t *txqsizes = scctx->isc_txqsizes;
	uint8_t nrxqs = sctx->isc_nrxqs;	/* hardware rings per RX qset */
	uint8_t ntxqs = sctx->isc_ntxqs;	/* hardware rings per TX qset */
	int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1;
	int fl_offset = (sctx->isc_flags & IFLIB_HAS_RXCQ ? 1 : 0);
	caddr_t *vaddrs;
	uint64_t *paddrs;

	KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1"));
	KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1"));
	KASSERT(nrxqs >= fl_offset + nfree_lists,
	    ("there must be at least a rxq for each free list"));

	/* Allocate the TX ring struct memory */
	if (!(ctx->ifc_txqs =
	    (iflib_txq_t) malloc(sizeof(struct iflib_txq) *
	    ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate TX ring memory\n");
		err = ENOMEM;
		goto fail;
	}

	/* Now allocate the RX */
	if (!(ctx->ifc_rxqs =
	    (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) *
	    nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate RX ring memory\n");
		err = ENOMEM;
		goto rx_fail;
	}

	txq = ctx->ifc_txqs;
	rxq = ctx->ifc_rxqs;

	/*
	 * XXX handle allocation failure
	 */
	for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
		/* Set up some basics */

		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs,
		    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
			device_printf(dev,
			    "Unable to allocate TX DMA info memory\n");
			err = ENOMEM;
			goto err_tx_desc;
		}
		txq->ift_ifdi = ifdip;
		/* One DMA descriptor area per hardware ring in this qset. */
		for (j = 0; j < ntxqs; j++, ifdip++) {
			if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, 0)) {
				device_printf(dev,
				    "Unable to allocate TX descriptors\n");
				err = ENOMEM;
				goto err_tx_desc;
			}
			txq->ift_txd_size[j] = scctx->isc_txd_size[j];
			bzero((void *)ifdip->idi_vaddr, txqsizes[j]);
		}
		txq->ift_ctx = ctx;
		txq->ift_id = i;
		/* With a TX completion queue, ring 0 is the CQ. */
		if (sctx->isc_flags & IFLIB_HAS_TXCQ) {
			txq->ift_br_offset = 1;
		} else {
			txq->ift_br_offset = 0;
		}

		if (iflib_txsd_alloc(txq)) {
			device_printf(dev, "Critical Failure setting up TX buffers\n");
			err = ENOMEM;
			goto err_tx_desc;
		}

		/* Initialize the TX lock */
		snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:TX(%d):callout",
		    device_get_nameunit(dev), txq->ift_id);
		mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF);
		callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0);
		txq->ift_timer.c_cpu = cpu;
#ifdef DEV_NETMAP
		callout_init_mtx(&txq->ift_netmap_timer, &txq->ift_mtx, 0);
		txq->ift_netmap_timer.c_cpu = cpu;
#endif /* DEV_NETMAP */

		err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain,
		    iflib_txq_can_drain, M_IFLIB, M_WAITOK);
		if (err) {
			/* XXX free any allocated rings */
			device_printf(dev, "Unable to allocate buf_ring\n");
			goto err_tx_desc;
		}
	}

	for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
		/* Set up some basics */
		callout_init(&rxq->ifr_watchdog, 1);

		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs,
		    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
			device_printf(dev,
			    "Unable to allocate RX DMA info memory\n");
			err = ENOMEM;
			goto err_tx_desc;
		}

		rxq->ifr_ifdi = ifdip;
		/* XXX this needs to be changed if #rx queues != #tx queues */
		rxq->ifr_ntxqirq = 1;
		rxq->ifr_txqid[0] = i;
		for (j = 0; j < nrxqs; j++, ifdip++) {
			if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, 0)) {
				device_printf(dev,
				    "Unable to allocate RX descriptors\n");
				err = ENOMEM;
				goto err_tx_desc;
			}
			bzero((void *)ifdip->idi_vaddr, rxqsizes[j]);
		}
		rxq->ifr_ctx = ctx;
		rxq->ifr_id = i;
		rxq->ifr_fl_offset = fl_offset;
		rxq->ifr_nfl = nfree_lists;
		if (!(fl =
		    (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate free list memory\n");
			err = ENOMEM;
			goto err_tx_desc;
		}
		rxq->ifr_fl = fl;
		/* Point each free list at its DMA area (past the CQ, if any). */
		for (j = 0; j < nfree_lists; j++) {
			fl[j].ifl_rxq = rxq;
			fl[j].ifl_id = j;
			fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
			fl[j].ifl_rxd_size = scctx->isc_rxd_size[j];
		}
		/* Allocate receive buffers for the ring */
		if (iflib_rxsd_alloc(rxq)) {
			device_printf(dev,
			    "Critical Failure setting up receive buffers\n");
			err = ENOMEM;
			goto err_rx_desc;
		}

		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
			fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB,
			    M_WAITOK);
	}

	/* TXQs: gather the ring addresses and pass them to the driver. */
	vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
	paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
	for (i = 0; i < ntxqsets; i++) {
		iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi;

		for (j = 0; j < ntxqs; j++, di++) {
			vaddrs[i*ntxqs + j] = di->idi_vaddr;
			paddrs[i*ntxqs + j] = di->idi_paddr;
		}
	}
	if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) {
		device_printf(ctx->ifc_dev,
		    "Unable to allocate device TX queue\n");
		iflib_tx_structures_free(ctx);
		free(vaddrs, M_IFLIB);
		free(paddrs, M_IFLIB);
		goto err_rx_desc;
	}
	free(vaddrs, M_IFLIB);
	free(paddrs, M_IFLIB);

	/* RXQs: same dance for the receive rings. */
	vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
	paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
	for (i = 0; i < nrxqsets; i++) {
		iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi;

		for (j = 0; j < nrxqs; j++, di++) {
			vaddrs[i*nrxqs + j] = di->idi_vaddr;
			paddrs[i*nrxqs + j] = di->idi_paddr;
		}
	}
	if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) {
		device_printf(ctx->ifc_dev,
		    "Unable to allocate device RX queue\n");
		iflib_tx_structures_free(ctx);
		free(vaddrs, M_IFLIB);
		free(paddrs, M_IFLIB);
		goto err_rx_desc;
	}
	free(vaddrs, M_IFLIB);
	free(paddrs, M_IFLIB);

	return (0);

	/* XXX handle allocation failure changes */
err_rx_desc:
err_tx_desc:
rx_fail:
	if (ctx->ifc_rxqs != NULL)
		free(ctx->ifc_rxqs, M_IFLIB);
	ctx->ifc_rxqs = NULL;
	if (ctx->ifc_txqs != NULL)
		free(ctx->ifc_txqs, M_IFLIB);
	ctx->ifc_txqs = NULL;
fail:
	return (err);
}
/*
 * Run the software setup routine on every TX queue set.  Always
 * returns 0.
 */
static int
iflib_tx_structures_setup(if_ctx_t ctx)
{
	int i;

	for (i = 0; i < NTXQSETS(ctx); i++)
		iflib_txq_setup(&ctx->ifc_txqs[i]);

	return (0);
}
/*
 * Release all TX queue state: per-ring DMA areas, the queue software
 * state, and finally the queue array itself.
 */
static void
iflib_tx_structures_free(if_ctx_t ctx)
{
	if_shared_ctx_t sctx;
	iflib_txq_t txq;
	int i, j;

	sctx = ctx->ifc_sctx;
	for (i = 0; i < NTXQSETS(ctx); i++) {
		txq = &ctx->ifc_txqs[i];
		/* DMA descriptor areas first, then the queue itself. */
		for (j = 0; j < sctx->isc_ntxqs; j++)
			iflib_dma_free(&txq->ift_ifdi[j]);
		iflib_txq_destroy(txq);
	}
	free(ctx->ifc_txqs, M_IFLIB);
	ctx->ifc_txqs = NULL;
}
/*********************************************************************
*
* Initialize all receive rings.
*
**********************************************************************/
/*
 * Initialize every RX queue set: set up LRO state (INET/INET6 kernels
 * only) and invoke the driver's per-queue RX setup.  On LRO failure the
 * LRO state of the queues completed so far is freed and the error is
 * returned.
 */
static int
iflib_rx_structures_setup(if_ctx_t ctx)
{
	iflib_rxq_t rxq = ctx->ifc_rxqs;
	int q;
#if defined(INET6) || defined(INET)
	int err, i;
#endif

	for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
#if defined(INET6) || defined(INET)
		err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
		    TCP_LRO_ENTRIES, min(1024,
		    ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]));
		if (err != 0) {
			device_printf(ctx->ifc_dev,
			    "LRO Initialization failed!\n");
			goto fail;
		}
#endif
		/* NOTE(review): IFDI_RXQ_SETUP's return value is ignored. */
		IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
	}
	return (0);
#if defined(INET6) || defined(INET)
fail:
	/*
	 * Free LRO resources allocated so far, we will only handle
	 * the rings that completed, the failing case will have
	 * cleaned up for itself. 'q' failed, so its the terminus.
	 */
	rxq = ctx->ifc_rxqs;
	for (i = 0; i < q; ++i, rxq++) {
		tcp_lro_free(&rxq->ifr_lc);
	}
	return (err);
#endif
}
/*********************************************************************
*
* Free all receive rings.
*
**********************************************************************/
/*
 * Release all RX queue state: per-ring DMA areas, software descriptors,
 * LRO state (INET/INET6 kernels), and the queue array itself.
 */
static void
iflib_rx_structures_free(if_ctx_t ctx)
{
	if_shared_ctx_t sctx;
	iflib_rxq_t rxq;
	int i, j;

	sctx = ctx->ifc_sctx;
	for (i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++) {
		rxq = &ctx->ifc_rxqs[i];
		for (j = 0; j < sctx->isc_nrxqs; j++)
			iflib_dma_free(&rxq->ifr_ifdi[j]);
		iflib_rx_sds_free(rxq);
#if defined(INET6) || defined(INET)
		tcp_lro_free(&rxq->ifr_lc);
#endif
	}
	free(ctx->ifc_rxqs, M_IFLIB);
	ctx->ifc_rxqs = NULL;
}
/*
 * Set up both the TX and RX queue structures.  The caller is expected
 * to free the queues if either step fails.
 */
static int
iflib_qset_structures_setup(if_ctx_t ctx)
{
	int err;

	err = iflib_tx_structures_setup(ctx);
	if (err != 0) {
		device_printf(ctx->ifc_dev,
		    "iflib_tx_structures_setup failed: %d\n", err);
		return (err);
	}

	err = iflib_rx_structures_setup(ctx);
	if (err != 0)
		device_printf(ctx->ifc_dev,
		    "iflib_rx_structures_setup failed: %d\n", err);

	return (err);
}
/*
 * Public wrapper around the internal interrupt allocator.
 *
 * NOTE(review): 'filter_arg' is accepted but not forwarded —
 * _iflib_irq_alloc() receives only 'arg'.  Confirm this is intentional
 * before relying on filter_arg here.
 */
int
iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
    driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, const char *name)
{

	return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name));
}
/* Just to avoid copy/paste */
/*
 * Attach 'gtask' to 'tqg' pinned to the CPU chosen for queue 'qid',
 * honoring the sysctl-configured core offset.  Returns 0 on success or
 * the taskqgroup_attach_cpu() error.
 */
static inline int
iflib_irq_set_affinity(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,
    int qid, struct grouptask *gtask, struct taskqgroup *tqg, void *uniq,
    const char *name)
{
	device_t dev = ctx->ifc_dev;
	unsigned int cpu_id;
	int rc;

	/* Map this queue onto a CPU, starting from the configured offset. */
	cpu_id = get_cpuid_for_queue(ctx, ctx->ifc_sysctl_core_offset, qid,
	    type == IFLIB_INTR_TX);
	rc = taskqgroup_attach_cpu(tqg, gtask, uniq, cpu_id, dev,
	    irq ? irq->ii_res : NULL, name);
	if (rc != 0) {
		device_printf(dev, "taskqgroup_attach_cpu failed %d\n", rc);
		return (rc);
	}
#ifdef notyet
	if (cpu_id > ctx->ifc_cpuid_highest)
		ctx->ifc_cpuid_highest = cpu_id;
#endif
	return (0);
}
/*
* Allocate a hardware interrupt for subctx using the parent (ctx)'s hardware
* resources.
*
* Similar to iflib_irq_alloc_generic(), but for interrupt type IFLIB_INTR_RXTX
* only.
*
* XXX: Could be removed if subctx's dev has its intr resource allocation
* methods replaced with custom ones?
*/
/*
 * Allocate a hardware interrupt for subctx using the parent (ctx)'s hardware
 * resources.
 *
 * Similar to iflib_irq_alloc_generic(), but for interrupt type IFLIB_INTR_RXTX
 * only.
 *
 * Fix: the grouptask was initialized twice — once inside the switch and
 * again after the filter info was filled in.  A single NET_GROUPTASK_INIT
 * in the switch case (matching iflib_irq_alloc_generic()) suffices; the
 * redundant second call has been removed.
 *
 * XXX: Could be removed if subctx's dev has its intr resource allocation
 * methods replaced with custom ones?
 */
int
iflib_irq_alloc_generic_subctx(if_ctx_t ctx, if_ctx_t subctx, if_irq_t irq,
    int rid, iflib_intr_type_t type,
    driver_filter_t *filter, void *filter_arg,
    int qid, const char *name)
{
	device_t dev, subdev;
	struct grouptask *gtask;
	struct taskqgroup *tqg;
	iflib_filter_info_t info;
	gtask_fn_t *fn;
	int tqrid, err;
	driver_filter_t *intr_fast;
	void *q;

	MPASS(ctx != NULL);
	MPASS(subctx != NULL);

	tqrid = rid;
	dev = ctx->ifc_dev;
	subdev = subctx->ifc_dev;

	switch (type) {
	case IFLIB_INTR_RXTX:
		q = &subctx->ifc_rxqs[qid];
		info = &subctx->ifc_rxqs[qid].ifr_filter_info;
		gtask = &subctx->ifc_rxqs[qid].ifr_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_rx;
		intr_fast = iflib_fast_intr_rxtx;
		NET_GROUPTASK_INIT(gtask, 0, fn, q);
		break;
	default:
		device_printf(dev, "%s: unknown net intr type for subctx %s (%d)\n",
		    __func__, device_get_nameunit(subdev), type);
		return (EINVAL);
	}

	/* Filter info consumed by the fast interrupt handler. */
	info->ifi_filter = filter;
	info->ifi_filter_arg = filter_arg;
	info->ifi_task = gtask;
	info->ifi_ctx = q;

	/* Allocate interrupts from hardware using parent context */
	err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name);
	if (err != 0) {
		device_printf(dev, "_iflib_irq_alloc failed for subctx %s: %d\n",
		    device_get_nameunit(subdev), err);
		return (err);
	}

	if (tqrid != -1) {
		err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg, q,
		    name);
		if (err)
			return (err);
	} else {
		taskqgroup_attach(tqg, gtask, q, dev, irq->ii_res, name);
	}

	return (0);
}
/*
 * Allocate and wire up an interrupt of the given type for queue 'qid':
 * selects the fast handler, initializes the deferred grouptask, fills
 * in the filter info, allocates the interrupt, and (for queue
 * interrupts) attaches the task with CPU affinity.  Returns 0 or errno.
 */
int
iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
    iflib_intr_type_t type, driver_filter_t *filter,
    void *filter_arg, int qid, const char *name)
{
	device_t dev;
	struct grouptask *gtask;
	struct taskqgroup *tqg;
	iflib_filter_info_t info;
	gtask_fn_t *fn;
	int tqrid, err;
	driver_filter_t *intr_fast;
	void *q;

	info = &ctx->ifc_filter_info;
	tqrid = rid;

	switch (type) {
	/* XXX merge tx/rx for netmap? */
	case IFLIB_INTR_TX:
		q = &ctx->ifc_txqs[qid];
		info = &ctx->ifc_txqs[qid].ift_filter_info;
		gtask = &ctx->ifc_txqs[qid].ift_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_tx;
		intr_fast = iflib_fast_intr;
		GROUPTASK_INIT(gtask, 0, fn, q);
		ctx->ifc_flags |= IFC_NETMAP_TX_IRQ;
		break;
	case IFLIB_INTR_RX:
		q = &ctx->ifc_rxqs[qid];
		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
		gtask = &ctx->ifc_rxqs[qid].ifr_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_rx;
		intr_fast = iflib_fast_intr;
		NET_GROUPTASK_INIT(gtask, 0, fn, q);
		break;
	case IFLIB_INTR_RXTX:
		/* Shared vector: uses the RX queue's task and rxtx filter. */
		q = &ctx->ifc_rxqs[qid];
		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
		gtask = &ctx->ifc_rxqs[qid].ifr_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_rx;
		intr_fast = iflib_fast_intr_rxtx;
		NET_GROUPTASK_INIT(gtask, 0, fn, q);
		break;
	case IFLIB_INTR_ADMIN:
		q = ctx;
		tqrid = -1;	/* admin interrupt has no queue affinity */
		info = &ctx->ifc_filter_info;
		gtask = &ctx->ifc_admin_task;
		tqg = qgroup_if_config_tqg;
		fn = _task_fn_admin;
		intr_fast = iflib_fast_intr_ctx;
		break;
	default:
		device_printf(ctx->ifc_dev, "%s: unknown net intr type\n",
		    __func__);
		return (EINVAL);
	}

	/* Filter info consumed by the fast interrupt handler. */
	info->ifi_filter = filter;
	info->ifi_filter_arg = filter_arg;
	info->ifi_task = gtask;
	info->ifi_ctx = q;

	dev = ctx->ifc_dev;
	err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name);
	if (err != 0) {
		device_printf(dev, "_iflib_irq_alloc failed %d\n", err);
		return (err);
	}
	/* The admin task was already attached elsewhere; nothing more to do. */
	if (type == IFLIB_INTR_ADMIN)
		return (0);

	if (tqrid != -1) {
		err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg, q,
		    name);
		if (err)
			return (err);
	} else {
		taskqgroup_attach(tqg, gtask, q, dev, irq->ii_res, name);
	}

	return (0);
}
/*
 * Set up the deferred task for a queue with no dedicated hardware
 * vector, attaching it to the appropriate taskqgroup.  If the
 * CPU-pinned attach fails, falls back to an unpinned attach.
 *
 * NOTE(review): the 'arg' parameter is never used in this function.
 */
void
iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,
    void *arg, int qid, const char *name)
{
	device_t dev;
	struct grouptask *gtask;
	struct taskqgroup *tqg;
	gtask_fn_t *fn;
	void *q;
	int err;

	switch (type) {
	case IFLIB_INTR_TX:
		q = &ctx->ifc_txqs[qid];
		gtask = &ctx->ifc_txqs[qid].ift_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_tx;
		GROUPTASK_INIT(gtask, 0, fn, q);
		break;
	case IFLIB_INTR_RX:
		q = &ctx->ifc_rxqs[qid];
		gtask = &ctx->ifc_rxqs[qid].ifr_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_rx;
		NET_GROUPTASK_INIT(gtask, 0, fn, q);
		break;
	case IFLIB_INTR_IOV:
		q = ctx;
		gtask = &ctx->ifc_vflr_task;
		tqg = qgroup_if_config_tqg;
		fn = _task_fn_iov;
		GROUPTASK_INIT(gtask, 0, fn, q);
		break;
	default:
		panic("unknown net intr type");
	}
	err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg, q, name);
	if (err) {
		/* Affinity attach failed; attach without pinning. */
		dev = ctx->ifc_dev;
		taskqgroup_attach(tqg, gtask, q, dev, irq ? irq->ii_res : NULL,
		    name);
	}
}
/*
 * Release an interrupt: tear down the handler first, then give the IRQ
 * resource back to the bus.
 */
void
iflib_irq_free(if_ctx_t ctx, if_irq_t irq)
{
	device_t dev = ctx->ifc_dev;

	if (irq->ii_tag != NULL)
		bus_teardown_intr(dev, irq->ii_res, irq->ii_tag);
	if (irq->ii_res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(irq->ii_res), irq->ii_res);
}
/*
 * Configure a single legacy (INTx/MSI) interrupt shared by all queues.
 * The first RX queue's task services the interrupt, and the first TX
 * queue's task is attached to the same resource.  When the shared
 * context sets IFLIB_SINGLE_IRQ_RX_ONLY, the ctx-level fast handler is
 * used and the filter context is the ctx rather than the RX queue.
 */
static int
iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, const char *name)
{
	iflib_txq_t txq = ctx->ifc_txqs;
	iflib_rxq_t rxq = ctx->ifc_rxqs;
	if_irq_t irq = &ctx->ifc_legacy_irq;
	iflib_filter_info_t info;
	device_t dev;
	struct grouptask *gtask;
	struct resource *res;
	struct taskqgroup *tqg;
	void *q;
	int err, tqrid;
	bool rx_only;

	q = &ctx->ifc_rxqs[0];
	info = &rxq[0].ifr_filter_info;
	gtask = &rxq[0].ifr_task;
	tqg = qgroup_if_io_tqg;
	tqrid = *rid;
	rx_only = (ctx->ifc_sctx->isc_flags & IFLIB_SINGLE_IRQ_RX_ONLY) != 0;

	ctx->ifc_flags |= IFC_LEGACY;
	/* Fill in the filter info consumed by the fast handler. */
	info->ifi_filter = filter;
	info->ifi_filter_arg = filter_arg;
	info->ifi_task = gtask;
	info->ifi_ctx = rx_only ? ctx : q;

	dev = ctx->ifc_dev;
	/* We allocate a single interrupt resource */
	err = _iflib_irq_alloc(ctx, irq, tqrid, rx_only ? iflib_fast_intr_ctx :
	    iflib_fast_intr_rxtx, NULL, info, name);
	if (err != 0)
		return (err);
	NET_GROUPTASK_INIT(gtask, 0, _task_fn_rx, q);
	res = irq->ii_res;
	taskqgroup_attach(tqg, gtask, q, dev, res, name);
	/* Attach the TX task to the same interrupt resource. */
	GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
	taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, dev, res,
	    "tx");

	return (0);
}
/* Register an LED control device named after this interface's device. */
void
iflib_led_create(if_ctx_t ctx)
{

	ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
	    device_get_nameunit(ctx->ifc_dev));
}
/* Schedule the deferred TX task for queue 'txqid'. */
void
iflib_tx_intr_deferred(if_ctx_t ctx, int txqid)
{

	GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
}
/* Schedule the deferred RX task for queue 'rxqid'. */
void
iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid)
{

	GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task);
}
/*
 * Schedule the admin task.  Asserts that the task has already been
 * attached to a taskqueue.
 */
void
iflib_admin_intr_deferred(if_ctx_t ctx)
{

	MPASS(ctx->ifc_admin_task.gt_taskqueue != NULL);
	GROUPTASK_ENQUEUE(&ctx->ifc_admin_task);
}
/* Schedule the deferred VF-link-reset (IOV) task. */
void
iflib_iov_intr_deferred(if_ctx_t ctx)
{

	GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task);
}
/* Attach a caller-supplied grouptask to the shared I/O taskqgroup on 'cpu'. */
void
iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, const char *name)
{

	taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, NULL, NULL,
	    name);
}
/*
 * Initialize 'gtask' to run 'fn' with argument 'ctx' and attach it to
 * the config taskqgroup (no CPU pinning, no interrupt resource).
 */
void
iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
    const char *name)
{

	GROUPTASK_INIT(gtask, 0, fn, ctx);
	taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, NULL, NULL,
	    name);
}
/* Detach a grouptask previously set up by iflib_config_gtask_init(). */
void
iflib_config_gtask_deinit(struct grouptask *gtask)
{

	taskqgroup_detach(qgroup_if_config_tqg, gtask);
}
/*
 * Propagate a link state change to the stack: update the baudrate,
 * enable prefetch on fast (>= 10Gb/s) links, idle the TX queues on an
 * up -> down transition, and notify the ifnet layer.
 */
void
iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
{
	if_t ifp = ctx->ifc_ifp;
	int i;

	if_setbaudrate(ifp, baudrate);
	if (baudrate >= IF_Gbps(10)) {
		STATE_LOCK(ctx);
		ctx->ifc_flags |= IFC_PREFETCH;
		STATE_UNLOCK(ctx);
	}
	/* If link down, disable watchdog by idling every TX queue. */
	if (ctx->ifc_link_state == LINK_STATE_UP &&
	    link_state == LINK_STATE_DOWN) {
		for (i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++)
			ctx->ifc_txqs[i].ift_qstatus = IFLIB_QUEUE_IDLE;
	}
	ctx->ifc_link_state = link_state;
	if_link_state_change(ifp, link_state);
}
/*
 * Ask the driver how many TX descriptors have been consumed by the
 * hardware and advance the processed counters accordingly, wrapping
 * the consumer index at the ring size.  Returns the credit count
 * (0 if nothing completed).
 */
static int
iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
{
	int credits;
#ifdef INVARIANTS
	int credits_pre = txq->ift_cidx_processed;
#endif

	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
	    BUS_DMASYNC_POSTREAD);
	if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0)
		return (0);

	txq->ift_processed += credits;
	txq->ift_cidx_processed += credits;

	/* MPASS compiles away unless INVARIANTS defines credits_pre. */
	MPASS(credits_pre + credits == txq->ift_cidx_processed);
	if (txq->ift_cidx_processed >= txq->ift_size)
		txq->ift_cidx_processed -= txq->ift_size;
	return (credits);
}
/*
 * Return the number of RX descriptors available on 'rxq' (up to
 * 'budget'), syncing every free list's DMA area before querying the
 * driver.
 */
static int
iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
{
	iflib_fl_t fl;
	u_int idx;

	for (idx = 0; idx < rxq->ifr_nfl; idx++) {
		fl = &rxq->ifr_fl[idx];
		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}
	return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
	    budget));
}
/*
 * Register an interrupt-delay tunable: stores ctx/offset/value in
 * 'info' and exposes it through the iflib_sysctl_int_delay handler.
 */
void
iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
    const char *description, if_int_delay_info_t info,
    int offset, int value)
{

	info->iidi_ctx = ctx;
	info->iidi_offset = offset;
	info->iidi_value = value;
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
	    OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    info, 0, iflib_sysctl_int_delay, "I", description);
}
/* Expose the context sx lock to callers that must take it directly. */
struct sx *
iflib_ctx_lock_get(if_ctx_t ctx)
{

	return (&ctx->ifc_ctx_sx);
}
/*
 * Choose the interrupt mode for the device: attempt MSI-X first,
 * falling back to MSI and then to a legacy interrupt.  Updates
 * isc_vectors, isc_intr, and the TX/RX qset counts in the softc ctx.
 * Returns the number of vectors the device will use.
 */
static int
iflib_msix_init(if_ctx_t ctx)
{
	device_t dev = ctx->ifc_dev;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	int admincnt, bar, err, iflib_num_rx_queues, iflib_num_tx_queues;
	int msgs, queuemsgs, queues, rx_queues, tx_queues, vectors;

	/* Sysctl-requested queue counts (0 = use defaults). */
	iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
	iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;

	if (bootverbose)
		device_printf(dev, "msix_init qsets capped at %d\n",
		    imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets));

	/* Override by tuneable */
	if (scctx->isc_disable_msix)
		goto msi;

	/* First try MSI-X */
	if ((msgs = pci_msix_count(dev)) == 0) {
		if (bootverbose)
			device_printf(dev, "MSI-X not supported or disabled\n");
		goto msi;
	}

	bar = ctx->ifc_softc_ctx.isc_msix_bar;
	/*
	 * bar == -1 => "trust me I know what I'm doing"
	 * Some drivers are for hardware that is so shoddily
	 * documented that no one knows which bars are which
	 * so the developer has to map all bars. This hack
	 * allows shoddy garbage to use MSI-X in this framework.
	 */
	if (bar != -1) {
		ctx->ifc_msix_mem = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &bar, RF_ACTIVE);
		if (ctx->ifc_msix_mem == NULL) {
			device_printf(dev, "Unable to map MSI-X table\n");
			goto msi;
		}
	}

	admincnt = sctx->isc_admin_intrcnt;
#if IFLIB_DEBUG
	/* use only 1 qset in debug mode */
	queuemsgs = min(msgs - admincnt, 1);
#else
	queuemsgs = msgs - admincnt;
#endif
#ifdef RSS
	queues = imin(queuemsgs, rss_getnumbuckets());
#else
	queues = queuemsgs;
#endif
	queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues);
	if (bootverbose)
		device_printf(dev,
		    "intr CPUs: %d queue msgs: %d admincnt: %d\n",
		    CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt);
#ifdef RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif
	/*
	 * NOTE(review): queuemsgs was already computed as msgs - admincnt,
	 * so "queuemsgs - admincnt" subtracts admincnt a second time --
	 * confirm whether this is intentional.
	 */
	if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt)
		rx_queues = iflib_num_rx_queues;
	else
		rx_queues = queues;

	if (rx_queues > scctx->isc_nrxqsets)
		rx_queues = scctx->isc_nrxqsets;

	/*
	 * We want this to be all logical CPUs by default
	 */
	if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues)
		tx_queues = iflib_num_tx_queues;
	else
		tx_queues = mp_ncpus;

	if (tx_queues > scctx->isc_ntxqsets)
		tx_queues = scctx->isc_ntxqsets;

	if (ctx->ifc_sysctl_qs_eq_override == 0) {
#ifdef INVARIANTS
		if (tx_queues != rx_queues)
			device_printf(dev,
			    "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
			    min(rx_queues, tx_queues), min(rx_queues, tx_queues));
#endif
		tx_queues = min(rx_queues, tx_queues);
		rx_queues = min(rx_queues, tx_queues);
	}

	/* Only RX queues and admin events get dedicated vectors here. */
	vectors = rx_queues + admincnt;
	if (msgs < vectors) {
		device_printf(dev,
		    "insufficient number of MSI-X vectors "
		    "(supported %d, need %d)\n", msgs, vectors);
		goto msi;
	}

	device_printf(dev, "Using %d RX queues %d TX queues\n", rx_queues,
	    tx_queues);
	msgs = vectors;
	if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
		if (vectors != msgs) {
			device_printf(dev,
			    "Unable to allocate sufficient MSI-X vectors "
			    "(got %d, need %d)\n", vectors, msgs);
			pci_release_msi(dev);
			if (bar != -1) {
				bus_release_resource(dev, SYS_RES_MEMORY, bar,
				    ctx->ifc_msix_mem);
				ctx->ifc_msix_mem = NULL;
			}
			goto msi;
		}
		device_printf(dev, "Using MSI-X interrupts with %d vectors\n",
		    vectors);
		scctx->isc_vectors = vectors;
		scctx->isc_nrxqsets = rx_queues;
		scctx->isc_ntxqsets = tx_queues;
		scctx->isc_intr = IFLIB_INTR_MSIX;

		return (vectors);
	} else {
		device_printf(dev,
		    "failed to allocate %d MSI-X vectors, err: %d\n", vectors,
		    err);
		if (bar != -1) {
			bus_release_resource(dev, SYS_RES_MEMORY, bar,
			    ctx->ifc_msix_mem);
			ctx->ifc_msix_mem = NULL;
		}
	}

msi:
	/* MSI-X unavailable: single queue pair, MSI if possible else INTx. */
	vectors = pci_msi_count(dev);
	scctx->isc_nrxqsets = 1;
	scctx->isc_ntxqsets = 1;
	scctx->isc_vectors = vectors;
	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) {
		device_printf(dev,"Using an MSI interrupt\n");
		scctx->isc_intr = IFLIB_INTR_MSI;
	} else {
		scctx->isc_vectors = 1;
		device_printf(dev,"Using a Legacy interrupt\n");
		scctx->isc_intr = IFLIB_INTR_LEGACY;
	}

	return (vectors);
}
/* Human-readable names for mp_ring state values, indexed by the state word. */
static const char *ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" };
/*
 * Sysctl handler that renders an mp_ring's state as a string.  oid_arg1
 * points at four uint16_t words: pidx_head, pidx_tail, cidx, state.
 */
static int
mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
{
	int rc;
	uint16_t *state = ((uint16_t *)oidp->oid_arg1);
	struct sbuf *sb;
	const char *ring_state = "UNKNOWN";

	/* XXX needed ? */
	rc = sysctl_wire_old_buffer(req, 0);
	MPASS(rc == 0);
	if (rc != 0)
		return (rc);
	sb = sbuf_new_for_sysctl(NULL, NULL, 80, req);
	MPASS(sb != NULL);
	if (sb == NULL)
		return (ENOMEM);
	/* Only indices 0..3 have names; anything else stays "UNKNOWN". */
	if (state[3] <= 3)
		ring_state = ring_states[state[3]];
	sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s",
	    state[0], state[1], state[2], ring_state);
	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	return(rc);
}
/* Selector for mp_ndesc_handler: which descriptor-count array to expose. */
enum iflib_ndesc_handler {
	IFLIB_NTXD_HANDLER,
	IFLIB_NRXD_HANDLER,
};
/*
 * Sysctl handler for the override_ntxds/override_nrxds tunables: reports
 * and accepts a comma- or space-separated list of per-ring descriptor
 * counts (at most 8 entries).
 */
static int
mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
{
	if_ctx_t ctx = (void *)arg1;
	enum iflib_ndesc_handler type = arg2;
	char buf[256] = {0};
	qidx_t *ndesc;
	char *tok, *rest;
	int nqs, rc, i, off;

	/* Select the descriptor array matching the handler type. */
	nqs = 8;
	switch (type) {
	case IFLIB_NTXD_HANDLER:
		ndesc = ctx->ifc_sysctl_ntxds;
		if (ctx->ifc_sctx)
			nqs = ctx->ifc_sctx->isc_ntxqs;
		break;
	case IFLIB_NRXD_HANDLER:
		ndesc = ctx->ifc_sysctl_nrxds;
		if (ctx->ifc_sctx)
			nqs = ctx->ifc_sctx->isc_nrxqs;
		break;
	default:
		printf("%s: unhandled type\n", __func__);
		return (EINVAL);
	}
	if (nqs == 0)
		nqs = 8;

	/* Render the current values as a comma-separated list. */
	off = 0;
	for (i = 0; i < nqs && i < 8; i++)
		off += sprintf(buf + off, i == 0 ? "%d" : ",%d", ndesc[i]);

	rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	/* Parse the new list back into the array (up to 8 entries). */
	rest = buf;
	for (i = 0; i < 8; i++) {
		tok = strsep(&rest, " ,");
		if (tok == NULL)
			break;
		ndesc[i] = strtoul(tok, NULL, 10);
	}
	return (rc);
}
#define NAME_BUFLEN 32
/*
 * Create the per-device "iflib" sysctl node and the tunables that must
 * exist before attach-time configuration is read (queue counts,
 * descriptor overrides, MSI-X controls, core placement).
 */
static void
iflib_add_device_sysctl_pre(if_ctx_t ctx)
{
	device_t dev = iflib_get_dev(ctx);
	struct sysctl_oid_list *child, *oid_list;
	struct sysctl_ctx_list *ctx_list;
	struct sysctl_oid *node;

	ctx_list = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child,
	    OID_AUTO, "iflib", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "IFLIB fields");
	oid_list = SYSCTL_CHILDREN(node);

	SYSCTL_ADD_CONST_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
	    CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, "driver version");

	/* Queue-count and queue-equality overrides. */
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
	    "# of txqs to use, 0 => use default #");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
	    "# of rxqs to use, 0 => use default #");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
	    "permit #txq != #rxq");
	SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
	    CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
	    "disable MSI-X (default 0)");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0, "set the RX budget");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "tx_abdicate",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0,
	    "cause TX to abdicate instead of running to completion");

	/* Core placement tunables. */
	ctx->ifc_sysctl_core_offset = CORE_OFFSET_UNSPECIFIED;
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "core_offset",
	    CTLFLAG_RDTUN, &ctx->ifc_sysctl_core_offset, 0,
	    "offset to start using cores at");
	SYSCTL_ADD_U8(ctx_list, oid_list, OID_AUTO, "separate_txrx",
	    CTLFLAG_RDTUN, &ctx->ifc_sysctl_separate_txrx, 0,
	    "use separate cores for TX and RX");
	SYSCTL_ADD_U8(ctx_list, oid_list, OID_AUTO, "use_logical_cores",
	    CTLFLAG_RDTUN, &ctx->ifc_sysctl_use_logical_cores, 0,
	    "try to make use of logical cores for TX and RX");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "use_extra_msix_vectors",
	    CTLFLAG_RDTUN, &ctx->ifc_sysctl_extra_msix_vectors, 0,
	    "attempt to reserve the given number of extra MSI-X vectors during driver load for the creation of additional interfaces later");
	SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "allocated_msix_vectors",
	    CTLFLAG_RDTUN, &ctx->ifc_softc_ctx.isc_vectors, 0,
	    "total # of MSI-X vectors allocated by driver");

	/* XXX change for per-queue sizes */
	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, ctx,
	    IFLIB_NTXD_HANDLER, mp_ndesc_handler, "A",
	    "list of # of TX descriptors to use, 0 = use default #");
	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, ctx,
	    IFLIB_NRXD_HANDLER, mp_ndesc_handler, "A",
	    "list of # of RX descriptors to use, 0 = use default #");
}
/*
 * Populate the per-queue sysctl trees (txq%d / rxq%d and their free
 * lists) after the queues have been allocated.  The queue-name format
 * is zero-padded wide enough for the configured qset count.
 */
static void
iflib_add_device_sysctl_post(if_ctx_t ctx)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = iflib_get_dev(ctx);
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx_list;
	iflib_fl_t fl;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	int i, j;
	char namebuf[NAME_BUFLEN];
	char *qfmt;
	struct sysctl_oid *queue_node, *fl_node, *node;
	struct sysctl_oid_list *queue_list, *fl_list;

	ctx_list = device_get_sysctl_ctx(dev);

	node = ctx->ifc_sysctl_node;
	child = SYSCTL_CHILDREN(node);

	/* TX queue statistics. */
	if (scctx->isc_ntxqsets > 100)
		qfmt = "txq%03d";
	else if (scctx->isc_ntxqsets > 10)
		qfmt = "txq%02d";
	else
		qfmt = "txq%d";
	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);
		SYSCTL_ADD_INT(ctx_list, queue_list, OID_AUTO, "cpu",
		    CTLFLAG_RD, &txq->ift_task.gt_cpu, 0,
		    "cpu this queue is bound to");
#if MEMORY_LOGGING
		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
		    CTLFLAG_RD, &txq->ift_dequeued, "total mbufs freed");
		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
		    CTLFLAG_RD, &txq->ift_enqueued, "total mbufs enqueued");
#endif
		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
		    CTLFLAG_RD, &txq->ift_mbuf_defrag,
		    "# of times m_defrag was called");
		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
		    CTLFLAG_RD, &txq->ift_pullups,
		    "# of times m_pullup was called");
		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO,
		    "mbuf_defrag_failed", CTLFLAG_RD,
		    &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO,
		    "no_desc_avail", CTLFLAG_RD, &txq->ift_no_desc_avail,
		    "# of times no descriptors were available");
		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO,
		    "tx_map_failed", CTLFLAG_RD, &txq->ift_map_failed,
		    "# of times DMA map failed");
		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO,
		    "txd_encap_efbig", CTLFLAG_RD, &txq->ift_txd_encap_efbig,
		    "# of times txd_encap returned EFBIG");
		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO,
		    "no_tx_dma_setup", CTLFLAG_RD, &txq->ift_no_tx_dma_setup,
		    "# of times map failed for other than EFBIG");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
		    CTLFLAG_RD, &txq->ift_pidx, 1, "Producer Index");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
		    CTLFLAG_RD, &txq->ift_cidx, 1, "Consumer Index");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO,
		    "txq_cidx_processed", CTLFLAG_RD, &txq->ift_cidx_processed,
		    1, "Consumer Index seen by credit update");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
		    CTLFLAG_RD, &txq->ift_in_use, 1, "descriptors in use");
		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO,
		    "txq_processed", CTLFLAG_RD, &txq->ift_processed,
		    "descriptors procesed for clean");
		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
		    CTLFLAG_RD, &txq->ift_cleaned, "total cleaned");
		/* mp_ring counters for this queue's soft TX ring. */
		SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
		    __DEVOLATILE(uint64_t *, &txq->ift_br->state), 0,
		    mp_ring_state_handler, "A", "soft ring state");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO,
		    "r_enqueues", CTLFLAG_RD, &txq->ift_br->enqueues,
		    "# of enqueues to the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO,
		    "r_drops", CTLFLAG_RD, &txq->ift_br->drops,
		    "# of drops in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO,
		    "r_starts", CTLFLAG_RD, &txq->ift_br->starts,
		    "# of normal consumer starts in mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO,
		    "r_stalls", CTLFLAG_RD, &txq->ift_br->stalls,
		    "# of consumer stalls in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO,
		    "r_restarts", CTLFLAG_RD, &txq->ift_br->restarts,
		    "# of consumer restarts in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO,
		    "r_abdications", CTLFLAG_RD, &txq->ift_br->abdications,
		    "# of consumer abdications in the mp_ring for this queue");
	}

	/* RX queue statistics, including per-free-list nodes. */
	if (scctx->isc_nrxqsets > 100)
		qfmt = "rxq%03d";
	else if (scctx->isc_nrxqsets > 10)
		qfmt = "rxq%02d";
	else
		qfmt = "rxq%d";
	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);
		SYSCTL_ADD_INT(ctx_list, queue_list, OID_AUTO, "cpu",
		    CTLFLAG_RD, &rxq->ifr_task.gt_cpu, 0,
		    "cpu this queue is bound to");
		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO,
			    "rxq_cq_cidx", CTLFLAG_RD, &rxq->ifr_cq_cidx, 1,
			    "Consumer Index");
		}

		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
			snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
			fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list,
			    OID_AUTO, namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE,
			    NULL, "freelist Name");
			fl_list = SYSCTL_CHILDREN(fl_node);
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
			    CTLFLAG_RD, &fl->ifl_pidx, 1, "Producer Index");
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
			    CTLFLAG_RD, &fl->ifl_cidx, 1, "Consumer Index");
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
			    CTLFLAG_RD, &fl->ifl_credits, 1,
			    "credits available");
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "buf_size",
			    CTLFLAG_RD, &fl->ifl_buf_size, 1, "buffer size");
#if MEMORY_LOGGING
			SYSCTL_ADD_UQUAD(ctx_list, fl_list, OID_AUTO,
			    "fl_m_enqueued", CTLFLAG_RD, &fl->ifl_m_enqueued,
			    "mbufs allocated");
			SYSCTL_ADD_UQUAD(ctx_list, fl_list, OID_AUTO,
			    "fl_m_dequeued", CTLFLAG_RD, &fl->ifl_m_dequeued,
			    "mbufs freed");
			SYSCTL_ADD_UQUAD(ctx_list, fl_list, OID_AUTO,
			    "fl_cl_enqueued", CTLFLAG_RD, &fl->ifl_cl_enqueued,
			    "clusters allocated");
			SYSCTL_ADD_UQUAD(ctx_list, fl_list, OID_AUTO,
			    "fl_cl_dequeued", CTLFLAG_RD, &fl->ifl_cl_dequeued,
			    "clusters freed");
#endif
		}
	}

}
/*
 * Ask iflib to reset the interface: sets IFC_DO_RESET, which is
 * consumed by the admin path elsewhere in this file.
 */
void
iflib_request_reset(if_ctx_t ctx)
{
	/* STATE_LOCK serializes updates to ifc_flags. */
	STATE_LOCK(ctx);
	ctx->ifc_flags |= IFC_DO_RESET;
	STATE_UNLOCK(ctx);
}
#ifndef __NO_STRICT_ALIGNMENT
/*
 * Re-align a received frame for strict-alignment architectures:
 * either slide the whole frame up by ETHER_HDR_LEN inside the
 * cluster, or, when there is no room, split the Ethernet header
 * into a freshly allocated header mbuf chained in front.
 * Returns NULL (and frees 'm') if the header mbuf allocation fails.
 */
static struct mbuf *
iflib_fixup_rx(struct mbuf *m)
{
	struct mbuf *hdr;

	if (m->m_len > (MCLBYTES - ETHER_HDR_LEN)) {
		/* No slack in the cluster: split the header off. */
		MGETHDR(hdr, M_NOWAIT, MT_DATA);
		if (hdr == NULL) {
			m_freem(m);
			return (NULL);
		}
		bcopy(m->m_data, hdr->m_data, ETHER_HDR_LEN);
		m->m_data += ETHER_HDR_LEN;
		m->m_len -= ETHER_HDR_LEN;
		hdr->m_len = ETHER_HDR_LEN;
		M_MOVE_PKTHDR(hdr, m);
		hdr->m_next = m;
	} else {
		/* Shift in place so the payload lands aligned. */
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		hdr = m;
	}
	return (hdr);
}
#endif
#ifdef DEBUGNET
/*
 * Report the rx geometry debugnet needs for preallocation:
 * number of rx rings, ring size, and cluster size.
 */
static void
iflib_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
{
	if_ctx_t ctx;

	ctx = if_getsoftc(ifp);
	CTX_LOCK(ctx);
	*nrxr = NRXQSETS(ctx);
	/* NOTE(review): geometry is taken from rxq 0's first free list only. */
	*ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size;
	*clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size;
	CTX_UNLOCK(ctx);
}
/*
 * Handle debugnet lifecycle events.  On DEBUGNET_START, cache the
 * mbuf cluster zone for every rx free list so refills during a
 * panic/debugnet session need no zone lookup, and disable tx
 * batching so frames go out immediately.
 */
static void
iflib_debugnet_event(if_t ifp, enum debugnet_ev event)
{
	if_ctx_t ctx;
	if_softc_ctx_t scctx;
	iflib_fl_t fl;
	iflib_rxq_t rxq;
	int i, j;

	ctx = if_getsoftc(ifp);
	scctx = &ctx->ifc_softc_ctx;

	switch (event) {
	case DEBUGNET_START:
		for (i = 0; i < scctx->isc_nrxqsets; i++) {
			rxq = &ctx->ifc_rxqs[i];
			for (j = 0; j < rxq->ifr_nfl; j++) {
				/*
				 * Index by j: the original always used
				 * ifr_fl[0], re-initializing the first
				 * free list ifr_nfl times and leaving
				 * the others without a cached zone.
				 */
				fl = &rxq->ifr_fl[j];
				fl->ifl_zone = m_getzone(fl->ifl_buf_size);
			}
		}
		iflib_no_tx_batch = 1;
		break;
	default:
		break;
	}
}
/*
 * Transmit a single debugnet frame on tx queue 0, bypassing the
 * normal mp_ring path; ring the doorbell immediately on success.
 * Returns EBUSY unless the interface is running and not OACTIVE,
 * otherwise the iflib_encap() status.
 */
static int
iflib_debugnet_transmit(if_t ifp, struct mbuf *m)
{
	if_ctx_t ctx;
	iflib_txq_t txq;
	int error;

	ctx = if_getsoftc(ifp);
	/* Interface must be fully up and not flow-blocked. */
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	txq = &ctx->ifc_txqs[0];
	error = iflib_encap(txq, &m);
	if (error == 0)
		(void)iflib_txd_db_check(txq, true);
	return (error);
}
/*
 * Poll on behalf of debugnet: reclaim completed tx descriptors on
 * queue 0 and run the rx path on every rx queue.  'count' is not
 * honored per-queue; a fixed budget of 16 frames is used instead.
 */
static int
iflib_debugnet_poll(if_t ifp, int count)
{
	struct epoch_tracker et;
	if_ctx_t ctx;
	if_softc_ctx_t scctx;
	iflib_txq_t txq;
	int i;

	ctx = if_getsoftc(ifp);
	scctx = &ctx->ifc_softc_ctx;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	txq = &ctx->ifc_txqs[0];
	(void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));

	NET_EPOCH_ENTER(et);
	for (i = 0; i < scctx->isc_nrxqsets; i++)
		(void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);
	NET_EPOCH_EXIT(et);
	return (0);
}
#endif /* DEBUGNET */
diff --git a/sys/net80211/ieee80211.c b/sys/net80211/ieee80211.c
index ecb46e08713c..9f91e31d13a6 100644
--- a/sys/net80211/ieee80211.c
+++ b/sys/net80211/ieee80211.c
@@ -1,2685 +1,2681 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2001 Atsushi Onoe
* Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
/*
* IEEE 802.11 generic handler
*/
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/sbuf.h>
#include <machine/stdarg.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_private.h>
#include <net/if_types.h>
#include <net/ethernet.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_vht.h>
#include <net/bpf.h>
/* Human-readable PHY mode names, indexed by enum ieee80211_phymode. */
const char *ieee80211_phymode_name[IEEE80211_MODE_MAX] = {
	[IEEE80211_MODE_AUTO]	  = "auto",
	[IEEE80211_MODE_11A]	  = "11a",
	[IEEE80211_MODE_11B]	  = "11b",
	[IEEE80211_MODE_11G]	  = "11g",
	[IEEE80211_MODE_FH]	  = "FH",
	[IEEE80211_MODE_TURBO_A]  = "turboA",
	[IEEE80211_MODE_TURBO_G]  = "turboG",
	[IEEE80211_MODE_STURBO_A] = "sturboA",
	[IEEE80211_MODE_HALF]	  = "half",
	[IEEE80211_MODE_QUARTER]  = "quarter",
	[IEEE80211_MODE_11NA]	  = "11na",
	[IEEE80211_MODE_11NG]	  = "11ng",
	[IEEE80211_MODE_VHT_2GHZ] = "11acg",
	[IEEE80211_MODE_VHT_5GHZ] = "11ac",
};

/* map ieee80211_opmode to the corresponding capability bit */
const int ieee80211_opcap[IEEE80211_OPMODE_MAX] = {
	[IEEE80211_M_IBSS]	= IEEE80211_C_IBSS,
	[IEEE80211_M_WDS]	= IEEE80211_C_WDS,
	[IEEE80211_M_STA]	= IEEE80211_C_STA,
	[IEEE80211_M_AHDEMO]	= IEEE80211_C_AHDEMO,
	[IEEE80211_M_HOSTAP]	= IEEE80211_C_HOSTAP,
	[IEEE80211_M_MONITOR]	= IEEE80211_C_MONITOR,
#ifdef IEEE80211_SUPPORT_MESH
	[IEEE80211_M_MBSS]	= IEEE80211_C_MBSS,
#endif
};

/* All-ones 802.11/Ethernet broadcast address. */
const uint8_t ieee80211broadcastaddr[IEEE80211_ADDR_LEN] =
	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static void ieee80211_syncflag_locked(struct ieee80211com *ic, int flag);
static void ieee80211_syncflag_ht_locked(struct ieee80211com *ic, int flag);
static void ieee80211_syncflag_ext_locked(struct ieee80211com *ic, int flag);
static void ieee80211_syncflag_vht_locked(struct ieee80211com *ic, int flag);
static int ieee80211_media_setup(struct ieee80211com *ic,
struct ifmedia *media, int caps, int addsta,
ifm_change_cb_t media_change, ifm_stat_cb_t media_stat);
static int media_status(enum ieee80211_opmode,
const struct ieee80211_channel *);
static uint64_t ieee80211_get_counter(struct ifnet *, ift_counter);
MALLOC_DEFINE(M_80211_VAP, "80211vap", "802.11 vap state");
/*
 * Default supported rates for 802.11 operation (in IEEE .5Mb units).
 * B() marks a rate basic/mandatory.
 */
#define	B(r)	((r) | IEEE80211_RATE_BASIC)
static const struct ieee80211_rateset ieee80211_rateset_11a =
	{ 8, { B(12), 18, B(24), 36, B(48), 72, 96, 108 } };
static const struct ieee80211_rateset ieee80211_rateset_half =
	{ 8, { B(6), 9, B(12), 18, B(24), 36, 48, 54 } };
static const struct ieee80211_rateset ieee80211_rateset_quarter =
	{ 8, { B(3), 4, B(6), 9, B(12), 18, 24, 27 } };
static const struct ieee80211_rateset ieee80211_rateset_11b =
	{ 4, { B(2), B(4), B(11), B(22) } };
/* NB: OFDM rates are handled specially based on mode */
static const struct ieee80211_rateset ieee80211_rateset_11g =
	{ 12, { B(2), B(4), B(11), B(22), 12, 18, 24, 36, 48, 72, 96, 108 } };
#undef B
static int set_vht_extchan(struct ieee80211_channel *c);
/*
 * Fill in 802.11 available channel set, mark
 * all available channels as active, and pick
 * a default channel if not already specified.
 */
void
ieee80211_chan_init(struct ieee80211com *ic)
{
	/* Install a default rate set for mode 'm' only when the driver
	 * supplied none. */
#define DEFAULTRATES(m, def) do { \
	if (ic->ic_sup_rates[m].rs_nrates == 0) \
		ic->ic_sup_rates[m] = def; \
} while (0)
	struct ieee80211_channel *c;
	int i;

	KASSERT(0 < ic->ic_nchans && ic->ic_nchans <= IEEE80211_CHAN_MAX,
	    ("invalid number of channels specified: %u", ic->ic_nchans));
	memset(ic->ic_chan_avail, 0, sizeof(ic->ic_chan_avail));
	memset(ic->ic_modecaps, 0, sizeof(ic->ic_modecaps));
	setbit(ic->ic_modecaps, IEEE80211_MODE_AUTO);
	for (i = 0; i < ic->ic_nchans; i++) {
		c = &ic->ic_channels[i];
		KASSERT(c->ic_flags != 0, ("channel with no flags"));
		/*
		 * Help drivers that work only with frequencies by filling
		 * in IEEE channel #'s if not already calculated.  Note this
		 * mimics similar work done in ieee80211_setregdomain when
		 * changing regulatory state.
		 */
		if (c->ic_ieee == 0)
			c->ic_ieee = ieee80211_mhz2ieee(c->ic_freq,c->ic_flags);
		/*
		 * Setup the HT40/VHT40 upper/lower bits.
		 * The VHT80/... math is done elsewhere.
		 */
		if (IEEE80211_IS_CHAN_HT40(c) && c->ic_extieee == 0)
			c->ic_extieee = ieee80211_mhz2ieee(c->ic_freq +
			    (IEEE80211_IS_CHAN_HT40U(c) ? 20 : -20),
			    c->ic_flags);
		/* Update VHT math */
		/*
		 * XXX VHT again, note that this assumes VHT80/... channels
		 * are legit already.
		 */
		set_vht_extchan(c);

		/* default max tx power to max regulatory */
		if (c->ic_maxpower == 0)
			c->ic_maxpower = 2*c->ic_maxregpower;
		setbit(ic->ic_chan_avail, c->ic_ieee);
		/*
		 * Identify mode capabilities.
		 */
		if (IEEE80211_IS_CHAN_A(c))
			setbit(ic->ic_modecaps, IEEE80211_MODE_11A);
		if (IEEE80211_IS_CHAN_B(c))
			setbit(ic->ic_modecaps, IEEE80211_MODE_11B);
		if (IEEE80211_IS_CHAN_ANYG(c))
			setbit(ic->ic_modecaps, IEEE80211_MODE_11G);
		if (IEEE80211_IS_CHAN_FHSS(c))
			setbit(ic->ic_modecaps, IEEE80211_MODE_FH);
		if (IEEE80211_IS_CHAN_108A(c))
			setbit(ic->ic_modecaps, IEEE80211_MODE_TURBO_A);
		if (IEEE80211_IS_CHAN_108G(c))
			setbit(ic->ic_modecaps, IEEE80211_MODE_TURBO_G);
		if (IEEE80211_IS_CHAN_ST(c))
			setbit(ic->ic_modecaps, IEEE80211_MODE_STURBO_A);
		if (IEEE80211_IS_CHAN_HALF(c))
			setbit(ic->ic_modecaps, IEEE80211_MODE_HALF);
		if (IEEE80211_IS_CHAN_QUARTER(c))
			setbit(ic->ic_modecaps, IEEE80211_MODE_QUARTER);
		if (IEEE80211_IS_CHAN_HTA(c))
			setbit(ic->ic_modecaps, IEEE80211_MODE_11NA);
		if (IEEE80211_IS_CHAN_HTG(c))
			setbit(ic->ic_modecaps, IEEE80211_MODE_11NG);
		if (IEEE80211_IS_CHAN_VHTA(c))
			setbit(ic->ic_modecaps, IEEE80211_MODE_VHT_5GHZ);
		if (IEEE80211_IS_CHAN_VHTG(c))
			setbit(ic->ic_modecaps, IEEE80211_MODE_VHT_2GHZ);
	}
	/* initialize candidate channels to all available */
	memcpy(ic->ic_chan_active, ic->ic_chan_avail,
	    sizeof(ic->ic_chan_avail));

	/* sort channel table to allow lookup optimizations */
	ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);

	/* invalidate any previous state */
	ic->ic_bsschan = IEEE80211_CHAN_ANYC;
	ic->ic_prevchan = NULL;
	ic->ic_csa_newchan = NULL;
	/* arbitrarily pick the first channel */
	ic->ic_curchan = &ic->ic_channels[0];
	ic->ic_rt = ieee80211_get_ratetable(ic->ic_curchan);

	/* fillin well-known rate sets if driver has not specified */
	DEFAULTRATES(IEEE80211_MODE_11B, ieee80211_rateset_11b);
	DEFAULTRATES(IEEE80211_MODE_11G, ieee80211_rateset_11g);
	DEFAULTRATES(IEEE80211_MODE_11A, ieee80211_rateset_11a);
	DEFAULTRATES(IEEE80211_MODE_TURBO_A, ieee80211_rateset_11a);
	DEFAULTRATES(IEEE80211_MODE_TURBO_G, ieee80211_rateset_11g);
	DEFAULTRATES(IEEE80211_MODE_STURBO_A, ieee80211_rateset_11a);
	DEFAULTRATES(IEEE80211_MODE_HALF, ieee80211_rateset_half);
	DEFAULTRATES(IEEE80211_MODE_QUARTER, ieee80211_rateset_quarter);
	DEFAULTRATES(IEEE80211_MODE_11NA, ieee80211_rateset_11a);
	DEFAULTRATES(IEEE80211_MODE_11NG, ieee80211_rateset_11g);
	DEFAULTRATES(IEEE80211_MODE_VHT_2GHZ, ieee80211_rateset_11g);
	DEFAULTRATES(IEEE80211_MODE_VHT_5GHZ, ieee80211_rateset_11a);

	/*
	 * Setup required information to fill the mcsset field, if driver did
	 * not. Assume a 2T2R setup for historic reasons.
	 */
	if (ic->ic_rxstream == 0)
		ic->ic_rxstream = 2;
	if (ic->ic_txstream == 0)
		ic->ic_txstream = 2;

	ieee80211_init_suphtrates(ic);

	/*
	 * Set auto mode to reset active channel state and any desired channel.
	 */
	(void) ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
#undef DEFAULTRATES
}
/* Default ic_update_mcast installed by ieee80211_ifattach(); warns
 * that the driver never supplied a real callback. */
static void
null_update_mcast(struct ieee80211com *ic)
{
	ic_printf(ic, "need multicast update callback\n");
}

/* Default ic_update_promisc; same placeholder pattern as above. */
static void
null_update_promisc(struct ieee80211com *ic)
{
	ic_printf(ic, "need promiscuous mode update callback\n");
}

/* Default ic_update_chw (channel width update); placeholder. */
static void
null_update_chw(struct ieee80211com *ic)
{
	ic_printf(ic, "%s: need callback\n", __func__);
}
/*
 * printf-style message prefixed with the device name; returns the
 * total number of characters emitted.
 */
int
ic_printf(struct ieee80211com *ic, const char * fmt, ...)
{
	va_list ap;
	int nchars;

	nchars = printf("%s: ", ic->ic_name);
	va_start(ap, fmt);
	nchars += vprintf(fmt, ap);
	va_end(ap);

	return (nchars);
}
/* Global registry of attached ieee80211com instances. */
static LIST_HEAD(, ieee80211com) ic_head = LIST_HEAD_INITIALIZER(ic_head);
/* Protects ic_head for all list walks and updates. */
static struct mtx ic_list_mtx;
MTX_SYSINIT(ic_list, &ic_list_mtx, "ieee80211com list", MTX_DEF);
/*
 * Handler for net.wlan.devices: emits a space-separated list of the
 * names of all attached 802.11 devices.
 */
static int
sysctl_ieee80211coms(SYSCTL_HANDLER_ARGS)
{
	struct ieee80211com *ic;
	struct sbuf sb;
	char *sp;
	int error;

	error = sysctl_wire_old_buffer(req, 0);
	if (error)
		return (error);
	sbuf_new_for_sysctl(&sb, NULL, 8, req);
	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
	sp = "";	/* no separator before the first name */
	mtx_lock(&ic_list_mtx);
	LIST_FOREACH(ic, &ic_head, ic_next) {
		sbuf_printf(&sb, "%s%s", sp, ic->ic_name);
		sp = " ";
	}
	mtx_unlock(&ic_list_mtx);
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);

	return (error);
}

SYSCTL_PROC(_net_wlan, OID_AUTO, devices,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_ieee80211coms, "A", "names of available 802.11 devices");
/*
 * Attach/setup the common net80211 state.  Called by
 * the driver on attach to prior to creating any vap's.
 */
void
ieee80211_ifattach(struct ieee80211com *ic)
{

	IEEE80211_LOCK_INIT(ic, ic->ic_name);
	IEEE80211_TX_LOCK_INIT(ic, ic->ic_name);
	TAILQ_INIT(&ic->ic_vaps);

	/* Create a taskqueue for all state changes */
	ic->ic_tq = taskqueue_create("ic_taskq",
	    IEEE80211_M_WAITOK | IEEE80211_M_ZERO,
	    taskqueue_thread_enqueue, &ic->ic_tq);
	taskqueue_start_threads(&ic->ic_tq, 1, PI_NET, "%s net80211 taskq",
	    ic->ic_name);
	ic->ic_ierrors = counter_u64_alloc(IEEE80211_M_WAITOK);
	ic->ic_oerrors = counter_u64_alloc(IEEE80211_M_WAITOK);
	/*
	 * Fill in 802.11 available channel set, mark all
	 * available channels as active, and pick a default
	 * channel if not already specified.
	 */
	ieee80211_chan_init(ic);

	/* Placeholder callbacks; drivers override as needed. */
	ic->ic_update_mcast = null_update_mcast;
	ic->ic_update_promisc = null_update_promisc;
	ic->ic_update_chw = null_update_chw;

	ic->ic_hash_key = arc4random();
	ic->ic_bintval = IEEE80211_BINTVAL_DEFAULT;
	ic->ic_lintval = ic->ic_bintval;
	ic->ic_txpowlimit = IEEE80211_TXPOWER_MAX;

	ieee80211_crypto_attach(ic);
	ieee80211_node_attach(ic);
	ieee80211_power_attach(ic);
	ieee80211_proto_attach(ic);
#ifdef IEEE80211_SUPPORT_SUPERG
	ieee80211_superg_attach(ic);
#endif
	ieee80211_ht_attach(ic);
	ieee80211_vht_attach(ic);
	ieee80211_scan_attach(ic);
	ieee80211_regdomain_attach(ic);
	ieee80211_dfs_attach(ic);

	ieee80211_sysctl_attach(ic);

	/* Publish on the global device list (see ic_head). */
	mtx_lock(&ic_list_mtx);
	LIST_INSERT_HEAD(&ic_head, ic, ic_next);
	mtx_unlock(&ic_list_mtx);
}
/*
 * Detach net80211 state on device detach.  Tear down
 * all vap's and reclaim all common state prior to the
 * device state going away.  Note we may call back into
 * driver; it must be prepared for this.
 */
void
ieee80211_ifdetach(struct ieee80211com *ic)
{
	struct ieee80211vap *vap;

	/*
	 * We use this as an indicator that ifattach never had a chance to be
	 * called, e.g. early driver attach failed and ifdetach was called
	 * during subsequent detach.  Never fear, for we have nothing to do
	 * here.
	 */
	if (ic->ic_tq == NULL)
		return;

	/* Unpublish first so no new lookups can find this com. */
	mtx_lock(&ic_list_mtx);
	LIST_REMOVE(ic, ic_next);
	mtx_unlock(&ic_list_mtx);

	taskqueue_drain(taskqueue_thread, &ic->ic_restart_task);

	/*
	 * The VAP is responsible for setting and clearing
	 * the VIMAGE context.
	 */
	while ((vap = TAILQ_FIRST(&ic->ic_vaps)) != NULL) {
		ieee80211_com_vdetach(vap);
		ieee80211_vap_destroy(vap);
	}
	ieee80211_waitfor_parent(ic);

	/* Detach subsystems in reverse order of ieee80211_ifattach(). */
	ieee80211_sysctl_detach(ic);
	ieee80211_dfs_detach(ic);
	ieee80211_regdomain_detach(ic);
	ieee80211_scan_detach(ic);
#ifdef IEEE80211_SUPPORT_SUPERG
	ieee80211_superg_detach(ic);
#endif
	ieee80211_vht_detach(ic);
	ieee80211_ht_detach(ic);
	/* NB: must be called before ieee80211_node_detach */
	ieee80211_proto_detach(ic);
	ieee80211_crypto_detach(ic);
	ieee80211_power_detach(ic);
	ieee80211_node_detach(ic);

	counter_u64_free(ic->ic_ierrors);
	counter_u64_free(ic->ic_oerrors);

	taskqueue_free(ic->ic_tq);
	IEEE80211_TX_LOCK_DESTROY(ic);
	IEEE80211_LOCK_DESTROY(ic);
}
/*
 * Called by drivers during attach to set the supported
 * cipher set for software encryption.  Thin wrapper over the
 * crypto layer.
 */
void
ieee80211_set_software_ciphers(struct ieee80211com *ic,
    uint32_t cipher_suite)
{
	ieee80211_crypto_set_supported_software_ciphers(ic, cipher_suite);
}

/*
 * Called by drivers during attach to set the supported
 * cipher set for hardware encryption.
 */
void
ieee80211_set_hardware_ciphers(struct ieee80211com *ic,
    uint32_t cipher_suite)
{
	ieee80211_crypto_set_supported_hardware_ciphers(ic, cipher_suite);
}

/*
 * Called by drivers during attach to set the supported
 * key management suites by the driver/hardware.
 */
void
ieee80211_set_driver_keymgmt_suites(struct ieee80211com *ic,
    uint32_t keymgmt_set)
{
	ieee80211_crypto_set_supported_driver_keymgmt(ic,
	    keymgmt_set);
}
/*
 * Look up an attached com by device name.  Returns NULL when no
 * match exists (LIST_FOREACH leaves 'ic' NULL at list end).
 */
struct ieee80211com *
ieee80211_find_com(const char *name)
{
	struct ieee80211com *ic;

	mtx_lock(&ic_list_mtx);
	LIST_FOREACH(ic, &ic_head, ic_next)
		if (strcmp(ic->ic_name, name) == 0)
			break;
	mtx_unlock(&ic_list_mtx);

	return (ic);
}
/*
 * Invoke 'f(arg, ic)' for every attached com.  The callback runs
 * with ic_list_mtx held, so it must not re-enter this list.
 */
void
ieee80211_iterate_coms(ieee80211_com_iter_func *f, void *arg)
{
	struct ieee80211com *ic;

	mtx_lock(&ic_list_mtx);
	LIST_FOREACH(ic, &ic_head, ic_next)
		(*f)(arg, ic);
	mtx_unlock(&ic_list_mtx);
}
/*
 * Default reset method for use with the ioctl support.  This
 * method is invoked after any state change in the 802.11
 * layer that should be propagated to the hardware but not
 * require re-initialization of the 802.11 state machine (e.g
 * rescanning for an ap).  We always return ENETRESET which
 * should cause the driver to re-initialize the device. Drivers
 * can override this method to implement more optimized support.
 */
static int
default_reset(struct ieee80211vap *vap, u_long cmd)
{
	/* Both parameters intentionally unused. */
	return ENETRESET;
}
/*
 * Default for updating the VAP default TX key index.
 *
 * Drivers that support TX offload as well as hardware encryption offload
 * may need to be informed of key index changes separate from the key
 * update.
 */
static void
default_update_deftxkey(struct ieee80211vap *vap, ieee80211_keyix kid)
{

	/* XXX assert validity */
	/* XXX assert we're in a key update block */
	vap->iv_def_txkey = kid;
}
/*
 * ifnet counter method: fold the device-level error counters kept
 * in the com structure into the vap's own statistics.
 */
static uint64_t
ieee80211_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct ieee80211vap *vap = ifp->if_softc;
	struct ieee80211com *ic = vap->iv_ic;
	uint64_t total;

	total = if_get_counter_default(ifp, cnt);
	if (cnt == IFCOUNTER_OERRORS)
		total += counter_u64_fetch(ic->ic_oerrors);
	else if (cnt == IFCOUNTER_IERRORS)
		total += counter_u64_fetch(ic->ic_ierrors);

	return (total);
}
/*
 * Prepare a vap for use.  Drivers use this call to
 * setup net80211 state in new vap's prior attaching
 * them with ieee80211_vap_attach (below).
 */
int
ieee80211_vap_setup(struct ieee80211com *ic, struct ieee80211vap *vap,
    const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode,
    int flags, const uint8_t bssid[IEEE80211_ADDR_LEN])
{
	struct ifnet *ifp;

	/* NB: if_alloc(9) sleeps until it succeeds; no NULL check needed. */
	ifp = if_alloc(IFT_ETHER);
	if_initname(ifp, name, unit);
	ifp->if_softc = vap;			/* back pointer */
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_transmit = ieee80211_vap_transmit;
	ifp->if_qflush = ieee80211_vap_qflush;
	ifp->if_ioctl = ieee80211_ioctl;
	ifp->if_init = ieee80211_init;
	ifp->if_get_counter = ieee80211_get_counter;

	vap->iv_ifp = ifp;
	vap->iv_ic = ic;
	vap->iv_flags = ic->ic_flags;		/* propagate common flags */
	vap->iv_flags_ext = ic->ic_flags_ext;
	vap->iv_flags_ven = ic->ic_flags_ven;
	vap->iv_caps = ic->ic_caps &~ IEEE80211_C_OPMODE;

	/* 11n capabilities - XXX methodize */
	vap->iv_htcaps = ic->ic_htcaps;
	vap->iv_htextcaps = ic->ic_htextcaps;

	/* 11ac capabilities - XXX methodize */
	vap->iv_vht_cap.vht_cap_info = ic->ic_vht_cap.vht_cap_info;
	vap->iv_vhtextcaps = ic->ic_vhtextcaps;

	vap->iv_opmode = opmode;
	vap->iv_caps |= ieee80211_opcap[opmode];
	IEEE80211_ADDR_COPY(vap->iv_myaddr, ic->ic_macaddr);
	switch (opmode) {
	case IEEE80211_M_WDS:
		/*
		 * WDS links must specify the bssid of the far end.
		 * For legacy operation this is a static relationship.
		 * For non-legacy operation the station must associate
		 * and be authorized to pass traffic.  Plumbing the
		 * vap to the proper node happens when the vap
		 * transitions to RUN state.
		 */
		IEEE80211_ADDR_COPY(vap->iv_des_bssid, bssid);
		vap->iv_flags |= IEEE80211_F_DESBSSID;
		if (flags & IEEE80211_CLONE_WDSLEGACY)
			vap->iv_flags_ext |= IEEE80211_FEXT_WDSLEGACY;
		break;
#ifdef IEEE80211_SUPPORT_TDMA
	case IEEE80211_M_AHDEMO:
		if (flags & IEEE80211_CLONE_TDMA) {
			/* NB: checked before clone operation allowed */
			KASSERT(ic->ic_caps & IEEE80211_C_TDMA,
			    ("not TDMA capable, ic_caps 0x%x", ic->ic_caps));
			/*
			 * Propagate TDMA capability to mark vap; this
			 * cannot be removed and is used to distinguish
			 * regular ahdemo operation from ahdemo+tdma.
			 */
			vap->iv_caps |= IEEE80211_C_TDMA;
		}
		break;
#endif
	default:
		break;
	}
	/* auto-enable s/w beacon miss support */
	if (flags & IEEE80211_CLONE_NOBEACONS)
		vap->iv_flags_ext |= IEEE80211_FEXT_SWBMISS;
	/* auto-generated or user supplied MAC address */
	if (flags & (IEEE80211_CLONE_BSSID|IEEE80211_CLONE_MACADDR))
		vap->iv_flags_ext |= IEEE80211_FEXT_UNIQMAC;
	/*
	 * Enable various functionality by default if we're
	 * capable; the driver can override us if it knows better.
	 */
	if (vap->iv_caps & IEEE80211_C_WME)
		vap->iv_flags |= IEEE80211_F_WME;
	if (vap->iv_caps & IEEE80211_C_BURST)
		vap->iv_flags |= IEEE80211_F_BURST;
	/* NB: bg scanning only makes sense for station mode right now */
	if (vap->iv_opmode == IEEE80211_M_STA &&
	    (vap->iv_caps & IEEE80211_C_BGSCAN))
		vap->iv_flags |= IEEE80211_F_BGSCAN;
	vap->iv_flags |= IEEE80211_F_DOTH;	/* XXX no cap, just ena */
	/* NB: DFS support only makes sense for ap mode right now */
	if (vap->iv_opmode == IEEE80211_M_HOSTAP &&
	    (vap->iv_caps & IEEE80211_C_DFS))
		vap->iv_flags_ext |= IEEE80211_FEXT_DFS;
	/* NB: only flip on U-APSD for hostap/sta for now */
	if ((vap->iv_opmode == IEEE80211_M_STA)
	    || (vap->iv_opmode == IEEE80211_M_HOSTAP)) {
		if (vap->iv_caps & IEEE80211_C_UAPSD)
			vap->iv_flags_ext |= IEEE80211_FEXT_UAPSD;
	}

	vap->iv_des_chan = IEEE80211_CHAN_ANYC;	/* any channel is ok */
	vap->iv_bmissthreshold = IEEE80211_HWBMISS_DEFAULT;
	vap->iv_dtim_period = IEEE80211_DTIM_DEFAULT;
	/*
	 * Install a default reset method for the ioctl support;
	 * the driver can override this.
	 */
	vap->iv_reset = default_reset;
	/*
	 * Install a default crypto key update method, the driver
	 * can override this.
	 */
	vap->iv_update_deftxkey = default_update_deftxkey;

	ieee80211_sysctl_vattach(vap);
	ieee80211_crypto_vattach(vap);
	ieee80211_node_vattach(vap);
	ieee80211_power_vattach(vap);
	ieee80211_proto_vattach(vap);
#ifdef IEEE80211_SUPPORT_SUPERG
	ieee80211_superg_vattach(vap);
#endif
	ieee80211_ht_vattach(vap);
	ieee80211_vht_vattach(vap);
	ieee80211_scan_vattach(vap);
	ieee80211_regdomain_vattach(vap);
	ieee80211_radiotap_vattach(vap);
	ieee80211_vap_reset_erp(vap);
	ieee80211_ratectl_set(vap, IEEE80211_RATECTL_NONE);

	return 0;
}
/*
 * Activate a vap.  State should have been prepared with a
 * call to ieee80211_vap_setup and by the driver.  On return
 * from this call the vap is ready for use.
 */
int
ieee80211_vap_attach(struct ieee80211vap *vap, ifm_change_cb_t media_change,
    ifm_stat_cb_t media_stat, const uint8_t macaddr[IEEE80211_ADDR_LEN])
{
	struct ifnet *ifp = vap->iv_ifp;
	struct ieee80211com *ic = vap->iv_ic;
	struct ifmediareq imr;
	int maxrate;

	IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
	    "%s: %s parent %s flags 0x%x flags_ext 0x%x\n",
	    __func__, ieee80211_opmode_name[vap->iv_opmode],
	    ic->ic_name, vap->iv_flags, vap->iv_flags_ext);

	/*
	 * Do late attach work that cannot happen until after
	 * the driver has had a chance to override defaults.
	 */
	ieee80211_node_latevattach(vap);
	ieee80211_power_latevattach(vap);

	maxrate = ieee80211_media_setup(ic, &vap->iv_media, vap->iv_caps,
	    vap->iv_opmode == IEEE80211_M_STA, media_change, media_stat);
	ieee80211_media_status(ifp, &imr);
	/* NB: strip explicit mode; we're actually in autoselect */
	ifmedia_set(&vap->iv_media,
	    imr.ifm_active &~ (IFM_MMASK | IFM_IEEE80211_TURBO));
	if (maxrate)
		ifp->if_baudrate = IF_Mbps(maxrate);

	ether_ifattach(ifp, macaddr);
	IEEE80211_ADDR_COPY(vap->iv_myaddr, IF_LLADDR(ifp));
	/* hook output method setup by ether_ifattach */
	vap->iv_output = ifp->if_output;
	ifp->if_output = ieee80211_output;
	/* NB: if_mtu set by ether_ifattach to ETHERMTU */

	IEEE80211_LOCK(ic);
	TAILQ_INSERT_TAIL(&ic->ic_vaps, vap, iv_next);
	/* Fold this vap's flags into the shared com state. */
	ieee80211_syncflag_locked(ic, IEEE80211_F_WME);
#ifdef IEEE80211_SUPPORT_SUPERG
	ieee80211_syncflag_locked(ic, IEEE80211_F_TURBOP);
#endif
	ieee80211_syncflag_locked(ic, IEEE80211_F_PCF);
	ieee80211_syncflag_locked(ic, IEEE80211_F_BURST);
	ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_HT);
	ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_USEHT40);
	ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_VHT);
	ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT40);
	ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT80);
	ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT160);
	ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT80P80);
	IEEE80211_UNLOCK(ic);

	/* NB: unconditionally reports success. */
	return 1;
}
/*
 * Tear down vap state and reclaim the ifnet.
 * The driver is assumed to have prepared for
 * this; e.g. by turning off interrupts for the
 * underlying device.
 */
void
ieee80211_vap_detach(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = vap->iv_ifp;
	int i;

	CURVNET_SET(ifp->if_vnet);

	IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s parent %s\n",
	    __func__, ieee80211_opmode_name[vap->iv_opmode], ic->ic_name);

	/* NB: bpfdetach is called by ether_ifdetach and claims all taps */
	ether_ifdetach(ifp);

	ieee80211_stop(vap);

	/*
	 * Flush any deferred vap tasks.
	 */
	for (i = 0; i < NET80211_IV_NSTATE_NUM; i++)
		ieee80211_draintask(ic, &vap->iv_nstate_task[i]);
	ieee80211_draintask(ic, &vap->iv_swbmiss_task);
	ieee80211_draintask(ic, &vap->iv_wme_task);
	ieee80211_draintask(ic, &ic->ic_parent_task);

	/* XXX band-aid until ifnet handles this for us */
	taskqueue_drain(taskqueue_swi, &ifp->if_linktask);

	IEEE80211_LOCK(ic);
	KASSERT(vap->iv_state == IEEE80211_S_INIT , ("vap still running"));
	TAILQ_REMOVE(&ic->ic_vaps, vap, iv_next);
	/* Recompute the shared com flags now that this vap is gone. */
	ieee80211_syncflag_locked(ic, IEEE80211_F_WME);
#ifdef IEEE80211_SUPPORT_SUPERG
	ieee80211_syncflag_locked(ic, IEEE80211_F_TURBOP);
#endif
	ieee80211_syncflag_locked(ic, IEEE80211_F_PCF);
	ieee80211_syncflag_locked(ic, IEEE80211_F_BURST);
	ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_HT);
	ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_USEHT40);
	ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_VHT);
	ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT40);
	ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT80);
	ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT160);
	ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT80P80);

	/* NB: this handles the bpfdetach done below */
	ieee80211_syncflag_ext_locked(ic, IEEE80211_FEXT_BPF);

	/* Drop this vap's contribution to the promisc/allmulti counts. */
	if (vap->iv_ifflags & IFF_PROMISC)
		ieee80211_promisc(vap, false);
	if (vap->iv_ifflags & IFF_ALLMULTI)
		ieee80211_allmulti(vap, false);
	IEEE80211_UNLOCK(ic);

	ifmedia_removeall(&vap->iv_media);

	ieee80211_radiotap_vdetach(vap);
	ieee80211_regdomain_vdetach(vap);
	ieee80211_scan_vdetach(vap);
#ifdef IEEE80211_SUPPORT_SUPERG
	ieee80211_superg_vdetach(vap);
#endif
	ieee80211_vht_vdetach(vap);
	ieee80211_ht_vdetach(vap);
	/* NB: must be before ieee80211_node_vdetach */
	ieee80211_proto_vdetach(vap);
	ieee80211_crypto_vdetach(vap);
	ieee80211_power_vdetach(vap);
	ieee80211_node_vdetach(vap);
	ieee80211_sysctl_vdetach(vap);

	if_free(ifp);

	CURVNET_RESTORE();
}
/*
 * Count number of vaps in promisc, and issue promisc on
 * parent respectively.  The update task is only kicked on
 * 0<->1 transitions of the count.
 */
void
ieee80211_promisc(struct ieee80211vap *vap, bool on)
{
	struct ieee80211com *ic = vap->iv_ic;

	IEEE80211_LOCK_ASSERT(ic);

	if (!on) {
		KASSERT(ic->ic_promisc > 0, ("%s: ic %p not promisc",
		    __func__, ic));
		if (--ic->ic_promisc == 0)
			ieee80211_runtask(ic, &ic->ic_promisc_task);
		return;
	}
	if (++ic->ic_promisc == 1)
		ieee80211_runtask(ic, &ic->ic_promisc_task);
}
/*
 * Count number of vaps in allmulti, and issue allmulti on
 * parent respectively.  The update task is only kicked on
 * 0<->1 transitions of the count.
 */
void
ieee80211_allmulti(struct ieee80211vap *vap, bool on)
{
	struct ieee80211com *ic = vap->iv_ic;

	IEEE80211_LOCK_ASSERT(ic);

	if (!on) {
		KASSERT(ic->ic_allmulti > 0, ("%s: ic %p not allmulti",
		    __func__, ic));
		if (--ic->ic_allmulti == 0)
			ieee80211_runtask(ic, &ic->ic_mcast_task);
		return;
	}
	if (++ic->ic_allmulti == 1)
		ieee80211_runtask(ic, &ic->ic_mcast_task);
}
/*
* Synchronize flag bit state in the com structure
* according to the state of all vap's. This is used,
* for example, to handle state changes via ioctls.
*/
static void
ieee80211_syncflag_locked(struct ieee80211com *ic, int flag)
{
struct ieee80211vap *vap;
int bit;
IEEE80211_LOCK_ASSERT(ic);
bit = 0;
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
if (vap->iv_flags & flag) {
bit = 1;
break;
}
if (bit)
ic->ic_flags |= flag;
else
ic->ic_flags &= ~flag;
}
/*
 * Set (flag > 0) or clear (flag < 0) an iv_flags bit on the vap and
 * propagate the result to the shared com flags.
 */
void
ieee80211_syncflag(struct ieee80211vap *vap, int flag)
{
	struct ieee80211com *ic = vap->iv_ic;

	IEEE80211_LOCK(ic);
	if (flag >= 0) {
		vap->iv_flags |= flag;
	} else {
		flag = -flag;
		vap->iv_flags &= ~flag;
	}
	ieee80211_syncflag_locked(ic, flag);
	IEEE80211_UNLOCK(ic);
}
/*
* Synchronize flags_ht bit state in the com structure
* according to the state of all vap's. This is used,
* for example, to handle state changes via ioctls.
*/
static void
ieee80211_syncflag_ht_locked(struct ieee80211com *ic, int flag)
{
struct ieee80211vap *vap;
int bit;
IEEE80211_LOCK_ASSERT(ic);
bit = 0;
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
if (vap->iv_flags_ht & flag) {
bit = 1;
break;
}
if (bit)
ic->ic_flags_ht |= flag;
else
ic->ic_flags_ht &= ~flag;
}
/*
 * Set (flag > 0) or clear (flag < 0) an HT flag on the vap and
 * propagate the result to the shared com HT flags.
 */
void
ieee80211_syncflag_ht(struct ieee80211vap *vap, int flag)
{
	struct ieee80211com *ic = vap->iv_ic;

	IEEE80211_LOCK(ic);
	if (flag >= 0) {
		vap->iv_flags_ht |= flag;
	} else {
		flag = -flag;
		vap->iv_flags_ht &= ~flag;
	}
	ieee80211_syncflag_ht_locked(ic, flag);
	IEEE80211_UNLOCK(ic);
}
/*
* Synchronize flags_vht bit state in the com structure
* according to the state of all vap's. This is used,
* for example, to handle state changes via ioctls.
*/
static void
ieee80211_syncflag_vht_locked(struct ieee80211com *ic, int flag)
{
struct ieee80211vap *vap;
int bit;
IEEE80211_LOCK_ASSERT(ic);
bit = 0;
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
if (vap->iv_vht_flags & flag) {
bit = 1;
break;
}
if (bit)
ic->ic_vht_flags |= flag;
else
ic->ic_vht_flags &= ~flag;
}
/*
 * Set (flag > 0) or clear (flag < 0) a VHT flag on the vap and
 * propagate the result to the shared com VHT flags.
 */
void
ieee80211_syncflag_vht(struct ieee80211vap *vap, int flag)
{
	struct ieee80211com *ic = vap->iv_ic;

	IEEE80211_LOCK(ic);
	if (flag >= 0) {
		vap->iv_vht_flags |= flag;
	} else {
		flag = -flag;
		vap->iv_vht_flags &= ~flag;
	}
	ieee80211_syncflag_vht_locked(ic, flag);
	IEEE80211_UNLOCK(ic);
}
/*
* Synchronize flags_ext bit state in the com structure
* according to the state of all vap's. This is used,
* for example, to handle state changes via ioctls.
*/
static void
ieee80211_syncflag_ext_locked(struct ieee80211com *ic, int flag)
{
struct ieee80211vap *vap;
int bit;
IEEE80211_LOCK_ASSERT(ic);
bit = 0;
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
if (vap->iv_flags_ext & flag) {
bit = 1;
break;
}
if (bit)
ic->ic_flags_ext |= flag;
else
ic->ic_flags_ext &= ~flag;
}
/*
 * Set (flag > 0) or clear (flag < 0) an extended flag on the vap
 * and propagate the result to the shared com extended flags.
 */
void
ieee80211_syncflag_ext(struct ieee80211vap *vap, int flag)
{
	struct ieee80211com *ic = vap->iv_ic;

	IEEE80211_LOCK(ic);
	if (flag >= 0) {
		vap->iv_flags_ext |= flag;
	} else {
		flag = -flag;
		vap->iv_flags_ext &= ~flag;
	}
	ieee80211_syncflag_ext_locked(ic, flag);
	IEEE80211_UNLOCK(ic);
}
/*
 * Map a GSM-band frequency (MHz) to a synthetic IEEE channel
 * number; the offset depends on the half/quarter-rate flags.
 */
static __inline int
mapgsm(u_int freq, u_int flags)
{
	u_int f10;

	f10 = freq * 10;
	if (flags & IEEE80211_CHAN_QUARTER)
		f10 += 5;
	else if (flags & IEEE80211_CHAN_HALF)
		f10 += 10;
	else
		f10 += 20;
	/* NB: there is no 907/20 wide but leave room */
	return (f10 - 906*10) / 5;
}
/*
 * Map a 4.9GHz public-safety-band frequency (MHz) to its IEEE
 * channel number: offset 37, 5MHz spacing, with a fixup for
 * frequencies ending in .x where freq % 5 == 2.  'flags' is unused.
 */
static __inline int
mappsb(u_int freq, u_int flags)
{
	return 37 + ((freq * 10) + ((freq % 5) == 2 ? 5 : 0) - 49400) / 5;
}
/*
 * Convert MHz frequency to IEEE channel number.
 */
int
ieee80211_mhz2ieee(u_int freq, u_int flags)
{
	/* 4.9GHz public safety band window. */
#define IS_FREQ_IN_PSB(_freq) ((_freq) > 4940 && (_freq) < 4990)
	if (flags & IEEE80211_CHAN_GSM)
		return mapgsm(freq, flags);
	if (flags & IEEE80211_CHAN_2GHZ) {	/* 2GHz band */
		if (freq == 2484)		/* 802.11b channel 14 */
			return 14;
		if (freq < 2484)
			return ((int) freq - 2407) / 5;
		else
			return 15 + ((freq - 2512) / 20);
	} else if (flags & IEEE80211_CHAN_5GHZ) {	/* 5Ghz band */
		if (freq <= 5000) {
			/* XXX check regdomain? */
			if (IS_FREQ_IN_PSB(freq))
				return mappsb(freq, flags);
			return (freq - 4000) / 5;
		} else
			return (freq - 5000) / 5;
	} else {				/* either, guess */
		if (freq == 2484)		/* 802.11b channel 14 */
			return 14;
		if (freq < 2484) {
			if (907 <= freq && freq <= 922)
				return mapgsm(freq, flags);
			return ((int) freq - 2407) / 5;
		}
		if (freq < 5000) {
			if (IS_FREQ_IN_PSB(freq))
				return mappsb(freq, flags);
			else if (freq > 4900)
				return (freq - 4000) / 5;
			else
				return 15 + ((freq - 2512) / 20);
		}
		return (freq - 5000) / 5;
	}
#undef IS_FREQ_IN_PSB
}
/*
 * Convert channel to IEEE channel number.
 * A NULL channel is diagnosed; IEEE80211_CHAN_ANYC maps to
 * IEEE80211_CHAN_ANY.
 */
int
ieee80211_chan2ieee(struct ieee80211com *ic, const struct ieee80211_channel *c)
{
	if (c == NULL) {
		ic_printf(ic, "invalid channel (NULL)\n");
		return 0;		/* XXX */
	}
	if (c == IEEE80211_CHAN_ANYC)
		return IEEE80211_CHAN_ANY;
	return c->ic_ieee;
}
/*
 * Convert IEEE channel number to MHz frequency.
 *
 * Inverse of ieee80211_mhz2ieee(); the band is chosen from 'flags'
 * (GSM / 2GHz / 5GHz) and guessed from the channel number otherwise.
 */
u_int
ieee80211_ieee2mhz(u_int chan, u_int flags)
{
	if (flags & IEEE80211_CHAN_GSM)
		return 907 + 5 * (chan / 10);
	if (flags & IEEE80211_CHAN_2GHZ) {	/* 2GHz band */
		/* ch 14 sits off the regular 5MHz grid */
		if (chan == 14)
			return 2484;
		if (chan < 14)
			return 2407 + chan*5;
		else
			return 2512 + ((chan-15)*20);
	} else if (flags & IEEE80211_CHAN_5GHZ) {/* 5Ghz band */
		if (flags & (IEEE80211_CHAN_HALF|IEEE80211_CHAN_QUARTER)) {
			/* public safety band; inverse of mappsb() */
			chan -= 37;
			return 4940 + chan*5 + (chan % 5 ? 2 : 0);
		}
		return 5000 + (chan*5);
	} else {				/* either, guess */
		/* XXX can't distinguish PSB+GSM channels */
		if (chan == 14)
			return 2484;
		if (chan < 14)			/* 0-13 */
			return 2407 + chan*5;
		if (chan < 27)			/* 15-26 */
			return 2512 + ((chan-15)*20);
		return 5000 + (chan*5);
	}
}
/*
 * Derive the HT40 extension channel number from the primary channel.
 *
 * IEEE Std 802.11-2012, page 1738, subclause 20.3.15.4:
 * "the secondary channel number shall be 'N + [1,-1] * 4'
 */
static __inline void
set_extchan(struct ieee80211_channel *c)
{
	if (c->ic_flags & IEEE80211_CHAN_HT40U)
		c->ic_extieee = c->ic_ieee + 4;	/* extension above */
	else if (c->ic_flags & IEEE80211_CHAN_HT40D)
		c->ic_extieee = c->ic_ieee - 4;	/* extension below */
	else
		c->ic_extieee = 0;		/* no extension channel */
}
/*
 * Populate the freq1/freq2 fields as appropriate for VHT channels.
 *
 * This for now uses a hard-coded list of 80MHz wide channels.
 *
 * For HT20/HT40, freq1 just is the centre frequency of the 40MHz
 * wide channel we've already decided upon.
 *
 * For VHT80 and VHT160, there are only a small number of fixed
 * 80/160MHz wide channels, so we just use those.
 *
 * This is all likely very very wrong - both the regulatory code
 * and this code needs to ensure that all four channels are
 * available and valid before the VHT80 (and eight for VHT160) channel
 * is created.
 */

/* A half-open [freq_start, freq_end) span in MHz; {0, 0} terminates a list. */
struct vht_chan_range {
	uint16_t freq_start;
	uint16_t freq_end;
};

/* Hard-coded spans of the known 80MHz wide channels. */
struct vht_chan_range vht80_chan_ranges[] = {
	{ 5170, 5250 },
	{ 5250, 5330 },
	{ 5490, 5570 },
	{ 5570, 5650 },
	{ 5650, 5730 },
	{ 5735, 5815 },
	{ 0, 0 }
};

/* Hard-coded spans of the known 160MHz wide channels. */
struct vht_chan_range vht160_chan_ranges[] = {
	{ 5170, 5330 },
	{ 5490, 5650 },
	{ 0, 0 }
};
/*
 * Fill in the VHT centre-channel fields (ic_vht_ch_freq1/2, held as
 * IEEE channel numbers) for a VHT channel.
 *
 * Returns 1 if the fields were populated, 0 otherwise (non-VHT
 * channel, or a VHT80/VHT160 frequency outside the known ranges).
 */
static int
set_vht_extchan(struct ieee80211_channel *c)
{
	int i;

	if (! IEEE80211_IS_CHAN_VHT(c))
		return (0);

	/* 80+80 not yet implemented; diagnose and fall through */
	if (IEEE80211_IS_CHAN_VHT80P80(c)) {
		printf("%s: TODO VHT80+80 channel (ieee=%d, flags=0x%08x)\n",
		    __func__, c->ic_ieee, c->ic_flags);
	}

	if (IEEE80211_IS_CHAN_VHT160(c)) {
		/* find the 160MHz span containing us; freq1 is its midpoint */
		for (i = 0; vht160_chan_ranges[i].freq_start != 0; i++) {
			if (c->ic_freq >= vht160_chan_ranges[i].freq_start &&
			    c->ic_freq < vht160_chan_ranges[i].freq_end) {
				int midpoint;

				midpoint = vht160_chan_ranges[i].freq_start + 80;
				c->ic_vht_ch_freq1 =
				    ieee80211_mhz2ieee(midpoint, c->ic_flags);
				c->ic_vht_ch_freq2 = 0;
#if 0
				printf("%s: %d, freq=%d, midpoint=%d, freq1=%d, freq2=%d\n",
				    __func__, c->ic_ieee, c->ic_freq, midpoint,
				    c->ic_vht_ch_freq1, c->ic_vht_ch_freq2);
#endif
				return (1);
			}
		}
		return (0);
	}

	if (IEEE80211_IS_CHAN_VHT80(c)) {
		/* find the 80MHz span containing us; freq1 is its midpoint */
		for (i = 0; vht80_chan_ranges[i].freq_start != 0; i++) {
			if (c->ic_freq >= vht80_chan_ranges[i].freq_start &&
			    c->ic_freq < vht80_chan_ranges[i].freq_end) {
				int midpoint;

				midpoint = vht80_chan_ranges[i].freq_start + 40;
				c->ic_vht_ch_freq1 =
				    ieee80211_mhz2ieee(midpoint, c->ic_flags);
				c->ic_vht_ch_freq2 = 0;
#if 0
				printf("%s: %d, freq=%d, midpoint=%d, freq1=%d, freq2=%d\n",
				    __func__, c->ic_ieee, c->ic_freq, midpoint,
				    c->ic_vht_ch_freq1, c->ic_vht_ch_freq2);
#endif
				return (1);
			}
		}
		return (0);
	}

	if (IEEE80211_IS_CHAN_VHT40(c)) {
		/* centre sits 2 channel numbers (10MHz) from the primary */
		if (IEEE80211_IS_CHAN_HT40U(c))
			c->ic_vht_ch_freq1 = c->ic_ieee + 2;
		else if (IEEE80211_IS_CHAN_HT40D(c))
			c->ic_vht_ch_freq1 = c->ic_ieee - 2;
		else
			return (0);
		return (1);
	}

	if (IEEE80211_IS_CHAN_VHT20(c)) {
		/* 20MHz: the centre is the channel itself */
		c->ic_vht_ch_freq1 = c->ic_ieee;
		return (1);
	}

	printf("%s: unknown VHT channel type (ieee=%d, flags=0x%08x)\n",
	    __func__, c->ic_ieee, c->ic_flags);
	return (0);
}
/*
* Return whether the current channel could possibly be a part of
* a VHT80/VHT160 channel.
*
* This doesn't check that the whole range is in the allowed list
* according to regulatory.
*/
static bool
is_vht160_valid_freq(uint16_t freq)
{
int i;
for (i = 0; vht160_chan_ranges[i].freq_start != 0; i++) {
if (freq >= vht160_chan_ranges[i].freq_start &&
freq < vht160_chan_ranges[i].freq_end)
return (true);
}
return (false);
}
/*
 * Return whether the given frequency could be part of an 80MHz
 * wide channel.
 *
 * Like is_vht160_valid_freq() this doesn't check that the whole
 * range is allowed by regulatory; return type changed to bool for
 * consistency with that sibling (all callers use it as a boolean).
 */
static bool
is_vht80_valid_freq(uint16_t freq)
{
	int i;

	for (i = 0; vht80_chan_ranges[i].freq_start != 0; i++) {
		if (freq >= vht80_chan_ranges[i].freq_start &&
		    freq < vht80_chan_ranges[i].freq_end)
			return (true);
	}
	return (false);
}
/*
 * Append one channel entry to 'chans', filling in frequency, power
 * and (V)HT extension/centre fields.  When 'freq' is zero it is
 * derived from the IEEE channel number and flags.
 *
 * Returns 0 on success or ENOBUFS when the list is full.
 */
static int
addchan(struct ieee80211_channel chans[], int maxchans, int *nchans,
    uint8_t ieee, uint16_t freq, int8_t maxregpower, uint32_t flags)
{
	struct ieee80211_channel *c;

	if (*nchans >= maxchans)
		return (ENOBUFS);

	c = &chans[*nchans];
	(*nchans)++;
	c->ic_ieee = ieee;
	if (freq != 0)
		c->ic_freq = freq;
	else
		c->ic_freq = ieee80211_ieee2mhz(ieee, flags);
	c->ic_maxregpower = maxregpower;
	c->ic_maxpower = 2 * maxregpower;	/* in half dBm units */
	c->ic_flags = flags;
	c->ic_vht_ch_freq1 = 0;
	c->ic_vht_ch_freq2 = 0;
	set_extchan(c);
	set_vht_extchan(c);

	return (0);
}
/*
 * Append a copy of the previously added channel with new flags,
 * recomputing the (V)HT extension/centre fields for the new flags.
 *
 * Returns 0 on success or ENOBUFS when the list is full.
 */
static int
copychan_prev(struct ieee80211_channel chans[], int maxchans, int *nchans,
    uint32_t flags)
{
	struct ieee80211_channel *c;

	KASSERT(*nchans > 0, ("channel list is empty\n"));

	if (*nchans >= maxchans)
		return (ENOBUFS);

	c = &chans[*nchans];
	*c = chans[*nchans - 1];	/* clone the previous entry */
	(*nchans)++;
	c->ic_flags = flags;
	c->ic_vht_ch_freq1 = 0;
	c->ic_vht_ch_freq2 = 0;
	set_extchan(c);
	set_vht_extchan(c);

	return (0);
}
/*
 * Build the zero-terminated list of 2GHz channel-flag combinations
 * for the modes in 'bands' and bandwidths in 'cbw_flags'.
 * XXX VHT-2GHz
 */
static void
getflags_2ghz(const uint8_t bands[], uint32_t flags[], int cbw_flags)
{
	int n;

	n = 0;
	if (isset(bands, IEEE80211_MODE_11B))
		flags[n++] = IEEE80211_CHAN_B;
	if (isset(bands, IEEE80211_MODE_11G))
		flags[n++] = IEEE80211_CHAN_G;
	if (isset(bands, IEEE80211_MODE_11NG))
		flags[n++] = IEEE80211_CHAN_G | IEEE80211_CHAN_HT20;
	if (cbw_flags & NET80211_CBW_FLAG_HT40) {
		flags[n++] = IEEE80211_CHAN_G | IEEE80211_CHAN_HT40U;
		flags[n++] = IEEE80211_CHAN_G | IEEE80211_CHAN_HT40D;
	}
	flags[n] = 0;	/* list terminator */
}
/*
 * Build the zero-terminated list of 5GHz channel-flag combinations
 * for the modes in 'bands' and bandwidths in 'cbw_flags'.
 */
static void
getflags_5ghz(const uint8_t bands[], uint32_t flags[], int cbw_flags)
{
	int nmodes;

	/*
	 * The addchan_list() function seems to expect the flags array to
	 * be in channel width order, so the VHT bits are interspersed
	 * as appropriate to maintain said order.
	 *
	 * It also assumes HT40U is before HT40D.
	 */
	nmodes = 0;

	/* 20MHz */
	if (isset(bands, IEEE80211_MODE_11A))
		flags[nmodes++] = IEEE80211_CHAN_A;
	if (isset(bands, IEEE80211_MODE_11NA))
		flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT20;
	if (isset(bands, IEEE80211_MODE_VHT_5GHZ)) {
		flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT20 |
		    IEEE80211_CHAN_VHT20;
	}

	/* 40MHz */
	if (cbw_flags & NET80211_CBW_FLAG_HT40)
		flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40U;
	if ((cbw_flags & NET80211_CBW_FLAG_HT40) &&
	    isset(bands, IEEE80211_MODE_VHT_5GHZ))
		flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40U |
		    IEEE80211_CHAN_VHT40U;
	if (cbw_flags & NET80211_CBW_FLAG_HT40)
		flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40D;
	if ((cbw_flags & NET80211_CBW_FLAG_HT40) &&
	    isset(bands, IEEE80211_MODE_VHT_5GHZ))
		flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40D |
		    IEEE80211_CHAN_VHT40D;

	/* 80MHz */
	if ((cbw_flags & NET80211_CBW_FLAG_VHT80) &&
	    isset(bands, IEEE80211_MODE_VHT_5GHZ)) {
		flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40U |
		    IEEE80211_CHAN_VHT80;
		flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40D |
		    IEEE80211_CHAN_VHT80;
	}

	/* VHT160 */
	if ((cbw_flags & NET80211_CBW_FLAG_VHT160) &&
	    isset(bands, IEEE80211_MODE_VHT_5GHZ)) {
		flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40U |
		    IEEE80211_CHAN_VHT160;
		flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40D |
		    IEEE80211_CHAN_VHT160;
	}

	/* VHT80+80 */
	if ((cbw_flags & NET80211_CBW_FLAG_VHT80P80) &&
	    isset(bands, IEEE80211_MODE_VHT_5GHZ)) {
		flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40U |
		    IEEE80211_CHAN_VHT80P80;
		flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40D |
		    IEEE80211_CHAN_VHT80P80;
	}

	/* zero-terminate the list */
	flags[nmodes] = 0;
}
/*
 * Dispatch to the per-band flag-list builder.  A mixed 2GHz+5GHz
 * request is rejected by leaving flags[0] == 0.
 */
static void
getflags(const uint8_t bands[], uint32_t flags[], int cbw_flags)
{
	bool has_5ghz, has_2ghz;

	flags[0] = 0;
	has_5ghz = isset(bands, IEEE80211_MODE_11A) ||
	    isset(bands, IEEE80211_MODE_11NA) ||
	    isset(bands, IEEE80211_MODE_VHT_5GHZ);
	has_2ghz = isset(bands, IEEE80211_MODE_11B) ||
	    isset(bands, IEEE80211_MODE_11G) ||
	    isset(bands, IEEE80211_MODE_11NG) ||
	    isset(bands, IEEE80211_MODE_VHT_2GHZ);

	if (has_5ghz) {
		if (!has_2ghz)
			getflags_5ghz(bands, flags, cbw_flags);
	} else
		getflags_2ghz(bands, flags, cbw_flags);
}
/*
 * Add one 20 MHz channel into specified channel list.
 * You MUST NOT mix bands when calling this. It will not add 5ghz
 * channels if you have any B/G/N band bit set.
 * The _cbw() variant does also support HT40/VHT80/160/80+80.
 */
int
ieee80211_add_channel_cbw(struct ieee80211_channel chans[], int maxchans,
    int *nchans, uint8_t ieee, uint16_t freq, int8_t maxregpower,
    uint32_t chan_flags, const uint8_t bands[], int cbw_flags)
{
	uint32_t flags[IEEE80211_MODE_MAX];
	int i, error;

	getflags(bands, flags, cbw_flags);
	KASSERT(flags[0] != 0, ("%s: no correct mode provided\n", __func__));

	/* first flag combination creates the entry, the rest clone it */
	error = addchan(chans, maxchans, nchans, ieee, freq, maxregpower,
	    flags[0] | chan_flags);
	i = 1;
	while (error == 0 && flags[i] != 0) {
		error = copychan_prev(chans, maxchans, nchans,
		    flags[i] | chan_flags);
		i++;
	}

	return (error);
}
/*
 * Convenience wrapper around ieee80211_add_channel_cbw() for the
 * plain 20MHz case (no channel-bandwidth flags).
 */
int
ieee80211_add_channel(struct ieee80211_channel chans[], int maxchans,
    int *nchans, uint8_t ieee, uint16_t freq, int8_t maxregpower,
    uint32_t chan_flags, const uint8_t bands[])
{
	return (ieee80211_add_channel_cbw(chans, maxchans, nchans, ieee, freq,
	    maxregpower, chan_flags, bands, 0));
}
/*
 * Linear scan of 'chans' for an entry matching frequency and
 * the turbo-related flag bits; returns NULL when not found.
 */
static struct ieee80211_channel *
findchannel(struct ieee80211_channel chans[], int nchans, uint16_t freq,
    uint32_t flags)
{
	int i;

	flags &= IEEE80211_CHAN_ALLTURBO;
	/* brute force search */
	for (i = 0; i < nchans; i++) {
		struct ieee80211_channel *c = &chans[i];

		if (c->ic_freq == freq &&
		    (c->ic_flags & IEEE80211_CHAN_ALLTURBO) == flags)
			return (c);
	}
	return (NULL);
}
/*
 * Add 40 MHz channel pair into specified channel list.
 *
 * Both 20MHz entries (at 'ieee' and 20MHz above it) must already be
 * in the list; returns EINVAL/ENOENT when they are not, or ENOBUFS
 * when the list is full.
 */
/* XXX VHT */
int
ieee80211_add_channel_ht40(struct ieee80211_channel chans[], int maxchans,
    int *nchans, uint8_t ieee, int8_t maxregpower, uint32_t flags)
{
	struct ieee80211_channel *cent, *extc;
	uint16_t freq;
	int error;

	freq = ieee80211_ieee2mhz(ieee, flags);

	/*
	 * Each entry defines an HT40 channel pair; find the
	 * center channel, then the extension channel above.
	 */
	flags |= IEEE80211_CHAN_HT20;
	cent = findchannel(chans, *nchans, freq, flags);
	if (cent == NULL)
		return (EINVAL);
	extc = findchannel(chans, *nchans, freq + 20, flags);
	if (extc == NULL)
		return (ENOENT);

	/* drop the HT20 bit before re-adding with HT40U/HT40D flags */
	flags &= ~IEEE80211_CHAN_HT;
	error = addchan(chans, maxchans, nchans, cent->ic_ieee, cent->ic_freq,
	    maxregpower, flags | IEEE80211_CHAN_HT40U);
	if (error != 0)
		return (error);
	error = addchan(chans, maxchans, nchans, extc->ic_ieee, extc->ic_freq,
	    maxregpower, flags | IEEE80211_CHAN_HT40D);

	return (error);
}
/*
 * Fetch the center frequency for the primary channel.
 *
 * NB: this is always ic_freq (the primary 20MHz channel), even for
 * HT40/VHT channels; use ieee80211_get_channel_center_freq1() for
 * the wide-channel centre.
 */
uint32_t
ieee80211_get_channel_center_freq(const struct ieee80211_channel *c)
{
	return (c->ic_freq);
}
/*
 * Fetch the center frequency for the primary BAND channel.
 *
 * For 5, 10, 20MHz channels it'll be the normally configured channel
 * frequency.
 *
 * For 40MHz, 80MHz, 160MHz channels it will be the centre of the
 * wide channel, not the centre of the primary channel (that's ic_freq).
 *
 * For 80+80MHz channels this will be the centre of the primary
 * 80MHz channel; the secondary 80MHz channel will be center_freq2().
 */
uint32_t
ieee80211_get_channel_center_freq1(const struct ieee80211_channel *c)
{
	/* VHT: convert the pre-computed centre channel number. */
	if (IEEE80211_IS_CHAN_VHT(c))
		return (ieee80211_ieee2mhz(c->ic_vht_ch_freq1, c->ic_flags));

	/* HT40: centre sits 10MHz above/below the primary channel. */
	if (IEEE80211_IS_CHAN_HT40U(c))
		return (c->ic_freq + 10);
	if (IEEE80211_IS_CHAN_HT40D(c))
		return (c->ic_freq - 10);

	return (c->ic_freq);
}
/*
 * For now, no 80+80 support; it will likely always return 0.
 */
uint32_t
ieee80211_get_channel_center_freq2(const struct ieee80211_channel *c)
{
	if (!IEEE80211_IS_CHAN_VHT(c))
		return (0);
	if (c->ic_vht_ch_freq2 == 0)
		return (0);
	return (ieee80211_ieee2mhz(c->ic_vht_ch_freq2, c->ic_flags));
}
/*
 * Adds channels into specified channel list (ieee[] array must be sorted).
 * Channels are already sorted.
 *
 * 'flags' is a zero-terminated list of flag combinations (built by
 * the getflags*() helpers); flags[0] creates each new entry and
 * subsequent combinations are cloned from it via copychan_prev().
 */
static int
add_chanlist(struct ieee80211_channel chans[], int maxchans, int *nchans,
    const uint8_t ieee[], int nieee, uint32_t flags[])
{
	uint16_t freq;
	int i, j, error;
	int is_vht;

	for (i = 0; i < nieee; i++) {
		freq = ieee80211_ieee2mhz(ieee[i], flags[0]);
		for (j = 0; flags[j] != 0; j++) {
			/*
			 * Notes:
			 * + HT40 and VHT40 channels occur together, so
			 *   we need to be careful that we actually allow that.
			 * + VHT80, VHT160 will coexist with HT40/VHT40, so
			 *   make sure it's not skipped because of the overlap
			 *   check used for (V)HT40.
			 */
			is_vht = !! (flags[j] & IEEE80211_CHAN_VHT);

			/* XXX TODO FIXME VHT80P80. */

			/* Test for VHT160 analogue to the VHT80 below. */
			if (is_vht && flags[j] & IEEE80211_CHAN_VHT160)
				if (! is_vht160_valid_freq(freq))
					continue;

			/*
			 * Test for VHT80.
			 * XXX This is all very broken right now.
			 * What we /should/ do is:
			 *
			 * + check that the frequency is in the list of
			 *   allowed VHT80 ranges; and
			 * + the other 3 channels in the list are actually
			 *   also available.
			 */
			if (is_vht && flags[j] & IEEE80211_CHAN_VHT80)
				if (! is_vht80_valid_freq(freq))
					continue;

			/*
			 * Test for (V)HT40.
			 *
			 * This is also a fall through from VHT80; as we only
			 * allow a VHT80 channel if the VHT40 combination is
			 * also valid.  If the VHT40 form is not valid then
			 * we certainly can't do VHT80..
			 */
			if (flags[j] & IEEE80211_CHAN_HT40D)
				/*
				 * Can't have a "lower" channel if we are the
				 * first channel.
				 *
				 * Can't have a "lower" channel if it's below/
				 * within 20MHz of the first channel.
				 *
				 * Can't have a "lower" channel if the channel
				 * below it is not 20MHz away.
				 */
				if (i == 0 || ieee[i] < ieee[0] + 4 ||
				    freq - 20 !=
				    ieee80211_ieee2mhz(ieee[i] - 4, flags[j]))
					continue;

			if (flags[j] & IEEE80211_CHAN_HT40U)
				/*
				 * Can't have an "upper" channel if we are
				 * the last channel.
				 *
				 * Can't have an "upper" channel be above the
				 * last channel in the list.
				 *
				 * Can't have an "upper" channel if the next
				 * channel according to the math isn't 20MHz
				 * away.  (Likely for channel 13/14.)
				 */
				if (i == nieee - 1 ||
				    ieee[i] + 4 > ieee[nieee - 1] ||
				    freq + 20 !=
				    ieee80211_ieee2mhz(ieee[i] + 4, flags[j]))
					continue;

			/* j == 0 creates the entry; later j's clone it */
			if (j == 0) {
				error = addchan(chans, maxchans, nchans,
				    ieee[i], freq, 0, flags[j]);
			} else {
				error = copychan_prev(chans, maxchans, nchans,
				    flags[j]);
			}
			if (error != 0)
				return (error);
		}
	}

	return (0);
}
/*
 * Add the listed 2GHz channels, expanded over all modes in 'bands',
 * to the channel list.
 */
int
ieee80211_add_channel_list_2ghz(struct ieee80211_channel chans[], int maxchans,
    int *nchans, const uint8_t ieee[], int nieee, const uint8_t bands[],
    int cbw_flags)
{
	uint32_t flags[IEEE80211_MODE_MAX];

	/* XXX no VHT for now */
	getflags_2ghz(bands, flags, cbw_flags);
	KASSERT(flags[0] != 0, ("%s: no correct mode provided\n", __func__));

	return (add_chanlist(chans, maxchans, nchans, ieee, nieee, flags));
}
/*
 * Add the standard 2GHz channels 1-14 to the channel list.
 */
int
ieee80211_add_channels_default_2ghz(struct ieee80211_channel chans[],
    int maxchans, int *nchans, const uint8_t bands[], int cbw_flags)
{
	static const uint8_t default_chan_list[] =
	    { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 };

	return (ieee80211_add_channel_list_2ghz(chans, maxchans, nchans,
	    default_chan_list, nitems(default_chan_list), bands, cbw_flags));
}
/*
 * Add the listed 5GHz channels, expanded over all modes in 'bands',
 * to the channel list.
 */
int
ieee80211_add_channel_list_5ghz(struct ieee80211_channel chans[], int maxchans,
    int *nchans, const uint8_t ieee[], int nieee, const uint8_t bands[],
    int cbw_flags)
{
	/*
	 * XXX-BZ with HT and VHT there is no 1:1 mapping anymore.  Review all
	 * uses of IEEE80211_MODE_MAX and add a new #define name for array size.
	 */
	uint32_t flags[2 * IEEE80211_MODE_MAX];

	getflags_5ghz(bands, flags, cbw_flags);
	KASSERT(flags[0] != 0, ("%s: no correct mode provided\n", __func__));

	return (add_chanlist(chans, maxchans, nchans, ieee, nieee, flags));
}
/*
 * Locate a channel given a frequency+flags.  We cache
 * the previous lookup to optimize switching between two
 * channels--as happens with dynamic turbo.
 */
struct ieee80211_channel *
ieee80211_find_channel(struct ieee80211com *ic, int freq, int flags)
{
	struct ieee80211_channel *c;

	flags &= IEEE80211_CHAN_ALLTURBO;
	/* try the cached previous channel, fall back to a full scan */
	c = ic->ic_prevchan;
	if (c == NULL || c->ic_freq != freq ||
	    (c->ic_flags & IEEE80211_CHAN_ALLTURBO) != flags)
		c = findchannel(ic->ic_channels, ic->ic_nchans, freq, flags);
	return (c);
}
/*
 * Locate a channel given a channel number+flags.  We cache
 * the previous lookup to optimize switching between two
 * channels--as happens with dynamic turbo.
 */
struct ieee80211_channel *
ieee80211_find_channel_byieee(struct ieee80211com *ic, int ieee, int flags)
{
	struct ieee80211_channel *c;
	int i;

	flags &= IEEE80211_CHAN_ALLTURBO;
	/* fast path: the previously used channel */
	c = ic->ic_prevchan;
	if (c != NULL && c->ic_ieee == ieee &&
	    (c->ic_flags & IEEE80211_CHAN_ALLTURBO) == flags)
		return (c);
	/* otherwise brute force over the full channel table */
	for (i = 0; i < ic->ic_nchans; i++) {
		c = &ic->ic_channels[i];
		if (c->ic_ieee == ieee &&
		    (c->ic_flags & IEEE80211_CHAN_ALLTURBO) == flags)
			return (c);
	}
	return (NULL);
}
/*
 * Lookup a channel suitable for the given rx status.
 *
 * This is used to find a channel for a frame (eg beacon, probe
 * response) based purely on the received PHY information.
 *
 * For now it tries to do it based on R_FREQ / R_IEEE.
 * This is enough for 11bg and 11a (and thus 11ng/11na)
 * but it will not be enough for GSM, PSB channels and the
 * like.  It also doesn't know about legacy-turbog and
 * legacy-turbo modes, which some offload NICs actually
 * support in weird ways.
 *
 * Takes the ic and rxstatus; returns the channel or NULL
 * if not found.
 *
 * XXX TODO: Add support for that when the need arises.
 */
struct ieee80211_channel *
ieee80211_lookup_channel_rxstatus(struct ieee80211vap *vap,
    const struct ieee80211_rx_stats *rxs)
{
	struct ieee80211com *ic = vap->iv_ic;
	uint32_t flags;
	struct ieee80211_channel *c;

	if (rxs == NULL)
		return (NULL);

	/*
	 * Strictly speaking we only use freq for now,
	 * however later on we may wish to just store
	 * the ieee for verification.
	 */
	/* require freq, ieee and band to all be valid */
	if ((rxs->r_flags & IEEE80211_R_FREQ) == 0)
		return (NULL);
	if ((rxs->r_flags & IEEE80211_R_IEEE) == 0)
		return (NULL);
	if ((rxs->r_flags & IEEE80211_R_BAND) == 0)
		return (NULL);

	/*
	 * If the rx status contains a valid ieee/freq, then
	 * ensure we populate the correct channel information
	 * in rxchan before passing it up to the scan infrastructure.
	 * Offload NICs will pass up beacons from all channels
	 * during background scans.
	 */

	/* Determine a band */
	switch (rxs->c_band) {
	case IEEE80211_CHAN_2GHZ:
		flags = IEEE80211_CHAN_G;
		break;
	case IEEE80211_CHAN_5GHZ:
		flags = IEEE80211_CHAN_A;
		break;
	default:
		/* unknown band: guess from the frequency (3GHz boundary) */
		if (rxs->c_freq < 3000) {
			flags = IEEE80211_CHAN_G;
		} else {
			flags = IEEE80211_CHAN_A;
		}
		break;
	}

	/* Channel lookup */
	c = ieee80211_find_channel(ic, rxs->c_freq, flags);

	IEEE80211_DPRINTF(vap, IEEE80211_MSG_INPUT,
	    "%s: freq=%d, ieee=%d, flags=0x%08x; c=%p\n",
	    __func__, (int) rxs->c_freq, (int) rxs->c_ieee, flags, c);

	return (c);
}
/*
 * Register the ifmedia entries for one phy mode: the plain STA
 * word (when 'addsta') plus one word per operating-mode capability
 * bit set in 'caps'.
 *
 * NB: the old ADD() macro took (and ignored) a bogus first argument;
 * call sites passed 'ic' -- a name not even in scope here -- which
 * only compiled because the macro never expanded it.  The macro now
 * takes exactly the arguments it uses.
 */
static void
addmedia(struct ifmedia *media, int caps, int addsta, int mode, int mword)
{
#define	ADD(_s, _o) \
	ifmedia_add(media, \
	    IFM_MAKEWORD(IFM_IEEE80211, (_s), (_o), 0), 0, NULL)
	/* phy mode -> ifmedia mode word */
	static const u_int mopts[IEEE80211_MODE_MAX] = {
	    [IEEE80211_MODE_AUTO]	= IFM_AUTO,
	    [IEEE80211_MODE_11A]	= IFM_IEEE80211_11A,
	    [IEEE80211_MODE_11B]	= IFM_IEEE80211_11B,
	    [IEEE80211_MODE_11G]	= IFM_IEEE80211_11G,
	    [IEEE80211_MODE_FH]		= IFM_IEEE80211_FH,
	    [IEEE80211_MODE_TURBO_A]	= IFM_IEEE80211_11A|IFM_IEEE80211_TURBO,
	    [IEEE80211_MODE_TURBO_G]	= IFM_IEEE80211_11G|IFM_IEEE80211_TURBO,
	    [IEEE80211_MODE_STURBO_A]	= IFM_IEEE80211_11A|IFM_IEEE80211_TURBO,
	    [IEEE80211_MODE_HALF]	= IFM_IEEE80211_11A,	/* XXX */
	    [IEEE80211_MODE_QUARTER]	= IFM_IEEE80211_11A,	/* XXX */
	    [IEEE80211_MODE_11NA]	= IFM_IEEE80211_11NA,
	    [IEEE80211_MODE_11NG]	= IFM_IEEE80211_11NG,
	    [IEEE80211_MODE_VHT_2GHZ]	= IFM_IEEE80211_VHT2G,
	    [IEEE80211_MODE_VHT_5GHZ]	= IFM_IEEE80211_VHT5G,
	};
	u_int mopt;

	mopt = mopts[mode];
	if (addsta)
		ADD(mword, mopt);	/* STA mode has no cap */
	if (caps & IEEE80211_C_IBSS)
		ADD(mword, mopt | IFM_IEEE80211_ADHOC);
	if (caps & IEEE80211_C_HOSTAP)
		ADD(mword, mopt | IFM_IEEE80211_HOSTAP);
	if (caps & IEEE80211_C_AHDEMO)
		ADD(mword, mopt | IFM_IEEE80211_ADHOC | IFM_FLAG0);
	if (caps & IEEE80211_C_MONITOR)
		ADD(mword, mopt | IFM_IEEE80211_MONITOR);
	if (caps & IEEE80211_C_WDS)
		ADD(mword, mopt | IFM_IEEE80211_WDS);
	if (caps & IEEE80211_C_MBSS)
		ADD(mword, mopt | IFM_IEEE80211_MBSS);
#undef ADD
}
/*
 * Setup the media data structures according to the channel and
 * rate tables.
 *
 * Returns the highest rate encountered, in Mbps (legacy and HT
 * rates only; VHT max rate is not yet computed, see the XXX below).
 */
static int
ieee80211_media_setup(struct ieee80211com *ic,
    struct ifmedia *media, int caps, int addsta,
    ifm_change_cb_t media_change, ifm_stat_cb_t media_stat)
{
	int i, j, rate, maxrate, mword, r;
	enum ieee80211_phymode mode;
	const struct ieee80211_rateset *rs;
	struct ieee80211_rateset allrates;

	/*
	 * Fill in media characteristics.
	 */
	ifmedia_init(media, 0, media_change, media_stat);
	maxrate = 0;
	/*
	 * Add media for legacy operating modes.
	 */
	memset(&allrates, 0, sizeof(allrates));
	for (mode = IEEE80211_MODE_AUTO; mode < IEEE80211_MODE_11NA; mode++) {
		if (isclr(ic->ic_modecaps, mode))
			continue;
		addmedia(media, caps, addsta, mode, IFM_AUTO);
		if (mode == IEEE80211_MODE_AUTO)
			continue;	/* AUTO has no rate table */
		rs = &ic->ic_sup_rates[mode];
		for (i = 0; i < rs->rs_nrates; i++) {
			rate = rs->rs_rates[i];
			mword = ieee80211_rate2media(ic, rate, mode);
			if (mword == 0)
				continue;
			addmedia(media, caps, addsta, mode, mword);
			/*
			 * Add legacy rate to the collection of all rates.
			 */
			r = rate & IEEE80211_RATE_VAL;
			for (j = 0; j < allrates.rs_nrates; j++)
				if (allrates.rs_rates[j] == r)
					break;
			if (j == allrates.rs_nrates) {
				/* unique, add to the set */
				allrates.rs_rates[j] = r;
				allrates.rs_nrates++;
			}
			/* rates are in 0.5Mbps units; track max in Mbps */
			rate = (rate & IEEE80211_RATE_VAL) / 2;
			if (rate > maxrate)
				maxrate = rate;
		}
	}
	/* register each unique rate under the AUTO mode as well */
	for (i = 0; i < allrates.rs_nrates; i++) {
		mword = ieee80211_rate2media(ic, allrates.rs_rates[i],
		    IEEE80211_MODE_AUTO);
		if (mword == 0)
			continue;
		/* NB: remove media options from mword */
		addmedia(media, caps, addsta,
		    IEEE80211_MODE_AUTO, IFM_SUBTYPE(mword));
	}
	/*
	 * Add HT/11n media.  Note that we do not have enough
	 * bits in the media subtype to express the MCS so we
	 * use a "placeholder" media subtype and any fixed MCS
	 * must be specified with a different mechanism.
	 */
	for (; mode <= IEEE80211_MODE_11NG; mode++) {
		if (isclr(ic->ic_modecaps, mode))
			continue;
		addmedia(media, caps, addsta, mode, IFM_AUTO);
		addmedia(media, caps, addsta, mode, IFM_IEEE80211_MCS);
	}
	if (isset(ic->ic_modecaps, IEEE80211_MODE_11NA) ||
	    isset(ic->ic_modecaps, IEEE80211_MODE_11NG)) {
		addmedia(media, caps, addsta,
		    IEEE80211_MODE_AUTO, IFM_IEEE80211_MCS);
		/* index of the top MCS for ic_txstream spatial streams;
		 * presumably 8 MCS per stream -- TODO confirm against
		 * ieee80211_htrates layout */
		i = ic->ic_txstream * 8 - 1;
		/* pick the best-case HT rate for the cap combination */
		if ((ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40) &&
		    (ic->ic_htcaps & IEEE80211_HTCAP_SHORTGI40))
			rate = ieee80211_htrates[i].ht40_rate_400ns;
		else if ((ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40))
			rate = ieee80211_htrates[i].ht40_rate_800ns;
		else if ((ic->ic_htcaps & IEEE80211_HTCAP_SHORTGI20))
			rate = ieee80211_htrates[i].ht20_rate_400ns;
		else
			rate = ieee80211_htrates[i].ht20_rate_800ns;
		if (rate > maxrate)
			maxrate = rate;
	}
	/*
	 * Add VHT media.
	 * XXX-BZ skip "VHT_2GHZ" for now.
	 */
	for (mode = IEEE80211_MODE_VHT_5GHZ; mode <= IEEE80211_MODE_VHT_5GHZ;
	    mode++) {
		if (isclr(ic->ic_modecaps, mode))
			continue;
		addmedia(media, caps, addsta, mode, IFM_AUTO);
		addmedia(media, caps, addsta, mode, IFM_IEEE80211_VHT);
	}
	if (isset(ic->ic_modecaps, IEEE80211_MODE_VHT_5GHZ)) {
		addmedia(media, caps, addsta,
		    IEEE80211_MODE_AUTO, IFM_IEEE80211_VHT);
		/* XXX TODO: VHT maxrate */
	}
	return maxrate;
}
/* XXX inline or eliminate? */
const struct ieee80211_rateset *
ieee80211_get_suprates(struct ieee80211com *ic, const struct ieee80211_channel *c)
{
/* XXX does this work for 11ng basic rates? */
return &ic->ic_sup_rates[ieee80211_chan2mode(c)];
}
/* XXX inline or eliminate? */
const struct ieee80211_htrateset *
ieee80211_get_suphtrates(struct ieee80211com *ic,
    const struct ieee80211_channel *c)
{
	/* NB: the same HT rate set is returned regardless of channel */
	return (&ic->ic_sup_htrates);
}
/*
 * Print the supported legacy rates for each enabled phy mode,
 * then the HT and VHT capability summaries.
 */
void
ieee80211_announce(struct ieee80211com *ic)
{
	const struct ieee80211_rateset *rs;
	enum ieee80211_phymode mode;
	int i, mword, rate;

	/* NB: skip AUTO since it has no rates */
	for (mode = IEEE80211_MODE_AUTO+1; mode < IEEE80211_MODE_11NA; mode++) {
		if (isclr(ic->ic_modecaps, mode))
			continue;
		ic_printf(ic, "%s rates: ", ieee80211_phymode_name[mode]);
		rs = &ic->ic_sup_rates[mode];
		for (i = 0; i < rs->rs_nrates; i++) {
			mword = ieee80211_rate2media(ic, rs->rs_rates[i], mode);
			if (mword == 0)
				continue;
			/* rates are in 0.5Mbps units */
			rate = ieee80211_media2rate(mword);
			printf("%s%d%sMbps", (i != 0 ? " " : ""),
			    rate / 2, ((rate & 0x1) != 0 ? ".5" : ""));
		}
		printf("\n");
	}
	ieee80211_ht_announce(ic);
	ieee80211_vht_announce(ic);
}
/*
 * Print one table row per channel: number, frequency, a type
 * letter derived from the channel flags, channel width and the
 * regulatory/min/max power values (powers are in 0.5 dBm units).
 */
void
ieee80211_announce_channels(struct ieee80211com *ic)
{
	const struct ieee80211_channel *c;
	char type;
	int i, cw;

	printf("Chan  Freq  CW  RegPwr  MinPwr  MaxPwr\n");
	for (i = 0; i < ic->ic_nchans; i++) {
		c = &ic->ic_channels[i];
		/* most-specific flag test first */
		if (IEEE80211_IS_CHAN_ST(c))
			type = 'S';
		else if (IEEE80211_IS_CHAN_108A(c))
			type = 'T';
		else if (IEEE80211_IS_CHAN_108G(c))
			type = 'G';
		else if (IEEE80211_IS_CHAN_HT(c))
			type = 'n';
		else if (IEEE80211_IS_CHAN_A(c))
			type = 'a';
		else if (IEEE80211_IS_CHAN_ANYG(c))
			type = 'g';
		else if (IEEE80211_IS_CHAN_B(c))
			type = 'b';
		else
			type = 'f';
		/* channel width in MHz */
		if (IEEE80211_IS_CHAN_HT40(c) || IEEE80211_IS_CHAN_TURBO(c))
			cw = 40;
		else if (IEEE80211_IS_CHAN_HALF(c))
			cw = 10;
		else if (IEEE80211_IS_CHAN_QUARTER(c))
			cw = 5;
		else
			cw = 20;
		printf("%4d  %4d%c %2d%c %6d  %4d.%d  %4d.%d\n"
		    , c->ic_ieee, c->ic_freq, type
		    , cw
		    , IEEE80211_IS_CHAN_HT40U(c) ? '+' :
		      IEEE80211_IS_CHAN_HT40D(c) ? '-' : ' '
		    , c->ic_maxregpower
		    , c->ic_minpower / 2, c->ic_minpower & 1 ? 5 : 0
		    , c->ic_maxpower / 2, c->ic_maxpower & 1 ? 5 : 0
		);
	}
}
/*
 * Map an ifmedia entry to a phy mode, applying the turbo option
 * where it is valid (11a dynamic/static, 11g dynamic).
 * Returns 1 on success with *mode set, 0 for an unknown media mode
 * or an invalid turbo combination.
 */
static int
media2mode(const struct ifmedia_entry *ime, uint32_t flags, uint16_t *mode)
{
	switch (IFM_MODE(ime->ifm_media)) {
	case IFM_IEEE80211_11A:
		*mode = IEEE80211_MODE_11A;
		break;
	case IFM_IEEE80211_11B:
		*mode = IEEE80211_MODE_11B;
		break;
	case IFM_IEEE80211_11G:
		*mode = IEEE80211_MODE_11G;
		break;
	case IFM_IEEE80211_FH:
		*mode = IEEE80211_MODE_FH;
		break;
	case IFM_IEEE80211_11NA:
		*mode = IEEE80211_MODE_11NA;
		break;
	case IFM_IEEE80211_11NG:
		*mode = IEEE80211_MODE_11NG;
		break;
	case IFM_IEEE80211_VHT2G:
		*mode = IEEE80211_MODE_VHT_2GHZ;
		break;
	case IFM_IEEE80211_VHT5G:
		*mode = IEEE80211_MODE_VHT_5GHZ;
		break;
	case IFM_AUTO:
		*mode = IEEE80211_MODE_AUTO;
		break;
	default:
		return 0;
	}

	if ((ime->ifm_media & IFM_IEEE80211_TURBO) == 0) {
		/* XXX HT40 +/- */
		return 1;
	}

	/*
	 * Turbo mode is an ``option''.
	 * XXX does not apply to AUTO
	 */
	switch (*mode) {
	case IEEE80211_MODE_11A:
		*mode = (flags & IEEE80211_F_TURBOP) ?
		    IEEE80211_MODE_TURBO_A : IEEE80211_MODE_STURBO_A;
		return 1;
	case IEEE80211_MODE_11G:
		*mode = IEEE80211_MODE_TURBO_G;
		return 1;
	default:
		return 0;
	}
}
/*
 * Handle a media change request on the vap interface.
 */
int
ieee80211_media_change(struct ifnet *ifp)
{
	struct ieee80211vap *vap = ifp->if_softc;
	struct ifmedia_entry *ime = vap->iv_media.ifm_cur;
	uint16_t newmode;

	if (media2mode(ime, vap->iv_flags, &newmode) == 0)
		return EINVAL;
	if (vap->iv_des_mode == newmode)
		return 0;
	vap->iv_des_mode = newmode;
	/* XXX kick state machine if up+running */
	return 0;
}
/*
 * Common code to calculate the media status word
 * from the operating mode and channel state.
 */
static int
media_status(enum ieee80211_opmode opmode, const struct ieee80211_channel *chan)
{
	int status;

	status = IFM_IEEE80211;
	/* operating mode -> ifmedia option bits */
	switch (opmode) {
	case IEEE80211_M_STA:
		break;		/* STA has no option bit */
	case IEEE80211_M_IBSS:
		status |= IFM_IEEE80211_ADHOC;
		break;
	case IEEE80211_M_HOSTAP:
		status |= IFM_IEEE80211_HOSTAP;
		break;
	case IEEE80211_M_MONITOR:
		status |= IFM_IEEE80211_MONITOR;
		break;
	case IEEE80211_M_AHDEMO:
		status |= IFM_IEEE80211_ADHOC | IFM_FLAG0;
		break;
	case IEEE80211_M_WDS:
		status |= IFM_IEEE80211_WDS;
		break;
	case IEEE80211_M_MBSS:
		status |= IFM_IEEE80211_MBSS;
		break;
	}
	/* channel flags -> ifmedia mode; most-specific first */
	if (IEEE80211_IS_CHAN_VHT_5GHZ(chan)) {
		status |= IFM_IEEE80211_VHT5G;
	} else if (IEEE80211_IS_CHAN_VHT_2GHZ(chan)) {
		status |= IFM_IEEE80211_VHT2G;
	} else if (IEEE80211_IS_CHAN_HTA(chan)) {
		status |= IFM_IEEE80211_11NA;
	} else if (IEEE80211_IS_CHAN_HTG(chan)) {
		status |= IFM_IEEE80211_11NG;
	} else if (IEEE80211_IS_CHAN_A(chan)) {
		status |= IFM_IEEE80211_11A;
	} else if (IEEE80211_IS_CHAN_B(chan)) {
		status |= IFM_IEEE80211_11B;
	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
		status |= IFM_IEEE80211_11G;
	} else if (IEEE80211_IS_CHAN_FHSS(chan)) {
		status |= IFM_IEEE80211_FH;
	}
	/* XXX else complain? */

	if (IEEE80211_IS_CHAN_TURBO(chan))
		status |= IFM_IEEE80211_TURBO;
#if 0
	if (IEEE80211_IS_CHAN_HT20(chan))
		status |= IFM_IEEE80211_HT20;
	if (IEEE80211_IS_CHAN_HT40(chan))
		status |= IFM_IEEE80211_HT40;
#endif
	return status;
}
/*
 * ifmedia status callback for the vap interface: report the
 * active media word and, when possible, the current tx rate.
 */
void
ieee80211_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct ieee80211vap *vap = ifp->if_softc;
	struct ieee80211com *ic = vap->iv_ic;
	enum ieee80211_phymode mode;

	imr->ifm_status = IFM_AVALID;
	/*
	 * NB: use the current channel's mode to lock down a xmit
	 * rate only when running; otherwise we may have a mismatch
	 * in which case the rate will not be convertible.
	 */
	if (vap->iv_state == IEEE80211_S_RUN ||
	    vap->iv_state == IEEE80211_S_SLEEP) {
		imr->ifm_status |= IFM_ACTIVE;
		mode = ieee80211_chan2mode(ic->ic_curchan);
	} else
		mode = IEEE80211_MODE_AUTO;
	imr->ifm_active = media_status(vap->iv_opmode, ic->ic_curchan);
	/*
	 * Calculate a current rate if possible.
	 */
	if (vap->iv_txparms[mode].ucastrate != IEEE80211_FIXED_RATE_NONE) {
		/*
		 * A fixed rate is set, report that.
		 */
		imr->ifm_active |= ieee80211_rate2media(ic,
		    vap->iv_txparms[mode].ucastrate, mode);
	} else if (vap->iv_opmode == IEEE80211_M_STA) {
		/*
		 * In station mode report the current transmit rate.
		 */
		imr->ifm_active |= ieee80211_rate2media(ic,
		    vap->iv_bss->ni_txrate, mode);
	} else
		imr->ifm_active |= IFM_AUTO;
	if (imr->ifm_status & IFM_ACTIVE)
		imr->ifm_current = imr->ifm_active;
}
/*
 * Set the current phy mode and recalculate the active channel
 * set based on the available channels for this mode.  Also
 * select a new default/current channel if the current one is
 * inappropriate for this mode.
 *
 * NOTE(review): the body below only adjusts basic rates, stores
 * ic_curmode and resets ERP state -- the channel-set recalculation
 * described above is not visible here; confirm whether the comment
 * is stale.
 */
int
ieee80211_setmode(struct ieee80211com *ic, enum ieee80211_phymode mode)
{
	/*
	 * Adjust basic rates in 11b/11g supported rate set.
	 * Note that if operating on a half/quarter rate channel
	 * this is a noop as those rates sets are different
	 * and used instead.
	 */
	if (mode == IEEE80211_MODE_11G || mode == IEEE80211_MODE_11B)
		ieee80211_setbasicrates(&ic->ic_sup_rates[mode], mode);

	ic->ic_curmode = mode;
	ieee80211_reset_erp(ic);	/* reset global ERP state */

	return 0;
}
/*
* Return the phy mode for with the specified channel.
*/
enum ieee80211_phymode
ieee80211_chan2mode(const struct ieee80211_channel *chan)
{
	/*
	 * NB: the checks are ordered most-specific first (VHT before HT,
	 * turbo/half/quarter variants before plain 11a/11g/11b) because a
	 * channel can satisfy several of the broader predicates at once.
	 */
	if (IEEE80211_IS_CHAN_VHT_2GHZ(chan))
		return IEEE80211_MODE_VHT_2GHZ;
	else if (IEEE80211_IS_CHAN_VHT_5GHZ(chan))
		return IEEE80211_MODE_VHT_5GHZ;
	else if (IEEE80211_IS_CHAN_HTA(chan))
		return IEEE80211_MODE_11NA;
	else if (IEEE80211_IS_CHAN_HTG(chan))
		return IEEE80211_MODE_11NG;
	else if (IEEE80211_IS_CHAN_108G(chan))
		return IEEE80211_MODE_TURBO_G;
	else if (IEEE80211_IS_CHAN_ST(chan))
		return IEEE80211_MODE_STURBO_A;
	else if (IEEE80211_IS_CHAN_TURBO(chan))
		return IEEE80211_MODE_TURBO_A;
	else if (IEEE80211_IS_CHAN_HALF(chan))
		return IEEE80211_MODE_HALF;
	else if (IEEE80211_IS_CHAN_QUARTER(chan))
		return IEEE80211_MODE_QUARTER;
	else if (IEEE80211_IS_CHAN_A(chan))
		return IEEE80211_MODE_11A;
	else if (IEEE80211_IS_CHAN_ANYG(chan))
		return IEEE80211_MODE_11G;
	else if (IEEE80211_IS_CHAN_B(chan))
		return IEEE80211_MODE_11B;
	else if (IEEE80211_IS_CHAN_FHSS(chan))
		return IEEE80211_MODE_FH;

	/* NB: should not get here */
	printf("%s: cannot map channel to mode; freq %u flags 0x%x\n",
		__func__, chan->ic_freq, chan->ic_flags);
	return IEEE80211_MODE_11B;
}
struct ratemedia {
u_int match; /* rate + mode */
u_int media; /* if_media rate */
};
/*
 * Linear search of a ratemedia table; return the media word for the
 * first entry whose match field equals "match", or IFM_AUTO if none.
 */
static int
findmedia(const struct ratemedia rates[], int n, u_int match)
{
	const struct ratemedia *rm, *end;

	for (rm = rates, end = rates + n; rm != end; rm++) {
		if (rm->match == match)
			return (rm->media);
	}
	return (IFM_AUTO);
}
/*
* Convert IEEE80211 rate value to ifmedia subtype.
* Rate is either a legacy rate in units of 0.5Mbps
* or an MCS index.
*/
int
ieee80211_rate2media(struct ieee80211com *ic, int rate, enum ieee80211_phymode mode)
{
	/*
	 * Legacy (FH/CCK/OFDM) rates: the match key is the rate in 0.5Mbps
	 * units OR'd with a mode flag, since e.g. rate 2 (1Mbps) is valid
	 * for FH, 11b and 11g and must map to different media subtypes.
	 */
	static const struct ratemedia rates[] = {
		{   2 | IFM_IEEE80211_FH, IFM_IEEE80211_FH1 },
		{   4 | IFM_IEEE80211_FH, IFM_IEEE80211_FH2 },
		{   2 | IFM_IEEE80211_11B, IFM_IEEE80211_DS1 },
		{   4 | IFM_IEEE80211_11B, IFM_IEEE80211_DS2 },
		{  11 | IFM_IEEE80211_11B, IFM_IEEE80211_DS5 },
		{  22 | IFM_IEEE80211_11B, IFM_IEEE80211_DS11 },
		{  44 | IFM_IEEE80211_11B, IFM_IEEE80211_DS22 },
		{  12 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM6 },
		{  18 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM9 },
		{  24 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM12 },
		{  36 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM18 },
		{  48 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM24 },
		{  72 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM36 },
		{  96 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM48 },
		{ 108 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM54 },
		{   2 | IFM_IEEE80211_11G, IFM_IEEE80211_DS1 },
		{   4 | IFM_IEEE80211_11G, IFM_IEEE80211_DS2 },
		{  11 | IFM_IEEE80211_11G, IFM_IEEE80211_DS5 },
		{  22 | IFM_IEEE80211_11G, IFM_IEEE80211_DS11 },
		{  12 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM6 },
		{  18 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM9 },
		{  24 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM12 },
		{  36 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM18 },
		{  48 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM24 },
		{  72 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM36 },
		{  96 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM48 },
		{ 108 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM54 },
		{   6 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM3 },
		{   9 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM4 },
		{  54 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM27 },
		/* NB: OFDM72 doesn't really exist so we don't handle it */
	};
	/* HT MCS indices 0..76 all map to the single generic MCS subtype. */
	static const struct ratemedia htrates[] = {
		{   0, IFM_IEEE80211_MCS },
		{   1, IFM_IEEE80211_MCS },
		{   2, IFM_IEEE80211_MCS },
		{   3, IFM_IEEE80211_MCS },
		{   4, IFM_IEEE80211_MCS },
		{   5, IFM_IEEE80211_MCS },
		{   6, IFM_IEEE80211_MCS },
		{   7, IFM_IEEE80211_MCS },
		{   8, IFM_IEEE80211_MCS },
		{   9, IFM_IEEE80211_MCS },
		{  10, IFM_IEEE80211_MCS },
		{  11, IFM_IEEE80211_MCS },
		{  12, IFM_IEEE80211_MCS },
		{  13, IFM_IEEE80211_MCS },
		{  14, IFM_IEEE80211_MCS },
		{  15, IFM_IEEE80211_MCS },
		{  16, IFM_IEEE80211_MCS },
		{  17, IFM_IEEE80211_MCS },
		{  18, IFM_IEEE80211_MCS },
		{  19, IFM_IEEE80211_MCS },
		{  20, IFM_IEEE80211_MCS },
		{  21, IFM_IEEE80211_MCS },
		{  22, IFM_IEEE80211_MCS },
		{  23, IFM_IEEE80211_MCS },
		{  24, IFM_IEEE80211_MCS },
		{  25, IFM_IEEE80211_MCS },
		{  26, IFM_IEEE80211_MCS },
		{  27, IFM_IEEE80211_MCS },
		{  28, IFM_IEEE80211_MCS },
		{  29, IFM_IEEE80211_MCS },
		{  30, IFM_IEEE80211_MCS },
		{  31, IFM_IEEE80211_MCS },
		{  32, IFM_IEEE80211_MCS },
		{  33, IFM_IEEE80211_MCS },
		{  34, IFM_IEEE80211_MCS },
		{  35, IFM_IEEE80211_MCS },
		{  36, IFM_IEEE80211_MCS },
		{  37, IFM_IEEE80211_MCS },
		{  38, IFM_IEEE80211_MCS },
		{  39, IFM_IEEE80211_MCS },
		{  40, IFM_IEEE80211_MCS },
		{  41, IFM_IEEE80211_MCS },
		{  42, IFM_IEEE80211_MCS },
		{  43, IFM_IEEE80211_MCS },
		{  44, IFM_IEEE80211_MCS },
		{  45, IFM_IEEE80211_MCS },
		{  46, IFM_IEEE80211_MCS },
		{  47, IFM_IEEE80211_MCS },
		{  48, IFM_IEEE80211_MCS },
		{  49, IFM_IEEE80211_MCS },
		{  50, IFM_IEEE80211_MCS },
		{  51, IFM_IEEE80211_MCS },
		{  52, IFM_IEEE80211_MCS },
		{  53, IFM_IEEE80211_MCS },
		{  54, IFM_IEEE80211_MCS },
		{  55, IFM_IEEE80211_MCS },
		{  56, IFM_IEEE80211_MCS },
		{  57, IFM_IEEE80211_MCS },
		{  58, IFM_IEEE80211_MCS },
		{  59, IFM_IEEE80211_MCS },
		{  60, IFM_IEEE80211_MCS },
		{  61, IFM_IEEE80211_MCS },
		{  62, IFM_IEEE80211_MCS },
		{  63, IFM_IEEE80211_MCS },
		{  64, IFM_IEEE80211_MCS },
		{  65, IFM_IEEE80211_MCS },
		{  66, IFM_IEEE80211_MCS },
		{  67, IFM_IEEE80211_MCS },
		{  68, IFM_IEEE80211_MCS },
		{  69, IFM_IEEE80211_MCS },
		{  70, IFM_IEEE80211_MCS },
		{  71, IFM_IEEE80211_MCS },
		{  72, IFM_IEEE80211_MCS },
		{  73, IFM_IEEE80211_MCS },
		{  74, IFM_IEEE80211_MCS },
		{  75, IFM_IEEE80211_MCS },
		{  76, IFM_IEEE80211_MCS },
	};
	/* VHT MCS indices; 8/9 are optional per the 11ac spec. */
	static const struct ratemedia vhtrates[] = {
		{   0, IFM_IEEE80211_VHT },
		{   1, IFM_IEEE80211_VHT },
		{   2, IFM_IEEE80211_VHT },
		{   3, IFM_IEEE80211_VHT },
		{   4, IFM_IEEE80211_VHT },
		{   5, IFM_IEEE80211_VHT },
		{   6, IFM_IEEE80211_VHT },
		{   7, IFM_IEEE80211_VHT },
		{   8, IFM_IEEE80211_VHT },	/* Optional. */
		{   9, IFM_IEEE80211_VHT },	/* Optional. */
#if 0
		/* Some QCA and BRCM seem to support this; offspec. */
		{  10, IFM_IEEE80211_VHT },
		{  11, IFM_IEEE80211_VHT },
#endif
	};
	int m;

	/*
	 * Check 11ac/11n rates first for match as an MCS.
	 */
	if (mode == IEEE80211_MODE_VHT_5GHZ) {
		if (rate & IFM_IEEE80211_VHT) {
			rate &= ~IFM_IEEE80211_VHT;
			m = findmedia(vhtrates, nitems(vhtrates), rate);
			if (m != IFM_AUTO)
				return (m | IFM_IEEE80211_VHT);
		}
	} else if (mode == IEEE80211_MODE_11NA) {
		if (rate & IEEE80211_RATE_MCS) {
			rate &= ~IEEE80211_RATE_MCS;
			m = findmedia(htrates, nitems(htrates), rate);
			if (m != IFM_AUTO)
				return m | IFM_IEEE80211_11NA;
		}
	} else if (mode == IEEE80211_MODE_11NG) {
		/* NB: 12 is ambiguous, it will be treated as an MCS */
		if (rate & IEEE80211_RATE_MCS) {
			rate &= ~IEEE80211_RATE_MCS;
			m = findmedia(htrates, nitems(htrates), rate);
			if (m != IFM_AUTO)
				return m | IFM_IEEE80211_11NG;
		}
	}
	/* Not an MCS: strip the basic-rate bit and look up as legacy. */
	rate &= IEEE80211_RATE_VAL;
	switch (mode) {
	case IEEE80211_MODE_11A:
	case IEEE80211_MODE_HALF:		/* XXX good 'nuf */
	case IEEE80211_MODE_QUARTER:
	case IEEE80211_MODE_11NA:
	case IEEE80211_MODE_TURBO_A:
	case IEEE80211_MODE_STURBO_A:
		return findmedia(rates, nitems(rates),
		    rate | IFM_IEEE80211_11A);
	case IEEE80211_MODE_11B:
		return findmedia(rates, nitems(rates),
		    rate | IFM_IEEE80211_11B);
	case IEEE80211_MODE_FH:
		return findmedia(rates, nitems(rates),
		    rate | IFM_IEEE80211_FH);
	case IEEE80211_MODE_AUTO:
		/* NB: ic may be NULL for some drivers */
		if (ic != NULL && ic->ic_phytype == IEEE80211_T_FH)
			return findmedia(rates, nitems(rates),
			    rate | IFM_IEEE80211_FH);
		/* NB: hack, 11g matches both 11b+11a rates */
		/* fall thru... */
	case IEEE80211_MODE_11G:
	case IEEE80211_MODE_11NG:
	case IEEE80211_MODE_TURBO_G:
		return findmedia(rates, nitems(rates), rate | IFM_IEEE80211_11G);
	case IEEE80211_MODE_VHT_2GHZ:
	case IEEE80211_MODE_VHT_5GHZ:
		/* XXX TODO: need to figure out mapping for VHT rates */
		return IFM_AUTO;
	}
	/* No match found; let the caller treat the rate as "auto". */
	return IFM_AUTO;
}
int
ieee80211_media2rate(int mword)
{
static const int ieeerates[] = {
-1, /* IFM_AUTO */
0, /* IFM_MANUAL */
0, /* IFM_NONE */
2, /* IFM_IEEE80211_FH1 */
4, /* IFM_IEEE80211_FH2 */
2, /* IFM_IEEE80211_DS1 */
4, /* IFM_IEEE80211_DS2 */
11, /* IFM_IEEE80211_DS5 */
22, /* IFM_IEEE80211_DS11 */
44, /* IFM_IEEE80211_DS22 */
12, /* IFM_IEEE80211_OFDM6 */
18, /* IFM_IEEE80211_OFDM9 */
24, /* IFM_IEEE80211_OFDM12 */
36, /* IFM_IEEE80211_OFDM18 */
48, /* IFM_IEEE80211_OFDM24 */
72, /* IFM_IEEE80211_OFDM36 */
96, /* IFM_IEEE80211_OFDM48 */
108, /* IFM_IEEE80211_OFDM54 */
144, /* IFM_IEEE80211_OFDM72 */
0, /* IFM_IEEE80211_DS354k */
0, /* IFM_IEEE80211_DS512k */
6, /* IFM_IEEE80211_OFDM3 */
9, /* IFM_IEEE80211_OFDM4 */
54, /* IFM_IEEE80211_OFDM27 */
-1, /* IFM_IEEE80211_MCS */
-1, /* IFM_IEEE80211_VHT */
};
return IFM_SUBTYPE(mword) < nitems(ieeerates) ?
ieeerates[IFM_SUBTYPE(mword)] : 0;
}
/*
* The following hash function is adapted from "Hash Functions" by Bob Jenkins
* ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
*/
#define mix(a, b, c) \
do { \
a -= b; a -= c; a ^= (c >> 13); \
b -= c; b -= a; b ^= (a << 8); \
c -= a; c -= b; c ^= (b >> 13); \
a -= b; a -= c; a ^= (c >> 12); \
b -= c; b -= a; b ^= (a << 16); \
c -= a; c -= b; c ^= (b >> 5); \
a -= b; a -= c; a ^= (c >> 3); \
b -= c; b -= a; b ^= (a << 10); \
c -= a; c -= b; c ^= (b >> 15); \
} while (/*CONSTCOND*/0)
/*
 * Hash a MAC address down to 32 bits using the Jenkins mix above,
 * seeded with the per-device ic_hash_key; used e.g. for multicast
 * filter bucket selection.
 */
uint32_t
ieee80211_mac_hash(const struct ieee80211com *ic,
	const uint8_t addr[IEEE80211_ADDR_LEN])
{
	/* 0x9e3779b9 is the golden-ratio constant from Jenkins' design. */
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = ic->ic_hash_key;

	/* Pack the six address bytes into the a/b accumulators. */
	b += addr[5] << 8;
	b += addr[4];
	a += addr[3] << 24;
	a += addr[2] << 16;
	a += addr[1] << 8;
	a += addr[0];

	mix(a, b, c);

	return c;
}
#undef mix
/*
 * Return a single-character tag describing a channel's type, as used
 * in debug/regdomain listings.  Tests are ordered most-specific first.
 */
char
ieee80211_channel_type_char(const struct ieee80211_channel *c)
{
	char code;

	if (IEEE80211_IS_CHAN_ST(c))
		code = 'S';
	else if (IEEE80211_IS_CHAN_108A(c))
		code = 'T';
	else if (IEEE80211_IS_CHAN_108G(c))
		code = 'G';
	else if (IEEE80211_IS_CHAN_VHT(c))
		code = 'v';
	else if (IEEE80211_IS_CHAN_HT(c))
		code = 'n';
	else if (IEEE80211_IS_CHAN_A(c))
		code = 'a';
	else if (IEEE80211_IS_CHAN_ANYG(c))
		code = 'g';
	else if (IEEE80211_IS_CHAN_B(c))
		code = 'b';
	else
		code = 'f';	/* FHSS or otherwise unclassified */
	return (code);
}
diff --git a/sys/netgraph/ng_eiface.c b/sys/netgraph/ng_eiface.c
index a35650f15edd..4598291f3d60 100644
--- a/sys/netgraph/ng_eiface.c
+++ b/sys/netgraph/ng_eiface.c
@@ -1,686 +1,681 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
*
* Copyright (c) 1999-2001, Vitaly V Belekhov
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/sockio.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/if_private.h>
#include <net/if_types.h>
#include <net/netisr.h>
#include <net/route.h>
#include <net/vnet.h>
#include <netgraph/ng_message.h>
#include <netgraph/netgraph.h>
#include <netgraph/ng_parse.h>
#include <netgraph/ng_eiface.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if_arp.h>
static const struct ng_cmdlist ng_eiface_cmdlist[] = {
{
NGM_EIFACE_COOKIE,
NGM_EIFACE_GET_IFNAME,
"getifname",
NULL,
&ng_parse_string_type
},
{
NGM_EIFACE_COOKIE,
NGM_EIFACE_SET,
"set",
&ng_parse_enaddr_type,
NULL
},
{ 0 }
};
/* Node private data */
struct ng_eiface_private {
struct ifnet *ifp; /* per-interface network data */
struct ifmedia media; /* (fake) media information */
int link_status; /* fake */
int unit; /* Interface unit number */
node_p node; /* Our netgraph node */
hook_p ether; /* Hook for ethernet stream */
};
typedef struct ng_eiface_private *priv_p;
/* Interface methods */
static void ng_eiface_init(void *xsc);
static void ng_eiface_start(struct ifnet *ifp);
static int ng_eiface_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
#ifdef DEBUG
static void ng_eiface_print_ioctl(struct ifnet *ifp, int cmd, caddr_t data);
#endif
/* Netgraph methods */
static int ng_eiface_mod_event(module_t, int, void *);
static ng_constructor_t ng_eiface_constructor;
static ng_rcvmsg_t ng_eiface_rcvmsg;
static ng_shutdown_t ng_eiface_rmnode;
static ng_newhook_t ng_eiface_newhook;
static ng_rcvdata_t ng_eiface_rcvdata;
static ng_disconnect_t ng_eiface_disconnect;
/* Node type descriptor */
static struct ng_type typestruct = {
.version = NG_ABI_VERSION,
.name = NG_EIFACE_NODE_TYPE,
.mod_event = ng_eiface_mod_event,
.constructor = ng_eiface_constructor,
.rcvmsg = ng_eiface_rcvmsg,
.shutdown = ng_eiface_rmnode,
.newhook = ng_eiface_newhook,
.rcvdata = ng_eiface_rcvdata,
.disconnect = ng_eiface_disconnect,
.cmdlist = ng_eiface_cmdlist
};
NETGRAPH_INIT(eiface, &typestruct);
VNET_DEFINE_STATIC(struct unrhdr *, ng_eiface_unit);
#define V_ng_eiface_unit VNET(ng_eiface_unit)
/************************************************************************
INTERFACE STUFF
************************************************************************/
/*
* Process an ioctl for the virtual interface
*/
/*
 * ioctl handler for the virtual ethernet interface.  Only a minimal
 * subset of commands is handled locally; address configuration is
 * delegated to the generic ethernet code via ether_ioctl().
 */
static int
ng_eiface_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	const priv_p priv = (priv_p)ifp->if_softc;
	struct ifreq *const ifr = (struct ifreq *)data;
	int error = 0;

#ifdef DEBUG
	ng_eiface_print_ioctl(ifp, command, data);
#endif
	switch (command) {
	/* These two are mostly handled at a higher layer */
	case SIOCSIFADDR:
		error = ether_ioctl(ifp, command, data);
		break;
	case SIOCGIFADDR:
		break;

	/* Set flags */
	case SIOCSIFFLAGS:
		/*
		 * If the interface is marked up and stopped, then start it.
		 * If it is marked down and running, then stop it.
		 */
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
				ifp->if_drv_flags |= IFF_DRV_RUNNING;
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ifp->if_drv_flags &= ~(IFF_DRV_RUNNING |
				    IFF_DRV_OACTIVE);
		}
		break;

	/* Set the interface MTU; bounded by NG_EIFACE_MTU_MIN/MAX */
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > NG_EIFACE_MTU_MAX ||
		    ifr->ifr_mtu < NG_EIFACE_MTU_MIN)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	/* (Fake) media type manipulation */
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &priv->media, command);
		break;

	/* Multicast: accepted silently, no filtering is performed */
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = 0;
		break;
	case SIOCSIFPHYS:
		error = EOPNOTSUPP;
		break;

	default:
		error = EINVAL;
		break;
	}
	return (error);
}
/*
 * if_init handler: mark the interface running and clear the
 * output-active flag so transmission may proceed.
 */
static void
ng_eiface_init(void *xsc)
{
	const priv_p priv = xsc;

	priv->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	priv->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
/*
* We simply relay the packet to the "ether" hook, if it is connected.
* We have been through the netgraph locking and are guaranteed to
* be the only code running in this node at this time.
*/
static void
ng_eiface_start2(node_p node, hook_p hook, void *arg1, int arg2)
{
	struct ifnet *ifp = arg1;
	const priv_p priv = (priv_p)ifp->if_softc;
	int error = 0;
	struct mbuf *m;

	/* Check interface flags; bail if not up-and-running */
	if (!((ifp->if_flags & IFF_UP) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING)))
		return;

	/* Drain the interface send queue one packet at a time. */
	for (;;) {
		/*
		 * Grab a packet to transmit.
		 */
		IF_DEQUEUE(&ifp->if_snd, m);

		/* If there's nothing to send, break. */
		if (m == NULL)
			break;

		/* Peel the mbuf off any stale tags */
		m_tag_delete_chain(m, NULL);

		/*
		 * Berkeley packet filter.
		 * Pass packet to bpf if there is a listener.
		 * XXX is this safe? locking?
		 */
		BPF_MTAP(ifp, m);

		if (ifp->if_flags & IFF_MONITOR) {
			/* Monitor mode: count as input and drop. */
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
			m_freem(m);
			continue;
		}

		/*
		 * Send packet; if hook is not connected, mbuf will get
		 * freed.
		 */
		NG_OUTBOUND_THREAD_REF();
		CURVNET_SET_QUIET(priv->node->nd_vnet);
		NG_SEND_DATA_ONLY(error, priv->ether, m);
		CURVNET_RESTORE();
		NG_OUTBOUND_THREAD_UNREF();

		/* Update stats */
		if (error == 0)
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		else
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	}

	/* Done transmitting; let ng_eiface_start() schedule us again. */
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}
/*
* This routine is called to deliver a packet out the interface.
* We simply queue the netgraph version to be called when netgraph locking
* allows it to happen.
* Until we know what the rest of the networking code is doing for
* locking, we don't know how we will interact with it.
* Take comfort from the fact that the ifnet struct is part of our
* private info and can't go away while we are queued.
* [Though we don't know it is still there now....]
* it is possible we don't gain anything from this because
* we would like to get the mbuf and queue it as data
* somehow, but we can't and if we did would we solve anything?
*/
static void
ng_eiface_start(struct ifnet *ifp)
{
	const priv_p priv = (priv_p)ifp->if_softc;

	/* Don't do anything if output is active */
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	/*
	 * Queue the real work (ng_eiface_start2) through the netgraph
	 * framework so it runs under netgraph locking; if queueing
	 * fails, undo the OACTIVE flag so a later start can retry.
	 */
	if (ng_send_fn(priv->node, NULL, &ng_eiface_start2, ifp, 0) != 0)
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
#ifdef DEBUG
/*
* Display an ioctl to the virtual interface
*/
/*
 * Debug helper: decode an ioctl's direction bits and log the group,
 * command number and parameter length.
 */
static void
ng_eiface_print_ioctl(struct ifnet *ifp, int command, caddr_t data)
{
	char *str;

	/* Map the direction bits to a printable prefix. */
	switch (command & IOC_DIRMASK) {
	case IOC_VOID:
		str = "IO";
		break;
	case IOC_OUT:
		str = "IOR";
		break;
	case IOC_IN:
		str = "IOW";
		break;
	case IOC_INOUT:
		str = "IORW";
		break;
	default:
		str = "IO??";
	}
	log(LOG_DEBUG, "%s: %s('%c', %d, char[%d])\n",
	    ifp->if_xname,
	    str,
	    IOCGROUP(command),
	    command & 0xff,
	    IOCPARM_LEN(command));
}
#endif /* DEBUG */
/*
* ifmedia stuff
*/
static int
ng_eiface_mediachange(struct ifnet *ifp)
{
const priv_p priv = (priv_p)ifp->if_softc;
struct ifmedia *ifm = &priv->media;
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
return (EINVAL);
if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
ifp->if_baudrate = ifmedia_baudrate(IFM_ETHER | IFM_1000_T);
else
ifp->if_baudrate = ifmedia_baudrate(ifm->ifm_media);
return (0);
}
static void
ng_eiface_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
const priv_p priv = (priv_p)ifp->if_softc;
struct ifmedia *ifm = &priv->media;
if (ifm->ifm_cur->ifm_media == (IFM_ETHER | IFM_AUTO) &&
(priv->link_status & IFM_ACTIVE))
ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX;
else
ifmr->ifm_active = ifm->ifm_cur->ifm_media;
ifmr->ifm_status = priv->link_status;
return;
}
/************************************************************************
NETGRAPH NODE STUFF
************************************************************************/
/*
* Constructor for a node
*/
static int
ng_eiface_constructor(node_p node)
{
struct ifnet *ifp;
priv_p priv;
struct ether_addr eaddr;
/* Allocate node and interface private structures */
priv = malloc(sizeof(*priv), M_NETGRAPH, M_WAITOK | M_ZERO);
-
ifp = priv->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- free(priv, M_NETGRAPH);
- return (ENOSPC);
- }
/* Link them together */
ifp->if_softc = priv;
/* Get an interface unit number */
priv->unit = alloc_unr(V_ng_eiface_unit);
/* Link together node and private info */
NG_NODE_SET_PRIVATE(node, priv);
priv->node = node;
/* Initialize interface structure */
if_initname(ifp, NG_EIFACE_EIFACE_NAME, priv->unit);
ifp->if_init = ng_eiface_init;
ifp->if_output = ether_output;
ifp->if_start = ng_eiface_start;
ifp->if_ioctl = ng_eiface_ioctl;
ifp->if_snd.ifq_maxlen = ifqmaxlen;
ifp->if_flags = (IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST);
ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU;
ifp->if_capenable = IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU;
ifmedia_init(&priv->media, 0, ng_eiface_mediachange,
ng_eiface_mediastatus);
ifmedia_add(&priv->media, IFM_ETHER | IFM_10_T, 0, NULL);
ifmedia_add(&priv->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
ifmedia_add(&priv->media, IFM_ETHER | IFM_100_TX, 0, NULL);
ifmedia_add(&priv->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
ifmedia_add(&priv->media, IFM_ETHER | IFM_1000_T, 0, NULL);
ifmedia_add(&priv->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
ifmedia_add(&priv->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL);
ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);
priv->link_status = IFM_AVALID;
/* Give this node the same name as the interface (if possible) */
if (ng_name_node(node, ifp->if_xname) != 0)
log(LOG_WARNING, "%s: can't acquire netgraph name\n",
ifp->if_xname);
/* Attach the interface */
ether_gen_addr(ifp, &eaddr);
ether_ifattach(ifp, eaddr.octet);
ifp->if_baudrate = ifmedia_baudrate(IFM_ETHER | IFM_1000_T);
/* Done */
return (0);
}
/*
* Give our ok for a hook to be added
*/
/*
 * Hook connection: only the single "ether" hook is supported.
 * Connecting it brings the (fake) link state up.
 */
static int
ng_eiface_newhook(node_p node, hook_p hook, const char *name)
{
	const priv_p sc = NG_NODE_PRIVATE(node);
	struct ifnet *const ifp = sc->ifp;

	if (strcmp(name, NG_EIFACE_HOOK_ETHER) != 0)
		return (EPFNOSUPPORT);
	if (sc->ether != NULL)
		return (EISCONN);

	sc->ether = hook;
	NG_HOOK_SET_PRIVATE(hook, &sc->ether);
	NG_HOOK_SET_TO_INBOUND(hook);

	sc->link_status |= IFM_ACTIVE;
	CURVNET_SET_QUIET(ifp->if_vnet);
	if_link_state_change(ifp, LINK_STATE_UP);
	CURVNET_RESTORE();

	return (0);
}
/*
* Receive a control message
*/
/*
 * Control-message handler.  Supports the eiface commands (set MAC
 * address, query interface name/addresses) plus generic flow-control
 * messages that drive the fake link state.
 */
static int
ng_eiface_rcvmsg(node_p node, item_p item, hook_p lasthook)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct ifnet *const ifp = priv->ifp;
	struct ng_mesg *resp = NULL;
	int error = 0;
	struct ng_mesg *msg;

	NGI_GET_MSG(item, msg);
	switch (msg->header.typecookie) {
	case NGM_EIFACE_COOKIE:
		switch (msg->header.cmd) {
		case NGM_EIFACE_SET:
		    {
			/* Set the interface's link-level (MAC) address. */
			if (msg->header.arglen != ETHER_ADDR_LEN) {
				error = EINVAL;
				break;
			}
			error = if_setlladdr(priv->ifp,
			    (u_char *)msg->data, ETHER_ADDR_LEN);
			break;
		    }

		case NGM_EIFACE_GET_IFNAME:
			NG_MKRESPONSE(resp, msg, IFNAMSIZ, M_NOWAIT);
			if (resp == NULL) {
				error = ENOMEM;
				break;
			}
			strlcpy(resp->data, ifp->if_xname, IFNAMSIZ);
			break;

		case NGM_EIFACE_GET_IFADDRS:
		    {
			struct epoch_tracker et;
			struct ifaddr *ifa;
			caddr_t ptr;
			int buflen;

			/* Determine size of response and allocate it */
			buflen = 0;
			NET_EPOCH_ENTER(et);
			CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
				buflen += SA_SIZE(ifa->ifa_addr);
			NG_MKRESPONSE(resp, msg, buflen, M_NOWAIT);
			if (resp == NULL) {
				NET_EPOCH_EXIT(et);
				error = ENOMEM;
				break;
			}

			/* Add addresses */
			ptr = resp->data;
			CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
				const int len = SA_SIZE(ifa->ifa_addr);

				/*
				 * The list may have grown between the two
				 * walks; stop rather than overrun resp.
				 */
				if (buflen < len) {
					log(LOG_ERR, "%s: len changed?\n",
					    ifp->if_xname);
					break;
				}
				bcopy(ifa->ifa_addr, ptr, len);
				ptr += len;
				buflen -= len;
			}
			NET_EPOCH_EXIT(et);
			break;
		    }

		default:
			error = EINVAL;
			break;
		} /* end of inner switch() */
		break;
	case NGM_FLOW_COOKIE:
		/* Flow-control messages toggle the fake link state. */
		CURVNET_SET_QUIET(ifp->if_vnet);
		switch (msg->header.cmd) {
		case NGM_LINK_IS_UP:
			priv->link_status |= IFM_ACTIVE;
			if_link_state_change(ifp, LINK_STATE_UP);
			break;
		case NGM_LINK_IS_DOWN:
			priv->link_status &= ~IFM_ACTIVE;
			if_link_state_change(ifp, LINK_STATE_DOWN);
			break;
		default:
			break;
		}
		CURVNET_RESTORE();
		break;
	default:
		error = EINVAL;
		break;
	}
	NG_RESPOND_MSG(error, node, item, resp);
	NG_FREE_MSG(msg);
	return (error);
}
/*
* Receive data from a hook. Pass the packet to the ether_input routine.
*/
static int
ng_eiface_rcvdata(hook_p hook, item_p item)
{
	const priv_p priv = NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
	struct ifnet *const ifp = priv->ifp;
	struct mbuf *m;

	NGI_GET_M(item, m);
	NG_FREE_ITEM(item);

	/* Drop frames while the interface is down or not running. */
	if (!((ifp->if_flags & IFF_UP) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		NG_FREE_M(m);
		return (ENETDOWN);
	}

	/* Ensure the ethernet header is contiguous for if_input. */
	if (m->m_len < ETHER_HDR_LEN) {
		m = m_pullup(m, ETHER_HDR_LEN);
		if (m == NULL)
			return (EINVAL);	/* m_pullup freed the chain */
	}

	/* Note receiving interface */
	m->m_pkthdr.rcvif = ifp;

	/* Update interface stats */
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	(*ifp->if_input)(ifp, m);

	/* Done */
	return (0);
}
/*
* Shutdown processing.
*/
static int
ng_eiface_rmnode(node_p node)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct ifnet *const ifp = priv->ifp;

	/*
	 * the ifnet may be in a different vnet than the netgraph node,
	 * hence we have to change the current vnet context here.
	 */
	CURVNET_SET_QUIET(ifp->if_vnet);
	ether_ifdetach(ifp);
	ifmedia_removeall(&priv->media);
	if_free(ifp);
	CURVNET_RESTORE();

	/* Return the unit number and tear down the node state. */
	free_unr(V_ng_eiface_unit, priv->unit);
	free(priv, M_NETGRAPH);
	NG_NODE_SET_PRIVATE(node, NULL);
	NG_NODE_UNREF(node);
	return (0);
}
/*
* Hook disconnection
*/
/*
 * Hook disconnection: forget the "ether" hook and mark the fake link
 * down.  The node itself persists until explicitly shut down.
 */
static int
ng_eiface_disconnect(hook_p hook)
{
	const priv_p sc = NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
	struct ifnet *const ifp = sc->ifp;

	sc->ether = NULL;
	sc->link_status &= ~IFM_ACTIVE;

	CURVNET_SET_QUIET(ifp->if_vnet);
	if_link_state_change(ifp, LINK_STATE_DOWN);
	CURVNET_RESTORE();

	return (0);
}
/*
* Handle loading and unloading for this node type.
*/
/*
 * Module event handler: load and unload need no extra work; anything
 * else is unsupported.
 */
static int
ng_eiface_mod_event(module_t mod, int event, void *data)
{
	if (event == MOD_LOAD || event == MOD_UNLOAD)
		return (0);
	return (EOPNOTSUPP);
}
static void
vnet_ng_eiface_init(const void *unused)
{
	/* Per-vnet allocator for interface unit numbers 0..0xffff. */
	V_ng_eiface_unit = new_unrhdr(0, 0xffff, NULL);
}
VNET_SYSINIT(vnet_ng_eiface_init, SI_SUB_PSEUDO, SI_ORDER_ANY,
vnet_ng_eiface_init, NULL);
static void
vnet_ng_eiface_uninit(const void *unused)
{
	/* Tear down the per-vnet unit number allocator. */
	delete_unrhdr(V_ng_eiface_unit);
}
VNET_SYSUNINIT(vnet_ng_eiface_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY,
vnet_ng_eiface_uninit, NULL);
diff --git a/sys/netgraph/ng_iface.c b/sys/netgraph/ng_iface.c
index e9f97ff0fdec..d42e74d5fd22 100644
--- a/sys/netgraph/ng_iface.c
+++ b/sys/netgraph/ng_iface.c
@@ -1,817 +1,813 @@
/*
* ng_iface.c
*/
/*-
* Copyright (c) 1996-1999 Whistle Communications, Inc.
* All rights reserved.
*
* Subject to the following obligations and disclaimer of warranty, use and
* redistribution of this software, in source or object code forms, with or
* without modifications are expressly permitted by Whistle Communications;
* provided, however, that:
* 1. Any and all reproductions of the source or object code must include the
* copyright notice above and the following disclaimer of warranties; and
* 2. No rights are granted, in any manner or form, to use Whistle
* Communications, Inc. trademarks, including the mark "WHISTLE
* COMMUNICATIONS" on advertising, endorsements, or otherwise except as
* such appears in the above copyright notice or in the software.
*
* THIS SOFTWARE IS BEING PROVIDED BY WHISTLE COMMUNICATIONS "AS IS", AND
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, WHISTLE COMMUNICATIONS MAKES NO
* REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING THIS SOFTWARE,
* INCLUDING WITHOUT LIMITATION, ANY AND ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT.
* WHISTLE COMMUNICATIONS DOES NOT WARRANT, GUARANTEE, OR MAKE ANY
* REPRESENTATIONS REGARDING THE USE OF, OR THE RESULTS OF THE USE OF THIS
* SOFTWARE IN TERMS OF ITS CORRECTNESS, ACCURACY, RELIABILITY OR OTHERWISE.
* IN NO EVENT SHALL WHISTLE COMMUNICATIONS BE LIABLE FOR ANY DAMAGES
* RESULTING FROM OR ARISING OUT OF ANY USE OF THIS SOFTWARE, INCLUDING
* WITHOUT LIMITATION, ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* PUNITIVE, OR CONSEQUENTIAL DAMAGES, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES, LOSS OF USE, DATA OR PROFITS, HOWEVER CAUSED AND UNDER ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF WHISTLE COMMUNICATIONS IS ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* Author: Archie Cobbs <archie@freebsd.org>
* $Whistle: ng_iface.c,v 1.33 1999/11/01 09:24:51 julian Exp $
*/
/*
* This node is also a system networking interface. It has
* a hook for each protocol (IP, AppleTalk, etc). Packets
* are simply relayed between the interface and the hooks.
*
* Interfaces are named ng0, ng1, etc. New nodes take the
* first available interface name.
*
* This node also includes Berkeley packet filter support.
*/
#include "opt_inet.h"
#include "opt_inet6.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/rmlock.h>
#include <sys/sockio.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/libkern.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/netisr.h>
#include <net/route.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netgraph/ng_message.h>
#include <netgraph/netgraph.h>
#include <netgraph/ng_parse.h>
#include <netgraph/ng_iface.h>
#ifdef NG_SEPARATE_MALLOC
static MALLOC_DEFINE(M_NETGRAPH_IFACE, "netgraph_iface", "netgraph iface node");
#else
#define M_NETGRAPH_IFACE M_NETGRAPH
#endif
static SYSCTL_NODE(_net_graph, OID_AUTO, iface, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"Point to point netgraph interface");
VNET_DEFINE_STATIC(int, ng_iface_max_nest) = 2;
#define V_ng_iface_max_nest VNET(ng_iface_max_nest)
SYSCTL_INT(_net_graph_iface, OID_AUTO, max_nesting, CTLFLAG_VNET | CTLFLAG_RW,
&VNET_NAME(ng_iface_max_nest), 0, "Max nested tunnels");
/* This struct describes one address family */
struct iffam {
sa_family_t family; /* Address family */
const char *hookname; /* Name for hook */
};
typedef const struct iffam *iffam_p;
/* List of address families supported by our interface */
const static struct iffam gFamilies[] = {
{ AF_INET, NG_IFACE_HOOK_INET },
{ AF_INET6, NG_IFACE_HOOK_INET6 },
};
#define NUM_FAMILIES nitems(gFamilies)
/* Node private data */
struct ng_iface_private {
struct ifnet *ifp; /* Our interface */
int unit; /* Interface unit number */
node_p node; /* Our netgraph node */
hook_p hooks[NUM_FAMILIES]; /* Hook for each address family */
struct rmlock lock; /* Protect private data changes */
};
typedef struct ng_iface_private *priv_p;
#define PRIV_RLOCK(priv, t) rm_rlock(&priv->lock, t)
#define PRIV_RUNLOCK(priv, t) rm_runlock(&priv->lock, t)
#define PRIV_WLOCK(priv) rm_wlock(&priv->lock)
#define PRIV_WUNLOCK(priv) rm_wunlock(&priv->lock)
/* Interface methods */
static void ng_iface_start(struct ifnet *ifp);
static int ng_iface_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int ng_iface_output(struct ifnet *ifp, struct mbuf *m0,
const struct sockaddr *dst, struct route *ro);
static void ng_iface_bpftap(struct ifnet *ifp,
struct mbuf *m, sa_family_t family);
static int ng_iface_send(struct ifnet *ifp, struct mbuf *m,
sa_family_t sa);
#ifdef DEBUG
static void ng_iface_print_ioctl(struct ifnet *ifp, int cmd, caddr_t data);
#endif
/* Netgraph methods */
static int ng_iface_mod_event(module_t, int, void *);
static ng_constructor_t ng_iface_constructor;
static ng_rcvmsg_t ng_iface_rcvmsg;
static ng_shutdown_t ng_iface_shutdown;
static ng_newhook_t ng_iface_newhook;
static ng_rcvdata_t ng_iface_rcvdata;
static ng_disconnect_t ng_iface_disconnect;
/* Helper stuff */
static iffam_p get_iffam_from_af(sa_family_t family);
static iffam_p get_iffam_from_hook(priv_p priv, hook_p hook);
static iffam_p get_iffam_from_name(const char *name);
static hook_p *get_hook_from_iffam(priv_p priv, iffam_p iffam);
/* List of commands and how to convert arguments to/from ASCII */
static const struct ng_cmdlist ng_iface_cmds[] = {
{
NGM_IFACE_COOKIE,
NGM_IFACE_GET_IFNAME,
"getifname",
NULL,
&ng_parse_string_type
},
{
NGM_IFACE_COOKIE,
NGM_IFACE_POINT2POINT,
"point2point",
NULL,
NULL
},
{
NGM_IFACE_COOKIE,
NGM_IFACE_BROADCAST,
"broadcast",
NULL,
NULL
},
{
NGM_IFACE_COOKIE,
NGM_IFACE_GET_IFINDEX,
"getifindex",
NULL,
&ng_parse_uint32_type
},
{ 0 }
};
/* Node type descriptor */
static struct ng_type typestruct = {
.version = NG_ABI_VERSION,
.name = NG_IFACE_NODE_TYPE,
.mod_event = ng_iface_mod_event,
.constructor = ng_iface_constructor,
.rcvmsg = ng_iface_rcvmsg,
.shutdown = ng_iface_shutdown,
.newhook = ng_iface_newhook,
.rcvdata = ng_iface_rcvdata,
.disconnect = ng_iface_disconnect,
.cmdlist = ng_iface_cmds,
};
NETGRAPH_INIT(iface, &typestruct);
VNET_DEFINE_STATIC(struct unrhdr *, ng_iface_unit);
#define V_ng_iface_unit VNET(ng_iface_unit)
/************************************************************************
HELPER STUFF
************************************************************************/
/*
* Get the family descriptor from the family ID
*/
static __inline iffam_p
get_iffam_from_af(sa_family_t family)
{
	int i;

	/* Linear scan; the family table is tiny. */
	for (i = 0; i < NUM_FAMILIES; i++) {
		if (gFamilies[i].family == family)
			return (&gFamilies[i]);
	}
	return (NULL);
}
/*
* Get the family descriptor from the hook
*/
static __inline iffam_p
get_iffam_from_hook(priv_p priv, hook_p hook)
{
	int i;

	/* The hooks[] array is indexed identically to gFamilies[]. */
	for (i = 0; i < NUM_FAMILIES; i++) {
		if (priv->hooks[i] == hook)
			return (&gFamilies[i]);
	}
	return (NULL);
}
/*
* Get the hook from the iffam descriptor
*/
static __inline hook_p *
get_hook_from_iffam(priv_p priv, iffam_p iffam)
{
	/* Hooks live at the same index as their family descriptor. */
	return (priv->hooks + (iffam - gFamilies));
}
/*
* Get the iffam descriptor from the name
*/
static __inline iffam_p
get_iffam_from_name(const char *name)
{
	int i;

	/* Match the hook name against each family descriptor. */
	for (i = 0; i < NUM_FAMILIES; i++) {
		if (strcmp(gFamilies[i].hookname, name) == 0)
			return (&gFamilies[i]);
	}
	return (NULL);
}
/************************************************************************
INTERFACE STUFF
************************************************************************/
/*
* Process an ioctl for the virtual interface
*/
static int
ng_iface_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ifreq *const ifr = (struct ifreq *) data;
	int error = 0;

#ifdef DEBUG
	ng_iface_print_ioctl(ifp, command, data);
#endif
	switch (command) {
	/* These two are mostly handled at a higher layer */
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		break;
	case SIOCGIFADDR:
		break;
	case SIOCSIFFLAGS:
		/*
		 * Bring the driver state in sync with the administrative
		 * flag: start if marked up but stopped, stop if marked
		 * down but running.
		 */
		if ((ifr->ifr_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
				ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
				ifp->if_drv_flags |= IFF_DRV_RUNNING;
			}
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			ifp->if_drv_flags &=
			    ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
		break;
	case SIOCSIFMTU:
		/* Accept only an MTU within the node's supported range. */
		if (ifr->ifr_mtu < NG_IFACE_MTU_MIN ||
		    ifr->ifr_mtu > NG_IFACE_MTU_MAX)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	/* Multicast (de)registration is accepted but ignored. */
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;
	case SIOCSIFPHYS:
		error = EOPNOTSUPP;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
/*
* This routine is called to deliver a packet out the interface.
* We simply look at the address family and relay the packet to
* the corresponding hook, if it exists and is connected.
*/
static int
ng_iface_output(struct ifnet *ifp, struct mbuf *m,
	const struct sockaddr *dst, struct route *ro)
{
	uint32_t af;
	int error;

	/* Drop unless the interface is both administratively up and running. */
	if (!((ifp->if_flags & IFF_UP) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		m_freem(m);
		return (ENETDOWN);
	}

	/*
	 * Protect from deadly infinite recursion: refuse packets that
	 * have already traversed too many nested tunnel interfaces.
	 */
	error = if_tunnel_check_nesting(ifp, m, NGM_IFACE_COOKIE,
	    V_ng_iface_max_nest);
	if (error) {
		m_freem(m);
		return (error);
	}

	/*
	 * BPF writes need to be handled specially: for AF_UNSPEC /
	 * pseudo_AF_HDRCMPLT the real address family is carried in
	 * sa_data rather than derived from the route.
	 */
	if (dst->sa_family == AF_UNSPEC || dst->sa_family == pseudo_AF_HDRCMPLT)
		bcopy(dst->sa_data, &af, sizeof(af));
	else
		af = RO_GET_FAMILY(ro, dst);

	/* Berkeley packet filter */
	ng_iface_bpftap(ifp, m, af);

	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
		/*
		 * With ALTQ the packet goes through if_transmit(); prepend
		 * the address family so ng_iface_start() can recover it
		 * after dequeue.
		 */
		M_PREPEND(m, sizeof(sa_family_t), M_NOWAIT);
		if (m == NULL) {
			if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
			return (ENOBUFS);
		}
		*(sa_family_t *)m->m_data = af;
		error = (ifp->if_transmit)(ifp, m);
	} else
		error = ng_iface_send(ifp, m, af);
	return (error);
}
/*
* Start method is used only when ALTQ is enabled.
*/
static void
ng_iface_start(struct ifnet *ifp)
{
	struct mbuf *m;
	sa_family_t sa;

	KASSERT(ALTQ_IS_ENABLED(&ifp->if_snd), ("%s without ALTQ", __func__));

	/*
	 * Drain the send queue.  Each packet carries its address family,
	 * prepended by ng_iface_output(); strip it before handing the
	 * packet to netgraph.
	 */
	for (;;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			return;
		sa = *mtod(m, sa_family_t *);
		m_adj(m, sizeof(sa_family_t));
		ng_iface_send(ifp, m, sa);
	}
}
/*
* Flash a packet by the BPF (requires prepending 4 byte AF header)
* Note the phoney mbuf; this is OK because BPF treats it read-only.
*/
static void
ng_iface_bpftap(struct ifnet *ifp, struct mbuf *m, sa_family_t family)
{
	int32_t family4;

	KASSERT(family != AF_UNSPEC, ("%s: family=AF_UNSPEC", __func__));

	/* Nothing to do unless somebody is listening. */
	if (!bpf_peers_present(ifp->if_bpf))
		return;
	family4 = (int32_t)family;
	bpf_mtap2(ifp->if_bpf, &family4, sizeof(family4), m);
}
/*
* This routine does actual delivery of the packet into the
* netgraph(4). It is called from ng_iface_start() and
* ng_iface_output().
*/
static int
ng_iface_send(struct ifnet *ifp, struct mbuf *m, sa_family_t sa)
{
	struct rm_priotracker priv_tracker;
	const priv_p priv = (priv_p) ifp->if_softc;
	const iffam_p iffam = get_iffam_from_af(sa);
	hook_p hook;
	int error;
	int len;

	/* Check address family to determine hook (if known) */
	if (iffam == NULL) {
		m_freem(m);
		log(LOG_WARNING, "%s: can't handle af%d\n", ifp->if_xname, sa);
		return (EAFNOSUPPORT);
	}

	/* Copy length before the mbuf gets invalidated. */
	len = m->m_pkthdr.len;

	/*
	 * Pick up the hook under the read lock and take a reference on
	 * it so it cannot go away while we send; the lock itself is
	 * dropped before the send.
	 */
	PRIV_RLOCK(priv, &priv_tracker);
	hook = *get_hook_from_iffam(priv, iffam);
	if (hook == NULL) {
		NG_FREE_M(m);
		PRIV_RUNLOCK(priv, &priv_tracker);
		return ENETDOWN;
	}
	NG_HOOK_REF(hook);
	PRIV_RUNLOCK(priv, &priv_tracker);

	NG_OUTBOUND_THREAD_REF();
	NG_SEND_DATA_ONLY(error, hook, m);
	NG_OUTBOUND_THREAD_UNREF();
	NG_HOOK_UNREF(hook);

	/* Update stats. */
	if (error == 0) {
		if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	}
	return (error);
}
#ifdef DEBUG
/*
* Display an ioctl to the virtual interface
*/
static void
ng_iface_print_ioctl(struct ifnet *ifp, int command, caddr_t data)
{
	const char *str;

	/* Decode the ioctl direction bits into a human-readable tag. */
	switch (command & IOC_DIRMASK) {
	case IOC_VOID:
		str = "IO";
		break;
	case IOC_OUT:
		str = "IOR";
		break;
	case IOC_IN:
		str = "IOW";
		break;
	case IOC_INOUT:
		str = "IORW";
		break;
	default:
		str = "IO??";
		break;
	}
	log(LOG_DEBUG, "%s: %s('%c', %d, char[%d])\n",
	    ifp->if_xname, str, IOCGROUP(command), command & 0xff,
	    IOCPARM_LEN(command));
}
#endif /* DEBUG */
/************************************************************************
NETGRAPH NODE STUFF
************************************************************************/
/*
* Constructor for a node
*/
static int
ng_iface_constructor(node_p node)
{
	struct ifnet *ifp;
	priv_p priv;

	/* Allocate node and interface private structures */
	priv = malloc(sizeof(*priv), M_NETGRAPH_IFACE, M_WAITOK | M_ZERO);
	/* if_alloc() cannot fail, so no NULL check is needed. */
	ifp = if_alloc(IFT_PROPVIRTUAL);
	rm_init(&priv->lock, "ng_iface private rmlock");

	/* Link them together */
	ifp->if_softc = priv;
	priv->ifp = ifp;

	/* Get an interface unit number */
	priv->unit = alloc_unr(V_ng_iface_unit);

	/* Link together node and private info */
	NG_NODE_SET_PRIVATE(node, priv);
	priv->node = node;

	/* Initialize interface structure */
	if_initname(ifp, NG_IFACE_IFACE_NAME, priv->unit);
	ifp->if_output = ng_iface_output;
	ifp->if_start = ng_iface_start;
	ifp->if_ioctl = ng_iface_ioctl;
	ifp->if_mtu = NG_IFACE_MTU_DEFAULT;
	ifp->if_flags = (IFF_SIMPLEX|IFF_POINTOPOINT|IFF_NOARP|IFF_MULTICAST);
	ifp->if_type = IFT_PROPVIRTUAL;		/* XXX */
	ifp->if_addrlen = 0;			/* XXX */
	ifp->if_hdrlen = 0;			/* XXX */
	ifp->if_baudrate = 64000;		/* XXX */
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	/* Give this node the same name as the interface (if possible) */
	if (ng_name_node(node, ifp->if_xname) != 0)
		log(LOG_WARNING, "%s: can't acquire netgraph name\n",
		    ifp->if_xname);

	/* Attach the interface */
	if_attach(ifp);
	bpfattach(ifp, DLT_NULL, sizeof(u_int32_t));

	/* Done */
	return (0);
}
/*
* Give our ok for a hook to be added
*/
static int
ng_iface_newhook(node_p node, hook_p hook, const char *name)
{
	const iffam_p iffam = get_iffam_from_name(name);
	const priv_p priv = NG_NODE_PRIVATE(node);
	hook_p *slot;
	int error = 0;

	/* Only hooks named after a supported address family are allowed. */
	if (iffam == NULL)
		return (EPFNOSUPPORT);

	PRIV_WLOCK(priv);
	slot = get_hook_from_iffam(priv, iffam);
	if (*slot != NULL)
		error = EISCONN;	/* one hook per family */
	else {
		*slot = hook;
		NG_HOOK_HI_STACK(hook);
		NG_HOOK_SET_TO_INBOUND(hook);
	}
	PRIV_WUNLOCK(priv);
	return (error);
}
/*
* Receive a control message
*/
/*
 * Receive a control message.  The message and item are always released
 * at the bottom via NG_RESPOND_MSG()/NG_FREE_MSG(); no branch may
 * return early once NGI_GET_MSG() has claimed the message.
 */
static int
ng_iface_rcvmsg(node_p node, item_p item, hook_p lasthook)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct ifnet *const ifp = priv->ifp;
	struct ng_mesg *resp = NULL;
	int error = 0;
	struct ng_mesg *msg;

	NGI_GET_MSG(item, msg);
	switch (msg->header.typecookie) {
	case NGM_IFACE_COOKIE:
		switch (msg->header.cmd) {
		case NGM_IFACE_GET_IFNAME:
			NG_MKRESPONSE(resp, msg, IFNAMSIZ, M_NOWAIT);
			if (resp == NULL) {
				error = ENOMEM;
				break;
			}
			strlcpy(resp->data, ifp->if_xname, IFNAMSIZ);
			break;
		case NGM_IFACE_POINT2POINT:
		case NGM_IFACE_BROADCAST:
		    {
			/*
			 * Deny request if interface is UP.  Set the error
			 * and break instead of returning directly: an early
			 * return here would leak both msg and item, which
			 * are freed only at the bottom of this function.
			 */
			if ((ifp->if_flags & IFF_UP) != 0) {
				error = EBUSY;
				break;
			}

			/* Change flags */
			switch (msg->header.cmd) {
			case NGM_IFACE_POINT2POINT:
				ifp->if_flags |= IFF_POINTOPOINT;
				ifp->if_flags &= ~IFF_BROADCAST;
				break;
			case NGM_IFACE_BROADCAST:
				ifp->if_flags &= ~IFF_POINTOPOINT;
				ifp->if_flags |= IFF_BROADCAST;
				break;
			}
			break;
		    }
		case NGM_IFACE_GET_IFINDEX:
			NG_MKRESPONSE(resp, msg, sizeof(uint32_t), M_NOWAIT);
			if (resp == NULL) {
				error = ENOMEM;
				break;
			}
			*((uint32_t *)resp->data) = priv->ifp->if_index;
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	case NGM_FLOW_COOKIE:
		/* Link-state notifications from the peer node. */
		switch (msg->header.cmd) {
		case NGM_LINK_IS_UP:
			if_link_state_change(ifp, LINK_STATE_UP);
			break;
		case NGM_LINK_IS_DOWN:
			if_link_state_change(ifp, LINK_STATE_DOWN);
			break;
		default:
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	NG_RESPOND_MSG(error, node, item, resp);
	NG_FREE_MSG(msg);
	return (error);
}
/*
* Receive data from a hook. Pass the packet to the correct input routine.
*/
static int
ng_iface_rcvdata(hook_p hook, item_p item)
{
	const priv_p priv = NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
	const iffam_p iffam = get_iffam_from_hook(priv, hook);
	struct ifnet *const ifp = priv->ifp;
	struct epoch_tracker et;
	struct mbuf *m;
	int isr;

	NGI_GET_M(item, m);
	NG_FREE_ITEM(item);

	/* Sanity checks */
	KASSERT(iffam != NULL, ("%s: iffam", __func__));
	M_ASSERTPKTHDR(m);
	if ((ifp->if_flags & IFF_UP) == 0) {
		NG_FREE_M(m);
		return (ENETDOWN);
	}

	/* Update interface stats */
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);

	/* Note receiving interface */
	m->m_pkthdr.rcvif = ifp;

	/* Berkeley packet filter */
	ng_iface_bpftap(ifp, m, iffam->family);

	/* Map the hook's address family to a netisr queue. */
	switch (iffam->family) {
#ifdef INET
	case AF_INET:
		isr = NETISR_IP;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		isr = NETISR_IPV6;
		break;
#endif
	default:
		m_freem(m);
		return (EAFNOSUPPORT);
	}
	random_harvest_queue(m, sizeof(*m), RANDOM_NET_NG);
	M_SETFIB(m, ifp->if_fib);
	/*
	 * Dispatch within the interface's vnet (which may differ from
	 * the current one) and inside a network epoch section.
	 */
	CURVNET_SET(ifp->if_vnet);
	NET_EPOCH_ENTER(et);
	netisr_dispatch(isr, m);
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();
	return (0);
}
/*
* Shutdown and remove the node and its associated interface.
*/
static int
ng_iface_shutdown(node_p node)
{
	const priv_p priv = NG_NODE_PRIVATE(node);

	/*
	 * The ifnet may be in a different vnet than the netgraph node,
	 * hence we have to change the current vnet context here.
	 */
	CURVNET_SET_QUIET(priv->ifp->if_vnet);
	bpfdetach(priv->ifp);
	if_detach(priv->ifp);
	if_free(priv->ifp);
	CURVNET_RESTORE();
	priv->ifp = NULL;
	/* Return the unit number and release all private state. */
	free_unr(V_ng_iface_unit, priv->unit);
	rm_destroy(&priv->lock);
	free(priv, M_NETGRAPH_IFACE);
	NG_NODE_SET_PRIVATE(node, NULL);
	/* Drop our reference on the node. */
	NG_NODE_UNREF(node);
	return (0);
}
/*
* Hook disconnection. Note that we do *not* shutdown when all
* hooks have been disconnected.
*/
static int
ng_iface_disconnect(hook_p hook)
{
	const priv_p priv = NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
	const iffam_p iffam = get_iffam_from_hook(priv, hook);

	/* Every connected hook must map to a family descriptor. */
	if (iffam == NULL)
		panic("%s", __func__);
	/* Clear the hook slot under the writer lock. */
	PRIV_WLOCK(priv);
	*get_hook_from_iffam(priv, iffam) = NULL;
	PRIV_WUNLOCK(priv);
	return (0);
}
/*
* Handle loading and unloading for this node type.
*/
static int
ng_iface_mod_event(module_t mod, int event, void *data)
{

	/* Nothing special to do on load or unload. */
	switch (event) {
	case MOD_LOAD:
	case MOD_UNLOAD:
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}
static void
vnet_ng_iface_init(const void *unused)
{

	/* Per-vnet allocator for interface unit numbers. */
	V_ng_iface_unit = new_unrhdr(0, 0xffff, NULL);
}
VNET_SYSINIT(vnet_ng_iface_init, SI_SUB_PSEUDO, SI_ORDER_ANY,
vnet_ng_iface_init, NULL);
static void
vnet_ng_iface_uninit(const void *unused)
{

	/* Tear down the per-vnet unit number allocator. */
	delete_unrhdr(V_ng_iface_unit);
}
VNET_SYSUNINIT(vnet_ng_iface_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY,
vnet_ng_iface_uninit, NULL);
diff --git a/sys/netpfil/ipfw/ip_fw_bpf.c b/sys/netpfil/ipfw/ip_fw_bpf.c
index 216b39238b03..155e269214ec 100644
--- a/sys/netpfil/ipfw/ip_fw_bpf.c
+++ b/sys/netpfil/ipfw/ip_fw_bpf.c
@@ -1,206 +1,202 @@
/*-
* Copyright (c) 2016 Yandex LLC
* Copyright (c) 2016 Andrey V. Elsukov <ae@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/socket.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_pflog.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_private.h>
#include <net/if_types.h>
#include <net/vnet.h>
#include <net/bpf.h>
#include <netinet/in.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_var.h>
#include <netpfil/ipfw/ip_fw_private.h>
VNET_DEFINE_STATIC(struct ifnet *, log_if);
VNET_DEFINE_STATIC(struct ifnet *, pflog_if);
VNET_DEFINE_STATIC(struct if_clone *, ipfw_cloner);
VNET_DEFINE_STATIC(struct if_clone *, ipfwlog_cloner);
#define V_ipfw_cloner VNET(ipfw_cloner)
#define V_ipfwlog_cloner VNET(ipfwlog_cloner)
#define V_log_if VNET(log_if)
#define V_pflog_if VNET(pflog_if)
static const char ipfwname[] = "ipfw";
static const char ipfwlogname[] = "ipfwlog";
/* All ioctls on the ipfw log pseudo interfaces are rejected. */
static int
ipfw_bpf_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{

	return (EINVAL);
}
/*
 * Output routine: these interfaces exist only as BPF attachment
 * points, so anything handed to us is simply dropped.
 */
static int
ipfw_bpf_output(struct ifnet *ifp, struct mbuf *m,
	const struct sockaddr *dst, struct route *ro)
{

	if (m != NULL)
		FREE_PKT(m);
	return (0);
}
static void
ipfw_clone_destroy(struct ifnet *ifp)
{

	/* The header length identifies which of the two clones this is. */
	if (ifp->if_hdrlen == ETHER_HDR_LEN)
		V_log_if = NULL;
	else
		V_pflog_if = NULL;

	/*
	 * Wait for in-flight epoch readers (the ipfw_bpf_*tap* paths)
	 * to drain before tearing the interface down.
	 */
	NET_EPOCH_WAIT();
	bpfdetach(ifp);
	if_detach(ifp);
	if_free(ifp);
}
static int
ipfw_clone_create(struct if_clone *ifc, int unit, caddr_t params)
{
	struct ifnet *ifp;

	/* if_alloc() cannot fail, so no NULL check is needed. */
	ifp = if_alloc(IFT_PFLOG);
	if_initname(ifp, ipfwname, unit);
	ifp->if_flags = IFF_UP | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_mtu = 65536;
	ifp->if_ioctl = ipfw_bpf_ioctl;
	ifp->if_output = ipfw_bpf_output;
	ifp->if_hdrlen = ETHER_HDR_LEN;
	if_attach(ifp);
	bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
	/* Only one "ipfw" log interface may exist per vnet. */
	if (V_log_if != NULL) {
		bpfdetach(ifp);
		if_detach(ifp);
		if_free(ifp);
		return (EEXIST);
	}
	V_log_if = ifp;
	return (0);
}
static int
ipfwlog_clone_create(struct if_clone *ifc, int unit, caddr_t params)
{
	struct ifnet *ifp;

	/* if_alloc() cannot fail, so no NULL check is needed. */
	ifp = if_alloc(IFT_PFLOG);
	if_initname(ifp, ipfwlogname, unit);
	ifp->if_flags = IFF_UP | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_mtu = 65536;
	ifp->if_ioctl = ipfw_bpf_ioctl;
	ifp->if_output = ipfw_bpf_output;
	ifp->if_hdrlen = PFLOG_HDRLEN;
	if_attach(ifp);
	bpfattach(ifp, DLT_PFLOG, PFLOG_HDRLEN);
	/* Only one "ipfwlog" interface may exist per vnet. */
	if (V_pflog_if != NULL) {
		bpfdetach(ifp);
		if_detach(ifp);
		if_free(ifp);
		return (EEXIST);
	}
	V_pflog_if = ifp;
	return (0);
}
void
ipfw_bpf_tap(u_char *pkt, u_int pktlen)
{
	struct ifnet *ifp;

	NET_EPOCH_ASSERT();
	/* Tap only if the log interface currently exists. */
	ifp = V_log_if;
	if (ifp != NULL)
		BPF_TAP(ifp, pkt, pktlen);
}
void
ipfw_bpf_mtap(struct mbuf *m)
{
	struct ifnet *ifp;

	NET_EPOCH_ASSERT();
	/* Tap only if the log interface currently exists. */
	ifp = V_log_if;
	if (ifp != NULL)
		BPF_MTAP(ifp, m);
}
void
ipfw_bpf_mtap2(void *data, u_int dlen, struct mbuf *m)
{
	struct ifnet *logif = NULL;

	NET_EPOCH_ASSERT();
	/* The supplied header length selects the target interface. */
	switch (dlen) {
	case ETHER_HDR_LEN:
		logif = V_log_if;
		break;
	case PFLOG_HDRLEN:
		logif = V_pflog_if;
		break;
	default:
#ifdef INVARIANTS
		panic("%s: unsupported len %d", __func__, dlen);
#endif
		break;
	}
	if (logif != NULL)
		BPF_MTAP2(logif, data, dlen, m);
}
void
ipfw_bpf_init(int first __unused)
{

	/* No interface exists until the cloners are used. */
	V_log_if = NULL;
	V_pflog_if = NULL;
	/* Register the two on-demand interface cloners. */
	V_ipfw_cloner = if_clone_simple(ipfwname, ipfw_clone_create,
	    ipfw_clone_destroy, 0);
	V_ipfwlog_cloner = if_clone_simple(ipfwlogname, ipfwlog_clone_create,
	    ipfw_clone_destroy, 0);
}
void
ipfw_bpf_uninit(int last __unused)
{

	/* Unregister both cloners (destroys any remaining interfaces). */
	if_clone_detach(V_ipfw_cloner);
	if_clone_detach(V_ipfwlog_cloner);
}
diff --git a/sys/netpfil/pf/if_pflog.c b/sys/netpfil/pf/if_pflog.c
index 1e73d5f51851..8b849b0b9376 100644
--- a/sys/netpfil/pf/if_pflog.c
+++ b/sys/netpfil/pf/if_pflog.c
@@ -1,336 +1,333 @@
/*-
* SPDX-License-Identifier: ISC
*
* The authors of this code are John Ioannidis (ji@tla.org),
* Angelos D. Keromytis (kermit@csd.uch.gr) and
* Niels Provos (provos@physnet.uni-hamburg.de).
*
* This code was written by John Ioannidis for BSD/OS in Athens, Greece,
* in November 1995.
*
* Ported to OpenBSD and NetBSD, with additional transforms, in December 1996,
* by Angelos D. Keromytis.
*
* Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis
* and Niels Provos.
*
* Copyright (C) 1995, 1996, 1997, 1998 by John Ioannidis, Angelos D. Keromytis
* and Niels Provos.
* Copyright (c) 2001, Angelos D. Keromytis, Niels Provos.
*
* Permission to use, copy, and modify this software with or without fee
* is hereby granted, provided that this entire notice is included in
* all copies of any software which is or includes a copy or
* modification of this software.
* You may use this code under the GNU public license if you so wish. Please
* contribute changes back to the authors under this freer than GPL license
* so that we may further the use of strong encryption without limitations to
* all.
*
* THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
* REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
* MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
* PURPOSE.
*
* $OpenBSD: if_pflog.c,v 1.26 2007/10/18 21:58:18 mpf Exp $
*/
#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_pflog.h>
#include <net/if_private.h>
#include <net/if_types.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#endif
#ifdef INET
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif
#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet6/nd6.h>
#endif /* INET6 */
#ifdef INET
#include <machine/in_cksum.h>
#endif /* INET */
#define PFLOGMTU (32768 + MHLEN + MLEN)
#ifdef PFLOGDEBUG
#define DPRINTF(x) do { if (pflogdebug) printf x ; } while (0)
#else
#define DPRINTF(x)
#endif
static int pflogoutput(struct ifnet *, struct mbuf *,
const struct sockaddr *, struct route *);
static void pflogattach(int);
static int pflogioctl(struct ifnet *, u_long, caddr_t);
static void pflogstart(struct ifnet *);
static int pflog_clone_create(struct if_clone *, char *, size_t,
struct ifc_data *, struct ifnet **);
static int pflog_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);
static const char pflogname[] = "pflog";
VNET_DEFINE_STATIC(struct if_clone *, pflog_cloner);
#define V_pflog_cloner VNET(pflog_cloner)
VNET_DEFINE(struct ifnet *, pflogifs[PFLOGIFS_MAX]); /* for fast access */
#define V_pflogifs VNET(pflogifs)
static void
pflogattach(int npflog __unused)
{
	int i;

	/* Clear the fast-access table before any clone can exist. */
	for (i = 0; i < PFLOGIFS_MAX; i++)
		V_pflogifs[i] = NULL;
	struct if_clone_addreq req = {
		.create_f = pflog_clone_create,
		.destroy_f = pflog_clone_destroy,
		.flags = IFC_F_AUTOUNIT,
	};
	V_pflog_cloner = ifc_attach_cloner(pflogname, &req);
	/* Create pflog0 by default. */
	struct ifc_data ifd = { .unit = 0 };
	ifc_create_ifp(pflogname, &ifd, NULL);
}
static int
pflog_clone_create(struct if_clone *ifc, char *name, size_t maxlen,
    struct ifc_data *ifd, struct ifnet **ifpp)
{
	struct ifnet *ifp;

	/* The fast-access table only has room for PFLOGIFS_MAX units. */
	if (ifd->unit >= PFLOGIFS_MAX)
		return (EINVAL);

	/* if_alloc() cannot fail, so no NULL check is needed. */
	ifp = if_alloc(IFT_PFLOG);
	if_initname(ifp, pflogname, ifd->unit);
	ifp->if_mtu = PFLOGMTU;
	ifp->if_ioctl = pflogioctl;
	ifp->if_output = pflogoutput;
	ifp->if_start = pflogstart;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_hdrlen = PFLOG_HDRLEN;
	if_attach(ifp);
	bpfattach(ifp, DLT_PFLOG, PFLOG_HDRLEN);
	V_pflogifs[ifd->unit] = ifp;
	*ifpp = ifp;
	return (0);
}
static int
pflog_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
{
	int i;

	/* pflog0 may only be destroyed when forced. */
	if (ifp->if_dunit == 0 && (flags & IFC_F_FORCE) == 0)
		return (EINVAL);

	/* Remove the interface from the fast-access table. */
	for (i = 0; i < PFLOGIFS_MAX; i++) {
		if (V_pflogifs[i] == ifp)
			V_pflogifs[i] = NULL;
	}

	bpfdetach(ifp);
	if_detach(ifp);
	if_free(ifp);
	return (0);
}
/*
* Start output on the pflog interface.
*/
static void
pflogstart(struct ifnet *ifp)
{
	struct mbuf *m;

	/* pflog never transmits anything: drain and discard the queue. */
	for (;;) {
		IF_LOCK(&ifp->if_snd);
		_IF_DEQUEUE(&ifp->if_snd, m);
		IF_UNLOCK(&ifp->if_snd);
		if (m == NULL)
			break;
		m_freem(m);
	}
}
static int
pflogoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
	struct route *rt)
{

	/* Nothing is ever really sent; just drop the packet. */
	m_freem(m);
	return (0);
}
/* ARGSUSED */
static int
pflogioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{

	/* Only SIOCSIFFLAGS is supported. */
	if (cmd != SIOCSIFFLAGS)
		return (ENOTTY);

	/* Mirror the administrative IFF_UP flag into the driver state. */
	if (ifp->if_flags & IFF_UP)
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
	else
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	return (0);
}
/*
 * Build a pfloghdr describing the pf decision for this packet and tap
 * it, together with the packet, to BPF listeners on the configured
 * pflog interface.  Returns non-zero only on invalid arguments.
 */
static int
pflog_packet(struct pfi_kkif *kif, struct mbuf *m, sa_family_t af,
    uint8_t action, u_int8_t reason, struct pf_krule *rm, struct pf_krule *am,
    struct pf_kruleset *ruleset, struct pf_pdesc *pd, int lookupsafe)
{
	struct ifnet *ifn;
	struct pfloghdr hdr;

	if (kif == NULL || m == NULL || rm == NULL || pd == NULL)
		return (1);

	/* Nothing to do unless someone is listening on the log interface. */
	ifn = V_pflogifs[rm->logif];
	if (ifn == NULL || !bpf_peers_present(ifn->if_bpf))
		return (0);

	/* Fill in the PFLOG header that precedes the packet data. */
	bzero(&hdr, sizeof(hdr));
	hdr.length = PFLOG_REAL_HDRLEN;
	hdr.af = af;
	hdr.action = action;
	hdr.reason = reason;
	memcpy(hdr.ifname, kif->pfik_name, sizeof(hdr.ifname));

	/* Record the matching rule; for anchors, also the sub-rule. */
	if (am == NULL) {
		hdr.rulenr = htonl(rm->nr);
		hdr.subrulenr = -1;
	} else {
		hdr.rulenr = htonl(am->nr);
		hdr.subrulenr = htonl(rm->nr);
		if (ruleset != NULL && ruleset->anchor != NULL)
			strlcpy(hdr.ruleset, ruleset->anchor->name,
			    sizeof(hdr.ruleset));
	}
	hdr.ridentifier = htonl(rm->ridentifier);
	/*
	 * XXXGL: we avoid pf_socket_lookup() when we are holding
	 * state lock, since this leads to unsafe LOR.
	 * These conditions are very very rare, however.
	 */
	if (rm->log & PF_LOG_SOCKET_LOOKUP && !pd->lookup.done && lookupsafe)
		pd->lookup.done = pf_socket_lookup(pd, m);
	if (pd->lookup.done > 0)
		hdr.uid = pd->lookup.uid;
	else
		hdr.uid = UID_MAX;
	hdr.pid = NO_PID;
	hdr.rule_uid = rm->cuid;
	hdr.rule_pid = rm->cpid;
	hdr.dir = pd->dir;

#ifdef INET
	/* Refresh the IPv4 header checksum for outbound packets. */
	if (af == AF_INET && pd->dir == PF_OUT) {
		struct ip *ip;

		ip = mtod(m, struct ip *);
		ip->ip_sum = 0;
		ip->ip_sum = in_cksum(m, ip->ip_hl << 2);
	}
#endif /* INET */

	if_inc_counter(ifn, IFCOUNTER_OPACKETS, 1);
	if_inc_counter(ifn, IFCOUNTER_OBYTES, m->m_pkthdr.len);
	bpf_mtap2(ifn->if_bpf, &hdr, PFLOG_HDRLEN, m);

	return (0);
}
static void
vnet_pflog_init(const void *unused __unused)
{

	/* Per-vnet setup: register the cloner and create pflog0. */
	pflogattach(1);
}
VNET_SYSINIT(vnet_pflog_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY,
vnet_pflog_init, NULL);
static void
vnet_pflog_uninit(const void *unused __unused)
{

	/* Per-vnet teardown: detach the cloner (and its interfaces). */
	ifc_detach_cloner(V_pflog_cloner);
}
/*
* Detach after pf is gone; otherwise we might touch pflog memory
* from within pf after freeing pflog.
*/
VNET_SYSUNINIT(vnet_pflog_uninit, SI_SUB_INIT_IF, SI_ORDER_SECOND,
vnet_pflog_uninit, NULL);
static int
pflog_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		/* Publish our logging hook to pf under the rules lock. */
		PF_RULES_WLOCK();
		pflog_packet_ptr = pflog_packet;
		PF_RULES_WUNLOCK();
		return (0);
	case MOD_UNLOAD:
		/* Withdraw the hook before the module goes away. */
		PF_RULES_WLOCK();
		pflog_packet_ptr = NULL;
		PF_RULES_WUNLOCK();
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}
static moduledata_t pflog_mod = { pflogname, pflog_modevent, 0 };
#define PFLOG_MODVER 1
/* Do not run before pf is initialized as we depend on its locks. */
DECLARE_MODULE(pflog, pflog_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY);
MODULE_VERSION(pflog, PFLOG_MODVER);
MODULE_DEPEND(pflog, pf, PF_MODVER, PF_MODVER, PF_MODVER);
diff --git a/sys/netpfil/pf/if_pfsync.c b/sys/netpfil/pf/if_pfsync.c
index 80d6fddc709c..080938700e1d 100644
--- a/sys/netpfil/pf/if_pfsync.c
+++ b/sys/netpfil/pf/if_pfsync.c
@@ -1,3201 +1,3197 @@
/*-
* SPDX-License-Identifier: (BSD-2-Clause AND ISC)
*
* Copyright (c) 2002 Michael Shalayeff
* Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
/*-
* Copyright (c) 2009 David Gwynne <dlg@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
* $OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $
*
* Revisions picked from OpenBSD after revision 1.110 import:
* 1.119 - don't m_copydata() beyond the len of mbuf in pfsync_input()
* 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates
* 1.120, 1.175 - use monotonic time_uptime
* 1.122 - reduce number of updates for non-TCP sessions
* 1.125, 1.127 - rewrite merge or stale processing
* 1.128 - cleanups
* 1.146 - bzero() mbuf before sparsely filling it with data
* 1.170 - SIOCSIFMTU checks
* 1.126, 1.142 - deferred packets processing
* 1.173 - correct expire time processing
*/
#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/nv.h>
#include <sys/priv.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_private.h>
#include <net/if_types.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/route.h>
#include <net/if_pfsync.h>
#include <netinet/if_ether.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet6/in6_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/ip_carp.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netpfil/pf/pfsync_nv.h>
struct pfsync_bucket;
struct pfsync_softc;
union inet_template {
struct ip ipv4;
struct ip6_hdr ipv6;
};
#define PFSYNC_MINPKT ( \
sizeof(union inet_template) + \
sizeof(struct pfsync_header) + \
sizeof(struct pfsync_subheader) )
static int pfsync_upd_tcp(struct pf_kstate *, struct pfsync_state_peer *,
struct pfsync_state_peer *);
static int pfsync_in_clr(struct mbuf *, int, int, int, int);
static int pfsync_in_ins(struct mbuf *, int, int, int, int);
static int pfsync_in_iack(struct mbuf *, int, int, int, int);
static int pfsync_in_upd(struct mbuf *, int, int, int, int);
static int pfsync_in_upd_c(struct mbuf *, int, int, int, int);
static int pfsync_in_ureq(struct mbuf *, int, int, int, int);
static int pfsync_in_del_c(struct mbuf *, int, int, int, int);
static int pfsync_in_bus(struct mbuf *, int, int, int, int);
static int pfsync_in_tdb(struct mbuf *, int, int, int, int);
static int pfsync_in_eof(struct mbuf *, int, int, int, int);
static int pfsync_in_error(struct mbuf *, int, int, int, int);
/* Input dispatch table: one handler per PFSYNC_ACT_* message type. */
static int (*pfsync_acts[])(struct mbuf *, int, int, int, int) = {
	pfsync_in_clr,			/* PFSYNC_ACT_CLR */
	pfsync_in_ins,			/* PFSYNC_ACT_INS_1301 */
	pfsync_in_iack,			/* PFSYNC_ACT_INS_ACK */
	pfsync_in_upd,			/* PFSYNC_ACT_UPD_1301 */
	pfsync_in_upd_c,		/* PFSYNC_ACT_UPD_C */
	pfsync_in_ureq,			/* PFSYNC_ACT_UPD_REQ */
	pfsync_in_error,		/* PFSYNC_ACT_DEL */
	pfsync_in_del_c,		/* PFSYNC_ACT_DEL_C */
	pfsync_in_error,		/* PFSYNC_ACT_INS_F */
	pfsync_in_error,		/* PFSYNC_ACT_DEL_F */
	pfsync_in_bus,			/* PFSYNC_ACT_BUS */
	pfsync_in_tdb,			/* PFSYNC_ACT_TDB */
	pfsync_in_eof,			/* PFSYNC_ACT_EOF */
	pfsync_in_ins,			/* PFSYNC_ACT_INS_1400 */
	pfsync_in_upd,			/* PFSYNC_ACT_UPD_1400 */
};
/* Descriptor for one outbound sync queue. */
struct pfsync_q {
	void		(*write)(struct pf_kstate *, void *);	/* serializer */
	size_t		len;		/* on-wire size of one message */
	u_int8_t	action;		/* PFSYNC_ACT_* code emitted */
};
/* We have the following sync queues */
enum pfsync_q_id {
	PFSYNC_Q_INS_1301,
	PFSYNC_Q_INS_1400,
	PFSYNC_Q_IACK,
	PFSYNC_Q_UPD_1301,
	PFSYNC_Q_UPD_1400,
	PFSYNC_Q_UPD_C,
	PFSYNC_Q_DEL_C,
	PFSYNC_Q_COUNT,
};

/* Functions for building messages for given queue */
static void	pfsync_out_state_1301(struct pf_kstate *, void *);
static void	pfsync_out_state_1400(struct pf_kstate *, void *);
static void	pfsync_out_iack(struct pf_kstate *, void *);
static void	pfsync_out_upd_c(struct pf_kstate *, void *);
static void	pfsync_out_del_c(struct pf_kstate *, void *);

/*
 * Attach those functions to queue.  Indexed by enum pfsync_q_id; the 1301
 * and 1400 variants differ only in message layout/size.
 */
static struct pfsync_q pfsync_qs[] = {
	{ pfsync_out_state_1301, sizeof(struct pfsync_state_1301), PFSYNC_ACT_INS_1301 },
	{ pfsync_out_state_1400, sizeof(struct pfsync_state_1400), PFSYNC_ACT_INS_1400 },
	{ pfsync_out_iack, sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK },
	{ pfsync_out_state_1301, sizeof(struct pfsync_state_1301), PFSYNC_ACT_UPD_1301 },
	{ pfsync_out_state_1400, sizeof(struct pfsync_state_1400), PFSYNC_ACT_UPD_1400 },
	{ pfsync_out_upd_c, sizeof(struct pfsync_upd_c), PFSYNC_ACT_UPD_C },
	{ pfsync_out_del_c, sizeof(struct pfsync_del_c), PFSYNC_ACT_DEL_C }
};

/* Map queue to pf_kstate->sync_state */
static u_int8_t pfsync_qid_sstate[] = {
	PFSYNC_S_INS,	/* PFSYNC_Q_INS_1301 */
	PFSYNC_S_INS,	/* PFSYNC_Q_INS_1400 */
	PFSYNC_S_IACK,	/* PFSYNC_Q_IACK */
	PFSYNC_S_UPD,	/* PFSYNC_Q_UPD_1301 */
	PFSYNC_S_UPD,	/* PFSYNC_Q_UPD_1400 */
	PFSYNC_S_UPD_C,	/* PFSYNC_Q_UPD_C */
	PFSYNC_S_DEL_C,	/* PFSYNC_Q_DEL_C */
};

/* Map pf_kstate->sync_state to queue */
static enum pfsync_q_id pfsync_sstate_to_qid(u_int8_t);

static void	pfsync_q_ins(struct pf_kstate *, int sync_state, bool);
static void	pfsync_q_del(struct pf_kstate *, bool, struct pfsync_bucket *);

static void	pfsync_update_state(struct pf_kstate *);
static void	pfsync_tx(struct pfsync_softc *, struct mbuf *);
/* A queued "update request" message, waiting to be sent to the peer. */
struct pfsync_upd_req_item {
	TAILQ_ENTRY(pfsync_upd_req_item)	ur_entry;
	struct pfsync_upd_req			ur_msg;
};

/*
 * A deferred packet: transmission of the packet that created a state is
 * held back until the peer acknowledges the state (or a timeout fires).
 */
struct pfsync_deferral {
	struct pfsync_softc		*pd_sc;
	TAILQ_ENTRY(pfsync_deferral)	pd_entry;
	struct callout			pd_tmo;

	struct pf_kstate		*pd_st;
	struct mbuf			*pd_m;
};

/*
 * Per-CPU-ish output bucket.  Queued states, update requests and deferrals
 * are sharded over pfsync_buckets buckets, each with its own mutex and
 * timeout, to reduce contention.
 */
struct pfsync_bucket
{
	int			b_id;
	struct pfsync_softc	*b_sc;
	struct mtx		b_mtx;
	struct callout		b_tmo;
	int			b_flags;
#define	PFSYNCF_BUCKET_PUSH	0x00000001

	size_t			b_len;		/* current packet length */
	TAILQ_HEAD(, pf_kstate)			b_qs[PFSYNC_Q_COUNT];
	TAILQ_HEAD(, pfsync_upd_req_item)	b_upd_req_list;
	TAILQ_HEAD(, pfsync_deferral)		b_deferrals;
	u_int			b_deferred;
	uint8_t			*b_plus;	/* extra payload (e.g. bus msg) */
	size_t			b_pluslen;

	struct ifaltq b_snd;
};

/* Software context for the (single) pfsync interface in a vnet. */
struct pfsync_softc {
	/* Configuration */
	struct ifnet		*sc_ifp;
	struct ifnet		*sc_sync_if;
	struct ip_moptions	sc_imo;
	struct ip6_moptions	sc_im6o;
	struct sockaddr_storage	sc_sync_peer;
	uint32_t		sc_flags;
	uint8_t			sc_maxupdates;
	union inet_template     sc_template;
	struct mtx		sc_mtx;
	uint32_t		sc_version;	/* wire message version */

	/* Queued data */
	struct pfsync_bucket	*sc_buckets;

	/* Bulk update info */
	struct mtx		sc_bulk_mtx;
	uint32_t		sc_ureq_sent;
	int			sc_bulk_tries;
	uint32_t		sc_ureq_received;
	int			sc_bulk_hashid;
	uint64_t		sc_bulk_stateid;
	uint32_t		sc_bulk_creatorid;
	struct callout		sc_bulk_tmo;
	struct callout		sc_bulkfail_tmo;
};
/* Softc lock: protects configuration fields of struct pfsync_softc. */
#define	PFSYNC_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
#define	PFSYNC_UNLOCK(sc)	mtx_unlock(&(sc)->sc_mtx)
#define	PFSYNC_LOCK_ASSERT(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)

/* Bucket lock: protects one bucket's queues, deferrals and packet state. */
#define PFSYNC_BUCKET_LOCK(b)		mtx_lock(&(b)->b_mtx)
#define PFSYNC_BUCKET_UNLOCK(b)		mtx_unlock(&(b)->b_mtx)
#define PFSYNC_BUCKET_LOCK_ASSERT(b)	mtx_assert(&(b)->b_mtx, MA_OWNED)

/* Bulk lock: protects the bulk-update bookkeeping in the softc. */
#define	PFSYNC_BLOCK(sc)	mtx_lock(&(sc)->sc_bulk_mtx)
#define	PFSYNC_BUNLOCK(sc)	mtx_unlock(&(sc)->sc_bulk_mtx)
#define	PFSYNC_BLOCK_ASSERT(sc)	mtx_assert(&(sc)->sc_bulk_mtx, MA_OWNED)

/* Default deferral timeout, in milliseconds (tunable via sysctl below). */
#define PFSYNC_DEFER_TIMEOUT	20

static const char pfsyncname[] = "pfsync";
static MALLOC_DEFINE(M_PFSYNC, pfsyncname, "pfsync(4) data");

/* Per-vnet globals. */
VNET_DEFINE_STATIC(struct pfsync_softc	*, pfsyncif) = NULL;
#define	V_pfsyncif		VNET(pfsyncif)
VNET_DEFINE_STATIC(void *, pfsync_swi_cookie) = NULL;
#define	V_pfsync_swi_cookie	VNET(pfsync_swi_cookie)
VNET_DEFINE_STATIC(struct intr_event *, pfsync_swi_ie);
#define	V_pfsync_swi_ie		VNET(pfsync_swi_ie)
VNET_DEFINE_STATIC(struct pfsyncstats, pfsyncstats);
#define	V_pfsyncstats		VNET(pfsyncstats)
VNET_DEFINE_STATIC(int, pfsync_carp_adj) = CARP_MAXSKEW;
#define	V_pfsync_carp_adj	VNET(pfsync_carp_adj)
VNET_DEFINE_STATIC(unsigned int, pfsync_defer_timeout) = PFSYNC_DEFER_TIMEOUT;
#define	V_pfsync_defer_timeout	VNET(pfsync_defer_timeout)

static void	pfsync_timeout(void *);
static void	pfsync_push(struct pfsync_bucket *);
static void	pfsync_push_all(struct pfsync_softc *);
static void	pfsyncintr(void *);
static int	pfsync_multicast_setup(struct pfsync_softc *, struct ifnet *,
		    struct in_mfilter *, struct in6_mfilter *);
static void	pfsync_multicast_cleanup(struct pfsync_softc *);
static void	pfsync_pointers_init(void);
static void	pfsync_pointers_uninit(void);
static int	pfsync_init(void);
static void	pfsync_uninit(void);

/* Number of output buckets; 0 means "auto" (2 * mp_ncpus, set at create). */
static unsigned long pfsync_buckets;

SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "PFSYNC");
SYSCTL_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(pfsyncstats), pfsyncstats,
    "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment");
SYSCTL_ULONG(_net_pfsync, OID_AUTO, pfsync_buckets, CTLFLAG_RDTUN,
    &pfsync_buckets, 0, "Number of pfsync hash buckets");
SYSCTL_UINT(_net_pfsync, OID_AUTO, defer_delay, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(pfsync_defer_timeout), 0, "Deferred packet timeout (in ms)");

static int	pfsync_clone_create(struct if_clone *, int, caddr_t);
static void	pfsync_clone_destroy(struct ifnet *);
static int	pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
		    struct pf_state_peer *);
static int	pfsyncoutput(struct ifnet *, struct mbuf *,
		    const struct sockaddr *, struct route *);
static int	pfsyncioctl(struct ifnet *, u_long, caddr_t);

static int	pfsync_defer(struct pf_kstate *, struct mbuf *);
static void	pfsync_undefer(struct pfsync_deferral *, int);
static void	pfsync_undefer_state_locked(struct pf_kstate *, int);
static void	pfsync_undefer_state(struct pf_kstate *, int);
static void	pfsync_defer_tmo(void *);

static void	pfsync_request_update(u_int32_t, u_int64_t);
static bool	pfsync_update_state_req(struct pf_kstate *);

static void	pfsync_drop(struct pfsync_softc *);
static void	pfsync_sendout(int, int);
static void	pfsync_send_plus(void *, size_t);

static void	pfsync_bulk_start(void);
static void	pfsync_bulk_status(u_int8_t);
static void	pfsync_bulk_update(void *);
static void	pfsync_bulk_fail(void *);

static void	pfsync_detach_ifnet(struct ifnet *);

static int pfsync_pfsyncreq_to_kstatus(struct pfsyncreq *,
    struct pfsync_kstatus *);
static int pfsync_kstatus_to_softc(struct pfsync_kstatus *,
    struct pfsync_softc *);

#ifdef IPSEC
static void	pfsync_update_net_tdb(struct pfsync_tdb *);
#endif
static struct pfsync_bucket	*pfsync_get_bucket(struct pfsync_softc *,
		    struct pf_kstate *);

#define PFSYNC_MAX_BULKTRIES	12

VNET_DEFINE(struct if_clone *, pfsync_cloner);
#define	V_pfsync_cloner	VNET(pfsync_cloner)

/* Well-known IPv6 link-local multicast group used for pfsync (ff12::f0). */
const struct in6_addr in6addr_linklocal_pfsync_group =
	{{{ 0xff, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0 }}};
/*
 * if_clone create handler for pfsync(4).  Allocates the softc, the network
 * interface and the per-bucket output state.  Only unit 0 is supported.
 * Returns 0 on success or EINVAL for any other unit.
 */
static int
pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param)
{
	struct pfsync_softc *sc;
	struct ifnet *ifp;
	struct pfsync_bucket *b;
	int c;
	enum pfsync_q_id q;

	if (unit != 0)
		return (EINVAL);

	if (! pfsync_buckets)
		pfsync_buckets = mp_ncpus * 2;

	sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO);
	sc->sc_flags |= PFSYNCF_OK;
	sc->sc_maxupdates = 128;
	sc->sc_version = PFSYNC_MSG_VERSION_DEFAULT;

	/* if_alloc(9) cannot fail, so no NULL check is required. */
	ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC);
	if_initname(ifp, pfsyncname, unit);
	ifp->if_softc = sc;
	ifp->if_ioctl = pfsyncioctl;
	ifp->if_output = pfsyncoutput;
	ifp->if_type = IFT_PFSYNC;
	ifp->if_hdrlen = sizeof(struct pfsync_header);
	ifp->if_mtu = ETHERMTU;
	mtx_init(&sc->sc_mtx, pfsyncname, NULL, MTX_DEF);
	mtx_init(&sc->sc_bulk_mtx, "pfsync bulk", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_bulk_tmo, &sc->sc_bulk_mtx, 0);
	callout_init_mtx(&sc->sc_bulkfail_tmo, &sc->sc_bulk_mtx, 0);

	if_attach(ifp);

	bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN);

	sc->sc_buckets = mallocarray(pfsync_buckets, sizeof(*sc->sc_buckets),
	    M_PFSYNC, M_ZERO | M_WAITOK);
	for (c = 0; c < pfsync_buckets; c++) {
		b = &sc->sc_buckets[c];
		mtx_init(&b->b_mtx, "pfsync bucket", NULL, MTX_DEF);

		b->b_id = c;
		b->b_sc = sc;
		b->b_len = PFSYNC_MINPKT;

		for (q = 0; q < PFSYNC_Q_COUNT; q++)
			TAILQ_INIT(&b->b_qs[q]);

		TAILQ_INIT(&b->b_upd_req_list);
		TAILQ_INIT(&b->b_deferrals);

		callout_init(&b->b_tmo, 1);

		b->b_snd.ifq_maxlen = ifqmaxlen;
	}

	V_pfsyncif = sc;

	return (0);
}
/*
 * if_clone destroy handler.  By the time we get here pfsync_uninit() is
 * expected to have cleared the queues; we only need to resolve outstanding
 * deferrals, drain callouts and release resources.
 */
static void
pfsync_clone_destroy(struct ifnet *ifp)
{
	struct pfsync_softc *sc = ifp->if_softc;
	struct pfsync_bucket *b;
	int c, ret;

	for (c = 0; c < pfsync_buckets; c++) {
		b = &sc->sc_buckets[c];
		/*
		 * At this stage, everything should have already been
		 * cleared by pfsync_uninit(), and we have only to
		 * drain callouts.
		 */
		PFSYNC_BUCKET_LOCK(b);
		while (b->b_deferred > 0) {
			struct pfsync_deferral *pd =
			    TAILQ_FIRST(&b->b_deferrals);

			/*
			 * The bucket lock is dropped around the undefer/
			 * drain calls below; the loop re-checks b_deferred
			 * after re-acquiring it.
			 */
			ret = callout_stop(&pd->pd_tmo);
			PFSYNC_BUCKET_UNLOCK(b);
			if (ret > 0) {
				/* Callout stopped before firing: resolve
				 * the deferral ourselves. */
				pfsync_undefer(pd, 1);
			} else {
				/* Callout is in flight: wait until it is
				 * done with pd. */
				callout_drain(&pd->pd_tmo);
			}
			PFSYNC_BUCKET_LOCK(b);
		}
		MPASS(b->b_deferred == 0);
		MPASS(TAILQ_EMPTY(&b->b_deferrals));
		PFSYNC_BUCKET_UNLOCK(b);

		free(b->b_plus, M_PFSYNC);
		b->b_plus = NULL;
		b->b_pluslen = 0;

		callout_drain(&b->b_tmo);
	}

	callout_drain(&sc->sc_bulkfail_tmo);
	callout_drain(&sc->sc_bulk_tmo);

	/* Undo the CARP demotion applied while we were out of sync. */
	if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
		(*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy");
	bpfdetach(ifp);
	if_detach(ifp);

	pfsync_drop(sc);

	if_free(ifp);
	pfsync_multicast_cleanup(sc);
	mtx_destroy(&sc->sc_mtx);
	mtx_destroy(&sc->sc_bulk_mtx);

	free(sc->sc_buckets, M_PFSYNC);
	free(sc, M_PFSYNC);

	V_pfsyncif = NULL;
}
/*
 * Allocate scrub state for a peer if the incoming message carries scrub
 * information and none is attached yet.  Returns 0 on success (or when no
 * allocation is needed), ENOMEM on allocation failure.
 */
static int
pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
    struct pf_state_peer *d)
{
	if (s->scrub.scrub_flag == 0 || d->scrub != NULL)
		return (0);

	d->scrub = uma_zalloc(V_pf_state_scrub_z, M_NOWAIT | M_ZERO);
	return (d->scrub == NULL ? ENOMEM : 0);
}
/*
 * Import a state received from a pfsync peer (or via the DIOCADDSTATE
 * ioctl) into the local state table.  'flags' carries PFSYNC_SI_* bits;
 * 'msg_version' selects the 1301 vs. 1400 wire layout.  Returns 0 on
 * success, 0 (skip) for an unknown interface on the wire path, EINVAL for
 * invalid input, or ENOMEM on allocation failure.
 */
static int
pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
{
	struct pfsync_softc *sc = V_pfsyncif;
#ifndef	__NO_STRICT_ALIGNMENT
	struct pfsync_state_key key[2];
#endif
	struct pfsync_state_key *kw, *ks;
	struct pf_kstate	*st = NULL;
	struct pf_state_key *skw = NULL, *sks = NULL;
	struct pf_krule *r = NULL;
	struct pfi_kkif	*kif;
	int error;

	PF_RULES_RASSERT();

	if (sp->pfs_1301.creatorid == 0) {
		if (V_pf_status.debug >= PF_DEBUG_MISC)
			printf("%s: invalid creator id: %08x\n", __func__,
			    ntohl(sp->pfs_1301.creatorid));
		return (EINVAL);
	}

	if ((kif = pfi_kkif_find(sp->pfs_1301.ifname)) == NULL) {
		if (V_pf_status.debug >= PF_DEBUG_MISC)
			printf("%s: unknown interface: %s\n", __func__,
			    sp->pfs_1301.ifname);
		if (flags & PFSYNC_SI_IOCTL)
			return (EINVAL);
		return (0);	/* skip this state */
	}

	/*
	 * If the ruleset checksums match or the state is coming from the ioctl,
	 * it's safe to associate the state with the rule of that number.
	 */
	if (sp->pfs_1301.rule != htonl(-1) && sp->pfs_1301.anchor == htonl(-1) &&
	    (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->pfs_1301.rule) <
	    pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
		r = pf_main_ruleset.rules[
		    PF_RULESET_FILTER].active.ptr_array[ntohl(sp->pfs_1301.rule)];
	else
		r = &V_pf_default_rule;

	/* Respect the matched rule's state limit. */
	if ((r->max_states &&
	    counter_u64_fetch(r->states_cur) >= r->max_states))
		goto cleanup;

	/*
	 * XXXGL: consider M_WAITOK in ioctl path after.
	 */
	st = pf_alloc_state(M_NOWAIT);
	if (__predict_false(st == NULL))
		goto cleanup;

	if ((skw = uma_zalloc(V_pf_state_key_z, M_NOWAIT)) == NULL)
		goto cleanup;

#ifndef	__NO_STRICT_ALIGNMENT
	/* Copy out of the (possibly unaligned) wire buffer first. */
	bcopy(&sp->pfs_1301.key, key, sizeof(struct pfsync_state_key) * 2);
	kw = &key[PF_SK_WIRE];
	ks = &key[PF_SK_STACK];
#else
	kw = &sp->pfs_1301.key[PF_SK_WIRE];
	ks = &sp->pfs_1301.key[PF_SK_STACK];
#endif

	/*
	 * Allocate a separate stack-side key only if it differs from the
	 * wire-side key (i.e. the state was NATed); otherwise share it.
	 */
	if (PF_ANEQ(&kw->addr[0], &ks->addr[0], sp->pfs_1301.af) ||
	    PF_ANEQ(&kw->addr[1], &ks->addr[1], sp->pfs_1301.af) ||
	    kw->port[0] != ks->port[0] ||
	    kw->port[1] != ks->port[1]) {
		sks = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
		if (sks == NULL)
			goto cleanup;
	} else
		sks = skw;

	/* allocate memory for scrub info */
	if (pfsync_alloc_scrub_memory(&sp->pfs_1301.src, &st->src) ||
	    pfsync_alloc_scrub_memory(&sp->pfs_1301.dst, &st->dst))
		goto cleanup;

	/* Copy to state key(s). */
	skw->addr[0] = kw->addr[0];
	skw->addr[1] = kw->addr[1];
	skw->port[0] = kw->port[0];
	skw->port[1] = kw->port[1];
	skw->proto = sp->pfs_1301.proto;
	skw->af = sp->pfs_1301.af;
	if (sks != skw) {
		sks->addr[0] = ks->addr[0];
		sks->addr[1] = ks->addr[1];
		sks->port[0] = ks->port[0];
		sks->port[1] = ks->port[1];
		sks->proto = sp->pfs_1301.proto;
		sks->af = sp->pfs_1301.af;
	}

	/* copy to state */
	bcopy(&sp->pfs_1301.rt_addr, &st->rt_addr, sizeof(st->rt_addr));
	st->creation = (time_uptime - ntohl(sp->pfs_1301.creation)) * 1000;
	st->expire = pf_get_uptime();
	if (sp->pfs_1301.expire) {
		uint32_t timeout;

		timeout = r->timeout[sp->pfs_1301.timeout];
		if (!timeout)
			timeout = V_pf_default_rule.timeout[sp->pfs_1301.timeout];

		/* sp->expire may have been adaptively scaled by export. */
		st->expire -= (timeout - ntohl(sp->pfs_1301.expire)) * 1000;
	}

	st->direction = sp->pfs_1301.direction;
	st->act.log = sp->pfs_1301.log;
	st->timeout = sp->pfs_1301.timeout;

	/* Fields that exist only in the newer wire format. */
	switch (msg_version) {
		case PFSYNC_MSG_VERSION_1301:
			st->state_flags = sp->pfs_1301.state_flags;
			/*
			 * In FreeBSD 13 pfsync lacks many attributes. Copy them
			 * from the rule if possible. If rule can't be matched
			 * clear any set options as we can't recover their
			 * parameters.
			 */
			if (r == &V_pf_default_rule) {
				st->state_flags &= ~PFSTATE_SETMASK;
			} else {
				/*
				 * Similar to pf_rule_to_actions(). This code
				 * won't set the actions properly if they come
				 * from multiple "match" rules as only rule
				 * creating the state is send over pfsync.
				 */
				st->act.qid = r->qid;
				st->act.pqid = r->pqid;
				st->act.rtableid = r->rtableid;
				if (r->scrub_flags & PFSTATE_SETTOS)
					st->act.set_tos = r->set_tos;
				st->act.min_ttl = r->min_ttl;
				st->act.max_mss = r->max_mss;
				st->state_flags |= (r->scrub_flags &
				    (PFSTATE_NODF|PFSTATE_RANDOMID|
				    PFSTATE_SETTOS|PFSTATE_SCRUB_TCP|
				    PFSTATE_SETPRIO));
				if (r->dnpipe || r->dnrpipe) {
					if (r->free_flags & PFRULE_DN_IS_PIPE)
						st->state_flags |= PFSTATE_DN_IS_PIPE;
					else
						st->state_flags &= ~PFSTATE_DN_IS_PIPE;
				}
				st->act.dnpipe = r->dnpipe;
				st->act.dnrpipe = r->dnrpipe;
			}
			break;
		case PFSYNC_MSG_VERSION_1400:
			st->state_flags = ntohs(sp->pfs_1400.state_flags);
			st->act.qid = ntohs(sp->pfs_1400.qid);
			st->act.pqid = ntohs(sp->pfs_1400.pqid);
			st->act.dnpipe = ntohs(sp->pfs_1400.dnpipe);
			st->act.dnrpipe = ntohs(sp->pfs_1400.dnrpipe);
			st->act.rtableid = ntohl(sp->pfs_1400.rtableid);
			st->act.min_ttl = sp->pfs_1400.min_ttl;
			st->act.set_tos = sp->pfs_1400.set_tos;
			st->act.max_mss = ntohs(sp->pfs_1400.max_mss);
			st->act.set_prio[0] = sp->pfs_1400.set_prio[0];
			st->act.set_prio[1] = sp->pfs_1400.set_prio[1];
			st->rt = sp->pfs_1400.rt;
			if (st->rt && (st->rt_kif = pfi_kkif_find(sp->pfs_1400.rt_ifname)) == NULL) {
				if (V_pf_status.debug >= PF_DEBUG_MISC)
					printf("%s: unknown route interface: %s\n",
					    __func__, sp->pfs_1400.rt_ifname);
				if (flags & PFSYNC_SI_IOCTL)
					error = EINVAL;
				else
					error = 0;
				goto cleanup_keys;
			}
			break;
		default:
			panic("%s: Unsupported pfsync_msg_version %d",
			    __func__, msg_version);
	}

	st->id = sp->pfs_1301.id;
	st->creatorid = sp->pfs_1301.creatorid;
	pf_state_peer_ntoh(&sp->pfs_1301.src, &st->src);
	pf_state_peer_ntoh(&sp->pfs_1301.dst, &st->dst);

	st->rule.ptr = r;
	st->nat_rule.ptr = NULL;
	st->anchor.ptr = NULL;

	st->pfsync_time = time_uptime;
	st->sync_state = PFSYNC_S_NONE;

	/* Keep the imported state from being re-announced while inserting. */
	if (!(flags & PFSYNC_SI_IOCTL))
		st->state_flags |= PFSTATE_NOSYNC;

	if ((error = pf_state_insert(kif, kif, skw, sks, st)) != 0)
		goto cleanup_state;

	/* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
	counter_u64_add(r->states_cur, 1);
	counter_u64_add(r->states_tot, 1);

	if (!(flags & PFSYNC_SI_IOCTL)) {
		st->state_flags &= ~PFSTATE_NOSYNC;
		if (st->state_flags & PFSTATE_ACK) {
			/* Peer asked for an ack: queue one and push. */
			struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
			PFSYNC_BUCKET_LOCK(b);
			pfsync_q_ins(st, PFSYNC_S_IACK, true);
			PFSYNC_BUCKET_UNLOCK(b);

			pfsync_push_all(sc);
		}
	}
	st->state_flags &= ~PFSTATE_ACK;
	PF_STATE_UNLOCK(st);

	return (0);

cleanup:
	error = ENOMEM;

cleanup_keys:
	/* Avoid double-free when both pointers share one key. */
	if (skw == sks)
		sks = NULL;
	uma_zfree(V_pf_state_key_z, skw);
	uma_zfree(V_pf_state_key_z, sks);

cleanup_state:	/* pf_state_insert() frees the state keys. */
	if (st) {
		st->timeout = PFTM_UNLINKED; /* appease an assert */
		pf_free_state(st);
	}
	return (error);
}
#ifdef INET
static int
pfsync_input(struct mbuf **mp, int *offp __unused, int proto __unused)
{
struct pfsync_softc *sc = V_pfsyncif;
struct mbuf *m = *mp;
struct ip *ip = mtod(m, struct ip *);
struct pfsync_header *ph;
struct pfsync_subheader subh;
int offset, len, flags = 0;
int rv;
uint16_t count;
PF_RULES_RLOCK_TRACKER;
*mp = NULL;
V_pfsyncstats.pfsyncs_ipackets++;
/* Verify that we have a sync interface configured. */
if (!sc || !sc->sc_sync_if || !V_pf_status.running ||
(sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
goto done;
/* verify that the packet came in on the right interface */
if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
V_pfsyncstats.pfsyncs_badif++;
goto done;
}
if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1);
if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
/* verify that the IP TTL is 255. */
if (ip->ip_ttl != PFSYNC_DFLTTL) {
V_pfsyncstats.pfsyncs_badttl++;
goto done;
}
offset = ip->ip_hl << 2;
if (m->m_pkthdr.len < offset + sizeof(*ph)) {
V_pfsyncstats.pfsyncs_hdrops++;
goto done;
}
if (offset + sizeof(*ph) > m->m_len) {
if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
V_pfsyncstats.pfsyncs_hdrops++;
return (IPPROTO_DONE);
}
ip = mtod(m, struct ip *);
}
ph = (struct pfsync_header *)((char *)ip + offset);
/* verify the version */
if (ph->version != PFSYNC_VERSION) {
V_pfsyncstats.pfsyncs_badver++;
goto done;
}
len = ntohs(ph->len) + offset;
if (m->m_pkthdr.len < len) {
V_pfsyncstats.pfsyncs_badlen++;
goto done;
}
/*
* Trusting pf_chksum during packet processing, as well as seeking
* in interface name tree, require holding PF_RULES_RLOCK().
*/
PF_RULES_RLOCK();
if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
flags = PFSYNC_SI_CKSUM;
offset += sizeof(*ph);
while (offset <= len - sizeof(subh)) {
m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
offset += sizeof(subh);
if (subh.action >= PFSYNC_ACT_MAX) {
V_pfsyncstats.pfsyncs_badact++;
PF_RULES_RUNLOCK();
goto done;
}
count = ntohs(subh.count);
V_pfsyncstats.pfsyncs_iacts[subh.action] += count;
rv = (*pfsync_acts[subh.action])(m, offset, count, flags, subh.action);
if (rv == -1) {
PF_RULES_RUNLOCK();
return (IPPROTO_DONE);
}
offset += rv;
}
PF_RULES_RUNLOCK();
done:
m_freem(m);
return (IPPROTO_DONE);
}
#endif
#ifdef INET6
static int
pfsync6_input(struct mbuf **mp, int *offp __unused, int proto __unused)
{
struct pfsync_softc *sc = V_pfsyncif;
struct mbuf *m = *mp;
struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
struct pfsync_header *ph;
struct pfsync_subheader subh;
int offset, len, flags = 0;
int rv;
uint16_t count;
PF_RULES_RLOCK_TRACKER;
*mp = NULL;
V_pfsyncstats.pfsyncs_ipackets++;
/* Verify that we have a sync interface configured. */
if (!sc || !sc->sc_sync_if || !V_pf_status.running ||
(sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
goto done;
/* verify that the packet came in on the right interface */
if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
V_pfsyncstats.pfsyncs_badif++;
goto done;
}
if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1);
if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
/* verify that the IP TTL is 255. */
if (ip6->ip6_hlim != PFSYNC_DFLTTL) {
V_pfsyncstats.pfsyncs_badttl++;
goto done;
}
offset = sizeof(*ip6);
if (m->m_pkthdr.len < offset + sizeof(*ph)) {
V_pfsyncstats.pfsyncs_hdrops++;
goto done;
}
if (offset + sizeof(*ph) > m->m_len) {
if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
V_pfsyncstats.pfsyncs_hdrops++;
return (IPPROTO_DONE);
}
ip6 = mtod(m, struct ip6_hdr *);
}
ph = (struct pfsync_header *)((char *)ip6 + offset);
/* verify the version */
if (ph->version != PFSYNC_VERSION) {
V_pfsyncstats.pfsyncs_badver++;
goto done;
}
len = ntohs(ph->len) + offset;
if (m->m_pkthdr.len < len) {
V_pfsyncstats.pfsyncs_badlen++;
goto done;
}
/*
* Trusting pf_chksum during packet processing, as well as seeking
* in interface name tree, require holding PF_RULES_RLOCK().
*/
PF_RULES_RLOCK();
if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
flags = PFSYNC_SI_CKSUM;
offset += sizeof(*ph);
while (offset <= len - sizeof(subh)) {
m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
offset += sizeof(subh);
if (subh.action >= PFSYNC_ACT_MAX) {
V_pfsyncstats.pfsyncs_badact++;
PF_RULES_RUNLOCK();
goto done;
}
count = ntohs(subh.count);
V_pfsyncstats.pfsyncs_iacts[subh.action] += count;
rv = (*pfsync_acts[subh.action])(m, offset, count, flags, subh.action);
if (rv == -1) {
PF_RULES_RUNLOCK();
return (IPPROTO_DONE);
}
offset += rv;
}
PF_RULES_RUNLOCK();
done:
m_freem(m);
return (IPPROTO_DONE);
}
#endif
static int
pfsync_in_clr(struct mbuf *m, int offset, int count, int flags, int action)
{
struct pfsync_clr *clr;
struct mbuf *mp;
int len = sizeof(*clr) * count;
int i, offp;
u_int32_t creatorid;
mp = m_pulldown(m, offset, len, &offp);
if (mp == NULL) {
V_pfsyncstats.pfsyncs_badlen++;
return (-1);
}
clr = (struct pfsync_clr *)(mp->m_data + offp);
for (i = 0; i < count; i++) {
creatorid = clr[i].creatorid;
if (clr[i].ifname[0] != '\0' &&
pfi_kkif_find(clr[i].ifname) == NULL)
continue;
for (int i = 0; i <= pf_hashmask; i++) {
struct pf_idhash *ih = &V_pf_idhash[i];
struct pf_kstate *s;
relock:
PF_HASHROW_LOCK(ih);
LIST_FOREACH(s, &ih->states, entry) {
if (s->creatorid == creatorid) {
s->state_flags |= PFSTATE_NOSYNC;
pf_unlink_state(s);
goto relock;
}
}
PF_HASHROW_UNLOCK(ih);
}
}
return (len);
}
/*
 * Handle PFSYNC_ACT_INS_1301/_1400: insert states received from the peer.
 * Each message is validated and handed to pfsync_state_import().  Returns
 * the number of bytes consumed, or -1 on a malformed payload or action.
 */
static int
pfsync_in_ins(struct mbuf *m, int offset, int count, int flags, int action)
{
	struct mbuf *mp;
	union pfsync_state_union *base, *cur;
	int i, offp, total_len, msg_version, msg_len;

	/* The action code selects the wire layout and message size. */
	switch (action) {
	case PFSYNC_ACT_INS_1301:
		msg_len = sizeof(struct pfsync_state_1301);
		msg_version = PFSYNC_MSG_VERSION_1301;
		break;
	case PFSYNC_ACT_INS_1400:
		msg_len = sizeof(struct pfsync_state_1400);
		msg_version = PFSYNC_MSG_VERSION_1400;
		break;
	default:
		V_pfsyncstats.pfsyncs_badact++;
		return (-1);
	}
	total_len = msg_len * count;

	mp = m_pulldown(m, offset, total_len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	base = (union pfsync_state_union *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		cur = (union pfsync_state_union *)((char *)base + msg_len * i);

		/* Check for invalid values. */
		if (cur->pfs_1301.timeout >= PFTM_MAX ||
		    cur->pfs_1301.src.state > PF_TCPS_PROXY_DST ||
		    cur->pfs_1301.dst.state > PF_TCPS_PROXY_DST ||
		    cur->pfs_1301.direction > PF_OUT ||
		    (cur->pfs_1301.af != AF_INET &&
		    cur->pfs_1301.af != AF_INET6)) {
			if (V_pf_status.debug >= PF_DEBUG_MISC)
				printf("%s: invalid value\n", __func__);
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		if (pfsync_state_import(cur, flags, msg_version) == ENOMEM)
			/* Drop out, but process the rest of the actions. */
			break;
	}

	return (total_len);
}
/*
 * Handle PFSYNC_ACT_INS_ACK: the peer acknowledged states we inserted.
 * Release any packet deferral still pending on each acknowledged state.
 * Returns the number of bytes consumed, or -1 on a malformed payload.
 */
static int
pfsync_in_iack(struct mbuf *m, int offset, int count, int flags, int action)
{
	struct pfsync_ins_ack *ia, *iaa;
	struct pf_kstate *st;

	struct mbuf *mp;
	int len = count * sizeof(*ia);
	int offp, i;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	iaa = (struct pfsync_ins_ack *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		ia = &iaa[i];

		st = pf_find_state_byid(ia->id, ia->creatorid);
		if (st == NULL)
			continue;

		if (st->state_flags & PFSTATE_ACK) {
			pfsync_undefer_state(st, 0);
		}
		PF_STATE_UNLOCK(st);
	}

	/*
	 * All messages have a fixed size, so the whole chunk was consumed.
	 * (The original recomputed count * sizeof(struct pfsync_ins_ack)
	 * here under a stale "not yet implemented" comment.)
	 */
	return (len);
}
/*
 * Merge a peer's TCP src/dst peer-state into a local state.  Returns the
 * number of peers (0-2) for which the local state is NEWER than the
 * update; a non-zero result means the peer is stale and we should send
 * our own update back.
 */
static int
pfsync_upd_tcp(struct pf_kstate *st, struct pfsync_state_peer *src,
    struct pfsync_state_peer *dst)
{
	int sync = 0;

	PF_STATE_LOCK_ASSERT(st);

	/*
	 * The state should never go backwards except
	 * for syn-proxy states.  Neither should the
	 * sequence window slide backwards.
	 */
	if ((st->src.state > src->state &&
	    (st->src.state < PF_TCPS_PROXY_SRC ||
	    src->state >= PF_TCPS_PROXY_SRC)) ||

	    (st->src.state == src->state &&
	    SEQ_GT(st->src.seqlo, ntohl(src->seqlo))))
		sync++;
	else
		pf_state_peer_ntoh(src, &st->src);

	if ((st->dst.state > dst->state) ||

	    (st->dst.state >= TCPS_SYN_SENT &&
	    SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo))))
		sync++;
	else
		pf_state_peer_ntoh(dst, &st->dst);

	return (sync);
}
/*
 * Handle PFSYNC_ACT_UPD_1301/_1400: full state updates from the peer.
 * Unknown states are imported as inserts; known states are merged, and if
 * our copy turns out newer we announce it back.  Returns bytes consumed,
 * or -1 on a malformed payload or action.
 */
static int
pfsync_in_upd(struct mbuf *m, int offset, int count, int flags, int action)
{
	struct pfsync_softc *sc = V_pfsyncif;
	union pfsync_state_union *sa, *sp;
	struct pf_kstate *st;
	struct mbuf *mp;
	int sync, offp, i, total_len, msg_len, msg_version;

	switch (action) {
		case PFSYNC_ACT_UPD_1301:
			msg_len = sizeof(struct pfsync_state_1301);
			total_len = msg_len * count;
			msg_version = PFSYNC_MSG_VERSION_1301;
			break;
		case PFSYNC_ACT_UPD_1400:
			msg_len = sizeof(struct pfsync_state_1400);
			total_len = msg_len * count;
			msg_version = PFSYNC_MSG_VERSION_1400;
			break;
		default:
			V_pfsyncstats.pfsyncs_badact++;
			return (-1);
	}

	mp = m_pulldown(m, offset, total_len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (union pfsync_state_union *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		sp = (union pfsync_state_union *)((char *)sa + msg_len * i);

		/* check for invalid values */
		if (sp->pfs_1301.timeout >= PFTM_MAX ||
		    sp->pfs_1301.src.state > PF_TCPS_PROXY_DST ||
		    sp->pfs_1301.dst.state > PF_TCPS_PROXY_DST) {
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				printf("pfsync_input: PFSYNC_ACT_UPD: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		st = pf_find_state_byid(sp->pfs_1301.id, sp->pfs_1301.creatorid);
		if (st == NULL) {
			/* insert the update */
			if (pfsync_state_import(sp, flags, msg_version))
				V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}

		if (st->state_flags & PFSTATE_ACK) {
			pfsync_undefer_state(st, 1);
		}

		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
			sync = pfsync_upd_tcp(st, &sp->pfs_1301.src, &sp->pfs_1301.dst);
		else {
			sync = 0;

			/*
			 * Non-TCP protocol state machine always go
			 * forwards
			 */
			if (st->src.state > sp->pfs_1301.src.state)
				sync++;
			else
				pf_state_peer_ntoh(&sp->pfs_1301.src, &st->src);
			if (st->dst.state > sp->pfs_1301.dst.state)
				sync++;
			else
				pf_state_peer_ntoh(&sp->pfs_1301.dst, &st->dst);
		}
		/* sync < 2 means at least one peer side accepted the update. */
		if (sync < 2) {
			pfsync_alloc_scrub_memory(&sp->pfs_1301.dst, &st->dst);
			pf_state_peer_ntoh(&sp->pfs_1301.dst, &st->dst);
			st->expire = pf_get_uptime();
			st->timeout = sp->pfs_1301.timeout;
		}
		st->pfsync_time = time_uptime;

		if (sync) {
			/* Our copy is newer: tell the peer. */
			V_pfsyncstats.pfsyncs_stale++;

			pfsync_update_state(st);
			PF_STATE_UNLOCK(st);
			pfsync_push_all(sc);
			continue;
		}
		PF_STATE_UNLOCK(st);
	}

	return (total_len);
}
/*
 * Handle PFSYNC_ACT_UPD_C: compressed state updates (id/creatorid plus
 * peer states only).  If we don't know the state we request a full copy;
 * otherwise merge, announcing back if our copy is newer.  Returns bytes
 * consumed, or -1 on a malformed payload.
 */
static int
pfsync_in_upd_c(struct mbuf *m, int offset, int count, int flags, int action)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_upd_c *ua, *up;
	struct pf_kstate *st;
	int len = count * sizeof(*up);
	int sync;
	struct mbuf *mp;
	int offp, i;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	ua = (struct pfsync_upd_c *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		up = &ua[i];

		/* check for invalid values */
		if (up->timeout >= PFTM_MAX ||
		    up->src.state > PF_TCPS_PROXY_DST ||
		    up->dst.state > PF_TCPS_PROXY_DST) {
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				printf("pfsync_input: "
				    "PFSYNC_ACT_UPD_C: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		st = pf_find_state_byid(up->id, up->creatorid);
		if (st == NULL) {
			/* We don't have this state. Ask for it. */
			PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]);
			pfsync_request_update(up->creatorid, up->id);
			PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]);
			continue;
		}

		if (st->state_flags & PFSTATE_ACK) {
			pfsync_undefer_state(st, 1);
		}

		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
			sync = pfsync_upd_tcp(st, &up->src, &up->dst);
		else {
			sync = 0;

			/*
			 * Non-TCP protocol state machine always go
			 * forwards
			 */
			if (st->src.state > up->src.state)
				sync++;
			else
				pf_state_peer_ntoh(&up->src, &st->src);
			if (st->dst.state > up->dst.state)
				sync++;
			else
				pf_state_peer_ntoh(&up->dst, &st->dst);
		}
		/* sync < 2 means at least one peer side accepted the update. */
		if (sync < 2) {
			pfsync_alloc_scrub_memory(&up->dst, &st->dst);
			pf_state_peer_ntoh(&up->dst, &st->dst);
			st->expire = pf_get_uptime();
			st->timeout = up->timeout;
		}
		st->pfsync_time = time_uptime;

		if (sync) {
			/* Our copy is newer: tell the peer. */
			V_pfsyncstats.pfsyncs_stale++;

			pfsync_update_state(st);
			PF_STATE_UNLOCK(st);
			pfsync_push_all(sc);
			continue;
		}
		PF_STATE_UNLOCK(st);
	}

	return (len);
}
/*
 * Handle PFSYNC_ACT_UPD_REQ: the peer is asking us to send updates.  A
 * request with id == 0 and creatorid == 0 starts a full bulk transfer;
 * anything else asks for a single state.  Returns bytes consumed, or -1
 * on a malformed payload.
 */
static int
pfsync_in_ureq(struct mbuf *m, int offset, int count, int flags, int action)
{
	struct pfsync_upd_req *reqs, *req;
	struct pf_kstate *st;
	struct mbuf *mp;
	int len = count * sizeof(*req);
	int i, offp;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	reqs = (struct pfsync_upd_req *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		req = &reqs[i];

		/* The all-zero request means "send me everything". */
		if (req->id == 0 && req->creatorid == 0) {
			pfsync_bulk_start();
			continue;
		}

		st = pf_find_state_byid(req->id, req->creatorid);
		if (st == NULL) {
			V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}
		if (st->state_flags & PFSTATE_NOSYNC) {
			/* This state is not to be shared with the peer. */
			PF_STATE_UNLOCK(st);
			continue;
		}

		pfsync_update_state_req(st);
		PF_STATE_UNLOCK(st);
	}

	return (len);
}
/*
 * Handle PFSYNC_ACT_DEL_C: compressed deletes from the peer.  Unlink each
 * named state locally without echoing the deletion back.  Returns bytes
 * consumed, or -1 on a malformed payload.
 */
static int
pfsync_in_del_c(struct mbuf *m, int offset, int count, int flags, int action)
{
	struct mbuf *mp;
	struct pfsync_del_c *msgs, *d;
	struct pf_kstate *st;
	int len = count * sizeof(*d);
	int offp, i;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	msgs = (struct pfsync_del_c *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		d = &msgs[i];

		st = pf_find_state_byid(d->id, d->creatorid);
		if (st == NULL) {
			V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}

		/* The peer already removed it; don't sync the delete back. */
		st->state_flags |= PFSTATE_NOSYNC;
		pf_unlink_state(st);
	}

	return (len);
}
/*
 * Handle PFSYNC_ACT_BUS: bulk-update status messages from the peer.
 * BUS_START arms the bulk-failure timeout; a BUS_END that postdates our
 * request completes the bulk transfer and lifts the CARP demotion.
 * Returns bytes consumed, or -1 on a malformed payload.
 */
static int
pfsync_in_bus(struct mbuf *m, int offset, int count, int flags, int action)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_bus *bus;
	struct mbuf *mp;
	int len = count * sizeof(*bus);
	int offp;

	PFSYNC_BLOCK(sc);

	/* If we're not waiting for a bulk update, who cares. */
	if (sc->sc_ureq_sent == 0) {
		PFSYNC_BUNLOCK(sc);
		return (len);
	}

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		PFSYNC_BUNLOCK(sc);
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	bus = (struct pfsync_bus *)(mp->m_data + offp);

	switch (bus->status) {
	case PFSYNC_BUS_START:
		/*
		 * Timeout scales with the state limit: 4 seconds plus one
		 * tick per full packet's worth of states.
		 */
		callout_reset(&sc->sc_bulkfail_tmo, 4 * hz +
		    V_pf_limits[PF_LIMIT_STATES].limit /
		    ((sc->sc_ifp->if_mtu - PFSYNC_MINPKT) /
		    sizeof(union pfsync_state_union)),
		    pfsync_bulk_fail, sc);
		if (V_pf_status.debug >= PF_DEBUG_MISC)
			printf("pfsync: received bulk update start\n");
		break;

	case PFSYNC_BUS_END:
		if (time_uptime - ntohl(bus->endtime) >=
		    sc->sc_ureq_sent) {
			/* that's it, we're happy */
			sc->sc_ureq_sent = 0;
			sc->sc_bulk_tries = 0;
			callout_stop(&sc->sc_bulkfail_tmo);
			if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
				(*carp_demote_adj_p)(-V_pfsync_carp_adj,
				    "pfsync bulk done");
			sc->sc_flags |= PFSYNCF_OK;
			if (V_pf_status.debug >= PF_DEBUG_MISC)
				printf("pfsync: received valid "
				    "bulk update end\n");
		} else {
			if (V_pf_status.debug >= PF_DEBUG_MISC)
				printf("pfsync: received invalid "
				    "bulk update end: bad timestamp\n");
		}
		break;
	}
	PFSYNC_BUNLOCK(sc);

	return (len);
}
/*
 * Handle PFSYNC_ACT_TDB: IPsec replay-counter updates.  Without the IPSEC
 * option the payload is simply skipped.  Returns bytes consumed, or -1 on
 * a malformed payload.
 */
static int
pfsync_in_tdb(struct mbuf *m, int offset, int count, int flags, int action)
{
	int len = count * sizeof(struct pfsync_tdb);

#if defined(IPSEC)
	struct pfsync_tdb *tp;
	struct mbuf *mp;
	int offp;
	int i;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	tp = (struct pfsync_tdb *)(mp->m_data + offp);

	for (i = 0; i < count; i++)
		pfsync_update_net_tdb(&tp[i]);
#endif

	return (len);
}
#if defined(IPSEC)
/*
 * Update an in-kernel tdb. Silently fail if no tdb is found.  Replay and
 * byte counters are only allowed to move forward; anything else is
 * counted as a bad state.
 */
static void
pfsync_update_net_tdb(struct pfsync_tdb *pt)
{
	struct tdb *tdb;

	/* check for invalid values */
	if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
	    (pt->dst.sa.sa_family != AF_INET &&
	    pt->dst.sa.sa_family != AF_INET6))
		goto bad;

	tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
	if (tdb) {
		pt->rpl = ntohl(pt->rpl);
		pt->cur_bytes = (unsigned long long)be64toh(pt->cur_bytes);

		/* Neither replay nor byte counter should ever decrease. */
		if (pt->rpl < tdb->tdb_rpl ||
		    pt->cur_bytes < tdb->tdb_cur_bytes) {
			goto bad;
		}

		tdb->tdb_rpl = pt->rpl;
		tdb->tdb_cur_bytes = pt->cur_bytes;
	}
	return;

bad:
	if (V_pf_status.debug >= PF_DEBUG_MISC)
		printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
		    "invalid value\n");
	V_pfsyncstats.pfsyncs_badstate++;
}
#endif
/*
 * Handle the EOF subheader.  It is only valid as the very last thing in
 * the packet; anything else is counted as a length error.  Consumes the
 * mbuf; returning -1 tells the dispatch loop to stop.
 */
static int
pfsync_in_eof(struct mbuf *m, int offset, int count, int flags, int action)
{
	int at_end;

	/* EOF must land exactly on the end of the packet. */
	at_end = (offset == m->m_pkthdr.len);
	if (!at_end)
		V_pfsyncstats.pfsyncs_badlen++;

	/* Done with this packet; free it and stop processing. */
	m_freem(m);
	return (-1);
}
/*
 * Handle an unknown/unsupported action: count it, discard the whole
 * packet and abort further processing.
 */
static int
pfsync_in_error(struct mbuf *m, int offset, int count, int flags, int action)
{
	/* Account for the bogus action before dropping the packet. */
	V_pfsyncstats.pfsyncs_badact++;
	m_freem(m);

	return (-1);
}
/*
 * if_output method for the pfsync pseudo-interface.  Nothing may be
 * transmitted through it directly, so any mbuf handed to us is simply
 * discarded.
 */
static int
pfsyncoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    struct route *rt)
{
	/* pfsync generates its own traffic; drop anything routed here. */
	m_freem(m);

	return (0);
}
/* ARGSUSED */
/*
 * ioctl handler for the pfsync interface: interface up/down, MTU
 * changes, and the pfsync-specific get/set configuration requests
 * (both the legacy struct pfsyncreq and the nvlist-based variants).
 */
static int
pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct pfsync_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct pfsyncreq pfsyncr;
	size_t nvbuflen;
	int error;
	int c;

	switch (cmd) {
	case SIOCSIFFLAGS:
		PFSYNC_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			ifp->if_drv_flags |= IFF_DRV_RUNNING;
			PFSYNC_UNLOCK(sc);
			/* Hook pfsync into pf now that we are running. */
			pfsync_pointers_init();
		} else {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			PFSYNC_UNLOCK(sc);
			pfsync_pointers_uninit();
		}
		break;
	case SIOCSIFMTU:
		/* MTU is bounded by the sync interface's MTU. */
		if (!sc->sc_sync_if ||
		    ifr->ifr_mtu <= PFSYNC_MINPKT ||
		    ifr->ifr_mtu > sc->sc_sync_if->if_mtu)
			return (EINVAL);
		if (ifr->ifr_mtu < ifp->if_mtu) {
			/*
			 * Shrinking: flush any bucket with pending data so
			 * an already-built packet can't exceed the new MTU.
			 */
			for (c = 0; c < pfsync_buckets; c++) {
				PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]);
				if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT)
					pfsync_sendout(1, c);
				PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]);
			}
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCGETPFSYNC:
		bzero(&pfsyncr, sizeof(pfsyncr));
		PFSYNC_LOCK(sc);
		if (sc->sc_sync_if) {
			strlcpy(pfsyncr.pfsyncr_syncdev,
			    sc->sc_sync_if->if_xname, IFNAMSIZ);
		}
		/* Legacy interface: can only report an IPv4 peer. */
		pfsyncr.pfsyncr_syncpeer = ((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr;
		pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
		pfsyncr.pfsyncr_defer = sc->sc_flags;
		PFSYNC_UNLOCK(sc);
		return (copyout(&pfsyncr, ifr_data_get_ptr(ifr),
		    sizeof(pfsyncr)));
	case SIOCGETPFSYNCNV:
	    {
		nvlist_t *nvl_syncpeer;
		nvlist_t *nvl = nvlist_create(0);

		if (nvl == NULL)
			return (ENOMEM);

		/*
		 * NOTE(review): sc fields are read here without holding
		 * PFSYNC_LOCK, unlike SIOCGETPFSYNC above — confirm a torn
		 * read is acceptable on this path.
		 */
		if (sc->sc_sync_if)
			nvlist_add_string(nvl, "syncdev", sc->sc_sync_if->if_xname);
		nvlist_add_number(nvl, "maxupdates", sc->sc_maxupdates);
		nvlist_add_number(nvl, "flags", sc->sc_flags);
		nvlist_add_number(nvl, "version", sc->sc_version);
		if ((nvl_syncpeer = pfsync_sockaddr_to_syncpeer_nvlist(&sc->sc_sync_peer)) != NULL)
			nvlist_add_nvlist(nvl, "syncpeer", nvl_syncpeer);

		void *packed = NULL;
		packed = nvlist_pack(nvl, &nvbuflen);
		if (packed == NULL) {
			free(packed, M_NVLIST);	/* no-op: packed is NULL */
			nvlist_destroy(nvl);
			return (ENOMEM);
		}

		if (nvbuflen > ifr->ifr_cap_nv.buf_length) {
			/* Tell userland how large a buffer it must supply. */
			ifr->ifr_cap_nv.length = nvbuflen;
			ifr->ifr_cap_nv.buffer = NULL;
			free(packed, M_NVLIST);
			nvlist_destroy(nvl);
			return (EFBIG);
		}

		ifr->ifr_cap_nv.length = nvbuflen;
		error = copyout(packed, ifr->ifr_cap_nv.buffer, nvbuflen);

		nvlist_destroy(nvl);
		nvlist_destroy(nvl_syncpeer);
		free(packed, M_NVLIST);
		break;
	    }
	case SIOCSETPFSYNC:
	    {
		struct pfsync_kstatus status;

		if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
			return (error);
		if ((error = copyin(ifr_data_get_ptr(ifr), &pfsyncr,
		    sizeof(pfsyncr))))
			return (error);

		/* Translate the legacy request and apply it. */
		memset((char *)&status, 0, sizeof(struct pfsync_kstatus));
		pfsync_pfsyncreq_to_kstatus(&pfsyncr, &status);

		error = pfsync_kstatus_to_softc(&status, sc);
		return (error);
	    }
	case SIOCSETPFSYNCNV:
	    {
		struct pfsync_kstatus status;
		void *data;
		nvlist_t *nvl;

		if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
			return (error);
		if (ifr->ifr_cap_nv.length > IFR_CAP_NV_MAXBUFSIZE)
			return (EINVAL);

		data = malloc(ifr->ifr_cap_nv.length, M_TEMP, M_WAITOK);

		if ((error = copyin(ifr->ifr_cap_nv.buffer, data,
		    ifr->ifr_cap_nv.length)) != 0) {
			free(data, M_TEMP);
			return (error);
		}

		if ((nvl = nvlist_unpack(data, ifr->ifr_cap_nv.length, 0)) == NULL) {
			free(data, M_TEMP);
			return (EINVAL);
		}

		memset((char *)&status, 0, sizeof(struct pfsync_kstatus));
		pfsync_nvstatus_to_kstatus(nvl, &status);

		nvlist_destroy(nvl);
		free(data, M_TEMP);

		error = pfsync_kstatus_to_softc(&status, sc);
		return (error);
	    }
	default:
		return (ENOTTY);
	}

	return (0);
}
/*
 * Queue write method: serialize a state into 'buf' using the 13.1
 * (1301) wire format.
 */
static void
pfsync_out_state_1301(struct pf_kstate *st, void *buf)
{
	union pfsync_state_union *out = buf;

	pfsync_state_export(out, st, PFSYNC_MSG_VERSION_1301);
}
/*
 * Queue write method: serialize a state into 'buf' using the 14.0
 * (1400) wire format.
 */
static void
pfsync_out_state_1400(struct pf_kstate *st, void *buf)
{
	union pfsync_state_union *out = buf;

	pfsync_state_export(out, st, PFSYNC_MSG_VERSION_1400);
}
/*
 * Queue write method: build an insert-acknowledgement message for 'st'
 * in 'buf'.  Both fields are already in network byte order in the state.
 */
static void
pfsync_out_iack(struct pf_kstate *st, void *buf)
{
	struct pfsync_ins_ack *ack = buf;

	ack->id = st->id;
	ack->creatorid = st->creatorid;
}
/*
 * Queue write method: build a compressed update message for 'st' in
 * 'buf', converting the peer state to network byte order.
 */
static void
pfsync_out_upd_c(struct pf_kstate *st, void *buf)
{
	struct pfsync_upd_c *msg = buf;

	memset(msg, 0, sizeof(*msg));

	msg->id = st->id;
	msg->creatorid = st->creatorid;
	msg->timeout = st->timeout;
	pf_state_peer_hton(&st->src, &msg->src);
	pf_state_peer_hton(&st->dst, &msg->dst);
}
/*
 * Queue write method: build a compressed delete message for 'st' in
 * 'buf'.  Also marks the state NOSYNC so no further sync traffic is
 * generated for it.
 */
static void
pfsync_out_del_c(struct pf_kstate *st, void *buf)
{
	struct pfsync_del_c *msg = buf;

	msg->id = st->id;
	msg->creatorid = st->creatorid;

	/* The peer is being told to forget this state; so do we. */
	st->state_flags |= PFSTATE_NOSYNC;
}
/*
 * Discard all pending transmit work in every bucket: release the
 * references held by queued states, free pending update-request items
 * and any "plus" region, and reset each bucket's length to the
 * empty-packet size.
 */
static void
pfsync_drop(struct pfsync_softc *sc)
{
	struct pf_kstate *st, *next;
	struct pfsync_upd_req_item *ur;
	struct pfsync_bucket *b;
	int c;
	enum pfsync_q_id q;

	for (c = 0; c < pfsync_buckets; c++) {
		b = &sc->sc_buckets[c];
		for (q = 0; q < PFSYNC_Q_COUNT; q++) {
			if (TAILQ_EMPTY(&b->b_qs[q]))
				continue;
			TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, next) {
				KASSERT(st->sync_state == pfsync_qid_sstate[q],
					("%s: st->sync_state == q",
						__func__));
				/* State is no longer queued anywhere. */
				st->sync_state = PFSYNC_S_NONE;
				pf_release_state(st);
			}
			TAILQ_INIT(&b->b_qs[q]);
		}

		while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) {
			TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry);
			free(ur, M_PFSYNC);
		}

		/* Back to the size of a bare (header-only) packet. */
		b->b_len = PFSYNC_MINPKT;
		free(b->b_plus, M_PFSYNC);
		b->b_plus = NULL;
		b->b_pluslen = 0;
	}
}
/*
 * Assemble everything queued in bucket 'c' into one pfsync packet:
 * IP/IPv6 header, pfsync header, one subheader per non-empty queue,
 * pending update requests, any caller-built "plus" region, and a
 * trailing EOF subheader.  The packet is handed to bpf listeners and
 * enqueued on the bucket's send queue; if 'schedswi' is set the
 * transmit software interrupt is scheduled.  Called with the bucket
 * lock held.
 */
static void
pfsync_sendout(int schedswi, int c)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m;
	struct pfsync_header *ph;
	struct pfsync_subheader *subh;
	struct pf_kstate *st, *st_next;
	struct pfsync_upd_req_item *ur;
	struct pfsync_bucket *b = &sc->sc_buckets[c];
	size_t len;
	int aflen, offset, count = 0;
	enum pfsync_q_id q;

	KASSERT(sc != NULL, ("%s: null sc", __func__));
	KASSERT(b->b_len > PFSYNC_MINPKT,
	    ("%s: sc_len %zu", __func__, b->b_len));
	PFSYNC_BUCKET_LOCK_ASSERT(b);

	/* No bpf listener and no sync interface: nobody to talk to. */
	if (!bpf_peers_present(ifp->if_bpf) && sc->sc_sync_if == NULL) {
		pfsync_drop(sc);
		return;
	}

	m = m_get2(max_linkhdr + b->b_len, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
		V_pfsyncstats.pfsyncs_onomem++;
		return;
	}
	m->m_data += max_linkhdr;
	bzero(m->m_data, b->b_len);

	len = b->b_len;

	/* build the ip header */
	switch (sc->sc_sync_peer.ss_family) {
#ifdef INET
	case AF_INET:
	    {
		struct ip *ip;

		ip = mtod(m, struct ip *);
		bcopy(&sc->sc_template.ipv4, ip, sizeof(*ip));
		aflen = offset = sizeof(*ip);

		/*
		 * b_len was accounted assuming the larger template;
		 * shrink to the actual v4 header size.
		 */
		len -= sizeof(union inet_template) - sizeof(struct ip);
		ip->ip_len = htons(len);
		ip_fillid(ip);
		break;
	    }
#endif
#ifdef INET6
	case AF_INET6:
	    {
		struct ip6_hdr *ip6;

		ip6 = mtod(m, struct ip6_hdr *);
		bcopy(&sc->sc_template.ipv6, ip6, sizeof(*ip6));
		aflen = offset = sizeof(*ip6);

		len -= sizeof(union inet_template) - sizeof(struct ip6_hdr);
		ip6->ip6_plen = htons(len);
		break;
	    }
#endif
	default:
		m_freem(m);
		return;
	}
	m->m_len = m->m_pkthdr.len = len;

	/* build the pfsync header */
	ph = (struct pfsync_header *)(m->m_data + offset);
	offset += sizeof(*ph);

	ph->version = PFSYNC_VERSION;
	ph->len = htons(len - aflen);
	bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);

	/* walk the queues */
	for (q = 0; q < PFSYNC_Q_COUNT; q++) {
		if (TAILQ_EMPTY(&b->b_qs[q]))
			continue;

		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, st_next) {
			KASSERT(st->sync_state == pfsync_qid_sstate[q],
				("%s: st->sync_state == q",
					__func__));
			/*
			 * XXXGL: some of write methods do unlocked reads
			 * of state data :(
			 */
			pfsync_qs[q].write(st, m->m_data + offset);
			offset += pfsync_qs[q].len;
			st->sync_state = PFSYNC_S_NONE;
			pf_release_state(st);
			count++;
		}
		TAILQ_INIT(&b->b_qs[q]);

		subh->action = pfsync_qs[q].action;
		subh->count = htons(count);
		V_pfsyncstats.pfsyncs_oacts[pfsync_qs[q].action] += count;
	}

	if (!TAILQ_EMPTY(&b->b_upd_req_list)) {
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) {
			TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry);

			bcopy(&ur->ur_msg, m->m_data + offset,
			    sizeof(ur->ur_msg));
			offset += sizeof(ur->ur_msg);

			free(ur, M_PFSYNC);

			count++;
		}

		subh->action = PFSYNC_ACT_UPD_REQ;
		subh->count = htons(count);
		V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_UPD_REQ] += count;
	}

	/* has someone built a custom region for us to add? */
	if (b->b_plus != NULL) {
		bcopy(b->b_plus, m->m_data + offset, b->b_pluslen);
		offset += b->b_pluslen;

		free(b->b_plus, M_PFSYNC);
		b->b_plus = NULL;
		b->b_pluslen = 0;
	}

	subh = (struct pfsync_subheader *)(m->m_data + offset);
	offset += sizeof(*subh);

	subh->action = PFSYNC_ACT_EOF;
	subh->count = htons(1);
	V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_EOF]++;

	/* we're done, let's put it on the wire */
	if (bpf_peers_present(ifp->if_bpf)) {
		/* Tap without the IP header, then restore. */
		m->m_data += aflen;
		m->m_len = m->m_pkthdr.len = len - aflen;
		bpf_mtap(ifp->if_bpf, m);
		m->m_data -= aflen;
		m->m_len = m->m_pkthdr.len = len;
	}

	if (sc->sc_sync_if == NULL) {
		/* bpf-only mode: nothing to transmit. */
		b->b_len = PFSYNC_MINPKT;
		m_freem(m);
		return;
	}

	if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
	if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
	b->b_len = PFSYNC_MINPKT;

	if (!_IF_QFULL(&b->b_snd))
		_IF_ENQUEUE(&b->b_snd, m);
	else {
		m_freem(m);
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OQDROPS, 1);
	}
	if (schedswi)
		swi_sched(V_pfsync_swi_cookie, 0);
}
/*
 * Queue a newly created state for an insert announcement.  States
 * marked NOSYNC, states from a NOSYNC rule, and pfsync's own traffic
 * are never announced.
 */
static void
pfsync_insert_state(struct pf_kstate *st)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

	if (st->state_flags & PFSTATE_NOSYNC)
		return;

	if ((st->rule.ptr->rule_flag & PFRULE_NOSYNC) ||
	    st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
		/* Sticky: later updates/deletes will be skipped too. */
		st->state_flags |= PFSTATE_NOSYNC;
		return;
	}

	KASSERT(st->sync_state == PFSYNC_S_NONE,
		("%s: st->sync_state %u", __func__, st->sync_state));

	PFSYNC_BUCKET_LOCK(b);
	/* First item in an empty bucket arms the flush timer. */
	if (b->b_len == PFSYNC_MINPKT)
		callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);

	pfsync_q_ins(st, PFSYNC_S_INS, true);
	PFSYNC_BUCKET_UNLOCK(b);

	st->sync_updates = 0;
}
/*
 * Defer transmission of the packet that created state 'st' until the
 * peer acknowledges the state or the deferral timeout fires.  Returns
 * 1 if the mbuf was taken over by the deferral machinery, 0 if the
 * caller should transmit it normally.
 */
static int
pfsync_defer(struct pf_kstate *st, struct mbuf *m)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_deferral *pd;
	struct pfsync_bucket *b;

	if (m->m_flags & (M_BCAST|M_MCAST))
		return (0);

	if (sc == NULL)
		return (0);

	b = pfsync_get_bucket(sc, st);

	PFSYNC_LOCK(sc);

	if (!(sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) ||
	    !(sc->sc_flags & PFSYNCF_DEFER)) {
		PFSYNC_UNLOCK(sc);
		return (0);
	}

	/* Hand over from the softc lock to the bucket lock. */
	PFSYNC_BUCKET_LOCK(b);
	PFSYNC_UNLOCK(sc);

	/* Cap outstanding deferrals: release the oldest one first. */
	if (b->b_deferred >= 128)
		pfsync_undefer(TAILQ_FIRST(&b->b_deferrals), 0);

	pd = malloc(sizeof(*pd), M_PFSYNC, M_NOWAIT);
	if (pd == NULL) {
		PFSYNC_BUCKET_UNLOCK(b);
		return (0);
	}
	b->b_deferred++;

	/* On later transmit the state exists; skip firewall re-evaluation. */
	m->m_flags |= M_SKIP_FIREWALL;
	st->state_flags |= PFSTATE_ACK;

	pd->pd_sc = sc;
	pd->pd_st = st;
	pf_ref_state(st);
	pd->pd_m = m;

	TAILQ_INSERT_TAIL(&b->b_deferrals, pd, pd_entry);
	/* Timeout runs with the bucket mutex held and unlocks it itself. */
	callout_init_mtx(&pd->pd_tmo, &b->b_mtx, CALLOUT_RETURNUNLOCKED);
	callout_reset(&pd->pd_tmo, (V_pfsync_defer_timeout * hz) / 1000,
	    pfsync_defer_tmo, pd);

	pfsync_push(b);
	PFSYNC_BUCKET_UNLOCK(b);

	return (1);
}
/*
 * Resolve a deferral: remove it from its bucket, drop the state
 * reference, and either free the held mbuf ('drop') or queue it for
 * transmission.  Called with the bucket lock held.
 */
static void
pfsync_undefer(struct pfsync_deferral *pd, int drop)
{
	struct pfsync_softc *sc = pd->pd_sc;
	struct mbuf *m = pd->pd_m;
	struct pf_kstate *st = pd->pd_st;
	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

	PFSYNC_BUCKET_LOCK_ASSERT(b);

	TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry);
	b->b_deferred--;
	pd->pd_st->state_flags &= ~PFSTATE_ACK;	/* XXX: locking! */
	free(pd, M_PFSYNC);
	pf_release_state(st);

	if (drop)
		m_freem(m);
	else {
		/* Let the swi transmit the original packet. */
		_IF_ENQUEUE(&b->b_snd, m);
		pfsync_push(b);
	}
}
/*
 * Deferral timeout: the peer never acknowledged the state in time, so
 * transmit the held packet ourselves.  Runs as a callout holding the
 * bucket mutex (CALLOUT_RETURNUNLOCKED: we release it explicitly).
 */
static void
pfsync_defer_tmo(void *arg)
{
	struct epoch_tracker et;
	struct pfsync_deferral *pd = arg;
	struct pfsync_softc *sc = pd->pd_sc;
	struct mbuf *m = pd->pd_m;
	struct pf_kstate *st = pd->pd_st;
	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

	PFSYNC_BUCKET_LOCK_ASSERT(b);

	TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry);
	b->b_deferred--;
	pd->pd_st->state_flags &= ~PFSTATE_ACK;	/* XXX: locking! */
	PFSYNC_BUCKET_UNLOCK(b);
	free(pd, M_PFSYNC);

	if (sc->sc_sync_if == NULL) {
		/* Sync interface went away; just drop the packet. */
		pf_release_state(st);
		m_freem(m);
		return;
	}

	NET_EPOCH_ENTER(et);
	CURVNET_SET(sc->sc_sync_if->if_vnet);

	pfsync_tx(sc, m);

	pf_release_state(st);

	CURVNET_RESTORE();
	NET_EPOCH_EXIT(et);
}
/*
 * Find and resolve the deferral belonging to 'st'.  Must be called
 * with the state's bucket lock held; panics if no deferral exists,
 * since callers only get here when PFSTATE_ACK is set.
 */
static void
pfsync_undefer_state_locked(struct pf_kstate *st, int drop)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_deferral *pd;
	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

	PFSYNC_BUCKET_LOCK_ASSERT(b);

	TAILQ_FOREACH(pd, &b->b_deferrals, pd_entry) {
		if (pd->pd_st != st)
			continue;
		/*
		 * Only undefer when we actually stopped the callout;
		 * otherwise the timeout handler owns the deferral.
		 */
		if (callout_stop(&pd->pd_tmo) > 0)
			pfsync_undefer(pd, drop);
		return;
	}

	panic("%s: unable to find deferred state", __func__);
}
/*
 * Locked wrapper around pfsync_undefer_state_locked(): acquires the
 * state's bucket lock for the duration of the lookup.
 */
static void
pfsync_undefer_state(struct pf_kstate *st, int drop)
{
	struct pfsync_bucket *bucket;

	bucket = pfsync_get_bucket(V_pfsyncif, st);

	PFSYNC_BUCKET_LOCK(bucket);
	pfsync_undefer_state_locked(st, drop);
	PFSYNC_BUCKET_UNLOCK(bucket);
}
/*
 * Map a state to its transmit bucket via the state's ID hash, so the
 * same state always lands in the same bucket.
 */
static struct pfsync_bucket*
pfsync_get_bucket(struct pfsync_softc *sc, struct pf_kstate *st)
{
	return (&sc->sc_buckets[PF_IDHASH(st) % pfsync_buckets]);
}
/*
 * Announce a state change to the peer.  Depending on the state's
 * current sync queue this either piggybacks on an already queued
 * message or queues a new compressed update.
 */
static void
pfsync_update_state(struct pf_kstate *st)
{
	struct pfsync_softc *sc = V_pfsyncif;
	bool sync = false, ref = true;
	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

	PF_STATE_LOCK_ASSERT(st);
	PFSYNC_BUCKET_LOCK(b);

	/* Activity on the state implies the peer saw it: stop deferring. */
	if (st->state_flags & PFSTATE_ACK)
		pfsync_undefer_state_locked(st, 0);
	if (st->state_flags & PFSTATE_NOSYNC) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st, true, b);
		PFSYNC_BUCKET_UNLOCK(b);
		return;
	}

	if (b->b_len == PFSYNC_MINPKT)
		callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);

	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
	case PFSYNC_S_UPD:
	case PFSYNC_S_INS:
		/* we're already handling it */
		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) {
			/* Force a flush once enough TCP updates pile up. */
			st->sync_updates++;
			if (st->sync_updates >= sc->sc_maxupdates)
				sync = true;
		}
		break;

	case PFSYNC_S_IACK:
		/* Replace the pending iack; reuse its reference. */
		pfsync_q_del(st, false, b);
		ref = false;
		/* FALLTHROUGH */

	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_UPD_C, ref);
		st->sync_updates = 0;
		break;

	default:
		panic("%s: unexpected sync state %d", __func__, st->sync_state);
	}

	/* Push promptly for forced syncs and recently active states. */
	if (sync || (time_uptime - st->pfsync_time) < 2)
		pfsync_push(b);

	PFSYNC_BUCKET_UNLOCK(b);
}
/*
 * Queue an update request for (creatorid, id) — used to ask the peer
 * for a full copy of a state we only have partial knowledge of, and
 * (with zero arguments) to request a bulk update.  Always uses
 * bucket 0.
 */
static void
pfsync_request_update(u_int32_t creatorid, u_int64_t id)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_bucket *b = &sc->sc_buckets[0];
	struct pfsync_upd_req_item *item;
	size_t nlen = sizeof(struct pfsync_upd_req);

	PFSYNC_BUCKET_LOCK_ASSERT(b);

	/*
	 * This code does a bit to prevent multiple update requests for the
	 * same state being generated. It searches current subheader queue,
	 * but it doesn't lookup into queue of already packed datagrams.
	 */
	TAILQ_FOREACH(item, &b->b_upd_req_list, ur_entry)
		if (item->ur_msg.id == id &&
		    item->ur_msg.creatorid == creatorid)
			return;

	item = malloc(sizeof(*item), M_PFSYNC, M_NOWAIT);
	if (item == NULL)
		return; /* XXX stats */

	item->ur_msg.id = id;
	item->ur_msg.creatorid = creatorid;

	/* First request in the packet also needs a subheader. */
	if (TAILQ_EMPTY(&b->b_upd_req_list))
		nlen += sizeof(struct pfsync_subheader);

	if (b->b_len + nlen > sc->sc_ifp->if_mtu) {
		/* Packet is full: flush, then start a fresh subheader. */
		pfsync_sendout(0, 0);

		nlen = sizeof(struct pfsync_subheader) +
		    sizeof(struct pfsync_upd_req);
	}

	TAILQ_INSERT_TAIL(&b->b_upd_req_list, item, ur_entry);
	b->b_len += nlen;

	pfsync_push(b);
}
/*
 * Queue a full (uncompressed) state update, typically in response to a
 * peer's update request or during a bulk transfer.  Returns true when
 * the bucket can no longer fit another full state, so the bulk sender
 * knows to stop and reschedule.
 */
static bool
pfsync_update_state_req(struct pf_kstate *st)
{
	struct pfsync_softc *sc = V_pfsyncif;
	bool ref = true, full = false;
	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

	PF_STATE_LOCK_ASSERT(st);
	PFSYNC_BUCKET_LOCK(b);

	if (st->state_flags & PFSTATE_NOSYNC) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st, true, b);
		PFSYNC_BUCKET_UNLOCK(b);
		return (full);
	}

	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
	case PFSYNC_S_IACK:
		/* Upgrade the queued message; reuse its reference. */
		pfsync_q_del(st, false, b);
		ref = false;
		/* FALLTHROUGH */

	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_UPD, ref);
		pfsync_push(b);
		break;

	case PFSYNC_S_INS:
	case PFSYNC_S_UPD:
	case PFSYNC_S_DEL_C:
		/* we're already handling it */
		break;

	default:
		panic("%s: unexpected sync state %d", __func__, st->sync_state);
	}

	/* Report back whether another full state would overflow the MTU. */
	if ((sc->sc_ifp->if_mtu - b->b_len) < sizeof(union pfsync_state_union))
		full = true;

	PFSYNC_BUCKET_UNLOCK(b);

	return (full);
}
/*
 * Announce deletion of a state.  Any pending message for the state is
 * first removed or converted; a state that was never announced
 * (PFSYNC_S_INS still queued) is silently forgotten.
 */
static void
pfsync_delete_state(struct pf_kstate *st)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
	bool ref = true;

	PFSYNC_BUCKET_LOCK(b);
	/* Deleting the state: drop the deferred packet too. */
	if (st->state_flags & PFSTATE_ACK)
		pfsync_undefer_state_locked(st, 1);
	if (st->state_flags & PFSTATE_NOSYNC) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st, true, b);
		PFSYNC_BUCKET_UNLOCK(b);
		return;
	}

	if (b->b_len == PFSYNC_MINPKT)
		callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);

	switch (st->sync_state) {
	case PFSYNC_S_INS:
		/* We never got to tell the world so just forget about it. */
		pfsync_q_del(st, true, b);
		break;

	case PFSYNC_S_UPD_C:
	case PFSYNC_S_UPD:
	case PFSYNC_S_IACK:
		/* Convert the queued message; reuse its reference. */
		pfsync_q_del(st, false, b);
		ref = false;
		/* FALLTHROUGH */

	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_DEL_C, ref);
		break;

	default:
		panic("%s: unexpected sync state %d", __func__, st->sync_state);
	}

	PFSYNC_BUCKET_UNLOCK(b);
}
/*
 * Tell the peer to clear all states created by 'creatorid' on
 * interface 'ifname'.  The message is built on the stack and sent via
 * the "plus" mechanism so it bypasses the per-state queues.
 */
static void
pfsync_clear_states(u_int32_t creatorid, const char *ifname)
{
	struct {
		struct pfsync_subheader subh;
		struct pfsync_clr clr;
	} __packed r;

	memset(&r, 0, sizeof(r));

	/* One CLR action in this subheader. */
	r.subh.action = PFSYNC_ACT_CLR;
	r.subh.count = htons(1);
	V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_CLR]++;

	r.clr.creatorid = creatorid;
	strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname));

	pfsync_send_plus(&r, sizeof(r));
}
/*
 * Translate a state's sync_state into the transmit queue to use,
 * taking the negotiated message version into account for the
 * version-dependent insert/update formats.  Panics on an unknown
 * sync_state or an unsupported message version.
 */
static enum pfsync_q_id
pfsync_sstate_to_qid(u_int8_t sync_state)
{
	struct pfsync_softc *sc = V_pfsyncif;

	switch (sync_state) {
	case PFSYNC_S_INS:
		if (sc->sc_version == PFSYNC_MSG_VERSION_1301)
			return PFSYNC_Q_INS_1301;
		if (sc->sc_version == PFSYNC_MSG_VERSION_1400)
			return PFSYNC_Q_INS_1400;
		break;
	case PFSYNC_S_IACK:
		return PFSYNC_Q_IACK;
	case PFSYNC_S_UPD:
		if (sc->sc_version == PFSYNC_MSG_VERSION_1301)
			return PFSYNC_Q_UPD_1301;
		if (sc->sc_version == PFSYNC_MSG_VERSION_1400)
			return PFSYNC_Q_UPD_1400;
		break;
	case PFSYNC_S_UPD_C:
		return PFSYNC_Q_UPD_C;
	case PFSYNC_S_DEL_C:
		return PFSYNC_Q_DEL_C;
	default:
		panic("%s: Unsupported st->sync_state 0x%02x",
		    __func__, sync_state);
	}

	/* Reached only when sc_version matched neither known version. */
	panic("%s: Unsupported pfsync_msg_version %d",
	    __func__, sc->sc_version);
}
/*
 * Insert a state into the transmit queue matching 'sync_state',
 * accounting for the message size (plus a subheader if the queue was
 * empty) and flushing the bucket first when the addition would exceed
 * the MTU.  Takes a new state reference when 'ref' is true.
 */
static void
pfsync_q_ins(struct pf_kstate *st, int sync_state, bool ref)
{
	enum pfsync_q_id q = pfsync_sstate_to_qid(sync_state);
	struct pfsync_softc *sc = V_pfsyncif;
	size_t nlen = pfsync_qs[q].len;
	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

	PFSYNC_BUCKET_LOCK_ASSERT(b);

	KASSERT(st->sync_state == PFSYNC_S_NONE,
		("%s: st->sync_state %u", __func__, st->sync_state));
	KASSERT(b->b_len >= PFSYNC_MINPKT, ("pfsync pkt len is too low %zu",
	    b->b_len));

	/* First message of this kind also needs a subheader. */
	if (TAILQ_EMPTY(&b->b_qs[q]))
		nlen += sizeof(struct pfsync_subheader);

	if (b->b_len + nlen > sc->sc_ifp->if_mtu) {
		/* Flush, then start a fresh subheader in the new packet. */
		pfsync_sendout(1, b->b_id);

		nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len;
	}

	b->b_len += nlen;
	TAILQ_INSERT_TAIL(&b->b_qs[q], st, sync_list);
	st->sync_state = pfsync_qid_sstate[q];
	if (ref)
		pf_ref_state(st);
}
/*
 * Remove a state from its current transmit queue, adjusting the
 * bucket's accounted length (including the subheader once the queue
 * becomes empty).  Drops the queue's state reference when 'unref' is
 * true; callers pass false when they are about to requeue the state.
 */
static void
pfsync_q_del(struct pf_kstate *st, bool unref, struct pfsync_bucket *b)
{
	enum pfsync_q_id q;

	PFSYNC_BUCKET_LOCK_ASSERT(b);
	KASSERT(st->sync_state != PFSYNC_S_NONE,
		("%s: st->sync_state != PFSYNC_S_NONE", __func__));

	q = pfsync_sstate_to_qid(st->sync_state);
	b->b_len -= pfsync_qs[q].len;
	TAILQ_REMOVE(&b->b_qs[q], st, sync_list);
	st->sync_state = PFSYNC_S_NONE;
	if (unref)
		pf_release_state(st);

	/* Last entry gone: the subheader is no longer needed either. */
	if (TAILQ_EMPTY(&b->b_qs[q]))
		b->b_len -= sizeof(struct pfsync_subheader);
}
/*
 * Begin answering a peer's bulk update request: reset the bulk-send
 * cursor, announce BUS_START, and kick off the incremental
 * pfsync_bulk_update() walk via the bulk callout.
 */
static void
pfsync_bulk_start(void)
{
	struct pfsync_softc *sc = V_pfsyncif;

	if (V_pf_status.debug >= PF_DEBUG_MISC)
		printf("pfsync: received bulk update request\n");

	PFSYNC_BLOCK(sc);

	sc->sc_ureq_received = time_uptime;
	/* Restart the state-table walk from the beginning. */
	sc->sc_bulk_hashid = 0;
	sc->sc_bulk_stateid = 0;
	pfsync_bulk_status(PFSYNC_BUS_START);
	callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc);
	PFSYNC_BUNLOCK(sc);
}
/*
 * One step of a bulk state transfer: walk the id-hash table queueing
 * full updates until a packet fills up, then remember where we stopped
 * and reschedule ourselves.  Announces BUS_END once the whole table
 * has been walked.  Runs as the sc_bulk_tmo callout.
 */
static void
pfsync_bulk_update(void *arg)
{
	struct pfsync_softc *sc = arg;
	struct pf_kstate *s;
	int i;

	PFSYNC_BLOCK_ASSERT(sc);
	CURVNET_SET(sc->sc_ifp->if_vnet);

	/*
	 * Start with last state from previous invocation.
	 * It may had gone, in this case start from the
	 * hash slot.
	 */
	s = pf_find_state_byid(sc->sc_bulk_stateid, sc->sc_bulk_creatorid);

	if (s != NULL)
		i = PF_IDHASH(s);
	else
		i = sc->sc_bulk_hashid;

	for (; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

		/* pf_find_state_byid() returned with the row locked. */
		if (s != NULL)
			PF_HASHROW_ASSERT(ih);
		else {
			PF_HASHROW_LOCK(ih);
			s = LIST_FIRST(&ih->states);
		}

		for (; s; s = LIST_NEXT(s, entry)) {
			/* Skip states created after the request arrived. */
			if (s->sync_state == PFSYNC_S_NONE &&
			    s->timeout < PFTM_MAX &&
			    s->pfsync_time <= sc->sc_ureq_received) {
				if (pfsync_update_state_req(s)) {
					/* We've filled a packet. */
					sc->sc_bulk_hashid = i;
					sc->sc_bulk_stateid = s->id;
					sc->sc_bulk_creatorid = s->creatorid;
					PF_HASHROW_UNLOCK(ih);
					callout_reset(&sc->sc_bulk_tmo, 1,
					    pfsync_bulk_update, sc);
					goto full;
				}
			}
		}
		PF_HASHROW_UNLOCK(ih);
	}

	/* We're done. */
	pfsync_bulk_status(PFSYNC_BUS_END);
full:
	CURVNET_RESTORE();
}
/*
 * Send a bulk-update status message (BUS_START or BUS_END) to the
 * peer via the "plus" mechanism.  The endtime field carries the time
 * elapsed since the bulk request was received.
 */
static void
pfsync_bulk_status(u_int8_t status)
{
	struct {
		struct pfsync_subheader subh;
		struct pfsync_bus bus;
	} __packed r;
	struct pfsync_softc *sc = V_pfsyncif;

	memset(&r, 0, sizeof(r));

	/* One BUS action in this subheader. */
	r.subh.action = PFSYNC_ACT_BUS;
	r.subh.count = htons(1);
	V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_BUS]++;

	r.bus.status = status;
	r.bus.creatorid = V_pf_status.hostid;
	r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received);

	pfsync_send_plus(&r, sizeof(r));
}
/*
 * Bulk-update watchdog: no valid BUS_END arrived in time.  Re-request
 * the bulk transfer up to PFSYNC_MAX_BULKTRIES times, then give up and
 * pretend the transfer succeeded (clearing the carp demotion) so the
 * node does not stay demoted forever.
 */
static void
pfsync_bulk_fail(void *arg)
{
	struct pfsync_softc *sc = arg;
	struct pfsync_bucket *b = &sc->sc_buckets[0];

	CURVNET_SET(sc->sc_ifp->if_vnet);

	PFSYNC_BLOCK_ASSERT(sc);

	if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
		/* Try again */
		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
		    pfsync_bulk_fail, V_pfsyncif);

		/* Update requests always go through bucket 0. */
		PFSYNC_BUCKET_LOCK(b);
		pfsync_request_update(0, 0);
		PFSYNC_BUCKET_UNLOCK(b);
	} else {
		/* Pretend like the transfer was ok. */
		sc->sc_ureq_sent = 0;
		sc->sc_bulk_tries = 0;
		PFSYNC_LOCK(sc);
		if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
			(*carp_demote_adj_p)(-V_pfsync_carp_adj,
			    "pfsync bulk fail");
		sc->sc_flags |= PFSYNCF_OK;
		PFSYNC_UNLOCK(sc);
		if (V_pf_status.debug >= PF_DEBUG_MISC)
			printf("pfsync: failed to receive bulk update\n");
	}

	CURVNET_RESTORE();
}
/*
 * Append a pre-built message region ("plus" data) to bucket 0 and flush
 * it immediately.  Used for messages (CLR, BUS) that do not flow through
 * the per-state queues.  On allocation failure the message is silently
 * dropped.
 */
static void
pfsync_send_plus(void *plus, size_t pluslen)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_bucket *b = &sc->sc_buckets[0];
	uint8_t *newplus;

	PFSYNC_BUCKET_LOCK(b);

	/* Flush first if the addition would overflow the MTU. */
	if (b->b_len + pluslen > sc->sc_ifp->if_mtu)
		pfsync_sendout(1, b->b_id);

	newplus = malloc(pluslen + b->b_pluslen, M_PFSYNC, M_NOWAIT);
	if (newplus == NULL)
		goto out;

	/* Concatenate with any plus data already pending. */
	if (b->b_plus != NULL) {
		memcpy(newplus, b->b_plus, b->b_pluslen);
		free(b->b_plus, M_PFSYNC);
	} else {
		MPASS(b->b_pluslen == 0);
	}
	memcpy(newplus + b->b_pluslen, plus, pluslen);

	b->b_plus = newplus;
	b->b_pluslen += pluslen;
	b->b_len += pluslen;

	pfsync_sendout(1, b->b_id);

out:
	PFSYNC_BUCKET_UNLOCK(b);
}
/*
 * Per-bucket flush timer: make sure queued data does not linger longer
 * than a second by scheduling the transmit swi.
 */
static void
pfsync_timeout(void *arg)
{
	struct pfsync_bucket *bucket = arg;

	CURVNET_SET(bucket->b_sc->sc_ifp->if_vnet);
	PFSYNC_BUCKET_LOCK(bucket);
	pfsync_push(bucket);
	PFSYNC_BUCKET_UNLOCK(bucket);
	CURVNET_RESTORE();
}
/*
 * Flag the bucket for transmission and schedule the pfsync software
 * interrupt to do the actual work.  Bucket lock must be held.
 */
static void
pfsync_push(struct pfsync_bucket *bucket)
{
	PFSYNC_BUCKET_LOCK_ASSERT(bucket);

	bucket->b_flags |= PFSYNCF_BUCKET_PUSH;
	swi_sched(V_pfsync_swi_cookie, 0);
}
/*
 * Schedule transmission for every bucket, taking and dropping each
 * bucket lock in turn.
 */
static void
pfsync_push_all(struct pfsync_softc *sc)
{
	struct pfsync_bucket *bucket;
	int i;

	for (i = 0; i < pfsync_buckets; i++) {
		bucket = &sc->sc_buckets[i];

		PFSYNC_BUCKET_LOCK(bucket);
		pfsync_push(bucket);
		PFSYNC_BUCKET_UNLOCK(bucket);
	}
}
/*
 * Transmit one pfsync mbuf.  The address family is recovered from the
 * IP version nibble of the already-built header.  Deferred original
 * packets (M_SKIP_FIREWALL) are routed normally; pfsync's own packets
 * are sent raw with the configured multicast options.
 */
static void
pfsync_tx(struct pfsync_softc *sc, struct mbuf *m)
{
	struct ip *ip;
	int af, error = 0;

	ip = mtod(m, struct ip *);
	MPASS(ip->ip_v == IPVERSION || ip->ip_v == (IPV6_VERSION >> 4));

	af = ip->ip_v == IPVERSION ? AF_INET : AF_INET6;

	/*
	 * We distinguish between a deferral packet and our
	 * own pfsync packet based on M_SKIP_FIREWALL
	 * flag. This is XXX.
	 */
	switch (af) {
#ifdef INET
	case AF_INET:
		if (m->m_flags & M_SKIP_FIREWALL) {
			error = ip_output(m, NULL, NULL, 0,
			    NULL, NULL);
		} else {
			error = ip_output(m, NULL, NULL,
			    IP_RAWOUTPUT, &sc->sc_imo, NULL);
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		if (m->m_flags & M_SKIP_FIREWALL) {
			error = ip6_output(m, NULL, NULL, 0,
			    NULL, NULL, NULL);
		} else {
			error = ip6_output(m, NULL, NULL, 0,
				&sc->sc_im6o, NULL, NULL);
		}
		break;
#endif
	}

	if (error == 0)
		V_pfsyncstats.pfsyncs_opackets++;
	else
		V_pfsyncstats.pfsyncs_oerrors++;
}
/*
 * Software interrupt handler: for each bucket, build a packet from any
 * pushed pending data, then drain and transmit the bucket's send queue.
 * Transmission happens outside the bucket lock.
 */
static void
pfsyncintr(void *arg)
{
	struct epoch_tracker et;
	struct pfsync_softc *sc = arg;
	struct pfsync_bucket *b;
	struct mbuf *m, *n;
	int c;

	NET_EPOCH_ENTER(et);
	CURVNET_SET(sc->sc_ifp->if_vnet);

	for (c = 0; c < pfsync_buckets; c++) {
		b = &sc->sc_buckets[c];

		PFSYNC_BUCKET_LOCK(b);
		if ((b->b_flags & PFSYNCF_BUCKET_PUSH) && b->b_len > PFSYNC_MINPKT) {
			pfsync_sendout(0, b->b_id);
			b->b_flags &= ~PFSYNCF_BUCKET_PUSH;
		}
		/* Grab the whole queue; send without the lock. */
		_IF_DEQUEUE_ALL(&b->b_snd, m);
		PFSYNC_BUCKET_UNLOCK(b);

		for (; m != NULL; m = n) {
			n = m->m_nextpkt;
			m->m_nextpkt = NULL;
			pfsync_tx(sc, m);
		}
	}
	CURVNET_RESTORE();
	NET_EPOCH_EXIT(et);
}
/*
 * Join the pfsync multicast group on 'ifp' for the configured peer
 * address family, installing the pre-allocated filter (imf or im6f)
 * into the softc's multicast options.  Returns 0 or an errno; on
 * failure the caller still owns (and must free) the filters.
 */
static int
pfsync_multicast_setup(struct pfsync_softc *sc, struct ifnet *ifp,
    struct in_mfilter* imf, struct in6_mfilter* im6f)
{
#ifdef  INET
	struct ip_moptions *imo = &sc->sc_imo;
#endif
#ifdef  INET6
	struct ip6_moptions *im6o = &sc->sc_im6o;
	struct sockaddr_in6 *syncpeer_sa6 = NULL;
#endif

	if (!(ifp->if_flags & IFF_MULTICAST))
		return (EADDRNOTAVAIL);

	switch (sc->sc_sync_peer.ss_family) {
#ifdef INET
	case AF_INET:
	{
		int error;

		ip_mfilter_init(&imo->imo_head);
		imo->imo_multicast_vif = -1;
		if ((error = in_joingroup(ifp,
		    &((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr, NULL,
		    &imf->imf_inm)) != 0)
			return (error);

		ip_mfilter_insert(&imo->imo_head, imf);
		imo->imo_multicast_ifp = ifp;
		imo->imo_multicast_ttl = PFSYNC_DFLTTL;
		imo->imo_multicast_loop = 0;
		break;
	}
#endif
#ifdef INET6
	case AF_INET6:
	{
		int error;

		syncpeer_sa6 = (struct sockaddr_in6 *)&sc->sc_sync_peer;
		/* Scope the (link-local) group address to 'ifp'. */
		if ((error = in6_setscope(&syncpeer_sa6->sin6_addr, ifp, NULL)))
			return (error);

		ip6_mfilter_init(&im6o->im6o_head);
		if ((error = in6_joingroup(ifp, &syncpeer_sa6->sin6_addr, NULL,
		    &(im6f->im6f_in6m), 0)) != 0)
			return (error);

		ip6_mfilter_insert(&im6o->im6o_head, im6f);
		im6o->im6o_multicast_ifp = ifp;
		im6o->im6o_multicast_hlim = PFSYNC_DFLTTL;
		im6o->im6o_multicast_loop = 0;
		break;
	}
#endif
	}

	return (0);
}
/*
 * Leave all joined multicast groups (both address families) and free
 * the associated filters, then clear the multicast interface pointers.
 * Safe to call when no groups are joined.
 */
static void
pfsync_multicast_cleanup(struct pfsync_softc *sc)
{
#ifdef INET
	struct ip_moptions *imo = &sc->sc_imo;
	struct in_mfilter *imf;

	while ((imf = ip_mfilter_first(&imo->imo_head)) != NULL) {
		ip_mfilter_remove(&imo->imo_head, imf);
		in_leavegroup(imf->imf_inm, NULL);
		ip_mfilter_free(imf);
	}
	imo->imo_multicast_ifp = NULL;
#endif

#ifdef INET6
	struct ip6_moptions *im6o = &sc->sc_im6o;
	struct in6_mfilter *im6f;

	while ((im6f = ip6_mfilter_first(&im6o->im6o_head)) != NULL) {
		ip6_mfilter_remove(&im6o->im6o_head, im6f);
		in6_leavegroup(im6f->im6f_in6m, NULL);
		ip6_mfilter_free(im6f);
	}
	im6o->im6o_multicast_ifp = NULL;
#endif
}
/*
 * Called when an ifnet is departing: if it is our sync interface,
 * detach from it without multicast cleanup (the interface teardown
 * handles that) while making sure no stale cleanup happens later.
 */
void
pfsync_detach_ifnet(struct ifnet *ifp)
{
	struct pfsync_softc *sc = V_pfsyncif;

	if (sc == NULL)
		return;

	PFSYNC_LOCK(sc);

	if (sc->sc_sync_if == ifp) {
		/* We don't need mutlicast cleanup here, because the interface
		 * is going away. We do need to ensure we don't try to do
		 * cleanup later.
		 */
		ip_mfilter_init(&sc->sc_imo.imo_head);
		sc->sc_imo.imo_multicast_ifp = NULL;
		sc->sc_im6o.im6o_multicast_ifp = NULL;
		sc->sc_sync_if = NULL;
	}

	PFSYNC_UNLOCK(sc);
}
/*
 * Translate a legacy struct pfsyncreq into the internal
 * pfsync_kstatus representation.  The legacy structure can only carry
 * an IPv4 peer; an address of zero leaves the peer sockaddr zeroed
 * (meaning "unset").  Always returns 0.
 */
static int
pfsync_pfsyncreq_to_kstatus(struct pfsyncreq *pfsyncr, struct pfsync_kstatus *status)
{
	struct sockaddr_storage sa;

	/* Scalar fields map across directly. */
	status->maxupdates = pfsyncr->pfsyncr_maxupdates;
	status->flags = pfsyncr->pfsyncr_defer;
	strlcpy(status->syncdev, pfsyncr->pfsyncr_syncdev, IFNAMSIZ);

	memset(&sa, 0, sizeof(sa));
	if (pfsyncr->pfsyncr_syncpeer.s_addr != 0) {
		struct sockaddr_in *sin = (struct sockaddr_in *)&sa;

		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(*sin);
		sin->sin_addr.s_addr = pfsyncr->pfsyncr_syncpeer.s_addr;
	}
	status->syncpeer = sa;

	return (0);
}
static int
pfsync_kstatus_to_softc(struct pfsync_kstatus *status, struct pfsync_softc *sc)
{
struct ifnet *sifp;
struct in_mfilter *imf = NULL;
struct in6_mfilter *im6f = NULL;
int error;
int c;
if ((status->maxupdates < 0) || (status->maxupdates > 255))
return (EINVAL);
if (status->syncdev[0] == '\0')
sifp = NULL;
else if ((sifp = ifunit_ref(status->syncdev)) == NULL)
return (EINVAL);
switch (status->syncpeer.ss_family) {
#ifdef INET
case AF_UNSPEC:
case AF_INET: {
struct sockaddr_in *status_sin;
status_sin = (struct sockaddr_in *)&(status->syncpeer);
if (sifp != NULL) {
if (status_sin->sin_addr.s_addr == 0 ||
status_sin->sin_addr.s_addr ==
htonl(INADDR_PFSYNC_GROUP)) {
status_sin->sin_family = AF_INET;
status_sin->sin_len = sizeof(*status_sin);
status_sin->sin_addr.s_addr =
htonl(INADDR_PFSYNC_GROUP);
}
if (IN_MULTICAST(ntohl(status_sin->sin_addr.s_addr))) {
imf = ip_mfilter_alloc(M_WAITOK, 0, 0);
}
}
break;
}
#endif
#ifdef INET6
case AF_INET6: {
struct sockaddr_in6 *status_sin6;
status_sin6 = (struct sockaddr_in6*)&(status->syncpeer);
if (sifp != NULL) {
if (IN6_IS_ADDR_UNSPECIFIED(&status_sin6->sin6_addr) ||
IN6_ARE_ADDR_EQUAL(&status_sin6->sin6_addr,
&in6addr_linklocal_pfsync_group)) {
status_sin6->sin6_family = AF_INET6;
status_sin6->sin6_len = sizeof(*status_sin6);
status_sin6->sin6_addr =
in6addr_linklocal_pfsync_group;
}
if (IN6_IS_ADDR_MULTICAST(&status_sin6->sin6_addr)) {
im6f = ip6_mfilter_alloc(M_WAITOK, 0, 0);
}
}
break;
}
#endif
}
PFSYNC_LOCK(sc);
switch (status->version) {
case PFSYNC_MSG_VERSION_UNSPECIFIED:
sc->sc_version = PFSYNC_MSG_VERSION_DEFAULT;
break;
case PFSYNC_MSG_VERSION_1301:
case PFSYNC_MSG_VERSION_1400:
sc->sc_version = status->version;
break;
default:
PFSYNC_UNLOCK(sc);
return (EINVAL);
}
switch (status->syncpeer.ss_family) {
case AF_INET: {
struct sockaddr_in *status_sin = (struct sockaddr_in *)&(status->syncpeer);
struct sockaddr_in *sc_sin = (struct sockaddr_in *)&sc->sc_sync_peer;
sc_sin->sin_family = AF_INET;
sc_sin->sin_len = sizeof(*sc_sin);
if (status_sin->sin_addr.s_addr == 0) {
sc_sin->sin_addr.s_addr = htonl(INADDR_PFSYNC_GROUP);
} else {
sc_sin->sin_addr.s_addr = status_sin->sin_addr.s_addr;
}
break;
}
case AF_INET6: {
struct sockaddr_in6 *status_sin = (struct sockaddr_in6 *)&(status->syncpeer);
struct sockaddr_in6 *sc_sin = (struct sockaddr_in6 *)&sc->sc_sync_peer;
sc_sin->sin6_family = AF_INET6;
sc_sin->sin6_len = sizeof(*sc_sin);
if(IN6_IS_ADDR_UNSPECIFIED(&status_sin->sin6_addr)) {
sc_sin->sin6_addr = in6addr_linklocal_pfsync_group;
} else {
sc_sin->sin6_addr = status_sin->sin6_addr;
}
break;
}
}
sc->sc_maxupdates = status->maxupdates;
if (status->flags & PFSYNCF_DEFER) {
sc->sc_flags |= PFSYNCF_DEFER;
V_pfsync_defer_ptr = pfsync_defer;
} else {
sc->sc_flags &= ~PFSYNCF_DEFER;
V_pfsync_defer_ptr = NULL;
}
if (sifp == NULL) {
if (sc->sc_sync_if)
if_rele(sc->sc_sync_if);
sc->sc_sync_if = NULL;
pfsync_multicast_cleanup(sc);
PFSYNC_UNLOCK(sc);
return (0);
}
for (c = 0; c < pfsync_buckets; c++) {
PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]);
if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT &&
(sifp->if_mtu < sc->sc_ifp->if_mtu ||
(sc->sc_sync_if != NULL &&
sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
sifp->if_mtu < MCLBYTES - sizeof(struct ip)))
pfsync_sendout(1, c);
PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]);
}
pfsync_multicast_cleanup(sc);
if (((sc->sc_sync_peer.ss_family == AF_INET) &&
IN_MULTICAST(ntohl(((struct sockaddr_in *)
&sc->sc_sync_peer)->sin_addr.s_addr))) ||
((sc->sc_sync_peer.ss_family == AF_INET6) &&
IN6_IS_ADDR_MULTICAST(&((struct sockaddr_in6*)
&sc->sc_sync_peer)->sin6_addr))) {
error = pfsync_multicast_setup(sc, sifp, imf, im6f);
if (error) {
if_rele(sifp);
PFSYNC_UNLOCK(sc);
#ifdef INET
if (imf != NULL)
ip_mfilter_free(imf);
#endif
#ifdef INET6
if (im6f != NULL)
ip6_mfilter_free(im6f);
#endif
return (error);
}
}
if (sc->sc_sync_if)
if_rele(sc->sc_sync_if);
sc->sc_sync_if = sifp;
switch (sc->sc_sync_peer.ss_family) {
#ifdef INET
case AF_INET: {
struct ip *ip;
ip = &sc->sc_template.ipv4;
bzero(ip, sizeof(*ip));
ip->ip_v = IPVERSION;
ip->ip_hl = sizeof(sc->sc_template.ipv4) >> 2;
ip->ip_tos = IPTOS_LOWDELAY;
/* len and id are set later. */
ip->ip_off = htons(IP_DF);
ip->ip_ttl = PFSYNC_DFLTTL;
ip->ip_p = IPPROTO_PFSYNC;
ip->ip_src.s_addr = INADDR_ANY;
ip->ip_dst = ((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr;
break;
}
#endif
#ifdef INET6
case AF_INET6: {
struct ip6_hdr *ip6;
ip6 = &sc->sc_template.ipv6;
bzero(ip6, sizeof(*ip6));
ip6->ip6_vfc = IPV6_VERSION;
ip6->ip6_hlim = PFSYNC_DFLTTL;
ip6->ip6_nxt = IPPROTO_PFSYNC;
ip6->ip6_dst = ((struct sockaddr_in6 *)&sc->sc_sync_peer)->sin6_addr;
struct epoch_tracker et;
NET_EPOCH_ENTER(et);
in6_selectsrc_addr(if_getfib(sc->sc_sync_if), &ip6->ip6_dst, 0,
sc->sc_sync_if, &ip6->ip6_src, NULL);
NET_EPOCH_EXIT(et);
break;
}
#endif
}
/* Request a full state table update. */
if ((sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
(*carp_demote_adj_p)(V_pfsync_carp_adj,
"pfsync bulk start");
sc->sc_flags &= ~PFSYNCF_OK;
if (V_pf_status.debug >= PF_DEBUG_MISC)
printf("pfsync: requesting bulk update\n");
PFSYNC_UNLOCK(sc);
PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]);
pfsync_request_update(0, 0);
PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]);
PFSYNC_BLOCK(sc);
sc->sc_ureq_sent = time_uptime;
callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail, sc);
PFSYNC_BUNLOCK(sc);
return (0);
}
/*
 * Publish the pfsync hook pointers that pf(4) calls into.  All pointers
 * are installed together under the pf rules write lock so pf never sees
 * a half-initialized set.
 */
static void
pfsync_pointers_init(void)
{

    PF_RULES_WLOCK();
    V_pfsync_state_import_ptr = pfsync_state_import;
    V_pfsync_insert_state_ptr = pfsync_insert_state;
    V_pfsync_update_state_ptr = pfsync_update_state;
    V_pfsync_delete_state_ptr = pfsync_delete_state;
    V_pfsync_clear_states_ptr = pfsync_clear_states;
    V_pfsync_defer_ptr = pfsync_defer;
    PF_RULES_WUNLOCK();
}
/*
 * Withdraw the pfsync hooks from pf(4).  The counterpart of
 * pfsync_pointers_init(); also taken under the rules write lock so no
 * pf consumer can race with the teardown.
 */
static void
pfsync_pointers_uninit(void)
{

    PF_RULES_WLOCK();
    V_pfsync_state_import_ptr = NULL;
    V_pfsync_insert_state_ptr = NULL;
    V_pfsync_update_state_ptr = NULL;
    V_pfsync_delete_state_ptr = NULL;
    V_pfsync_clear_states_ptr = NULL;
    V_pfsync_defer_ptr = NULL;
    PF_RULES_WUNLOCK();
}
/*
 * Per-VNET startup: register the if_clone handler for pfsync interfaces,
 * attach the software interrupt used to drain queued packets, and
 * publish the pf hook pointers.
 */
static void
vnet_pfsync_init(const void *unused __unused)
{
    int error;

    V_pfsync_cloner = if_clone_simple(pfsyncname,
        pfsync_clone_create, pfsync_clone_destroy, 1);
    error = swi_add(&V_pfsync_swi_ie, pfsyncname, pfsyncintr, V_pfsyncif,
        SWI_NET, INTR_MPSAFE, &V_pfsync_swi_cookie);
    if (error) {
        if_clone_detach(V_pfsync_cloner);
        log(LOG_INFO, "swi_add() failed in %s\n", __func__);
    }
    /*
     * NOTE(review): on swi_add() failure the pointers are still installed
     * and the SYSUNINIT path will later swi_remove() an unset cookie --
     * confirm this degraded-but-continue behaviour is intended.
     */
    pfsync_pointers_init();
}
VNET_SYSINIT(vnet_pfsync_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY,
    vnet_pfsync_init, NULL);
/*
 * Per-VNET teardown: withdraw the pf hooks first so no new work arrives,
 * then destroy the clone handler and the software interrupt.  The MPASS
 * checks assert that the swi resources were actually registered.
 */
static void
vnet_pfsync_uninit(const void *unused __unused)
{
    int ret __diagused;

    pfsync_pointers_uninit();
    if_clone_detach(V_pfsync_cloner);
    ret = swi_remove(V_pfsync_swi_cookie);
    MPASS(ret == 0);
    ret = intr_event_destroy(V_pfsync_swi_ie);
    MPASS(ret == 0);
}
VNET_SYSUNINIT(vnet_pfsync_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_FOURTH,
    vnet_pfsync_uninit, NULL);
/*
 * Global (non-VNET) module initialization: hook interface-detach
 * notification and register the PFSYNC protocol input handlers for the
 * address families compiled into the kernel.
 *
 * Returns 0 on success or the ipproto/ip6proto registration error.
 *
 * Fix: the IPv6 failure path previously called ipproto_unregister()
 * unconditionally; on a kernel built with INET6 but without INET that
 * would try to unregister an IPv4 handler that was never registered.
 */
static int
pfsync_init(void)
{
    int error;

    pfsync_detach_ifnet_ptr = pfsync_detach_ifnet;

#ifdef INET
    error = ipproto_register(IPPROTO_PFSYNC, pfsync_input, NULL);
    if (error)
        return (error);
#endif
#ifdef INET6
    error = ip6proto_register(IPPROTO_PFSYNC, pfsync6_input, NULL);
    if (error) {
#ifdef INET
        /* Roll back the IPv4 registration done above. */
        ipproto_unregister(IPPROTO_PFSYNC);
#endif
        return (error);
    }
#endif

    return (0);
}
/*
 * Global module teardown: detach the interface-departure hook and
 * unregister the PFSYNC protocol inputs for each compiled-in family.
 */
static void
pfsync_uninit(void)
{

    pfsync_detach_ifnet_ptr = NULL;

#ifdef INET
    ipproto_unregister(IPPROTO_PFSYNC);
#endif
#ifdef INET6
    ip6proto_unregister(IPPROTO_PFSYNC);
#endif
}
/*
 * Module event handler: perform protocol registration on load and
 * teardown on unload; any other event is rejected with EINVAL.
 */
static int
pfsync_modevent(module_t mod, int type, void *data)
{
    int rv;

    if (type == MOD_LOAD)
        rv = pfsync_init();
    else if (type == MOD_UNLOAD) {
        pfsync_uninit();
        rv = 0;
    } else
        rv = EINVAL;

    return (rv);
}
/* Module descriptor: name, event handler, no private argument. */
static moduledata_t pfsync_mod = {
    pfsyncname,
    pfsync_modevent,
    0
};

#define PFSYNC_MODVER 1

/* Stay on FIREWALL as we depend on pf being initialized and on inetdomain. */
DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY);
MODULE_VERSION(pfsync, PFSYNC_MODVER);
MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER);
diff --git a/sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c b/sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c
index c8f7574a12fa..f3a85f3a0cb7 100644
--- a/sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1,1465 +1,1461 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <sys/cdefs.h>
#include "ipoib.h"
#include <sys/eventhandler.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
#include <net/infiniband.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

/* Send/receive ring sizes; read-only (0444) load-time tunables. */
int ipoib_sendq_size = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");

module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
/* Non-zero enables ipoib_dbg() tracing; writable at runtime (0644). */
int ipoib_debug_level = 1;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif
/* Cursor for walking the per-device path cache (debug interface). */
struct ipoib_path_iter {
    struct ipoib_dev_priv *priv;  /* device whose path tree is walked */
    struct ipoib_path path;       /* copy of the last path visited */
};

/*
 * Template broadcast group address; bytes 8-9 are overwritten with the
 * P_Key when a port is attached (see ipoib_add_port()).
 */
static const u8 ipv4_bcast_addr[] = {
    0x00, 0xff, 0xff, 0xff,
    0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

/* Shared workqueue for deferred IPoIB tasks (flush, restart, joins). */
struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;
static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device, void *client_data);
static if_t ipoib_get_net_dev_by_params(
        struct ib_device *dev, u8 port, u16 pkey,
        const union ib_gid *gid, const struct sockaddr *addr,
        void *client_data);
static void ipoib_start(if_t dev);
static int ipoib_ioctl(if_t ifp, u_long command, caddr_t data);

/* Allocator handing out unique interface unit numbers (0..65535). */
static struct unrhdr *ipoib_unrhdr;

static void
ipoib_unrhdr_init(void *arg)
{

    ipoib_unrhdr = new_unrhdr(0, 65535, NULL);
}
SYSINIT(ipoib_unrhdr_init, SI_SUB_KLD - 1, SI_ORDER_ANY, ipoib_unrhdr_init, NULL);
static void
ipoib_unrhdr_uninit(void *arg)
{
if (ipoib_unrhdr != NULL) {
struct unrhdr *hdr;
hdr = ipoib_unrhdr;
ipoib_unrhdr = NULL;
delete_unrhdr(hdr);
}
}
SYSUNINIT(ipoib_unrhdr_uninit, SI_SUB_KLD - 1, SI_ORDER_ANY, ipoib_unrhdr_uninit, NULL);
/* Registration with the IB core; callbacks fire as HCAs come and go. */
static struct ib_client ipoib_client = {
    .name = "ipoib",
    .add = ipoib_add_one,
    .remove = ipoib_remove_one,
    .get_net_dev_by_params = ipoib_get_net_dev_by_params,
};
/*
 * Bring the interface up: open the IB device, raise the link, and
 * recursively open any child (vlan) interfaces.  Returns 0 on success
 * or when the open is deferred (see ipoib_pkey_dev_delay_open()),
 * -EINVAL if the IB device could not be opened or brought up.
 */
int
ipoib_open(struct ipoib_dev_priv *priv)
{
    if_t dev = priv->dev;

    ipoib_dbg(priv, "bringing up interface\n");

    set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

    /* Presumably defers until the P_Key appears -- helper not in view. */
    if (ipoib_pkey_dev_delay_open(priv))
        return 0;

    if (ipoib_ib_dev_open(priv))
        goto err_disable;

    if (ipoib_ib_dev_up(priv))
        goto err_stop;

    if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
        struct ipoib_dev_priv *cpriv;

        /* Bring up any child interfaces too */
        mutex_lock(&priv->vlan_mutex);
        list_for_each_entry(cpriv, &priv->child_intfs, list)
            if ((if_getdrvflags(cpriv->dev) & IFF_DRV_RUNNING) == 0)
                ipoib_open(cpriv);
        mutex_unlock(&priv->vlan_mutex);
    }
    if_setdrvflagbits(dev, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

    return 0;

err_stop:
    ipoib_ib_dev_stop(priv, 1);

err_disable:
    clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

    return -EINVAL;
}
/*
 * if_init handler: open the interface if it is not yet running and
 * schedule a light flush on the IPoIB workqueue.
 */
static void
ipoib_init(void *arg)
{
    struct ipoib_dev_priv *priv = arg;

    if ((if_getdrvflags(priv->dev) & IFF_DRV_RUNNING) == 0)
        ipoib_open(priv);
    queue_work(ipoib_workqueue, &priv->flush_light);
}
/*
 * Bring the interface down: clear admin-up, mark it not running, stop
 * the IB device, and recursively stop any child interfaces.  Always
 * returns 0.
 */
static int
ipoib_stop(struct ipoib_dev_priv *priv)
{
    if_t dev = priv->dev;

    ipoib_dbg(priv, "stopping interface\n");

    clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
    if_setdrvflagbits(dev, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

    ipoib_ib_dev_down(priv, 0);
    ipoib_ib_dev_stop(priv, 0);

    if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
        struct ipoib_dev_priv *cpriv;

        /* Bring down any child interfaces too */
        mutex_lock(&priv->vlan_mutex);
        list_for_each_entry(cpriv, &priv->child_intfs, list)
            if ((if_getdrvflags(cpriv->dev) & IFF_DRV_RUNNING) != 0)
                ipoib_stop(cpriv);
        mutex_unlock(&priv->vlan_mutex);
    }

    return 0;
}
/*
 * Apply a new MTU to the ifnet.  With 'propagate' set the change goes
 * through the normal SIOCSIFMTU ioctl path (so the stack sees it);
 * otherwise the MTU field is set directly.  Returns 0 on success or
 * the ifhwioctl() error.
 */
static int
ipoib_propagate_ifnet_mtu(struct ipoib_dev_priv *priv, int new_mtu,
    bool propagate)
{
    if_t ifp;
    struct ifreq ifr;
    int error;

    ifp = priv->dev;
    /* Nothing to do if the MTU is already in effect. */
    if (if_getmtu(ifp) == new_mtu)
        return (0);
    if (propagate) {
        strlcpy(ifr.ifr_name, if_name(ifp), IFNAMSIZ);
        ifr.ifr_mtu = new_mtu;
        CURVNET_SET(if_getvnet(ifp));
        error = ifhwioctl(SIOCSIFMTU, ifp, (caddr_t)&ifr, curthread);
        CURVNET_RESTORE();
    } else {
        if_setmtu(ifp, new_mtu);
        error = 0;
    }
    return (error);
}
/*
 * Validate and apply an MTU change.  In connected mode the limit is the
 * CM MTU; in datagram mode the admin MTU is recorded and clamped to the
 * multicast MTU.  A successful change schedules a light flush.  Returns
 * 0 on success, -EINVAL if the MTU is out of range, or the propagation
 * error (in which case the previous admin MTU is restored).
 */
int
ipoib_change_mtu(struct ipoib_dev_priv *priv, int new_mtu, bool propagate)
{
    int error, prev_admin_mtu;

    /* dev->if_mtu > 2K ==> connected mode */
    if (ipoib_cm_admin_enabled(priv)) {
        if (new_mtu > IPOIB_CM_MTU(ipoib_cm_max_mtu(priv)))
            return -EINVAL;

        if (new_mtu > priv->mcast_mtu)
            ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
                priv->mcast_mtu);

        return (ipoib_propagate_ifnet_mtu(priv, new_mtu, propagate));
    }

    if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
        return -EINVAL;

    prev_admin_mtu = priv->admin_mtu;
    priv->admin_mtu = new_mtu;
    error = ipoib_propagate_ifnet_mtu(priv, min(priv->mcast_mtu,
        priv->admin_mtu), propagate);
    if (error == 0) {
        /* check for MTU change to avoid infinite loop */
        if (prev_admin_mtu != new_mtu)
            queue_work(ipoib_workqueue, &priv->flush_light);
    } else
        priv->admin_mtu = prev_admin_mtu;
    return (error);
}
/*
 * Interface ioctl handler.  Waits out device initialization
 * (priv->gone == 2) and rejects requests on detached devices with
 * ENXIO.  Handles flag changes, multicast list updates, address
 * assignment, link-level address queries, and MTU changes; everything
 * else returns EINVAL.
 */
static int
ipoib_ioctl(if_t ifp, u_long command, caddr_t data)
{
    struct ipoib_dev_priv *priv = if_getsoftc(ifp);
    struct ifaddr *ifa = (struct ifaddr *) data;
    struct ifreq *ifr = (struct ifreq *) data;
    int error = 0;

    /* check if detaching */
    if (priv == NULL)
        return (ENXIO);
    /* wait for device to become ready, if any */
    while (priv->gone == 2)
        pause("W", 1);
    /* check for device gone */
    if (priv->gone != 0)
        return (ENXIO);

    switch (command) {
    case SIOCSIFFLAGS:
        if (if_getflags(ifp) & IFF_UP) {
            if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
                error = -ipoib_open(priv);
        } else
            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
                ipoib_stop(priv);
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        /* Rebuild the multicast group list asynchronously. */
        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
            queue_work(ipoib_workqueue, &priv->restart_task);
        break;
    case SIOCSIFADDR:
        if_setflagbits(ifp, IFF_UP, 0);

        switch (ifa->ifa_addr->sa_family) {
#ifdef INET
        case AF_INET:
            if_init(ifp, if_getsoftc(ifp));	/* before arpwhohas */
            arp_ifinit(ifp, ifa);
            break;
#endif
        default:
            if_init(ifp, if_getsoftc(ifp));
            break;
        }
        break;

    case SIOCGIFADDR:
        bcopy(if_getlladdr(ifp), &ifr->ifr_addr.sa_data[0],
            INFINIBAND_ALEN);
        break;

    case SIOCSIFMTU:
        /*
         * Set the interface MTU.
         */
        error = -ipoib_change_mtu(priv, ifr->ifr_mtu, false);
        break;
    default:
        error = EINVAL;
        break;
    }
    return (error);
}
/*
 * Look up a path by destination GID in the device's red-black tree.
 * Returns the matching path or NULL.  Caller must hold priv->lock.
 */
static struct ipoib_path *
__path_find(struct ipoib_dev_priv *priv, void *gid)
{
    struct ipoib_path *path;
    struct rb_node *node;
    int cmp;

    for (node = priv->path_tree.rb_node; node != NULL; ) {
        path = rb_entry(node, struct ipoib_path, rb_node);
        cmp = memcmp(gid, path->pathrec.dgid.raw,
            sizeof (union ib_gid));
        if (cmp == 0)
            return path;
        node = (cmp < 0) ? node->rb_left : node->rb_right;
    }

    return NULL;
}
/*
 * Insert a path into the device's red-black tree, keyed by destination
 * GID, and append it to the flat path list.  Returns 0 on success or
 * -EEXIST if a path with the same GID is already present.
 */
static int
__path_add(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{
    struct rb_node **n = &priv->path_tree.rb_node;
    struct rb_node *pn = NULL;
    struct ipoib_path *tpath;
    int ret;

    while (*n) {
        pn = *n;
        tpath = rb_entry(pn, struct ipoib_path, rb_node);

        ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
            sizeof (union ib_gid));
        if (ret < 0)
            n = &pn->rb_left;
        else if (ret > 0)
            n = &pn->rb_right;
        else
            return -EEXIST;
    }

    rb_link_node(&path->rb_node, pn, n);
    rb_insert_color(&path->rb_node, &priv->path_tree);

    list_add_tail(&path->list, &priv->path_list);

    return 0;
}
/*
 * Release a path: drop any mbufs still queued on it, put the address
 * handle, destroy the CM transmit context if one exists, and free the
 * structure.
 */
void
ipoib_path_free(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{

    _IF_DRAIN(&path->queue);

    if (path->ah)
        ipoib_put_ah(path->ah);
    if (ipoib_cm_get(path))
        ipoib_cm_destroy_tx(ipoib_cm_get(path));

    kfree(path);
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

/*
 * Allocate a path iterator positioned at the lowest GID.  Returns NULL
 * on allocation failure or when the path tree is empty.
 */
struct ipoib_path_iter *
ipoib_path_iter_init(struct ipoib_dev_priv *priv)
{
    struct ipoib_path_iter *iter;

    iter = kmalloc(sizeof *iter, GFP_KERNEL);
    if (!iter)
        return NULL;

    iter->priv = priv;
    /* Start below every valid GID so the first next() finds the minimum. */
    memset(iter->path.pathrec.dgid.raw, 0, 16);

    if (ipoib_path_iter_next(iter)) {
        kfree(iter);
        return NULL;
    }

    return iter;
}

/*
 * Advance the iterator to the next path whose GID is greater than the
 * one last visited; copies the path into iter->path.  Returns 0 on
 * success, 1 when the walk is exhausted.
 */
int
ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
    struct ipoib_dev_priv *priv = iter->priv;
    struct rb_node *n;
    struct ipoib_path *path;
    int ret = 1;

    spin_lock_irq(&priv->lock);

    n = rb_first(&priv->path_tree);

    while (n) {
        path = rb_entry(n, struct ipoib_path, rb_node);

        if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
            sizeof (union ib_gid)) < 0) {
            iter->path = *path;
            ret = 0;
            break;
        }

        n = rb_next(n);
    }

    spin_unlock_irq(&priv->lock);

    return ret;
}

/* Copy out the path captured by the last successful next(). */
void
ipoib_path_iter_read(struct ipoib_path_iter *iter, struct ipoib_path *path)
{

    *path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
/*
 * Mark every cached path invalid so the next send triggers a fresh path
 * record lookup.  The paths themselves are kept.
 */
void
ipoib_mark_paths_invalid(struct ipoib_dev_priv *priv)
{
    struct ipoib_path *path, *tp;

    spin_lock_irq(&priv->lock);

    list_for_each_entry_safe(path, tp, &priv->path_list, list) {
        ipoib_dbg(priv, "mark path LID 0x%04x GID %16D invalid\n",
            be16_to_cpu(path->pathrec.dlid),
            path->pathrec.dgid.raw, ":");
        path->valid = 0;
    }

    spin_unlock_irq(&priv->lock);
}
/*
 * Remove and free every cached path.  Outstanding SA queries are
 * cancelled; the lock is dropped around wait_for_completion() so the
 * query callback can run and complete path->done.
 */
void
ipoib_flush_paths(struct ipoib_dev_priv *priv)
{
    struct ipoib_path *path, *tp;
    LIST_HEAD(remove_list);
    unsigned long flags;

    spin_lock_irqsave(&priv->lock, flags);

    /* Detach everything from the tree/list while still locked. */
    list_splice_init(&priv->path_list, &remove_list);

    list_for_each_entry(path, &remove_list, list)
        rb_erase(&path->rb_node, &priv->path_tree);

    list_for_each_entry_safe(path, tp, &remove_list, list) {
        if (path->query)
            ib_sa_cancel_query(path->query_id, path->query);
        spin_unlock_irqrestore(&priv->lock, flags);
        wait_for_completion(&path->done);
        ipoib_path_free(priv, path);
        spin_lock_irqsave(&priv->lock, flags);
    }

    spin_unlock_irqrestore(&priv->lock, flags);
}
/*
 * SA path record query callback.  On success, builds an address handle
 * from the returned record, installs it on the path, and retransmits
 * every mbuf that was queued while the lookup was pending.  Always
 * completes path->done so waiters (e.g. ipoib_flush_paths()) make
 * progress.
 */
static void
path_rec_completion(int status, struct ib_sa_path_rec *pathrec, void *path_ptr)
{
    struct ipoib_path *path = path_ptr;
    struct ipoib_dev_priv *priv = path->priv;
    if_t dev = priv->dev;
    struct ipoib_ah *ah = NULL;
    struct ipoib_ah *old_ah = NULL;
    struct epoch_tracker et;
    struct ifqueue mbqueue;
    struct mbuf *mb;
    unsigned long flags;

    if (!status)
        ipoib_dbg(priv, "PathRec LID 0x%04x for GID %16D\n",
            be16_to_cpu(pathrec->dlid), pathrec->dgid.raw, ":");
    else
        ipoib_dbg(priv, "PathRec status %d for GID %16D\n",
            status, path->pathrec.dgid.raw, ":");

    bzero(&mbqueue, sizeof(mbqueue));

    if (!status) {
        struct ib_ah_attr av;

        if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
            ah = ipoib_create_ah(priv, priv->pd, &av);
    }

    spin_lock_irqsave(&priv->lock, flags);

    if (ah) {
        path->pathrec = *pathrec;

        old_ah = path->ah;
        path->ah = ah;

        ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
            ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

        /* Move the pending mbufs to a private queue under the lock. */
        for (;;) {
            _IF_DEQUEUE(&path->queue, mb);
            if (mb == NULL)
                break;
            _IF_ENQUEUE(&mbqueue, mb);
        }

#ifdef CONFIG_INFINIBAND_IPOIB_CM
        if (ipoib_cm_enabled(priv, path->hwaddr) && !ipoib_cm_get(path))
            ipoib_cm_set(path, ipoib_cm_create_tx(priv, path));
#endif

        path->valid = 1;
    }

    path->query = NULL;
    complete(&path->done);

    spin_unlock_irqrestore(&priv->lock, flags);

    if (old_ah)
        ipoib_put_ah(old_ah);

    /* Retransmit deferred packets outside the lock, in net epoch. */
    NET_EPOCH_ENTER(et);
    for (;;) {
        _IF_DEQUEUE(&mbqueue, mb);
        if (mb == NULL)
            break;
        mb->m_pkthdr.rcvif = dev;
        if (if_transmit(dev, mb))
            ipoib_warn(priv, "dev_queue_xmit failed "
                "to requeue packet\n");
    }
    NET_EPOCH_EXIT(et);
}
/*
 * Allocate and pre-fill a path structure for the given hardware
 * address (GID starts at hwaddr[4]).  Returns NULL if the broadcast
 * group is not joined yet or on allocation failure.  Called from the
 * send path, hence GFP_ATOMIC.
 */
static struct ipoib_path *
path_rec_create(struct ipoib_dev_priv *priv, uint8_t *hwaddr)
{
    struct ipoib_path *path;

    if (!priv->broadcast)
        return NULL;

    path = kzalloc(sizeof *path, GFP_ATOMIC);
    if (!path)
        return NULL;

    path->priv = priv;

    bzero(&path->queue, sizeof(path->queue));

#ifdef CONFIG_INFINIBAND_IPOIB_CM
    memcpy(&path->hwaddr, hwaddr, INFINIBAND_ALEN);
#endif
    memcpy(path->pathrec.dgid.raw, &hwaddr[4], sizeof (union ib_gid));
    path->pathrec.sgid = priv->local_gid;
    path->pathrec.pkey = cpu_to_be16(priv->pkey);
    path->pathrec.numb_path = 1;
    /* Inherit the traffic class negotiated for the broadcast group. */
    path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

    return path;
}
/*
 * Issue an asynchronous SA path record query for 'path'.  The MTU
 * constraint is derived from the interface MTU; if it does not map to
 * a known IB MTU the selector is wildcarded.  Returns 0 on success or
 * the negative query id on failure (path->done completed either way,
 * via the callback or directly on the error path).
 */
static int
path_rec_start(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{
    if_t dev = priv->dev;
    ib_sa_comp_mask comp_mask = IB_SA_PATH_REC_MTU_SELECTOR | IB_SA_PATH_REC_MTU;
    struct ib_sa_path_rec p_rec;

    p_rec = path->pathrec;
    p_rec.mtu_selector = IB_SA_GT;

    /* Require an IB MTU strictly greater than the next-lower size. */
    switch (roundup_pow_of_two(if_getmtu(dev) + IPOIB_ENCAP_LEN)) {
    case 512:
        p_rec.mtu = IB_MTU_256;
        break;
    case 1024:
        p_rec.mtu = IB_MTU_512;
        break;
    case 2048:
        p_rec.mtu = IB_MTU_1024;
        break;
    case 4096:
        p_rec.mtu = IB_MTU_2048;
        break;
    default:
        /* Wildcard everything */
        comp_mask = 0;
        p_rec.mtu = 0;
        p_rec.mtu_selector = 0;
    }

    ipoib_dbg(priv, "Start path record lookup for %16D MTU > %d\n",
        p_rec.dgid.raw, ":",
        comp_mask ? ib_mtu_enum_to_int(p_rec.mtu) : 0);

    init_completion(&path->done);

    path->query_id =
        ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
            &p_rec, comp_mask |
            IB_SA_PATH_REC_DGID |
            IB_SA_PATH_REC_SGID |
            IB_SA_PATH_REC_NUMB_PATH |
            IB_SA_PATH_REC_TRAFFIC_CLASS |
            IB_SA_PATH_REC_PKEY,
            1000, GFP_ATOMIC,
            path_rec_completion,
            path, &path->query);
    if (path->query_id < 0) {
        ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
        path->query = NULL;
        complete(&path->done);
        return path->query_id;
    }

    return 0;
}
/*
 * Transmit a unicast mbuf.  If no valid path exists yet, the mbuf is
 * queued on the path (bounded by IPOIB_MAX_PATH_REC_QUEUE) and an SA
 * path lookup is started; otherwise the packet goes out via the CM
 * connection or the UD address handle.  Undeliverable packets are
 * dropped and counted as output errors.
 */
static void
ipoib_unicast_send(struct mbuf *mb, struct ipoib_dev_priv *priv, struct ipoib_header *eh)
{
    struct ipoib_path *path;

    path = __path_find(priv, eh->hwaddr + 4);
    if (!path || !path->valid) {
        int new_path = 0;

        if (!path) {
            path = path_rec_create(priv, eh->hwaddr);
            new_path = 1;
        }
        if (path) {
            if (_IF_QLEN(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE)
                _IF_ENQUEUE(&path->queue, mb);
            else {
                if_inc_counter(priv->dev, IFCOUNTER_OERRORS, 1);
                m_freem(mb);
            }

            if (!path->query && path_rec_start(priv, path)) {
                if (new_path)
                    ipoib_path_free(priv, path);
                return;
            } else
                /*
                 * NOTE(review): for a pre-existing invalid path this
                 * re-add returns -EEXIST, which is deliberately
                 * ignored.
                 */
                __path_add(priv, path);
        } else {
            if_inc_counter(priv->dev, IFCOUNTER_OERRORS, 1);
            m_freem(mb);
        }

        return;
    }

    if (ipoib_cm_get(path) && ipoib_cm_up(path)) {
        ipoib_cm_send(priv, mb, ipoib_cm_get(path));
    } else if (path->ah) {
        ipoib_send(priv, mb, path->ah, IPOIB_QPN(eh->hwaddr));
    } else if ((path->query || !path_rec_start(priv, path)) &&
            path->queue.ifq_len < IPOIB_MAX_PATH_REC_QUEUE) {
        _IF_ENQUEUE(&path->queue, mb);
    } else {
        if_inc_counter(priv->dev, IFCOUNTER_OERRORS, 1);
        m_freem(mb);
    }
}
/*
 * Dispatch a single outgoing mbuf: multicast frames get the P_Key
 * stamped into the hardware address and go through the multicast path,
 * everything else is handled by ipoib_unicast_send().  Always returns 0.
 */
static int
ipoib_send_one(struct ipoib_dev_priv *priv, struct mbuf *mb)
{
    struct ipoib_header *header = mtod(mb, struct ipoib_header *);

    if (!IPOIB_IS_MULTICAST(header->hwaddr)) {
        ipoib_unicast_send(mb, priv, header);
        return 0;
    }

    /* Stamp the P_Key into the multicast hardware address. */
    header->hwaddr[8] = (priv->pkey >> 8) & 0xff;
    header->hwaddr[9] = priv->pkey & 0xff;
    ipoib_mcast_send(priv, header->hwaddr + 4, mb);

    return 0;
}
/*
 * Drain the interface send queue, tapping BPF and handing each mbuf to
 * ipoib_send_one().  Caller must hold priv->lock (asserted) and the
 * loop stops if the driver goes output-active.
 */
void
ipoib_start_locked(if_t dev, struct ipoib_dev_priv *priv)
{
    struct mbuf *mb;

    assert_spin_locked(&priv->lock);

    while (!if_sendq_empty(dev) &&
        (if_getdrvflags(dev) & IFF_DRV_OACTIVE) == 0) {
        mb = if_dequeue(dev);
        if (mb == NULL)
            break;
        infiniband_bpf_mtap(dev, mb);
        ipoib_send_one(priv, mb);
    }
}
/*
 * Common start routine: run the locked queue drain, but only when the
 * interface is running and not already marked output-active.
 */
static void
_ipoib_start(if_t dev, struct ipoib_dev_priv *priv)
{
    int flags;

    flags = if_getdrvflags(dev) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
    if (flags == IFF_DRV_RUNNING) {
        spin_lock(&priv->lock);
        ipoib_start_locked(dev, priv);
        spin_unlock(&priv->lock);
    }
}
/* if_start handler for the native ipoib interface. */
static void
ipoib_start(if_t dev)
{
    _ipoib_start(dev, if_getsoftc(dev));
}
/*
 * if_start handler for vlan child interfaces.  Uses the priv stashed in
 * the vlan cookie; if none is attached yet the queue is drained and the
 * packets dropped as output errors.
 */
static void
ipoib_vlan_start(if_t dev)
{
    struct ipoib_dev_priv *priv;
    struct mbuf *mb;

    priv = VLAN_COOKIE(dev);
    if (priv != NULL)
        return _ipoib_start(dev, priv);
    while (!if_sendq_empty(dev)) {
        mb = if_dequeue(dev);
        if (mb == NULL)
            break;
        m_freem(mb);
        if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
    }
}
/*
 * Allocate the software RX/TX rings and initialize the IB resources for
 * this port.  Returns 0 on success or -ENOMEM on any failure; rings
 * allocated before the failing step are freed on the error paths.
 *
 * Fix: dropped the redundant memset() of tx_ring -- kzalloc() already
 * returns zeroed memory.
 */
int
ipoib_dev_init(struct ipoib_dev_priv *priv, struct ib_device *ca, int port)
{

    /* Allocate RX/TX "rings" to hold queued mbs */
    priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
        GFP_KERNEL);
    if (!priv->rx_ring) {
        printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
            ca->name, ipoib_recvq_size);
        goto out;
    }

    priv->tx_ring = kzalloc(ipoib_sendq_size * sizeof *priv->tx_ring, GFP_KERNEL);
    if (!priv->tx_ring) {
        printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
            ca->name, ipoib_sendq_size);
        goto out_rx_ring_cleanup;
    }

    /* priv->tx_head, tx_tail & tx_outstanding are already 0 */

    if (ipoib_ib_dev_init(priv, ca, port))
        goto out_tx_ring_cleanup;

    return 0;

out_tx_ring_cleanup:
    kfree(priv->tx_ring);

out_rx_ring_cleanup:
    kfree(priv->rx_ring);

out:
    return -ENOMEM;
}
/*
 * Mark a parent interface gone and detach it from the network stack.
 * Child (vlan) sub-interfaces are left untouched here.
 */
static void
ipoib_ifdetach(struct ipoib_dev_priv *priv)
{

    if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
        return;

    priv->gone = 1;
    infiniband_ifdetach(priv->dev);
}
/*
 * Final teardown of a priv: parents free the ifnet and return their
 * unit number; children only clear the vlan cookie.  The priv itself
 * is freed in both cases.
 */
static void
ipoib_detach(struct ipoib_dev_priv *priv)
{
    if_t dev;

    dev = priv->dev;
    if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
        if_free(dev);
        free_unr(ipoib_unrhdr, priv->unit);
    } else
        VLAN_SETCOOKIE(priv->dev, NULL);

    free(priv, M_TEMP);
}
/*
 * Release a device's resources: recursively clean up and detach all
 * child interfaces, then tear down the IB state and free the RX/TX
 * rings.
 */
void
ipoib_dev_cleanup(struct ipoib_dev_priv *priv)
{
    struct ipoib_dev_priv *cpriv, *tcpriv;

    /* Delete any child interfaces first */
    list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
        ipoib_ifdetach(cpriv);
        ipoib_dev_cleanup(cpriv);
        ipoib_detach(cpriv);
    }

    ipoib_ib_dev_cleanup(priv);

    kfree(priv->rx_ring);
    kfree(priv->tx_ring);

    priv->rx_ring = NULL;
    priv->tx_ring = NULL;
}
/*
 * Allocate and initialize a zeroed priv structure: locks, lists, work
 * items, and the template broadcast address.  Cannot fail (M_WAITOK).
 */
static struct ipoib_dev_priv *
ipoib_priv_alloc(void)
{
    struct ipoib_dev_priv *priv;

    priv = malloc(sizeof(struct ipoib_dev_priv), M_TEMP, M_ZERO|M_WAITOK);
    spin_lock_init(&priv->lock);
    spin_lock_init(&priv->drain_lock);
    mutex_init(&priv->vlan_mutex);
    INIT_LIST_HEAD(&priv->path_list);
    INIT_LIST_HEAD(&priv->child_intfs);
    INIT_LIST_HEAD(&priv->dead_ahs);
    INIT_LIST_HEAD(&priv->multicast_list);
    INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
    INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
    INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
    INIT_WORK(&priv->flush_light,   ipoib_ib_dev_flush_light);
    INIT_WORK(&priv->flush_normal,   ipoib_ib_dev_flush_normal);
    INIT_WORK(&priv->flush_heavy,   ipoib_ib_dev_flush_heavy);
    INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
    INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
    memcpy(priv->broadcastaddr, ipv4_bcast_addr, INFINIBAND_ALEN);

    return (priv);
}
/*
 * Allocate a priv plus its ifnet, assign a unit number, and attach the
 * interface to the network stack in the "initializing" state
 * (priv->gone == 2).  Returns the priv, or NULL if no unit number is
 * available.
 *
 * NOTE(review): this patch drops the NULL check after if_alloc() --
 * presumably because if_alloc(9) can no longer fail; confirm against
 * the target branch.
 */
struct ipoib_dev_priv *
ipoib_intf_alloc(const char *name, struct ib_device *hca)
{
    struct ipoib_dev_priv *priv;
    if_t dev;

    priv = ipoib_priv_alloc();
    dev = priv->dev = if_alloc(IFT_INFINIBAND);
-	if (!dev) {
-		free(priv, M_TEMP);
-		return NULL;
-	}
    if_setsoftc(dev, priv);
    priv->gone = 2;	/* initializing */
    priv->unit = alloc_unr(ipoib_unrhdr);
    if (priv->unit == -1) {
        if_free(dev);
        free(priv, M_TEMP);
        return NULL;
    }
    if_initname(dev, name, priv->unit);
    if_setflags(dev, IFF_BROADCAST | IFF_MULTICAST);
    if ((hca->attrs.device_cap_flags & IB_DEVICE_KNOWSEPOCH) == 0)
        if_setflagbits(dev, IFF_NEEDSEPOCH, 0);

    infiniband_ifattach(priv->dev, NULL, priv->broadcastaddr);

    if_setinitfn(dev, ipoib_init);
    if_setioctlfn(dev, ipoib_ioctl);
    if_setstartfn(dev, ipoib_start);

    if_setsendqlen(dev, ipoib_sendq_size * 2);

    priv->dev = dev;
    if_link_state_change(priv->dev, LINK_STATE_DOWN);

    return if_getsoftc(dev);
}
/*
 * Derive interface capabilities from the HCA capability flags:
 * checksum offload (datagram mode only), VLAN tagging/MTU, and link
 * state.  Always returns 0.
 */
int
ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
    struct ib_device_attr *device_attr = &hca->attrs;

    priv->hca_caps = device_attr->device_cap_flags;

    if_sethwassist(priv->dev, 0);
    if_setcapabilities(priv->dev, 0);

#ifndef CONFIG_INFINIBAND_IPOIB_CM
    if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
        set_bit(IPOIB_FLAG_CSUM, &priv->flags);
        if_sethwassist(priv->dev, CSUM_IP | CSUM_TCP | CSUM_UDP);
        if_setcapabilities(priv->dev, IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM);
    }

#if 0
    if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO) {
        priv->dev->if_capabilities |= IFCAP_TSO4;
        priv->dev->if_hwassist |= CSUM_TSO;
    }
#endif
#endif
    if_setcapabilitiesbit(priv->dev,
        IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_LINKSTATE, 0);
    if_setcapenable(priv->dev, if_getcapabilities(priv->dev));

    return 0;
}
/*
 * Create and initialize an IPoIB interface for one HCA port: query
 * port/P_Key/GID attributes, set MTUs, initialize device state, and
 * register the IB event handler.  Returns the ifnet on success or an
 * ERR_PTR() on failure, unwinding via the labelled error paths.
 */
static if_t
ipoib_add_port(const char *format, struct ib_device *hca, u8 port)
{
    struct ipoib_dev_priv *priv;
    struct ib_port_attr attr;
    int result = -ENOMEM;

    priv = ipoib_intf_alloc(format, hca);
    if (!priv)
        goto alloc_mem_failed;

    if (!ib_query_port(hca, port, &attr))
        priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
    else {
        printk(KERN_WARNING "%s: ib_query_port %d failed\n",
            hca->name, port);
        goto device_init_failed;
    }

    /* MTU will be reset when mcast join happens */
    if_setmtu(priv->dev, IPOIB_UD_MTU(priv->max_ib_mtu));
    priv->mcast_mtu = priv->admin_mtu = if_getmtu(priv->dev);

    result = ib_query_pkey(hca, port, 0, &priv->pkey);
    if (result) {
        printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
            hca->name, port, result);
        goto device_init_failed;
    }

    if (ipoib_set_dev_features(priv, hca))
        goto device_init_failed;

    /*
     * Set the full membership bit, so that we join the right
     * broadcast group, etc.
     */
    priv->pkey |= 0x8000;

    priv->broadcastaddr[8] = priv->pkey >> 8;
    priv->broadcastaddr[9] = priv->pkey & 0xff;

    result = ib_query_gid(hca, port, 0, &priv->local_gid, NULL);
    if (result) {
        printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
            hca->name, port, result);
        goto device_init_failed;
    }
    /* The port GID forms bytes 4..19 of the link-level address. */
    memcpy(if_getlladdr(priv->dev) + 4, priv->local_gid.raw, sizeof(union ib_gid));

    result = ipoib_dev_init(priv, hca, port);
    if (result < 0) {
        printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
            hca->name, port, result);
        goto device_init_failed;
    }
    if (ipoib_cm_admin_enabled(priv))
        if_setmtu(priv->dev, IPOIB_CM_MTU(ipoib_cm_max_mtu(priv)));

    INIT_IB_EVENT_HANDLER(&priv->event_handler,
                  priv->ca, ipoib_event);
    result = ib_register_event_handler(&priv->event_handler);
    if (result < 0) {
        printk(KERN_WARNING "%s: ib_register_event_handler failed for "
            "port %d (ret = %d)\n",
            hca->name, port, result);
        goto event_failed;
    }
    if_printf(priv->dev, "Attached to %s port %d\n", hca->name, port);

    priv->gone = 0;	/* ready */

    return priv->dev;

event_failed:
    ipoib_dev_cleanup(priv);

device_init_failed:
    ipoib_ifdetach(priv);
    ipoib_detach(priv);

alloc_mem_failed:
    return ERR_PTR(result);
}
/*
 * IB client 'add' callback: create one IPoIB interface per InfiniBand
 * link-layer port of the device (switches expose port 0 only) and
 * store the resulting list as the client data.
 */
static void
ipoib_add_one(struct ib_device *device)
{
    struct list_head *dev_list;
    if_t dev;
    struct ipoib_dev_priv *priv;
    int s, e, p;

    if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
        return;

    dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
    if (!dev_list)
        return;

    INIT_LIST_HEAD(dev_list);

    if (device->node_type == RDMA_NODE_IB_SWITCH) {
        s = 0;
        e = 0;
    } else {
        s = 1;
        e = device->phys_port_cnt;
    }

    for (p = s; p <= e; ++p) {
        if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND)
            continue;
        dev = ipoib_add_port("ib", device, p);
        if (!IS_ERR(dev)) {
            priv = if_getsoftc(dev);
            list_add_tail(&priv->list, dev_list);
        }
    }

    ib_set_client_data(device, &ipoib_client, dev_list);
}
/*
 * IB client 'remove' callback: stop and destroy every interface that
 * was created for this device, flushing queued work before cleanup.
 */
static void
ipoib_remove_one(struct ib_device *device, void *client_data)
{
    struct ipoib_dev_priv *priv, *tmp;
    struct list_head *dev_list = client_data;

    if (!dev_list)
        return;

    if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
        return;

    list_for_each_entry_safe(priv, tmp, dev_list, list) {
        if (rdma_port_get_link_layer(device, priv->port) != IB_LINK_LAYER_INFINIBAND)
            continue;

        ipoib_ifdetach(priv);
        ipoib_stop(priv);

        ib_unregister_event_handler(&priv->event_handler);

        flush_workqueue(ipoib_workqueue);

        ipoib_dev_cleanup(priv);
        ipoib_detach(priv);
    }

    kfree(dev_list);
}
/*
 * if_foreach_addr_type() callback: contribute 1 for the first address
 * that compares equal (length and bytes) to the sockaddr in 'arg',
 * 0 otherwise.
 */
static u_int
ipoib_match_dev_addr_cb(void *arg, struct ifaddr *ifa, u_int count)
{
    const struct sockaddr *want = arg;

    /* Stop contributing once an earlier address already matched. */
    if (count > 0)
        return (0);

    return (ifa->ifa_addr->sa_len == want->sa_len &&
        memcmp(ifa->ifa_addr, want, want->sa_len) == 0);
}
/*
 * Return non-zero if 'dev' carries an address equal to 'addr'.
 * Walks the interface's addresses of the matching family inside a
 * network epoch section.
 */
static int
ipoib_match_dev_addr(const struct sockaddr *addr, if_t dev)
{
    struct epoch_tracker et;
    int retval = 0;

    NET_EPOCH_ENTER(et);
    retval = if_foreach_addr_type(dev, addr->sa_family,
        ipoib_match_dev_addr_cb, __DECONST(void *, addr));
    NET_EPOCH_EXIT(et);

    return (retval);
}
/*
 * ipoib_match_gid_pkey_addr - returns the number of IPoIB netdevs on
 * top a given ipoib device matching a pkey_index and address, if one
 * exists.
 *
 * @found_net_dev: contains a matching net_device if the return value
 * >= 1, with a reference held.
 */
static int
ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
    const union ib_gid *gid, u16 pkey_index, const struct sockaddr *addr,
    if_t *found_net_dev)
{
    struct ipoib_dev_priv *child_priv;
    int matches = 0;

    if (priv->pkey_index == pkey_index &&
        (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
        if (addr == NULL || ipoib_match_dev_addr(addr, priv->dev) != 0) {
            if (*found_net_dev == NULL) {
                if_t net_dev;

                /* Report the parent device for sub-interfaces. */
                if (priv->parent != NULL)
                    net_dev = priv->parent;
                else
                    net_dev = priv->dev;
                *found_net_dev = net_dev;
                dev_hold(net_dev);
            }
            matches++;
        }
    }

    /* Check child interfaces */
    mutex_lock(&priv->vlan_mutex);
    list_for_each_entry(child_priv, &priv->child_intfs, list) {
        matches += ipoib_match_gid_pkey_addr(child_priv, gid,
            pkey_index, addr, found_net_dev);
        /* Two matches already make the result ambiguous; stop early. */
        if (matches > 1)
            break;
    }
    mutex_unlock(&priv->vlan_mutex);

    return matches;
}
/*
 * __ipoib_get_net_dev_by_params - returns the number of matching
 * net_devs found (between 0 and 2). Also return the matching
 * net_device in the @net_dev parameter, holding a reference to the
 * net_device, if the number of matches >= 1
 */
static int
__ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
    u16 pkey_index, const union ib_gid *gid,
    const struct sockaddr *addr, if_t *net_dev)
{
    struct ipoib_dev_priv *priv;
    int matches = 0;

    *net_dev = NULL;

    list_for_each_entry(priv, dev_list, list) {
        if (priv->port != port)
            continue;

        matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index,
            addr, net_dev);
        if (matches > 1)
            break;
    }

    return matches;
}
/*
 * IB client callback: find the ifnet matching the given L2 parameters
 * (port, pkey, gid), falling back to the L3 address when the L2 match
 * is not unique.  Returns the ifnet with a reference held, or NULL.
 */
static if_t
ipoib_get_net_dev_by_params(struct ib_device *dev, u8 port, u16 pkey,
    const union ib_gid *gid, const struct sockaddr *addr, void *client_data)
{
    if_t net_dev;
    struct list_head *dev_list = client_data;
    u16 pkey_index;
    int matches;
    int ret;

    if (!rdma_protocol_ib(dev, port))
        return NULL;

    ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index);
    if (ret)
        return NULL;

    if (!dev_list)
        return NULL;

    /* See if we can find a unique device matching the L2 parameters */
    matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
        gid, NULL, &net_dev);

    switch (matches) {
    case 0:
        return NULL;
    case 1:
        return net_dev;
    }

    /* Ambiguous: drop the L2-only reference and retry with L3. */
    dev_put(net_dev);

    /* Couldn't find a unique device with L2 parameters only. Use L3
     * address to uniquely match the net device */
    matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
        gid, addr, &net_dev);
    switch (matches) {
    case 0:
        return NULL;
    default:
        dev_warn_ratelimited(&dev->dev,
            "duplicate IP address detected\n");
        /* Fall through */
    case 1:
        return net_dev;
    }
}
/*
 * vlan_config event handler: create an IPoIB child interface for VLAN
 * tag "vtag" on parent ifnet "ifp".  The 15-bit tag doubles as the low
 * bits of the child's IB pkey, with the full-membership bit forced on.
 */
static void
ipoib_config_vlan(void *arg, if_t ifp, uint16_t vtag)
{
	struct ipoib_dev_priv *parent;
	struct ipoib_dev_priv *priv;
	struct epoch_tracker et;
	if_t dev;
	uint16_t pkey;
	int error;

	/* Only react on IPoIB parent interfaces. */
	if (if_gettype(ifp) != IFT_INFINIBAND)
		return;
	/* Look up the vlan(4) ifnet created for this tag. */
	NET_EPOCH_ENTER(et);
	dev = VLAN_DEVAT(ifp, vtag);
	NET_EPOCH_EXIT(et);
	if (dev == NULL)
		return;
	priv = NULL;
	error = 0;
	parent = if_getsoftc(ifp);
	/* We only support 15 bits of pkey. */
	if (vtag & 0x8000)
		return;
	pkey = vtag | 0x8000; /* Set full membership bit. */
	if (pkey == parent->pkey)
		return;
	/* Check for dups */
	mutex_lock(&parent->vlan_mutex);
	list_for_each_entry(priv, &parent->child_intfs, list) {
		if (priv->pkey == pkey) {
			/* Clear priv so the out: path won't free a live child. */
			priv = NULL;
			error = EBUSY;
			goto out;
		}
	}
	/* Allocate the child softc and inherit MTU limits from the parent. */
	priv = ipoib_priv_alloc();
	priv->dev = dev;
	priv->max_ib_mtu = parent->max_ib_mtu;
	priv->mcast_mtu = priv->admin_mtu = if_getmtu(parent->dev);
	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
	error = ipoib_set_dev_features(priv, parent->ca);
	if (error)
		goto out;
	priv->pkey = pkey;
	/* Embed the pkey into bytes 8/9 of the broadcast address. */
	priv->broadcastaddr[8] = pkey >> 8;
	priv->broadcastaddr[9] = pkey & 0xff;
	if_setbroadcastaddr(dev, priv->broadcastaddr);
	error = ipoib_dev_init(priv, parent->ca, parent->port);
	if (error)
		goto out;
	priv->parent = parent->dev;
	list_add_tail(&priv->list, &parent->child_intfs);
	/* Hook the vlan ifnet up to IPoIB transmit handling. */
	VLAN_SETCOOKIE(dev, priv);
	if_setstartfn(dev, ipoib_vlan_start);
	if_setdrvflagbits(dev, 0, IFF_DRV_RUNNING);
	if_setifheaderlen(dev, IPOIB_HEADER_LEN);
	/* Bring the child up right away if the parent is running. */
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		ipoib_open(priv);
	mutex_unlock(&parent->vlan_mutex);
	return;
out:
	mutex_unlock(&parent->vlan_mutex);
	/* NOTE(review): assumes ipoib_priv_alloc() uses M_TEMP — confirm. */
	if (priv)
		free(priv, M_TEMP);
	if (error)
		ipoib_warn(parent,
		    "failed to initialize subinterface: device %s, port %d vtag 0x%X",
		    parent->ca->name, parent->port, vtag);
	return;
}
/*
 * vlan_unconfig event handler: tear down the IPoIB child interface that
 * was created for VLAN tag "vtag" on parent ifnet "ifp".
 */
static void
ipoib_unconfig_vlan(void *arg, if_t ifp, uint16_t vtag)
{
	struct ipoib_dev_priv *parent, *cpriv;
	struct epoch_tracker et;
	if_t dev;
	uint16_t pkey;

	if (if_gettype(ifp) != IFT_INFINIBAND)
		return;

	/* Detach the softc cookie from the vlan ifnet, if it still exists. */
	NET_EPOCH_ENTER(et);
	dev = VLAN_DEVAT(ifp, vtag);
	NET_EPOCH_EXIT(et);
	if (dev != NULL)
		VLAN_SETCOOKIE(dev, NULL);

	/* Reconstruct the pkey with the full-membership bit set. */
	pkey = vtag | 0x8000;
	parent = if_getsoftc(ifp);

	mutex_lock(&parent->vlan_mutex);
	list_for_each_entry(cpriv, &parent->child_intfs, list) {
		if (cpriv->pkey != pkey)
			continue;
		ipoib_dev_cleanup(cpriv);
		list_del(&cpriv->list);
		break;
	}
	mutex_unlock(&parent->vlan_mutex);
}
/* Cookies for the vlan(4) config/unconfig event handler registrations. */
eventhandler_tag ipoib_vlan_attach;
eventhandler_tag ipoib_vlan_detach;
/*
 * Module initialization: clamp the tunable queue sizes, register the
 * vlan(4) event handlers, create the private workqueue, and register
 * with the IB SA and client frameworks.  Returns 0 or a negative errno.
 */
static int __init
ipoib_init_module(void)
{
	int ret;

	/* Round ring sizes to powers of two within the supported bounds. */
	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE,
	    IPOIB_MIN_QUEUE_SIZE));
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	ipoib_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ipoib_config_vlan, NULL, EVENTHANDLER_PRI_FIRST);
	ipoib_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ipoib_unconfig_vlan, NULL, EVENTHANDLER_PRI_FIRST);

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed. We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	return 0;

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	/*
	 * Fix: undo the event handler registrations made above.  The
	 * original code leaked them on failure, leaving callbacks
	 * registered that point into a module that failed to load.
	 */
	EVENTHANDLER_DEREGISTER(vlan_config, ipoib_vlan_attach);
	EVENTHANDLER_DEREGISTER(vlan_unconfig, ipoib_vlan_detach);
	return ret;
}
/*
 * Module teardown: undo ipoib_init_module() in reverse order.
 */
static void __exit
ipoib_cleanup_module(void)
{
	/* Stop reacting to vlan(4) create/destroy events first. */
	EVENTHANDLER_DEREGISTER(vlan_config, ipoib_vlan_attach);
	EVENTHANDLER_DEREGISTER(vlan_unconfig, ipoib_vlan_detach);
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);
}
/* Hook init/teardown into the linuxkpi module init/exit ordering. */
module_init_order(ipoib_init_module, SI_ORDER_FIFTH);
module_exit_order(ipoib_cleanup_module, SI_ORDER_FIFTH);
/*
 * Module event handler stub; all real setup/teardown happens in
 * ipoib_init_module()/ipoib_cleanup_module() above.
 */
static int
ipoib_evhand(module_t mod, int event, void *arg)
{
	return (0);
}
/* Kernel module glue and load-order dependencies. */
static moduledata_t ipoib_mod = {
	.name = "ipoib",
	.evhand = ipoib_evhand,
};
DECLARE_MODULE(ipoib, ipoib_mod, SI_SUB_LAST, SI_ORDER_ANY);
MODULE_DEPEND(ipoib, ibcore, 1, 1, 1);
MODULE_DEPEND(ipoib, if_infiniband, 1, 1, 1);
MODULE_DEPEND(ipoib, linuxkpi, 1, 1, 1);

File Metadata

Mime Type
application/octet-stream
Expires
Wed, Jul 10, 8:16 AM (2 d)
Storage Engine
chunks
Storage Format
Chunks
Storage Handle
Tmx133BjOClA
Default Alt Text
(7 MB)

Event Timeline